diff --git a/go.mod b/go.mod
index 8de84b349..326fe1020 100644
--- a/go.mod
+++ b/go.mod
@@ -14,12 +14,13 @@ require (
 	github.com/IBM/networking-go-sdk v0.26.0
 	github.com/aliyun/alibaba-cloud-sdk-go v1.61.1286
 	github.com/aws/aws-sdk-go v1.38.49
-	github.com/davecgh/go-spew v1.1.1
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
 	github.com/florianl/go-nfqueue v1.3.2
-	github.com/go-logr/logr v1.4.1
+	github.com/go-logr/logr v1.4.2
 	github.com/go-logr/zapr v1.3.0
 	github.com/google/go-cmp v0.6.0
 	github.com/google/gopacket v1.1.19
+	github.com/istio-ecosystem/sail-operator v0.0.0-20241001175228-c7164d4a99dd
 	github.com/jongio/azidext/go/azidext v0.4.0
 	github.com/maistra/istio-operator v0.0.0-20240712143246-fd7dfc8af831
 	github.com/openshift/api v3.9.1-0.20190924102528-32369d4db2ad+incompatible
@@ -27,32 +28,31 @@ require (
 	github.com/openshift/library-go v0.0.0-20240419113445-f1541d628746
 	github.com/operator-framework/api v0.15.0
 	github.com/pkg/errors v0.9.1
-	github.com/prometheus/client_golang v1.18.0
-	github.com/prometheus/common v0.45.0
-	github.com/spf13/cobra v1.7.0
-	github.com/stretchr/testify v1.8.4
+	github.com/prometheus/client_golang v1.19.1
+	github.com/prometheus/common v0.55.0
+	github.com/spf13/cobra v1.8.1
+	github.com/stretchr/testify v1.9.0
 	github.com/summerwind/h2spec v0.0.0-20200804131034-70ac22940108
 	github.com/tcnksm/go-httpstat v0.2.1-0.20191008022543-e866bb274419
-	go.uber.org/zap v1.26.0
-	golang.org/x/exp v0.0.0-20220827204233-334a2380cb91
-	golang.org/x/time v0.3.0
+	go.uber.org/zap v1.27.0
+	golang.org/x/exp v0.0.0-20240716175740-e3f259677ff7
+	golang.org/x/time v0.5.0
 	google.golang.org/api v0.126.0
-	google.golang.org/grpc v1.58.3
+	google.golang.org/grpc v1.65.0
 	gopkg.in/fsnotify.v1 v1.4.7
 	gopkg.in/yaml.v2 v2.4.0
-	k8s.io/api v0.30.2
-	k8s.io/apiextensions-apiserver v0.30.2
-	k8s.io/apimachinery v0.30.2
-	k8s.io/apiserver v0.30.2
+	k8s.io/api v0.30.3
+	k8s.io/apiextensions-apiserver v0.30.3
+	k8s.io/apimachinery v0.30.3
+	k8s.io/apiserver v0.30.3
 	k8s.io/client-go v12.0.0+incompatible
-	k8s.io/utils v0.0.0-20230726121419-3b25d923346b
+	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
 	sigs.k8s.io/controller-runtime v0.18.4
 	sigs.k8s.io/gateway-api v0.5.1-0.20220921185115-ee7a83814203
 )

 require (
-	cloud.google.com/go/compute v1.23.0 // indirect
-	cloud.google.com/go/compute/metadata v0.2.3 // indirect
+	cloud.google.com/go/compute/metadata v0.3.0 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
 	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
 	github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect
@@ -64,24 +64,24 @@ require (
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/blang/semver/v4 v4.0.0 // indirect
-	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect
-	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
-	github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect
-	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
+	github.com/cespare/xxhash/v2 v2.3.0 // indirect
+	github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b // indirect
+	github.com/emicklei/go-restful/v3 v3.12.0 // indirect
+	github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect
+	github.com/evanphx/json-patch v5.9.0+incompatible // indirect
 	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
-	github.com/fatih/color v1.16.0 // indirect
+	github.com/fatih/color v1.17.0 // indirect
 	github.com/fsnotify/fsnotify v1.7.0 // indirect
 	github.com/gabriel-vasile/mimetype v1.4.3 // indirect
 	github.com/go-openapi/errors v0.21.0 // indirect
-	github.com/go-openapi/jsonpointer v0.19.6 // indirect
-	github.com/go-openapi/jsonreference v0.20.2 // indirect
+	github.com/go-openapi/jsonpointer v0.21.0 // indirect
+	github.com/go-openapi/jsonreference v0.21.0 // indirect
 	github.com/go-openapi/strfmt v0.22.1 // indirect
-	github.com/go-openapi/swag v0.22.3 // indirect
+	github.com/go-openapi/swag v0.23.0 // indirect
 	github.com/go-playground/locales v0.14.1 // indirect
 	github.com/go-playground/universal-translator v0.18.1 // indirect
 	github.com/go-playground/validator/v10 v10.19.0 // indirect
-	github.com/goccy/go-yaml v1.8.8 // indirect
+	github.com/goccy/go-yaml v1.12.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
@@ -92,10 +92,10 @@ require (
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
 	github.com/googleapis/gax-go/v2 v2.11.0 // indirect
-	github.com/gorilla/websocket v1.5.0 // indirect
+	github.com/gorilla/websocket v1.5.1 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
 	github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
-	github.com/imdario/mergo v0.3.12 // indirect
+	github.com/imdario/mergo v1.0.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
@@ -106,7 +106,6 @@ require (
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
-	github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
 	github.com/mdlayher/netlink v1.6.0 // indirect
 	github.com/mdlayher/socket v0.1.1 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
@@ -119,33 +118,33 @@ require (
 	github.com/operator-framework/operator-sdk v0.18.0 // indirect
 	github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_model v0.5.0 // indirect
-	github.com/prometheus/procfs v0.12.0 // indirect
+	github.com/prometheus/client_model v0.6.1 // indirect
+	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/robfig/cron v1.2.0 // indirect
-	github.com/sirupsen/logrus v1.9.0 // indirect
+	github.com/sirupsen/logrus v1.9.3 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	go.mongodb.org/mongo-driver v1.14.0 // indirect
 	go.opencensus.io v0.24.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	golang.org/x/crypto v0.21.0 // indirect
-	golang.org/x/net v0.23.0 // indirect
-	golang.org/x/oauth2 v0.12.0 // indirect
-	golang.org/x/sync v0.6.0 // indirect
-	golang.org/x/sys v0.20.0 // indirect
-	golang.org/x/term v0.18.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
-	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
+	golang.org/x/crypto v0.25.0 // indirect
+	golang.org/x/net v0.27.0 // indirect
+	golang.org/x/oauth2 v0.21.0 // indirect
+	golang.org/x/sync v0.7.0 // indirect
+	golang.org/x/sys v0.22.0 // indirect
+	golang.org/x/term v0.22.0 // indirect
+	golang.org/x/text v0.16.0 // indirect
+	golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
-	google.golang.org/protobuf v1.33.0 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d // indirect
+	google.golang.org/protobuf v1.34.2 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.62.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/component-base v0.30.2 // indirect
-	k8s.io/klog/v2 v2.120.1 // indirect
+	k8s.io/component-base v0.30.3 // indirect
+	k8s.io/klog/v2 v2.130.1 // indirect
 	k8s.io/kube-aggregator v0.29.0 // indirect
-	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
+	k8s.io/kube-openapi v0.0.0-20240423202451-8948a665c108 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
@@ -159,3 +158,5 @@ replace (
 	github.com/openshift/api => github.com/openshift/api v0.0.0-20240806152114-6b4a57ec20b0
 	k8s.io/client-go => k8s.io/client-go v0.30.2
 )
+
+replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.5
diff --git a/go.sum b/go.sum
index b6c6bd75b..286a62b93 100644
--- a/go.sum
+++ b/go.sum
@@ -178,13 +178,12 @@ cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOV
 cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU=
 cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI=
 cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
-cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
-cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
 cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU=
 cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
 cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
-cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
 cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
+cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
 cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY=
 cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck=
 cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w=
@@ -772,8 +771,9 @@ github.com/cespare/xxhash v0.0.0-20181017004759-096ff4a8a059/go.mod h1:XrSqR1Vqq
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
@@ -794,8 +794,8 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH
 github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
-github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=
+github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
 github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
 github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
@@ -836,7 +836,7 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc
 github.com/coreos/prometheus-operator v0.38.1-0.20200424145508-7e176fda06cc/go.mod h1:erio69w1R/aC14D5nfvAXSlE8FT8jt2Hnavc50Dp33A=
 github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
@@ -852,8 +852,9 @@ github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0
 github.com/cznic/zappy v0.0.0-20160723133515-2533cb5b45cc/go.mod h1:Y1SNZ4dRUOKXshKUbwUapqNncRrho4mkjQebgEHZLj8=
 github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
 github.com/deislabs/oras v0.8.1/go.mod h1:Mx0rMSbBNaNfY9hjpccEnxkOqJL6KGjtxNHPLC4G4As=
 github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
@@ -909,8 +910,9 @@ github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT
 github.com/emicklei/go-restful v2.10.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/emicklei/go-restful v2.11.1+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
 github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk=
+github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -926,16 +928,16 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
 github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
 github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
 github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
-github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
-github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
+github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
+github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
 github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
-github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
+github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
 github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
 github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
@@ -944,8 +946,8 @@ github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwo
 github.com/fatih/color v0.0.0-20161025120501-bf82308e8c85/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
-github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
-github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
+github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=
+github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI=
 github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
 github.com/florianl/go-nfqueue v1.3.2 h1:8DPzhKJHywpHJAE/4ktgcqveCL7qmMLsEsVD68C4x4I=
 github.com/florianl/go-nfqueue v1.3.2/go.mod h1:eSnAor2YCfMCVYrVNEhkLGN/r1L+J4uDjc0EUy0tfq4=
@@ -993,8 +995,9 @@ github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV
 github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
-github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
 github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
 github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
 github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
@@ -1018,8 +1021,9 @@ github.com/go-openapi/jsonpointer v0.17.2/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwds
 github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
 github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
 github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
+github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
 github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
 github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
 github.com/go-openapi/jsonreference v0.17.2/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
@@ -1027,8 +1031,9 @@ github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3Hfo
 github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
 github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
 github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
-github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
 github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
+github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
 github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
 github.com/go-openapi/loads v0.17.2/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
 github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
@@ -1060,8 +1065,9 @@ github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/
 github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
 github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
 github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
+github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
 github.com/go-openapi/validate v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
 github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
 github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
@@ -1087,6 +1093,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
 github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
 github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
 github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
@@ -1121,8 +1129,9 @@ github.com/gobuffalo/packr/v2 v2.7.1/go.mod h1:qYEvAazPaVxy7Y7KR0W8qYEE+RymX74kE
 github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
 github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
 github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
-github.com/goccy/go-yaml v1.8.8 h1:MGfRB1GeSn/hWXYWS2Pt67iC2GJNnebdIro01ddyucA=
 github.com/goccy/go-yaml v1.8.8/go.mod h1:U/jl18uSupI5rdI2jmuCswEA2htH9eXfferR3KfscvA=
+github.com/goccy/go-yaml v1.12.0 h1:/1WHjnMsI1dlIBQutrvSMGZRQufVO3asrHfTwfACoPM=
+github.com/goccy/go-yaml v1.12.0/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU=
 github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0=
 github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
 github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
@@ -1253,8 +1262,9 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe
 github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
 github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg=
+github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM=
 github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
@@ -1315,8 +1325,9 @@ github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB7
 github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
+github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
 github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo=
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
 github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
@@ -1385,17 +1396,14 @@ github.com/iancoleman/strcase v0.0.0-20190422225806-e506e3ef7365/go.mod h1:SK73t
 github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
 github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
-github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/influxdata/influxdb v1.7.7/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
+github.com/istio-ecosystem/sail-operator v0.0.0-20241001175228-c7164d4a99dd h1:54xMphKUhH9g/obU8nYSUCcf9pTd3+Skd1JGWBwYfc0=
+github.com/istio-ecosystem/sail-operator v0.0.0-20241001175228-c7164d4a99dd/go.mod h1:ejeHHKkI//Bajypdq5Bpa/rs+PniHjjcXBP1+12AHw0=
 github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
 github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
 github.com/jessevdk/go-flags v0.0.0-20180331124232-1c38ed7ad0cc/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
@@ -1535,8 +1543,6 @@ github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsO
 github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
 github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
-github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
 github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
 github.com/mdlayher/netlink v1.6.0 h1:rOHX5yl7qnlpiVkFWoqccueppMtXzeziFjWAjLg6sz0=
 github.com/mdlayher/netlink v1.6.0/go.mod h1:0o3PlBmGst1xve7wQ7j/hwpNaFaH4qCRyWCdcZk8/vA=
@@ -1634,8 +1640,8 @@ github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7
 github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
 github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM=
 github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
-github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8=
-github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
+github.com/onsi/ginkgo/v2 v2.17.3 h1:oJcvKpIb7/8uLpDDtnQuf18xVnwKp8DTD7DQ6gTd/MU=
+github.com/onsi/ginkgo/v2 v2.17.3/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -1664,8 +1670,8 @@ github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3ev
 github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
 github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
 github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk=
-github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk=
-github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg=
+github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
+github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
 github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -1753,8 +1759,8 @@ github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQ
 github.com/prometheus/client_golang v1.2.0/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U=
 github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U=
 github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
-github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
-github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
+github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
+github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
 github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
@@ -1762,8 +1768,8 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
-github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
-github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
 github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
@@ -1773,8 +1779,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
 github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
 github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
-github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
+github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
+github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
 github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -1787,8 +1793,8 @@ github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa
 github.com/prometheus/procfs v0.0.6/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
 github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
 github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
-github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
 github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s=
 github.com/prometheus/prometheus v1.8.2-0.20200110114423-1e64d757f711/go.mod h1:7U90zPoLkWjEIQcy/rweQla82OCTUzxVHE51G3OhJbI=
 github.com/prometheus/prometheus v2.3.2+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s=
@@ -1809,8 +1815,9 @@ github.com/rogpeppe/go-internal v1.4.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE
 github.com/rogpeppe/go-internal v1.5.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
 github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
 github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
 github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
+github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
 github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/rubenv/sql-migrate v0.0.0-20200212082348-64f95ea68aa3/go.mod h1:rtQlpHw+eR6UrqaS3kX1VYeaCxzCVdimDS7g5Ln4pPc=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
@@ -1840,8 +1847,8 @@ github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx
 github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
-github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
-github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w=
 github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
@@ -1867,8 +1874,8 @@ github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHN
 github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
 github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
 github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
-github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
-github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
 github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
 github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@@ -1895,8 +1902,9 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
 github.com/summerwind/h2spec v0.0.0-20200804131034-70ac22940108 h1:ecwW3RnjsJlnVZkvXHaxa+Hg0EW+6KF8mFovyFk+WZA=
 github.com/summerwind/h2spec v0.0.0-20200804131034-70ac22940108/go.mod h1:jn/HqC28P+sr5C23xwQQyRzsvJPpWnPGX20sJKIJIwE=
@@ -2000,8 +2008,8 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E
 go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
 go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
-go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
-go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
 golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -2042,8 +2050,9 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY
 golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
 golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
 golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
-golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
 golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
+golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
 golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -2059,8 +2068,9 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 h1:tnebWN09GYg9OLPss1KXj8txwZc6X6uMr6VFdcGNbHw=
 golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
+golang.org/x/exp v0.0.0-20240716175740-e3f259677ff7 h1:wDLEX9a7YQoKdKNQt88rtydkqDxeGaBUTnIYc3iG/mA=
+golang.org/x/exp v0.0.0-20240716175740-e3f259677ff7/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
 golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@@ -2206,8 +2216,9 @@ golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
 golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
 golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
 golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
-golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
 golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
+golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -2241,8 +2252,8 @@ golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw
 golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
 golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE=
 golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
-golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4=
-golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4=
+golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
+golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -2264,8 +2275,9 @@ golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
 golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
 golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
 golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -2404,8 +2416,8 @@ golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
-golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
+golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/telemetry v0.0.0-20240208230135-b75ee8823808/go.mod h1:KG1lNk5ZFNssSZLrpVb4sMXKMpGwGXOxSG3rnu2gZQQ=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -2423,8 +2435,9 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
 golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
 golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
 golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
-golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
 golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
+golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
 golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -2447,16 +2460,18 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
 golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
+golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -2560,8 +2575,9 @@ golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM
 golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
 golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
 golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
-golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ=
 golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
+golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
+golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -2569,8 +2585,9 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
 golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
 golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 h1:LLhsEBxRTBLuKlQxFBYUOU8xyFgXv6cOTp2HASDlsDk=
+golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
 gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
 gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
 gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
@@ -2801,20 +2818,19 @@ google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaL
 google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY=
 google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64=
 google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g=
-google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
 google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8=
 google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
 google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
 google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
-google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw=
-google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY=
+google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0=
 google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d h1:JU0iKnSg02Gmb5ZdV8nYsKEKsP6o/FGVWTrw4i1DA9A=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
 google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@@ -2863,8 +2879,8 @@ google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v
 google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
 google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
 google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
-google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
-google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
+google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
+google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -2884,8 +2900,9 @@ google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw
 google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
 google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
 gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
@@ -2964,16 +2981,17 @@ k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA=
 k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI=
 k8s.io/api v0.19.3/go.mod h1:VF+5FT1B74Pw3KxMdKyinLo+zynBaMBiAfGMuldcNDs=
 k8s.io/api v0.30.1/go.mod h1:ddbN2C0+0DIiPntan/bye3SW3PdwLa11/0yqwvuRrJM=
-k8s.io/api v0.30.2 h1:+ZhRj+28QT4UOH+BKznu4CBgPWgkXO7XAvMcMl0qKvI=
 k8s.io/api v0.30.2/go.mod h1:ULg5g9JvOev2dG0u2hig4Z7tQ2hHIuS+m8MNZ+X6EmI=
+k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ=
+k8s.io/api v0.30.3/go.mod h1:GPc8jlzoe5JG3pb0KJCSLX5oAFIW3/qNJITlDj8BH04=
 k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY=
 k8s.io/apiextensions-apiserver v0.16.7/go.mod h1:6xYRp4trGp6eT5WZ6tPi/TB2nfWQCzwUvBlpg8iswe0=
 k8s.io/apiextensions-apiserver v0.17.1/go.mod h1:DRIFH5x3jalE4rE7JP0MQKby9zdYk9lUJQuMmp+M/L0=
 k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo=
 k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY=
 k8s.io/apiextensions-apiserver v0.18.6/go.mod h1:lv89S7fUysXjLZO7ke783xOwVTm6lKizADfvUM/SS/M=
-k8s.io/apiextensions-apiserver v0.30.2 h1:l7Eue2t6QiLHErfn2vwK4KgF4NeDgjQkCXtEbOocKIE=
-k8s.io/apiextensions-apiserver v0.30.2/go.mod h1:lsJFLYyK40iguuinsb3nt+Sj6CmodSI4ACDLep1rgjw=
+k8s.io/apiextensions-apiserver v0.30.3 h1:oChu5li2vsZHx2IvnGP3ah8Nj3KyqG3kRSaKmijhB9U=
+k8s.io/apiextensions-apiserver v0.30.3/go.mod h1:uhXxYDkMAvl6CJw4lrDN4CPbONkF3+XL9cacCT44kV4=
 k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA=
 k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010/go.mod h1:Waf/xTS2FGRrgXCkO5FP3XxTOWh0qLf2QhL1qFZZ/R8=
 k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4=
@@ -2988,8 +3006,9 @@ k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCk
 k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
 k8s.io/apimachinery v0.19.3/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
 k8s.io/apimachinery v0.30.1/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
-k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg=
 k8s.io/apimachinery v0.30.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
+k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc=
+k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
 k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg=
 k8s.io/apiserver v0.0.0-20191122221311-9d521947b1e1/go.mod h1:RbsZY5zzBIWnz4KbctZsTVjwIuOpTp4Z8oCgFHN4kZQ=
 k8s.io/apiserver v0.16.7/go.mod h1:/5zSatF30/L9zYfMTl55jzzOnx7r/gGv5a5wtRp8yAw=
@@ -2997,8 +3016,8 @@ k8s.io/apiserver v0.17.1/go.mod h1:BQEUObJv8H6ZYO7DeKI5vb50tjk6paRJ4ZhSyJsiSco=
 k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw=
 k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw=
 k8s.io/apiserver v0.18.6/go.mod h1:Zt2XvTHuaZjBz6EFYzpp+X4hTmgWGy8AthNVnTdm3Wg=
-k8s.io/apiserver v0.30.2 h1:ACouHiYl1yFI2VFI3YGM+lvxgy6ir4yK2oLOsLI1/tw=
-k8s.io/apiserver v0.30.2/go.mod h1:BOTdFBIch9Sv0ypSEcUR6ew/NUFGocRFNl72Ra7wTm8=
+k8s.io/apiserver v0.30.3 h1:QZJndA9k2MjFqpnyYv/PH+9PE0SHhx3hBho4X0vE65g=
+k8s.io/apiserver v0.30.3/go.mod h1:6Oa88y1CZqnzetd2JdepO0UXzQX4ZnOekx2/PtEjrOg=
 k8s.io/autoscaler v0.0.0-20190607113959-1b4f1855cb8e/go.mod h1:QEXezc9uKPT91dwqhSJq3GNI3B1HxFRQHiku9kmrsSA=
 k8s.io/cli-runtime v0.18.0/go.mod h1:1eXfmBsIJosjn9LjEBUd2WVPoPAY9XGTqTFcPMIBsUQ=
 k8s.io/cli-runtime v0.18.2/go.mod h1:yfFR2sQQzDsV0VEKGZtrJwEy4hLZ2oj4ZIfodgxAHWQ=
@@ -3021,8 +3040,8 @@ k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1
 k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM=
 k8s.io/component-base v0.18.3/go.mod h1:bp5GzGR0aGkYEfTj+eTY0AN/vXTgkJdQXjNTTVUaa3k=
 k8s.io/component-base v0.18.6/go.mod h1:knSVsibPR5K6EW2XOjEHik6sdU5nCvKMrzMt2D4In14=
-k8s.io/component-base v0.30.2 h1:pqGBczYoW1sno8q9ObExUqrYSKhtE5rW3y6gX88GZII=
-k8s.io/component-base v0.30.2/go.mod h1:yQLkQDrkK8J6NtP+MGJOws+/PPeEXNpwFixsUI7h/OE=
+k8s.io/component-base v0.30.3 h1:Ci0UqKWf4oiwy8hr1+E3dsnliKnkMLZMVbWzeorlk7s=
+k8s.io/component-base v0.30.3/go.mod h1:C1SshT3rGPCuNtBs14RmVD2xW0EhRSeLvBh7AGk1quA=
 k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
@@ -3039,8 +3058,9 @@ k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
 k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
 k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
 k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
 k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
 k8s.io/kube-aggregator v0.17.1/go.mod h1:H5LcB3fx+P1gpowuZpzDu5B1XfABdO7JBKyB9J9bt34=
 k8s.io/kube-aggregator v0.29.0 h1:N4fmtePxOZ+bwiK1RhVEztOU+gkoVkvterHgpwAuiTw=
 k8s.io/kube-aggregator v0.29.0/go.mod h1:bjatII63ORkFg5yUFP2qm2OC49R0wwxZhRVIyJ4Z4X0=
@@ -3052,8 +3072,9 @@ k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKf
 k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
 k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
 k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
-k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
 k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
+k8s.io/kube-openapi v0.0.0-20240423202451-8948a665c108 h1:Q8Z7VlGhcJgBHJHYugJ/K/7iB8a2eSxCyxdVjJp+lLY=
+k8s.io/kube-openapi v0.0.0-20240423202451-8948a665c108/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
 k8s.io/kube-state-metrics v1.7.2/go.mod h1:U2Y6DRi07sS85rmVPmBFlmv+2peBcL8IWGjM+IjYA/E=
 k8s.io/kubectl v0.18.0/go.mod h1:LOkWx9Z5DXMEg5KtOjHhRiC1fqJPLyCr3KtQgEolCkU=
 k8s.io/kubectl v0.18.2/go.mod h1:OdgFa3AlsPKRpFFYE7ICTwulXOcMGXHTc+UKhHKvrb4=
@@ -3068,8 +3089,9 @@ k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl
 k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
 k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
 k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
 k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
 lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
 maistra.io/api v0.0.0-20210601141927-1cbee4cb8ce8/go.mod h1:Os/zrIv6nsjgC43UAo17FFv+fvYlzANUWIpNNZEZ/KE=
diff --git a/pkg/operator/client/client.go b/pkg/operator/client/client.go
index e9848f16b..ce4325a29 100644
--- a/pkg/operator/client/client.go
+++ b/pkg/operator/client/client.go
@@ -10,8 +10,7 @@ import (
 	operatorv1 "github.com/openshift/api/operator/v1"
 	routev1 "github.com/openshift/api/route/v1"

-	maistrav1 "github.com/maistra/istio-operator/pkg/apis/maistra/v1"
-	maistrav2 "github.com/maistra/istio-operator/pkg/apis/maistra/v2"
+	sailv1alpha1 "github.com/istio-ecosystem/sail-operator/api/v1alpha1"
 	operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"

 	gatewayapiv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1"
@@ -59,10 +58,7 @@ func init() {
 	if err := operatorsv1alpha1.AddToScheme(scheme); err != nil {
 		panic(err)
 	}
-	if err := maistrav1.SchemeBuilder.AddToScheme(scheme); err != nil {
-		panic(err)
-	}
-	if err := maistrav2.SchemeBuilder.AddToScheme(scheme); err != nil {
+	if err := sailv1alpha1.SchemeBuilder.AddToScheme(scheme); err != nil {
 		panic(err)
 	}
 	if err := machinev1.Install(scheme); err != nil {
diff --git a/pkg/operator/controller/gateway-service-dns/controller.go b/pkg/operator/controller/gateway-service-dns/controller.go
index 3beeca93f..cc487cf1d 100644
--- a/pkg/operator/controller/gateway-service-dns/controller.go
+++ b/pkg/operator/controller/gateway-service-dns/controller.go
@@ -168,15 +168,15 @@ func (r *reconciler) Reconcile(ctx context.Context, request reconcile.Request) (
 		return reconcile.Result{}, err
 	}

-	if
len(service.Spec.Selector[gatewayNameLabelKey]) == 0 { - log.Info(`service selector has no "`+gatewayNameLabelKey+`" label; reconciliation will be skipped`, "request", request) + if len(service.Labels[gatewayNameLabelKey]) == 0 { + log.Info("service does not have a label with the expected key; reconciliation will be skipped", "request", request, "labelKey", gatewayNameLabelKey) return reconcile.Result{}, nil } var gateway gatewayapiv1beta1.Gateway gatewayName := types.NamespacedName{ Namespace: service.Namespace, - Name: service.Spec.Selector[gatewayNameLabelKey], + Name: service.Labels[gatewayNameLabelKey], } if err := r.cache.Get(ctx, gatewayName, &gateway); err != nil { if apierrors.IsNotFound(err) { diff --git a/pkg/operator/controller/gateway-service-dns/controller_test.go b/pkg/operator/controller/gateway-service-dns/controller_test.go index bac5794b1..2746839b0 100644 --- a/pkg/operator/controller/gateway-service-dns/controller_test.go +++ b/pkg/operator/controller/gateway-service-dns/controller_test.go @@ -60,16 +60,13 @@ func Test_Reconcile(t *testing.T) { Port: gatewayapiv1beta1.PortNumber(port), } } - svc := func(name string, labels, selector map[string]string, ingresses ...corev1.LoadBalancerIngress) *corev1.Service { + svc := func(name string, labels map[string]string, ingresses ...corev1.LoadBalancerIngress) *corev1.Service { return &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Labels: labels, Namespace: "openshift-ingress", Name: name, }, - Spec: corev1.ServiceSpec{ - Selector: selector, - }, Status: corev1.ServiceStatus{ LoadBalancer: corev1.LoadBalancerStatus{ Ingress: ingresses, @@ -77,11 +74,9 @@ func Test_Reconcile(t *testing.T) { }, } } - gatewayManagedLabel := map[string]string{ + exampleManagedGatewayLabel := map[string]string{ "gateway.istio.io/managed": "example-gateway", - } - exampleGatewayLabel := map[string]string{ - "istio.io/gateway-name": "example-gateway", + "istio.io/gateway-name": "example-gateway", } ingHost := func(hostname string) corev1.LoadBalancerIngress { return corev1.LoadBalancerIngress{ @@ -126,7 +121,7 @@ func Test_Reconcile(t *testing.T) { existingObjects: []runtime.Object{ infraConfig, gw("example-gateway", l("stage-http", "*.stage.example.com", 80)), - svc("example-gateway", gatewayManagedLabel, exampleGatewayLabel, ingHost("lb.example.com")), + svc("example-gateway", exampleManagedGatewayLabel, ingHost("lb.example.com")), }, reconcileRequest: req("openshift-ingress", "example-gateway"), expectCreate: []client.Object{}, @@ -139,7 +134,7 @@ func Test_Reconcile(t *testing.T) { existingObjects: []runtime.Object{ dnsConfig, gw("example-gateway", l("stage-http", "*.stage.example.com", 80)), - svc("example-gateway", gatewayManagedLabel, exampleGatewayLabel, ingHost("lb.example.com")), + svc("example-gateway", exampleManagedGatewayLabel, ingHost("lb.example.com")), }, reconcileRequest: req("openshift-ingress", "example-gateway"), expectCreate: []client.Object{}, @@ -152,7 +147,7 @@ func Test_Reconcile(t *testing.T) { existingObjects: []runtime.Object{ dnsConfig, infraConfig, gw("example-gateway"), - svc("example-gateway", gatewayManagedLabel, exampleGatewayLabel, ingHost("lb.example.com")), + svc("example-gateway", exampleManagedGatewayLabel, ingHost("lb.example.com")), }, reconcileRequest: req("openshift-ingress", "example-gateway"), expectCreate: []client.Object{}, @@ -169,12 +164,12 @@ func Test_Reconcile(t *testing.T) { l("stage-https", "*.stage.example.com", 443), l("prod-https", "*.prod.example.com", 443), ), - svc("example-gateway", 
gatewayManagedLabel, exampleGatewayLabel, ingHost("lb.example.com")), + svc("example-gateway", exampleManagedGatewayLabel, ingHost("lb.example.com")), }, reconcileRequest: req("openshift-ingress", "example-gateway"), expectCreate: []client.Object{ - dnsrecord("example-gateway-76456f8647-wildcard", "*.prod.example.com.", iov1.ManagedDNS, exampleGatewayLabel, "lb.example.com"), - dnsrecord("example-gateway-64754456b8-wildcard", "*.stage.example.com.", iov1.ManagedDNS, exampleGatewayLabel, "lb.example.com"), + dnsrecord("example-gateway-76456f8647-wildcard", "*.prod.example.com.", iov1.ManagedDNS, exampleManagedGatewayLabel, "lb.example.com"), + dnsrecord("example-gateway-64754456b8-wildcard", "*.stage.example.com.", iov1.ManagedDNS, exampleManagedGatewayLabel, "lb.example.com"), }, expectUpdate: []client.Object{}, expectDelete: []client.Object{}, @@ -188,13 +183,13 @@ func Test_Reconcile(t *testing.T) { l("http", "*.example.com", 80), l("https", "*.example.com", 443), ), - svc("example-gateway", gatewayManagedLabel, exampleGatewayLabel, ingHost("newlb.example.com")), - dnsrecord("example-gateway-7bdcfc8f68-wildcard", "*.example.com.", iov1.ManagedDNS, exampleGatewayLabel, "oldlb.example.com"), + svc("example-gateway", exampleManagedGatewayLabel, ingHost("newlb.example.com")), + dnsrecord("example-gateway-7bdcfc8f68-wildcard", "*.example.com.", iov1.ManagedDNS, exampleManagedGatewayLabel, "oldlb.example.com"), }, reconcileRequest: req("openshift-ingress", "example-gateway"), expectCreate: []client.Object{}, expectUpdate: []client.Object{ - dnsrecord("example-gateway-7bdcfc8f68-wildcard", "*.example.com.", iov1.ManagedDNS, exampleGatewayLabel, "newlb.example.com"), + dnsrecord("example-gateway-7bdcfc8f68-wildcard", "*.example.com.", iov1.ManagedDNS, exampleManagedGatewayLabel, "newlb.example.com"), }, expectDelete: []client.Object{}, }, @@ -206,16 +201,16 @@ func Test_Reconcile(t *testing.T) { "example-gateway", l("http", "*.new.example.com", 80), ), - svc("example-gateway", gatewayManagedLabel, exampleGatewayLabel, ingHost("lb.example.com")), - dnsrecord("example-gateway-64754456b8-wildcard", "*.old.example.com.", iov1.ManagedDNS, exampleGatewayLabel, "lb.example.com"), + svc("example-gateway", exampleManagedGatewayLabel, ingHost("lb.example.com")), + dnsrecord("example-gateway-64754456b8-wildcard", "*.old.example.com.", iov1.ManagedDNS, exampleManagedGatewayLabel, "lb.example.com"), }, reconcileRequest: req("openshift-ingress", "example-gateway"), expectCreate: []client.Object{ - dnsrecord("example-gateway-68ffc6d64-wildcard", "*.new.example.com.", iov1.ManagedDNS, exampleGatewayLabel, "lb.example.com"), + dnsrecord("example-gateway-68ffc6d64-wildcard", "*.new.example.com.", iov1.ManagedDNS, exampleManagedGatewayLabel, "lb.example.com"), }, expectUpdate: []client.Object{}, expectDelete: []client.Object{ - dnsrecord("example-gateway-64754456b8-wildcard", "*.old.example.com.", iov1.ManagedDNS, exampleGatewayLabel, "lb.example.com"), + dnsrecord("example-gateway-64754456b8-wildcard", "*.old.example.com.", iov1.ManagedDNS, exampleManagedGatewayLabel, "lb.example.com"), }, }, { @@ -223,11 +218,11 @@ func Test_Reconcile(t *testing.T) { existingObjects: []runtime.Object{ dnsConfig, infraConfig, gw("example-gateway", l("stage-http", "*.stage.example.com", 80), l("stage-https", "*.stage.example.com", 443)), - svc("example-gateway", gatewayManagedLabel, exampleGatewayLabel, ingHost("lb.example.com")), + svc("example-gateway", exampleManagedGatewayLabel, ingHost("lb.example.com")), }, reconcileRequest: 
req("openshift-ingress", "example-gateway"), expectCreate: []client.Object{ - dnsrecord("example-gateway-64754456b8-wildcard", "*.stage.example.com.", iov1.ManagedDNS, exampleGatewayLabel, "lb.example.com"), + dnsrecord("example-gateway-64754456b8-wildcard", "*.stage.example.com.", iov1.ManagedDNS, exampleManagedGatewayLabel, "lb.example.com"), }, expectUpdate: []client.Object{}, expectDelete: []client.Object{}, @@ -237,11 +232,11 @@ func Test_Reconcile(t *testing.T) { existingObjects: []runtime.Object{ dnsConfig, infraConfig, gw("example-gateway", l("http", "*.foo.com", 80)), - svc("example-gateway", gatewayManagedLabel, exampleGatewayLabel, ingHost("lb.example.com")), + svc("example-gateway", exampleManagedGatewayLabel, ingHost("lb.example.com")), }, reconcileRequest: req("openshift-ingress", "example-gateway"), expectCreate: []client.Object{ - dnsrecord("example-gateway-795d4b47fd-wildcard", "*.foo.com.", iov1.UnmanagedDNS, exampleGatewayLabel, "lb.example.com"), + dnsrecord("example-gateway-795d4b47fd-wildcard", "*.foo.com.", iov1.UnmanagedDNS, exampleManagedGatewayLabel, "lb.example.com"), }, expectUpdate: []client.Object{}, expectDelete: []client.Object{}, diff --git a/pkg/operator/controller/gatewayclass/controller.go b/pkg/operator/controller/gatewayclass/controller.go index 842df6efb..e7eb6a67f 100644 --- a/pkg/operator/controller/gatewayclass/controller.go +++ b/pkg/operator/controller/gatewayclass/controller.go @@ -68,7 +68,7 @@ func NewUnmanaged(mgr manager.Manager, config Config) (controller.Controller, er // controller. type Config struct { // OperatorNamespace is the namespace in which the operator should - // create the ServiceMeshControlPlane CR. + // create the Istio CR. OperatorNamespace string // OperandNamespace is the namespace in which Istio should be deployed. OperandNamespace string @@ -97,7 +97,7 @@ func (r *reconciler) Reconcile(ctx context.Context, request reconcile.Request) ( if _, _, err := r.ensureServiceMeshOperatorSubscription(ctx); err != nil { errs = append(errs, err) } - if _, _, err := r.ensureServiceMeshControlPlane(ctx, &gatewayclass); err != nil { + if _, _, err := r.ensureIstio(ctx, &gatewayclass); err != nil { errs = append(errs, err) } return reconcile.Result{}, utilerrors.NewAggregate(errs) diff --git a/pkg/operator/controller/gatewayclass/istio.go b/pkg/operator/controller/gatewayclass/istio.go new file mode 100644 index 000000000..8b972620e --- /dev/null +++ b/pkg/operator/controller/gatewayclass/istio.go @@ -0,0 +1,174 @@ +package gatewayclass + +import ( + "context" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + gatewayapiv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" + + sailv1alpha1 "github.com/istio-ecosystem/sail-operator/api/v1alpha1" + + "github.com/openshift/cluster-ingress-operator/pkg/operator/controller" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" +) + +// ensureIstio attempts to ensure that an Istio CR is present and returns a +// Boolean indicating whether it exists, the CR if it exists, and an error +// value. 
+func (r *reconciler) ensureIstio(ctx context.Context, gatewayclass *gatewayapiv1beta1.GatewayClass) (bool, *sailv1alpha1.Istio, error) { + name := controller.IstioName(r.config.OperandNamespace) + have, current, err := r.currentIstio(ctx, name) + if err != nil { + return false, nil, err + } + + // TODO If we have a current CR with a different owner reference, + // should we append the new gatewayclass? + ownerRef := metav1.OwnerReference{ + APIVersion: gatewayapiv1beta1.SchemeGroupVersion.String(), + Kind: "GatewayClass", + Name: gatewayclass.Name, + UID: gatewayclass.UID, + } + desired, err := desiredIstio(name, ownerRef) + if err != nil { + return have, current, err + } + + switch { + case !have: + if err := r.createIstio(ctx, desired); err != nil { + return false, nil, err + } + return r.currentIstio(ctx, name) + case have: + if updated, err := r.updateIstio(ctx, current, desired); err != nil { + return have, current, err + } else if updated { + return r.currentIstio(ctx, name) + } + } + return true, current, nil +} + +// desiredIstio returns the desired Istio CR. +func desiredIstio(name types.NamespacedName, ownerRef metav1.OwnerReference) (*sailv1alpha1.Istio, error) { + pilotContainerEnv := map[string]string{ + "ENABLE_IOR": "false", + "PILOT_ENABLE_GATEWAY_API": "true", + "PILOT_ENABLE_GATEWAY_API_DEPLOYMENT_CONTROLLER": "true", + "PILOT_ENABLE_GATEWAY_API_GATEWAYCLASS_CONTROLLER": "false", + "PILOT_ENABLE_GATEWAY_API_STATUS": "true", + "PILOT_ENABLE_GATEWAY_CONTROLLER_MODE": "true", + "PILOT_GATEWAY_API_CONTROLLER_NAME": OpenShiftGatewayClassControllerName, + "PILOT_GATEWAY_API_DEFAULT_GATEWAYCLASS": OpenShiftDefaultGatewayClassName, + "PILOT_GATEWAY_API_DEPLOYMENT_DEFAULT_LABELS": `{\"gateway.openshift.io/inject\":\"true\"}`, + } + istio := sailv1alpha1.Istio{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: name.Namespace, + Name: name.Name, + OwnerReferences: []metav1.OwnerReference{ownerRef}, + }, + Spec: sailv1alpha1.IstioSpec{ + Namespace: controller.DefaultOperandNamespace, + UpdateStrategy: &sailv1alpha1.IstioUpdateStrategy{ + Type: sailv1alpha1.UpdateStrategyTypeInPlace, + }, + Values: &sailv1alpha1.Values{ + Global: &sailv1alpha1.GlobalConfig{ + IstioNamespace: controller.DefaultOperandNamespace, + }, + Pilot: &sailv1alpha1.PilotConfig{ + Enabled: ptr.To(true), + Env: pilotContainerEnv, + ExtraContainerArgs: []string{}, + }, + SidecarInjectorWebhook: &sailv1alpha1.SidecarInjectorConfig{ + EnableNamespacesByDefault: ptr.To(false), + }, + MeshConfig: &sailv1alpha1.MeshConfig{ + AccessLogFile: "/dev/stdout", + DefaultConfig: &sailv1alpha1.MeshConfigProxyConfig{ + ProxyHeaders: &sailv1alpha1.ProxyConfigProxyHeaders{ + Server: &sailv1alpha1.ProxyConfigProxyHeadersServer{ + Disabled: ptr.To(true), + }, + EnvoyDebugHeaders: &sailv1alpha1.ProxyConfigProxyHeadersEnvoyDebugHeaders{ + Disabled: ptr.To(true), + }, + MetadataExchangeHeaders: &sailv1alpha1.ProxyConfigProxyHeadersMetadataExchangeHeaders{ + Mode: sailv1alpha1.ProxyConfigProxyHeadersMetadataExchangeModeInMesh, + }, + }, + }, + }, + }, + Version: "v1.23.0", + }, + } + return &istio, nil +} + +// currentIstio returns the current istio CR. +func (r *reconciler) currentIstio(ctx context.Context, name types.NamespacedName) (bool, *sailv1alpha1.Istio, error) { + var istio sailv1alpha1.Istio + if err := r.cache.Get(ctx, name, &istio); err != nil { + if errors.IsNotFound(err) { + return false, nil, nil + } + return false, nil, err + } + return true, &istio, nil +} + +// createIstio creates an Istio CR. 
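The TODO in ensureIstio above leaves open how to treat an existing Istio CR that carries a different owner reference. One possible answer, sketched below as an illustration rather than code from this change, is to merge owner references instead of overwriting them, so that multiple GatewayClasses can co-own the CR; the mergeOwnerRef helper name is hypothetical.

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// mergeOwnerRef appends ref unless an owner with the same UID is already
// present. Illustrative sketch only; not part of this change.
func mergeOwnerRef(refs []metav1.OwnerReference, ref metav1.OwnerReference) []metav1.OwnerReference {
	for _, r := range refs {
		if r.UID == ref.UID {
			return refs // already an owner; keep the list unchanged
		}
	}
	return append(refs, ref)
}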
+func (r *reconciler) createIstio(ctx context.Context, istio *sailv1alpha1.Istio) error { + if err := r.client.Create(ctx, istio); err != nil { + return err + } + log.Info("created istio", "namespace", istio.Namespace, "name", istio.Name) + return nil +} + +// istioCmpOpts is an array of options for go-cmp to use when comparing Istio +// CRs. +var istioCmpOpts = []cmp.Option{ + cmpopts.EquateEmpty(), +} + +// updateIstio updates an Istio CR. +func (r *reconciler) updateIstio(ctx context.Context, current, desired *sailv1alpha1.Istio) (bool, error) { + changed, updated := istioChanged(current, desired) + if !changed { + return false, nil + } + + // Diff before updating because the client may mutate the object. + // Only diff spec because status may include unexported fields that + // cause go-cmp to panic. + diff := cmp.Diff(current.Spec, updated.Spec, istioCmpOpts...) + if err := r.client.Update(ctx, updated); err != nil { + return false, err + } + log.Info("updated istio", "namespace", updated.Namespace, "name", updated.Name, "diff", diff) + return true, nil +} + +// istioChanged returns a Boolean indicating whether the current Istio CR +// matches the expected CR, and the updated CR if they do not match. +func istioChanged(current, expected *sailv1alpha1.Istio) (bool, *sailv1alpha1.Istio) { + if cmp.Equal(current.Spec, expected.Spec, istioCmpOpts...) { + return false, nil + } + + updated := current.DeepCopy() + updated.Spec = expected.Spec + + return true, updated +} diff --git a/pkg/operator/controller/gatewayclass/servicemeshcontrolplane.go b/pkg/operator/controller/gatewayclass/servicemeshcontrolplane.go deleted file mode 100644 index 29083939e..000000000 --- a/pkg/operator/controller/gatewayclass/servicemeshcontrolplane.go +++ /dev/null @@ -1,218 +0,0 @@ -package gatewayclass - -import ( - "context" - "fmt" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - gatewayapiv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" - - maistrav1 "github.com/maistra/istio-operator/pkg/apis/maistra/v1" - maistrav2 "github.com/maistra/istio-operator/pkg/apis/maistra/v2" - - "github.com/openshift/cluster-ingress-operator/pkg/operator/controller" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -// ensureServiceMeshControlPlane attempts to ensure that a -// servicemeshcontrolplane is present and returns a Boolean indicating whether -// it exists, the servicemeshcontrolplane if it exists, and an error value. -func (r *reconciler) ensureServiceMeshControlPlane(ctx context.Context, gatewayclass *gatewayapiv1beta1.GatewayClass) (bool, *maistrav2.ServiceMeshControlPlane, error) { - name := controller.ServiceMeshControlPlaneName(r.config.OperandNamespace) - have, current, err := r.currentServiceMeshControlPlane(ctx, name) - if err != nil { - return false, nil, err - } - - // TODO If we have a current SMCP with a different owner reference, - // should we append the new gatewayclass? 
- ownerRef := metav1.OwnerReference{ - APIVersion: gatewayapiv1beta1.SchemeGroupVersion.String(), - Kind: "GatewayClass", - Name: gatewayclass.Name, - UID: gatewayclass.UID, - } - desired, err := desiredServiceMeshControlPlane(name, ownerRef) - if err != nil { - return have, current, err - } - - switch { - case !have: - if err := r.createServiceMeshControlPlane(ctx, desired); err != nil { - return false, nil, err - } - return r.currentServiceMeshControlPlane(ctx, name) - case have: - if updated, err := r.updateServiceMeshControlPlane(ctx, current, desired); err != nil { - return have, current, err - } else if updated { - return r.currentServiceMeshControlPlane(ctx, name) - } - } - return true, current, nil -} - -// desiredServiceMeshControlPlane returns the desired servicemeshcontrolplane. -func desiredServiceMeshControlPlane(name types.NamespacedName, ownerRef metav1.OwnerReference) (*maistrav2.ServiceMeshControlPlane, error) { - pilotContainerEnv := map[string]string{ - "PILOT_ENABLE_GATEWAY_CONTROLLER_MODE": "true", - "PILOT_GATEWAY_API_CONTROLLER_NAME": OpenShiftGatewayClassControllerName, - "PILOT_GATEWAY_API_DEFAULT_GATEWAYCLASS": OpenShiftDefaultGatewayClassName, - // OSSM will only reconcile the default gateway class if this is true. - "PILOT_ENABLE_GATEWAY_API_GATEWAYCLASS_CONTROLLER": "true", - } - f := false - t := true - smcp := maistrav2.ServiceMeshControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: name.Namespace, - Name: name.Name, - OwnerReferences: []metav1.OwnerReference{ownerRef}, - }, - Spec: maistrav2.ControlPlaneSpec{ - Addons: &maistrav2.AddonsConfig{ - // TODO Autodetect when these components have - // been installed and enable them in the SMCP - // if they are present. - Grafana: &maistrav2.GrafanaAddonConfig{ - Enablement: maistrav2.Enablement{Enabled: &f}, - }, - Jaeger: &maistrav2.JaegerAddonConfig{ - Name: "jaeger", - Install: nil, - }, - Kiali: &maistrav2.KialiAddonConfig{ - Enablement: maistrav2.Enablement{Enabled: &f}, - }, - Prometheus: &maistrav2.PrometheusAddonConfig{ - Enablement: maistrav2.Enablement{Enabled: &f}, - }, - }, - // Both ingress and egress gateways are enabled by default in OSSM. - // We don't need them, so we have to explicitly disable them. 
- Gateways: &maistrav2.GatewaysConfig{ - ClusterIngress: &maistrav2.ClusterIngressGatewayConfig{ - IngressEnabled: &f, - IngressGatewayConfig: maistrav2.IngressGatewayConfig{ - GatewayConfig: maistrav2.GatewayConfig{ - Enablement: maistrav2.Enablement{ - Enabled: &f, - }, - }, - }, - }, - ClusterEgress: &maistrav2.EgressGatewayConfig{ - GatewayConfig: maistrav2.GatewayConfig{ - Enablement: maistrav2.Enablement{ - Enabled: &f, - }, - }, - }, - }, - Mode: maistrav2.ClusterWideMode, - Policy: &maistrav2.PolicyConfig{ - Type: maistrav2.PolicyTypeIstiod, - }, - Profiles: []string{"default"}, - Proxy: &maistrav2.ProxyConfig{ - AccessLogging: &maistrav2.ProxyAccessLoggingConfig{ - EnvoyService: &maistrav2.ProxyEnvoyServiceConfig{ - Enablement: maistrav2.Enablement{ - Enabled: &t, - }, - }, - File: &maistrav2.ProxyFileAccessLogConfig{ - Name: "/dev/stdout", - }, - }, - }, - Runtime: &maistrav2.ControlPlaneRuntimeConfig{ - Components: map[maistrav2.ControlPlaneComponentName]*maistrav2.ComponentRuntimeConfig{ - maistrav2.ControlPlaneComponentNamePilot: { - Container: &maistrav2.ContainerConfig{ - Env: pilotContainerEnv, - }, - }, - }, - }, - Security: &maistrav2.SecurityConfig{ - ManageNetworkPolicy: &f, - }, - Tracing: &maistrav2.TracingConfig{ - Type: maistrav2.TracerTypeNone, - }, - Version: "v2.5", - TechPreview: maistrav1.NewHelmValues(map[string]interface{}{ - "gatewayAPI": map[string]interface{}{ - "enabled": &t, - }, - }), - }, - } - return &smcp, nil -} - -// currentServiceMeshControlPlane returns the current servicemeshcontrolplane. -func (r *reconciler) currentServiceMeshControlPlane(ctx context.Context, name types.NamespacedName) (bool, *maistrav2.ServiceMeshControlPlane, error) { - var smcp maistrav2.ServiceMeshControlPlane - if err := r.cache.Get(ctx, name, &smcp); err != nil { - if errors.IsNotFound(err) { - return false, nil, nil - } - return false, nil, fmt.Errorf("failed to get ServiceMeshControlPlane %s: %w", name, err) - } - return true, &smcp, nil -} - -// createServiceMeshControlPlane creates a servicemeshcontrolplane. -func (r *reconciler) createServiceMeshControlPlane(ctx context.Context, smcp *maistrav2.ServiceMeshControlPlane) error { - if err := r.client.Create(ctx, smcp); err != nil { - return fmt.Errorf("failed to create ServiceMeshControlPlane %s/%s: %w", smcp.Namespace, smcp.Name, err) - } - log.Info("created ServiceMeshControlPlane", "namespace", smcp.Namespace, "name", smcp.Name) - return nil -} - -// smcpCmpOpts is an array of options for go-cmp to use when comparing -// ServiceMeshControlPlane CRs. -var smcpCmpOpts = []cmp.Option{ - cmpopts.EquateEmpty(), - cmp.AllowUnexported(maistrav1.HelmValues{}), -} - -// updateServiceMeshControlPlane updates a servicemeshcontrolplane. -func (r *reconciler) updateServiceMeshControlPlane(ctx context.Context, current, desired *maistrav2.ServiceMeshControlPlane) (bool, error) { - changed, updated := serviceMeshControlPlaneChanged(current, desired) - if !changed { - return false, nil - } - - // Diff before updating because the client may mutate the object. - // Only diff spec because status may include unexported fields that - // cause go-cmp to panic. - diff := cmp.Diff(current.Spec, updated.Spec, smcpCmpOpts...) 
- if err := r.client.Update(ctx, updated); err != nil { - return false, fmt.Errorf("failed to update ServiceMeshControlPlane %s/%s: %w", updated.Namespace, updated.Name, err) - } - log.Info("updated ServiceMeshControlPlane", "namespace", updated.Namespace, "name", updated.Name, "diff", diff) - return true, nil -} - -// serviceMeshControlPlaneChanged returns a Boolean indicating whether the -// current ServiceMeshControlPlane matches the expected servicemeshcontrolplane -// and the updated servicemeshcontrolplane if they do not match. -func serviceMeshControlPlaneChanged(current, expected *maistrav2.ServiceMeshControlPlane) (bool, *maistrav2.ServiceMeshControlPlane) { - if cmp.Equal(current.Spec, expected.Spec, smcpCmpOpts...) { - return false, nil - } - - updated := current.DeepCopy() - updated.Spec = expected.Spec - - return true, updated -} diff --git a/pkg/operator/controller/gatewayclass/subscription.go b/pkg/operator/controller/gatewayclass/subscription.go index a37388fa4..ec55d959d 100644 --- a/pkg/operator/controller/gatewayclass/subscription.go +++ b/pkg/operator/controller/gatewayclass/subscription.go @@ -20,7 +20,7 @@ import ( // for servicemeshoperator is present and returns a Boolean indicating whether // it exists, the subscription if it exists, and an error value. func (r *reconciler) ensureServiceMeshOperatorSubscription(ctx context.Context) (bool, *operatorsv1alpha1.Subscription, error) { - name := operatorcontroller.ServiceMeshSubscriptionName() + name := operatorcontroller.SailOperatorSubscriptionName() have, current, err := r.currentSubscription(ctx, name) if err != nil { return false, nil, err @@ -55,10 +55,12 @@ func desiredSubscription(name types.NamespacedName) (*operatorsv1alpha1.Subscrip Name: name.Name, }, Spec: &operatorsv1alpha1.SubscriptionSpec{ - Channel: "stable", + // TODO Determine the correct channel and catalog source + // to use for the OSSM 3.0 Tech Preview release. + Channel: "0.1-nightly", //"stable", InstallPlanApproval: operatorsv1alpha1.ApprovalAutomatic, - Package: "servicemeshoperator", - CatalogSource: "redhat-operators", + Package: "sailoperator", + CatalogSource: "community-operators", //"redhat-operators", CatalogSourceNamespace: "openshift-marketplace", }, } diff --git a/pkg/operator/controller/names.go b/pkg/operator/controller/names.go index 24923a426..268cdbaa6 100644 --- a/pkg/operator/controller/names.go +++ b/pkg/operator/controller/names.go @@ -262,23 +262,21 @@ func IngressClassName(ingressControllerName string) types.NamespacedName { return types.NamespacedName{Name: "openshift-" + ingressControllerName} } -// ServiceMeshControlPlaneName returns the namespaced name for a -// ServiceMeshControlPlane CR. This CR is created in the operand's namespace -// and has a hard-coded name. Each namespace can have only one gatewayclass, so -// it is simplest to use the same name in every namespace. -func ServiceMeshControlPlaneName(operandNamespace string) types.NamespacedName { +// IstioName returns the namespaced name for the Istio CR that the ingress +// operator creates when Gateway API is enabled. +func IstioName(operandNamespace string) types.NamespacedName { return types.NamespacedName{ - Namespace: operandNamespace, + Namespace: "", // The Istio CR is cluster-scoped. Name: "openshift-gateway", } } -// ServiceMeshSubscriptionName returns the namespaced name for a Subscription CR -// to install OpenShift Service Mesh. 
-func ServiceMeshSubscriptionName() types.NamespacedName {
+// SailOperatorSubscriptionName returns the namespaced name for a Subscription
+// CR to install the Sail Operator for OpenShift Service Mesh.
+func SailOperatorSubscriptionName() types.NamespacedName {
 	return types.NamespacedName{
 		Namespace: "openshift-operators",
-		Name:      "servicemeshoperator",
+		Name:      "sailoperator",
 	}
 }
diff --git a/test/e2e/gateway_api_test.go b/test/e2e/gateway_api_test.go
index 9fb1a4cd2..1b5089d1e 100644
--- a/test/e2e/gateway_api_test.go
+++ b/test/e2e/gateway_api_test.go
@@ -23,9 +23,9 @@ import (
 const (
 	// The expected OSSM subscription name.
-	expectedSubscriptionName = "servicemeshoperator"
+	expectedSubscriptionName = "sailoperator"
 	// The expected OSSM catalog source name.
-	expectedCatalogSourceName = "redhat-operators"
+	expectedCatalogSourceName = "community-operators" //"redhat-operators"
 	// The expected catalog source namespace.
 	expectedCatalogSourceNamespace = "openshift-marketplace"
 	// The test gateway name used in multiple places.
@@ -98,7 +98,6 @@ func testGatewayAPIResources(t *testing.T) {
 // - the required Subscription and CatalogSource are created.
 // - the OSSM Istio operator is installed successfully and has status Running and Ready. e.g. istio-operator-9f5c88857-2xfrr -n openshift-operators
 // - Istiod is installed successfully and has status Running and Ready. e.g. istiod-openshift-gateway-867bb8d5c7-4z6mp -n openshift-ingress
-// - the SMCP is created successfully (OSSM 2.x).
 func testGatewayAPIIstioInstallation(t *testing.T) {
 	t.Helper()
 
@@ -114,10 +113,6 @@ func testGatewayAPIIstioInstallation(t *testing.T) {
 	if err := assertIstiodControlPlane(t); err != nil {
 		t.Fatalf("failed to find expected Istiod control plane: %v", err)
 	}
-	// TODO - In OSSM 3.x the configuration object to check will be different.
-	if err := assertSMCP(t); err != nil {
-		t.Fatalf("failed to find expected SMCP: %v", err)
-	}
 }
 
 // testGatewayAPIObjects tests that Gateway API objects can be created successfully.
diff --git a/test/e2e/util_gatewayapi_test.go b/test/e2e/util_gatewayapi_test.go
index ac1a3168d..7b6a183d8 100644
--- a/test/e2e/util_gatewayapi_test.go
+++ b/test/e2e/util_gatewayapi_test.go
@@ -14,14 +14,12 @@ import (
 	"testing"
 	"time"
 
-	maistrav2 "github.com/maistra/istio-operator/pkg/apis/maistra/v2"
 	v1 "github.com/openshift/api/operatoringress/v1"
 	operatorcontroller "github.com/openshift/cluster-ingress-operator/pkg/operator/controller"
 	operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
-	rbacv1 "k8s.io/api/rbac/v1"
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -37,42 +35,12 @@ const (
 	// openshiftOperatorsNamespace holds the expected OSSM subscription and Istio operator pod.
 	openshiftOperatorsNamespace = "openshift-operators"
-	// openshiftIstioOperatorDeploymentName holds the expected istio-operator deployment name.
-	openshiftIstioOperatorDeploymentName = "istio-operator"
+	// openshiftIstioOperatorDeploymentName holds the expected operator deployment name.
+ openshiftIstioOperatorDeploymentName = "sail-operator" // openshiftIstiodDeploymentName holds the expected istiod deployment name openshiftIstiodDeploymentName = "istiod-openshift-gateway" - // openshiftSMCPName holds the expected OSSM ServiceMeshControlPlane name - openshiftSMCPName = "openshift-gateway" ) -// updateIngressOperatorRole updates the ingress-operator cluster role with cluster-admin privilege. -// TODO - Remove this function after https://issues.redhat.com/browse/OSSM-3508 is fixed. -func updateIngressOperatorRole(t *testing.T) error { - t.Helper() - - // Create the same rolebinding that the `oc adm policy add-cluster-role-to-user` command creates. - // Caller must remove this after setting it. - crb := &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-admin-e2e", - }, - RoleRef: rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: "ClusterRole", Name: "cluster-admin"}, - Subjects: []rbacv1.Subject{{Kind: rbacv1.ServiceAccountKind, Name: "ingress-operator", Namespace: operatorcontroller.DefaultOperatorNamespace}}, - } - - // Add the rolebinding to the ingress-operator user. - if err := kclient.Create(context.TODO(), crb); err != nil { - if kerrors.IsAlreadyExists(err) { - t.Logf("rolebinding already exists") - return nil - } - t.Logf("error adding rolebinding: %v", err) - return err - } - t.Log("rolebinding has been added") - return nil -} - // assertCrdExists checks if the CRD of the given name exists and returns an error if not. // Otherwise returns the CRD version. func assertCrdExists(t *testing.T, crdname string) (string, error) { @@ -595,36 +563,6 @@ func assertCatalogSource(t *testing.T, namespace, csName string) error { return err } -// assertSMCP checks if the ServiceMeshControlPlane exists in a ready state, -// and returns an error if not. -func assertSMCP(t *testing.T) error { - t.Helper() - smcp := &maistrav2.ServiceMeshControlPlane{} - nsName := types.NamespacedName{Namespace: operatorcontroller.DefaultOperandNamespace, Name: openshiftSMCPName} - - err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, 3*time.Minute, false, func(context context.Context) (bool, error) { - if err := kclient.Get(context, nsName, smcp); err != nil { - t.Logf("failed to get ServiceMeshControlPlane %s/%s: %v, retrying...", nsName.Namespace, nsName.Name, err) - return false, nil - } - if smcp.Status.Readiness.Components != nil { - pending := len(smcp.Status.Readiness.Components["pending"]) > 0 - unready := len(smcp.Status.Readiness.Components["unready"]) > 0 - if pending || unready { - t.Logf("found ServiceMeshControlPlane %s/%s, but it isn't ready. Retrying...", smcp.Namespace, smcp.Name) - return false, nil - } - if len(smcp.Status.Readiness.Components["ready"]) > 0 { - t.Logf("found ServiceMeshControlPlane %s/%s with ready components: %v", smcp.Namespace, smcp.Name, smcp.Status.Readiness.Components["ready"]) - return true, nil - } - } - t.Logf("found ServiceMeshControlPlane %s/%s but could not determine its readiness. Retrying...", smcp.Namespace, smcp.Name) - return false, nil - }) - return err -} - // assertDNSRecord checks to make sure a DNSRecord exists in a ready state, // and returns an error if not. 
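With assertSMCP deleted above, the suite no longer waits on a control-plane readiness object. If an equivalent gate is wanted for the Sail install, a sketch along these lines could poll the cluster-scoped Istio CR; the "Ready" condition type and the shape of Status.Conditions are assumptions about the vendored sail-operator API, not code from this diff, and it reuses the sailv1alpha1 import, kclient, and poll helpers already present in this test package.

// assertIstioReady is a hypothetical counterpart to the deleted assertSMCP: it
// polls the cluster-scoped Istio CR until a condition of type "Ready" is True.
// Condition and field names are assumed, not confirmed against the Sail API.
func assertIstioReady(t *testing.T) error {
	t.Helper()
	istio := &sailv1alpha1.Istio{}
	// The Istio CR is cluster-scoped, so the name carries no namespace.
	name := types.NamespacedName{Name: "openshift-gateway"}
	return wait.PollUntilContextTimeout(context.Background(), 1*time.Second, 3*time.Minute, false, func(ctx context.Context) (bool, error) {
		if err := kclient.Get(ctx, name, istio); err != nil {
			t.Logf("failed to get Istio %s: %v, retrying...", name.Name, err)
			return false, nil
		}
		for _, cond := range istio.Status.Conditions {
			if string(cond.Type) == "Ready" && string(cond.Status) == "True" {
				return true, nil
			}
		}
		t.Logf("found Istio %s, but it is not ready yet. Retrying...", name.Name)
		return false, nil
	})
}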
func assertDNSRecord(t *testing.T, recordName types.NamespacedName) error { diff --git a/vendor/cloud.google.com/go/compute/LICENSE b/vendor/cloud.google.com/go/compute/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/cloud.google.com/go/compute/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go deleted file mode 100644 index 639553700..000000000 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -// Version is the current tagged release of the library. -const Version = "1.23.0" diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 06b957349..967e06074 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,12 @@ # Changes +## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.3...compute/metadata/v0.3.0) (2024-04-15) + + +### Features + +* **compute/metadata:** Add context aware functions ([#9733](https://github.com/googleapis/google-cloud-go/issues/9733)) ([e4eb5b4](https://github.com/googleapis/google-cloud-go/commit/e4eb5b46ee2aec9d2fc18300bfd66015e25a0510)) + ## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15) diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index c17faa142..f67e3c7ee 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -23,7 +23,7 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -95,9 +95,9 @@ func (c *cachedValue) get(cl *Client) (v string, err error) { return c.v, nil } if c.trim { - v, err = cl.getTrimmed(c.k) + v, err = cl.getTrimmed(context.Background(), c.k) } else { - v, err = cl.Get(c.k) + v, err = cl.GetWithContext(context.Background(), c.k) } if err == nil { c.v = v @@ -197,18 +197,32 @@ func systemInfoSuggestsGCE() bool { // We don't have any non-Linux clues available, at least yet. 
return false } - slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") + slurp, _ := os.ReadFile("/sys/class/dmi/id/product_name") name := strings.TrimSpace(string(slurp)) return name == "Google" || name == "Google Compute Engine" } -// Subscribe calls Client.Subscribe on the default client. +// Subscribe calls Client.SubscribeWithContext on the default client. func Subscribe(suffix string, fn func(v string, ok bool) error) error { - return defaultClient.Subscribe(suffix, fn) + return defaultClient.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) }) } -// Get calls Client.Get on the default client. -func Get(suffix string) (string, error) { return defaultClient.Get(suffix) } +// SubscribeWithContext calls Client.SubscribeWithContext on the default client. +func SubscribeWithContext(ctx context.Context, suffix string, fn func(ctx context.Context, v string, ok bool) error) error { + return defaultClient.SubscribeWithContext(ctx, suffix, fn) +} + +// Get calls Client.GetWithContext on the default client. +// +// Deprecated: Please use the context aware variant [GetWithContext]. +func Get(suffix string) (string, error) { + return defaultClient.GetWithContext(context.Background(), suffix) +} + +// GetWithContext calls Client.GetWithContext on the default client. +func GetWithContext(ctx context.Context, suffix string) (string, error) { + return defaultClient.GetWithContext(ctx, suffix) +} // ProjectID returns the current instance's project ID string. func ProjectID() (string, error) { return defaultClient.ProjectID() } @@ -288,8 +302,7 @@ func NewClient(c *http.Client) *Client { // getETag returns a value from the metadata service as well as the associated ETag. // This func is otherwise equivalent to Get. -func (c *Client) getETag(suffix string) (value, etag string, err error) { - ctx := context.TODO() +func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string, err error) { // Using a fixed IP makes it very difficult to spoof the metadata service in // a container, which is an important use-case for local testing of cloud // deployments. To enable spoofing of the metadata service, the environment @@ -306,7 +319,7 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) { } suffix = strings.TrimLeft(suffix, "/") u := "http://" + host + "/computeMetadata/v1/" + suffix - req, err := http.NewRequest("GET", u, nil) + req, err := http.NewRequestWithContext(ctx, "GET", u, nil) if err != nil { return "", "", err } @@ -336,7 +349,7 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) { if res.StatusCode == http.StatusNotFound { return "", "", NotDefinedError(suffix) } - all, err := ioutil.ReadAll(res.Body) + all, err := io.ReadAll(res.Body) if err != nil { return "", "", err } @@ -354,19 +367,33 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) { // // If the requested metadata is not defined, the returned error will // be of type NotDefinedError. +// +// Deprecated: Please use the context aware variant [Client.GetWithContext]. func (c *Client) Get(suffix string) (string, error) { - val, _, err := c.getETag(suffix) + return c.GetWithContext(context.Background(), suffix) +} + +// GetWithContext returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. 
+//
+// If the requested metadata is not defined, the returned error will
+// be of type NotDefinedError.
+func (c *Client) GetWithContext(ctx context.Context, suffix string) (string, error) {
+	val, _, err := c.getETag(ctx, suffix)
 	return val, err
 }
 
-func (c *Client) getTrimmed(suffix string) (s string, err error) {
-	s, err = c.Get(suffix)
+func (c *Client) getTrimmed(ctx context.Context, suffix string) (s string, err error) {
+	s, err = c.GetWithContext(ctx, suffix)
 	s = strings.TrimSpace(s)
 	return
 }
 
 func (c *Client) lines(suffix string) ([]string, error) {
-	j, err := c.Get(suffix)
+	j, err := c.GetWithContext(context.Background(), suffix)
 	if err != nil {
 		return nil, err
 	}
@@ -388,7 +415,7 @@ func (c *Client) InstanceID() (string, error) { return instID.get(c) }
 
 // InternalIP returns the instance's primary internal IP address.
 func (c *Client) InternalIP() (string, error) {
-	return c.getTrimmed("instance/network-interfaces/0/ip")
+	return c.getTrimmed(context.Background(), "instance/network-interfaces/0/ip")
 }
 
 // Email returns the email address associated with the service account.
@@ -398,25 +425,25 @@ func (c *Client) Email(serviceAccount string) (string, error) {
 	if serviceAccount == "" {
 		serviceAccount = "default"
 	}
-	return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email")
+	return c.getTrimmed(context.Background(), "instance/service-accounts/"+serviceAccount+"/email")
 }
 
 // ExternalIP returns the instance's primary external (public) IP address.
 func (c *Client) ExternalIP() (string, error) {
-	return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+	return c.getTrimmed(context.Background(), "instance/network-interfaces/0/access-configs/0/external-ip")
 }
 
 // Hostname returns the instance's hostname. This will be of the form
 // "<instanceID>.c.<projID>.internal".
 func (c *Client) Hostname() (string, error) {
-	return c.getTrimmed("instance/hostname")
+	return c.getTrimmed(context.Background(), "instance/hostname")
 }
 
 // InstanceTags returns the list of user-defined instance tags,
 // assigned when initially creating a GCE instance.
 func (c *Client) InstanceTags() ([]string, error) {
 	var s []string
-	j, err := c.Get("instance/tags")
+	j, err := c.GetWithContext(context.Background(), "instance/tags")
 	if err != nil {
 		return nil, err
 	}
@@ -428,12 +455,12 @@ func (c *Client) InstanceTags() ([]string, error) {
 
 // InstanceName returns the current VM's instance name string.
 func (c *Client) InstanceName() (string, error) {
-	return c.getTrimmed("instance/name")
+	return c.getTrimmed(context.Background(), "instance/name")
 }
 
 // Zone returns the current VM's zone, such as "us-central1-b".
 func (c *Client) Zone() (string, error) {
-	zone, err := c.getTrimmed("instance/zone")
+	zone, err := c.getTrimmed(context.Background(), "instance/zone")
 	// zone is of the form "projects/<projNum>/zones/<zoneName>".
 	if err != nil {
 		return "", err
@@ -460,7 +487,7 @@ func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project
 
 // InstanceAttributeValue may return ("", nil) if the attribute was
 // defined to be the empty string.
 func (c *Client) InstanceAttributeValue(attr string) (string, error) {
-	return c.Get("instance/attributes/" + attr)
+	return c.GetWithContext(context.Background(), "instance/attributes/"+attr)
 }
 
 // ProjectAttributeValue returns the value of the provided
@@ -472,7 +499,7 @@ func (c *Client) InstanceAttributeValue(attr string) (string, error) {
 
 // ProjectAttributeValue may return ("", nil) if the attribute was
 // defined to be the empty string.
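For consumers, the practical effect of this vendored bump is the context-aware surface: the old context-free helpers remain but now delegate to, and are deprecated in favor of, the WithContext variants shown above. A minimal caller-side sketch, not part of this diff, that bounds a lookup with a timeout:

package main

import (
	"context"
	"fmt"
	"time"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// Bound the lookup; the deprecated Get delegates to context.Background()
	// internally, so the caller has no way to cancel it.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	zone, err := metadata.GetWithContext(ctx, "instance/zone")
	if err != nil {
		fmt.Println("metadata lookup failed (perhaps not on GCE):", err)
		return
	}
	fmt.Println("zone:", zone)
}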
func (c *Client) ProjectAttributeValue(attr string) (string, error) { - return c.Get("project/attributes/" + attr) + return c.GetWithContext(context.Background(), "project/attributes/"+attr) } // Scopes returns the service account scopes for the given account. @@ -489,21 +516,30 @@ func (c *Client) Scopes(serviceAccount string) ([]string, error) { // The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". // The suffix may contain query parameters. // -// Subscribe calls fn with the latest metadata value indicated by the provided -// suffix. If the metadata value is deleted, fn is called with the empty string -// and ok false. Subscribe blocks until fn returns a non-nil error or the value -// is deleted. Subscribe returns the error value returned from the last call to -// fn, which may be nil when ok == false. +// Deprecated: Please use the context aware variant [Client.SubscribeWithContext]. func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { + return c.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) }) +} + +// SubscribeWithContext subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// The suffix may contain query parameters. +// +// SubscribeWithContext calls fn with the latest metadata value indicated by the +// provided suffix. If the metadata value is deleted, fn is called with the +// empty string and ok false. Subscribe blocks until fn returns a non-nil error +// or the value is deleted. Subscribe returns the error value returned from the +// last call to fn, which may be nil when ok == false. +func (c *Client) SubscribeWithContext(ctx context.Context, suffix string, fn func(ctx context.Context, v string, ok bool) error) error { const failedSubscribeSleep = time.Second * 5 // First check to see if the metadata value exists at all. 
- val, lastETag, err := c.getETag(suffix) + val, lastETag, err := c.getETag(ctx, suffix) if err != nil { return err } - if err := fn(val, true); err != nil { + if err := fn(ctx, val, true); err != nil { return err } @@ -514,7 +550,7 @@ func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) erro suffix += "?wait_for_change=true&last_etag=" } for { - val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag)) + val, etag, err := c.getETag(ctx, suffix+url.QueryEscape(lastETag)) if err != nil { if _, deleted := err.(NotDefinedError); !deleted { time.Sleep(failedSubscribeSleep) @@ -524,7 +560,7 @@ func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) erro } lastETag = etag - if err := fn(val, ok); err != nil || !ok { + if err := fn(ctx, val, ok); err != nil || !ok { return err } } diff --git a/vendor/cloud.google.com/go/compute/metadata/retry.go b/vendor/cloud.google.com/go/compute/metadata/retry.go index 0f18f3cda..3d4bc75dd 100644 --- a/vendor/cloud.google.com/go/compute/metadata/retry.go +++ b/vendor/cloud.google.com/go/compute/metadata/retry.go @@ -27,7 +27,7 @@ const ( ) var ( - syscallRetryable = func(err error) bool { return false } + syscallRetryable = func(error) bool { return false } ) // defaultBackoff is basically equivalent to gax.Backoff without the need for diff --git a/vendor/cloud.google.com/go/compute/metadata/tidyfix.go b/vendor/cloud.google.com/go/compute/metadata/tidyfix.go deleted file mode 100644 index 4cef48500..000000000 --- a/vendor/cloud.google.com/go/compute/metadata/tidyfix.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the {{.RootMod}} import, won't actually become part of -// the resultant binary. -//go:build modhack -// +build modhack - -package metadata - -// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "cloud.google.com/go/compute/internal" diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 8bf0e5b78..33c88305c 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -70,3 +70,5 @@ benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') - [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) - [FreeCache](https://github.com/coocood/freecache) - [FastCache](https://github.com/VictoriaMetrics/fastcache) +- [Ristretto](https://github.com/dgraph-io/ristretto) +- [Badger](https://github.com/dgraph-io/badger) diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index a9e0d45c9..78bddf1ce 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -19,10 +19,13 @@ const ( // Store the primes in an array as well. 
// // The consts are used when possible in Go code to avoid MOVs but we need a -// contiguous array of the assembly code. +// contiguous array for the assembly code. var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. +// +// Note that a zero-valued Digest is not ready to receive writes. +// Call Reset or create a Digest using New before calling other methods. type Digest struct { v1 uint64 v2 uint64 @@ -33,19 +36,31 @@ type Digest struct { n int // how much of mem is used } -// New creates a new Digest that computes the 64-bit xxHash algorithm. +// New creates a new Digest with a zero seed. func New() *Digest { + return NewWithSeed(0) +} + +// NewWithSeed creates a new Digest with the given seed. +func NewWithSeed(seed uint64) *Digest { var d Digest - d.Reset() + d.ResetWithSeed(seed) return &d } // Reset clears the Digest's state so that it can be reused. +// It uses a seed value of zero. func (d *Digest) Reset() { - d.v1 = primes[0] + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -primes[0] + d.ResetWithSeed(0) +} + +// ResetWithSeed clears the Digest's state so that it can be reused. +// It uses the given seed to initialize the state. +func (d *Digest) ResetWithSeed(seed uint64) { + d.v1 = seed + prime1 + prime2 + d.v2 = seed + prime2 + d.v3 = seed + d.v4 = seed - prime1 d.total = 0 d.n = 0 } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index 9216e0a40..78f95f256 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -6,7 +6,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. // //go:noescape func Sum64(b []byte) uint64 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 26df13bba..118e49e81 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -3,7 +3,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. func Sum64(b []byte) uint64 { // A simpler version would be // d := New() diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index e86f1b5fd..05f5e7dfe 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -5,7 +5,7 @@ package xxhash -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. func Sum64String(s string) uint64 { return Sum64([]byte(s)) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 1c1638fd8..cf9d42aed 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -33,7 +33,7 @@ import ( // // See https://github.com/golang/go/issues/42739 for discussion. -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. // It may be faster than Sum64([]byte(s)) by avoiding a copy. 
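
> Reviewer note: the seed-aware constructors above (`NewWithSeed`, `ResetWithSeed`) enable streaming seeded hashes, which previously required the fixed zero-seed `New`/`Reset`. A short sketch; the seed value is arbitrary:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// NewWithSeed folds the seed into v1..v4 exactly as ResetWithSeed does above.
	d := xxhash.NewWithSeed(42)
	d.Write([]byte("hello, "))
	d.Write([]byte("world"))
	fmt.Printf("seeded:  %016x\n", d.Sum64())

	// ResetWithSeed lets the same Digest be reused for another seeded run.
	d.ResetWithSeed(42)
	d.Write([]byte("hello, world"))
	fmt.Printf("again:   %016x\n", d.Sum64())
}
```
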
func Sum64String(s string) uint64 { b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) diff --git a/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go b/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go index 0185b7c18..9cc4053a0 100644 --- a/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go @@ -1,14 +1,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.18.0 +// protoc-gen-go v1.32.0 +// protoc v3.21.5 // source: xds/data/orca/v3/orca_load_report.proto package v3 import ( _ "github.com/envoyproxy/protoc-gen-validate/validate" - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -22,10 +21,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type OrcaLoadReport struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -33,7 +28,7 @@ type OrcaLoadReport struct { CpuUtilization float64 `protobuf:"fixed64,1,opt,name=cpu_utilization,json=cpuUtilization,proto3" json:"cpu_utilization,omitempty"` MemUtilization float64 `protobuf:"fixed64,2,opt,name=mem_utilization,json=memUtilization,proto3" json:"mem_utilization,omitempty"` - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in xds/data/orca/v3/orca_load_report.proto. Rps uint64 `protobuf:"varint,3,opt,name=rps,proto3" json:"rps,omitempty"` RequestCost map[string]float64 `protobuf:"bytes,4,rep,name=request_cost,json=requestCost,proto3" json:"request_cost,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` Utilization map[string]float64 `protobuf:"bytes,5,rep,name=utilization,proto3" json:"utilization,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` @@ -89,7 +84,7 @@ func (x *OrcaLoadReport) GetMemUtilization() float64 { return 0 } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in xds/data/orca/v3/orca_load_report.proto. 
func (x *OrcaLoadReport) GetRps() uint64 { if x != nil { return x.Rps @@ -147,65 +142,64 @@ var file_xds_data_orca_v3_orca_load_report_proto_rawDesc = []byte{ 0x6f, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb5, 0x06, 0x0a, 0x0e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa6, 0x06, 0x0a, 0x0e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x0f, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x0e, 0x63, 0x70, 0x75, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x45, 0x0a, 0x0f, 0x6d, 0x65, 0x6d, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x42, 0x1c, 0xfa, 0x42, 0x0b, 0x12, 0x09, - 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x19, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x52, 0x0e, 0x6d, 0x65, 0x6d, 0x55, 0x74, 0x69, 0x6c, - 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x03, 0x72, 0x70, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x04, 0x42, 0x02, 0x18, 0x01, 0x52, 0x03, 0x72, 0x70, 0x73, 0x12, 0x54, 0x0a, - 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, + 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x65, 0x6d, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, + 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x52, 0x0e, 0x6d, 0x65, 0x6d, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x03, 0x72, 0x70, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x42, + 0x02, 0x18, 0x01, 0x52, 0x03, 0x72, 0x70, 0x73, 0x12, 0x54, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, + 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, + 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x12, 0x71, + 0x0a, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, - 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, - 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, - 0x6f, 0x73, 0x74, 0x12, 0x7b, 0x0a, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 
0x72, 0x63, 0x61, - 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x55, 0x74, 0x69, 0x6c, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x26, 0xfa, 0x42, 0x10, - 0x9a, 0x01, 0x0d, 0x2a, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xfa, 0x42, 0x10, 0x9a, 0x01, 0x0d, 0x2a, 0x0b, 0x12, 0x09, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0xf0, 0x3f, 0x52, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x35, 0x0a, 0x0e, 0x72, 0x70, 0x73, 0x5f, 0x66, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x0d, 0x72, 0x70, 0x73, 0x46, 0x72, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x70, 0x73, 0x18, 0x07, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x1c, 0xfa, 0x42, 0x19, 0x9a, 0x01, 0x16, 0x2a, 0x14, + 0x12, 0x12, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x29, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x52, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x35, 0x0a, 0x0e, 0x72, 0x70, 0x73, 0x5f, 0x66, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, + 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x0d, 0x72, 0x70, 0x73, 0x46, 0x72, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x70, 0x73, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x03, 0x65, 0x70, 0x73, 0x12, 0x57, 0x0a, 0x0d, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, + 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, + 0x6f, 0x72, 0x74, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x12, 0x47, 0x0a, 0x17, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x52, 0x03, 0x65, 0x70, 0x73, 0x12, 0x57, 0x0a, 0x0d, 0x6e, 0x61, 0x6d, - 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, - 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, - 0x72, 0x74, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x73, 0x12, 0x47, 0x0a, 0x17, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 
0x00, 0x00, 0x52, 0x16, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x3e, 0x0a, 0x10, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, 0x55, - 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3f, 0x0a, 0x11, 0x4e, - 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x00, 0x00, 0x00, 0x00, 0x52, 0x16, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x3e, 0x0a, 0x10, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, + 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x5d, 0x0a, 0x1b, - 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x42, 0x13, 0x4f, 0x72, 0x63, - 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, - 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x64, - 0x61, 0x74, 0x61, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3f, 0x0a, 0x11, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x5d, 0x0a, + 0x1b, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x42, 0x13, 0x4f, 0x72, + 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x63, 0x6e, 
0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, + 0x64, 0x61, 0x74, 0x61, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.validate.go index 2f505db72..8dd55330a 100644 --- a/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.validate.go +++ b/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.validate.go @@ -11,6 +11,7 @@ import ( "net/mail" "net/url" "regexp" + "sort" "strings" "time" "unicode/utf8" @@ -31,74 +32,144 @@ var ( _ = (*url.URL)(nil) _ = (*mail.Address)(nil) _ = anypb.Any{} + _ = sort.Sort ) // Validate checks the field values on OrcaLoadReport with the rules defined in -// the proto definition for this message. If any rules are violated, an error -// is returned. +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. func (m *OrcaLoadReport) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OrcaLoadReport with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in OrcaLoadReportMultiError, +// or nil if none found. +func (m *OrcaLoadReport) ValidateAll() error { + return m.validate(true) +} + +func (m *OrcaLoadReport) validate(all bool) error { if m == nil { return nil } + var errors []error + if m.GetCpuUtilization() < 0 { - return OrcaLoadReportValidationError{ + err := OrcaLoadReportValidationError{ field: "CpuUtilization", reason: "value must be greater than or equal to 0", } + if !all { + return err + } + errors = append(errors, err) } if val := m.GetMemUtilization(); val < 0 || val > 1 { - return OrcaLoadReportValidationError{ + err := OrcaLoadReportValidationError{ field: "MemUtilization", reason: "value must be inside range [0, 1]", } + if !all { + return err + } + errors = append(errors, err) } // no validation rules for Rps // no validation rules for RequestCost - for key, val := range m.GetUtilization() { - _ = val - - // no validation rules for Utilization[key] - - if val := val; val < 0 || val > 1 { - return OrcaLoadReportValidationError{ - field: fmt.Sprintf("Utilization[%v]", key), - reason: "value must be inside range [0, 1]", - } + { + sorted_keys := make([]string, len(m.GetUtilization())) + i := 0 + for key := range m.GetUtilization() { + sorted_keys[i] = key + i++ } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetUtilization()[key] + _ = val + + // no validation rules for Utilization[key] + + if val := val; val < 0 || val > 1 { + err := OrcaLoadReportValidationError{ + field: fmt.Sprintf("Utilization[%v]", key), + reason: "value must be inside range [0, 1]", + } + if !all { + return err + } + errors = append(errors, err) + } + } } if m.GetRpsFractional() < 0 { - return OrcaLoadReportValidationError{ + err := OrcaLoadReportValidationError{ field: "RpsFractional", reason: "value must be greater than or equal to 0", } + if !all { + return err + } + errors = append(errors, err) } if m.GetEps() < 0 { - return OrcaLoadReportValidationError{ + err := OrcaLoadReportValidationError{ field: "Eps", reason: "value must be greater than or equal to 0", } + if !all { + 
return err + } + errors = append(errors, err) } // no validation rules for NamedMetrics if m.GetApplicationUtilization() < 0 { - return OrcaLoadReportValidationError{ + err := OrcaLoadReportValidationError{ field: "ApplicationUtilization", reason: "value must be greater than or equal to 0", } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return OrcaLoadReportMultiError(errors) } return nil } +// OrcaLoadReportMultiError is an error wrapping multiple validation errors +// returned by OrcaLoadReport.ValidateAll() if the designated constraints +// aren't met. +type OrcaLoadReportMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m OrcaLoadReportMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OrcaLoadReportMultiError) AllErrors() []error { return m } + // OrcaLoadReportValidationError is the validation error returned by // OrcaLoadReport.Validate if the designated constraints aren't met. type OrcaLoadReportValidationError struct { diff --git a/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go index 0aafd1ab6..ddec2202c 100644 --- a/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go @@ -1,21 +1,16 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.18.0 +// protoc-gen-go v1.32.0 +// protoc v3.21.5 // source: xds/service/orca/v3/orca.proto package v3 import ( - context "context" v3 "github.com/cncf/xds/go/xds/data/orca/v3" - proto "github.com/golang/protobuf/proto" - duration "github.com/golang/protobuf/ptypes/duration" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" reflect "reflect" sync "sync" ) @@ -27,17 +22,13 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
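
> Reviewer note: with the `ValidateAll`/`MultiError` machinery added above, callers can collect every constraint violation instead of stopping at the first. A hedged sketch, with field values chosen to trip two rules at once:

```go
package main

import (
	"errors"
	"fmt"

	orcadata "github.com/cncf/xds/go/xds/data/orca/v3"
)

func main() {
	report := &orcadata.OrcaLoadReport{
		CpuUtilization: -0.5, // violates "must be >= 0"
		MemUtilization: 1.5,  // violates "must be inside range [0, 1]"
	}

	// Validate() would surface only the CpuUtilization error;
	// ValidateAll() wraps both in an OrcaLoadReportMultiError.
	err := report.ValidateAll()
	var multi orcadata.OrcaLoadReportMultiError
	if errors.As(err, &multi) {
		for _, violation := range multi.AllErrors() {
			fmt.Println(violation)
		}
	}
}
```
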
-const _ = proto.ProtoPackageIsVersion4 - type OrcaLoadReportRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ReportInterval *duration.Duration `protobuf:"bytes,1,opt,name=report_interval,json=reportInterval,proto3" json:"report_interval,omitempty"` - RequestCostNames []string `protobuf:"bytes,2,rep,name=request_cost_names,json=requestCostNames,proto3" json:"request_cost_names,omitempty"` + ReportInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=report_interval,json=reportInterval,proto3" json:"report_interval,omitempty"` + RequestCostNames []string `protobuf:"bytes,2,rep,name=request_cost_names,json=requestCostNames,proto3" json:"request_cost_names,omitempty"` } func (x *OrcaLoadReportRequest) Reset() { @@ -72,7 +63,7 @@ func (*OrcaLoadReportRequest) Descriptor() ([]byte, []int) { return file_xds_service_orca_v3_orca_proto_rawDescGZIP(), []int{0} } -func (x *OrcaLoadReportRequest) GetReportInterval() *duration.Duration { +func (x *OrcaLoadReportRequest) GetReportInterval() *durationpb.Duration { if x != nil { return x.ReportInterval } @@ -137,7 +128,7 @@ func file_xds_service_orca_v3_orca_proto_rawDescGZIP() []byte { var file_xds_service_orca_v3_orca_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_xds_service_orca_v3_orca_proto_goTypes = []interface{}{ (*OrcaLoadReportRequest)(nil), // 0: xds.service.orca.v3.OrcaLoadReportRequest - (*duration.Duration)(nil), // 1: google.protobuf.Duration + (*durationpb.Duration)(nil), // 1: google.protobuf.Duration (*v3.OrcaLoadReport)(nil), // 2: xds.data.orca.v3.OrcaLoadReport } var file_xds_service_orca_v3_orca_proto_depIdxs = []int32{ @@ -189,110 +180,3 @@ func file_xds_service_orca_v3_orca_proto_init() { file_xds_service_orca_v3_orca_proto_goTypes = nil file_xds_service_orca_v3_orca_proto_depIdxs = nil } - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// OpenRcaServiceClient is the client API for OpenRcaService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type OpenRcaServiceClient interface { - StreamCoreMetrics(ctx context.Context, in *OrcaLoadReportRequest, opts ...grpc.CallOption) (OpenRcaService_StreamCoreMetricsClient, error) -} - -type openRcaServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewOpenRcaServiceClient(cc grpc.ClientConnInterface) OpenRcaServiceClient { - return &openRcaServiceClient{cc} -} - -func (c *openRcaServiceClient) StreamCoreMetrics(ctx context.Context, in *OrcaLoadReportRequest, opts ...grpc.CallOption) (OpenRcaService_StreamCoreMetricsClient, error) { - stream, err := c.cc.NewStream(ctx, &_OpenRcaService_serviceDesc.Streams[0], "/xds.service.orca.v3.OpenRcaService/StreamCoreMetrics", opts...) 
- if err != nil { - return nil, err - } - x := &openRcaServiceStreamCoreMetricsClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type OpenRcaService_StreamCoreMetricsClient interface { - Recv() (*v3.OrcaLoadReport, error) - grpc.ClientStream -} - -type openRcaServiceStreamCoreMetricsClient struct { - grpc.ClientStream -} - -func (x *openRcaServiceStreamCoreMetricsClient) Recv() (*v3.OrcaLoadReport, error) { - m := new(v3.OrcaLoadReport) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// OpenRcaServiceServer is the server API for OpenRcaService service. -type OpenRcaServiceServer interface { - StreamCoreMetrics(*OrcaLoadReportRequest, OpenRcaService_StreamCoreMetricsServer) error -} - -// UnimplementedOpenRcaServiceServer can be embedded to have forward compatible implementations. -type UnimplementedOpenRcaServiceServer struct { -} - -func (*UnimplementedOpenRcaServiceServer) StreamCoreMetrics(*OrcaLoadReportRequest, OpenRcaService_StreamCoreMetricsServer) error { - return status.Errorf(codes.Unimplemented, "method StreamCoreMetrics not implemented") -} - -func RegisterOpenRcaServiceServer(s *grpc.Server, srv OpenRcaServiceServer) { - s.RegisterService(&_OpenRcaService_serviceDesc, srv) -} - -func _OpenRcaService_StreamCoreMetrics_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(OrcaLoadReportRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(OpenRcaServiceServer).StreamCoreMetrics(m, &openRcaServiceStreamCoreMetricsServer{stream}) -} - -type OpenRcaService_StreamCoreMetricsServer interface { - Send(*v3.OrcaLoadReport) error - grpc.ServerStream -} - -type openRcaServiceStreamCoreMetricsServer struct { - grpc.ServerStream -} - -func (x *openRcaServiceStreamCoreMetricsServer) Send(m *v3.OrcaLoadReport) error { - return x.ServerStream.SendMsg(m) -} - -var _OpenRcaService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "xds.service.orca.v3.OpenRcaService", - HandlerType: (*OpenRcaServiceServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "StreamCoreMetrics", - Handler: _OpenRcaService_StreamCoreMetrics_Handler, - ServerStreams: true, - }, - }, - Metadata: "xds/service/orca/v3/orca.proto", -} diff --git a/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.validate.go index 5c7c76584..8949e8372 100644 --- a/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.validate.go +++ b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.validate.go @@ -11,6 +11,7 @@ import ( "net/mail" "net/url" "regexp" + "sort" "strings" "time" "unicode/utf8" @@ -31,17 +32,51 @@ var ( _ = (*url.URL)(nil) _ = (*mail.Address)(nil) _ = anypb.Any{} + _ = sort.Sort ) // Validate checks the field values on OrcaLoadReportRequest with the rules // defined in the proto definition for this message. If any rules are -// violated, an error is returned. +// violated, the first error encountered is returned, or nil if there are no violations. func (m *OrcaLoadReportRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OrcaLoadReportRequest with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// OrcaLoadReportRequestMultiError, or nil if none found. +func (m *OrcaLoadReportRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *OrcaLoadReportRequest) validate(all bool) error { if m == nil { return nil } - if v, ok := interface{}(m.GetReportInterval()).(interface{ Validate() error }); ok { + var errors []error + + if all { + switch v := interface{}(m.GetReportInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OrcaLoadReportRequestValidationError{ + field: "ReportInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OrcaLoadReportRequestValidationError{ + field: "ReportInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetReportInterval()).(interface{ Validate() error }); ok { if err := v.Validate(); err != nil { return OrcaLoadReportRequestValidationError{ field: "ReportInterval", @@ -51,9 +86,30 @@ func (m *OrcaLoadReportRequest) Validate() error { } } + if len(errors) > 0 { + return OrcaLoadReportRequestMultiError(errors) + } + return nil } +// OrcaLoadReportRequestMultiError is an error wrapping multiple validation +// errors returned by OrcaLoadReportRequest.ValidateAll() if the designated +// constraints aren't met. +type OrcaLoadReportRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m OrcaLoadReportRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OrcaLoadReportRequestMultiError) AllErrors() []error { return m } + // OrcaLoadReportRequestValidationError is the validation error returned by // OrcaLoadReportRequest.Validate if the designated constraints aren't met. type OrcaLoadReportRequestValidationError struct { diff --git a/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca_grpc.pb.go b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca_grpc.pb.go new file mode 100644 index 000000000..6296ea04b --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca_grpc.pb.go @@ -0,0 +1,135 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v3.21.5 +// source: xds/service/orca/v3/orca.proto + +package v3 + +import ( + context "context" + v3 "github.com/cncf/xds/go/xds/data/orca/v3" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + OpenRcaService_StreamCoreMetrics_FullMethodName = "/xds.service.orca.v3.OpenRcaService/StreamCoreMetrics" +) + +// OpenRcaServiceClient is the client API for OpenRcaService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type OpenRcaServiceClient interface { + StreamCoreMetrics(ctx context.Context, in *OrcaLoadReportRequest, opts ...grpc.CallOption) (OpenRcaService_StreamCoreMetricsClient, error) +} + +type openRcaServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewOpenRcaServiceClient(cc grpc.ClientConnInterface) OpenRcaServiceClient { + return &openRcaServiceClient{cc} +} + +func (c *openRcaServiceClient) StreamCoreMetrics(ctx context.Context, in *OrcaLoadReportRequest, opts ...grpc.CallOption) (OpenRcaService_StreamCoreMetricsClient, error) { + stream, err := c.cc.NewStream(ctx, &OpenRcaService_ServiceDesc.Streams[0], OpenRcaService_StreamCoreMetrics_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &openRcaServiceStreamCoreMetricsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type OpenRcaService_StreamCoreMetricsClient interface { + Recv() (*v3.OrcaLoadReport, error) + grpc.ClientStream +} + +type openRcaServiceStreamCoreMetricsClient struct { + grpc.ClientStream +} + +func (x *openRcaServiceStreamCoreMetricsClient) Recv() (*v3.OrcaLoadReport, error) { + m := new(v3.OrcaLoadReport) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// OpenRcaServiceServer is the server API for OpenRcaService service. +// All implementations should embed UnimplementedOpenRcaServiceServer +// for forward compatibility +type OpenRcaServiceServer interface { + StreamCoreMetrics(*OrcaLoadReportRequest, OpenRcaService_StreamCoreMetricsServer) error +} + +// UnimplementedOpenRcaServiceServer should be embedded to have forward compatible implementations. +type UnimplementedOpenRcaServiceServer struct { +} + +func (UnimplementedOpenRcaServiceServer) StreamCoreMetrics(*OrcaLoadReportRequest, OpenRcaService_StreamCoreMetricsServer) error { + return status.Errorf(codes.Unimplemented, "method StreamCoreMetrics not implemented") +} + +// UnsafeOpenRcaServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to OpenRcaServiceServer will +// result in compilation errors. +type UnsafeOpenRcaServiceServer interface { + mustEmbedUnimplementedOpenRcaServiceServer() +} + +func RegisterOpenRcaServiceServer(s grpc.ServiceRegistrar, srv OpenRcaServiceServer) { + s.RegisterService(&OpenRcaService_ServiceDesc, srv) +} + +func _OpenRcaService_StreamCoreMetrics_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(OrcaLoadReportRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(OpenRcaServiceServer).StreamCoreMetrics(m, &openRcaServiceStreamCoreMetricsServer{stream}) +} + +type OpenRcaService_StreamCoreMetricsServer interface { + Send(*v3.OrcaLoadReport) error + grpc.ServerStream +} + +type openRcaServiceStreamCoreMetricsServer struct { + grpc.ServerStream +} + +func (x *openRcaServiceStreamCoreMetricsServer) Send(m *v3.OrcaLoadReport) error { + return x.ServerStream.SendMsg(m) +} + +// OpenRcaService_ServiceDesc is the grpc.ServiceDesc for OpenRcaService service. 
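
> Reviewer note: the regenerated client is consumed the same way as the old hand-rolled one; only the registration types (`grpc.ServiceRegistrar`, `durationpb`) moved. A sketch of streaming core metrics — the target address and report interval are placeholders:

```go
package main

import (
	"context"
	"log"
	"time"

	orcaservice "github.com/cncf/xds/go/xds/service/orca/v3"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	conn, err := grpc.Dial("localhost:9090",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := orcaservice.NewOpenRcaServiceClient(conn)
	stream, err := client.StreamCoreMetrics(context.Background(), &orcaservice.OrcaLoadReportRequest{
		// ReportInterval is now *durationpb.Duration, not ptypes/duration.
		ReportInterval: durationpb.New(10 * time.Second),
	})
	if err != nil {
		log.Fatal(err)
	}
	for {
		report, err := stream.Recv()
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("cpu=%v mem=%v", report.GetCpuUtilization(), report.GetMemUtilization())
	}
}
```
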
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var OpenRcaService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "xds.service.orca.v3.OpenRcaService", + HandlerType: (*OpenRcaServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamCoreMetrics", + Handler: _OpenRcaService_StreamCoreMetrics_Handler, + ServerStreams: true, + }, + }, + Metadata: "xds/service/orca/v3/orca.proto", +} diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 5edd5a7ca..9e790390b 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,5 +1,17 @@ # Change history of go-restful + +## [v3.12.0] - 2024-03-11 +- add Flush method #529 (#538) +- fix: Improper handling of empty POST requests (#543) + +## [v3.11.3] - 2024-01-09 +- better not have 2 tags on one commit + +## [v3.11.1, v3.11.2] - 2024-01-09 + +- fix by restoring custom JSON handler functions (Mike Beaumont #540) + ## [v3.11.0] - 2023-08-19 - restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled. diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md index e3e30080e..7234604e4 100644 --- a/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -2,7 +2,6 @@ go-restful ========== package for building REST-style Web Services using Google Go -[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful) [![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful) [![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful) [![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful) @@ -95,8 +94,7 @@ There are several hooks to customize the behavior of the go-restful package. - Trace logging - Compression - Encoders for other serializers -- Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .` -- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/` +- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/` ## Resources diff --git a/vendor/github.com/emicklei/go-restful/v3/compress.go b/vendor/github.com/emicklei/go-restful/v3/compress.go index 1ff239f99..80adf55fd 100644 --- a/vendor/github.com/emicklei/go-restful/v3/compress.go +++ b/vendor/github.com/emicklei/go-restful/v3/compress.go @@ -49,6 +49,16 @@ func (c *CompressingResponseWriter) CloseNotify() <-chan bool { return c.writer.(http.CloseNotifier).CloseNotify() } +// Flush is part of http.Flusher interface. Noop if the underlying writer doesn't support it. 
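
> Reviewer note: the `Flush` passthrough added just below matters for streaming handlers behind the compressing writer, where flushes were previously dropped. A hedged sketch of a route that relies on it (route path, payload, and timings invented; assumes the usual go-restful `Response` embedding of `http.ResponseWriter`):

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	restful "github.com/emicklei/go-restful/v3"
)

func main() {
	ws := new(restful.WebService)
	ws.Route(ws.GET("/events").To(func(req *restful.Request, resp *restful.Response) {
		for i := 0; i < 3; i++ {
			fmt.Fprintf(resp, "tick %d\n", i)
			// With the Flush passthrough, this reaches the client even when
			// the CompressingResponseWriter wraps the response.
			if f, ok := resp.ResponseWriter.(http.Flusher); ok {
				f.Flush()
			}
			time.Sleep(500 * time.Millisecond)
		}
	}))
	restful.Add(ws)
	http.ListenAndServe(":8080", nil)
}
```
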
+func (c *CompressingResponseWriter) Flush() { + flusher, ok := c.writer.(http.Flusher) + if !ok { + // writer doesn't support http.Flusher interface + return + } + flusher.Flush() +} + // Close the underlying compressor func (c *CompressingResponseWriter) Close() error { if c.isCompressorClosed() { diff --git a/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go index 66dfc824f..9808752ac 100644 --- a/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go +++ b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go @@ -5,11 +5,18 @@ package restful // that can be found in the LICENSE file. import ( + "encoding/json" "encoding/xml" "strings" "sync" ) +var ( + MarshalIndent = json.MarshalIndent + NewDecoder = json.NewDecoder + NewEncoder = json.NewEncoder +) + // EntityReaderWriter can read and write values using an encoding such as JSON,XML. type EntityReaderWriter interface { // Read a serialized version of the value from the request. diff --git a/vendor/github.com/emicklei/go-restful/v3/json.go b/vendor/github.com/emicklei/go-restful/v3/json.go deleted file mode 100644 index 871165166..000000000 --- a/vendor/github.com/emicklei/go-restful/v3/json.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !jsoniter - -package restful - -import "encoding/json" - -var ( - MarshalIndent = json.MarshalIndent - NewDecoder = json.NewDecoder - NewEncoder = json.NewEncoder -) diff --git a/vendor/github.com/emicklei/go-restful/v3/jsoniter.go b/vendor/github.com/emicklei/go-restful/v3/jsoniter.go deleted file mode 100644 index 11b8f8ae7..000000000 --- a/vendor/github.com/emicklei/go-restful/v3/jsoniter.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build jsoniter - -package restful - -import "github.com/json-iterator/go" - -var ( - json = jsoniter.ConfigCompatibleWithStandardLibrary - MarshalIndent = json.MarshalIndent - NewDecoder = json.NewDecoder - NewEncoder = json.NewEncoder -) diff --git a/vendor/github.com/emicklei/go-restful/v3/jsr311.go b/vendor/github.com/emicklei/go-restful/v3/jsr311.go index 07a0c91e9..a9b3faaa8 100644 --- a/vendor/github.com/emicklei/go-restful/v3/jsr311.go +++ b/vendor/github.com/emicklei/go-restful/v3/jsr311.go @@ -155,7 +155,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length") if (method == http.MethodPost || method == http.MethodPut || - method == http.MethodPatch) && length == "" { + method == http.MethodPatch) && (length == "" || length == "0") { return nil, NewError( http.StatusUnsupportedMediaType, fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")), diff --git a/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.proto b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.proto index 705d382aa..5aa965393 100644 --- a/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.proto +++ b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.proto @@ -534,7 +534,7 @@ message StringRules { // at a maximum optional uint64 max_bytes = 5; - // Pattern specifes that this field must match against the specified + // Pattern specifies that this field must match against the specified // regular expression (RE2 syntax). The included expression should elide // any delimiters. 
optional string pattern = 6; @@ -646,7 +646,7 @@ message BytesRules { // at a maximum optional uint64 max_len = 3; - // Pattern specifes that this field must match against the specified + // Pattern specifies that this field must match against the specified // regular expression (RE2 syntax). The included expression should elide // any delimiters. optional string pattern = 4; @@ -732,11 +732,11 @@ message RepeatedRules { optional uint64 max_items = 2; // Unique specifies that all elements in this field must be unique. This - // contraint is only applicable to scalar and enum types (messages are not + // constraint is only applicable to scalar and enum types (messages are not // supported). optional bool unique = 3; - // Items specifies the contraints to be applied to each item in the field. + // Items specifies the constraints to be applied to each item in the field. // Repeated message fields will still execute validation against each item // unless skip is specified here. optional FieldRules items = 4; diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md index 28e351693..97e319b21 100644 --- a/vendor/github.com/evanphx/json-patch/README.md +++ b/vendor/github.com/evanphx/json-patch/README.md @@ -4,7 +4,7 @@ well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). [![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) -[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) +[![Build Status](https://github.com/evanphx/json-patch/actions/workflows/go.yml/badge.svg)](https://github.com/evanphx/json-patch/actions/workflows/go.yml) [![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch) # Get It! @@ -314,4 +314,4 @@ go test -cover ./... ``` Builds for pull requests are tested automatically -using [TravisCI](https://travis-ci.org/evanphx/json-patch). +using [GitHub Actions](https://github.com/evanphx/json-patch/actions/workflows/go.yml). 
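
> Reviewer note: the `patch.go` changes below teach `replace` and `test` to operate on the document root (an empty JSON Pointer path), which previously failed inside `findObject`. A sketch of what that enables, using the package's usual `DecodePatch`/`Apply` entry points:

```go
package main

import (
	"fmt"
	"log"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// "path": "" addresses the whole document; the new code swaps the root
	// container in place rather than returning an error.
	patch, err := jsonpatch.DecodePatch([]byte(`[{"op": "replace", "path": "", "value": {"name": "replaced"}}]`))
	if err != nil {
		log.Fatal(err)
	}
	modified, err := patch.Apply([]byte(`{"name": "original", "age": 7}`))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(modified)) // {"name":"replaced"}
}
```
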
diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go index 4bce5936d..cd0274e1e 100644 --- a/vendor/github.com/evanphx/json-patch/patch.go +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -359,7 +359,7 @@ func findObject(pd *container, path string) (container, string) { next, ok := doc.get(decodePatchKey(part)) - if next == nil || ok != nil { + if next == nil || ok != nil || next.raw == nil { return nil, "" } @@ -568,6 +568,29 @@ func (p Patch) replace(doc *container, op Operation) error { return errors.Wrapf(err, "replace operation failed to decode path") } + if path == "" { + val := op.value() + + if val.which == eRaw { + if !val.tryDoc() { + if !val.tryAry() { + return errors.Wrapf(err, "replace operation value must be object or array") + } + } + } + + switch val.which { + case eAry: + *doc = &val.ary + case eDoc: + *doc = &val.doc + case eRaw: + return errors.Wrapf(err, "replace operation hit impossible case") + } + + return nil + } + con, key := findObject(doc, path) if con == nil { @@ -634,6 +657,25 @@ func (p Patch) test(doc *container, op Operation) error { return errors.Wrapf(err, "test operation failed to decode path") } + if path == "" { + var self lazyNode + + switch sv := (*doc).(type) { + case *partialDoc: + self.doc = *sv + self.which = eDoc + case *partialArray: + self.ary = *sv + self.which = eAry + } + + if self.equal(op.value()) { + return nil + } + + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } + con, key := findObject(doc, path) if con == nil { @@ -646,7 +688,7 @@ func (p Patch) test(doc *container, op Operation) error { } if val == nil { - if op.value().raw == nil { + if op.value() == nil || op.value().raw == nil { return nil } return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go index c4234287d..81094e87c 100644 --- a/vendor/github.com/fatih/color/color.go +++ b/vendor/github.com/fatih/color/color.go @@ -269,7 +269,7 @@ func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { // On Windows, users should wrap w with colorable.NewColorable() if w is of // type *os.File. func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, c.wrap(fmt.Sprint(a...))) + return fmt.Fprintln(w, c.wrap(sprintln(a...))) } // Println formats using the default formats for its operands and writes to @@ -278,7 +278,7 @@ func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { // encountered. This is the standard fmt.Print() method wrapped with the given // color. func (c *Color) Println(a ...interface{}) (n int, err error) { - return fmt.Fprintln(Output, c.wrap(fmt.Sprint(a...))) + return fmt.Fprintln(Output, c.wrap(sprintln(a...))) } // Sprint is just like Print, but returns a string instead of printing it. @@ -288,7 +288,7 @@ func (c *Color) Sprint(a ...interface{}) string { // Sprintln is just like Println, but returns a string instead of printing it. func (c *Color) Sprintln(a ...interface{}) string { - return fmt.Sprintln(c.Sprint(a...)) + return c.wrap(sprintln(a...)) + "\n" } // Sprintf is just like Printf, but returns a string instead of printing it. @@ -370,7 +370,7 @@ func (c *Color) SprintfFunc() func(format string, a ...interface{}) string { // string. Windows users should use this in conjunction with color.Output. 
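
> Reviewer note: the new `sprintln` helper makes `Println`/`Sprintln` format operands like `fmt.Sprintln` (space-separated) before wrapping, and keeps the trailing newline outside the color escape sequence instead of double-wrapping with `fmt.Sprintln`. A small sketch:

```go
package main

import (
	"fmt"

	"github.com/fatih/color"
)

func main() {
	c := color.New(color.FgGreen)
	// Operands are now space-separated, matching fmt.Sprintln semantics,
	// and exactly one newline is appended after the color reset.
	s := c.Sprintln("status:", 200)
	fmt.Print(s) // "status: 200" in green, one trailing newline
}
```
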
func (c *Color) SprintlnFunc() func(a ...interface{}) string { return func(a ...interface{}) string { - return fmt.Sprintln(c.Sprint(a...)) + return c.wrap(sprintln(a...)) + "\n" } } @@ -648,3 +648,8 @@ func HiCyanString(format string, a ...interface{}) string { return colorString(f func HiWhiteString(format string, a ...interface{}) string { return colorString(format, FgHiWhite, a...) } + +// sprintln is a helper function to format a string with fmt.Sprintln and trim the trailing newline. +func sprintln(a ...interface{}) string { + return strings.TrimSuffix(fmt.Sprintln(a...), "\n") +} diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index 8969526a6..7c7f0c69c 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -1,6 +1,7 @@ # A minimal logging API for Go [![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-logr/logr)](https://goreportcard.com/report/github.com/go-logr/logr) [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr) logr offers an(other) opinion on how Go programs and libraries can do logging diff --git a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml new file mode 100644 index 000000000..22f8d21cc --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml @@ -0,0 +1,61 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md index 813788aff..0108f1d57 100644 --- a/vendor/github.com/go-openapi/jsonpointer/README.md +++ b/vendor/github.com/go-openapi/jsonpointer/README.md @@ -1,6 +1,10 @@ -# gojsonpointer [![Build Status](https://travis-ci.org/go-openapi/jsonpointer.svg?branch=master)](https://travis-ci.org/go-openapi/jsonpointer) [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# gojsonpointer [![Build Status](https://github.com/go-openapi/jsonpointer/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/jsonpointer/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) + +[![Slack 
Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/jsonpointer.svg)](https://pkg.go.dev/github.com/go-openapi/jsonpointer) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/jsonpointer)](https://goreportcard.com/report/github.com/go-openapi/jsonpointer) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer) An implementation of JSON Pointer - Go language ## Status diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go index 7df9853de..d970c7cf4 100644 --- a/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -26,6 +26,7 @@ package jsonpointer import ( + "encoding/json" "errors" "fmt" "reflect" @@ -40,6 +41,7 @@ const ( pointerSeparator = `/` invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator + notFound = `Can't find the pointer in the document` ) var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() @@ -48,13 +50,13 @@ var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem() // JSONPointable is an interface for structs to implement when they need to customize the // json pointer process type JSONPointable interface { - JSONLookup(string) (interface{}, error) + JSONLookup(string) (any, error) } // JSONSetable is an interface for structs to implement when they need to customize the // json pointer process type JSONSetable interface { - JSONSet(string, interface{}) error + JSONSet(string, any) error } // New creates a new json pointer for the given string @@ -81,9 +83,7 @@ func (p *Pointer) parse(jsonPointerString string) error { err = errors.New(invalidStart) } else { referenceTokens := strings.Split(jsonPointerString, pointerSeparator) - for _, referenceToken := range referenceTokens[1:] { - p.referenceTokens = append(p.referenceTokens, referenceToken) - } + p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...) 
} } @@ -91,38 +91,58 @@ func (p *Pointer) parse(jsonPointerString string) error { } // Get uses the pointer to retrieve a value from a JSON document -func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) { +func (p *Pointer) Get(document any) (any, reflect.Kind, error) { return p.get(document, swag.DefaultJSONNameProvider) } // Set uses the pointer to set a value from a JSON document -func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) { +func (p *Pointer) Set(document any, value any) (any, error) { return document, p.set(document, value, swag.DefaultJSONNameProvider) } // GetForToken gets a value for a json pointer token 1 level deep -func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) { +func GetForToken(document any, decodedToken string) (any, reflect.Kind, error) { return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider) } // SetForToken gets a value for a json pointer token 1 level deep -func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) { +func SetForToken(document any, decodedToken string, value any) (any, error) { return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider) } -func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { +func isNil(input any) bool { + if input == nil { + return true + } + + kind := reflect.TypeOf(input).Kind() + switch kind { //nolint:exhaustive + case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: + return reflect.ValueOf(input).IsNil() + default: + return false + } +} + +func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) { rValue := reflect.Indirect(reflect.ValueOf(node)) kind := rValue.Kind() + if isNil(node) { + return nil, kind, fmt.Errorf("nil value has not field %q", decodedToken) + } - if rValue.Type().Implements(jsonPointableType) { - r, err := node.(JSONPointable).JSONLookup(decodedToken) + switch typed := node.(type) { + case JSONPointable: + r, err := typed.JSONLookup(decodedToken) if err != nil { return nil, kind, err } return r, kind, nil + case *any: // case of a pointer to interface, that is not resolved by reflect.Indirect + return getSingleImpl(*typed, decodedToken, nameProvider) } - switch kind { + switch kind { //nolint:exhaustive case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -159,7 +179,7 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam } -func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error { +func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameProvider) error { rValue := reflect.Indirect(reflect.ValueOf(node)) if ns, ok := node.(JSONSetable); ok { // pointer impl @@ -170,7 +190,7 @@ func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *sw return node.(JSONSetable).JSONSet(decodedToken, data) } - switch rValue.Kind() { + switch rValue.Kind() { //nolint:exhaustive case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -210,7 +230,7 @@ func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *sw } -func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { +func (p *Pointer) get(node any, 
nameProvider *swag.NameProvider) (any, reflect.Kind, error) { if nameProvider == nil { nameProvider = swag.DefaultJSONNameProvider @@ -231,8 +251,7 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf if err != nil { return nil, knd, err } - node, kind = r, knd - + node = r } rValue := reflect.ValueOf(node) @@ -241,11 +260,11 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf return node, kind, nil } -func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error { +func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { knd := reflect.ValueOf(node).Kind() if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { - return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values") + return errors.New("only structs, pointers, maps and slices are supported for setting values") } if nameProvider == nil { @@ -284,7 +303,7 @@ func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) e continue } - switch kind { + switch kind { //nolint:exhaustive case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -363,6 +382,128 @@ func (p *Pointer) String() string { return pointerString } +func (p *Pointer) Offset(document string) (int64, error) { + dec := json.NewDecoder(strings.NewReader(document)) + var offset int64 + for _, ttk := range p.DecodedTokens() { + tk, err := dec.Token() + if err != nil { + return 0, err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + offset, err = offsetSingleObject(dec, ttk) + if err != nil { + return 0, err + } + case '[': + offset, err = offsetSingleArray(dec, ttk) + if err != nil { + return 0, err + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + } + return offset, nil +} + +func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) { + for dec.More() { + offset := dec.InputOffset() + tk, err := dec.Token() + if err != nil { + return 0, err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + if err = drainSingle(dec); err != nil { + return 0, err + } + case '[': + if err = drainSingle(dec); err != nil { + return 0, err + } + } + case string: + if tk == decodedToken { + return offset, nil + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + } + return 0, fmt.Errorf("token reference %q not found", decodedToken) +} + +func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) { + idx, err := strconv.Atoi(decodedToken) + if err != nil { + return 0, fmt.Errorf("token reference %q is not a number: %v", decodedToken, err) + } + var i int + for i = 0; i < idx && dec.More(); i++ { + tk, err := dec.Token() + if err != nil { + return 0, err + } + + if delim, isDelim := tk.(json.Delim); isDelim { + switch delim { + case '{': + if err = drainSingle(dec); err != nil { + return 0, err + } + case '[': + if err = drainSingle(dec); err != nil { + return 0, err + } + } + } + } + + if !dec.More() { + return 0, fmt.Errorf("token reference %q not found", decodedToken) + } + return dec.InputOffset(), nil +} + +// drainSingle drains a single level of object or array. +// The decoder has to guarantee the beginning delim (i.e. '{' or '[') has been consumed. 
+func drainSingle(dec *json.Decoder) error { + for dec.More() { + tk, err := dec.Token() + if err != nil { + return err + } + if delim, isDelim := tk.(json.Delim); isDelim { + switch delim { + case '{': + if err = drainSingle(dec); err != nil { + return err + } + case '[': + if err = drainSingle(dec); err != nil { + return err + } + } + } + } + + // Consumes the ending delim + if _, err := dec.Token(); err != nil { + return err + } + return nil +} + // Specific JSON pointer encoding here // ~0 => ~ // ~1 => / @@ -377,14 +518,14 @@ const ( // Unescape unescapes a json pointer reference token string to the original representation func Unescape(token string) string { - step1 := strings.Replace(token, encRefTok1, decRefTok1, -1) - step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1) + step1 := strings.ReplaceAll(token, encRefTok1, decRefTok1) + step2 := strings.ReplaceAll(step1, encRefTok0, decRefTok0) return step2 } // Escape escapes a pointer reference token string func Escape(token string) string { - step1 := strings.Replace(token, decRefTok0, encRefTok0, -1) - step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1) + step1 := strings.ReplaceAll(token, decRefTok0, encRefTok0) + step2 := strings.ReplaceAll(step1, decRefTok1, encRefTok1) return step2 } diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml index 013fc1943..22f8d21cc 100644 --- a/vendor/github.com/go-openapi/jsonreference/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml @@ -1,50 +1,61 @@ linters-settings: govet: check-shadowing: true + golint: + min-confidence: 0 gocyclo: - min-complexity: 30 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 2 - min-occurrences: 4 - paralleltest: - ignore-missing: true + min-occurrences: 3 + linters: enable-all: true disable: - maligned + - unparam - lll + - gochecknoinits - gochecknoglobals + - funlen - godox - gocognit - whitespace - wsl - - funlen - - gochecknoglobals - - gochecknoinits - - scopelint - wrapcheck - - exhaustivestruct - - exhaustive - - nlreturn - testpackage - - gci - - gofumpt - - goerr113 + - nlreturn - gomnd - - tparallel + - exhaustivestruct + - goerr113 + - errorlint - nestif - godot - - errorlint - - varcheck - - interfacer - - deadcode - - golint + - gofumpt + - paralleltest + - tparallel + - thelper - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck - structcheck + - golint - nosnakecase - - varnamelen - - exhaustruct diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md index b94753aa5..c7fc2049c 100644 --- a/vendor/github.com/go-openapi/jsonreference/README.md +++ b/vendor/github.com/go-openapi/jsonreference/README.md @@ -1,15 +1,19 @@ -# gojsonreference [![Build Status](https://travis-ci.org/go-openapi/jsonreference.svg?branch=master)](https://travis-ci.org/go-openapi/jsonreference) [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# gojsonreference [![Build 
Status](https://github.com/go-openapi/jsonreference/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/jsonreference/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/jsonreference.svg)](https://pkg.go.dev/github.com/go-openapi/jsonreference) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/jsonreference)](https://goreportcard.com/report/github.com/go-openapi/jsonreference) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonreference?status.svg)](http://godoc.org/github.com/go-openapi/jsonreference) An implementation of JSON Reference - Go language ## Status Feature complete. Stable API ## Dependencies -https://github.com/go-openapi/jsonpointer +* https://github.com/go-openapi/jsonpointer ## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 -http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 +* http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 +* http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore index d69b53acc..c4b1b64f0 100644 --- a/vendor/github.com/go-openapi/swag/.gitignore +++ b/vendor/github.com/go-openapi/swag/.gitignore @@ -2,3 +2,4 @@ secrets.yml vendor Godeps .idea +*.out diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml index bf503e400..80e2be004 100644 --- a/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -4,14 +4,14 @@ linters-settings: golint: min-confidence: 0 gocyclo: - min-complexity: 25 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 3 - min-occurrences: 2 + min-occurrences: 3 linters: enable-all: true @@ -20,35 +20,41 @@ linters: - lll - gochecknoinits - gochecknoglobals - - nlreturn - - testpackage + - funlen + - godox + - gocognit + - whitespace + - wsl - wrapcheck + - testpackage + - nlreturn - gomnd - - exhaustive - exhaustivestruct - goerr113 - - wsl - - whitespace - - gofumpt - - godot + - errorlint - nestif - - godox - - funlen - - gci - - gocognit + - godot + - gofumpt - paralleltest + - tparallel - thelper - ifshort - - gomoddirectives - - cyclop - - forcetypeassert - - ireturn - - tagliatelle - - varnamelen - - goimports - - tenv - - golint - exhaustruct - - nilnil + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint - nosnakecase diff --git a/vendor/github.com/go-openapi/swag/BENCHMARK.md b/vendor/github.com/go-openapi/swag/BENCHMARK.md new file mode 100644 index 000000000..e7f28ed6b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/BENCHMARK.md @@ -0,0 +1,52 @@ +# Benchmarks + +## Name mangling utilities + +```bash +go test 
-bench XXX -run XXX -benchtime 30s +``` + +### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op +BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op +BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op +``` + +### Benchmarks after PR #79 + +~ x10 performance improvement and ~ /100 memory allocations. + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op +``` + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: AMD Ryzen 7 5800X 8-Core Processor +BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op +``` diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md index 217f6fa50..a72922299 100644 --- a/vendor/github.com/go-openapi/swag/README.md +++ b/vendor/github.com/go-openapi/swag/README.md @@ -1,7 +1,8 @@ -# Swag [![Build Status](https://travis-ci.org/go-openapi/swag.svg?branch=master)](https://travis-ci.org/go-openapi/swag) [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# Swag [![Build Status](https://github.com/go-openapi/swag/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/swag.svg)](https://pkg.go.dev/github.com/go-openapi/swag) [![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag) Contains a bunch of helper functions for go-openapi and go-swagger projects. 
@@ -18,4 +19,5 @@ You may also use it standalone for your projects. This repo has only few dependencies outside of the standard library: -* YAML utilities depend on gopkg.in/yaml.v2 +* YAML utilities depend on `gopkg.in/yaml.v3` +* `github.com/mailru/easyjson v0.7.7` diff --git a/vendor/github.com/go-openapi/swag/initialism_index.go b/vendor/github.com/go-openapi/swag/initialism_index.go new file mode 100644 index 000000000..20a359bb6 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/initialism_index.go @@ -0,0 +1,202 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "sort" + "strings" + "sync" +) + +var ( + // commonInitialisms are common acronyms that are kept as whole uppercased words. + commonInitialisms *indexOfInitialisms + + // initialisms is a slice of sorted initialisms + initialisms []string + + // a copy of initialisms pre-baked as []rune + initialismsRunes [][]rune + initialismsUpperCased [][]rune + + isInitialism func(string) bool + + maxAllocMatches int +) + +func init() { + // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 + configuredInitialisms := map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTPS": true, + "HTTP": true, + "ID": true, + "IP": true, + "IPv4": true, + "IPv6": true, + "JSON": true, + "LHS": true, + "OAI": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, + } + + // a thread-safe index of initialisms + commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) + maxAllocMatches = maxAllocHeuristic(initialismsRunes) + + // a test function + isInitialism = commonInitialisms.isInitialism +} + +func asRunes(in []string) [][]rune { + out := make([][]rune, len(in)) + for i, initialism := range in { + out[i] = []rune(initialism) + } + + return out +} + +func asUpperCased(in []string) [][]rune { + out := make([][]rune, len(in)) + + for i, initialism := range in { + out[i] = []rune(upper(trim(initialism))) + } + + return out +} + +func maxAllocHeuristic(in [][]rune) int { + heuristic := make(map[rune]int) + for _, initialism := range in { + heuristic[initialism[0]]++ + } + + var maxAlloc int + for _, val := range heuristic { + if val > maxAlloc { + maxAlloc = val + } + } + + return maxAlloc +} + +// AddInitialisms add additional initialisms +func AddInitialisms(words ...string) { + for _, word := range words { + // commonInitialisms[upper(word)] = true + 
commonInitialisms.add(upper(word)) + } + // sort again + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) +} + +// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. +// Since go1.9, this may be implemented with sync.Map. +type indexOfInitialisms struct { + sortMutex *sync.Mutex + index *sync.Map +} + +func newIndexOfInitialisms() *indexOfInitialisms { + return &indexOfInitialisms{ + sortMutex: new(sync.Mutex), + index: new(sync.Map), + } +} + +func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + for k, v := range initial { + m.index.Store(k, v) + } + return m +} + +func (m *indexOfInitialisms) isInitialism(key string) bool { + _, ok := m.index.Load(key) + return ok +} + +func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { + m.index.Store(key, true) + return m +} + +func (m *indexOfInitialisms) sorted() (result []string) { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + m.index.Range(func(key, _ interface{}) bool { + k := key.(string) + result = append(result, k) + return true + }) + sort.Sort(sort.Reverse(byInitialism(result))) + return +} + +type byInitialism []string + +func (s byInitialism) Len() int { + return len(s) +} +func (s byInitialism) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s byInitialism) Less(i, j int) bool { + if len(s[i]) != len(s[j]) { + return len(s[i]) < len(s[j]) + } + + return strings.Compare(s[i], s[j]) > 0 +} diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go index 00038c377..783442fdd 100644 --- a/vendor/github.com/go-openapi/swag/loading.go +++ b/vendor/github.com/go-openapi/swag/loading.go @@ -21,6 +21,7 @@ import ( "net/http" "net/url" "os" + "path" "path/filepath" "runtime" "strings" @@ -40,43 +41,97 @@ var LoadHTTPBasicAuthPassword = "" var LoadHTTPCustomHeaders = map[string]string{} // LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in -func LoadFromFileOrHTTP(path string) ([]byte, error) { - return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) +func LoadFromFileOrHTTP(pth string) ([]byte, error) { + return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(pth) } // LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in // timeout arg allows for per request overriding of the request timeout -func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) { - return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path) +func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, error) { + return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(timeout))(pth) } -// LoadStrategy returns a loader function for a given path or uri -func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { - if strings.HasPrefix(path, "http") { +// LoadStrategy returns a loader function for a given path or URI. +// +// The load strategy returns the remote load for any path starting with `http`. +// So this works for any URI with a scheme `http` or `https`. +// +// The fallback strategy is to call the local loader. 
+// +// The local loader takes a local file system path (absolute or relative) as argument, +// or alternatively a `file://...` URI, **without host** (see also below for windows). +// +// There are a few liberalities, initially intended to be tolerant regarding the URI syntax, +// especially on windows. +// +// Before the local loader is called, the given path is transformed: +// - percent-encoded characters are unescaped +// - simple paths (e.g. `./folder/file`) are passed as-is +// - on windows, occurrences of `/` are replaced by `\`, so providing a relative path such a `folder/file` works too. +// +// For paths provided as URIs with the "file" scheme, please note that: +// - `file://` is simply stripped. +// This means that the host part of the URI is not parsed at all. +// For example, `file:///folder/file" becomes "/folder/file`, +// but `file://localhost/folder/file` becomes `localhost/folder/file` on unix systems. +// Similarly, `file://./folder/file` yields `./folder/file`. +// - on windows, `file://...` can take a host so as to specify an UNC share location. +// +// Reminder about windows-specifics: +// - `file://host/folder/file` becomes an UNC path like `\\host\folder\file` (no port specification is supported) +// - `file:///c:/folder/file` becomes `C:\folder\file` +// - `file://c:/folder/file` is tolerated (without leading `/`) and becomes `c:\folder\file` +func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { + if strings.HasPrefix(pth, "http") { return remote } - return func(pth string) ([]byte, error) { - upth, err := pathUnescape(pth) + + return func(p string) ([]byte, error) { + upth, err := url.PathUnescape(p) if err != nil { return nil, err } - if strings.HasPrefix(pth, `file://`) { - if runtime.GOOS == "windows" { - // support for canonical file URIs on windows. - // Zero tolerance here for dodgy URIs. - u, _ := url.Parse(upth) - if u.Host != "" { - // assume UNC name (volume share) - // file://host/share/folder\... ==> \\host\share\path\folder - // NOTE: UNC port not yet supported - upth = strings.Join([]string{`\`, u.Host, u.Path}, `\`) - } else { - // file:///c:/folder/... ==> just remove the leading slash - upth = strings.TrimPrefix(upth, `file:///`) - } - } else { - upth = strings.TrimPrefix(upth, `file://`) + if !strings.HasPrefix(p, `file://`) { + // regular file path provided: just normalize slashes + return local(filepath.FromSlash(upth)) + } + + if runtime.GOOS != "windows" { + // crude processing: this leaves full URIs with a host with a (mostly) unexpected result + upth = strings.TrimPrefix(upth, `file://`) + + return local(filepath.FromSlash(upth)) + } + + // windows-only pre-processing of file://... URIs + + // support for canonical file URIs on windows. + u, err := url.Parse(filepath.ToSlash(upth)) + if err != nil { + return nil, err + } + + if u.Host != "" { + // assume UNC name (volume share) + // NOTE: UNC port not yet supported + + // when the "host" segment is a drive letter: + // file://C:/folder/... => C:\folder + upth = path.Clean(strings.Join([]string{u.Host, u.Path}, `/`)) + if !strings.HasSuffix(u.Host, ":") && u.Host[0] != '.' { + // tolerance: if we have a leading dot, this can't be a host + // file://host/share/folder\... 
==> \\host\share\path\folder + upth = "//" + upth + } + } else { + // no host, let's figure out if this is a drive letter + upth = strings.TrimPrefix(upth, `file://`) + first, _, _ := strings.Cut(strings.TrimPrefix(u.Path, "/"), "/") + if strings.HasSuffix(first, ":") { + // drive letter in the first segment: + // file:///c:/folder/... ==> strip the leading slash + upth = strings.TrimPrefix(upth, `/`) } } diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go index aa7f6a9bb..8bb64ac32 100644 --- a/vendor/github.com/go-openapi/swag/name_lexem.go +++ b/vendor/github.com/go-openapi/swag/name_lexem.go @@ -14,74 +14,80 @@ package swag -import "unicode" +import ( + "unicode" + "unicode/utf8" +) type ( - nameLexem interface { - GetUnsafeGoName() string - GetOriginal() string - IsInitialism() bool - } + lexemKind uint8 - initialismNameLexem struct { + nameLexem struct { original string matchedInitialism string + kind lexemKind } +) - casualNameLexem struct { - original string - } +const ( + lexemKindCasualName lexemKind = iota + lexemKindInitialismName ) -func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem { - return &initialismNameLexem{ +func newInitialismNameLexem(original, matchedInitialism string) nameLexem { + return nameLexem{ + kind: lexemKindInitialismName, original: original, matchedInitialism: matchedInitialism, } } -func newCasualNameLexem(original string) *casualNameLexem { - return &casualNameLexem{ +func newCasualNameLexem(original string) nameLexem { + return nameLexem{ + kind: lexemKindCasualName, original: original, } } -func (l *initialismNameLexem) GetUnsafeGoName() string { - return l.matchedInitialism -} +func (l nameLexem) GetUnsafeGoName() string { + if l.kind == lexemKindInitialismName { + return l.matchedInitialism + } + + var ( + first rune + rest string + ) -func (l *casualNameLexem) GetUnsafeGoName() string { - var first rune - var rest string for i, orig := range l.original { if i == 0 { first = orig continue } + if i > 0 { rest = l.original[i:] break } } + if len(l.original) > 1 { - return string(unicode.ToUpper(first)) + lower(rest) + b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest)) + defer func() { + poolOfBuffers.RedeemBuffer(b) + }() + b.WriteRune(unicode.ToUpper(first)) + b.WriteString(lower(rest)) + return b.String() } return l.original } -func (l *initialismNameLexem) GetOriginal() string { +func (l nameLexem) GetOriginal() string { return l.original } -func (l *casualNameLexem) GetOriginal() string { - return l.original -} - -func (l *initialismNameLexem) IsInitialism() bool { - return true -} - -func (l *casualNameLexem) IsInitialism() bool { - return false +func (l nameLexem) IsInitialism() bool { + return l.kind == lexemKindInitialismName } diff --git a/vendor/github.com/go-openapi/swag/post_go19.go b/vendor/github.com/go-openapi/swag/post_go19.go deleted file mode 100644 index 7c7da9c08..000000000 --- a/vendor/github.com/go-openapi/swag/post_go19.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.9 -// +build go1.9 - -package swag - -import ( - "sort" - "sync" -) - -// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. -// Since go1.9, this may be implemented with sync.Map. -type indexOfInitialisms struct { - sortMutex *sync.Mutex - index *sync.Map -} - -func newIndexOfInitialisms() *indexOfInitialisms { - return &indexOfInitialisms{ - sortMutex: new(sync.Mutex), - index: new(sync.Map), - } -} - -func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { - m.sortMutex.Lock() - defer m.sortMutex.Unlock() - for k, v := range initial { - m.index.Store(k, v) - } - return m -} - -func (m *indexOfInitialisms) isInitialism(key string) bool { - _, ok := m.index.Load(key) - return ok -} - -func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { - m.index.Store(key, true) - return m -} - -func (m *indexOfInitialisms) sorted() (result []string) { - m.sortMutex.Lock() - defer m.sortMutex.Unlock() - m.index.Range(func(key, value interface{}) bool { - k := key.(string) - result = append(result, k) - return true - }) - sort.Sort(sort.Reverse(byInitialism(result))) - return -} diff --git a/vendor/github.com/go-openapi/swag/pre_go18.go b/vendor/github.com/go-openapi/swag/pre_go18.go deleted file mode 100644 index 2757d9b95..000000000 --- a/vendor/github.com/go-openapi/swag/pre_go18.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.8 -// +build !go1.8 - -package swag - -import "net/url" - -func pathUnescape(path string) (string, error) { - return url.QueryUnescape(path) -} diff --git a/vendor/github.com/go-openapi/swag/pre_go19.go b/vendor/github.com/go-openapi/swag/pre_go19.go deleted file mode 100644 index 0565db377..000000000 --- a/vendor/github.com/go-openapi/swag/pre_go19.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build !go1.9 -// +build !go1.9 - -package swag - -import ( - "sort" - "sync" -) - -// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. -// Before go1.9, this may be implemented with a mutex on the map. -type indexOfInitialisms struct { - getMutex *sync.Mutex - index map[string]bool -} - -func newIndexOfInitialisms() *indexOfInitialisms { - return &indexOfInitialisms{ - getMutex: new(sync.Mutex), - index: make(map[string]bool, 50), - } -} - -func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { - m.getMutex.Lock() - defer m.getMutex.Unlock() - for k, v := range initial { - m.index[k] = v - } - return m -} - -func (m *indexOfInitialisms) isInitialism(key string) bool { - m.getMutex.Lock() - defer m.getMutex.Unlock() - _, ok := m.index[key] - return ok -} - -func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { - m.getMutex.Lock() - defer m.getMutex.Unlock() - m.index[key] = true - return m -} - -func (m *indexOfInitialisms) sorted() (result []string) { - m.getMutex.Lock() - defer m.getMutex.Unlock() - for k := range m.index { - result = append(result, k) - } - sort.Sort(sort.Reverse(byInitialism(result))) - return -} diff --git a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/split.go index a1825fb7d..274727a86 100644 --- a/vendor/github.com/go-openapi/swag/split.go +++ b/vendor/github.com/go-openapi/swag/split.go @@ -15,124 +15,269 @@ package swag import ( + "bytes" + "sync" "unicode" + "unicode/utf8" ) -var nameReplaceTable = map[rune]string{ - '@': "At ", - '&': "And ", - '|': "Pipe ", - '$': "Dollar ", - '!': "Bang ", - '-': "", - '_': "", -} - type ( splitter struct { - postSplitInitialismCheck bool initialisms []string + initialismsRunes [][]rune + initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version + postSplitInitialismCheck bool + } + + splitterOption func(*splitter) + + initialismMatch struct { + body []rune + start, end int + complete bool + } + initialismMatches []initialismMatch +) + +type ( + // memory pools of temporary objects. + // + // These are used to recycle temporarily allocated objects + // and relieve the GC from undue pressure. + + matchesPool struct { + *sync.Pool } - splitterOption func(*splitter) *splitter + buffersPool struct { + *sync.Pool + } + + lexemsPool struct { + *sync.Pool + } + + splittersPool struct { + *sync.Pool + } ) -// split calls the splitter; splitter provides more control and post options +var ( + // poolOfMatches holds temporary slices for recycling during the initialism match process + poolOfMatches = matchesPool{ + Pool: &sync.Pool{ + New: func() any { + s := make(initialismMatches, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfBuffers = buffersPool{ + Pool: &sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, + }, + } + + poolOfLexems = lexemsPool{ + Pool: &sync.Pool{ + New: func() any { + s := make([]nameLexem, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfSplitters = splittersPool{ + Pool: &sync.Pool{ + New: func() any { + s := newSplitter() + + return &s + }, + }, + } +) + +// nameReplaceTable finds a word representation for special characters. 
+func nameReplaceTable(r rune) (string, bool) { + switch r { + case '@': + return "At ", true + case '&': + return "And ", true + case '|': + return "Pipe ", true + case '$': + return "Dollar ", true + case '!': + return "Bang ", true + case '-': + return "", true + case '_': + return "", true + default: + return "", false + } +} + +// split calls the splitter. +// +// Use newSplitter for more control and options func split(str string) []string { - lexems := newSplitter().split(str) - result := make([]string, 0, len(lexems)) + s := poolOfSplitters.BorrowSplitter() + lexems := s.split(str) + result := make([]string, 0, len(*lexems)) - for _, lexem := range lexems { + for _, lexem := range *lexems { result = append(result, lexem.GetOriginal()) } + poolOfLexems.RedeemLexems(lexems) + poolOfSplitters.RedeemSplitter(s) return result } -func (s *splitter) split(str string) []nameLexem { - return s.toNameLexems(str) -} - -func newSplitter(options ...splitterOption) *splitter { - splitter := &splitter{ +func newSplitter(options ...splitterOption) splitter { + s := splitter{ postSplitInitialismCheck: false, initialisms: initialisms, + initialismsRunes: initialismsRunes, + initialismsUpperCased: initialismsUpperCased, } for _, option := range options { - splitter = option(splitter) + option(&s) } - return splitter + return s } // withPostSplitInitialismCheck allows to catch initialisms after main split process -func withPostSplitInitialismCheck(s *splitter) *splitter { +func withPostSplitInitialismCheck(s *splitter) { s.postSplitInitialismCheck = true +} + +func (p matchesPool) BorrowMatches() *initialismMatches { + s := p.Get().(*initialismMatches) + *s = (*s)[:0] // reset slice, keep allocated capacity + return s } -type ( - initialismMatch struct { - start, end int - body []rune - complete bool +func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer { + s := p.Get().(*bytes.Buffer) + s.Reset() + + if s.Cap() < size { + s.Grow(size) } - initialismMatches []*initialismMatch -) -func (s *splitter) toNameLexems(name string) []nameLexem { + return s +} + +func (p lexemsPool) BorrowLexems() *[]nameLexem { + s := p.Get().(*[]nameLexem) + *s = (*s)[:0] // reset slice, keep allocated capacity + + return s +} + +func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter { + s := p.Get().(*splitter) + s.postSplitInitialismCheck = false // reset options + for _, apply := range options { + apply(s) + } + + return s +} + +func (p matchesPool) RedeemMatches(s *initialismMatches) { + p.Put(s) +} + +func (p buffersPool) RedeemBuffer(s *bytes.Buffer) { + p.Put(s) +} + +func (p lexemsPool) RedeemLexems(s *[]nameLexem) { + p.Put(s) +} + +func (p splittersPool) RedeemSplitter(s *splitter) { + p.Put(s) +} + +func (m initialismMatch) isZero() bool { + return m.start == 0 && m.end == 0 +} + +func (s splitter) split(name string) *[]nameLexem { nameRunes := []rune(name) matches := s.gatherInitialismMatches(nameRunes) + if matches == nil { + return poolOfLexems.BorrowLexems() + } + return s.mapMatchesToNameLexems(nameRunes, matches) } -func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { - matches := make(initialismMatches, 0) +func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches { + var matches *initialismMatches for currentRunePosition, currentRune := range nameRunes { - newMatches := make(initialismMatches, 0, len(matches)) + // recycle these allocations as we loop over runes + // with such recycling, only 2 slices should be allocated per call + // 
instead of o(n). + newMatches := poolOfMatches.BorrowMatches() // check current initialism matches - for _, match := range matches { - if keepCompleteMatch := match.complete; keepCompleteMatch { - newMatches = append(newMatches, match) - continue - } + if matches != nil { // skip first iteration + for _, match := range *matches { + if keepCompleteMatch := match.complete; keepCompleteMatch { + *newMatches = append(*newMatches, match) + continue + } - // drop failed match - currentMatchRune := match.body[currentRunePosition-match.start] - if !s.initialismRuneEqual(currentMatchRune, currentRune) { - continue - } + // drop failed match + currentMatchRune := match.body[currentRunePosition-match.start] + if currentMatchRune != currentRune { + continue + } - // try to complete ongoing match - if currentRunePosition-match.start == len(match.body)-1 { - // we are close; the next step is to check the symbol ahead - // if it is a small letter, then it is not the end of match - // but beginning of the next word - - if currentRunePosition < len(nameRunes)-1 { - nextRune := nameRunes[currentRunePosition+1] - if newWord := unicode.IsLower(nextRune); newWord { - // oh ok, it was the start of a new word - continue + // try to complete ongoing match + if currentRunePosition-match.start == len(match.body)-1 { + // we are close; the next step is to check the symbol ahead + // if it is a small letter, then it is not the end of match + // but beginning of the next word + + if currentRunePosition < len(nameRunes)-1 { + nextRune := nameRunes[currentRunePosition+1] + if newWord := unicode.IsLower(nextRune); newWord { + // oh ok, it was the start of a new word + continue + } } + + match.complete = true + match.end = currentRunePosition } - match.complete = true - match.end = currentRunePosition + *newMatches = append(*newMatches, match) } - - newMatches = append(newMatches, match) } // check for new initialism matches - for _, initialism := range s.initialisms { - initialismRunes := []rune(initialism) - if s.initialismRuneEqual(initialismRunes[0], currentRune) { - newMatches = append(newMatches, &initialismMatch{ + for i := range s.initialisms { + initialismRunes := s.initialismsRunes[i] + if initialismRunes[0] == currentRune { + *newMatches = append(*newMatches, initialismMatch{ start: currentRunePosition, body: initialismRunes, complete: false, @@ -140,24 +285,28 @@ func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { } } + if matches != nil { + poolOfMatches.RedeemMatches(matches) + } matches = newMatches } + // up to the caller to redeem this last slice return matches } -func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem { - nameLexems := make([]nameLexem, 0) +func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem { + nameLexems := poolOfLexems.BorrowLexems() - var lastAcceptedMatch *initialismMatch - for _, match := range matches { + var lastAcceptedMatch initialismMatch + for _, match := range *matches { if !match.complete { continue } - if firstMatch := lastAcceptedMatch == nil; firstMatch { - nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...) 
- nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + if firstMatch := lastAcceptedMatch.isZero(); firstMatch { + s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start]) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match @@ -169,63 +318,66 @@ func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMa } middle := nameRunes[lastAcceptedMatch.end+1 : match.start] - nameLexems = append(nameLexems, s.breakCasualString(middle)...) - nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + s.appendBrokenDownCasualString(nameLexems, middle) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match } // we have not found any accepted matches - if lastAcceptedMatch == nil { - return s.breakCasualString(nameRunes) - } - - if lastAcceptedMatch.end+1 != len(nameRunes) { + if lastAcceptedMatch.isZero() { + *nameLexems = (*nameLexems)[:0] + s.appendBrokenDownCasualString(nameLexems, nameRunes) + } else if lastAcceptedMatch.end+1 != len(nameRunes) { rest := nameRunes[lastAcceptedMatch.end+1:] - nameLexems = append(nameLexems, s.breakCasualString(rest)...) + s.appendBrokenDownCasualString(nameLexems, rest) } - return nameLexems -} + poolOfMatches.RedeemMatches(matches) -func (s *splitter) initialismRuneEqual(a, b rune) bool { - return a == b + return nameLexems } -func (s *splitter) breakInitialism(original string) nameLexem { +func (s splitter) breakInitialism(original string) nameLexem { return newInitialismNameLexem(original, original) } -func (s *splitter) breakCasualString(str []rune) []nameLexem { - segments := make([]nameLexem, 0) - currentSegment := "" +func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) { + currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can reused + defer func() { + poolOfBuffers.RedeemBuffer(currentSegment) + }() addCasualNameLexem := func(original string) { - segments = append(segments, newCasualNameLexem(original)) + *segments = append(*segments, newCasualNameLexem(original)) } addInitialismNameLexem := func(original, match string) { - segments = append(segments, newInitialismNameLexem(original, match)) + *segments = append(*segments, newInitialismNameLexem(original, match)) } - addNameLexem := func(original string) { - if s.postSplitInitialismCheck { - for _, initialism := range s.initialisms { - if upper(initialism) == upper(original) { - addInitialismNameLexem(original, initialism) + var addNameLexem func(string) + if s.postSplitInitialismCheck { + addNameLexem = func(original string) { + for i := range s.initialisms { + if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) { + addInitialismNameLexem(original, s.initialisms[i]) + return } } - } - addCasualNameLexem(original) + addCasualNameLexem(original) + } + } else { + addNameLexem = addCasualNameLexem } - for _, rn := range string(str) { - if replace, found := nameReplaceTable[rn]; found { - if currentSegment != "" { - addNameLexem(currentSegment) - currentSegment = "" + for _, rn := range str { + if replace, found := nameReplaceTable(rn); found { + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() } if replace != "" { @@ -236,27 +388,121 @@ func (s *splitter) breakCasualString(str []rune) []nameLexem { } if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) { - if currentSegment != "" { - 
addNameLexem(currentSegment) - currentSegment = "" + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() } continue } if unicode.IsUpper(rn) { - if currentSegment != "" { - addNameLexem(currentSegment) + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) } - currentSegment = "" + currentSegment.Reset() } - currentSegment += string(rn) + currentSegment.WriteRune(rn) + } + + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) } +} + +// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but +// it ignores leading and trailing blank spaces in the compared +// string. +// +// base is assumed to be composed of upper-cased runes, and be already +// trimmed. +// +// This code is heavily inspired from strings.EqualFold. +func isEqualFoldIgnoreSpace(base []rune, str string) bool { + var i, baseIndex int + // equivalent to b := []byte(str), but without data copy + b := hackStringBytes(str) + + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + break + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + break + } + i += size + } + + if i >= len(b) { + return len(base) == 0 + } + + for _, baseRune := range base { + if i >= len(b) { + break + } + + if c := b[i]; c < utf8.RuneSelf { + // single byte rune case (ASCII) + if baseRune >= utf8.RuneSelf { + return false + } + + baseChar := byte(baseRune) + if c != baseChar && + !('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) { + return false + } + + baseIndex++ + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if unicode.ToUpper(r) != baseRune { + return false + } + baseIndex++ + i += size + } + + if baseIndex != len(base) { + return false + } + + // all passed: now we should only have blanks + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + return false + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + return false + } - if currentSegment != "" { - addNameLexem(currentSegment) + i += size } - return segments + return true } diff --git a/vendor/github.com/go-openapi/swag/string_bytes.go b/vendor/github.com/go-openapi/swag/string_bytes.go new file mode 100644 index 000000000..90745d5ca --- /dev/null +++ b/vendor/github.com/go-openapi/swag/string_bytes.go @@ -0,0 +1,8 @@ +package swag + +import "unsafe" + +// hackStringBytes returns the (unsafe) underlying bytes slice of a string. +func hackStringBytes(str string) []byte { + return unsafe.Slice(unsafe.StringData(str), len(str)) +} diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index f78ab684a..5051401c4 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -18,76 +18,25 @@ import ( "reflect" "strings" "unicode" + "unicode/utf8" ) -// commonInitialisms are common acronyms that are kept as whole uppercased words. -var commonInitialisms *indexOfInitialisms - -// initialisms is a slice of sorted initialisms -var initialisms []string - -var isInitialism func(string) bool - // GoNamePrefixFunc sets an optional rule to prefix go names // which do not start with a letter. // +// The prefix function is assumed to return a string that starts with an upper case letter. +// // e.g. 
to help convert "123" into "{prefix}123" // // The default is to prefix with "X" var GoNamePrefixFunc func(string) string -func init() { - // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 - var configuredInitialisms = map[string]bool{ - "ACL": true, - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTPS": true, - "HTTP": true, - "ID": true, - "IP": true, - "IPv4": true, - "IPv6": true, - "JSON": true, - "LHS": true, - "OAI": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SQL": true, - "SSH": true, - "TCP": true, - "TLS": true, - "TTL": true, - "UDP": true, - "UI": true, - "UID": true, - "UUID": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XMPP": true, - "XSRF": true, - "XSS": true, +func prefixFunc(name, in string) string { + if GoNamePrefixFunc == nil { + return "X" + in } - // a thread-safe index of initialisms - commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) - initialisms = commonInitialisms.sorted() - - // a test function - isInitialism = commonInitialisms.isInitialism + return GoNamePrefixFunc(name) + in } const ( @@ -156,25 +105,9 @@ func SplitByFormat(data, format string) []string { return result } -type byInitialism []string - -func (s byInitialism) Len() int { - return len(s) -} -func (s byInitialism) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s byInitialism) Less(i, j int) bool { - if len(s[i]) != len(s[j]) { - return len(s[i]) < len(s[j]) - } - - return strings.Compare(s[i], s[j]) > 0 -} - // Removes leading whitespaces func trim(str string) string { - return strings.Trim(str, " ") + return strings.TrimSpace(str) } // Shortcut to strings.ToUpper() @@ -188,15 +121,20 @@ func lower(str string) string { } // Camelize an uppercased word -func Camelize(word string) (camelized string) { +func Camelize(word string) string { + camelized := poolOfBuffers.BorrowBuffer(len(word)) + defer func() { + poolOfBuffers.RedeemBuffer(camelized) + }() + for pos, ru := range []rune(word) { if pos > 0 { - camelized += string(unicode.ToLower(ru)) + camelized.WriteRune(unicode.ToLower(ru)) } else { - camelized += string(unicode.ToUpper(ru)) + camelized.WriteRune(unicode.ToUpper(ru)) } } - return + return camelized.String() } // ToFileName lowercases and underscores a go type name @@ -224,33 +162,40 @@ func ToCommandName(name string) string { // ToHumanNameLower represents a code name as a human series of words func ToHumanNameLower(name string) string { - in := newSplitter(withPostSplitInitialismCheck).split(name) - out := make([]string, 0, len(in)) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) + out := make([]string, 0, len(*in)) - for _, w := range in { + for _, w := range *in { if !w.IsInitialism() { out = append(out, lower(w.GetOriginal())) } else { - out = append(out, w.GetOriginal()) + out = append(out, trim(w.GetOriginal())) } } + poolOfLexems.RedeemLexems(in) return strings.Join(out, " ") } // ToHumanNameTitle represents a code name as a human series of words with the first letters titleized func ToHumanNameTitle(name string) string { - in := newSplitter(withPostSplitInitialismCheck).split(name) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) - out := make([]string, 0, len(in)) - 
for _, w := range in { - original := w.GetOriginal() + out := make([]string, 0, len(*in)) + for _, w := range *in { + original := trim(w.GetOriginal()) if !w.IsInitialism() { out = append(out, Camelize(original)) } else { out = append(out, original) } } + poolOfLexems.RedeemLexems(in) + return strings.Join(out, " ") } @@ -264,7 +209,7 @@ func ToJSONName(name string) string { out = append(out, lower(w)) continue } - out = append(out, Camelize(w)) + out = append(out, Camelize(trim(w))) } return strings.Join(out, "") } @@ -283,35 +228,70 @@ func ToVarName(name string) string { // ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes func ToGoName(name string) string { - lexems := newSplitter(withPostSplitInitialismCheck).split(name) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + lexems := s.split(name) + poolOfSplitters.RedeemSplitter(s) + defer func() { + poolOfLexems.RedeemLexems(lexems) + }() + lexemes := *lexems + + if len(lexemes) == 0 { + return "" + } + + result := poolOfBuffers.BorrowBuffer(len(name)) + defer func() { + poolOfBuffers.RedeemBuffer(result) + }() - result := "" - for _, lexem := range lexems { + // check if not starting with a letter, upper case + firstPart := lexemes[0].GetUnsafeGoName() + if lexemes[0].IsInitialism() { + firstPart = upper(firstPart) + } + + if c := firstPart[0]; c < utf8.RuneSelf { + // ASCII + switch { + case 'A' <= c && c <= 'Z': + result.WriteString(firstPart) + case 'a' <= c && c <= 'z': + result.WriteByte(c - 'a' + 'A') + result.WriteString(firstPart[1:]) + default: + result.WriteString(prefixFunc(name, firstPart)) + // NOTE: no longer check if prefixFunc returns a string that starts with uppercase: + // assume this is always the case + } + } else { + // unicode + firstRune, _ := utf8.DecodeRuneInString(firstPart) + switch { + case !unicode.IsLetter(firstRune): + result.WriteString(prefixFunc(name, firstPart)) + case !unicode.IsUpper(firstRune): + result.WriteString(prefixFunc(name, firstPart)) + /* + result.WriteRune(unicode.ToUpper(firstRune)) + result.WriteString(firstPart[offset:]) + */ + default: + result.WriteString(firstPart) + } + } + + for _, lexem := range lexemes[1:] { goName := lexem.GetUnsafeGoName() // to support old behavior if lexem.IsInitialism() { goName = upper(goName) } - result += goName + result.WriteString(goName) } - if len(result) > 0 { - // Only prefix with X when the first character isn't an ascii letter - first := []rune(result)[0] - if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) { - if GoNamePrefixFunc == nil { - return "X" + result - } - result = GoNamePrefixFunc(name) + result - } - first = []rune(result)[0] - if unicode.IsLetter(first) && !unicode.IsUpper(first) { - result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...)) - } - } - - return result + return result.String() } // ContainsStrings searches a slice of strings for a case-sensitive match @@ -341,13 +321,22 @@ type zeroable interface { // IsZero returns true when the value passed into the function is a zero value. // This allows for safer checking of interface values. 
func IsZero(data interface{}) bool { + v := reflect.ValueOf(data) + // check for nil data + switch v.Kind() { //nolint:exhaustive + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v.IsNil() { + return true + } + } + // check for things that have an IsZero method instead if vv, ok := data.(zeroable); ok { return vv.IsZero() } + // continue with slightly more complex reflection - v := reflect.ValueOf(data) - switch v.Kind() { + switch v.Kind() { //nolint:exhaustive case reflect.String: return v.Len() == 0 case reflect.Bool: @@ -358,24 +347,13 @@ func IsZero(data interface{}) bool { return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() case reflect.Struct, reflect.Array: return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface()) case reflect.Invalid: return true + default: + return false } - return false -} - -// AddInitialisms add additional initialisms -func AddInitialisms(words ...string) { - for _, word := range words { - // commonInitialisms[upper(word)] = true - commonInitialisms.add(upper(word)) - } - // sort again - initialisms = commonInitialisms.sorted() } // CommandLineOptionsGroup represents a group of user-defined command line options diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go index f09ee609f..f59e02593 100644 --- a/vendor/github.com/go-openapi/swag/yaml.go +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -16,8 +16,11 @@ package swag import ( "encoding/json" + "errors" "fmt" "path/filepath" + "reflect" + "sort" "strconv" "github.com/mailru/easyjson/jlexer" @@ -48,7 +51,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) { return nil, err } if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { - return nil, fmt.Errorf("only YAML documents that are objects are supported") + return nil, errors.New("only YAML documents that are objects are supported") } return &document, nil } @@ -147,7 +150,7 @@ func yamlScalar(node *yaml.Node) (interface{}, error) { case yamlTimestamp: return node.Value, nil case yamlNull: - return nil, nil + return nil, nil //nolint:nilnil default: return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag()) } @@ -245,7 +248,27 @@ func (s JSONMapSlice) MarshalYAML() (interface{}, error) { return yaml.Marshal(&n) } +func isNil(input interface{}) bool { + if input == nil { + return true + } + kind := reflect.TypeOf(input).Kind() + switch kind { //nolint:exhaustive + case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: + return reflect.ValueOf(input).IsNil() + default: + return false + } +} + func json2yaml(item interface{}) (*yaml.Node, error) { + if isNil(item) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Value: "null", + }, nil + } + switch val := item.(type) { case JSONMapSlice: var n yaml.Node @@ -265,7 +288,14 @@ func json2yaml(item interface{}) (*yaml.Node, error) { case map[string]interface{}: var n yaml.Node n.Kind = yaml.MappingNode - for k, v := range val { + keys := make([]string, 0, len(val)) + for k := range val { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := val[k] childNode, err := json2yaml(v) if err != nil { return nil, err @@ -318,8 +348,9 @@ func json2yaml(item interface{}) (*yaml.Node, error) { Tag: yamlBoolScalar, Value: strconv.FormatBool(val), }, nil + default: + return nil, fmt.Errorf("unhandled type: %T", val) } 
- return nil, nil } // JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice diff --git a/vendor/github.com/goccy/go-yaml/.codecov.yml b/vendor/github.com/goccy/go-yaml/.codecov.yml index 887054a2a..8364eea0b 100644 --- a/vendor/github.com/goccy/go-yaml/.codecov.yml +++ b/vendor/github.com/goccy/go-yaml/.codecov.yml @@ -26,3 +26,6 @@ comment: layout: "header,diff" behavior: default require_changes: no + +ignore: + - ast diff --git a/vendor/github.com/goccy/go-yaml/CHANGELOG.md b/vendor/github.com/goccy/go-yaml/CHANGELOG.md new file mode 100644 index 000000000..c8f820de8 --- /dev/null +++ b/vendor/github.com/goccy/go-yaml/CHANGELOG.md @@ -0,0 +1,186 @@ +# 1.11.2 - 2023-09-15 + +### Fix bugs + +- Fix quoted comments ( #370 ) +- Fix handle of space at start or last ( #376 ) +- Fix sequence with comment ( #390 ) + +# 1.11.1 - 2023-09-14 + +### Fix bugs + +- Handle `\r` in a double-quoted string the same as `\n` ( #372 ) +- Replace loop with n.Values = append(n.Values, target.Values...) ( #380 ) +- Skip encoding an inline field if it is null ( #386 ) +- Fix comment parsing with null value ( #388 ) + +# 1.11.0 - 2023-04-03 + +### Features + +- Supports dynamically switch encode and decode processing for a given type + +# 1.10.1 - 2023-03-28 + +### Features + +- Quote YAML 1.1 bools at encoding time for compatibility with other legacy parsers +- Add support of 32-bit architecture + +### Fix bugs + +- Don't trim all space characters in block style sequence +- Support strings starting with `@` + +# 1.10.0 - 2023-03-01 + +### Fix bugs + +Reversible conversion of comments was not working in various cases, which has been corrected. +**Breaking Change** exists in the comment map interface. However, if you are dealing with CommentMap directly, there is no problem. 
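
Before the vendored go-yaml files continue, a quick sketch of what the reworked `swag.IsZero` above buys: nil interfaces, maps, pointers, and slices are now short-circuited before a type's own `IsZero` method is consulted. Assuming the vendored `github.com/go-openapi/swag` API:

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-openapi/swag"
)

func main() {
	var p *int
	var m map[string]int

	fmt.Println(swag.IsZero(p))           // true: nil pointer, caught by the new nil check
	fmt.Println(swag.IsZero(m))           // true: nil map
	fmt.Println(swag.IsZero(time.Time{})) // true: delegated to time.Time's own IsZero method
	fmt.Println(swag.IsZero(42))          // false
}
```
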
+
+# 1.9.8 - 2022-12-19
+
+### Fix feature
+
+- Append new line at the end of file ( #329 )
+
+### Fix bugs
+
+- Fix custom marshaler ( #333, #334 )
+- Fix behavior when struct fields conflicted ( #335 )
+- Fix position calculation for literal, folded and raw folded strings ( #330 )
+
+# 1.9.7 - 2022-12-03
+
+### Fix bugs
+
+- Fix handling of quoted map key ( #328 )
+- Fix reusing process of scanning context ( #322 )
+
+## v1.9.6 - 2022-10-26
+
+### New Features
+
+- Introduce MapKeyNode interface to limit node types for map key ( #312 )
+
+### Fix bugs
+
+- Quote strings with special characters in flow mode ( #270 )
+- typeError implements PrettyPrinter interface ( #280 )
+- Fix incorrect const type ( #284 )
+- Fix large literals type inference on 32 bits ( #293 )
+- Fix UTF-8 characters ( #294 )
+- Fix decoding of unknown aliases ( #317 )
+- Fix stream encoder to insert a separator between each encoded document ( #318 )
+
+### Update
+
+- Update golang.org/x/sys ( #289 )
+- Update Go version in CI ( #295 )
+- Add test cases for missing keys to struct literals ( #300 )
+
+## v1.9.5 - 2022-01-12
+
+### New Features
+
+* Add UseSingleQuote option ( #265 )
+
+### Fix bugs
+
+* Preserve defaults while decoding nested structs ( #260 )
+* Fix minor typo in decodeInit error ( #264 )
+* Handle empty sequence entries ( #275 )
+* Fix encoding of sequence with multiline string ( #276 )
+* Fix encoding of BytesMarshaler type ( #277 )
+* Fix indentState logic for multi-line value ( #278 )
+
+## v1.9.4 - 2021-10-12
+
+### Fix bugs
+
+* Keep prev/next reference between tokens containing comments when filtering comment tokens ( #257 )
+* Supports escaping reserved keywords in PathBuilder ( #258 )
+
+## v1.9.3 - 2021-09-07
+
+### New Features
+
+* Support encoding and decoding `time.Duration` fields ( #246 )
+* Allow reserved characters for key name in YAMLPath ( #251 )
+* Support getting YAMLPath from ast.Node ( #252 )
+* Support CommentToMap option ( #253 )
+
+### Fix bugs
+
+* Fix encoding nested sequences with `yaml.IndentSequence` ( #241 )
+* Fix error reporting on inline structs in strict mode ( #244, #245 )
+* Fix encoding of large floats ( #247 )
+
+### Improve workflow
+
+* Migrate CI from CircleCI to GitHub Action ( #249 )
+* Add workflow for ycat ( #250 )
+
+## v1.9.2 - 2021-07-26
+
+### Support WithComment option ( #238 )
+
+`yaml.WithComment` is an option for encoding with comments.
+The position where you want to add a comment is represented by a YAMLPath, and it is the key of `yaml.CommentMap`.
+Also, you can select a `Head` comment or a `Line` comment as the comment type.
+
+## v1.9.1 - 2021-07-20
+
+### Fix DecodeFromNode ( #237 )
+
+- Fix YAML handling where anchor exists
+
+## v1.9.0 - 2021-07-19
+
+### New features
+
+- Support encoding of comment node ( #233 )
+- Support `yaml.NodeToValue(ast.Node, interface{}, ...DecodeOption) error` ( #236 )
+  - Can convert an AST node to a value directly
+
+### Fix decoder for comment
+
+- Fix parsing of literal with comment ( #234 )
+
+### Rename API ( #235 )
+
+- Rename `MarshalWithContext` to `MarshalContext`
+- Rename `UnmarshalWithContext` to `UnmarshalContext`
+
+## v1.8.10 - 2021-07-02
+
+### Fixed bugs
+
+- Fix searching anchor by alias name ( #212 )
+- Fix issue 186: the scanner should account for newline characters when processing multi-line text; without this, source annotation line/column numbers (for this and all subsequent tokens) are inconsistent with plain text editors (see https://github.com/goccy/go-yaml/issues/186). This addresses the issue for single- and double-quoted text only. ( #210 )
+- Add error for unterminated flow mapping node ( #213 )
+- Handle missing required field validation ( #221 )
+- Nicely format unexpected node type errors ( #229 )
+- Support encoding a map which has a defined type key ( #231 )
+
+### New features
+
+- Support sequence indentation by EncodeOption ( #232 )
+
+## v1.8.9 - 2021-03-01
+
+### Fixed bugs
+
+- Fix origin buffer for DocumentHeader and DocumentEnd and Directive
+- Fix origin buffer for anchor value
+- Fix syntax error about map value
+- Fix parsing MergeKey ('<<') characters
+- Fix encoding of float value
+- Fix incorrect column annotation when single or double quotes are used
+
+### New features
+
+- Support encoding/decoding of ast.Node directly
diff --git a/vendor/github.com/goccy/go-yaml/Makefile b/vendor/github.com/goccy/go-yaml/Makefile
new file mode 100644
index 000000000..1b1d92397
--- /dev/null
+++ b/vendor/github.com/goccy/go-yaml/Makefile
@@ -0,0 +1,19 @@
+.PHONY: test
+test:
+	go test -v -race ./...
+
+.PHONY: simple-test
+simple-test:
+	go test -v ./...
+
+.PHONY: cover
+cover:
+	go test -coverprofile=cover.out ./...
+
+.PHONY: cover-html
+cover-html: cover
+	go tool cover -html=cover.out
+
+.PHONY: ycat/build
+ycat/build:
+	go build -o ycat ./cmd/ycat
diff --git a/vendor/github.com/goccy/go-yaml/README.md b/vendor/github.com/goccy/go-yaml/README.md
index 552d0454c..945234917 100644
--- a/vendor/github.com/goccy/go-yaml/README.md
+++ b/vendor/github.com/goccy/go-yaml/README.md
@@ -1,7 +1,7 @@
 # YAML support for the Go language
 
 [![PkgGoDev](https://pkg.go.dev/badge/github.com/goccy/go-yaml)](https://pkg.go.dev/github.com/goccy/go-yaml)
-[![CircleCI](https://circleci.com/gh/goccy/go-yaml.svg?style=shield)](https://circleci.com/gh/goccy/go-yaml)
+![Go](https://github.com/goccy/go-yaml/workflows/Go/badge.svg)
 [![codecov](https://codecov.io/gh/goccy/go-yaml/branch/master/graph/badge.svg)](https://codecov.io/gh/goccy/go-yaml)
 [![Go Report Card](https://goreportcard.com/badge/github.com/goccy/go-yaml)](https://goreportcard.com/report/github.com/goccy/go-yaml)
 
@@ -9,18 +9,18 @@
 
 # Why a new library?
 
-As of this writing, there already exists a defacto standard library for YAML processing Go: [https://github.com/go-yaml/yaml](https://github.com/go-yaml/yaml). However we feel that some features are lacking, namely:
+As of this writing, there already exists a de facto standard library for YAML processing for Go: [https://github.com/go-yaml/yaml](https://github.com/go-yaml/yaml). However we feel that some features are lacking, namely:
 
 - Pretty format for error notifications
-- Directly manipulate the YAML abstract syntax tree
-- Support `Anchor` and `Alias` when marshaling
+- Direct manipulation of YAML abstract syntax tree
+- Support for `Anchor` and `Alias` when marshaling
 - Allow referencing elements declared in another file via anchors
 
 # Features
 
 - Pretty format for error notifications
-- Support `Scanner` or `Lexer` or `Parser` as public API
-- Support `Anchor` and `Alias` to Marshaler
+- Supports `Scanner` or `Lexer` or `Parser` as public API
+- Supports `Anchor` and `Alias` to Marshaler
 - Allow referencing elements declared in another file via anchors
 - Extract value or AST by YAMLPath ( YAMLPath is like a JSONPath )
 
@@ -34,7 +34,7 @@ go get -u github.com/goccy/go-yaml
 ## 1. Simple Encode/Decode
 
-Support compatible interface to `go-yaml/yaml` by using `reflect`
+Provides an interface compatible with `go-yaml/yaml`, implemented using `reflect`
 
 ```go
 var v struct {
@@ -66,7 +66,7 @@ if err := yaml.Unmarshal([]byte(yml), &v); err != nil {
 }
 ```
 
-To control marshal/unmarshal behavior, you can use the `yaml` tag
+To control marshal/unmarshal behavior, you can use the `yaml` tag.
 
 ```go
 	yml := `---
@@ -102,23 +102,23 @@ if err := yaml.Unmarshal([]byte(yml), &v); err != nil {
 For custom marshal/unmarshaling, implement either `Bytes` or `Interface` variant of marshaler/unmarshaler. The difference is that while `BytesMarshaler`/`BytesUnmarshaler` behaves like [`encoding/json`](https://pkg.go.dev/encoding/json) and `InterfaceMarshaler`/`InterfaceUnmarshaler` behaves like [`gopkg.in/yaml.v2`](https://pkg.go.dev/gopkg.in/yaml.v2).
 
-Semantically both are the same, but they differ in performance. Because indentation matter in YAML, you cannot simply accept a valid YAML fragment from a Marshaler, and expect it to work when it is attached to the parent container's serialized form. Therefore when we receive use the `BytesMarshaler`, which returns `[]byte`, we must decode it once to figure out how to make it work in the given context. If you use the `InterfaceMarshaler`, we can skip the decoding.
+Semantically both are the same, but they differ in performance. Because indentation matters in YAML, you cannot simply accept a valid YAML fragment from a Marshaler and expect it to work when it is attached to the parent container's serialized form. Therefore when we use the `BytesMarshaler`, which returns `[]byte`, we must decode it once to figure out how to make it work in the given context. If you use the `InterfaceMarshaler`, we can skip the decoding.
 
 If you are repeatedly marshaling complex objects, the latter is always better performance wise. But if you are, for example, just providing a choice between a config file format that is read only once, the former is probably easier to code.
 
-## 2. Reference elements in declared in another file
+## 2. Reference elements declared in another file
 
-`testdata` directory includes `anchor.yml` file
+The `testdata` directory contains an `anchor.yml` file:
 
 ```shell
 ├── testdata
    └── anchor.yml
 ```
 
-And `anchor.yml` is defined the following.
+And `anchor.yml` is defined as follows:
 
 ```yaml
 a: &a
@@ -126,8 +126,8 @@ a: &a
   c: hello
 ```
 
-Then, if `yaml.ReferenceDirs("testdata")` option passed to `yaml.Decoder`,
- `Decoder` try to find anchor definition from YAML files the under `testdata` directory.
+Then, if the `yaml.ReferenceDirs("testdata")` option is passed to `yaml.Decoder`,
+ `Decoder` tries to find the anchor definition in YAML files under the `testdata` directory.
 
 ```go
 buf := bytes.NewBufferString("a: *a\n")
@@ -146,7 +146,7 @@ fmt.Printf("%+v\n", v) // {A:{B:1 C:hello}}
 
 ## 3. Encode with `Anchor` and `Alias`
 
-### 3.1. Explicitly declaration `Anchor` name and `Alias` name
+### 3.1. Explicitly declared `Anchor` name and `Alias` name
 
 If you want to use `anchor` or `alias`, you can define it as a struct tag.
 
@@ -217,7 +217,7 @@ d: *b
 
 ### 3.3 MergeKey and Alias
 
-Merge key and alias ( `<<: *alias` ) can be used by embedding a structure with the `inline,alias` tag .
+Merge key and alias ( `<<: *alias` ) can be used by embedding a structure with the `inline,alias` tag.
 
 ```go
 type Person struct {
@@ -264,17 +264,17 @@ people:
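
Section 3.3 above covers the encoding side; the decoding side is symmetrical. A minimal sketch of decoding a merge key, assuming merge keys are expanded during a plain `yaml.Unmarshal` (the key names and expected output are illustrative):

```go
package main

import (
	"fmt"

	"github.com/goccy/go-yaml"
)

func main() {
	yml := `
base: &base
  name: default
  port: 8080
server:
  <<: *base
  name: api
`
	var v struct {
		Server map[string]interface{} `yaml:"server"`
	}
	if err := yaml.Unmarshal([]byte(yml), &v); err != nil {
		panic(err)
	}
	// The alias is expanded, and the explicit key wins over the merged one.
	fmt.Println(v.Server["name"], v.Server["port"]) // api 8080
}
```
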
## 4. Pretty Formatted Errors
 
-Error values produced during parsing has two extra features over regular
+Error values produced during parsing have two extra features over regular
 error values.
 
-First by default they contain extra information on the location of the error
-from the source YAML document, to make it easier finding the error location.
+First, by default, they contain extra information on the location of the error
+from the source YAML document, to make it easier to find the error location.
 
 Second, the error messages can optionally be colorized.
 
 If you would like to control exactly how the output looks, consider
 using `yaml.FormatError`, which accepts two boolean values to
-control turning on/off these features
+control turning these features on or off.
 
@@ -342,14 +342,14 @@ b: "hello"
 }
 ```
 
-output result is the following.
+output result is the following:
 
 # Tools
 
-## ycat
+## ycat
 
 print yaml file with color
 
@@ -358,9 +358,13 @@ print yaml file with color
 ### Installation
 
 ```sh
-go get -u github.com/goccy/go-yaml/cmd/ycat
+go install github.com/goccy/go-yaml/cmd/ycat@latest
 ```
 
+# Looking for Sponsors
+
+I'm looking for sponsors for this library. This library is being developed as a personal project in my spare time. If you want a quick response or problem resolution when using this library in your project, please register as a [sponsor](https://github.com/sponsors/goccy). I will cooperate as much as possible. Of course, this library is released under the MIT license, so you can use it freely.
+
 # License
 
 MIT
diff --git a/vendor/github.com/goccy/go-yaml/ast/ast.go b/vendor/github.com/goccy/go-yaml/ast/ast.go
index f78e6d306..b4d5ec418 100644
--- a/vendor/github.com/goccy/go-yaml/ast/ast.go
+++ b/vendor/github.com/goccy/go-yaml/ast/ast.go
@@ -61,6 +61,8 @@ const (
 	TagType
 	// CommentType type identifier for comment node
 	CommentType
+	// CommentGroupType type identifier for comment group node
+	CommentGroupType
 )
 
 // String node type identifier to text
@@ -106,6 +108,58 @@ func (t NodeType) String() string {
 		return "Tag"
 	case CommentType:
 		return "Comment"
+	case CommentGroupType:
+		return "CommentGroup"
 	}
 	return ""
 }
+
+// YAMLName converts the node type identifier to a YAML structure name,
+// based on https://yaml.org/spec/1.2/spec.html
+func (t NodeType) YAMLName() string {
+	switch t {
+	case UnknownNodeType:
+		return "unknown"
+	case DocumentType:
+		return "document"
+	case NullType:
+		return "null"
+	case BoolType:
+		return "boolean"
+	case IntegerType:
+		return "int"
+	case FloatType:
+		return "float"
+	case InfinityType:
+		return "inf"
+	case NanType:
+		return "nan"
+	case StringType:
+		return "string"
+	case MergeKeyType:
+		return "merge key"
+	case LiteralType:
+		return "scalar"
+	case MappingType:
+		return "mapping"
+	case MappingKeyType:
+		return "key"
+	case MappingValueType:
+		return "value"
+	case SequenceType:
+		return "sequence"
+	case AnchorType:
+		return "anchor"
+	case AliasType:
+		return "alias"
+	case DirectiveType:
+		return "directive"
+	case TagType:
+		return "tag"
+	case CommentType:
+		return "comment"
+	case CommentGroupType:
+		return "comment"
+	}
+	return ""
+}
@@ -122,9 +176,15 @@ type Node interface {
 	// AddColumn add column number to child nodes recursively
 	AddColumn(int)
 	// SetComment set comment token to node
-	SetComment(*token.Token) error
+	SetComment(*CommentGroupNode) error
 	// Comment returns comment token instance
-	GetComment() *token.Token
+	GetComment() *CommentGroupNode
+	// GetPath returns YAMLPath for the current node
+	GetPath() string
+	// SetPath set
YAMLPath for the current node + SetPath(string) + // MarshalYAML + MarshalYAML() ([]byte, error) // already read length readLen() int // append read length @@ -133,17 +193,29 @@ type Node interface { clearLen() } +// MapKeyNode type for map key node +type MapKeyNode interface { + Node + // String node to text without comment + stringWithoutComment() string +} + // ScalarNode type for scalar node type ScalarNode interface { - Node + MapKeyNode GetValue() interface{} } type BaseNode struct { - Comment *token.Token + Path string + Comment *CommentGroupNode read int } +func addCommentString(base string, node *CommentGroupNode) string { + return fmt.Sprintf("%s %s", base, node.String()) +} + func (n *BaseNode) readLen() int { return n.read } @@ -156,17 +228,30 @@ func (n *BaseNode) addReadLen(len int) { n.read += len } +// GetPath returns YAMLPath for the current node. +func (n *BaseNode) GetPath() string { + if n == nil { + return "" + } + return n.Path +} + +// SetPath set YAMLPath for the current node. +func (n *BaseNode) SetPath(path string) { + if n == nil { + return + } + n.Path = path +} + // GetComment returns comment token instance -func (n *BaseNode) GetComment() *token.Token { +func (n *BaseNode) GetComment() *CommentGroupNode { return n.Comment } // SetComment set comment token -func (n *BaseNode) SetComment(tk *token.Token) error { - if tk.Type != token.CommentType { - return ErrInvalidTokenType - } - n.Comment = tk +func (n *BaseNode) SetComment(node *CommentGroupNode) error { + n.Comment = node return nil } @@ -186,7 +271,7 @@ func readNode(p []byte, node Node) (int, error) { return 0, io.EOF } size := min(remain, len(p)) - for idx, b := range s[readLen : readLen+size] { + for idx, b := range []byte(s[readLen : readLen+size]) { p[idx] = byte(b) } node.addReadLen(size) @@ -194,7 +279,7 @@ func readNode(p []byte, node Node) (int, error) { } // Null create node for null value -func Null(tk *token.Token) Node { +func Null(tk *token.Token) *NullNode { return &NullNode{ BaseNode: &BaseNode{}, Token: tk, @@ -202,7 +287,7 @@ func Null(tk *token.Token) Node { } // Bool create node for boolean value -func Bool(tk *token.Token) Node { +func Bool(tk *token.Token) *BoolNode { b, _ := strconv.ParseBool(tk.Value) return &BoolNode{ BaseNode: &BaseNode{}, @@ -212,7 +297,7 @@ func Bool(tk *token.Token) Node { } // Integer create node for integer value -func Integer(tk *token.Token) Node { +func Integer(tk *token.Token) *IntegerNode { value := removeUnderScoreFromNumber(tk.Value) switch tk.Type { case token.BinaryIntegerType: @@ -306,7 +391,7 @@ func Integer(tk *token.Token) Node { } // Float create node for float value -func Float(tk *token.Token) Node { +func Float(tk *token.Token) *FloatNode { f, _ := strconv.ParseFloat(removeUnderScoreFromNumber(tk.Value), 64) return &FloatNode{ BaseNode: &BaseNode{}, @@ -350,7 +435,19 @@ func String(tk *token.Token) *StringNode { // Comment create node for comment func Comment(tk *token.Token) *CommentNode { return &CommentNode{ - BaseNode: &BaseNode{Comment: tk}, + BaseNode: &BaseNode{}, + Token: tk, + } +} + +func CommentGroup(comments []*token.Token) *CommentGroupNode { + nodes := []*CommentNode{} + for _, comment := range comments { + nodes = append(nodes, Comment(comment)) + } + return &CommentGroupNode{ + BaseNode: &BaseNode{}, + Comments: nodes, } } @@ -375,7 +472,7 @@ func Mapping(tk *token.Token, isFlowStyle bool, values ...*MappingValueNode) *Ma } // MappingValue create node for mapping value -func MappingValue(tk *token.Token, key Node, value Node) 
*MappingValueNode { +func MappingValue(tk *token.Token, key MapKeyNode, value Node) *MappingValueNode { return &MappingValueNode{ BaseNode: &BaseNode{}, Start: tk, @@ -469,7 +566,11 @@ func (f *File) String() string { for _, doc := range f.Docs { docs = append(docs, doc.String()) } - return strings.Join(docs, "\n") + if len(docs) > 0 { + return strings.Join(docs, "\n") + "\n" + } else { + return "" + } } // DocumentNode type of Document @@ -513,6 +614,11 @@ func (d *DocumentNode) String() string { return strings.Join(doc, "\n") } +// MarshalYAML encodes to a YAML text +func (d *DocumentNode) MarshalYAML() ([]byte, error) { + return []byte(d.String()), nil +} + func removeUnderScoreFromNumber(num string) string { return strings.ReplaceAll(num, "_", "") } @@ -520,8 +626,7 @@ func removeUnderScoreFromNumber(num string) string { // NullNode type of null node type NullNode struct { *BaseNode - Comment *token.Token // position of Comment ( `#comment` ) - Token *token.Token + Token *token.Token } // Read implements (io.Reader).Read @@ -542,15 +647,6 @@ func (n *NullNode) AddColumn(col int) { n.Token.AddColumn(col) } -// SetComment set comment token -func (n *NullNode) SetComment(tk *token.Token) error { - if tk.Type != token.CommentType { - return ErrInvalidTokenType - } - n.Comment = tk - return nil -} - // GetValue returns nil value func (n *NullNode) GetValue() interface{} { return nil @@ -558,9 +654,21 @@ func (n *NullNode) GetValue() interface{} { // String returns `null` text func (n *NullNode) String() string { + if n.Comment != nil { + return addCommentString("null", n.Comment) + } + return n.stringWithoutComment() +} + +func (n *NullNode) stringWithoutComment() string { return "null" } +// MarshalYAML encodes to a YAML text +func (n *NullNode) MarshalYAML() ([]byte, error) { + return []byte(n.String()), nil +} + // IntegerNode type of integer node type IntegerNode struct { *BaseNode @@ -593,9 +701,21 @@ func (n *IntegerNode) GetValue() interface{} { // String int64 to text func (n *IntegerNode) String() string { + if n.Comment != nil { + return addCommentString(n.Token.Value, n.Comment) + } + return n.stringWithoutComment() +} + +func (n *IntegerNode) stringWithoutComment() string { return n.Token.Value } +// MarshalYAML encodes to a YAML text +func (n *IntegerNode) MarshalYAML() ([]byte, error) { + return []byte(n.String()), nil +} + // FloatNode type of float node type FloatNode struct { *BaseNode @@ -629,9 +749,21 @@ func (n *FloatNode) GetValue() interface{} { // String float64 to text func (n *FloatNode) String() string { + if n.Comment != nil { + return addCommentString(n.Token.Value, n.Comment) + } + return n.stringWithoutComment() +} + +func (n *FloatNode) stringWithoutComment() string { return n.Token.Value } +// MarshalYAML encodes to a YAML text +func (n *FloatNode) MarshalYAML() ([]byte, error) { + return []byte(n.String()), nil +} + // StringNode type of string node type StringNode struct { *BaseNode @@ -662,13 +794,66 @@ func (n *StringNode) GetValue() interface{} { return n.Value } +// escapeSingleQuote escapes s to a single quoted scalar. 
+// https://yaml.org/spec/1.2.2/#732-single-quoted-style +func escapeSingleQuote(s string) string { + var sb strings.Builder + growLen := len(s) + // s includes also one ' from the doubled pair + 2 + // opening and closing ' + strings.Count(s, "'") // ' added by ReplaceAll + sb.Grow(growLen) + sb.WriteString("'") + sb.WriteString(strings.ReplaceAll(s, "'", "''")) + sb.WriteString("'") + return sb.String() +} + // String string value to text with quote or literal header if required func (n *StringNode) String() string { switch n.Token.Type { case token.SingleQuoteType: + quoted := escapeSingleQuote(n.Value) + if n.Comment != nil { + return addCommentString(quoted, n.Comment) + } + return quoted + case token.DoubleQuoteType: + quoted := strconv.Quote(n.Value) + if n.Comment != nil { + return addCommentString(quoted, n.Comment) + } + return quoted + } + + lbc := token.DetectLineBreakCharacter(n.Value) + if strings.Contains(n.Value, lbc) { + // This block assumes that the line breaks in this inside scalar content and the Outside scalar content are the same. + // It works mostly, but inconsistencies occur if line break characters are mixed. + header := token.LiteralBlockHeader(n.Value) + space := strings.Repeat(" ", n.Token.Position.Column-1) + values := []string{} + for _, v := range strings.Split(n.Value, lbc) { + values = append(values, fmt.Sprintf("%s %s", space, v)) + } + block := strings.TrimSuffix(strings.TrimSuffix(strings.Join(values, lbc), fmt.Sprintf("%s %s", lbc, space)), fmt.Sprintf(" %s", space)) + return fmt.Sprintf("%s%s%s", header, lbc, block) + } else if len(n.Value) > 0 && (n.Value[0] == '{' || n.Value[0] == '[') { return fmt.Sprintf(`'%s'`, n.Value) + } + if n.Comment != nil { + return addCommentString(n.Value, n.Comment) + } + return n.Value +} + +func (n *StringNode) stringWithoutComment() string { + switch n.Token.Type { + case token.SingleQuoteType: + quoted := fmt.Sprintf(`'%s'`, n.Value) + return quoted case token.DoubleQuoteType: - return strconv.Quote(n.Value) + quoted := strconv.Quote(n.Value) + return quoted } lbc := token.DetectLineBreakCharacter(n.Value) @@ -689,6 +874,11 @@ func (n *StringNode) String() string { return n.Value } +// MarshalYAML encodes to a YAML text +func (n *StringNode) MarshalYAML() ([]byte, error) { + return []byte(n.String()), nil +} + // LiteralNode type of literal node type LiteralNode struct { *BaseNode @@ -725,7 +915,20 @@ func (n *LiteralNode) GetValue() interface{} { // String literal to text func (n *LiteralNode) String() string { origin := n.Value.GetToken().Origin - return fmt.Sprintf("%s\n%s", n.Start.Value, strings.TrimRight(strings.TrimRight(origin, " "), "\n")) + lit := strings.TrimRight(strings.TrimRight(origin, " "), "\n") + if n.Comment != nil { + return fmt.Sprintf("%s %s\n%s", n.Start.Value, n.Comment.String(), lit) + } + return fmt.Sprintf("%s\n%s", n.Start.Value, lit) +} + +func (n *LiteralNode) stringWithoutComment() string { + return n.String() +} + +// MarshalYAML encodes to a YAML text +func (n *LiteralNode) MarshalYAML() ([]byte, error) { + return []byte(n.String()), nil } // MergeKeyNode type of merge key node @@ -754,6 +957,10 @@ func (n *MergeKeyNode) GetValue() interface{} { // String returns '<<' value func (n *MergeKeyNode) String() string { + return n.stringWithoutComment() +} + +func (n *MergeKeyNode) stringWithoutComment() string { return n.Token.Value } @@ -762,6 +969,11 @@ func (n *MergeKeyNode) AddColumn(col int) { n.Token.AddColumn(col) } +// MarshalYAML encodes to a YAML text +func (n *MergeKeyNode) 
MarshalYAML() ([]byte, error) { + return []byte(n.String()), nil +} + // BoolNode type of boolean node type BoolNode struct { *BaseNode @@ -794,9 +1006,21 @@ func (n *BoolNode) GetValue() interface{} { // String boolean to text func (n *BoolNode) String() string { + if n.Comment != nil { + return addCommentString(n.Token.Value, n.Comment) + } + return n.stringWithoutComment() +} + +func (n *BoolNode) stringWithoutComment() string { return n.Token.Value } +// MarshalYAML encodes to a YAML text +func (n *BoolNode) MarshalYAML() ([]byte, error) { + return []byte(n.String()), nil +} + // InfinityNode type of infinity node type InfinityNode struct { *BaseNode @@ -829,9 +1053,21 @@ func (n *InfinityNode) GetValue() interface{} { // String infinity to text func (n *InfinityNode) String() string { + if n.Comment != nil { + return addCommentString(n.Token.Value, n.Comment) + } + return n.stringWithoutComment() +} + +func (n *InfinityNode) stringWithoutComment() string { return n.Token.Value } +// MarshalYAML encodes to a YAML text +func (n *InfinityNode) MarshalYAML() ([]byte, error) { + return []byte(n.String()), nil +} + // NanNode type of nan node type NanNode struct { *BaseNode @@ -863,9 +1099,21 @@ func (n *NanNode) GetValue() interface{} { // String returns .nan func (n *NanNode) String() string { + if n.Comment != nil { + return addCommentString(n.Token.Value, n.Comment) + } + return n.stringWithoutComment() +} + +func (n *NanNode) stringWithoutComment() string { return n.Token.Value } +// MarshalYAML encodes to a YAML text +func (n *NanNode) MarshalYAML() ([]byte, error) { + return []byte(n.String()), nil +} + // MapNode interface of MappingValueNode / MappingNode type MapNode interface { MapRange() *MapNodeIter @@ -890,7 +1138,7 @@ func (m *MapNodeIter) Next() bool { } // Key returns the key of the iterator's current map node entry. 
-func (m *MapNodeIter) Key() Node { +func (m *MapNodeIter) Key() MapKeyNode { return m.values[m.idx].Key } @@ -906,6 +1154,7 @@ type MappingNode struct { End *token.Token IsFlowStyle bool Values []*MappingValueNode + FootComment *CommentGroupNode } func (n *MappingNode) startPos() *token.Position { @@ -964,34 +1213,53 @@ func (n *MappingNode) AddColumn(col int) { } } -func (n *MappingNode) flowStyleString() string { - if len(n.Values) == 0 { - return "{}" - } +func (n *MappingNode) flowStyleString(commentMode bool) string { values := []string{} for _, value := range n.Values { values = append(values, strings.TrimLeft(value.String(), " ")) } - return fmt.Sprintf("{%s}", strings.Join(values, ", ")) + mapText := fmt.Sprintf("{%s}", strings.Join(values, ", ")) + if commentMode && n.Comment != nil { + return addCommentString(mapText, n.Comment) + } + return mapText } -func (n *MappingNode) blockStyleString() string { - if len(n.Values) == 0 { - return "{}" - } +func (n *MappingNode) blockStyleString(commentMode bool) string { values := []string{} for _, value := range n.Values { values = append(values, value.String()) } - return strings.Join(values, "\n") + mapText := strings.Join(values, "\n") + if commentMode && n.Comment != nil { + value := values[0] + var spaceNum int + for i := 0; i < len(value); i++ { + if value[i] != ' ' { + break + } + spaceNum++ + } + comment := n.Comment.StringWithSpace(spaceNum) + return fmt.Sprintf("%s\n%s", comment, mapText) + } + return mapText } // String mapping values to text func (n *MappingNode) String() string { + if len(n.Values) == 0 { + if n.Comment != nil { + return addCommentString("{}", n.Comment) + } + return "{}" + } + + commentMode := true if n.IsFlowStyle || len(n.Values) == 0 { - return n.flowStyleString() + return n.flowStyleString(commentMode) } - return n.blockStyleString() + return n.blockStyleString(commentMode) } // MapRange implements MapNode protocol @@ -1002,6 +1270,11 @@ func (n *MappingNode) MapRange() *MapNodeIter { } } +// MarshalYAML encodes to a YAML text +func (n *MappingNode) MarshalYAML() ([]byte, error) { + return []byte(n.String()), nil +} + // MappingKeyNode type of tag node type MappingKeyNode struct { *BaseNode @@ -1032,15 +1305,25 @@ func (n *MappingKeyNode) AddColumn(col int) { // String tag to text func (n *MappingKeyNode) String() string { + return n.stringWithoutComment() +} + +func (n *MappingKeyNode) stringWithoutComment() string { return fmt.Sprintf("%s %s", n.Start.Value, n.Value.String()) } +// MarshalYAML encodes to a YAML text +func (n *MappingKeyNode) MarshalYAML() ([]byte, error) { + return []byte(n.String()), nil +} + // MappingValueNode type of mapping value type MappingValueNode struct { *BaseNode - Start *token.Token - Key Node - Value Node + Start *token.Token + Key MapKeyNode + Value Node + FootComment *CommentGroupNode } // Replace replace value node. 
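
The `MapNodeIter.Key` signature change above (returning `ast.MapKeyNode` instead of `ast.Node`) ripples through any code that walks mappings. A sketch of iterating a parsed mapping, assuming `parser.ParseBytes` and that a top-level block mapping parses to a node implementing `ast.MapNode`:

```go
package main

import (
	"fmt"

	"github.com/goccy/go-yaml/ast"
	"github.com/goccy/go-yaml/parser"
)

func main() {
	f, err := parser.ParseBytes([]byte("a: 1\nb: 2\n"), 0)
	if err != nil {
		panic(err)
	}
	body := f.Docs[0].Body
	mapNode, ok := body.(ast.MapNode)
	if !ok {
		panic(fmt.Sprintf("expected a mapping, got %s", body.Type()))
	}
	for it := mapNode.MapRange(); it.Next(); {
		// Key() now yields an ast.MapKeyNode; String() still works for display.
		fmt.Printf("%s -> %s\n", it.Key().String(), it.Value().String())
	}
}
```
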
@@ -1089,12 +1372,39 @@ func (n *MappingValueNode) SetIsFlowStyle(isFlow bool) { // String mapping value to text func (n *MappingValueNode) String() string { + var text string + if n.Comment != nil { + text = fmt.Sprintf( + "%s\n%s", + n.Comment.StringWithSpace(n.Key.GetToken().Position.Column-1), + n.toString(), + ) + } else { + text = n.toString() + } + if n.FootComment != nil { + text += fmt.Sprintf("\n%s", n.FootComment.StringWithSpace(n.Key.GetToken().Position.Column-1)) + } + return text +} + +func (n *MappingValueNode) toString() string { space := strings.Repeat(" ", n.Key.GetToken().Position.Column-1) keyIndentLevel := n.Key.GetToken().Position.IndentLevel valueIndentLevel := n.Value.GetToken().Position.IndentLevel + keyComment := n.Key.GetComment() if _, ok := n.Value.(ScalarNode); ok { return fmt.Sprintf("%s%s: %s", space, n.Key.String(), n.Value.String()) } else if keyIndentLevel < valueIndentLevel { + if keyComment != nil { + return fmt.Sprintf( + "%s%s: %s\n%s", + space, + n.Key.stringWithoutComment(), + keyComment.String(), + n.Value.String(), + ) + } return fmt.Sprintf("%s%s:\n%s", space, n.Key.String(), n.Value.String()) } else if m, ok := n.Value.(*MappingNode); ok && (m.IsFlowStyle || len(m.Values) == 0) { return fmt.Sprintf("%s%s: %s", space, n.Key.String(), n.Value.String()) @@ -1105,6 +1415,23 @@ func (n *MappingValueNode) String() string { } else if _, ok := n.Value.(*AliasNode); ok { return fmt.Sprintf("%s%s: %s", space, n.Key.String(), n.Value.String()) } + if keyComment != nil { + return fmt.Sprintf( + "%s%s: %s\n%s", + space, + n.Key.stringWithoutComment(), + keyComment.String(), + n.Value.String(), + ) + } + if m, ok := n.Value.(*MappingNode); ok && m.Comment != nil { + return fmt.Sprintf( + "%s%s: %s", + space, + n.Key.String(), + strings.TrimLeft(n.Value.String(), " "), + ) + } return fmt.Sprintf("%s%s:\n%s", space, n.Key.String(), n.Value.String()) } @@ -1116,6 +1443,11 @@ func (n *MappingValueNode) MapRange() *MapNodeIter { } } +// MarshalYAML encodes to a YAML text +func (n *MappingValueNode) MarshalYAML() ([]byte, error) { + return []byte(n.String()), nil +} + // ArrayNode interface of SequenceNode type ArrayNode interface { ArrayRange() *ArrayNodeIter @@ -1148,10 +1480,12 @@ func (m *ArrayNodeIter) Len() int { // SequenceNode type of sequence node type SequenceNode struct { *BaseNode - Start *token.Token - End *token.Token - IsFlowStyle bool - Values []Node + Start *token.Token + End *token.Token + IsFlowStyle bool + Values []Node + ValueHeadComments []*CommentGroupNode + FootComment *CommentGroupNode } // Replace replace value node. @@ -1172,9 +1506,7 @@ func (n *SequenceNode) Replace(idx int, value Node) error { func (n *SequenceNode) Merge(target *SequenceNode) { column := n.Start.Position.Column - target.Start.Position.Column target.AddColumn(column) - for _, value := range target.Values { - n.Values = append(n.Values, value) - } + n.Values = append(n.Values, target.Values...) } // SetIsFlowStyle set value to IsFlowStyle field recursively. 
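
The comment-aware `String` methods above only take effect when the parser is asked to retain comments in the first place. A sketch of the round trip, assuming the `parser.ParseComments` mode that goccy/go-yaml's parser exposes:

```go
package main

import (
	"fmt"

	"github.com/goccy/go-yaml/parser"
)

func main() {
	src := `# head comment
a: 1 # line comment
b: 2
`
	// With ParseComments, comments become CommentGroupNodes attached to the
	// AST, and File.String() is expected to render them back out.
	f, err := parser.ParseBytes([]byte(src), parser.ParseComments)
	if err != nil {
		panic(err)
	}
	fmt.Print(f.String())
}
```
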
@@ -1225,11 +1557,22 @@ func (n *SequenceNode) flowStyleString() string { func (n *SequenceNode) blockStyleString() string { space := strings.Repeat(" ", n.Start.Position.Column-1) values := []string{} - for _, value := range n.Values { + if n.Comment != nil { + values = append(values, n.Comment.StringWithSpace(n.Start.Position.Column-1)) + } + + for idx, value := range n.Values { valueStr := value.String() splittedValues := strings.Split(valueStr, "\n") trimmedFirstValue := strings.TrimLeft(splittedValues[0], " ") diffLength := len(splittedValues[0]) - len(trimmedFirstValue) + if len(splittedValues) > 1 && value.Type() == StringType || value.Type() == LiteralType { + // If multi-line string, the space characters for indent have already been added, so delete them. + prefix := space + " " + for i := 1; i < len(splittedValues); i++ { + splittedValues[i] = strings.TrimPrefix(splittedValues[i], prefix) + } + } newValues := []string{trimmedFirstValue} for i := 1; i < len(splittedValues); i++ { if len(splittedValues[i]) <= diffLength { @@ -1241,8 +1584,14 @@ func (n *SequenceNode) blockStyleString() string { newValues = append(newValues, fmt.Sprintf("%s %s", space, trimmed)) } newValue := strings.Join(newValues, "\n") + if len(n.ValueHeadComments) == len(n.Values) && n.ValueHeadComments[idx] != nil { + values = append(values, n.ValueHeadComments[idx].StringWithSpace(n.Start.Position.Column-1)) + } values = append(values, fmt.Sprintf("%s- %s", space, newValue)) } + if n.FootComment != nil { + values = append(values, n.FootComment.StringWithSpace(n.Start.Position.Column-1)) + } return strings.Join(values, "\n") } @@ -1262,6 +1611,11 @@ func (n *SequenceNode) ArrayRange() *ArrayNodeIter { } } +// MarshalYAML encodes to a YAML text +func (n *SequenceNode) MarshalYAML() ([]byte, error) { + return []byte(n.String()), nil +} + // AnchorNode type of anchor node type AnchorNode struct { *BaseNode @@ -1319,6 +1673,11 @@ func (n *AnchorNode) String() string { return fmt.Sprintf("&%s %s", n.Name.String(), value) } +// MarshalYAML encodes to a YAML text +func (n *AnchorNode) MarshalYAML() ([]byte, error) { + return []byte(n.String()), nil +} + // AliasNode type of alias node type AliasNode struct { *BaseNode @@ -1364,6 +1723,11 @@ func (n *AliasNode) String() string { return fmt.Sprintf("*%s", n.Value.String()) } +// MarshalYAML encodes to a YAML text +func (n *AliasNode) MarshalYAML() ([]byte, error) { + return []byte(n.String()), nil +} + // DirectiveNode type of directive node type DirectiveNode struct { *BaseNode @@ -1396,6 +1760,11 @@ func (n *DirectiveNode) String() string { return fmt.Sprintf("%s%s", n.Start.Value, n.Value.String()) } +// MarshalYAML encodes to a YAML text +func (n *DirectiveNode) MarshalYAML() ([]byte, error) { + return []byte(n.String()), nil +} + // TagNode type of tag node type TagNode struct { *BaseNode @@ -1429,9 +1798,15 @@ func (n *TagNode) String() string { return fmt.Sprintf("%s %s", n.Start.Value, n.Value.String()) } +// MarshalYAML encodes to a YAML text +func (n *TagNode) MarshalYAML() ([]byte, error) { + return []byte(n.String()), nil +} + // CommentNode type of comment node type CommentNode struct { *BaseNode + Token *token.Token } // Read implements (io.Reader).Read @@ -1443,16 +1818,77 @@ func (n *CommentNode) Read(p []byte) (int, error) { func (n *CommentNode) Type() NodeType { return CommentType } // GetToken returns token instance -func (n *CommentNode) GetToken() *token.Token { return n.Comment } +func (n *CommentNode) GetToken() *token.Token { return n.Token } // 
AddColumn add column number to child nodes recursively func (n *CommentNode) AddColumn(col int) { - n.Comment.AddColumn(col) + if n.Token == nil { + return + } + n.Token.AddColumn(col) } // String comment to text func (n *CommentNode) String() string { - return n.Comment.Value + return fmt.Sprintf("#%s", n.Token.Value) +} + +// MarshalYAML encodes to a YAML text +func (n *CommentNode) MarshalYAML() ([]byte, error) { + return []byte(n.String()), nil +} + +// CommentGroupNode type of comment node +type CommentGroupNode struct { + *BaseNode + Comments []*CommentNode +} + +// Read implements (io.Reader).Read +func (n *CommentGroupNode) Read(p []byte) (int, error) { + return readNode(p, n) +} + +// Type returns TagType +func (n *CommentGroupNode) Type() NodeType { return CommentType } + +// GetToken returns token instance +func (n *CommentGroupNode) GetToken() *token.Token { + if len(n.Comments) > 0 { + return n.Comments[0].Token + } + return nil +} + +// AddColumn add column number to child nodes recursively +func (n *CommentGroupNode) AddColumn(col int) { + for _, comment := range n.Comments { + comment.AddColumn(col) + } +} + +// String comment to text +func (n *CommentGroupNode) String() string { + values := []string{} + for _, comment := range n.Comments { + values = append(values, comment.String()) + } + return strings.Join(values, "\n") +} + +func (n *CommentGroupNode) StringWithSpace(col int) string { + values := []string{} + space := strings.Repeat(" ", col) + for _, comment := range n.Comments { + values = append(values, space+comment.String()) + } + return strings.Join(values, "\n") + +} + +// MarshalYAML encodes to a YAML text +func (n *CommentGroupNode) MarshalYAML() ([]byte, error) { + return []byte(n.String()), nil } // Visitor has Visit method that is invokded for each node encountered by Walk. 
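
The `Walk` changes in the next hunk thread these comment groups through the visitor, so a `Visitor` implementation now also sees attached comments. A minimal visitor sketch (the `typeCounter` name is illustrative, not part of the library):

```go
package main

import (
	"fmt"

	"github.com/goccy/go-yaml/ast"
	"github.com/goccy/go-yaml/parser"
)

// typeCounter counts how many nodes of each type Walk visits.
type typeCounter struct {
	counts map[ast.NodeType]int
}

func (v *typeCounter) Visit(node ast.Node) ast.Visitor {
	v.counts[node.Type()]++
	return v // returning a non-nil visitor continues the walk into children
}

func main() {
	f, err := parser.ParseBytes([]byte("a: 1\nb: [2, 3]\n"), 0)
	if err != nil {
		panic(err)
	}
	v := &typeCounter{counts: map[ast.NodeType]int{}}
	ast.Walk(v, f.Docs[0].Body)
	fmt.Println(v.counts)
}
```
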
@@ -1474,42 +1910,70 @@ func Walk(v Visitor, node Node) { switch n := node.(type) { case *CommentNode: case *NullNode: + walkComment(v, n.BaseNode) case *IntegerNode: + walkComment(v, n.BaseNode) case *FloatNode: + walkComment(v, n.BaseNode) case *StringNode: + walkComment(v, n.BaseNode) case *MergeKeyNode: + walkComment(v, n.BaseNode) case *BoolNode: + walkComment(v, n.BaseNode) case *InfinityNode: + walkComment(v, n.BaseNode) case *NanNode: + walkComment(v, n.BaseNode) case *LiteralNode: + walkComment(v, n.BaseNode) Walk(v, n.Value) case *DirectiveNode: + walkComment(v, n.BaseNode) Walk(v, n.Value) case *TagNode: + walkComment(v, n.BaseNode) Walk(v, n.Value) case *DocumentNode: + walkComment(v, n.BaseNode) Walk(v, n.Body) case *MappingNode: + walkComment(v, n.BaseNode) for _, value := range n.Values { Walk(v, value) } case *MappingKeyNode: + walkComment(v, n.BaseNode) Walk(v, n.Value) case *MappingValueNode: + walkComment(v, n.BaseNode) Walk(v, n.Key) Walk(v, n.Value) case *SequenceNode: + walkComment(v, n.BaseNode) for _, value := range n.Values { Walk(v, value) } case *AnchorNode: + walkComment(v, n.BaseNode) Walk(v, n.Name) Walk(v, n.Value) case *AliasNode: + walkComment(v, n.BaseNode) Walk(v, n.Value) } } +func walkComment(v Visitor, base *BaseNode) { + if base == nil { + return + } + if base.Comment == nil { + return + } + Walk(v, base.Comment) +} + type filterWalker struct { typ NodeType results []Node @@ -1522,6 +1986,77 @@ func (v *filterWalker) Visit(n Node) Visitor { return v } +type parentFinder struct { + target Node +} + +func (f *parentFinder) walk(parent, node Node) Node { + if f.target == node { + return parent + } + switch n := node.(type) { + case *CommentNode: + return nil + case *NullNode: + return nil + case *IntegerNode: + return nil + case *FloatNode: + return nil + case *StringNode: + return nil + case *MergeKeyNode: + return nil + case *BoolNode: + return nil + case *InfinityNode: + return nil + case *NanNode: + return nil + case *LiteralNode: + return f.walk(node, n.Value) + case *DirectiveNode: + return f.walk(node, n.Value) + case *TagNode: + return f.walk(node, n.Value) + case *DocumentNode: + return f.walk(node, n.Body) + case *MappingNode: + for _, value := range n.Values { + if found := f.walk(node, value); found != nil { + return found + } + } + case *MappingKeyNode: + return f.walk(node, n.Value) + case *MappingValueNode: + if found := f.walk(node, n.Key); found != nil { + return found + } + return f.walk(node, n.Value) + case *SequenceNode: + for _, value := range n.Values { + if found := f.walk(node, value); found != nil { + return found + } + } + case *AnchorNode: + if found := f.walk(node, n.Name); found != nil { + return found + } + return f.walk(node, n.Value) + case *AliasNode: + return f.walk(node, n.Value) + } + return nil +} + +// Parent get parent node from child node. +func Parent(root, child Node) Node { + finder := &parentFinder{target: child} + return finder.walk(root, root) +} + // Filter returns a list of nodes that match the given type. 
func Filter(typ NodeType, node Node) []Node { walker := &filterWalker{typ: typ} diff --git a/vendor/github.com/goccy/go-yaml/decode.go b/vendor/github.com/goccy/go-yaml/decode.go index 4c9fff196..72af5e22d 100644 --- a/vendor/github.com/goccy/go-yaml/decode.go +++ b/vendor/github.com/goccy/go-yaml/decode.go @@ -7,19 +7,20 @@ import ( "encoding/base64" "fmt" "io" - "io/ioutil" "math" "os" "path/filepath" "reflect" + "sort" "strconv" "time" + "golang.org/x/xerrors" + "github.com/goccy/go-yaml/ast" "github.com/goccy/go-yaml/internal/errors" "github.com/goccy/go-yaml/parser" "github.com/goccy/go-yaml/token" - "golang.org/x/xerrors" ) // Decoder reads and decodes YAML values from an input stream. @@ -28,6 +29,8 @@ type Decoder struct { referenceReaders []io.Reader anchorNodeMap map[string]ast.Node anchorValueMap map[string]reflect.Value + customUnmarshalerMap map[reflect.Type]func(interface{}, []byte) error + toCommentMap CommentMap opts []DecodeOption referenceFiles []string referenceDirs []string @@ -48,6 +51,7 @@ func NewDecoder(r io.Reader, opts ...DecodeOption) *Decoder { reader: r, anchorNodeMap: map[string]ast.Node{}, anchorValueMap: map[string]reflect.Value{}, + customUnmarshalerMap: map[reflect.Type]func(interface{}, []byte) error{}, opts: opts, referenceReaders: []io.Reader{}, referenceFiles: []string{}, @@ -103,7 +107,7 @@ func (d *Decoder) mergeValueNode(value ast.Node) ast.Node { return value } -func (d *Decoder) mapKeyNodeToString(node ast.Node) string { +func (d *Decoder) mapKeyNodeToString(node ast.MapKeyNode) string { key := d.nodeToValue(node) if key == nil { return "null" @@ -115,6 +119,7 @@ func (d *Decoder) mapKeyNodeToString(node ast.Node) string { } func (d *Decoder) setToMapValue(node ast.Node, m map[string]interface{}) { + d.setPathToCommentMap(node) switch n := node.(type) { case *ast.MappingValueNode: if n.Key.Type() == ast.MergeKeyType { @@ -127,6 +132,9 @@ func (d *Decoder) setToMapValue(node ast.Node, m map[string]interface{}) { for _, value := range n.Values { d.setToMapValue(value, m) } + case *ast.AnchorNode: + anchorName := n.Name.GetToken().Value + d.anchorNodeMap[anchorName] = n.Value } } @@ -146,7 +154,114 @@ func (d *Decoder) setToOrderedMapValue(node ast.Node, m *MapSlice) { } } +func (d *Decoder) setPathToCommentMap(node ast.Node) { + if d.toCommentMap == nil { + return + } + d.addHeadOrLineCommentToMap(node) + d.addFootCommentToMap(node) +} + +func (d *Decoder) addHeadOrLineCommentToMap(node ast.Node) { + sequence, ok := node.(*ast.SequenceNode) + if ok { + d.addSequenceNodeCommentToMap(sequence) + return + } + commentGroup := node.GetComment() + if commentGroup == nil { + return + } + texts := []string{} + targetLine := node.GetToken().Position.Line + minCommentLine := math.MaxInt + for _, comment := range commentGroup.Comments { + if minCommentLine > comment.Token.Position.Line { + minCommentLine = comment.Token.Position.Line + } + texts = append(texts, comment.Token.Value) + } + if len(texts) == 0 { + return + } + commentPath := node.GetPath() + if minCommentLine < targetLine { + d.addCommentToMap(commentPath, HeadComment(texts...)) + } else { + d.addCommentToMap(commentPath, LineComment(texts[0])) + } +} + +func (d *Decoder) addSequenceNodeCommentToMap(node *ast.SequenceNode) { + if len(node.ValueHeadComments) != 0 { + for idx, headComment := range node.ValueHeadComments { + if headComment == nil { + continue + } + texts := make([]string, 0, len(headComment.Comments)) + for _, comment := range headComment.Comments { + texts = append(texts, 
comment.Token.Value) + } + if len(texts) != 0 { + d.addCommentToMap(node.Values[idx].GetPath(), HeadComment(texts...)) + } + } + } + firstElemHeadComment := node.GetComment() + if firstElemHeadComment != nil { + texts := make([]string, 0, len(firstElemHeadComment.Comments)) + for _, comment := range firstElemHeadComment.Comments { + texts = append(texts, comment.Token.Value) + } + if len(texts) != 0 { + d.addCommentToMap(node.Values[0].GetPath(), HeadComment(texts...)) + } + } +} + +func (d *Decoder) addFootCommentToMap(node ast.Node) { + var ( + footComment *ast.CommentGroupNode + footCommentPath string = node.GetPath() + ) + switch n := node.(type) { + case *ast.SequenceNode: + if len(n.Values) != 0 { + footCommentPath = n.Values[len(n.Values)-1].GetPath() + } + footComment = n.FootComment + case *ast.MappingNode: + footComment = n.FootComment + case *ast.MappingValueNode: + footComment = n.FootComment + } + if footComment == nil { + return + } + var texts []string + for _, comment := range footComment.Comments { + texts = append(texts, comment.Token.Value) + } + if len(texts) != 0 { + d.addCommentToMap(footCommentPath, FootComment(texts...)) + } +} + +func (d *Decoder) addCommentToMap(path string, comment *Comment) { + for _, c := range d.toCommentMap[path] { + if c.Position == comment.Position { + // already added same comment + return + } + } + d.toCommentMap[path] = append(d.toCommentMap[path], comment) + sort.Slice(d.toCommentMap[path], func(i, j int) bool { + return d.toCommentMap[path][i].Position < d.toCommentMap[path][j].Position + }) +} + func (d *Decoder) nodeToValue(node ast.Node) interface{} { + d.setPathToCommentMap(node) switch n := node.(type) { case *ast.NullNode: return nil @@ -237,36 +352,67 @@ func (d *Decoder) nodeToValue(node ast.Node) interface{} { return nil } -func (d *Decoder) resolveAlias(node ast.Node) ast.Node { +func (d *Decoder) resolveAlias(node ast.Node) (ast.Node, error) { switch n := node.(type) { case *ast.MappingNode: - for idx, value := range n.Values { - n.Values[idx] = d.resolveAlias(value).(*ast.MappingValueNode) + for idx, v := range n.Values { + value, err := d.resolveAlias(v) + if err != nil { + return nil, err + } + n.Values[idx] = value.(*ast.MappingValueNode) } case *ast.TagNode: - n.Value = d.resolveAlias(n.Value) + value, err := d.resolveAlias(n.Value) + if err != nil { + return nil, err + } + n.Value = value case *ast.MappingKeyNode: - n.Value = d.resolveAlias(n.Value) + value, err := d.resolveAlias(n.Value) + if err != nil { + return nil, err + } + n.Value = value case *ast.MappingValueNode: if n.Key.Type() == ast.MergeKeyType && n.Value.Type() == ast.AliasType { - value := d.resolveAlias(n.Value) + value, err := d.resolveAlias(n.Value) + if err != nil { + return nil, err + } keyColumn := n.Key.GetToken().Position.Column requiredColumn := keyColumn + 2 value.AddColumn(requiredColumn) n.Value = value } else { - n.Key = d.resolveAlias(n.Key) - n.Value = d.resolveAlias(n.Value) + key, err := d.resolveAlias(n.Key) + if err != nil { + return nil, err + } + n.Key = key.(ast.MapKeyNode) + value, err := d.resolveAlias(n.Value) + if err != nil { + return nil, err + } + n.Value = value } case *ast.SequenceNode: - for idx, value := range n.Values { - n.Values[idx] = d.resolveAlias(value) + for idx, v := range n.Values { + value, err := d.resolveAlias(v) + if err != nil { + return nil, err + } + n.Values[idx] = value } case *ast.AliasNode: aliasName := n.Value.GetToken().Value - return d.resolveAlias(d.anchorNodeMap[aliasName]) + node := 
d.anchorNodeMap[aliasName] + if node == nil { + return nil, xerrors.Errorf("cannot find anchor by alias name %s", aliasName) + } + return d.resolveAlias(node) } - return node + return node, nil } func (d *Decoder) getMapNode(node ast.Node) (ast.MapNode, error) { @@ -278,7 +424,7 @@ func (d *Decoder) getMapNode(node ast.Node) (ast.MapNode, error) { if ok { return mapNode, nil } - return nil, xerrors.Errorf("%s node found where MapNode is expected", anchor.Value.Type()) + return nil, errUnexpectedNodeType(anchor.Value.Type(), ast.MappingType, node.GetToken()) } if alias, ok := node.(*ast.AliasNode); ok { aliasName := alias.Value.GetToken().Value @@ -290,11 +436,11 @@ func (d *Decoder) getMapNode(node ast.Node) (ast.MapNode, error) { if ok { return mapNode, nil } - return nil, xerrors.Errorf("%s node found where MapNode is expected", node.Type()) + return nil, errUnexpectedNodeType(node.Type(), ast.MappingType, node.GetToken()) } mapNode, ok := node.(ast.MapNode) if !ok { - return nil, xerrors.Errorf("%s node found where MapNode is expected", node.Type()) + return nil, errUnexpectedNodeType(node.Type(), ast.MappingType, node.GetToken()) } return mapNode, nil } @@ -308,7 +454,8 @@ func (d *Decoder) getArrayNode(node ast.Node) (ast.ArrayNode, error) { if ok { return arrayNode, nil } - return nil, xerrors.Errorf("%s node found where ArrayNode is expected", anchor.Value.Type()) + + return nil, errUnexpectedNodeType(anchor.Value.Type(), ast.SequenceType, node.GetToken()) } if alias, ok := node.(*ast.AliasNode); ok { aliasName := alias.Value.GetToken().Value @@ -320,11 +467,11 @@ func (d *Decoder) getArrayNode(node ast.Node) (ast.ArrayNode, error) { if ok { return arrayNode, nil } - return nil, xerrors.Errorf("%s node found where ArrayNode is expected", node.Type()) + return nil, errUnexpectedNodeType(node.Type(), ast.SequenceType, node.GetToken()) } arrayNode, ok := node.(ast.ArrayNode) if !ok { - return nil, xerrors.Errorf("%s node found where ArrayNode is expected", node.Type()) + return nil, errUnexpectedNodeType(node.Type(), ast.SequenceType, node.GetToken()) } return arrayNode, nil } @@ -338,10 +485,25 @@ func (d *Decoder) fileToNode(f *ast.File) ast.Node { return nil } -func (d *Decoder) convertValue(v reflect.Value, typ reflect.Type) (reflect.Value, error) { +func (d *Decoder) convertValue(v reflect.Value, typ reflect.Type, src ast.Node) (reflect.Value, error) { if typ.Kind() != reflect.String { if !v.Type().ConvertibleTo(typ) { - return reflect.Zero(typ), errTypeMismatch(typ, v.Type()) + + // Special case for "strings -> floats" aka scientific notation + // If the destination type is a float and the source type is a string, check if we can + // use strconv.ParseFloat to convert the string to a float. 
+ if (typ.Kind() == reflect.Float32 || typ.Kind() == reflect.Float64) && + v.Type().Kind() == reflect.String { + if f, err := strconv.ParseFloat(v.String(), 64); err == nil { + if typ.Kind() == reflect.Float32 { + return reflect.ValueOf(float32(f)), nil + } else if typ.Kind() == reflect.Float64 { + return reflect.ValueOf(f), nil + } + // else, fall through to the error below + } + } + return reflect.Zero(typ), errTypeMismatch(typ, v.Type(), src.GetToken()) } return v.Convert(typ), nil } @@ -357,7 +519,7 @@ func (d *Decoder) convertValue(v reflect.Value, typ reflect.Type) (reflect.Value return reflect.ValueOf(fmt.Sprint(v.Bool())), nil } if !v.Type().ConvertibleTo(typ) { - return reflect.Zero(typ), errTypeMismatch(typ, v.Type()) + return reflect.Zero(typ), errTypeMismatch(typ, v.Type(), src.GetToken()) } return v.Convert(typ), nil } @@ -375,21 +537,8 @@ func errOverflow(dstType reflect.Type, num string) *overflowError { return &overflowError{dstType: dstType, srcNum: num} } -type typeError struct { - dstType reflect.Type - srcType reflect.Type - structFieldName *string -} - -func (e *typeError) Error() string { - if e.structFieldName != nil { - return fmt.Sprintf("cannot unmarshal %s into Go struct field %s of type %s", e.srcType, *e.structFieldName, e.dstType) - } - return fmt.Sprintf("cannot unmarshal %s into Go value of type %s", e.srcType, e.dstType) -} - -func errTypeMismatch(dstType, srcType reflect.Type) *typeError { - return &typeError{dstType: dstType, srcType: srcType} +func errTypeMismatch(dstType, srcType reflect.Type, token *token.Token) *errors.TypeError { + return &errors.TypeError{DstType: dstType, SrcType: srcType, Token: token} } type unknownFieldError struct { @@ -404,6 +553,10 @@ func errUnknownField(msg string, tk *token.Token) *unknownFieldError { return &unknownFieldError{err: errors.ErrSyntax(msg, tk)} } +func errUnexpectedNodeType(actual, expected ast.NodeType, tk *token.Token) error { + return errors.ErrSyntax(fmt.Sprintf("%s was used where %s is expected", actual.YAMLName(), expected.YAMLName()), tk) +} + type duplicateKeyError struct { err error } @@ -461,41 +614,79 @@ func (d *Decoder) lastNode(node ast.Node) ast.Node { return node } -func (d *Decoder) unmarshalableDocument(node ast.Node) []byte { - node = d.resolveAlias(node) +func (d *Decoder) unmarshalableDocument(node ast.Node) ([]byte, error) { + var err error + node, err = d.resolveAlias(node) + if err != nil { + return nil, err + } doc := node.String() last := d.lastNode(node) if last != nil && last.Type() == ast.LiteralType { doc += "\n" } - return []byte(doc) + return []byte(doc), nil } -func (d *Decoder) unmarshalableText(node ast.Node) ([]byte, bool) { - node = d.resolveAlias(node) +func (d *Decoder) unmarshalableText(node ast.Node) ([]byte, bool, error) { + var err error + node, err = d.resolveAlias(node) + if err != nil { + return nil, false, err + } if node.Type() == ast.AnchorType { node = node.(*ast.AnchorNode).Value } switch n := node.(type) { case *ast.StringNode: - return []byte(n.Value), true + return []byte(n.Value), true, nil case *ast.LiteralNode: - return []byte(n.Value.GetToken().Value), true + return []byte(n.Value.GetToken().Value), true, nil default: scalar, ok := n.(ast.ScalarNode) if ok { - return []byte(fmt.Sprint(scalar.GetValue())), true + return []byte(fmt.Sprint(scalar.GetValue())), true, nil } } - return nil, false + return nil, false, nil } type jsonUnmarshaler interface { UnmarshalJSON([]byte) error } +func (d *Decoder) existsTypeInCustomUnmarshalerMap(t reflect.Type) bool { 
+ if _, exists := d.customUnmarshalerMap[t]; exists { + return true + } + + globalCustomUnmarshalerMu.Lock() + defer globalCustomUnmarshalerMu.Unlock() + if _, exists := globalCustomUnmarshalerMap[t]; exists { + return true + } + return false +} + +func (d *Decoder) unmarshalerFromCustomUnmarshalerMap(t reflect.Type) (func(interface{}, []byte) error, bool) { + if unmarshaler, exists := d.customUnmarshalerMap[t]; exists { + return unmarshaler, exists + } + + globalCustomUnmarshalerMu.Lock() + defer globalCustomUnmarshalerMu.Unlock() + if unmarshaler, exists := globalCustomUnmarshalerMap[t]; exists { + return unmarshaler, exists + } + return nil, false +} + func (d *Decoder) canDecodeByUnmarshaler(dst reflect.Value) bool { - iface := dst.Addr().Interface() + ptrValue := dst.Addr() + if d.existsTypeInCustomUnmarshalerMap(ptrValue.Type()) { + return true + } + iface := ptrValue.Interface() switch iface.(type) { case BytesUnmarshalerContext: return true @@ -507,6 +698,8 @@ func (d *Decoder) canDecodeByUnmarshaler(dst reflect.Value) bool { return true case *time.Time: return true + case *time.Duration: + return true case encoding.TextUnmarshaler: return true case jsonUnmarshaler: @@ -516,17 +709,36 @@ func (d *Decoder) canDecodeByUnmarshaler(dst reflect.Value) bool { } func (d *Decoder) decodeByUnmarshaler(ctx context.Context, dst reflect.Value, src ast.Node) error { - iface := dst.Addr().Interface() + ptrValue := dst.Addr() + if unmarshaler, exists := d.unmarshalerFromCustomUnmarshalerMap(ptrValue.Type()); exists { + b, err := d.unmarshalableDocument(src) + if err != nil { + return errors.Wrapf(err, "failed to UnmarshalYAML") + } + if err := unmarshaler(ptrValue.Interface(), b); err != nil { + return errors.Wrapf(err, "failed to UnmarshalYAML") + } + return nil + } + iface := ptrValue.Interface() if unmarshaler, ok := iface.(BytesUnmarshalerContext); ok { - if err := unmarshaler.UnmarshalYAML(ctx, d.unmarshalableDocument(src)); err != nil { + b, err := d.unmarshalableDocument(src) + if err != nil { + return errors.Wrapf(err, "failed to UnmarshalYAML") + } + if err := unmarshaler.UnmarshalYAML(ctx, b); err != nil { return errors.Wrapf(err, "failed to UnmarshalYAML") } return nil } if unmarshaler, ok := iface.(BytesUnmarshaler); ok { - if err := unmarshaler.UnmarshalYAML(d.unmarshalableDocument(src)); err != nil { + b, err := d.unmarshalableDocument(src) + if err != nil { + return errors.Wrapf(err, "failed to UnmarshalYAML") + } + if err := unmarshaler.UnmarshalYAML(b); err != nil { return errors.Wrapf(err, "failed to UnmarshalYAML") } return nil @@ -568,8 +780,15 @@ func (d *Decoder) decodeByUnmarshaler(ctx context.Context, dst reflect.Value, sr return d.decodeTime(ctx, dst, src) } + if _, ok := iface.(*time.Duration); ok { + return d.decodeDuration(ctx, dst, src) + } + if unmarshaler, isText := iface.(encoding.TextUnmarshaler); isText { - b, ok := d.unmarshalableText(src) + b, ok, err := d.unmarshalableText(src) + if err != nil { + return errors.Wrapf(err, "failed to UnmarshalText") + } if ok { if err := unmarshaler.UnmarshalText(b); err != nil { return errors.Wrapf(err, "failed to UnmarshalText") @@ -580,7 +799,11 @@ func (d *Decoder) decodeByUnmarshaler(ctx context.Context, dst reflect.Value, sr if d.useJSONUnmarshaler { if unmarshaler, ok := iface.(jsonUnmarshaler); ok { - jsonBytes, err := YAMLToJSON(d.unmarshalableDocument(src)) + b, err := d.unmarshalableDocument(src) + if err != nil { + return errors.Wrapf(err, "failed to UnmarshalJSON") + } + jsonBytes, err := YAMLToJSON(b) if err != 
nil { return errors.Wrapf(err, "failed to convert yaml to json") } @@ -595,6 +818,10 @@ func (d *Decoder) decodeByUnmarshaler(ctx context.Context, dst reflect.Value, sr return xerrors.Errorf("does not implemented Unmarshaler") } +var ( + astNodeType = reflect.TypeOf((*ast.Node)(nil)).Elem() +) + func (d *Decoder) decodeValue(ctx context.Context, dst reflect.Value, src ast.Node) error { if src.Type() == ast.AnchorType { anchorName := src.(*ast.AnchorNode).Name.GetToken().Value @@ -625,6 +852,10 @@ func (d *Decoder) decodeValue(ctx context.Context, dst reflect.Value, src ast.No } dst.Set(d.castToAssignableValue(v, dst.Type())) case reflect.Interface: + if dst.Type() == astNodeType { + dst.Set(reflect.ValueOf(src)) + return nil + } v := reflect.ValueOf(d.nodeToValue(src)) if v.IsValid() { dst.Set(v) @@ -661,8 +892,17 @@ func (d *Decoder) decodeValue(ctx context.Context, dst reflect.Value, src ast.No dst.SetInt(int64(vv)) return nil } + case string: // handle scientific notation + if i, err := strconv.ParseFloat(vv, 64); err == nil { + if 0 <= i && i <= math.MaxUint64 && !dst.OverflowInt(int64(i)) { + dst.SetInt(int64(i)) + return nil + } + } else { // couldn't be parsed as float + return errTypeMismatch(valueType, reflect.TypeOf(v), src.GetToken()) + } default: - return errTypeMismatch(valueType, reflect.TypeOf(v)) + return errTypeMismatch(valueType, reflect.TypeOf(v), src.GetToken()) } return errOverflow(valueType, fmt.Sprint(v)) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: @@ -683,14 +923,24 @@ func (d *Decoder) decodeValue(ctx context.Context, dst reflect.Value, src ast.No dst.SetUint(uint64(vv)) return nil } + case string: // handle scientific notation + if i, err := strconv.ParseFloat(vv, 64); err == nil { + if 0 <= i && i <= math.MaxUint64 && !dst.OverflowUint(uint64(i)) { + dst.SetUint(uint64(i)) + return nil + } + } else { // couldn't be parsed as float + return errTypeMismatch(valueType, reflect.TypeOf(v), src.GetToken()) + } + default: - return errTypeMismatch(valueType, reflect.TypeOf(v)) + return errTypeMismatch(valueType, reflect.TypeOf(v), src.GetToken()) } return errOverflow(valueType, fmt.Sprint(v)) } v := reflect.ValueOf(d.nodeToValue(src)) if v.IsValid() { - convertedValue, err := d.convertValue(v, dst.Type()) + convertedValue, err := d.convertValue(v, dst.Type(), src) if err != nil { return errors.Wrapf(err, "failed to convert value") } @@ -729,7 +979,9 @@ func (d *Decoder) castToAssignableValue(value reflect.Value, target reflect.Type return value } -func (d *Decoder) createDecodedNewValue(ctx context.Context, typ reflect.Type, node ast.Node) (reflect.Value, error) { +func (d *Decoder) createDecodedNewValue( + ctx context.Context, typ reflect.Type, defaultVal reflect.Value, node ast.Node, +) (reflect.Value, error) { if node.Type() == ast.AliasType { aliasName := node.(*ast.AliasNode).Value.GetToken().Value newValue := d.anchorValueMap[aliasName] @@ -741,6 +993,12 @@ func (d *Decoder) createDecodedNewValue(ctx context.Context, typ reflect.Type, n return reflect.Zero(typ), nil } newValue := d.createDecodableValue(typ) + for defaultVal.Kind() == reflect.Ptr { + defaultVal = defaultVal.Elem() + } + if defaultVal.IsValid() && defaultVal.Type().AssignableTo(newValue.Type()) { + newValue.Set(defaultVal) + } if err := d.decodeValue(ctx, newValue, node); err != nil { return newValue, errors.Wrapf(err, "failed to decode value") } @@ -850,7 +1108,7 @@ func (d *Decoder) castToTime(src ast.Node) (time.Time, error) { } s, ok := v.(string) if !ok { - 
return time.Time{}, errTypeMismatch(reflect.TypeOf(time.Time{}), reflect.TypeOf(v)) + return time.Time{}, errTypeMismatch(reflect.TypeOf(time.Time{}), reflect.TypeOf(v), src.GetToken()) } for _, format := range allowedTimestampFormats { t, err := time.Parse(format, s) @@ -866,7 +1124,35 @@ func (d *Decoder) castToTime(src ast.Node) (time.Time, error) { func (d *Decoder) decodeTime(ctx context.Context, dst reflect.Value, src ast.Node) error { t, err := d.castToTime(src) if err != nil { - return err + return errors.Wrapf(err, "failed to convert to time") + } + dst.Set(reflect.ValueOf(t)) + return nil +} + +func (d *Decoder) castToDuration(src ast.Node) (time.Duration, error) { + if src == nil { + return 0, nil + } + v := d.nodeToValue(src) + if t, ok := v.(time.Duration); ok { + return t, nil + } + s, ok := v.(string) + if !ok { + return 0, errTypeMismatch(reflect.TypeOf(time.Duration(0)), reflect.TypeOf(v), src.GetToken()) + } + t, err := time.ParseDuration(s) + if err != nil { + return 0, errors.Wrapf(err, "failed to parse duration") + } + return t, nil +} + +func (d *Decoder) decodeDuration(ctx context.Context, dst reflect.Value, src ast.Node) error { + t, err := d.castToDuration(src) + if err != nil { + return errors.Wrapf(err, "failed to convert to duration") } dst.Set(reflect.ValueOf(t)) return nil @@ -897,6 +1183,17 @@ func (d *Decoder) decodeStruct(ctx context.Context, dst reflect.Value, src ast.N return nil } structType := dst.Type() + srcValue := reflect.ValueOf(src) + srcType := srcValue.Type() + if srcType.Kind() == reflect.Ptr { + srcType = srcType.Elem() + srcValue = srcValue.Elem() + } + if structType == srcType { + // dst value implements ast.Node + dst.Set(srcValue) + return nil + } structFieldMap, err := structFieldMap(structType) if err != nil { return errors.Wrapf(err, "failed to create struct field map") @@ -947,14 +1244,9 @@ func (d *Decoder) decodeStruct(ctx context.Context, dst reflect.Value, src ast.N key := &ast.StringNode{BaseNode: &ast.BaseNode{}, Value: k} mapNode.Values = append(mapNode.Values, ast.MappingValue(nil, key, v)) } - newFieldValue, err := d.createDecodedNewValue(ctx, fieldValue.Type(), mapNode) + newFieldValue, err := d.createDecodedNewValue(ctx, fieldValue.Type(), fieldValue, mapNode) if d.disallowUnknownField { - var ufe *unknownFieldError - if xerrors.As(err, &ufe) { - err = nil - } - - if err = d.deleteStructKeys(fieldValue.Type(), unknownFields); err != nil { + if err := d.deleteStructKeys(fieldValue.Type(), unknownFields); err != nil { return errors.Wrapf(err, "cannot delete struct keys") } } @@ -963,14 +1255,14 @@ func (d *Decoder) decodeStruct(ctx context.Context, dst reflect.Value, src ast.N if foundErr != nil { continue } - var te *typeError + var te *errors.TypeError if xerrors.As(err, &te) { - if te.structFieldName != nil { - fieldName := fmt.Sprintf("%s.%s", structType.Name(), *te.structFieldName) - te.structFieldName = &fieldName + if te.StructFieldName != nil { + fieldName := fmt.Sprintf("%s.%s", structType.Name(), *te.StructFieldName) + te.StructFieldName = &fieldName } else { fieldName := fmt.Sprintf("%s.%s", structType.Name(), field.Name) - te.structFieldName = &fieldName + te.StructFieldName = &fieldName } foundErr = te continue @@ -994,15 +1286,15 @@ func (d *Decoder) decodeStruct(ctx context.Context, dst reflect.Value, src ast.N fieldValue.Set(reflect.Zero(fieldValue.Type())) continue } - newFieldValue, err := d.createDecodedNewValue(ctx, fieldValue.Type(), v) + newFieldValue, err := d.createDecodedNewValue(ctx, 
fieldValue.Type(), fieldValue, v) if err != nil { if foundErr != nil { continue } - var te *typeError + var te *errors.TypeError if xerrors.As(err, &te) { fieldName := fmt.Sprintf("%s.%s", structType.Name(), field.Name) - te.structFieldName = &fieldName + te.StructFieldName = &fieldName foundErr = te } else { foundErr = err @@ -1015,7 +1307,9 @@ func (d *Decoder) decodeStruct(ctx context.Context, dst reflect.Value, src ast.N return errors.Wrapf(foundErr, "failed to decode value") } - if len(unknownFields) != 0 && d.disallowUnknownField { + // Ignore unknown fields when parsing an inline struct (recognized by a nil token). + // Unknown fields are expected (they could be fields from the parent struct). + if len(unknownFields) != 0 && d.disallowUnknownField && src.GetToken() != nil { for key, node := range unknownFields { return errUnknownField(fmt.Sprintf(`unknown field "%s"`, key), node.GetToken()) } @@ -1039,9 +1333,14 @@ func (d *Decoder) decodeStruct(ctx context.Context, dst reflect.Value, src ast.N if exists { // TODO: to make FieldError message cutomizable return errors.ErrSyntax(fmt.Sprintf("%s", err), node.GetToken()) + } else if t := src.GetToken(); t != nil && t.Prev != nil && t.Prev.Prev != nil { + // A missing required field will not be in the keyToNodeMap + // the error needs to be associated with the parent of the source node + return errors.ErrSyntax(fmt.Sprintf("%s", err), t.Prev.Prev) } } } + return err } } return nil @@ -1068,7 +1367,7 @@ func (d *Decoder) decodeArray(ctx context.Context, dst reflect.Value, src ast.No // set nil value to pointer arrayValue.Index(idx).Set(reflect.Zero(elemType)) } else { - dstValue, err := d.createDecodedNewValue(ctx, elemType, v) + dstValue, err := d.createDecodedNewValue(ctx, elemType, reflect.Value{}, v) if err != nil { if foundErr == nil { foundErr = err @@ -1108,7 +1407,7 @@ func (d *Decoder) decodeSlice(ctx context.Context, dst reflect.Value, src ast.No sliceValue = reflect.Append(sliceValue, reflect.Zero(elemType)) continue } - dstValue, err := d.createDecodedNewValue(ctx, elemType, v) + dstValue, err := d.createDecodedNewValue(ctx, elemType, reflect.Value{}, v) if err != nil { if foundErr == nil { foundErr = err @@ -1236,10 +1535,19 @@ func (d *Decoder) decodeMap(ctx context.Context, dst reflect.Value, src ast.Node } continue } - k := reflect.ValueOf(d.nodeToValue(key)) - if k.IsValid() && k.Type().ConvertibleTo(keyType) { - k = k.Convert(keyType) + + k := d.createDecodableValue(keyType) + if d.canDecodeByUnmarshaler(k) { + if err := d.decodeByUnmarshaler(ctx, k, key); err != nil { + return errors.Wrapf(err, "failed to decode by unmarshaler") + } + } else { + k = reflect.ValueOf(d.nodeToValue(key)) + if k.IsValid() && k.Type().ConvertibleTo(keyType) { + k = k.Convert(keyType) + } } + if k.IsValid() { if err := d.validateDuplicateKey(keyMap, k.Interface(), key); err != nil { return errors.Wrapf(err, "invalid map key") @@ -1250,7 +1558,7 @@ func (d *Decoder) decodeMap(ctx context.Context, dst reflect.Value, src ast.Node mapValue.SetMapIndex(k, reflect.Zero(valueType)) continue } - dstValue, err := d.createDecodedNewValue(ctx, valueType, value) + dstValue, err := d.createDecodedNewValue(ctx, valueType, reflect.Value{}, value) if err != nil { if foundErr == nil { foundErr = err @@ -1356,7 +1664,7 @@ func (d *Decoder) resolveReference() error { } } for _, reader := range d.referenceReaders { - bytes, err := ioutil.ReadAll(reader) + bytes, err := io.ReadAll(reader) if err != nil { return errors.Wrapf(err, "failed to read buffer") } @@ 
-1371,7 +1679,11 @@ func (d *Decoder) resolveReference() error { } func (d *Decoder) parse(bytes []byte) (*ast.File, error) { - f, err := parser.ParseBytes(bytes, 0) + var parseMode parser.Mode + if d.toCommentMap != nil { + parseMode = parser.ParseComments + } + f, err := parser.ParseBytes(bytes, parseMode) if err != nil { return nil, errors.Wrapf(err, "failed to parse yaml") } @@ -1448,7 +1760,7 @@ func (d *Decoder) DecodeContext(ctx context.Context, v interface{}) error { return nil } if err := d.decodeInit(); err != nil { - return errors.Wrapf(err, "failed to decodInit") + return errors.Wrapf(err, "failed to decodeInit") } if err := d.decode(ctx, rv); err != nil { if err == io.EOF { @@ -1458,3 +1770,27 @@ func (d *Decoder) DecodeContext(ctx context.Context, v interface{}) error { } return nil } + +// DecodeFromNode decodes node into the value pointed to by v. +func (d *Decoder) DecodeFromNode(node ast.Node, v interface{}) error { + return d.DecodeFromNodeContext(context.Background(), node, v) +} + +// DecodeFromNodeContext decodes node into the value pointed to by v with context.Context. +func (d *Decoder) DecodeFromNodeContext(ctx context.Context, node ast.Node, v interface{}) error { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Ptr { + return errors.ErrDecodeRequiredPointerType + } + if !d.isInitialized() { + if err := d.decodeInit(); err != nil { + return errors.Wrapf(err, "failed to decodInit") + } + } + // resolve references to the anchor on the same file + d.nodeToValue(node) + if err := d.decodeValue(ctx, rv.Elem(), node); err != nil { + return errors.Wrapf(err, "failed to decode value") + } + return nil +} diff --git a/vendor/github.com/goccy/go-yaml/encode.go b/vendor/github.com/goccy/go-yaml/encode.go index 3599a8582..3b9b29814 100644 --- a/vendor/github.com/goccy/go-yaml/encode.go +++ b/vendor/github.com/goccy/go-yaml/encode.go @@ -30,12 +30,17 @@ type Encoder struct { writer io.Writer opts []EncodeOption indent int + indentSequence bool + singleQuote bool isFlowStyle bool isJSONStyle bool useJSONMarshaler bool anchorCallback func(*ast.AnchorNode, interface{}) error anchorPtrToNameMap map[uintptr]string + customMarshalerMap map[reflect.Type]func(interface{}) ([]byte, error) useLiteralStyleIfMultiline bool + commentMap map[*Path][]*Comment + written bool line int column int @@ -52,6 +57,7 @@ func NewEncoder(w io.Writer, opts ...EncodeOption) *Encoder { opts: opts, indent: DefaultIndentSpaces, anchorPtrToNameMap: map[uintptr]string{}, + customMarshalerMap: map[reflect.Type]func(interface{}) ([]byte, error){}, line: 1, column: 1, offset: 0, @@ -80,6 +86,15 @@ func (e *Encoder) EncodeContext(ctx context.Context, v interface{}) error { if err != nil { return errors.Wrapf(err, "failed to encode to node") } + if err := e.setCommentByCommentMap(node); err != nil { + return errors.Wrapf(err, "failed to set comment by comment map") + } + if !e.written { + e.written = true + } else { + // write document separator + e.writer.Write([]byte("---\n")) + } var p printer.Printer e.writer.Write(p.PrintNode(node)) return nil @@ -104,6 +119,131 @@ func (e *Encoder) EncodeToNodeContext(ctx context.Context, v interface{}) (ast.N return node, nil } +func (e *Encoder) setCommentByCommentMap(node ast.Node) error { + if e.commentMap == nil { + return nil + } + for path, comments := range e.commentMap { + n, err := path.FilterNode(node) + if err != nil { + return errors.Wrapf(err, "failed to filter node") + } + if n == nil { + continue + } + for _, comment := range comments { + 
commentTokens := []*token.Token{} + for _, text := range comment.Texts { + commentTokens = append(commentTokens, token.New(text, text, nil)) + } + commentGroup := ast.CommentGroup(commentTokens) + switch comment.Position { + case CommentHeadPosition: + if err := e.setHeadComment(node, n, commentGroup); err != nil { + return errors.Wrapf(err, "failed to set head comment") + } + case CommentLinePosition: + if err := e.setLineComment(node, n, commentGroup); err != nil { + return errors.Wrapf(err, "failed to set line comment") + } + case CommentFootPosition: + if err := e.setFootComment(node, n, commentGroup); err != nil { + return errors.Wrapf(err, "failed to set foot comment") + } + default: + return ErrUnknownCommentPositionType + } + } + } + return nil +} + +func (e *Encoder) setHeadComment(node ast.Node, filtered ast.Node, comment *ast.CommentGroupNode) error { + parent := ast.Parent(node, filtered) + if parent == nil { + return ErrUnsupportedHeadPositionType(node) + } + switch p := parent.(type) { + case *ast.MappingValueNode: + if err := p.SetComment(comment); err != nil { + return errors.Wrapf(err, "failed to set comment") + } + case *ast.MappingNode: + if err := p.SetComment(comment); err != nil { + return errors.Wrapf(err, "failed to set comment") + } + case *ast.SequenceNode: + if len(p.ValueHeadComments) == 0 { + p.ValueHeadComments = make([]*ast.CommentGroupNode, len(p.Values)) + } + var foundIdx int + for idx, v := range p.Values { + if v == filtered { + foundIdx = idx + break + } + } + p.ValueHeadComments[foundIdx] = comment + default: + return ErrUnsupportedHeadPositionType(node) + } + return nil +} + +func (e *Encoder) setLineComment(node ast.Node, filtered ast.Node, comment *ast.CommentGroupNode) error { + switch filtered.(type) { + case *ast.MappingValueNode, *ast.SequenceNode: + // Line comment cannot be set for mapping value node. 
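
For context on the written flag added to EncodeContext above: the first Encode call on an Encoder writes the document as before, and every subsequent call is now prefixed with a "---" separator, so one Encoder can emit a multi-document stream. A sketch using only the standard Encoder API:

    package main

    import (
    	"bytes"
    	"fmt"

    	"github.com/goccy/go-yaml"
    )

    func main() {
    	var buf bytes.Buffer
    	enc := yaml.NewEncoder(&buf)
    	// The second Encode is preceded by a "---" document separator.
    	if err := enc.Encode(map[string]string{"doc": "one"}); err != nil {
    		panic(err)
    	}
    	if err := enc.Encode(map[string]string{"doc": "two"}); err != nil {
    		panic(err)
    	}
    	fmt.Print(buf.String())
    	// doc: one
    	// ---
    	// doc: two
    }
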
+ // It should probably be set for the parent map node + if err := e.setLineCommentToParentMapNode(node, filtered, comment); err != nil { + return errors.Wrapf(err, "failed to set line comment to parent node") + } + default: + if err := filtered.SetComment(comment); err != nil { + return errors.Wrapf(err, "failed to set comment") + } + } + return nil +} + +func (e *Encoder) setLineCommentToParentMapNode(node ast.Node, filtered ast.Node, comment *ast.CommentGroupNode) error { + parent := ast.Parent(node, filtered) + if parent == nil { + return ErrUnsupportedLinePositionType(node) + } + switch p := parent.(type) { + case *ast.MappingValueNode: + if err := p.Key.SetComment(comment); err != nil { + return errors.Wrapf(err, "failed to set comment") + } + case *ast.MappingNode: + if err := p.SetComment(comment); err != nil { + return errors.Wrapf(err, "failed to set comment") + } + default: + return ErrUnsupportedLinePositionType(parent) + } + return nil +} + +func (e *Encoder) setFootComment(node ast.Node, filtered ast.Node, comment *ast.CommentGroupNode) error { + parent := ast.Parent(node, filtered) + if parent == nil { + return ErrUnsupportedFootPositionType(node) + } + switch n := parent.(type) { + case *ast.MappingValueNode: + n.FootComment = comment + case *ast.MappingNode: + n.FootComment = comment + case *ast.SequenceNode: + n.FootComment = comment + default: + return ErrUnsupportedFootPositionType(n) + } + return nil +} + func (e *Encoder) encodeDocument(doc []byte) (ast.Node, error) { f, err := parser.ParseBytes(doc, 0) if err != nil { @@ -135,10 +275,39 @@ type jsonMarshaler interface { MarshalJSON() ([]byte, error) } +func (e *Encoder) existsTypeInCustomMarshalerMap(t reflect.Type) bool { + if _, exists := e.customMarshalerMap[t]; exists { + return true + } + + globalCustomMarshalerMu.Lock() + defer globalCustomMarshalerMu.Unlock() + if _, exists := globalCustomMarshalerMap[t]; exists { + return true + } + return false +} + +func (e *Encoder) marshalerFromCustomMarshalerMap(t reflect.Type) (func(interface{}) ([]byte, error), bool) { + if marshaler, exists := e.customMarshalerMap[t]; exists { + return marshaler, exists + } + + globalCustomMarshalerMu.Lock() + defer globalCustomMarshalerMu.Unlock() + if marshaler, exists := globalCustomMarshalerMap[t]; exists { + return marshaler, exists + } + return nil, false +} + func (e *Encoder) canEncodeByMarshaler(v reflect.Value) bool { if !v.CanInterface() { return false } + if e.existsTypeInCustomMarshalerMap(v.Type()) { + return true + } iface := v.Interface() switch iface.(type) { case BytesMarshalerContext: @@ -151,6 +320,8 @@ func (e *Encoder) canEncodeByMarshaler(v reflect.Value) bool { return true case time.Time: return true + case time.Duration: + return true case encoding.TextMarshaler: return true case jsonMarshaler: @@ -162,6 +333,18 @@ func (e *Encoder) canEncodeByMarshaler(v reflect.Value) bool { func (e *Encoder) encodeByMarshaler(ctx context.Context, v reflect.Value, column int) (ast.Node, error) { iface := v.Interface() + if marshaler, exists := e.marshalerFromCustomMarshalerMap(v.Type()); exists { + doc, err := marshaler(iface) + if err != nil { + return nil, errors.Wrapf(err, "failed to MarshalYAML") + } + node, err := e.encodeDocument(doc) + if err != nil { + return nil, errors.Wrapf(err, "failed to encode document") + } + return node, nil + } + if marshaler, ok := iface.(BytesMarshalerContext); ok { doc, err := marshaler.MarshalYAML(ctx) if err != nil { @@ -206,6 +389,10 @@ func (e *Encoder) encodeByMarshaler(ctx 
context.Context, v reflect.Value, column return e.encodeTime(t, column), nil } + if t, ok := iface.(time.Duration); ok { + return e.encodeDuration(t, column), nil + } + if marshaler, ok := iface.(encoding.TextMarshaler); ok { doc, err := marshaler.MarshalText() if err != nil { @@ -255,8 +442,10 @@ func (e *Encoder) encodeValue(ctx context.Context, v reflect.Value, column int) return e.encodeInt(v.Int()), nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: return e.encodeUint(v.Uint()), nil - case reflect.Float32, reflect.Float64: - return e.encodeFloat(v.Float()), nil + case reflect.Float32: + return e.encodeFloat(v.Float(), 32), nil + case reflect.Float64: + return e.encodeFloat(v.Float(), 64), nil case reflect.Ptr: anchorName := e.anchorPtrToNameMap[v.Pointer()] if anchorName != "" { @@ -294,7 +483,6 @@ func (e *Encoder) encodeValue(ctx context.Context, v reflect.Value, column int) default: return nil, xerrors.Errorf("unknown value type %s", v.Type().String()) } - return nil, nil } func (e *Encoder) pos(column int) *token.Position { @@ -307,22 +495,22 @@ func (e *Encoder) pos(column int) *token.Position { } } -func (e *Encoder) encodeNil() ast.Node { +func (e *Encoder) encodeNil() *ast.NullNode { value := "null" return ast.Null(token.New(value, value, e.pos(e.column))) } -func (e *Encoder) encodeInt(v int64) ast.Node { +func (e *Encoder) encodeInt(v int64) *ast.IntegerNode { value := fmt.Sprint(v) return ast.Integer(token.New(value, value, e.pos(e.column))) } -func (e *Encoder) encodeUint(v uint64) ast.Node { +func (e *Encoder) encodeUint(v uint64) *ast.IntegerNode { value := fmt.Sprint(v) return ast.Integer(token.New(value, value, e.pos(e.column))) } -func (e *Encoder) encodeFloat(v float64) ast.Node { +func (e *Encoder) encodeFloat(v float64, bitSize int) ast.Node { if v == math.Inf(0) { value := ".inf" return ast.Infinity(token.New(value, value, e.pos(e.column))) @@ -333,18 +521,10 @@ func (e *Encoder) encodeFloat(v float64) ast.Node { value := ".nan" return ast.Nan(token.New(value, value, e.pos(e.column))) } - value := fmt.Sprintf("%f", v) - fvalue := strings.Split(value, ".") - if len(fvalue) > 1 { - precision := fvalue[1] - precisionNum := 1 - for i := len(precision) - 1; i >= 0; i-- { - if precision[i] != '0' { - precisionNum = i + 1 - break - } - } - value = strconv.FormatFloat(v, 'f', precisionNum, 64) + value := strconv.FormatFloat(v, 'g', -1, bitSize) + if !strings.Contains(value, ".") && !strings.Contains(value, "e") { + // append x.0 suffix to keep float value context + value = fmt.Sprintf("%s.0", value) } return ast.Float(token.New(value, value, e.pos(e.column))) } @@ -356,45 +536,66 @@ func (e *Encoder) isNeedQuoted(v string) bool { if e.useLiteralStyleIfMultiline && strings.ContainsAny(v, "\n\r") { return false } + if e.isFlowStyle && strings.ContainsAny(v, `]},'"`) { + return true + } if token.IsNeedQuoted(v) { return true } return false } -func (e *Encoder) encodeString(v string, column int) ast.Node { +func (e *Encoder) encodeString(v string, column int) *ast.StringNode { if e.isNeedQuoted(v) { - v = strconv.Quote(v) + if e.singleQuote { + v = quoteWith(v, '\'') + } else { + v = strconv.Quote(v) + } } return ast.String(token.New(v, v, e.pos(column))) } -func (e *Encoder) encodeBool(v bool) ast.Node { +func (e *Encoder) encodeBool(v bool) *ast.BoolNode { value := fmt.Sprint(v) return ast.Bool(token.New(value, value, e.pos(e.column))) } -func (e *Encoder) encodeSlice(ctx context.Context, value reflect.Value) (ast.Node, error) { - 
sequence := ast.Sequence(token.New("-", "-", e.pos(e.column)), e.isFlowStyle) +func (e *Encoder) encodeSlice(ctx context.Context, value reflect.Value) (*ast.SequenceNode, error) { + if e.indentSequence { + e.column += e.indent + } + column := e.column + sequence := ast.Sequence(token.New("-", "-", e.pos(column)), e.isFlowStyle) for i := 0; i < value.Len(); i++ { - node, err := e.encodeValue(ctx, value.Index(i), e.column) + node, err := e.encodeValue(ctx, value.Index(i), column) if err != nil { return nil, errors.Wrapf(err, "failed to encode value for slice") } sequence.Values = append(sequence.Values, node) } + if e.indentSequence { + e.column -= e.indent + } return sequence, nil } -func (e *Encoder) encodeArray(ctx context.Context, value reflect.Value) (ast.Node, error) { - sequence := ast.Sequence(token.New("-", "-", e.pos(e.column)), e.isFlowStyle) +func (e *Encoder) encodeArray(ctx context.Context, value reflect.Value) (*ast.SequenceNode, error) { + if e.indentSequence { + e.column += e.indent + } + column := e.column + sequence := ast.Sequence(token.New("-", "-", e.pos(column)), e.isFlowStyle) for i := 0; i < value.Len(); i++ { - node, err := e.encodeValue(ctx, value.Index(i), e.column) + node, err := e.encodeValue(ctx, value.Index(i), column) if err != nil { return nil, errors.Wrapf(err, "failed to encode value for array") } sequence.Values = append(sequence.Values, node) } + if e.indentSequence { + e.column -= e.indent + } return sequence, nil } @@ -405,8 +606,8 @@ func (e *Encoder) encodeMapItem(ctx context.Context, item MapItem, column int) ( if err != nil { return nil, errors.Wrapf(err, "failed to encode MapItem") } - if m, ok := value.(*ast.MappingNode); ok { - m.AddColumn(e.indent) + if e.isMapNode(value) { + value.AddColumn(e.indent) } return ast.MappingValue( token.New("", "", e.pos(column)), @@ -415,7 +616,7 @@ func (e *Encoder) encodeMapItem(ctx context.Context, item MapItem, column int) ( ), nil } -func (e *Encoder) encodeMapSlice(ctx context.Context, value MapSlice, column int) (ast.Node, error) { +func (e *Encoder) encodeMapSlice(ctx context.Context, value MapSlice, column int) (*ast.MappingNode, error) { node := ast.Mapping(token.New("", "", e.pos(column)), e.isFlowStyle) for _, item := range value { value, err := e.encodeMapItem(ctx, item, column) @@ -427,13 +628,20 @@ func (e *Encoder) encodeMapSlice(ctx context.Context, value MapSlice, column int return node, nil } +func (e *Encoder) isMapNode(node ast.Node) bool { + _, ok := node.(ast.MapNode) + return ok +} + func (e *Encoder) encodeMap(ctx context.Context, value reflect.Value, column int) ast.Node { node := ast.Mapping(token.New("", "", e.pos(column)), e.isFlowStyle) - keys := []string{} - for _, k := range value.MapKeys() { - keys = append(keys, k.Interface().(string)) + keys := make([]interface{}, len(value.MapKeys())) + for i, k := range value.MapKeys() { + keys[i] = k.Interface() } - sort.Strings(keys) + sort.Slice(keys, func(i, j int) bool { + return fmt.Sprint(keys[i]) < fmt.Sprint(keys[j]) + }) for _, key := range keys { k := reflect.ValueOf(key) v := value.MapIndex(k) @@ -441,12 +649,12 @@ func (e *Encoder) encodeMap(ctx context.Context, value reflect.Value, column int if err != nil { return nil } - if m, ok := value.(*ast.MappingNode); ok { - m.AddColumn(e.indent) + if e.isMapNode(value) { + value.AddColumn(e.indent) } node.Values = append(node.Values, ast.MappingValue( nil, - e.encodeString(k.Interface().(string), column), + e.encodeString(fmt.Sprint(key), column), value, )) } @@ -500,7 +708,7 @@ func (e 
*Encoder) isZeroValue(v reflect.Value) bool { return false } -func (e *Encoder) encodeTime(v time.Time, column int) ast.Node { +func (e *Encoder) encodeTime(v time.Time, column int) *ast.StringNode { value := v.Format(time.RFC3339Nano) if e.isJSONStyle { value = strconv.Quote(value) @@ -508,7 +716,15 @@ func (e *Encoder) encodeTime(v time.Time, column int) ast.Node { return ast.String(token.New(value, value, e.pos(column))) } -func (e *Encoder) encodeAnchor(anchorName string, value ast.Node, fieldValue reflect.Value, column int) (ast.Node, error) { +func (e *Encoder) encodeDuration(v time.Duration, column int) *ast.StringNode { + value := v.String() + if e.isJSONStyle { + value = strconv.Quote(value) + } + return ast.String(token.New(value, value, e.pos(column))) +} + +func (e *Encoder) encodeAnchor(anchorName string, value ast.Node, fieldValue reflect.Value, column int) (*ast.AnchorNode, error) { anchorNode := ast.Anchor(token.New("&", "&", e.pos(column))) anchorNode.Name = ast.String(token.New(anchorName, anchorName, e.pos(column))) anchorNode.Value = value @@ -546,21 +762,20 @@ func (e *Encoder) encodeStruct(ctx context.Context, value reflect.Value, column // omit encoding continue } - value, err := e.encodeValue(ctx, fieldValue, column) + ve := e + if !e.isFlowStyle && structField.IsFlow { + ve = &Encoder{} + *ve = *e + ve.isFlowStyle = true + } + value, err := ve.encodeValue(ctx, fieldValue, column) if err != nil { return nil, errors.Wrapf(err, "failed to encode value") } - if m, ok := value.(*ast.MappingNode); ok { - if !e.isFlowStyle && structField.IsFlow { - m.SetIsFlowStyle(true) - } + if e.isMapNode(value) { value.AddColumn(e.indent) - } else if s, ok := value.(*ast.SequenceNode); ok { - if !e.isFlowStyle && structField.IsFlow { - s.SetIsFlowStyle(true) - } } - key := e.encodeString(structField.RenderName, column) + var key ast.MapKeyNode = e.encodeString(structField.RenderName, column) switch { case structField.AnchorName != "": anchorNode, err := e.encodeAnchor(structField.AnchorName, value, fieldValue, column) @@ -608,6 +823,10 @@ func (e *Encoder) encodeStruct(ctx context.Context, value reflect.Value, column } mapNode, ok := value.(ast.MapNode) if !ok { + // if an inline field is null, skip encoding it + if _, ok := value.(*ast.NullNode); ok { + continue + } return nil, xerrors.Errorf("inline value is must be map or struct type") } mapIter := mapNode.MapRange() diff --git a/vendor/github.com/goccy/go-yaml/error.go b/vendor/github.com/goccy/go-yaml/error.go index 3dfaf2130..163dcc558 100644 --- a/vendor/github.com/goccy/go-yaml/error.go +++ b/vendor/github.com/goccy/go-yaml/error.go @@ -6,12 +6,26 @@ import ( ) var ( - ErrInvalidQuery = xerrors.New("invalid query") - ErrInvalidPath = xerrors.New("invalid path instance") - ErrInvalidPathString = xerrors.New("invalid path string") - ErrNotFoundNode = xerrors.New("node not found") + ErrInvalidQuery = xerrors.New("invalid query") + ErrInvalidPath = xerrors.New("invalid path instance") + ErrInvalidPathString = xerrors.New("invalid path string") + ErrNotFoundNode = xerrors.New("node not found") + ErrUnknownCommentPositionType = xerrors.New("unknown comment position type") + ErrInvalidCommentMapValue = xerrors.New("invalid comment map value. 
it must be not nil value") ) +func ErrUnsupportedHeadPositionType(node ast.Node) error { + return xerrors.Errorf("unsupported comment head position for %s", node.Type()) +} + +func ErrUnsupportedLinePositionType(node ast.Node) error { + return xerrors.Errorf("unsupported comment line position for %s", node.Type()) +} + +func ErrUnsupportedFootPositionType(node ast.Node) error { + return xerrors.Errorf("unsupported comment foot position for %s", node.Type()) +} + // IsInvalidQueryError whether err is ErrInvalidQuery or not. func IsInvalidQueryError(err error) bool { return xerrors.Is(err, ErrInvalidQuery) diff --git a/vendor/github.com/goccy/go-yaml/internal/errors/error.go b/vendor/github.com/goccy/go-yaml/internal/errors/error.go index c7b1103c9..7f1ea9af7 100644 --- a/vendor/github.com/goccy/go-yaml/internal/errors/error.go +++ b/vendor/github.com/goccy/go-yaml/internal/errors/error.go @@ -3,6 +3,7 @@ package errors import ( "bytes" "fmt" + "reflect" "github.com/goccy/go-yaml/printer" "github.com/goccy/go-yaml/token" @@ -67,10 +68,10 @@ type wrapError struct { frame xerrors.Frame } -type myprinter struct { +type FormatErrorPrinter struct { xerrors.Printer - colored bool - inclSource bool + Colored bool + InclSource bool } func (e *wrapError) As(target interface{}) bool { @@ -90,15 +91,15 @@ func (e *wrapError) Unwrap() error { } func (e *wrapError) PrettyPrint(p xerrors.Printer, colored, inclSource bool) error { - return e.FormatError(&myprinter{Printer: p, colored: colored, inclSource: inclSource}) + return e.FormatError(&FormatErrorPrinter{Printer: p, Colored: colored, InclSource: inclSource}) } func (e *wrapError) FormatError(p xerrors.Printer) error { - if _, ok := p.(*myprinter); !ok { - p = &myprinter{ + if _, ok := p.(*FormatErrorPrinter); !ok { + p = &FormatErrorPrinter{ Printer: p, - colored: defaultColorize, - inclSource: defaultIncludeSource, + Colored: defaultColorize, + InclSource: defaultIncludeSource, } } if e.verb == 'v' && e.state.Flag('+') { @@ -171,16 +172,16 @@ type syntaxError struct { } func (e *syntaxError) PrettyPrint(p xerrors.Printer, colored, inclSource bool) error { - return e.FormatError(&myprinter{Printer: p, colored: colored, inclSource: inclSource}) + return e.FormatError(&FormatErrorPrinter{Printer: p, Colored: colored, InclSource: inclSource}) } func (e *syntaxError) FormatError(p xerrors.Printer) error { var pp printer.Printer var colored, inclSource bool - if mp, ok := p.(*myprinter); ok { - colored = mp.colored - inclSource = mp.inclSource + if fep, ok := p.(*FormatErrorPrinter); ok { + colored = fep.Colored + inclSource = fep.InclSource } pos := fmt.Sprintf("[%d:%d] ", e.token.Position.Line, e.token.Position.Column) @@ -220,3 +221,40 @@ func (e *syntaxError) Error() string { e.PrettyPrint(&Sink{&buf}, defaultColorize, defaultIncludeSource) return buf.String() } + +type TypeError struct { + DstType reflect.Type + SrcType reflect.Type + StructFieldName *string + Token *token.Token +} + +func (e *TypeError) Error() string { + if e.StructFieldName != nil { + return fmt.Sprintf("cannot unmarshal %s into Go struct field %s of type %s", e.SrcType, *e.StructFieldName, e.DstType) + } + return fmt.Sprintf("cannot unmarshal %s into Go value of type %s", e.SrcType, e.DstType) +} + +func (e *TypeError) PrettyPrint(p xerrors.Printer, colored, inclSource bool) error { + return e.FormatError(&FormatErrorPrinter{Printer: p, Colored: colored, InclSource: inclSource}) +} + +func (e *TypeError) FormatError(p xerrors.Printer) error { + var pp printer.Printer + + var 
colored, inclSource bool + if fep, ok := p.(*FormatErrorPrinter); ok { + colored = fep.Colored + inclSource = fep.InclSource + } + + pos := fmt.Sprintf("[%d:%d] ", e.Token.Position.Line, e.Token.Position.Column) + msg := pp.PrintErrorMessage(fmt.Sprintf("%s%s", pos, e.Error()), colored) + if inclSource { + msg += "\n" + pp.PrintErrorToken(e.Token, colored) + } + p.Print(msg) + + return nil +} diff --git a/vendor/github.com/goccy/go-yaml/option.go b/vendor/github.com/goccy/go-yaml/option.go index 9c4557fa0..eab5d43a8 100644 --- a/vendor/github.com/goccy/go-yaml/option.go +++ b/vendor/github.com/goccy/go-yaml/option.go @@ -2,6 +2,7 @@ package yaml import ( "io" + "reflect" "github.com/goccy/go-yaml/ast" ) @@ -94,6 +95,20 @@ func UseJSONUnmarshaler() DecodeOption { } } +// CustomUnmarshaler overrides any decoding process for the type specified in generics. +// +// NOTE: If RegisterCustomUnmarshaler and CustomUnmarshaler of DecodeOption are specified for the same type, +// the CustomUnmarshaler specified in DecodeOption takes precedence. +func CustomUnmarshaler[T any](unmarshaler func(*T, []byte) error) DecodeOption { + return func(d *Decoder) error { + var typ *T + d.customUnmarshalerMap[reflect.TypeOf(typ)] = func(v interface{}, b []byte) error { + return unmarshaler(v.(*T), b) + } + return nil + } +} + // EncodeOption functional option type for Encoder type EncodeOption func(e *Encoder) error @@ -105,6 +120,22 @@ func Indent(spaces int) EncodeOption { } } +// IndentSequence causes sequence values to be indented the same value as Indent +func IndentSequence(indent bool) EncodeOption { + return func(e *Encoder) error { + e.indentSequence = indent + return nil + } +} + +// UseSingleQuote determines if single or double quotes should be preferred for strings. +func UseSingleQuote(sq bool) EncodeOption { + return func(e *Encoder) error { + e.singleQuote = sq + return nil + } +} + // Flow encoding by flow style func Flow(isFlowStyle bool) EncodeOption { return func(e *Encoder) error { @@ -148,3 +179,100 @@ func UseJSONMarshaler() EncodeOption { return nil } } + +// CustomMarshaler overrides any encoding process for the type specified in generics. +// +// NOTE: If type T implements MarshalYAML for pointer receiver, the type specified in CustomMarshaler must be *T. +// If RegisterCustomMarshaler and CustomMarshaler of EncodeOption are specified for the same type, +// the CustomMarshaler specified in EncodeOption takes precedence. +func CustomMarshaler[T any](marshaler func(T) ([]byte, error)) EncodeOption { + return func(e *Encoder) error { + var typ T + e.customMarshalerMap[reflect.TypeOf(typ)] = func(v interface{}) ([]byte, error) { + return marshaler(v.(T)) + } + return nil + } +} + +// CommentPosition type of the position for comment. +type CommentPosition int + +const ( + CommentHeadPosition CommentPosition = CommentPosition(iota) + CommentLinePosition + CommentFootPosition +) + +func (p CommentPosition) String() string { + switch p { + case CommentHeadPosition: + return "Head" + case CommentLinePosition: + return "Line" + case CommentFootPosition: + return "Foot" + default: + return "" + } +} + +// LineComment create a one-line comment for CommentMap. +func LineComment(text string) *Comment { + return &Comment{ + Texts: []string{text}, + Position: CommentLinePosition, + } +} + +// HeadComment create a multiline comment for CommentMap. 
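
To illustrate the generic CustomUnmarshaler option introduced above (the decode-side counterpart of CustomMarshaler), a sketch assuming the package's yaml.UnmarshalWithOptions helper; the Host type is hypothetical:

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/goccy/go-yaml"
    )

    // Host is a made-up type for this illustration only.
    type Host string

    func main() {
    	var v struct {
    		Host Host `yaml:"host"`
    	}
    	// Override decoding for every Host value without giving the type an
    	// UnmarshalYAML method; b holds the raw YAML bytes for the node.
    	opt := yaml.CustomUnmarshaler[Host](func(dst *Host, b []byte) error {
    		*dst = Host(strings.ToLower(strings.TrimSpace(string(b))))
    		return nil
    	})
    	if err := yaml.UnmarshalWithOptions([]byte("host: EXAMPLE.COM\n"), &v, opt); err != nil {
    		panic(err)
    	}
    	fmt.Println(v.Host) // example.com
    }

As the NOTE in the hunk says, an option registered this way takes precedence over a RegisterCustomUnmarshaler registration for the same type.
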
+func HeadComment(texts ...string) *Comment { + return &Comment{ + Texts: texts, + Position: CommentHeadPosition, + } +} + +// FootComment create a multiline comment for CommentMap. +func FootComment(texts ...string) *Comment { + return &Comment{ + Texts: texts, + Position: CommentFootPosition, + } +} + +// Comment raw data for comment. +type Comment struct { + Texts []string + Position CommentPosition +} + +// CommentMap map of the position of the comment and the comment information. +type CommentMap map[string][]*Comment + +// WithComment add a comment using the location and text information given in the CommentMap. +func WithComment(cm CommentMap) EncodeOption { + return func(e *Encoder) error { + commentMap := map[*Path][]*Comment{} + for k, v := range cm { + path, err := PathString(k) + if err != nil { + return err + } + commentMap[path] = v + } + e.commentMap = commentMap + return nil + } +} + +// CommentToMap apply the position and content of comments in a YAML document to a CommentMap. +func CommentToMap(cm CommentMap) DecodeOption { + return func(d *Decoder) error { + if cm == nil { + return ErrInvalidCommentMapValue + } + d.toCommentMap = cm + return nil + } +} diff --git a/vendor/github.com/goccy/go-yaml/parser/context.go b/vendor/github.com/goccy/go-yaml/parser/context.go index 5749e43b5..42cc4f8fa 100644 --- a/vendor/github.com/goccy/go-yaml/parser/context.go +++ b/vendor/github.com/goccy/go-yaml/parser/context.go @@ -1,13 +1,62 @@ package parser -import "github.com/goccy/go-yaml/token" +import ( + "fmt" + "strings" + + "github.com/goccy/go-yaml/token" +) // context context at parsing type context struct { + parent *context idx int size int tokens token.Tokens - mode Mode + path string +} + +var pathSpecialChars = []string{ + "$", "*", ".", "[", "]", +} + +func containsPathSpecialChar(path string) bool { + for _, char := range pathSpecialChars { + if strings.Contains(path, char) { + return true + } + } + return false +} + +func normalizePath(path string) string { + if containsPathSpecialChar(path) { + return fmt.Sprintf("'%s'", path) + } + return path +} + +func (c *context) withChild(path string) *context { + ctx := c.copy() + path = normalizePath(path) + ctx.path += fmt.Sprintf(".%s", path) + return ctx +} + +func (c *context) withIndex(idx uint) *context { + ctx := c.copy() + ctx.path += fmt.Sprintf("[%d]", idx) + return ctx +} + +func (c *context) copy() *context { + return &context{ + parent: c, + idx: c.idx, + size: c.size, + tokens: append(token.Tokens{}, c.tokens...), + path: c.path, + } } func (c *context) next() bool { @@ -22,6 +71,9 @@ func (c *context) previousToken() *token.Token { } func (c *context) insertToken(idx int, tk *token.Token) { + if c.parent != nil { + c.parent.insertToken(idx, tk) + } if c.size < idx { return } @@ -91,10 +143,6 @@ func (c *context) afterNextNotCommentToken() *token.Token { return nil } -func (c *context) enabledComment() bool { - return c.mode&ParseComments != 0 -} - func (c *context) isCurrentCommentToken() bool { tk := c.currentToken() if tk == nil { @@ -104,6 +152,9 @@ func (c *context) isCurrentCommentToken() bool { } func (c *context) progressIgnoreComment(num int) { + if c.parent != nil { + c.parent.progressIgnoreComment(num) + } if c.size <= c.idx+num { c.idx = c.size } else { @@ -119,7 +170,7 @@ func (c *context) progress(num int) { } func newContext(tokens token.Tokens, mode Mode) *context { - filteredTokens := token.Tokens{} + filteredTokens := []*token.Token{} if mode&ParseComments != 0 { filteredTokens = tokens } else { 
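
A sketch of the two comment options above working together: WithComment attaches Comment values to nodes selected by path strings while encoding, and CommentToMap collects comments back out while decoding (which is also why the Decoder.parse hunk earlier switches to parser.ParseComments when toCommentMap is set). Exact comment spacing in the rendered output may vary:

    package main

    import (
    	"fmt"

    	"github.com/goccy/go-yaml"
    )

    func main() {
    	v := struct {
    		Name string `yaml:"name"`
    	}{Name: "app"}

    	// Attach a line comment to the node at path $.name while encoding.
    	out, err := yaml.MarshalWithOptions(v, yaml.WithComment(yaml.CommentMap{
    		"$.name": {yaml.LineComment(" service name")},
    	}))
    	if err != nil {
    		panic(err)
    	}
    	fmt.Print(string(out)) // name: app # service name

    	// Round-trip: collect the comments back while decoding.
    	got := yaml.CommentMap{}
    	var decoded struct {
    		Name string `yaml:"name"`
    	}
    	if err := yaml.UnmarshalWithOptions(out, &decoded, yaml.CommentToMap(got)); err != nil {
    		panic(err)
    	}
    	for path, comments := range got {
    		fmt.Println(path, comments[0].Position, comments[0].Texts)
    	}
    }
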
@@ -127,13 +178,15 @@ func newContext(tokens token.Tokens, mode Mode) *context { if tk.Type == token.CommentType { continue } - filteredTokens.Add(tk) + // keep prev/next reference between tokens containing comments + // https://github.com/goccy/go-yaml/issues/254 + filteredTokens = append(filteredTokens, tk) } } return &context{ idx: 0, size: len(filteredTokens), - tokens: filteredTokens, - mode: mode, + tokens: token.Tokens(filteredTokens), + path: "$", } } diff --git a/vendor/github.com/goccy/go-yaml/parser/parser.go b/vendor/github.com/goccy/go-yaml/parser/parser.go index efcd75bf9..2bec5fea0 100644 --- a/vendor/github.com/goccy/go-yaml/parser/parser.go +++ b/vendor/github.com/goccy/go-yaml/parser/parser.go @@ -2,7 +2,7 @@ package parser import ( "fmt" - "io/ioutil" + "os" "strings" "github.com/goccy/go-yaml/ast" @@ -14,14 +14,16 @@ import ( type parser struct{} -func (p *parser) parseMapping(ctx *context) (ast.Node, error) { - node := ast.Mapping(ctx.currentToken(), true) +func (p *parser) parseMapping(ctx *context) (*ast.MappingNode, error) { + mapTk := ctx.currentToken() + node := ast.Mapping(mapTk, true) + node.SetPath(ctx.path) ctx.progress(1) // skip MappingStart token for ctx.next() { tk := ctx.currentToken() if tk.Type == token.MappingEndType { node.End = tk - break + return node, nil } else if tk.Type == token.CollectEntryType { ctx.progress(1) continue @@ -38,11 +40,12 @@ func (p *parser) parseMapping(ctx *context) (ast.Node, error) { node.Values = append(node.Values, mvnode) ctx.progress(1) } - return node, nil + return nil, errors.ErrSyntax("unterminated flow mapping", node.GetToken()) } -func (p *parser) parseSequence(ctx *context) (ast.Node, error) { +func (p *parser) parseSequence(ctx *context) (*ast.SequenceNode, error) { node := ast.Sequence(ctx.currentToken(), true) + node.SetPath(ctx.path) ctx.progress(1) // skip SequenceStart token for ctx.next() { tk := ctx.currentToken() @@ -54,7 +57,7 @@ func (p *parser) parseSequence(ctx *context) (ast.Node, error) { continue } - value, err := p.parseToken(ctx, tk) + value, err := p.parseToken(ctx.withIndex(uint(len(node.Values))), tk) if err != nil { return nil, errors.Wrapf(err, "failed to parse sequence value in flow sequence node") } @@ -64,9 +67,10 @@ func (p *parser) parseSequence(ctx *context) (ast.Node, error) { return node, nil } -func (p *parser) parseTag(ctx *context) (ast.Node, error) { +func (p *parser) parseTag(ctx *context) (*ast.TagNode, error) { tagToken := ctx.currentToken() node := ast.Tag(tagToken) + node.SetPath(ctx.path) ctx.progress(1) // skip tag token var ( value ast.Node @@ -134,22 +138,56 @@ func (p *parser) createNullToken(base *token.Token) *token.Token { return token.New("null", "null", &pos) } -func (p *parser) parseMapValue(ctx *context, key ast.Node, colonToken *token.Token) (ast.Node, error) { +func (p *parser) parseMapValue(ctx *context, key ast.MapKeyNode, colonToken *token.Token) (ast.Node, error) { + node, err := p.createMapValueNode(ctx, key, colonToken) + if err != nil { + return nil, errors.Wrapf(err, "failed to create map value node") + } + if node != nil && node.GetPath() == "" { + node.SetPath(ctx.path) + } + return node, nil +} + +func (p *parser) createMapValueNode(ctx *context, key ast.MapKeyNode, colonToken *token.Token) (ast.Node, error) { tk := ctx.currentToken() if tk == nil { nullToken := p.createNullToken(colonToken) ctx.insertToken(ctx.idx, nullToken) return ast.Null(nullToken), nil } - + var comment *ast.CommentGroupNode + if tk.Type == token.CommentType { + comment = 
p.parseCommentOnly(ctx) + if comment != nil { + comment.SetPath(ctx.withChild(key.GetToken().Value).path) + } + tk = ctx.currentToken() + } if tk.Position.Column == key.GetToken().Position.Column && tk.Type == token.StringType { // in this case, // ---- // key: // next + nullToken := p.createNullToken(colonToken) ctx.insertToken(ctx.idx, nullToken) - return ast.Null(nullToken), nil + nullNode := ast.Null(nullToken) + + if comment != nil { + nullNode.SetComment(comment) + } else { + // If there is a comment, it is already bound to the key node, + // so remove the comment from the key to bind it to the null value. + keyComment := key.GetComment() + if keyComment != nil { + if err := key.SetComment(nil); err != nil { + return nil, err + } + nullNode.SetComment(keyComment) + } + } + return nullNode, nil } if tk.Position.Column < key.GetToken().Position.Column { @@ -159,13 +197,20 @@ func (p *parser) parseMapValue(ctx *context, key ast.Node, colonToken *token.Tok // next nullToken := p.createNullToken(colonToken) ctx.insertToken(ctx.idx, nullToken) - return ast.Null(nullToken), nil + nullNode := ast.Null(nullToken) + if comment != nil { + nullNode.SetComment(comment) + } + return nullNode, nil } value, err := p.parseToken(ctx, ctx.currentToken()) if err != nil { return nil, errors.Wrapf(err, "failed to parse mapping 'value' node") } + if comment != nil { + value.SetComment(comment) + } return value, nil } @@ -190,13 +235,18 @@ func (p *parser) parseMappingValue(ctx *context) (ast.Node, error) { if err != nil { return nil, errors.Wrapf(err, "failed to parse map key") } + keyText := key.GetToken().Value + key.SetPath(ctx.withChild(keyText).path) if err := p.validateMapKey(key.GetToken()); err != nil { return nil, errors.Wrapf(err, "validate mapping key error") } ctx.progress(1) // progress to mapping value token tk := ctx.currentToken() // get mapping value token - ctx.progress(1) // progress to value token - if err := p.setSameLineCommentIfExists(ctx, key); err != nil { + if tk == nil { + return nil, errors.ErrSyntax("unexpected map", key.GetToken()) + } + ctx.progress(1) // progress to value token + if err := p.setSameLineCommentIfExists(ctx.withChild(keyText), key); err != nil { return nil, errors.Wrapf(err, "failed to set same line comment to node") } if key.GetComment() != nil { @@ -205,7 +255,7 @@ func (p *parser) parseMappingValue(ctx *context) (ast.Node, error) { ctx.progressIgnoreComment(1) } - value, err := p.parseMapValue(ctx, key, tk) + value, err := p.parseMapValue(ctx.withChild(keyText), key, tk) if err != nil { return nil, errors.Wrapf(err, "failed to parse map value") } @@ -214,7 +264,9 @@ func (p *parser) parseMappingValue(ctx *context) (ast.Node, error) { } mvnode := ast.MappingValue(tk, key, value) + mvnode.SetPath(ctx.withChild(keyText).path) node := ast.Mapping(tk, false, mvnode) + node.SetPath(ctx.withChild(keyText).path) ntk := ctx.nextNotCommentToken() antk := ctx.afterNextNotCommentToken() @@ -246,21 +298,56 @@ func (p *parser) parseMappingValue(ctx *context) (ast.Node, error) { antk = ctx.afterNextNotCommentToken() } if len(node.Values) == 1 { + mapKeyCol := mvnode.Key.GetToken().Position.Column + commentTk := ctx.nextToken() + if commentTk != nil && commentTk.Type == token.CommentType && mapKeyCol <= commentTk.Position.Column { + // If the comment is in the same or deeper column as the last element column in map value, + // treat it as a footer comment for the last element. 
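
The SetPath calls threaded through the parser above mean each parsed node now records its location as a YAML path (the same $.a.b[0] syntax accepted by PathString). A sketch that dumps those paths with ast.Walk; the pathDumper visitor is ours, not part of the library:

    package main

    import (
    	"fmt"

    	"github.com/goccy/go-yaml/ast"
    	"github.com/goccy/go-yaml/parser"
    )

    type pathDumper struct{}

    // Visit implements ast.Visitor; returning the visitor recurses into children.
    func (d pathDumper) Visit(n ast.Node) ast.Visitor {
    	fmt.Println(n.GetPath(), n.Type())
    	return d
    }

    func main() {
    	f, err := parser.ParseBytes([]byte("spec:\n  replicas: 3\n"), 0)
    	if err != nil {
    		panic(err)
    	}
    	for _, doc := range f.Docs {
    		// Prints, e.g., $.spec.replicas for the scalar 3.
    		ast.Walk(pathDumper{}, doc.Body)
    	}
    }
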
+ comment := p.parseFootComment(ctx, mapKeyCol) + mvnode.FootComment = comment + } return mvnode, nil } + mapCol := node.GetToken().Position.Column + commentTk := ctx.nextToken() + if commentTk != nil && commentTk.Type == token.CommentType && mapCol <= commentTk.Position.Column { + // If the comment is in the same or deeper column as the last element column in map value, + // treat it as a footer comment for the last element. + comment := p.parseFootComment(ctx, mapCol) + node.FootComment = comment + } return node, nil } -func (p *parser) parseSequenceEntry(ctx *context) (ast.Node, error) { +func (p *parser) parseSequenceEntry(ctx *context) (*ast.SequenceNode, error) { tk := ctx.currentToken() sequenceNode := ast.Sequence(tk, false) + sequenceNode.SetPath(ctx.path) curColumn := tk.Position.Column for tk.Type == token.SequenceEntryType { ctx.progress(1) // skip sequence token - value, err := p.parseToken(ctx, ctx.currentToken()) + tk = ctx.currentToken() + if tk == nil { + return nil, errors.ErrSyntax("empty sequence entry", ctx.previousToken()) + } + var comment *ast.CommentGroupNode + if tk.Type == token.CommentType { + comment = p.parseCommentOnly(ctx) + tk = ctx.currentToken() + if tk.Type == token.SequenceEntryType { + ctx.progress(1) // skip sequence token + } + } + value, err := p.parseToken(ctx.withIndex(uint(len(sequenceNode.Values))), ctx.currentToken()) if err != nil { return nil, errors.Wrapf(err, "failed to parse sequence") } + if comment != nil { + comment.SetPath(ctx.withIndex(uint(len(sequenceNode.Values))).path) + sequenceNode.ValueHeadComments = append(sequenceNode.ValueHeadComments, comment) + } else { + sequenceNode.ValueHeadComments = append(sequenceNode.ValueHeadComments, nil) + } sequenceNode.Values = append(sequenceNode.Values, value) tk = ctx.nextNotCommentToken() if tk == nil { @@ -274,12 +361,20 @@ func (p *parser) parseSequenceEntry(ctx *context) (ast.Node, error) { } ctx.progressIgnoreComment(1) } + commentTk := ctx.nextToken() + if commentTk != nil && commentTk.Type == token.CommentType && curColumn <= commentTk.Position.Column { + // If the comment is in the same or deeper column as the last element column in sequence value, + // treat it as a footer comment for the last element. + comment := p.parseFootComment(ctx, curColumn) + sequenceNode.FootComment = comment + } return sequenceNode, nil } -func (p *parser) parseAnchor(ctx *context) (ast.Node, error) { +func (p *parser) parseAnchor(ctx *context) (*ast.AnchorNode, error) { tk := ctx.currentToken() anchor := ast.Anchor(tk) + anchor.SetPath(ctx.path) ntk := ctx.nextToken() if ntk == nil { return nil, errors.ErrSyntax("unexpected anchor. anchor name is undefined", tk) @@ -303,9 +398,10 @@ func (p *parser) parseAnchor(ctx *context) (ast.Node, error) { return anchor, nil } -func (p *parser) parseAlias(ctx *context) (ast.Node, error) { +func (p *parser) parseAlias(ctx *context) (*ast.AliasNode, error) { tk := ctx.currentToken() alias := ast.Alias(tk) + alias.SetPath(ctx.path) ntk := ctx.nextToken() if ntk == nil { return nil, errors.ErrSyntax("unexpected alias. 
alias name is undefined", tk) @@ -319,7 +415,7 @@ func (p *parser) parseAlias(ctx *context) (ast.Node, error) { return alias, nil } -func (p *parser) parseMapKey(ctx *context) (ast.Node, error) { +func (p *parser) parseMapKey(ctx *context) (ast.MapKeyNode, error) { tk := ctx.currentToken() if value := p.parseScalarValue(tk); value != nil { return value, nil @@ -333,7 +429,7 @@ func (p *parser) parseMapKey(ctx *context) (ast.Node, error) { return nil, errors.ErrSyntax("unexpected mapping key", tk) } -func (p *parser) parseStringValue(tk *token.Token) ast.Node { +func (p *parser) parseStringValue(tk *token.Token) *ast.StringNode { switch tk.Type { case token.StringType, token.SingleQuoteType, @@ -343,11 +439,12 @@ func (p *parser) parseStringValue(tk *token.Token) ast.Node { return nil } -func (p *parser) parseScalarValueWithComment(ctx *context, tk *token.Token) (ast.Node, error) { +func (p *parser) parseScalarValueWithComment(ctx *context, tk *token.Token) (ast.ScalarNode, error) { node := p.parseScalarValue(tk) if node == nil { return nil, nil } + node.SetPath(ctx.path) if p.isSameLineComment(ctx.nextToken(), node) { ctx.progress(1) if err := p.setSameLineCommentIfExists(ctx, node); err != nil { @@ -357,7 +454,7 @@ func (p *parser) parseScalarValueWithComment(ctx *context, tk *token.Token) (ast return node, nil } -func (p *parser) parseScalarValue(tk *token.Token) ast.Node { +func (p *parser) parseScalarValue(tk *token.Token) ast.ScalarNode { if node := p.parseStringValue(tk); node != nil { return node } @@ -381,7 +478,7 @@ func (p *parser) parseScalarValue(tk *token.Token) ast.Node { return nil } -func (p *parser) parseDirective(ctx *context) (ast.Node, error) { +func (p *parser) parseDirective(ctx *context) (*ast.DirectiveNode, error) { node := ast.Directive(ctx.currentToken()) ctx.progress(1) // skip directive token value, err := p.parseToken(ctx, ctx.currentToken()) @@ -402,10 +499,21 @@ func (p *parser) parseDirective(ctx *context) (ast.Node, error) { return node, nil } -func (p *parser) parseLiteral(ctx *context) (ast.Node, error) { +func (p *parser) parseLiteral(ctx *context) (*ast.LiteralNode, error) { node := ast.Literal(ctx.currentToken()) ctx.progress(1) // skip literal/folded token - value, err := p.parseToken(ctx, ctx.currentToken()) + + tk := ctx.currentToken() + var comment *ast.CommentGroupNode + if tk.Type == token.CommentType { + comment = p.parseCommentOnly(ctx) + comment.SetPath(ctx.path) + if err := node.SetComment(comment); err != nil { + return nil, errors.Wrapf(err, "failed to set comment to literal") + } + tk = ctx.currentToken() + } + value, err := p.parseToken(ctx, tk) if err != nil { return nil, errors.Wrapf(err, "failed to parse literal/folded value") } @@ -432,7 +540,9 @@ func (p *parser) setSameLineCommentIfExists(ctx *context, node ast.Node) error { if !p.isSameLineComment(tk, node) { return nil } - if err := node.SetComment(tk); err != nil { + comment := ast.CommentGroup([]*token.Token{tk}) + comment.SetPath(ctx.path) + if err := node.SetComment(comment); err != nil { return errors.Wrapf(err, "failed to set comment token to ast.Node") } return nil @@ -453,7 +563,7 @@ func (p *parser) parseDocument(ctx *context) (*ast.DocumentNode, error) { return node, nil } -func (p *parser) parseComment(ctx *context) (ast.Node, error) { +func (p *parser) parseCommentOnly(ctx *context) *ast.CommentGroupNode { commentTokens := []*token.Token{} for { tk := ctx.currentToken() @@ -466,33 +576,51 @@ func (p *parser) parseComment(ctx *context) (ast.Node, error) { commentTokens 
= append(commentTokens, tk) ctx.progressIgnoreComment(1) // skip comment token } - // TODO: support token group. currently merge tokens to one token - firstToken := commentTokens[0] - values := []string{} - origins := []string{} - for _, tk := range commentTokens { - values = append(values, tk.Value) - origins = append(origins, tk.Origin) + return ast.CommentGroup(commentTokens) +} + +func (p *parser) parseFootComment(ctx *context, col int) *ast.CommentGroupNode { + commentTokens := []*token.Token{} + for { + ctx.progressIgnoreComment(1) + commentTokens = append(commentTokens, ctx.currentToken()) + + nextTk := ctx.nextToken() + if nextTk == nil { + break + } + if nextTk.Type != token.CommentType { + break + } + if col > nextTk.Position.Column { + break + } } - firstToken.Value = strings.Join(values, "") - firstToken.Value = strings.Join(origins, "") + return ast.CommentGroup(commentTokens) +} + +func (p *parser) parseComment(ctx *context) (ast.Node, error) { + group := p.parseCommentOnly(ctx) node, err := p.parseToken(ctx, ctx.currentToken()) if err != nil { return nil, errors.Wrapf(err, "failed to parse node after comment") } if node == nil { - return ast.Comment(firstToken), nil + return group, nil } - if err := node.SetComment(firstToken); err != nil { + group.SetPath(node.GetPath()) + if err := node.SetComment(group); err != nil { return nil, errors.Wrapf(err, "failed to set comment token to node") } return node, nil } -func (p *parser) parseMappingKey(ctx *context) (ast.Node, error) { - node := ast.MappingKey(ctx.currentToken()) +func (p *parser) parseMappingKey(ctx *context) (*ast.MappingKeyNode, error) { + keyTk := ctx.currentToken() + node := ast.MappingKey(keyTk) + node.SetPath(ctx.path) ctx.progress(1) // skip mapping key token - value, err := p.parseToken(ctx, ctx.currentToken()) + value, err := p.parseToken(ctx.withChild(keyTk.Value), ctx.currentToken()) if err != nil { return nil, errors.Wrapf(err, "failed to parse map key") } @@ -501,6 +629,17 @@ func (p *parser) parseMappingKey(ctx *context) (ast.Node, error) { } func (p *parser) parseToken(ctx *context, tk *token.Token) (ast.Node, error) { + node, err := p.createNodeFromToken(ctx, tk) + if err != nil { + return nil, errors.Wrapf(err, "failed to create node from token") + } + if node != nil && node.GetPath() == "" { + node.SetPath(ctx.path) + } + return node, nil +} + +func (p *parser) createNodeFromToken(ctx *context, tk *token.Token) (ast.Node, error) { if tk == nil { return nil, nil } @@ -591,7 +730,7 @@ func Parse(tokens token.Tokens, mode Mode) (*ast.File, error) { // Parse parse from filename, and returns ast.File func ParseFile(filename string, mode Mode) (*ast.File, error) { - file, err := ioutil.ReadFile(filename) + file, err := os.ReadFile(filename) if err != nil { return nil, errors.Wrapf(err, "failed to read file: %s", filename) } diff --git a/vendor/github.com/goccy/go-yaml/path.go b/vendor/github.com/goccy/go-yaml/path.go index 2ba88baf8..b79c6669f 100644 --- a/vendor/github.com/goccy/go-yaml/path.go +++ b/vendor/github.com/goccy/go-yaml/path.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "strconv" + "strings" "github.com/goccy/go-yaml/ast" "github.com/goccy/go-yaml/internal/errors" @@ -20,6 +21,10 @@ import ( // .. : recursive descent // [num] : object/element of array by number // [*] : all objects/elements for array. +// +// If you want to use reserved characters such as `.` and `*` as a key name, +// enclose them in single quotation as follows ( $.foo.'bar.baz-*'.hoge ). 
+// If you want to use a single quote with reserved characters, escape it with `\` ( $.foo.'bar.baz\'s value'.hoge ). func PathString(s string) (*Path, error) { buf := []rune(s) length := len(buf) @@ -32,17 +37,19 @@ func PathString(s string) (*Path, error) { builder = builder.Root() cursor++ case '.': - b, c, err := parsePathDot(builder, buf, cursor) + b, buf, c, err := parsePathDot(builder, buf, cursor) if err != nil { return nil, errors.Wrapf(err, "failed to parse path of dot") } + length = len(buf) builder = b cursor = c case '[': - b, c, err := parsePathIndex(builder, buf, cursor) + b, buf, c, err := parsePathIndex(builder, buf, cursor) if err != nil { return nil, errors.Wrapf(err, "failed to parse path of index") } + length = len(buf) builder = b cursor = c default: @@ -52,7 +59,7 @@ func PathString(s string) (*Path, error) { return builder.Build(), nil } -func parsePathRecursive(b *PathBuilder, buf []rune, cursor int) (*PathBuilder, int, error) { +func parsePathRecursive(b *PathBuilder, buf []rune, cursor int) (*PathBuilder, []rune, int, error) { length := len(buf) cursor += 2 // skip .. characters start := cursor @@ -60,58 +67,100 @@ func parsePathRecursive(b *PathBuilder, buf []rune, cursor int) (*PathBuilder, i c := buf[cursor] switch c { case '$': - return nil, 0, errors.Wrapf(ErrInvalidPathString, "specified '$' after '..' character") + return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified '$' after '..' character") case '*': - return nil, 0, errors.Wrapf(ErrInvalidPathString, "specified '*' after '..' character") + return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified '*' after '..' character") case '.', '[': goto end case ']': - return nil, 0, errors.Wrapf(ErrInvalidPathString, "specified ']' after '..' character") + return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified ']' after '..' character") } } end: if start == cursor { - return nil, 0, errors.Wrapf(ErrInvalidPathString, "not found recursive selector") + return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "not found recursive selector") } - return b.Recursive(string(buf[start:cursor])), cursor, nil + return b.Recursive(string(buf[start:cursor])), buf, cursor, nil } -func parsePathDot(b *PathBuilder, buf []rune, cursor int) (*PathBuilder, int, error) { +func parsePathDot(b *PathBuilder, buf []rune, cursor int) (*PathBuilder, []rune, int, error) { length := len(buf) if cursor+1 < length && buf[cursor+1] == '.' { - b, c, err := parsePathRecursive(b, buf, cursor) + b, buf, c, err := parsePathRecursive(b, buf, cursor) if err != nil { - return nil, 0, errors.Wrapf(err, "failed to parse path of recursive") + return nil, nil, 0, errors.Wrapf(err, "failed to parse path of recursive") } - return b, c, nil + return b, buf, c, nil } cursor++ // skip . character start := cursor + + // if started single quote, looking for end single quote char + if cursor < length && buf[cursor] == '\'' { + return parseQuotedKey(b, buf, cursor) + } for ; cursor < length; cursor++ { c := buf[cursor] switch c { case '$': - return nil, 0, errors.Wrapf(ErrInvalidPathString, "specified '$' after '.' character") + return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified '$' after '.' character") case '*': - return nil, 0, errors.Wrapf(ErrInvalidPathString, "specified '*' after '.' character") + return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified '*' after '.' character") case '.', '[': goto end case ']': - return nil, 0, errors.Wrapf(ErrInvalidPathString, "specified ']' after '.' 
character") + return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified ']' after '.' character") + } + } +end: + if start == cursor { + return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "cloud not find by empty key") + } + return b.child(string(buf[start:cursor])), buf, cursor, nil +} + +func parseQuotedKey(b *PathBuilder, buf []rune, cursor int) (*PathBuilder, []rune, int, error) { + cursor++ // skip single quote + start := cursor + length := len(buf) + var foundEndDelim bool + for ; cursor < length; cursor++ { + switch buf[cursor] { + case '\\': + buf = append(append([]rune{}, buf[:cursor]...), buf[cursor+1:]...) + length = len(buf) + case '\'': + foundEndDelim = true + goto end } } end: + if !foundEndDelim { + return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "could not find end delimiter for key") + } if start == cursor { - return nil, 0, errors.Wrapf(ErrInvalidPathString, "not found child selector") + return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "could not find by empty key") + } + selector := buf[start:cursor] + cursor++ + if cursor < length { + switch buf[cursor] { + case '$': + return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified '$' after '.' character") + case '*': + return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified '*' after '.' character") + case ']': + return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified ']' after '.' character") + } } - return b.Child(string(buf[start:cursor])), cursor, nil + return b.child(string(selector)), buf, cursor, nil } -func parsePathIndex(b *PathBuilder, buf []rune, cursor int) (*PathBuilder, int, error) { +func parsePathIndex(b *PathBuilder, buf []rune, cursor int) (*PathBuilder, []rune, int, error) { length := len(buf) cursor++ // skip '[' character if length <= cursor { - return nil, 0, errors.Wrapf(ErrInvalidPathString, "unexpected end of YAML Path") + return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "unexpected end of YAML Path") } c := buf[cursor] switch c { @@ -127,19 +176,19 @@ func parsePathIndex(b *PathBuilder, buf []rune, cursor int) (*PathBuilder, int, break } if buf[cursor] != ']' { - return nil, 0, errors.Wrapf(ErrInvalidPathString, "invalid character %s at %d", string(buf[cursor]), cursor) + return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "invalid character %s at %d", string(buf[cursor]), cursor) } numOrAll := string(buf[start:cursor]) if numOrAll == "*" { - return b.IndexAll(), cursor + 1, nil + return b.IndexAll(), buf, cursor + 1, nil } num, err := strconv.ParseInt(numOrAll, 10, 64) if err != nil { - return nil, 0, errors.Wrapf(err, "failed to parse number") + return nil, nil, 0, errors.Wrapf(err, "failed to parse number") } - return b.Index(uint(num)), cursor + 1, nil + return b.Index(uint(num)), buf, cursor + 1, nil } - return nil, 0, errors.Wrapf(ErrInvalidPathString, "invalid character %s at %d", c, cursor) + return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "invalid character %s at %d", c, cursor) } // Path represent YAMLPath ( like a JSONPath ). @@ -338,12 +387,42 @@ func (b *PathBuilder) Recursive(selector string) *PathBuilder { return b } -// Child add '.name' to current path. 
-func (b *PathBuilder) Child(name string) *PathBuilder { +func (b *PathBuilder) containsReservedPathCharacters(path string) bool { + if strings.Contains(path, ".") { + return true + } + if strings.Contains(path, "*") { + return true + } + return false +} + +func (b *PathBuilder) enclosedSingleQuote(name string) bool { + return strings.HasPrefix(name, "'") && strings.HasSuffix(name, "'") +} + +func (b *PathBuilder) normalizeSelectorName(name string) string { + if b.enclosedSingleQuote(name) { + // already escaped name + return name + } + if b.containsReservedPathCharacters(name) { + escapedName := strings.ReplaceAll(name, `'`, `\'`) + return "'" + escapedName + "'" + } + return name +} + +func (b *PathBuilder) child(name string) *PathBuilder { b.node = b.node.chain(newSelectorNode(name)) return b } +// Child add '.name' to current path. +func (b *PathBuilder) Child(name string) *PathBuilder { + return b.child(b.normalizeSelectorName(name)) +} + // Index add '[idx]' to current path. func (b *PathBuilder) Index(idx uint) *PathBuilder { b.node = b.node.chain(newIndexNode(idx)) @@ -389,7 +468,7 @@ func (n *rootNode) String() string { func (n *rootNode) filter(node ast.Node) (ast.Node, error) { if n.child == nil { - return nil, nil + return node, nil } filtered, err := n.child.filter(node) if err != nil { @@ -421,11 +500,29 @@ func newSelectorNode(selector string) *selectorNode { } func (n *selectorNode) filter(node ast.Node) (ast.Node, error) { + selector := n.selector + if len(selector) > 1 && selector[0] == '\'' && selector[len(selector)-1] == '\'' { + selector = selector[1 : len(selector)-1] + } switch node.Type() { case ast.MappingType: for _, value := range node.(*ast.MappingNode).Values { key := value.Key.GetToken().Value - if key == n.selector { + if len(key) > 0 { + switch key[0] { + case '"': + var err error + key, err = strconv.Unquote(key) + if err != nil { + return nil, errors.Wrapf(err, "failed to unquote") + } + case '\'': + if len(key) > 1 && key[len(key)-1] == '\'' { + key = key[1 : len(key)-1] + } + } + } + if key == selector { if n.child == nil { return value.Value, nil } @@ -439,7 +536,7 @@ func (n *selectorNode) filter(node ast.Node) (ast.Node, error) { case ast.MappingValueType: value := node.(*ast.MappingValueNode) key := value.Key.GetToken().Value - if key == n.selector { + if key == selector { if n.child == nil { return value.Value, nil } @@ -492,7 +589,9 @@ func (n *selectorNode) replace(node ast.Node, target ast.Node) error { } func (n *selectorNode) String() string { - s := fmt.Sprintf(".%s", n.selector) + var builder PathBuilder + selector := builder.normalizeSelectorName(n.selector) + s := fmt.Sprintf(".%s", selector) if n.child != nil { s += n.child.String() } @@ -638,7 +737,7 @@ func (n *recursiveNode) String() string { } func (n *recursiveNode) filterNode(node ast.Node) (*ast.SequenceNode, error) { - sequence := &ast.SequenceNode{} + sequence := &ast.SequenceNode{BaseNode: &ast.BaseNode{}} switch typedNode := node.(type) { case *ast.MappingNode: for _, value := range typedNode.Values { diff --git a/vendor/github.com/goccy/go-yaml/printer/printer.go b/vendor/github.com/goccy/go-yaml/printer/printer.go index bc3d7232e..d5e25dc91 100644 --- a/vendor/github.com/goccy/go-yaml/printer/printer.go +++ b/vendor/github.com/goccy/go-yaml/printer/printer.go @@ -287,21 +287,6 @@ func (p *Printer) printBeforeTokens(tk *token.Token, minLine, extLine int) token return tokens } -func (p *Printer) addNewLineCharIfDocumentHeader(tk *token.Token) { - if tk.Prev == nil { - return - } 
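// Illustrative sketch, not part of the vendored diff: with the
// normalizeSelectorName change above, PathBuilder.Child now quotes reserved
// characters automatically, so built paths round-trip through PathString.
// The key names are hypothetical examples.
package main

import (
	"fmt"

	yaml "github.com/goccy/go-yaml"
)

func main() {
	path := (&yaml.PathBuilder{}).Root().Child("foo").Child("bar.baz").Build()
	fmt.Println(path) // $.foo.'bar.baz'
}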
- if tk.Type != token.DocumentHeaderType { - return - } - prev := tk.Prev - lineDiff := tk.Position.Line - prev.Position.Line - if p.isNewLineLastChar(prev.Origin) { - lineDiff-- - } - tk.Origin = strings.Repeat("\n", lineDiff) + tk.Origin -} - func (p *Printer) printAfterTokens(tk *token.Token, maxLine int) token.Tokens { tokens := token.Tokens{} if tk == nil { @@ -316,7 +301,6 @@ func (p *Printer) printAfterTokens(tk *token.Token, maxLine int) token.Tokens { tk = minTk.Next for tk != nil && tk.Position.Line <= maxLine { clonedTk := tk.Clone() - p.addNewLineCharIfDocumentHeader(clonedTk) tokens.Add(clonedTk) tk = clonedTk.Next } diff --git a/vendor/github.com/goccy/go-yaml/scanner/context.go b/vendor/github.com/goccy/go-yaml/scanner/context.go index e629a5c2a..3aaec5618 100644 --- a/vendor/github.com/goccy/go-yaml/scanner/context.go +++ b/vendor/github.com/goccy/go-yaml/scanner/context.go @@ -6,6 +6,8 @@ import ( "github.com/goccy/go-yaml/token" ) +const whitespace = ' ' + // Context context at scanning type Context struct { idx int @@ -55,7 +57,11 @@ func (c *Context) reset(src []rune) { c.src = src c.tokens = c.tokens[:0] c.resetBuffer() + c.isRawFolded = false c.isSingleLine = true + c.isLiteral = false + c.isFolded = false + c.literalOpt = "" } func (c *Context) resetBuffer() { @@ -139,7 +145,22 @@ func (c *Context) previousChar() rune { } func (c *Context) currentChar() rune { - return c.src[c.idx] + if c.size > c.idx { + return c.src[c.idx] + } + return rune(0) +} + +func (c *Context) currentCharWithSkipWhitespace() rune { + idx := c.idx + for c.size > idx { + ch := c.src[idx] + if ch != whitespace { + return ch + } + idx++ + } + return rune(0) } func (c *Context) nextChar() rune { @@ -175,9 +196,16 @@ func (c *Context) existsBuffer() bool { func (c *Context) bufferedSrc() []rune { src := c.buf[:c.notSpaceCharPos] - if len(src) > 0 && src[len(src)-1] == '\n' && c.isDocument() && c.literalOpt == "-" { - // remove end '\n' character - src = src[:len(src)-1] + if c.isDocument() && c.literalOpt == "-" { + // remove end '\n' character and trailing empty lines + // https://yaml.org/spec/1.2.2/#8112-block-chomping-indicator + for { + if len(src) > 0 && src[len(src)-1] == '\n' { + src = src[:len(src)-1] + continue + } + break + } } return src } @@ -199,3 +227,10 @@ func (c *Context) bufferedToken(pos *token.Position) *token.Token { c.resetBuffer() return tk } + +func (c *Context) lastToken() *token.Token { + if len(c.tokens) != 0 { + return c.tokens[len(c.tokens)-1] + } + return nil +} diff --git a/vendor/github.com/goccy/go-yaml/scanner/scanner.go b/vendor/github.com/goccy/go-yaml/scanner/scanner.go index f1d19c74a..77acb4184 100644 --- a/vendor/github.com/goccy/go-yaml/scanner/scanner.go +++ b/vendor/github.com/goccy/go-yaml/scanner/scanner.go @@ -4,8 +4,9 @@ import ( "io" "strings" - "github.com/goccy/go-yaml/token" "golang.org/x/xerrors" + + "github.com/goccy/go-yaml/token" ) // IndentState state for indent @@ -61,13 +62,25 @@ func (s *Scanner) bufferedToken(ctx *Context) *token.Token { s.savedPos = nil return tk } - size := len(ctx.buf) + line := s.line + column := s.column - len(ctx.buf) + level := s.indentLevel + if ctx.isSaveIndentMode() { + line -= s.newLineCount(ctx.buf) + column = strings.Index(string(ctx.obuf), string(ctx.buf)) + 1 + // Since we are in a literal, folded or raw folded + // we can use the indent level from the last token. + last := ctx.lastToken() + if last != nil { // The last token should never be nil here. 
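// Illustrative sketch, not part of the vendored diff: the bufferedSrc change
// above makes the strip chomping indicator ("|-") drop all trailing newlines,
// per YAML 1.2 section 8.1.1.2, instead of only the last one.
package main

import (
	"fmt"

	yaml "github.com/goccy/go-yaml"
)

func main() {
	var v map[string]string
	// The blank lines after "hello" are stripped by the "-" chomping indicator.
	if err := yaml.Unmarshal([]byte("a: |-\n  hello\n\n\n"), &v); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", v["a"]) // "hello"
}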
+ level = last.Position.IndentLevel + 1 + } + } return ctx.bufferedToken(&token.Position{ - Line: s.line, - Column: s.column - size, - Offset: s.offset - size, + Line: line, + Column: column, + Offset: s.offset - len(ctx.buf), IndentNum: s.indentNum, - IndentLevel: s.indentLevel, + IndentLevel: level, }) } @@ -128,27 +141,16 @@ func (s *Scanner) newLineCount(src []rune) int { return cnt } -func (s *Scanner) updateIndent(ctx *Context, c rune) { - if s.isFirstCharAtLine && s.isNewLineChar(c) && ctx.isDocument() { - return - } - if s.isFirstCharAtLine && c == ' ' { - s.indentNum++ - return - } - if !s.isFirstCharAtLine { - s.indentState = IndentStateKeep - return - } - +func (s *Scanner) updateIndentState(ctx *Context) { + indentNumBasedIndentState := s.indentState if s.prevIndentNum < s.indentNum { s.indentLevel = s.prevIndentLevel + 1 - s.indentState = IndentStateUp + indentNumBasedIndentState = IndentStateUp } else if s.prevIndentNum == s.indentNum { s.indentLevel = s.prevIndentLevel - s.indentState = IndentStateEqual + indentNumBasedIndentState = IndentStateEqual } else { - s.indentState = IndentStateDown + indentNumBasedIndentState = IndentStateDown if s.prevIndentLevel > 0 { s.indentLevel = s.prevIndentLevel - 1 } @@ -157,18 +159,48 @@ func (s *Scanner) updateIndent(ctx *Context, c rune) { if s.prevIndentColumn > 0 { if s.prevIndentColumn < s.column { s.indentState = IndentStateUp - } else if s.prevIndentColumn == s.column { - s.indentState = IndentStateEqual - } else { + } else if s.prevIndentColumn != s.column || indentNumBasedIndentState != IndentStateEqual { + // In the following case ( the current position is 'd' ), the variables look like this: + // - prevIndentColumn: 1 of 'a' + // - indentNumBasedIndentState: IndentStateDown because d's indentNum(1) is less than c's indentNum(3). + // Therefore, s.prevIndentColumn(1) == s.column(1) is true, but we want to treat this as IndentStateDown. + // So we also consult the indentState computed by the prevIndentNum-based logic above to determine the final indentState.
+ // --- + // a: + // b + // c + // d: e + // ^ s.indentState = IndentStateDown + } else { + s.indentState = IndentStateEqual } + } else { + s.indentState = indentNumBasedIndentState } +} + +func (s *Scanner) updateIndent(ctx *Context, c rune) { + if s.isFirstCharAtLine && s.isNewLineChar(c) && ctx.isDocument() { + return + } + if s.isFirstCharAtLine && c == ' ' { + s.indentNum++ + return + } + if !s.isFirstCharAtLine { + s.indentState = IndentStateKeep + return + } + s.updateIndentState(ctx) s.isFirstCharAtLine = false if s.isNeededKeepPreviousIndentNum(ctx, c) { return } + if s.indentState != IndentStateUp { + s.prevIndentColumn = 0 + } s.prevIndentNum = s.indentNum - s.prevIndentColumn = 0 s.prevIndentLevel = s.indentLevel } @@ -195,19 +227,27 @@ func (s *Scanner) breakLiteral(ctx *Context) { func (s *Scanner) scanSingleQuote(ctx *Context) (tk *token.Token, pos int) { ctx.addOriginBuf('\'') + srcpos := s.pos() startIndex := ctx.idx + 1 - ctx.progress(1) src := ctx.src size := len(src) value := []rune{} isFirstLineChar := false + isNewLine := false for idx := startIndex; idx < size; idx++ { + if !isNewLine { + s.progressColumn(ctx, 1) + } else { + isNewLine = false + } c := src[idx] pos = idx + 1 ctx.addOriginBuf(c) if s.isNewLineChar(c) { value = append(value, ' ') isFirstLineChar = true + isNewLine = true + s.progressLine(ctx) continue } else if c == ' ' && isFirstLineChar { continue @@ -223,7 +263,8 @@ func (s *Scanner) scanSingleQuote(ctx *Context) (tk *token.Token, pos int) { idx++ continue } - tk = token.SingleQuote(string(value), string(ctx.obuf), s.pos()) + s.progressColumn(ctx, 1) + tk = token.SingleQuote(string(value), string(ctx.obuf), srcpos) pos = idx - startIndex + 1 return } @@ -250,120 +291,127 @@ func hexRunesToInt(b []rune) int { func (s *Scanner) scanDoubleQuote(ctx *Context) (tk *token.Token, pos int) { ctx.addOriginBuf('"') + srcpos := s.pos() startIndex := ctx.idx + 1 - ctx.progress(1) src := ctx.src size := len(src) value := []rune{} isFirstLineChar := false + isNewLine := false for idx := startIndex; idx < size; idx++ { + if !isNewLine { + s.progressColumn(ctx, 1) + } else { + isNewLine = false + } c := src[idx] pos = idx + 1 ctx.addOriginBuf(c) if s.isNewLineChar(c) { value = append(value, ' ') isFirstLineChar = true + isNewLine = true + s.progressLine(ctx) continue } else if c == ' ' && isFirstLineChar { continue } else if c == '\\' { isFirstLineChar = false - if idx+1 < size { - nextChar := src[idx+1] - switch nextChar { - case 'b': - ctx.addOriginBuf(nextChar) - value = append(value, '\b') - idx++ - continue - case 'e': - ctx.addOriginBuf(nextChar) - value = append(value, '\x1B') - idx++ - continue - case 'f': - ctx.addOriginBuf(nextChar) - value = append(value, '\f') - idx++ - continue - case 'n': - ctx.addOriginBuf(nextChar) - value = append(value, '\n') - idx++ - continue - case 'v': - ctx.addOriginBuf(nextChar) - value = append(value, '\v') - idx++ - continue - case 'L': // LS (#x2028) - ctx.addOriginBuf(nextChar) - value = append(value, []rune{'\xE2', '\x80', '\xA8'}...) - idx++ - continue - case 'N': // NEL (#x85) - ctx.addOriginBuf(nextChar) - value = append(value, []rune{'\xC2', '\x85'}...) - idx++ - continue - case 'P': // PS (#x2029) - ctx.addOriginBuf(nextChar) - value = append(value, []rune{'\xE2', '\x80', '\xA9'}...) - idx++ - continue - case '_': // #xA0 - ctx.addOriginBuf(nextChar) - value = append(value, []rune{'\xC2', '\xA0'}...) 
- idx++ - continue - case '"': - ctx.addOriginBuf(nextChar) - value = append(value, nextChar) - idx++ - continue - case 'x': - if idx+3 >= size { - // TODO: need to return error - //err = xerrors.New("invalid escape character \\x") - return - } - codeNum := hexRunesToInt(src[idx+2 : idx+4]) - value = append(value, rune(codeNum)) - idx += 3 - continue - case 'u': - if idx+5 >= size { - // TODO: need to return error - //err = xerrors.New("invalid escape character \\u") - return - } - codeNum := hexRunesToInt(src[idx+2 : idx+6]) - value = append(value, rune(codeNum)) - idx += 5 - continue - case 'U': - if idx+9 >= size { - // TODO: need to return error - //err = xerrors.New("invalid escape character \\U") - return - } - codeNum := hexRunesToInt(src[idx+2 : idx+10]) - value = append(value, rune(codeNum)) - idx += 9 - continue - case '\\': - ctx.addOriginBuf(nextChar) - idx++ + if idx+1 >= size { + value = append(value, c) + continue + } + nextChar := src[idx+1] + progress := 0 + switch nextChar { + case 'b': + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, '\b') + case 'e': + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, '\x1B') + case 'f': + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, '\f') + case 'n': + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, '\n') + case 'r': + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, '\r') + case 'v': + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, '\v') + case 'L': // LS (#x2028) + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, []rune{'\xE2', '\x80', '\xA8'}...) + case 'N': // NEL (#x85) + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, []rune{'\xC2', '\x85'}...) + case 'P': // PS (#x2029) + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, []rune{'\xE2', '\x80', '\xA9'}...) + case '_': // #xA0 + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, []rune{'\xC2', '\xA0'}...) 
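// Illustrative sketch, not part of the vendored diff: the rewritten escape
// switch above decodes \x, \u and \U sequences (plus the new \r case) inside
// double-quoted scalars.
package main

import (
	"fmt"

	yaml "github.com/goccy/go-yaml"
)

func main() {
	var v map[string]string
	if err := yaml.Unmarshal([]byte(`a: "caf\xe9 \u00e9"`), &v); err != nil {
		panic(err)
	}
	fmt.Println(v["a"]) // café é
}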
+ case '"': + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, nextChar) + case 'x': + progress = 3 + if idx+progress >= size { + // TODO: need to return error + //err = xerrors.New("invalid escape character \\x") + return + } + codeNum := hexRunesToInt(src[idx+2 : idx+progress+1]) + value = append(value, rune(codeNum)) + case 'u': + progress = 5 + if idx+progress >= size { + // TODO: need to return error + //err = xerrors.New("invalid escape character \\u") + return } + codeNum := hexRunesToInt(src[idx+2 : idx+progress+1]) + value = append(value, rune(codeNum)) + case 'U': + progress = 9 + if idx+progress >= size { + // TODO: need to return error + //err = xerrors.New("invalid escape character \\U") + return + } + codeNum := hexRunesToInt(src[idx+2 : idx+progress+1]) + value = append(value, rune(codeNum)) + case '\\': + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, c) + default: + value = append(value, c) } - value = append(value, c) + idx += progress + s.progressColumn(ctx, progress) continue } else if c != '"' { value = append(value, c) isFirstLineChar = false continue } - tk = token.DoubleQuote(string(value), string(ctx.obuf), s.pos()) + s.progressColumn(ctx, 1) + tk = token.DoubleQuote(string(value), string(ctx.obuf), srcpos) pos = idx - startIndex + 1 return } @@ -377,6 +425,30 @@ func (s *Scanner) scanQuote(ctx *Context, ch rune) (tk *token.Token, pos int) { return s.scanDoubleQuote(ctx) } +func (s *Scanner) isMergeKey(ctx *Context) bool { + if ctx.repeatNum('<') != 2 { + return false + } + src := ctx.src + size := len(src) + for idx := ctx.idx + 2; idx < size; idx++ { + c := src[idx] + if c == ' ' { + continue + } + if c != ':' { + return false + } + if idx+1 < size { + nc := src[idx+1] + if nc == ' ' || s.isNewLineChar(nc) { + return true + } + } + } + return false +} + func (s *Scanner) scanTag(ctx *Context) (tk *token.Token, pos int) { ctx.addOriginBuf('!') ctx.progress(1) // skip '!' character @@ -411,9 +483,24 @@ func (s *Scanner) scanComment(ctx *Context) (tk *token.Token, pos int) { return } } + // document ends with comment. 
+ value := string(ctx.src[ctx.idx:]) + tk = token.Comment(value, string(ctx.obuf), s.pos()) + pos = len([]rune(value)) + 1 return } +func trimCommentFromLiteralOpt(text string) (string, error) { + idx := strings.Index(text, "#") + if idx < 0 { + return text, nil + } + if idx == 0 { + return "", xerrors.New("invalid literal header") + } + return text[:idx-1], nil +} + func (s *Scanner) scanLiteral(ctx *Context, c rune) { ctx.addOriginBuf(c) if ctx.isEOS() { @@ -456,14 +543,44 @@ func (s *Scanner) scanLiteralHeader(ctx *Context) (pos int, err error) { case '\n', '\r': value := ctx.source(ctx.idx, ctx.idx+idx) opt := strings.TrimRight(value, " ") + orgOptLen := len(opt) + opt, err = trimCommentFromLiteralOpt(opt) + if err != nil { + return + } switch opt { case "", "+", "-", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9": + hasComment := len(opt) < orgOptLen if header == '|' { - ctx.addToken(token.Literal("|"+opt, string(ctx.obuf), s.pos())) + if hasComment { + commentLen := orgOptLen - len(opt) + headerPos := strings.Index(string(ctx.obuf), "|") + litBuf := ctx.obuf[:len(ctx.obuf)-commentLen-headerPos] + commentBuf := ctx.obuf[len(litBuf):] + ctx.addToken(token.Literal("|"+opt, string(litBuf), s.pos())) + s.column += len(litBuf) + s.offset += len(litBuf) + commentHeader := strings.Index(value, "#") + ctx.addToken(token.Comment(string(value[commentHeader+1:]), string(commentBuf), s.pos())) + } else { + ctx.addToken(token.Literal("|"+opt, string(ctx.obuf), s.pos())) + } ctx.isLiteral = true } else if header == '>' { - ctx.addToken(token.Folded(">"+opt, string(ctx.obuf), s.pos())) + if hasComment { + commentLen := orgOptLen - len(opt) + headerPos := strings.Index(string(ctx.obuf), ">") + foldedBuf := ctx.obuf[:len(ctx.obuf)-commentLen-headerPos] + commentBuf := ctx.obuf[len(foldedBuf):] + ctx.addToken(token.Folded(">"+opt, string(foldedBuf), s.pos())) + s.column += len(foldedBuf) + s.offset += len(foldedBuf) + commentHeader := strings.Index(value, "#") + ctx.addToken(token.Comment(string(value[commentHeader+1:]), string(commentBuf), s.pos())) + } else { + ctx.addToken(token.Folded(">"+opt, string(ctx.obuf), s.pos())) + } ctx.isFolded = true } s.indentState = IndentStateKeep @@ -498,6 +615,16 @@ func (s *Scanner) scanNewLine(ctx *Context, c rune) { } } + // There is no problem that we ignore CR which followed by LF and normalize it to LF, because of following YAML1.2 spec. + // > Line breaks inside scalar content must be normalized by the YAML processor. Each such line break must be parsed into a single line feed character. + // > Outside scalar content, YAML allows any line break to be used to terminate lines. 
+ // > -- https://yaml.org/spec/1.2/spec.html + if c == '\r' && ctx.nextChar() == '\n' { + ctx.addOriginBuf('\r') + ctx.progress(1) + c = '\n' + } + if ctx.isEOS() { s.addBufferedTokenIfExists(ctx) } else if s.isAnchor { @@ -552,13 +679,13 @@ func (s *Scanner) scan(ctx *Context) (pos int) { } case '.': if s.indentNum == 0 && s.column == 1 && ctx.repeatNum('.') == 3 { - ctx.addToken(token.DocumentEnd(s.pos())) + ctx.addToken(token.DocumentEnd(string(ctx.obuf)+"...", s.pos())) s.progressColumn(ctx, 3) pos += 2 return } case '<': - if ctx.repeatNum('<') == 2 { + if s.isMergeKey(ctx) { s.prevIndentColumn = s.column ctx.addToken(token.MergeKey(string(ctx.obuf)+"<<", s.pos())) s.progressColumn(ctx, 1) @@ -568,7 +695,7 @@ case '-': if s.indentNum == 0 && s.column == 1 && ctx.repeatNum('-') == 3 { s.addBufferedTokenIfExists(ctx) - ctx.addToken(token.DocumentHeader(s.pos())) + ctx.addToken(token.DocumentHeader(string(ctx.obuf)+"---", s.pos())) s.progressColumn(ctx, 3) pos += 2 return @@ -631,6 +758,12 @@ func (s *Scanner) scan(ctx *Context) (pos int) { if tk != nil { s.prevIndentColumn = tk.Position.Column ctx.addToken(tk) + } else if tk := ctx.lastToken(); tk != nil { + // If the map key is quoted, the buffer does not exist because it has already been cut into tokens. + // Therefore, we need to check the last token. + if tk.Indicator == token.QuotedScalarIndicator { + s.prevIndentColumn = tk.Position.Column + } } ctx.addToken(token.MappingValue(s.pos())) s.progressColumn(ctx, 1) @@ -660,7 +793,7 @@ func (s *Scanner) scan(ctx *Context) (pos int) { } case '%': if !ctx.existsBuffer() && s.indentNum == 0 { - ctx.addToken(token.Directive(s.pos())) + ctx.addToken(token.Directive(string(ctx.obuf)+"%", s.pos())) s.progressColumn(ctx, 1) return } @@ -702,20 +835,15 @@ func (s *Scanner) scan(ctx *Context) (pos int) { if !ctx.existsBuffer() { token, progress := s.scanQuote(ctx, c) ctx.addToken(token) - s.progressColumn(ctx, progress) pos += progress + // If the non-whitespace character immediately following the quote is ':', the quote should be treated as a map key. + // Therefore, do not return here; continue so it is processed as a normal map key. + if ctx.currentCharWithSkipWhitespace() == ':' { + continue + } return } case '\r', '\n': - // There is no problem that we ignore CR which followed by LF and normalize it to LF, because of following YAML1.2 spec. - // > Line breaks inside scalar content must be normalized by the YAML processor. Each such line break must be parsed into a single line feed character. - // > Outside scalar content, YAML allows any line break to be used to terminate lines. - // > -- https://yaml.org/spec/1.2/spec.html - if c == '\r' && ctx.nextChar() == '\n' { - ctx.addOriginBuf('\r') - ctx.progress(1) - c = '\n' - } s.scanNewLine(ctx, c) continue case ' ': @@ -731,7 +859,7 @@ func (s *Scanner) scan(ctx *Context) (pos int) { continue } s.addBufferedTokenIfExists(ctx) - s.progressColumn(ctx, 1) + pos-- // rescan this whitespace on the next pass so it is added to the next buffer.
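// Illustrative sketch, not part of the vendored diff: with the scanQuote
// handling above, a quoted scalar whose next non-whitespace character is ':'
// is treated as a mapping key.
package main

import (
	"fmt"

	yaml "github.com/goccy/go-yaml"
)

func main() {
	var v map[string]string
	if err := yaml.Unmarshal([]byte(`"a:b": value`), &v); err != nil {
		panic(err)
	}
	fmt.Println(v["a:b"]) // value
}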
s.isAnchor = false return } diff --git a/vendor/github.com/goccy/go-yaml/stdlib_quote.go b/vendor/github.com/goccy/go-yaml/stdlib_quote.go new file mode 100644 index 000000000..be50ae612 --- /dev/null +++ b/vendor/github.com/goccy/go-yaml/stdlib_quote.go @@ -0,0 +1,103 @@ +// Copied and trimmed down from https://github.com/golang/go/blob/e3769299cd3484e018e0e2a6e1b95c2b18ce4f41/src/strconv/quote.go +// We want to use the standard library's private "quoteWith" function rather than write our own so that we get robust unicode support. +// Every private function called by quoteWith was copied. +// There are 2 modifications to simplify the code: +// 1. The unicode.IsPrint function was substituted for the custom implementation of IsPrint +// 2. All code paths reachable only when ASCIIonly or graphicOnly are set to true were removed. + +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package yaml + +import ( + "unicode" + "unicode/utf8" +) + +const ( + lowerhex = "0123456789abcdef" +) + +func quoteWith(s string, quote byte) string { + return string(appendQuotedWith(make([]byte, 0, 3*len(s)/2), s, quote)) } + +func appendQuotedWith(buf []byte, s string, quote byte) []byte { + // Often called with big strings, so preallocate. If there's quoting, + // this is conservative but still helps a lot. + if cap(buf)-len(buf) < len(s) { + nBuf := make([]byte, len(buf), len(buf)+1+len(s)+1) + copy(nBuf, buf) + buf = nBuf + } + buf = append(buf, quote) + for width := 0; len(s) > 0; s = s[width:] { + r := rune(s[0]) + width = 1 + if r >= utf8.RuneSelf { + r, width = utf8.DecodeRuneInString(s) + } + if width == 1 && r == utf8.RuneError { + buf = append(buf, `\x`...) + buf = append(buf, lowerhex[s[0]>>4]) + buf = append(buf, lowerhex[s[0]&0xF]) + continue + } + buf = appendEscapedRune(buf, r, quote) + } + buf = append(buf, quote) + return buf +} + +func appendEscapedRune(buf []byte, r rune, quote byte) []byte { + var runeTmp [utf8.UTFMax]byte + if r == rune(quote) || r == '\\' { // always backslashed + buf = append(buf, '\\') + buf = append(buf, byte(r)) + return buf + } + if unicode.IsPrint(r) { + n := utf8.EncodeRune(runeTmp[:], r) + buf = append(buf, runeTmp[:n]...) + return buf + } + switch r { + case '\a': + buf = append(buf, `\a`...) + case '\b': + buf = append(buf, `\b`...) + case '\f': + buf = append(buf, `\f`...) + case '\n': + buf = append(buf, `\n`...) + case '\r': + buf = append(buf, `\r`...) + case '\t': + buf = append(buf, `\t`...) + case '\v': + buf = append(buf, `\v`...) + default: + switch { + case r < ' ': + buf = append(buf, `\x`...) + buf = append(buf, lowerhex[byte(r)>>4]) + buf = append(buf, lowerhex[byte(r)&0xF]) + case r > utf8.MaxRune: + r = 0xFFFD + fallthrough + case r < 0x10000: + buf = append(buf, `\u`...) + for s := 12; s >= 0; s -= 4 { + buf = append(buf, lowerhex[r>>uint(s)&0xF]) + } + default: + buf = append(buf, `\U`...)
+ for s := 28; s >= 0; s -= 4 { + buf = append(buf, lowerhex[r>>uint(s)&0xF]) + } + } + } + return buf +} diff --git a/vendor/github.com/goccy/go-yaml/struct.go b/vendor/github.com/goccy/go-yaml/struct.go index ac78f3139..a3da8ddd3 100644 --- a/vendor/github.com/goccy/go-yaml/struct.go +++ b/vendor/github.com/goccy/go-yaml/struct.go @@ -95,7 +95,7 @@ type StructFieldMap map[string]*StructField func (m StructFieldMap) isIncludedRenderName(name string) bool { for _, v := range m { - if v.RenderName == name { + if !v.IsInline && v.RenderName == name { return true } } diff --git a/vendor/github.com/goccy/go-yaml/token/token.go b/vendor/github.com/goccy/go-yaml/token/token.go index 054b9f1d0..14d76220b 100644 --- a/vendor/github.com/goccy/go-yaml/token/token.go +++ b/vendor/github.com/goccy/go-yaml/token/token.go @@ -12,41 +12,41 @@ const ( // SequenceEntryCharacter character for sequence entry SequenceEntryCharacter Character = '-' // MappingKeyCharacter character for mapping key - MappingKeyCharacter = '?' + MappingKeyCharacter Character = '?' // MappingValueCharacter character for mapping value - MappingValueCharacter = ':' + MappingValueCharacter Character = ':' // CollectEntryCharacter character for collect entry - CollectEntryCharacter = ',' + CollectEntryCharacter Character = ',' // SequenceStartCharacter character for sequence start - SequenceStartCharacter = '[' + SequenceStartCharacter Character = '[' // SequenceEndCharacter character for sequence end - SequenceEndCharacter = ']' + SequenceEndCharacter Character = ']' // MappingStartCharacter character for mapping start - MappingStartCharacter = '{' + MappingStartCharacter Character = '{' // MappingEndCharacter character for mapping end - MappingEndCharacter = '}' + MappingEndCharacter Character = '}' // CommentCharacter character for comment - CommentCharacter = '#' + CommentCharacter Character = '#' // AnchorCharacter character for anchor - AnchorCharacter = '&' + AnchorCharacter Character = '&' // AliasCharacter character for alias - AliasCharacter = '*' + AliasCharacter Character = '*' // TagCharacter character for tag - TagCharacter = '!' + TagCharacter Character = '!' // LiteralCharacter character for literal - LiteralCharacter = '|' + LiteralCharacter Character = '|' // FoldedCharacter character for folded - FoldedCharacter = '>' + FoldedCharacter Character = '>' // SingleQuoteCharacter character for single quote - SingleQuoteCharacter = '\'' + SingleQuoteCharacter Character = '\'' // DoubleQuoteCharacter character for double quote - DoubleQuoteCharacter = '"' + DoubleQuoteCharacter Character = '"' // DirectiveCharacter character for directive - DirectiveCharacter = '%' + DirectiveCharacter Character = '%' // SpaceCharacter character for space - SpaceCharacter = ' ' + SpaceCharacter Character = ' ' // LineBreakCharacter character for line break - LineBreakCharacter = '\n' + LineBreakCharacter Character = '\n' ) // Type type identifier for token @@ -283,6 +283,28 @@ var ( "False", "FALSE", } + // For compatibility with other YAML 1.1 parsers. + // Note that we use these solely for encoding the bool value with quotes. + // go-yaml should not treat these as reserved keywords at parsing time, + // as go-yaml is supposed to be compliant only with YAML 1.2.
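// Illustrative sketch, not part of the vendored diff: the legacy keyword list
// that follows, together with the IsNeedQuoted additions further below, makes
// the encoder quote strings such as "yes", "on", or "@channel" so that YAML
// 1.1 parsers do not misread them. The exact quoting style is encoder-defined.
package main

import (
	"fmt"

	yaml "github.com/goccy/go-yaml"
)

func main() {
	out, err := yaml.Marshal(map[string]string{"enabled": "yes"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // the value is emitted quoted, e.g. enabled: "yes"
}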
+ reservedLegacyBoolKeywords = []string{ + "y", + "Y", + "yes", + "Yes", + "YES", + "n", + "N", + "no", + "No", + "NO", + "on", + "On", + "ON", + "off", + "Off", + "OFF", + } reservedInfKeywords = []string{ ".inf", ".Inf", @@ -297,6 +319,11 @@ ".NAN", } reservedKeywordMap = map[string]func(string, string, *Position) *Token{} + // reservedEncKeywordMap is the keyword map used at encoding time. + // This is supposed to be a superset of reservedKeywordMap, + // and used to quote legacy keywords present in YAML 1.1 or earlier for compatibility reasons, + // even though this library is supposed to be YAML 1.2-compliant. + reservedEncKeywordMap = map[string]func(string, string, *Position) *Token{} ) func reservedKeywordToken(typ Type, value, org string, pos *Position) *Token { @@ -317,7 +344,14 @@ func init() { } } for _, keyword := range reservedBoolKeywords { - reservedKeywordMap[keyword] = func(value, org string, pos *Position) *Token { + f := func(value, org string, pos *Position) *Token { + return reservedKeywordToken(BoolType, value, org, pos) + } + reservedKeywordMap[keyword] = f + reservedEncKeywordMap[keyword] = f + } + for _, keyword := range reservedLegacyBoolKeywords { + reservedEncKeywordMap[keyword] = func(value, org string, pos *Position) *Token { return reservedKeywordToken(BoolType, value, org, pos) } } @@ -581,7 +615,7 @@ func IsNeedQuoted(value string) bool { if value == "" { return true } - if _, exists := reservedKeywordMap[value]; exists { + if _, exists := reservedEncKeywordMap[value]; exists { return true } if stat := getNumberStat(value); stat.isNum { @@ -589,12 +623,12 @@ } first := value[0] switch first { - case '*', '&', '[', '{', '}', ']', ',', '!', '|', '>', '%', '\'', '"': + case '*', '&', '[', '{', '}', ']', ',', '!', '|', '>', '%', '\'', '"', '@', ' ', '`': return true } last := value[len(value)-1] switch last { - case ':': + case ':', ' ': return true } if looksLikeTimeValue(value) { @@ -961,13 +995,13 @@ func DoubleQuote(value string, org string, pos *Position) *Token { } // Directive create token for Directive -func Directive(pos *Position) *Token { +func Directive(org string, pos *Position) *Token { return &Token{ Type: DirectiveType, CharacterType: CharacterTypeIndicator, Indicator: DirectiveIndicator, Value: string(DirectiveCharacter), - Origin: string(DirectiveCharacter), + Origin: org, Position: pos, } } @@ -997,25 +1031,25 @@ func MergeKey(org string, pos *Position) *Token { } // DocumentHeader create token for DocumentHeader -func DocumentHeader(pos *Position) *Token { +func DocumentHeader(org string, pos *Position) *Token { return &Token{ Type: DocumentHeaderType, CharacterType: CharacterTypeMiscellaneous, Indicator: NotIndicator, Value: "---", - Origin: "---", + Origin: org, Position: pos, } } // DocumentEnd create token for DocumentEnd -func DocumentEnd(pos *Position) *Token { +func DocumentEnd(org string, pos *Position) *Token { return &Token{ Type: DocumentEndType, CharacterType: CharacterTypeMiscellaneous, Indicator: NotIndicator, Value: "...", - Origin: "...", + Origin: org, Position: pos, } } diff --git a/vendor/github.com/goccy/go-yaml/yaml.go b/vendor/github.com/goccy/go-yaml/yaml.go index ad404cf5b..25b1056fe 100644 --- a/vendor/github.com/goccy/go-yaml/yaml.go +++ b/vendor/github.com/goccy/go-yaml/yaml.go @@ -4,6 +4,8 @@ import ( "bytes" "context" "io" + "reflect" + "sync" "github.com/goccy/go-yaml/ast" "github.com/goccy/go-yaml/internal/errors" @@ -87,54 +89,53 @@ func (s MapSlice)
ToMap() map[interface{}]interface{} { // // The field tag format accepted is: // -// `(...) yaml:"[][,[,]]" (...)` +// `(...) yaml:"[][,[,]]" (...)` // // The following flags are currently supported: // -// omitempty Only include the field if it's not set to the zero -// value for the type or to empty slices or maps. -// Zero valued structs will be omitted if all their public -// fields are zero, unless they implement an IsZero -// method (see the IsZeroer interface type), in which -// case the field will be included if that method returns true. +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be included if that method returns true. // -// flow Marshal using a flow style (useful for structs, -// sequences and maps). +// flow Marshal using a flow style (useful for structs, +// sequences and maps). // -// inline Inline the field, which must be a struct or a map, -// causing all of its fields or keys to be processed as if -// they were part of the outer struct. For maps, keys must -// not conflict with the yaml keys of other struct fields. +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. // -// anchor Marshal with anchor. If want to define anchor name explicitly, use anchor=name style. -// Otherwise, if used 'anchor' name only, used the field name lowercased as the anchor name +// anchor Marshal with anchor. If want to define anchor name explicitly, use anchor=name style. +// Otherwise, if used 'anchor' name only, used the field name lowercased as the anchor name // -// alias Marshal with alias. If want to define alias name explicitly, use alias=name style. -// Otherwise, If omitted alias name and the field type is pointer type, -// assigned anchor name automatically from same pointer address. +// alias Marshal with alias. If want to define alias name explicitly, use alias=name style. +// Otherwise, If omitted alias name and the field type is pointer type, +// assigned anchor name automatically from same pointer address. // // In addition, if the key is "-", the field is ignored. // // For example: // -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" -// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n" -// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n" func Marshal(v interface{}) ([]byte, error) { return MarshalWithOptions(v) } // MarshalWithOptions serializes the value provided into a YAML document with EncodeOptions. func MarshalWithOptions(v interface{}, opts ...EncodeOption) ([]byte, error) { - return MarshalWithContext(context.Background(), v, opts...) + return MarshalContext(context.Background(), v, opts...) } -// MarshalWithContext serializes the value provided into a YAML document with context.Context and EncodeOptions. -func MarshalWithContext(ctx context.Context, v interface{}, opts ...EncodeOption) ([]byte, error) { +// MarshalContext serializes the value provided into a YAML document with context.Context and EncodeOptions. 
+func MarshalContext(ctx context.Context, v interface{}, opts ...EncodeOption) ([]byte, error) { var buf bytes.Buffer if err := NewEncoder(&buf, opts...).EncodeContext(ctx, v); err != nil { return nil, errors.Wrapf(err, "failed to marshal") @@ -165,16 +166,15 @@ func ValueToNode(v interface{}, opts ...EncodeOption) (ast.Node, error) { // // For example: // -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// var t T -// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) // // See the documentation of Marshal for the format of tags and a list of // supported tag options. -// func Unmarshal(data []byte, v interface{}) error { return UnmarshalWithOptions(data, v) } @@ -182,11 +182,11 @@ func Unmarshal(data []byte, v interface{}) error { // UnmarshalWithOptions decodes with DecodeOptions the first document found within the in byte slice // and assigns decoded values into the out value. func UnmarshalWithOptions(data []byte, v interface{}, opts ...DecodeOption) error { - return UnmarshalWithContext(context.Background(), data, v, opts...) + return UnmarshalContext(context.Background(), data, v, opts...) } -// UnmarshalWithContext decodes with context.Context and DecodeOptions. -func UnmarshalWithContext(ctx context.Context, data []byte, v interface{}, opts ...DecodeOption) error { +// UnmarshalContext decodes with context.Context and DecodeOptions. +func UnmarshalContext(ctx context.Context, data []byte, v interface{}, opts ...DecodeOption) error { dec := NewDecoder(bytes.NewBuffer(data), opts...) if err := dec.DecodeContext(ctx, v); err != nil { if err == io.EOF { @@ -197,6 +197,15 @@ func UnmarshalWithContext(ctx context.Context, data []byte, v interface{}, opts return nil } +// NodeToValue converts node to the value pointed to by v. +func NodeToValue(node ast.Node, v interface{}, opts ...DecodeOption) error { + var buf bytes.Buffer + if err := NewDecoder(&buf, opts...).DecodeFromNode(node, v); err != nil { + return errors.Wrapf(err, "failed to convert node to value") + } + return nil +} + // FormatError is a utility function that takes advantage of the metadata // stored in the errors returned by this package's parser. // @@ -239,3 +248,41 @@ func JSONToYAML(bytes []byte) ([]byte, error) { } return out, nil } + +var ( + globalCustomMarshalerMu sync.Mutex + globalCustomUnmarshalerMu sync.Mutex + globalCustomMarshalerMap = map[reflect.Type]func(interface{}) ([]byte, error){} + globalCustomUnmarshalerMap = map[reflect.Type]func(interface{}, []byte) error{} +) + +// RegisterCustomMarshaler overrides any encoding process for the type specified in generics. +// If you want to switch the behavior for each encoder, use `CustomMarshaler` defined as EncodeOption. +// +// NOTE: If type T implements MarshalYAML for pointer receiver, the type specified in RegisterCustomMarshaler must be *T. +// If RegisterCustomMarshaler and CustomMarshaler of EncodeOption are specified for the same type, +// the CustomMarshaler specified in EncodeOption takes precedence. +func RegisterCustomMarshaler[T any](marshaler func(T) ([]byte, error)) { + globalCustomMarshalerMu.Lock() + defer globalCustomMarshalerMu.Unlock() + + var typ T + globalCustomMarshalerMap[reflect.TypeOf(typ)] = func(v interface{}) ([]byte, error) { + return marshaler(v.(T)) + } +} + +// RegisterCustomUnmarshaler overrides any decoding process for the type specified in generics. 
+// If you want to switch the behavior for each decoder, use `CustomUnmarshaler` defined as DecodeOption. +// +// NOTE: If RegisterCustomUnmarshaler and CustomUnmarshaler of DecodeOption are specified for the same type, +// the CustomUnmarshaler specified in DecodeOption takes precedence. +func RegisterCustomUnmarshaler[T any](unmarshaler func(*T, []byte) error) { + globalCustomUnmarshalerMu.Lock() + defer globalCustomUnmarshalerMu.Unlock() + + var typ *T + globalCustomUnmarshalerMap[reflect.TypeOf(typ)] = func(v interface{}, b []byte) error { + return unmarshaler(v.(*T), b) + } +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go deleted file mode 100644 index c6f66f103..000000000 --- a/vendor/github.com/golang/protobuf/jsonpb/decode.go +++ /dev/null @@ -1,531 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package jsonpb - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/encoding/protojson" - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -const wrapJSONUnmarshalV2 = false - -// UnmarshalNext unmarshals the next JSON object from d into m. -func UnmarshalNext(d *json.Decoder, m proto.Message) error { - return new(Unmarshaler).UnmarshalNext(d, m) -} - -// Unmarshal unmarshals a JSON object from r into m. -func Unmarshal(r io.Reader, m proto.Message) error { - return new(Unmarshaler).Unmarshal(r, m) -} - -// UnmarshalString unmarshals a JSON object from s into m. -func UnmarshalString(s string, m proto.Message) error { - return new(Unmarshaler).Unmarshal(strings.NewReader(s), m) -} - -// Unmarshaler is a configurable object for converting from a JSON -// representation to a protocol buffer object. -type Unmarshaler struct { - // AllowUnknownFields specifies whether to allow messages to contain - // unknown JSON fields, as opposed to failing to unmarshal. - AllowUnknownFields bool - - // AnyResolver is used to resolve the google.protobuf.Any well-known type. - // If unset, the global registry is used by default. - AnyResolver AnyResolver -} - -// JSONPBUnmarshaler is implemented by protobuf messages that customize the way -// they are unmarshaled from JSON. Messages that implement this should also -// implement JSONPBMarshaler so that the custom format can be produced. -// -// The JSON unmarshaling must follow the JSON to proto specification: -// -// https://developers.google.com/protocol-buffers/docs/proto3#json -// -// Deprecated: Custom types should implement protobuf reflection instead. -type JSONPBUnmarshaler interface { - UnmarshalJSONPB(*Unmarshaler, []byte) error -} - -// Unmarshal unmarshals a JSON object from r into m. -func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error { - return u.UnmarshalNext(json.NewDecoder(r), m) -} - -// UnmarshalNext unmarshals the next JSON object from d into m. -func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error { - if m == nil { - return errors.New("invalid nil message") - } - - // Parse the next JSON object from the stream. 
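// Illustrative sketch, not part of the vendored diff: using the generic
// registration hooks defined above. Registering time.Duration this way is a
// hypothetical example; the encoder consults the global map at Marshal time.
package main

import (
	"fmt"
	"time"

	yaml "github.com/goccy/go-yaml"
)

func main() {
	yaml.RegisterCustomMarshaler[time.Duration](func(d time.Duration) ([]byte, error) {
		return []byte(d.String()), nil
	})
	out, err := yaml.Marshal(map[string]time.Duration{"timeout": 90 * time.Second})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // expected: timeout: 1m30s
}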
- raw := json.RawMessage{} - if err := d.Decode(&raw); err != nil { - return err - } - - // Check for custom unmarshalers first since they may not properly - // implement protobuf reflection that the logic below relies on. - if jsu, ok := m.(JSONPBUnmarshaler); ok { - return jsu.UnmarshalJSONPB(u, raw) - } - - mr := proto.MessageReflect(m) - - // NOTE: For historical reasons, a top-level null is treated as a noop. - // This is incorrect, but kept for compatibility. - if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" { - return nil - } - - if wrapJSONUnmarshalV2 { - // NOTE: If input message is non-empty, we need to preserve merge semantics - // of the old jsonpb implementation. These semantics are not supported by - // the protobuf JSON specification. - isEmpty := true - mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool { - isEmpty = false // at least one iteration implies non-empty - return false - }) - if !isEmpty { - // Perform unmarshaling into a newly allocated, empty message. - mr = mr.New() - - // Use a defer to copy all unmarshaled fields into the original message. - dst := proto.MessageReflect(m) - defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - dst.Set(fd, v) - return true - }) - } - - // Unmarshal using the v2 JSON unmarshaler. - opts := protojson.UnmarshalOptions{ - DiscardUnknown: u.AllowUnknownFields, - } - if u.AnyResolver != nil { - opts.Resolver = anyResolver{u.AnyResolver} - } - return opts.Unmarshal(raw, mr.Interface()) - } else { - if err := u.unmarshalMessage(mr, raw); err != nil { - return err - } - return protoV2.CheckInitialized(mr.Interface()) - } -} - -func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error { - md := m.Descriptor() - fds := md.Fields() - - if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok { - return jsu.UnmarshalJSONPB(u, in) - } - - if string(in) == "null" && md.FullName() != "google.protobuf.Value" { - return nil - } - - switch wellKnownType(md.FullName()) { - case "Any": - var jsonObject map[string]json.RawMessage - if err := json.Unmarshal(in, &jsonObject); err != nil { - return err - } - - rawTypeURL, ok := jsonObject["@type"] - if !ok { - return errors.New("Any JSON doesn't have '@type'") - } - typeURL, err := unquoteString(string(rawTypeURL)) - if err != nil { - return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL) - } - m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL)) - - var m2 protoreflect.Message - if u.AnyResolver != nil { - mi, err := u.AnyResolver.Resolve(typeURL) - if err != nil { - return err - } - m2 = proto.MessageReflect(mi) - } else { - mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) - if err != nil { - if err == protoregistry.NotFound { - return fmt.Errorf("could not resolve Any message type: %v", typeURL) - } - return err - } - m2 = mt.New() - } - - if wellKnownType(m2.Descriptor().FullName()) != "" { - rawValue, ok := jsonObject["value"] - if !ok { - return errors.New("Any JSON doesn't have 'value'") - } - if err := u.unmarshalMessage(m2, rawValue); err != nil { - return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) - } - } else { - delete(jsonObject, "@type") - rawJSON, err := json.Marshal(jsonObject) - if err != nil { - return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) - } - if err = u.unmarshalMessage(m2, rawJSON); err != nil { - return fmt.Errorf("can't unmarshal Any nested proto %v: %v", 
typeURL, err) - } - } - - rawWire, err := protoV2.Marshal(m2.Interface()) - if err != nil { - return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err) - } - m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire)) - return nil - case "BoolValue", "BytesValue", "StringValue", - "Int32Value", "UInt32Value", "FloatValue", - "Int64Value", "UInt64Value", "DoubleValue": - fd := fds.ByNumber(1) - v, err := u.unmarshalValue(m.NewField(fd), in, fd) - if err != nil { - return err - } - m.Set(fd, v) - return nil - case "Duration": - v, err := unquoteString(string(in)) - if err != nil { - return err - } - d, err := time.ParseDuration(v) - if err != nil { - return fmt.Errorf("bad Duration: %v", err) - } - - sec := d.Nanoseconds() / 1e9 - nsec := d.Nanoseconds() % 1e9 - m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) - m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) - return nil - case "Timestamp": - v, err := unquoteString(string(in)) - if err != nil { - return err - } - t, err := time.Parse(time.RFC3339Nano, v) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - - sec := t.Unix() - nsec := t.Nanosecond() - m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) - m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) - return nil - case "Value": - switch { - case string(in) == "null": - m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0)) - case string(in) == "true": - m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true)) - case string(in) == "false": - m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false)) - case hasPrefixAndSuffix('"', in, '"'): - s, err := unquoteString(string(in)) - if err != nil { - return fmt.Errorf("unrecognized type for Value %q", in) - } - m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s)) - case hasPrefixAndSuffix('[', in, ']'): - v := m.Mutable(fds.ByNumber(6)) - return u.unmarshalMessage(v.Message(), in) - case hasPrefixAndSuffix('{', in, '}'): - v := m.Mutable(fds.ByNumber(5)) - return u.unmarshalMessage(v.Message(), in) - default: - f, err := strconv.ParseFloat(string(in), 0) - if err != nil { - return fmt.Errorf("unrecognized type for Value %q", in) - } - m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f)) - } - return nil - case "ListValue": - var jsonArray []json.RawMessage - if err := json.Unmarshal(in, &jsonArray); err != nil { - return fmt.Errorf("bad ListValue: %v", err) - } - - lv := m.Mutable(fds.ByNumber(1)).List() - for _, raw := range jsonArray { - ve := lv.NewElement() - if err := u.unmarshalMessage(ve.Message(), raw); err != nil { - return err - } - lv.Append(ve) - } - return nil - case "Struct": - var jsonObject map[string]json.RawMessage - if err := json.Unmarshal(in, &jsonObject); err != nil { - return fmt.Errorf("bad StructValue: %v", err) - } - - mv := m.Mutable(fds.ByNumber(1)).Map() - for key, raw := range jsonObject { - kv := protoreflect.ValueOf(key).MapKey() - vv := mv.NewValue() - if err := u.unmarshalMessage(vv.Message(), raw); err != nil { - return fmt.Errorf("bad value in StructValue for key %q: %v", key, err) - } - mv.Set(kv, vv) - } - return nil - } - - var jsonObject map[string]json.RawMessage - if err := json.Unmarshal(in, &jsonObject); err != nil { - return err - } - - // Handle known fields. - for i := 0; i < fds.Len(); i++ { - fd := fds.Get(i) - if fd.IsWeak() && fd.Message().IsPlaceholder() { - continue // weak reference is not linked in - } - - // Search for any raw JSON value associated with this field. 
- var raw json.RawMessage - name := string(fd.Name()) - if fd.Kind() == protoreflect.GroupKind { - name = string(fd.Message().Name()) - } - if v, ok := jsonObject[name]; ok { - delete(jsonObject, name) - raw = v - } - name = string(fd.JSONName()) - if v, ok := jsonObject[name]; ok { - delete(jsonObject, name) - raw = v - } - - field := m.NewField(fd) - // Unmarshal the field value. - if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { - continue - } - v, err := u.unmarshalValue(field, raw, fd) - if err != nil { - return err - } - m.Set(fd, v) - } - - // Handle extension fields. - for name, raw := range jsonObject { - if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") { - continue - } - - // Resolve the extension field by name. - xname := protoreflect.FullName(name[len("[") : len(name)-len("]")]) - xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) - if xt == nil && isMessageSet(md) { - xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) - } - if xt == nil { - continue - } - delete(jsonObject, name) - fd := xt.TypeDescriptor() - if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { - return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName()) - } - - field := m.NewField(fd) - // Unmarshal the field value. - if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { - continue - } - v, err := u.unmarshalValue(field, raw, fd) - if err != nil { - return err - } - m.Set(fd, v) - } - - if !u.AllowUnknownFields && len(jsonObject) > 0 { - for name := range jsonObject { - return fmt.Errorf("unknown field %q in %v", name, md.FullName()) - } - } - return nil -} - -func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { - if fd.Cardinality() == protoreflect.Repeated { - return false - } - if md := fd.Message(); md != nil { - return md.FullName() == "google.protobuf.Value" - } - if ed := fd.Enum(); ed != nil { - return ed.FullName() == "google.protobuf.NullValue" - } - return false -} - -func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool { - if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated { - _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler) - return ok - } - return false -} - -func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { - switch { - case fd.IsList(): - var jsonArray []json.RawMessage - if err := json.Unmarshal(in, &jsonArray); err != nil { - return v, err - } - lv := v.List() - for _, raw := range jsonArray { - ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd) - if err != nil { - return v, err - } - lv.Append(ve) - } - return v, nil - case fd.IsMap(): - var jsonObject map[string]json.RawMessage - if err := json.Unmarshal(in, &jsonObject); err != nil { - return v, err - } - kfd := fd.MapKey() - vfd := fd.MapValue() - mv := v.Map() - for key, raw := range jsonObject { - var kv protoreflect.MapKey - if kfd.Kind() == protoreflect.StringKind { - kv = protoreflect.ValueOf(key).MapKey() - } else { - v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd) - if err != nil { - return v, err - } - kv = v.MapKey() - } - - vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd) - if err != nil { - return v, err - } - mv.Set(kv, vv) - } - return v, nil - default: - 
return u.unmarshalSingularValue(v, in, fd) - } -} - -var nonFinite = map[string]float64{ - `"NaN"`: math.NaN(), - `"Infinity"`: math.Inf(+1), - `"-Infinity"`: math.Inf(-1), -} - -func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { - switch fd.Kind() { - case protoreflect.BoolKind: - return unmarshalValue(in, new(bool)) - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: - return unmarshalValue(trimQuote(in), new(int32)) - case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - return unmarshalValue(trimQuote(in), new(int64)) - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: - return unmarshalValue(trimQuote(in), new(uint32)) - case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - return unmarshalValue(trimQuote(in), new(uint64)) - case protoreflect.FloatKind: - if f, ok := nonFinite[string(in)]; ok { - return protoreflect.ValueOfFloat32(float32(f)), nil - } - return unmarshalValue(trimQuote(in), new(float32)) - case protoreflect.DoubleKind: - if f, ok := nonFinite[string(in)]; ok { - return protoreflect.ValueOfFloat64(float64(f)), nil - } - return unmarshalValue(trimQuote(in), new(float64)) - case protoreflect.StringKind: - return unmarshalValue(in, new(string)) - case protoreflect.BytesKind: - return unmarshalValue(in, new([]byte)) - case protoreflect.EnumKind: - if hasPrefixAndSuffix('"', in, '"') { - vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in))) - if vd == nil { - return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName()) - } - return protoreflect.ValueOfEnum(vd.Number()), nil - } - return unmarshalValue(in, new(protoreflect.EnumNumber)) - case protoreflect.MessageKind, protoreflect.GroupKind: - err := u.unmarshalMessage(v.Message(), in) - return v, err - default: - panic(fmt.Sprintf("invalid kind %v", fd.Kind())) - } -} - -func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) { - err := json.Unmarshal(in, v) - return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err -} - -func unquoteString(in string) (out string, err error) { - err = json.Unmarshal([]byte(in), &out) - return out, err -} - -func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool { - if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix { - return true - } - return false -} - -// trimQuote is like unquoteString but simply strips surrounding quotes. -// This is incorrect, but is behavior done by the legacy implementation. -func trimQuote(in []byte) []byte { - if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' { - in = in[1 : len(in)-1] - } - return in -} diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go deleted file mode 100644 index e9438a93f..000000000 --- a/vendor/github.com/golang/protobuf/jsonpb/encode.go +++ /dev/null @@ -1,560 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package jsonpb - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "math" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/encoding/protojson" - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -const wrapJSONMarshalV2 = false - -// Marshaler is a configurable object for marshaling protocol buffer messages -// to the specified JSON representation. -type Marshaler struct { - // OrigName specifies whether to use the original protobuf name for fields. - OrigName bool - - // EnumsAsInts specifies whether to render enum values as integers, - // as opposed to string values. - EnumsAsInts bool - - // EmitDefaults specifies whether to render fields with zero values. - EmitDefaults bool - - // Indent controls whether the output is compact or not. - // If empty, the output is compact JSON. Otherwise, every JSON object - // entry and JSON array value will be on its own line. - // Each line will be preceded by repeated copies of Indent, where the - // number of copies is the current indentation depth. - Indent string - - // AnyResolver is used to resolve the google.protobuf.Any well-known type. - // If unset, the global registry is used by default. - AnyResolver AnyResolver -} - -// JSONPBMarshaler is implemented by protobuf messages that customize the -// way they are marshaled to JSON. Messages that implement this should also -// implement JSONPBUnmarshaler so that the custom format can be parsed. -// -// The JSON marshaling must follow the proto to JSON specification: -// -// https://developers.google.com/protocol-buffers/docs/proto3#json -// -// Deprecated: Custom types should implement protobuf reflection instead. -type JSONPBMarshaler interface { - MarshalJSONPB(*Marshaler) ([]byte, error) -} - -// Marshal serializes a protobuf message as JSON into w. -func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error { - b, err := jm.marshal(m) - if len(b) > 0 { - if _, err := w.Write(b); err != nil { - return err - } - } - return err -} - -// MarshalToString serializes a protobuf message as JSON in string form. -func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) { - b, err := jm.marshal(m) - if err != nil { - return "", err - } - return string(b), nil -} - -func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) { - v := reflect.ValueOf(m) - if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { - return nil, errors.New("Marshal called with nil") - } - - // Check for custom marshalers first since they may not properly - // implement protobuf reflection that the logic below relies on. - if jsm, ok := m.(JSONPBMarshaler); ok { - return jsm.MarshalJSONPB(jm) - } - - if wrapJSONMarshalV2 { - opts := protojson.MarshalOptions{ - UseProtoNames: jm.OrigName, - UseEnumNumbers: jm.EnumsAsInts, - EmitUnpopulated: jm.EmitDefaults, - Indent: jm.Indent, - } - if jm.AnyResolver != nil { - opts.Resolver = anyResolver{jm.AnyResolver} - } - return opts.Marshal(proto.MessageReflect(m).Interface()) - } else { - // Check for unpopulated required fields first. - m2 := proto.MessageReflect(m) - if err := protoV2.CheckInitialized(m2.Interface()); err != nil { - return nil, err - } - - w := jsonWriter{Marshaler: jm} - err := w.marshalMessage(m2, "", "") - return w.buf, err - } -} - -type jsonWriter struct { - *Marshaler - buf []byte -} - -func (w *jsonWriter) write(s string) { - w.buf = append(w.buf, s...) 
-} - -func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error { - if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok { - b, err := jsm.MarshalJSONPB(w.Marshaler) - if err != nil { - return err - } - if typeURL != "" { - // we are marshaling this object to an Any type - var js map[string]*json.RawMessage - if err = json.Unmarshal(b, &js); err != nil { - return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err) - } - turl, err := json.Marshal(typeURL) - if err != nil { - return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) - } - js["@type"] = (*json.RawMessage)(&turl) - if b, err = json.Marshal(js); err != nil { - return err - } - } - w.write(string(b)) - return nil - } - - md := m.Descriptor() - fds := md.Fields() - - // Handle well-known types. - const secondInNanos = int64(time.Second / time.Nanosecond) - switch wellKnownType(md.FullName()) { - case "Any": - return w.marshalAny(m, indent) - case "BoolValue", "BytesValue", "StringValue", - "Int32Value", "UInt32Value", "FloatValue", - "Int64Value", "UInt64Value", "DoubleValue": - fd := fds.ByNumber(1) - return w.marshalValue(fd, m.Get(fd), indent) - case "Duration": - const maxSecondsInDuration = 315576000000 - // "Generated output always contains 0, 3, 6, or 9 fractional digits, - // depending on required precision." - s := m.Get(fds.ByNumber(1)).Int() - ns := m.Get(fds.ByNumber(2)).Int() - if s < -maxSecondsInDuration || s > maxSecondsInDuration { - return fmt.Errorf("seconds out of range %v", s) - } - if ns <= -secondInNanos || ns >= secondInNanos { - return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) - } - if (s > 0 && ns < 0) || (s < 0 && ns > 0) { - return errors.New("signs of seconds and nanos do not match") - } - var sign string - if s < 0 || ns < 0 { - sign, s, ns = "-", -1*s, -1*ns - } - x := fmt.Sprintf("%s%d.%09d", sign, s, ns) - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, ".000") - w.write(fmt.Sprintf(`"%vs"`, x)) - return nil - case "Timestamp": - // "RFC 3339, where generated output will always be Z-normalized - // and uses 0, 3, 6 or 9 fractional digits." - s := m.Get(fds.ByNumber(1)).Int() - ns := m.Get(fds.ByNumber(2)).Int() - if ns < 0 || ns >= secondInNanos { - return fmt.Errorf("ns out of range [0, %v)", secondInNanos) - } - t := time.Unix(s, ns).UTC() - // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). - x := t.Format("2006-01-02T15:04:05.000000000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, ".000") - w.write(fmt.Sprintf(`"%vZ"`, x)) - return nil - case "Value": - // JSON value; which is a null, number, string, bool, object, or array. - od := md.Oneofs().Get(0) - fd := m.WhichOneof(od) - if fd == nil { - return errors.New("nil Value") - } - return w.marshalValue(fd, m.Get(fd), indent) - case "Struct", "ListValue": - // JSON object or array. 
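Worth spelling out the fractional-digit rule in the Duration and Timestamp cases above: two TrimSuffix("000") passes plus a final TrimSuffix(".000") collapse nine fractional digits down to 6, 3, or 0, matching the "0, 3, 6, or 9 fractional digits" wording of the proto3 JSON spec. A small trace of that logic, extracted as a sketch:

package main

import (
	"fmt"
	"strings"
)

// trim applies the same three-step suffix trimming the deleted encoder uses.
func trim(x string) string {
	x = strings.TrimSuffix(x, "000")
	x = strings.TrimSuffix(x, "000")
	return strings.TrimSuffix(x, ".000")
}

func main() {
	fmt.Println(trim("1.500000000")) // "1.500"       -> 3 digits kept
	fmt.Println(trim("1.000000000")) // "1"           -> 0 digits kept
	fmt.Println(trim("1.000000500")) // "1.000000500" -> all 9 digits kept
}

The Struct/ListValue case resumes below, delegating to field 1 of the wrapper message.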
- fd := fds.ByNumber(1) - return w.marshalValue(fd, m.Get(fd), indent) - } - - w.write("{") - if w.Indent != "" { - w.write("\n") - } - - firstField := true - if typeURL != "" { - if err := w.marshalTypeURL(indent, typeURL); err != nil { - return err - } - firstField = false - } - - for i := 0; i < fds.Len(); { - fd := fds.Get(i) - if od := fd.ContainingOneof(); od != nil { - fd = m.WhichOneof(od) - i += od.Fields().Len() - if fd == nil { - continue - } - } else { - i++ - } - - v := m.Get(fd) - - if !m.Has(fd) { - if !w.EmitDefaults || fd.ContainingOneof() != nil { - continue - } - if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) { - v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars - } - } - - if !firstField { - w.writeComma() - } - if err := w.marshalField(fd, v, indent); err != nil { - return err - } - firstField = false - } - - // Handle proto2 extensions. - if md.ExtensionRanges().Len() > 0 { - // Collect a sorted list of all extension descriptor and values. - type ext struct { - desc protoreflect.FieldDescriptor - val protoreflect.Value - } - var exts []ext - m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - if fd.IsExtension() { - exts = append(exts, ext{fd, v}) - } - return true - }) - sort.Slice(exts, func(i, j int) bool { - return exts[i].desc.Number() < exts[j].desc.Number() - }) - - for _, ext := range exts { - if !firstField { - w.writeComma() - } - if err := w.marshalField(ext.desc, ext.val, indent); err != nil { - return err - } - firstField = false - } - } - - if w.Indent != "" { - w.write("\n") - w.write(indent) - } - w.write("}") - return nil -} - -func (w *jsonWriter) writeComma() { - if w.Indent != "" { - w.write(",\n") - } else { - w.write(",") - } -} - -func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error { - // "If the Any contains a value that has a special JSON mapping, - // it will be converted as follows: {"@type": xxx, "value": yyy}. - // Otherwise, the value will be converted into a JSON object, - // and the "@type" field will be inserted to indicate the actual data type." 
- md := m.Descriptor() - typeURL := m.Get(md.Fields().ByNumber(1)).String() - rawVal := m.Get(md.Fields().ByNumber(2)).Bytes() - - var m2 protoreflect.Message - if w.AnyResolver != nil { - mi, err := w.AnyResolver.Resolve(typeURL) - if err != nil { - return err - } - m2 = proto.MessageReflect(mi) - } else { - mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) - if err != nil { - return err - } - m2 = mt.New() - } - - if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil { - return err - } - - if wellKnownType(m2.Descriptor().FullName()) == "" { - return w.marshalMessage(m2, indent, typeURL) - } - - w.write("{") - if w.Indent != "" { - w.write("\n") - } - if err := w.marshalTypeURL(indent, typeURL); err != nil { - return err - } - w.writeComma() - if w.Indent != "" { - w.write(indent) - w.write(w.Indent) - w.write(`"value": `) - } else { - w.write(`"value":`) - } - if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil { - return err - } - if w.Indent != "" { - w.write("\n") - w.write(indent) - } - w.write("}") - return nil -} - -func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error { - if w.Indent != "" { - w.write(indent) - w.write(w.Indent) - } - w.write(`"@type":`) - if w.Indent != "" { - w.write(" ") - } - b, err := json.Marshal(typeURL) - if err != nil { - return err - } - w.write(string(b)) - return nil -} - -// marshalField writes field description and value to the Writer. -func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { - if w.Indent != "" { - w.write(indent) - w.write(w.Indent) - } - w.write(`"`) - switch { - case fd.IsExtension(): - // For message set, use the fname of the message as the extension name. - name := string(fd.FullName()) - if isMessageSet(fd.ContainingMessage()) { - name = strings.TrimSuffix(name, ".message_set_extension") - } - - w.write("[" + name + "]") - case w.OrigName: - name := string(fd.Name()) - if fd.Kind() == protoreflect.GroupKind { - name = string(fd.Message().Name()) - } - w.write(name) - default: - w.write(string(fd.JSONName())) - } - w.write(`":`) - if w.Indent != "" { - w.write(" ") - } - return w.marshalValue(fd, v, indent) -} - -func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { - switch { - case fd.IsList(): - w.write("[") - comma := "" - lv := v.List() - for i := 0; i < lv.Len(); i++ { - w.write(comma) - if w.Indent != "" { - w.write("\n") - w.write(indent) - w.write(w.Indent) - w.write(w.Indent) - } - if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil { - return err - } - comma = "," - } - if w.Indent != "" { - w.write("\n") - w.write(indent) - w.write(w.Indent) - } - w.write("]") - return nil - case fd.IsMap(): - kfd := fd.MapKey() - vfd := fd.MapValue() - mv := v.Map() - - // Collect a sorted list of all map keys and values. 
- type entry struct{ key, val protoreflect.Value } - var entries []entry - mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { - entries = append(entries, entry{k.Value(), v}) - return true - }) - sort.Slice(entries, func(i, j int) bool { - switch kfd.Kind() { - case protoreflect.BoolKind: - return !entries[i].key.Bool() && entries[j].key.Bool() - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - return entries[i].key.Int() < entries[j].key.Int() - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - return entries[i].key.Uint() < entries[j].key.Uint() - case protoreflect.StringKind: - return entries[i].key.String() < entries[j].key.String() - default: - panic("invalid kind") - } - }) - - w.write(`{`) - comma := "" - for _, entry := range entries { - w.write(comma) - if w.Indent != "" { - w.write("\n") - w.write(indent) - w.write(w.Indent) - w.write(w.Indent) - } - - s := fmt.Sprint(entry.key.Interface()) - b, err := json.Marshal(s) - if err != nil { - return err - } - w.write(string(b)) - - w.write(`:`) - if w.Indent != "" { - w.write(` `) - } - - if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil { - return err - } - comma = "," - } - if w.Indent != "" { - w.write("\n") - w.write(indent) - w.write(w.Indent) - } - w.write(`}`) - return nil - default: - return w.marshalSingularValue(fd, v, indent) - } -} - -func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { - switch { - case !v.IsValid(): - w.write("null") - return nil - case fd.Message() != nil: - return w.marshalMessage(v.Message(), indent+w.Indent, "") - case fd.Enum() != nil: - if fd.Enum().FullName() == "google.protobuf.NullValue" { - w.write("null") - return nil - } - - vd := fd.Enum().Values().ByNumber(v.Enum()) - if vd == nil || w.EnumsAsInts { - w.write(strconv.Itoa(int(v.Enum()))) - } else { - w.write(`"` + string(vd.Name()) + `"`) - } - return nil - default: - switch v.Interface().(type) { - case float32, float64: - switch { - case math.IsInf(v.Float(), +1): - w.write(`"Infinity"`) - return nil - case math.IsInf(v.Float(), -1): - w.write(`"-Infinity"`) - return nil - case math.IsNaN(v.Float()): - w.write(`"NaN"`) - return nil - } - case int64, uint64: - w.write(fmt.Sprintf(`"%d"`, v.Interface())) - return nil - } - - b, err := json.Marshal(v.Interface()) - if err != nil { - return err - } - w.write(string(b)) - return nil - } -} diff --git a/vendor/github.com/golang/protobuf/jsonpb/json.go b/vendor/github.com/golang/protobuf/jsonpb/json.go deleted file mode 100644 index 480e2448d..000000000 --- a/vendor/github.com/golang/protobuf/jsonpb/json.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package jsonpb provides functionality to marshal and unmarshal between a -// protocol buffer message and JSON. It follows the specification at -// https://developers.google.com/protocol-buffers/docs/proto3#json. -// -// Do not rely on the default behavior of the standard encoding/json package -// when called on generated message types as it does not operate correctly. -// -// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson" -// package instead. 
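Since this change drops the entire vendored jsonpb package, it is worth recording the protojson equivalents of its options; the mapping is exactly the one the wrapJSONMarshalV2/wrapJSONUnmarshalV2 paths above encoded. A minimal migration sketch, with durationpb as a convenient stand-in message (any generated message works the same way):

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	m := durationpb.New(1500 * time.Millisecond)

	// jsonpb.Marshaler{OrigName, EnumsAsInts, EmitDefaults, Indent} maps to:
	mo := protojson.MarshalOptions{
		UseProtoNames:   true,  // was OrigName
		UseEnumNumbers:  false, // was EnumsAsInts
		EmitUnpopulated: false, // was EmitDefaults
		Indent:          "  ",  // was Indent
	}
	b, err := mo.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "1.500s"

	// jsonpb.Unmarshaler{AllowUnknownFields} maps to:
	uo := protojson.UnmarshalOptions{DiscardUnknown: true}
	out := &durationpb.Duration{}
	if err := uo.Unmarshal(b, out); err != nil {
		panic(err)
	}
	fmt.Println(out.AsDuration()) // 1.5s
}

The remainder of the deleted json.go follows.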
-package jsonpb - -import ( - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - "google.golang.org/protobuf/runtime/protoimpl" -) - -// AnyResolver takes a type URL, present in an Any message, -// and resolves it into an instance of the associated message. -type AnyResolver interface { - Resolve(typeURL string) (proto.Message, error) -} - -type anyResolver struct{ AnyResolver } - -func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { - return r.FindMessageByURL(string(message)) -} - -func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { - m, err := r.Resolve(url) - if err != nil { - return nil, err - } - return protoimpl.X.MessageTypeOf(m), nil -} - -func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { - return protoregistry.GlobalTypes.FindExtensionByName(field) -} - -func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { - return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) -} - -func wellKnownType(s protoreflect.FullName) string { - if s.Parent() == "google.protobuf" { - switch s.Name() { - case "Empty", "Any", - "BoolValue", "BytesValue", "StringValue", - "Int32Value", "UInt32Value", "FloatValue", - "Int64Value", "UInt64Value", "DoubleValue", - "Duration", "Timestamp", - "NullValue", "Struct", "Value", "ListValue": - return string(s.Name()) - } - } - return "" -} - -func isMessageSet(md protoreflect.MessageDescriptor) bool { - ms, ok := md.(interface{ IsMessageSet() bool }) - return ok && ms.IsMessageSet() -} diff --git a/vendor/github.com/gorilla/websocket/.editorconfig b/vendor/github.com/gorilla/websocket/.editorconfig new file mode 100644 index 000000000..2940ec92a --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.editorconfig @@ -0,0 +1,20 @@ +; https://editorconfig.org/ + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = space +indent_size = 2 + +[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +indent_style = tab +indent_size = 4 + +[*.md] +indent_size = 4 +trim_trailing_whitespace = false + +eclint_indent_style = unset diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore index cd3fcd1ef..84039fec6 100644 --- a/vendor/github.com/gorilla/websocket/.gitignore +++ b/vendor/github.com/gorilla/websocket/.gitignore @@ -1,25 +1 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -.idea/ -*.iml +coverage.coverprofile diff --git a/vendor/github.com/gorilla/websocket/.golangci.yml b/vendor/github.com/gorilla/websocket/.golangci.yml new file mode 100644 index 000000000..34882139e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.golangci.yml @@ -0,0 +1,3 @@ +run: + skip-dirs: + - examples/*.go diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS deleted file mode 100644 index 1931f4006..000000000 --- a/vendor/github.com/gorilla/websocket/AUTHORS +++ /dev/null @@ -1,9 +0,0 @@ -# This is the official list of Gorilla WebSocket authors for copyright -# purposes. 
-# -# Please keep the list sorted. - -Gary Burd -Google LLC (https://opensource.google.com/) -Joachim Bauch - diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE index 9171c9722..bb9d80bc9 100644 --- a/vendor/github.com/gorilla/websocket/LICENSE +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -1,22 +1,27 @@ -Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. +Copyright (c) 2023 The Gorilla Authors. All rights reserved. Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: +modification, are permitted provided that the following conditions are +met: - Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/gorilla/websocket/Makefile b/vendor/github.com/gorilla/websocket/Makefile new file mode 100644 index 000000000..603a63f50 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/Makefile @@ -0,0 +1,34 @@ +GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') +GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +GO_SEC=$(shell which gosec 2> /dev/null || echo '') +GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest + +GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') +GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest + +.PHONY: golangci-lint +golangci-lint: + $(if $(GO_LINT), ,go install $(GO_LINT_URI)) + @echo "##### Running golangci-lint" + golangci-lint run -v + +.PHONY: gosec +gosec: + $(if $(GO_SEC), ,go install $(GO_SEC_URI)) + @echo "##### Running gosec" + gosec -exclude-dir examples ./... + +.PHONY: govulncheck +govulncheck: + $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) + @echo "##### Running govulncheck" + govulncheck ./... + +.PHONY: verify +verify: golangci-lint gosec govulncheck + +.PHONY: test +test: + @echo "##### Running tests" + go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md index 2517a2871..1fd5e9c4e 100644 --- a/vendor/github.com/gorilla/websocket/README.md +++ b/vendor/github.com/gorilla/websocket/README.md @@ -1,17 +1,14 @@ -# Gorilla WebSocket +# gorilla/websocket -[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) -[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket) +![testing](https://github.com/gorilla/websocket/actions/workflows/test.yml/badge.svg) +[![codecov](https://codecov.io/github/gorilla/websocket/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/websocket) +[![godoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) +[![sourcegraph](https://sourcegraph.com/github.com/gorilla/websocket/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/websocket?badge) -Gorilla WebSocket is a [Go](http://golang.org/) implementation of the -[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. +![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5) ---- - -⚠️ **[The Gorilla WebSocket Package is looking for a new maintainer](https://github.com/gorilla/websocket/issues/370)** - ---- ### Documentation @@ -20,6 +17,7 @@ Gorilla WebSocket is a [Go](http://golang.org/) implementation of the * [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) * [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) * [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) +* [Write buffer pool example](https://github.com/gorilla/websocket/tree/master/examples/bufferpool) ### Status @@ -36,4 +34,3 @@ package API is stable. 
The Gorilla WebSocket package passes the server tests in the [Autobahn Test Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). - diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go index 2efd83555..815b0ca5c 100644 --- a/vendor/github.com/gorilla/websocket/client.go +++ b/vendor/github.com/gorilla/websocket/client.go @@ -9,14 +9,18 @@ import ( "context" "crypto/tls" "errors" + "fmt" "io" - "io/ioutil" + "log" + "net" "net/http" "net/http/httptrace" "net/url" "strings" "time" + + "golang.org/x/net/proxy" ) // ErrBadHandshake is returned when the server response to opening handshake is @@ -224,6 +228,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h k == "Connection" || k == "Sec-Websocket-Key" || k == "Sec-Websocket-Version" || + //#nosec G101 (CWE-798): Potential HTTP request smuggling via parameter pollution k == "Sec-Websocket-Extensions" || (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) @@ -289,7 +294,9 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h } err = c.SetDeadline(deadline) if err != nil { - c.Close() + if err := c.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } return nil, err } return c, nil @@ -303,7 +310,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h return nil, nil, err } if proxyURL != nil { - dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial)) + dialer, err := proxy.FromURL(proxyURL, netDialerFunc(netDial)) if err != nil { return nil, nil, err } @@ -318,18 +325,20 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h } netConn, err := netDial("tcp", hostPort) + if err != nil { + return nil, nil, err + } if trace != nil && trace.GotConn != nil { trace.GotConn(httptrace.GotConnInfo{ Conn: netConn, }) } - if err != nil { - return nil, nil, err - } defer func() { if netConn != nil { - netConn.Close() + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } } }() @@ -370,6 +379,17 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h resp, err := http.ReadResponse(conn.br, req) if err != nil { + if d.TLSClientConfig != nil { + for _, proto := range d.TLSClientConfig.NextProtos { + if proto != "http/1.1" { + return nil, nil, fmt.Errorf( + "websocket: protocol %q was given but is not supported;"+ + "sharing tls.Config with net/http Transport can cause this error: %w", + proto, err, + ) + } + } + } return nil, nil, err } @@ -388,7 +408,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h // debugging. 
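The new error path in DialContext above calls out a real footgun: sharing an HTTP/2-enabled tls.Config between a net/http Transport and the websocket Dialer. One defensive pattern, sketched here with illustrative field values, is to clone the shared config and strip "h2" before handing it to Dialer.TLSClientConfig:

package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	// Shared config, as a net/http transport might configure it.
	base := &tls.Config{
		NextProtos: []string{"h2", "http/1.1"},
		MinVersion: tls.VersionTLS12,
	}

	// Clone and keep only HTTP/1.1 for the WebSocket dial.
	cfg := base.Clone()
	cfg.NextProtos = []string{"http/1.1"}

	fmt.Println(base.NextProtos, cfg.NextProtos) // [h2 http/1.1] [http/1.1]
}

The handshake-response body read used for debugging continues below.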
buf := make([]byte, 1024) n, _ := io.ReadFull(resp.Body, buf) - resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) + resp.Body = io.NopCloser(bytes.NewReader(buf[:n])) return nil, resp, ErrBadHandshake } @@ -406,17 +426,19 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h break } - resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) + resp.Body = io.NopCloser(bytes.NewReader([]byte{})) conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") - netConn.SetDeadline(time.Time{}) + if err := netConn.SetDeadline(time.Time{}); err != nil { + return nil, nil, err + } netConn = nil // to avoid close in defer. return conn, resp, nil } func cloneTLSConfig(cfg *tls.Config) *tls.Config { if cfg == nil { - return &tls.Config{} + return &tls.Config{MinVersion: tls.VersionTLS12} } return cfg.Clone() } diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go index 813ffb1e8..9fed0ef52 100644 --- a/vendor/github.com/gorilla/websocket/compression.go +++ b/vendor/github.com/gorilla/websocket/compression.go @@ -8,6 +8,7 @@ import ( "compress/flate" "errors" "io" + "log" "strings" "sync" ) @@ -33,7 +34,9 @@ func decompressNoContextTakeover(r io.Reader) io.ReadCloser { "\x01\x00\x00\xff\xff" fr, _ := flateReaderPool.Get().(io.ReadCloser) - fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) + if err := fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil); err != nil { + panic(err) + } return &flateReadWrapper{fr} } @@ -132,7 +135,9 @@ func (r *flateReadWrapper) Read(p []byte) (int, error) { // Preemptively place the reader back in the pool. This helps with // scenarios where the application does not call NextReader() soon after // this final read. - r.Close() + if err := r.Close(); err != nil { + log.Printf("websocket: flateReadWrapper.Close() returned error: %v", err) + } } return n, err } diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go index 331eebc85..221e6cf79 100644 --- a/vendor/github.com/gorilla/websocket/conn.go +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -6,11 +6,11 @@ package websocket import ( "bufio" + "crypto/rand" "encoding/binary" "errors" "io" - "io/ioutil" - "math/rand" + "log" "net" "strconv" "strings" @@ -181,13 +181,20 @@ var ( errInvalidControlFrame = errors.New("websocket: invalid control frame") ) +// maskRand is an io.Reader for generating mask bytes. The reader is initialized +// to crypto/rand Reader. Tests swap the reader to a math/rand reader for +// reproducible results. +var maskRand = rand.Reader + +// newMaskKey returns a new 32 bit value for masking client frames. 
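The conn.go hunk just below replaces math/rand mask keys with crypto/rand, read through a swappable io.Reader so tests stay reproducible. A standalone sketch of the same pattern (the names mirror the diff; the seed value is arbitrary):

package main

import (
	crand "crypto/rand"
	"fmt"
	"io"
	mrand "math/rand"
)

// maskRand mirrors the variable added in the diff: crypto/rand in
// production, swappable for tests.
var maskRand io.Reader = crand.Reader

func newMaskKey() [4]byte {
	var k [4]byte
	_, _ = io.ReadFull(maskRand, k[:])
	return k
}

func main() {
	// *math/rand.Rand implements io.Reader, so a seeded source yields
	// reproducible mask keys for tests.
	maskRand = mrand.New(mrand.NewSource(1))
	fmt.Println(newMaskKey(), newMaskKey()) // deterministic under the fixed seed
}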
func newMaskKey() [4]byte { - n := rand.Uint32() - return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} + var k [4]byte + _, _ = io.ReadFull(maskRand, k[:]) + return k } func hideTempErr(err error) error { - if e, ok := err.(net.Error); ok && e.Temporary() { + if e, ok := err.(net.Error); ok { err = &netError{msg: e.Error(), timeout: e.Timeout()} } return err @@ -372,7 +379,9 @@ func (c *Conn) read(n int) ([]byte, error) { if err == io.EOF { err = errUnexpectedEOF } - c.br.Discard(len(p)) + if _, err := c.br.Discard(len(p)); err != nil { + return p, err + } return p, err } @@ -387,7 +396,9 @@ func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error return err } - c.conn.SetWriteDeadline(deadline) + if err := c.conn.SetWriteDeadline(deadline); err != nil { + return c.writeFatal(err) + } if len(buf1) == 0 { _, err = c.conn.Write(buf0) } else { @@ -397,7 +408,7 @@ func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error return c.writeFatal(err) } if frameType == CloseMessage { - c.writeFatal(ErrCloseSent) + _ = c.writeFatal(ErrCloseSent) } return nil } @@ -438,7 +449,7 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er d := 1000 * time.Hour if !deadline.IsZero() { - d = deadline.Sub(time.Now()) + d = time.Until(deadline) if d < 0 { return errWriteTimeout } @@ -460,13 +471,15 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er return err } - c.conn.SetWriteDeadline(deadline) + if err := c.conn.SetWriteDeadline(deadline); err != nil { + return c.writeFatal(err) + } _, err = c.conn.Write(buf) if err != nil { return c.writeFatal(err) } if messageType == CloseMessage { - c.writeFatal(ErrCloseSent) + _ = c.writeFatal(ErrCloseSent) } return err } @@ -477,7 +490,9 @@ func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { // probably better to return an error in this situation, but we cannot // change this without breaking existing applications. if c.writer != nil { - c.writer.Close() + if err := c.writer.Close(); err != nil { + log.Printf("websocket: discarding writer close error: %v", err) + } c.writer = nil } @@ -630,7 +645,7 @@ func (w *messageWriter) flushFrame(final bool, extra []byte) error { } if final { - w.endMessage(errWriteClosed) + _ = w.endMessage(errWriteClosed) return nil } @@ -795,7 +810,7 @@ func (c *Conn) advanceFrame() (int, error) { // 1. Skip remainder of previous frame. 
if c.readRemaining > 0 { - if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { + if _, err := io.CopyN(io.Discard, c.br, c.readRemaining); err != nil { return noFrame, err } } @@ -817,7 +832,9 @@ func (c *Conn) advanceFrame() (int, error) { rsv2 := p[0]&rsv2Bit != 0 rsv3 := p[0]&rsv3Bit != 0 mask := p[1]&maskBit != 0 - c.setReadRemaining(int64(p[1] & 0x7f)) + if err := c.setReadRemaining(int64(p[1] & 0x7f)); err != nil { + return noFrame, err + } c.readDecompress = false if rsv1 { @@ -922,7 +939,9 @@ func (c *Conn) advanceFrame() (int, error) { } if c.readLimit > 0 && c.readLength > c.readLimit { - c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) + if err := c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)); err != nil { + return noFrame, err + } return noFrame, ErrReadLimit } @@ -934,7 +953,9 @@ func (c *Conn) advanceFrame() (int, error) { var payload []byte if c.readRemaining > 0 { payload, err = c.read(int(c.readRemaining)) - c.setReadRemaining(0) + if err := c.setReadRemaining(0); err != nil { + return noFrame, err + } if err != nil { return noFrame, err } @@ -981,7 +1002,9 @@ func (c *Conn) handleProtocolError(message string) error { if len(data) > maxControlFramePayloadSize { data = data[:maxControlFramePayloadSize] } - c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)) + if err := c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)); err != nil { + return err + } return errors.New("websocket: " + message) } @@ -998,7 +1021,9 @@ func (c *Conn) handleProtocolError(message string) error { func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { // Close previous reader, only relevant for decompression. if c.reader != nil { - c.reader.Close() + if err := c.reader.Close(); err != nil { + log.Printf("websocket: discarding reader close error: %v", err) + } c.reader = nil } @@ -1054,7 +1079,9 @@ func (r *messageReader) Read(b []byte) (int, error) { } rem := c.readRemaining rem -= int64(n) - c.setReadRemaining(rem) + if err := c.setReadRemaining(rem); err != nil { + return 0, err + } if c.readRemaining > 0 && c.readErr == io.EOF { c.readErr = errUnexpectedEOF } @@ -1094,7 +1121,7 @@ func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { if err != nil { return messageType, nil, err } - p, err = ioutil.ReadAll(r) + p, err = io.ReadAll(r) return messageType, p, err } @@ -1136,7 +1163,9 @@ func (c *Conn) SetCloseHandler(h func(code int, text string) error) { if h == nil { h = func(code int, text string) error { message := FormatCloseMessage(code, "") - c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) + if err := c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)); err != nil { + return err + } return nil } } @@ -1161,7 +1190,7 @@ func (c *Conn) SetPingHandler(h func(appData string) error) { err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) if err == ErrCloseSent { return nil - } else if e, ok := err.(net.Error); ok && e.Temporary() { + } else if _, ok := err.(net.Error); ok { return nil } return err @@ -1189,8 +1218,16 @@ func (c *Conn) SetPongHandler(h func(appData string) error) { c.handlePong = h } +// NetConn returns the underlying connection that is wrapped by c. +// Note that writing to or reading from this connection directly will corrupt the +// WebSocket connection. 
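NetConn, added just below, is the supported replacement for the now-deprecated UnderlyingConn. Its intended use is socket configuration rather than I/O, as the doc comment warns. A compile-only sketch (tuneTCP is a hypothetical helper, not part of the package):

package main

import (
	"net"

	"github.com/gorilla/websocket"
)

// tuneTCP reaches the raw socket for configuration only; reading or
// writing it directly would corrupt the WebSocket framing.
func tuneTCP(c *websocket.Conn) {
	if tcp, ok := c.NetConn().(*net.TCPConn); ok {
		_ = tcp.SetNoDelay(true)
	}
}

func main() {} // compile-only; a live *websocket.Conn comes from a Dialer or Upgrader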
+func (c *Conn) NetConn() net.Conn { + return c.conn +} + // UnderlyingConn returns the internal net.Conn. This can be used to further // modifications to connection specific flags. +// Deprecated: Use the NetConn method. func (c *Conn) UnderlyingConn() net.Conn { return c.conn } diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go index d0742bf2a..67d0968be 100644 --- a/vendor/github.com/gorilla/websocket/mask.go +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -9,6 +9,7 @@ package websocket import "unsafe" +// #nosec G103 -- (CWE-242) Has been audited const wordSize = int(unsafe.Sizeof(uintptr(0))) func maskBytes(key [4]byte, pos int, b []byte) int { @@ -22,6 +23,7 @@ func maskBytes(key [4]byte, pos int, b []byte) int { } // Mask one byte at a time to word boundary. + //#nosec G103 -- (CWE-242) Has been audited if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { n = wordSize - n for i := range b[:n] { @@ -36,11 +38,13 @@ func maskBytes(key [4]byte, pos int, b []byte) int { for i := range k { k[i] = key[(pos+i)&3] } + //#nosec G103 -- (CWE-242) Has been audited kw := *(*uintptr)(unsafe.Pointer(&k)) // Mask one word at a time. n := (len(b) / wordSize) * wordSize for i := 0; i < n; i += wordSize { + //#nosec G103 -- (CWE-242) Has been audited *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw } diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go index e0f466b72..80f55d1ea 100644 --- a/vendor/github.com/gorilla/websocket/proxy.go +++ b/vendor/github.com/gorilla/websocket/proxy.go @@ -8,10 +8,13 @@ import ( "bufio" "encoding/base64" "errors" + "log" "net" "net/http" "net/url" "strings" + + "golang.org/x/net/proxy" ) type netDialerFunc func(network, addr string) (net.Conn, error) @@ -21,7 +24,7 @@ func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { } func init() { - proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) { + proxy.RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy.Dialer) (proxy.Dialer, error) { return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil }) } @@ -55,7 +58,9 @@ func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) } if err := connectReq.Write(conn); err != nil { - conn.Close() + if err := conn.Close(); err != nil { + log.Printf("httpProxyDialer: failed to close connection: %v", err) + } return nil, err } @@ -64,12 +69,16 @@ func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) br := bufio.NewReader(conn) resp, err := http.ReadResponse(br, connectReq) if err != nil { - conn.Close() + if err := conn.Close(); err != nil { + log.Printf("httpProxyDialer: failed to close connection: %v", err) + } return nil, err } if resp.StatusCode != 200 { - conn.Close() + if err := conn.Close(); err != nil { + log.Printf("httpProxyDialer: failed to close connection: %v", err) + } f := strings.SplitN(resp.Status, " ", 2) return nil, errors.New(f[1]) } diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go index 24d53b38a..1e720e1da 100644 --- a/vendor/github.com/gorilla/websocket/server.go +++ b/vendor/github.com/gorilla/websocket/server.go @@ -8,6 +8,7 @@ import ( "bufio" "errors" "io" + "log" "net/http" "net/url" "strings" @@ -154,8 +155,8 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade 
} challengeKey := r.Header.Get("Sec-Websocket-Key") - if challengeKey == "" { - return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank") + if !isValidChallengeKey(challengeKey) { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header must be Base64 encoded value of 16-byte in length") } subprotocol := u.selectSubprotocol(r, responseHeader) @@ -183,7 +184,9 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade } if brw.Reader.Buffered() > 0 { - netConn.Close() + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } return nil, errors.New("websocket: client sent data before handshake is complete") } @@ -248,17 +251,34 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade p = append(p, "\r\n"...) // Clear deadlines set by HTTP server. - netConn.SetDeadline(time.Time{}) + if err := netConn.SetDeadline(time.Time{}); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } if u.HandshakeTimeout > 0 { - netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) + if err := netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } } if _, err = netConn.Write(p); err != nil { - netConn.Close() + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } return nil, err } if u.HandshakeTimeout > 0 { - netConn.SetWriteDeadline(time.Time{}) + if err := netConn.SetWriteDeadline(time.Time{}); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } } return c, nil @@ -356,8 +376,12 @@ func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte { // bufio.Writer's underlying writer. 
var wh writeHook bw.Reset(&wh) - bw.WriteByte(0) - bw.Flush() + if err := bw.WriteByte(0); err != nil { + panic(err) + } + if err := bw.Flush(); err != nil { + log.Printf("websocket: bufioWriterBuffer: Flush: %v", err) + } bw.Reset(originalWriter) diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go index a62b68ccb..7f3864534 100644 --- a/vendor/github.com/gorilla/websocket/tls_handshake.go +++ b/vendor/github.com/gorilla/websocket/tls_handshake.go @@ -1,6 +1,3 @@ -//go:build go1.17 -// +build go1.17 - package websocket import ( diff --git a/vendor/github.com/gorilla/websocket/tls_handshake_116.go b/vendor/github.com/gorilla/websocket/tls_handshake_116.go deleted file mode 100644 index e1b2b44f6..000000000 --- a/vendor/github.com/gorilla/websocket/tls_handshake_116.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build !go1.17 -// +build !go1.17 - -package websocket - -import ( - "context" - "crypto/tls" -) - -func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { - if err := tlsConn.Handshake(); err != nil { - return err - } - if !cfg.InsecureSkipVerify { - if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go index 7bf2f66c6..9b1a629bf 100644 --- a/vendor/github.com/gorilla/websocket/util.go +++ b/vendor/github.com/gorilla/websocket/util.go @@ -6,7 +6,7 @@ package websocket import ( "crypto/rand" - "crypto/sha1" + "crypto/sha1" //#nosec G505 -- (CWE-327) https://datatracker.ietf.org/doc/html/rfc6455#page-54 "encoding/base64" "io" "net/http" @@ -17,7 +17,7 @@ import ( var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") func computeAcceptKey(challengeKey string) string { - h := sha1.New() + h := sha1.New() //#nosec G401 -- (CWE-326) https://datatracker.ietf.org/doc/html/rfc6455#page-54 h.Write([]byte(challengeKey)) h.Write(keyGUID) return base64.StdEncoding.EncodeToString(h.Sum(nil)) @@ -281,3 +281,18 @@ headers: } return result } + +// isValidChallengeKey checks if the argument meets RFC6455 specification. +func isValidChallengeKey(s string) bool { + // From RFC6455: + // + // A |Sec-WebSocket-Key| header field with a base64-encoded (see + // Section 4 of [RFC4648]) value that, when decoded, is 16 bytes in + // length. + + if s == "" { + return false + } + decoded, err := base64.StdEncoding.DecodeString(s) + return err == nil && len(decoded) == 16 +} diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go deleted file mode 100644 index 2e668f6b8..000000000 --- a/vendor/github.com/gorilla/websocket/x_net_proxy.go +++ /dev/null @@ -1,473 +0,0 @@ -// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. -//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy - -// Package proxy provides support for a variety of protocols to proxy network -// data. -// - -package websocket - -import ( - "errors" - "io" - "net" - "net/url" - "os" - "strconv" - "strings" - "sync" -) - -type proxy_direct struct{} - -// Direct is a direct proxy: one that makes network connections directly. -var proxy_Direct = proxy_direct{} - -func (proxy_direct) Dial(network, addr string) (net.Conn, error) { - return net.Dial(network, addr) -} - -// A PerHost directs connections to a default Dialer unless the host name -// requested matches one of a number of exceptions. 
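Back in the util.go hunk above: the stricter Sec-WebSocket-Key check pairs with computeAcceptKey. A quick end-to-end sketch of what now passes validation (the 16 key bytes here are arbitrary):

package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")

func main() {
	// A valid Sec-WebSocket-Key is the base64 encoding of exactly 16 bytes.
	key := base64.StdEncoding.EncodeToString(make([]byte, 16))

	decoded, err := base64.StdEncoding.DecodeString(key)
	fmt.Println(err == nil && len(decoded) == 16) // true: passes the new check

	// The Sec-WebSocket-Accept value the server echoes back (RFC 6455).
	h := sha1.New()
	h.Write([]byte(key))
	h.Write(keyGUID)
	fmt.Println(base64.StdEncoding.EncodeToString(h.Sum(nil)))
}

The bundled proxy code below is deleted in favor of the maintained golang.org/x/net/proxy package, which client.go and proxy.go now import directly.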
-type proxy_PerHost struct { - def, bypass proxy_Dialer - - bypassNetworks []*net.IPNet - bypassIPs []net.IP - bypassZones []string - bypassHosts []string -} - -// NewPerHost returns a PerHost Dialer that directs connections to either -// defaultDialer or bypass, depending on whether the connection matches one of -// the configured rules. -func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { - return &proxy_PerHost{ - def: defaultDialer, - bypass: bypass, - } -} - -// Dial connects to the address addr on the given network through either -// defaultDialer or bypass. -func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - - return p.dialerForRequest(host).Dial(network, addr) -} - -func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { - if ip := net.ParseIP(host); ip != nil { - for _, net := range p.bypassNetworks { - if net.Contains(ip) { - return p.bypass - } - } - for _, bypassIP := range p.bypassIPs { - if bypassIP.Equal(ip) { - return p.bypass - } - } - return p.def - } - - for _, zone := range p.bypassZones { - if strings.HasSuffix(host, zone) { - return p.bypass - } - if host == zone[1:] { - // For a zone ".example.com", we match "example.com" - // too. - return p.bypass - } - } - for _, bypassHost := range p.bypassHosts { - if bypassHost == host { - return p.bypass - } - } - return p.def -} - -// AddFromString parses a string that contains comma-separated values -// specifying hosts that should use the bypass proxy. Each value is either an -// IP address, a CIDR range, a zone (*.example.com) or a host name -// (localhost). A best effort is made to parse the string and errors are -// ignored. -func (p *proxy_PerHost) AddFromString(s string) { - hosts := strings.Split(s, ",") - for _, host := range hosts { - host = strings.TrimSpace(host) - if len(host) == 0 { - continue - } - if strings.Contains(host, "/") { - // We assume that it's a CIDR address like 127.0.0.0/8 - if _, net, err := net.ParseCIDR(host); err == nil { - p.AddNetwork(net) - } - continue - } - if ip := net.ParseIP(host); ip != nil { - p.AddIP(ip) - continue - } - if strings.HasPrefix(host, "*.") { - p.AddZone(host[1:]) - continue - } - p.AddHost(host) - } -} - -// AddIP specifies an IP address that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. A connection -// to a named host will never match an IP. -func (p *proxy_PerHost) AddIP(ip net.IP) { - p.bypassIPs = append(p.bypassIPs, ip) -} - -// AddNetwork specifies an IP range that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. A connection -// to a named host will never match. -func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { - p.bypassNetworks = append(p.bypassNetworks, net) -} - -// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of -// "example.com" matches "example.com" and all of its subdomains. -func (p *proxy_PerHost) AddZone(zone string) { - if strings.HasSuffix(zone, ".") { - zone = zone[:len(zone)-1] - } - if !strings.HasPrefix(zone, ".") { - zone = "." + zone - } - p.bypassZones = append(p.bypassZones, zone) -} - -// AddHost specifies a host name that will use the bypass proxy. 
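The per-host bypass logic above survives unchanged in golang.org/x/net/proxy, where NO_PROXY-style rules parse the same way. A sketch (the hosts and rules are illustrative; Direct stands in for both dialers just to keep it runnable, where real use would pass a SOCKS5 or other proxy dialer as the first argument):

package main

import (
	"fmt"

	"golang.org/x/net/proxy"
)

func main() {
	ph := proxy.NewPerHost(proxy.Direct, proxy.Direct)
	ph.AddFromString("10.0.0.0/8,*.internal.example.com,localhost")

	conn, err := ph.Dial("tcp", "localhost:9")
	if err != nil {
		fmt.Println("dial:", err) // expected unless something listens on port 9
		return
	}
	conn.Close()
}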
-func (p *proxy_PerHost) AddHost(host string) { - if strings.HasSuffix(host, ".") { - host = host[:len(host)-1] - } - p.bypassHosts = append(p.bypassHosts, host) -} - -// A Dialer is a means to establish a connection. -type proxy_Dialer interface { - // Dial connects to the given address via the proxy. - Dial(network, addr string) (c net.Conn, err error) -} - -// Auth contains authentication parameters that specific Dialers may require. -type proxy_Auth struct { - User, Password string -} - -// FromEnvironment returns the dialer specified by the proxy related variables in -// the environment. -func proxy_FromEnvironment() proxy_Dialer { - allProxy := proxy_allProxyEnv.Get() - if len(allProxy) == 0 { - return proxy_Direct - } - - proxyURL, err := url.Parse(allProxy) - if err != nil { - return proxy_Direct - } - proxy, err := proxy_FromURL(proxyURL, proxy_Direct) - if err != nil { - return proxy_Direct - } - - noProxy := proxy_noProxyEnv.Get() - if len(noProxy) == 0 { - return proxy - } - - perHost := proxy_NewPerHost(proxy, proxy_Direct) - perHost.AddFromString(noProxy) - return perHost -} - -// proxySchemes is a map from URL schemes to a function that creates a Dialer -// from a URL with such a scheme. -var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) - -// RegisterDialerType takes a URL scheme and a function to generate Dialers from -// a URL with that scheme and a forwarding Dialer. Registered schemes are used -// by FromURL. -func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { - if proxy_proxySchemes == nil { - proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) - } - proxy_proxySchemes[scheme] = f -} - -// FromURL returns a Dialer given a URL specification and an underlying -// Dialer for it to make network requests. -func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { - var auth *proxy_Auth - if u.User != nil { - auth = new(proxy_Auth) - auth.User = u.User.Username() - if p, ok := u.User.Password(); ok { - auth.Password = p - } - } - - switch u.Scheme { - case "socks5": - return proxy_SOCKS5("tcp", u.Host, auth, forward) - } - - // If the scheme doesn't match any of the built-in schemes, see if it - // was registered by another package. - if proxy_proxySchemes != nil { - if f, ok := proxy_proxySchemes[u.Scheme]; ok { - return f(u, forward) - } - } - - return nil, errors.New("proxy: unknown scheme: " + u.Scheme) -} - -var ( - proxy_allProxyEnv = &proxy_envOnce{ - names: []string{"ALL_PROXY", "all_proxy"}, - } - proxy_noProxyEnv = &proxy_envOnce{ - names: []string{"NO_PROXY", "no_proxy"}, - } -) - -// envOnce looks up an environment variable (optionally by multiple -// names) once. It mitigates expensive lookups on some platforms -// (e.g. Windows). -// (Borrowed from net/http/transport.go) -type proxy_envOnce struct { - names []string - once sync.Once - val string -} - -func (e *proxy_envOnce) Get() string { - e.once.Do(e.init) - return e.val -} - -func (e *proxy_envOnce) init() { - for _, n := range e.names { - e.val = os.Getenv(n) - if e.val != "" { - return - } - } -} - -// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address -// with an optional username and password. See RFC 1928 and RFC 1929. 
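Likewise, the bundled SOCKS5 implementation deleted below is what proxy.FromURL in client.go now resolves to. Direct use of the maintained package looks like this (the proxy URL is illustrative):

package main

import (
	"fmt"
	"net/url"

	"golang.org/x/net/proxy"
)

func main() {
	u, err := url.Parse("socks5://user:pass@127.0.0.1:1080")
	if err != nil {
		panic(err)
	}
	// proxy.FromURL mirrors the deleted proxy_FromURL; proxy.Direct is the
	// forwarding dialer used when no further chaining is needed.
	d, err := proxy.FromURL(u, proxy.Direct)
	if err != nil {
		panic(err)
	}
	conn, err := d.Dial("tcp", "example.com:80")
	if err != nil {
		fmt.Println("dial failed (expected without a local SOCKS5 server):", err)
		return
	}
	defer conn.Close()
}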
-func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { - s := &proxy_socks5{ - network: network, - addr: addr, - forward: forward, - } - if auth != nil { - s.user = auth.User - s.password = auth.Password - } - - return s, nil -} - -type proxy_socks5 struct { - user, password string - network, addr string - forward proxy_Dialer -} - -const proxy_socks5Version = 5 - -const ( - proxy_socks5AuthNone = 0 - proxy_socks5AuthPassword = 2 -) - -const proxy_socks5Connect = 1 - -const ( - proxy_socks5IP4 = 1 - proxy_socks5Domain = 3 - proxy_socks5IP6 = 4 -) - -var proxy_socks5Errors = []string{ - "", - "general failure", - "connection forbidden", - "network unreachable", - "host unreachable", - "connection refused", - "TTL expired", - "command not supported", - "address type not supported", -} - -// Dial connects to the address addr on the given network via the SOCKS5 proxy. -func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) { - switch network { - case "tcp", "tcp6", "tcp4": - default: - return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) - } - - conn, err := s.forward.Dial(s.network, s.addr) - if err != nil { - return nil, err - } - if err := s.connect(conn, addr); err != nil { - conn.Close() - return nil, err - } - return conn, nil -} - -// connect takes an existing connection to a socks5 proxy server, -// and commands the server to extend that connection to target, -// which must be a canonical address with a host and port. -func (s *proxy_socks5) connect(conn net.Conn, target string) error { - host, portStr, err := net.SplitHostPort(target) - if err != nil { - return err - } - - port, err := strconv.Atoi(portStr) - if err != nil { - return errors.New("proxy: failed to parse port number: " + portStr) - } - if port < 1 || port > 0xffff { - return errors.New("proxy: port number out of range: " + portStr) - } - - // the size here is just an estimate - buf := make([]byte, 0, 6+len(host)) - - buf = append(buf, proxy_socks5Version) - if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { - buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword) - } else { - buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone) - } - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - if buf[0] != 5 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) - } - if buf[1] == 0xff { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") - } - - // See RFC 1929 - if buf[1] == proxy_socks5AuthPassword { - buf = buf[:0] - buf = append(buf, 1 /* password protocol version */) - buf = append(buf, uint8(len(s.user))) - buf = append(buf, s.user...) - buf = append(buf, uint8(len(s.password))) - buf = append(buf, s.password...) 
- - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if buf[1] != 0 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") - } - } - - buf = buf[:0] - buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */) - - if ip := net.ParseIP(host); ip != nil { - if ip4 := ip.To4(); ip4 != nil { - buf = append(buf, proxy_socks5IP4) - ip = ip4 - } else { - buf = append(buf, proxy_socks5IP6) - } - buf = append(buf, ip...) - } else { - if len(host) > 255 { - return errors.New("proxy: destination host name too long: " + host) - } - buf = append(buf, proxy_socks5Domain) - buf = append(buf, byte(len(host))) - buf = append(buf, host...) - } - buf = append(buf, byte(port>>8), byte(port)) - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:4]); err != nil { - return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - failure := "unknown error" - if int(buf[1]) < len(proxy_socks5Errors) { - failure = proxy_socks5Errors[buf[1]] - } - - if len(failure) > 0 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) - } - - bytesToDiscard := 0 - switch buf[3] { - case proxy_socks5IP4: - bytesToDiscard = net.IPv4len - case proxy_socks5IP6: - bytesToDiscard = net.IPv6len - case proxy_socks5Domain: - _, err := io.ReadFull(conn, buf[:1]) - if err != nil { - return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - bytesToDiscard = int(buf[0]) - default: - return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) - } - - if cap(buf) < bytesToDiscard { - buf = make([]byte, bytesToDiscard) - } else { - buf = buf[:bytesToDiscard] - } - if _, err := io.ReadFull(conn, buf); err != nil { - return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - // Also need to discard the port number - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - return nil -} diff --git a/vendor/github.com/imdario/mergo/.deepsource.toml b/vendor/github.com/imdario/mergo/.deepsource.toml deleted file mode 100644 index 8a0681af8..000000000 --- a/vendor/github.com/imdario/mergo/.deepsource.toml +++ /dev/null @@ -1,12 +0,0 @@ -version = 1 - -test_patterns = [ - "*_test.go" -] - -[[analyzers]] -name = "go" -enabled = true - - [analyzers.meta] - import_path = "github.com/imdario/mergo" \ No newline at end of file diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml index d324c43ba..b13a50ed1 100644 --- a/vendor/github.com/imdario/mergo/.travis.yml +++ b/vendor/github.com/imdario/mergo/.travis.yml @@ -1,12 +1,7 @@ language: go -arch: - - amd64 - - ppc64le install: - go get -t - go get golang.org/x/tools/cmd/cover - go get github.com/mattn/goveralls script: - - go test -race -v ./... 
-after_script: - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index aa8cbd7ce..d1cefa871 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -1,54 +1,43 @@ # Mergo +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. + +Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. + +## Status + +It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). [![GoDoc][3]][4] -[![GitHub release][5]][6] -[![GoCard][7]][8] +[![GoCard][5]][6] [![Build Status][1]][2] -[![Coverage Status][9]][10] -[![Sourcegraph][11]][12] -[![FOSSA Status][13]][14] - -[![GoCenter Kudos][15]][16] +[![Coverage Status][7]][8] +[![Sourcegraph][9]][10] [1]: https://travis-ci.org/imdario/mergo.png [2]: https://travis-ci.org/imdario/mergo [3]: https://godoc.org/github.com/imdario/mergo?status.svg [4]: https://godoc.org/github.com/imdario/mergo -[5]: https://img.shields.io/github/release/imdario/mergo.svg -[6]: https://github.com/imdario/mergo/releases -[7]: https://goreportcard.com/badge/imdario/mergo -[8]: https://goreportcard.com/report/github.com/imdario/mergo -[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master -[10]: https://coveralls.io/github/imdario/mergo?branch=master -[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg -[12]: https://sourcegraph.com/github.com/imdario/mergo?badge -[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield -[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield -[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo -[16]: https://search.gocenter.io/github.com/imdario/mergo - -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). +[5]: https://goreportcard.com/badge/imdario/mergo +[6]: https://goreportcard.com/report/github.com/imdario/mergo +[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master +[8]: https://coveralls.io/github/imdario/mergo?branch=master +[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg +[10]: https://sourcegraph.com/github.com/imdario/mergo?badge -Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. - -## Status +### Latest release -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). +[Release v0.3.4](https://github.com/imdario/mergo/releases/tag/v0.3.4). ### Important note -Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. 
Also, this version adds suppot for go modules. - -Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. +Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code. -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). +If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0). ### Donations -If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes: +If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes: Buy Me a Coffee at ko-fi.com [![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) @@ -97,9 +86,8 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont - [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) - [jnuthong/item_search](https://github.com/jnuthong/item_search) - [bukalapak/snowboard](https://github.com/bukalapak/snowboard) -- [containerssh/containerssh](https://github.com/containerssh/containerssh) -## Install +## Installation go get github.com/imdario/mergo @@ -110,7 +98,7 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont ## Usage -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). ```go if err := mergo.Merge(&dst, src); err != nil { @@ -136,7 +124,9 @@ if err := mergo.Map(&dst, srcMap); err != nil { Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. 
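For readers skimming this vendored README change: a minimal sketch of the `mergo.Map` behavior the warning above describes, under the restored API. The `Config` struct and the literal values are hypothetical, used only for illustration.

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

// Config is a hypothetical destination struct; it is not part of this diff.
type Config struct {
	Host string
	Port int
}

func main() {
	dst := Config{Host: "localhost"}
	src := map[string]interface{}{"host": "example.com", "port": 8080}

	// Keys are capitalized to find the corresponding exported fields, so
	// "port" fills Config.Port. Host keeps its value because it is already
	// non-zero in dst and WithOverride is not passed.
	if err := mergo.Map(&dst, src); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(dst) // {localhost 8080}
}
```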
-Here is a nice example: +More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo). + +### Nice example ```go package main @@ -184,10 +174,10 @@ import ( "time" ) -type timeTransformer struct { +type timeTransfomer struct { } -func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { +func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { if typ == reflect.TypeOf(time.Time{}) { return func(dst, src reflect.Value) error { if dst.CanSet() { @@ -211,7 +201,7 @@ type Snapshot struct { func main() { src := Snapshot{time.Now()} dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{})) fmt.Println(dest) // Will print // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } @@ -227,21 +217,6 @@ If I can help you, you have an idea or you are using Mergo in your projects, don Written by [Dario Castañé](http://dario.im). -## Top Contributors - -[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0) -[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1) -[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2) -[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3) -[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4) -[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5) -[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6) -[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7) - - ## License [BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). - - -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go index fcd985f99..6e9aa7baf 100644 --- a/vendor/github.com/imdario/mergo/doc.go +++ b/vendor/github.com/imdario/mergo/doc.go @@ -4,140 +4,41 @@ // license that can be found in the LICENSE file. /* -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. +Package mergo merges same-type structs and maps by setting default values in zero-value fields. -Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Status - -It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. - -Important note - -Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules. 
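As context for the signature note that follows: a sketch of the variadic-options call style that the downgraded `merge.go` in this diff still supports. The `Settings` type and values are illustrative, not taken from the vendored code.

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

// Settings is a hypothetical type used only to demonstrate the options.
type Settings struct {
	Region   string
	Replicas int
	Tags     []string
}

func main() {
	dst := Settings{Region: "us-east-1", Tags: []string{"a"}}
	src := Settings{Region: "eu-west-1", Replicas: 3, Tags: []string{"b"}}

	// The options are optional/variadic, so pre-0.3.2 call sites such as
	// mergo.Merge(&dst, src) keep compiling unchanged.
	if err := mergo.Merge(&dst, src, mergo.WithOverride, mergo.WithAppendSlice); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(dst) // {eu-west-1 3 [a b]}
}
```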
- -Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. - -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). - -Install - -Do your usual installation procedure: - - go get github.com/imdario/mergo - - // use in your .go code - import ( - "github.com/imdario/mergo" - ) +Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). Usage -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - - if err := mergo.Merge(&dst, src); err != nil { - // ... - } - -Also, you can merge overwriting values using the transformer WithOverride. - - if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... - } - -Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. - - if err := mergo.Map(&dst, srcMap); err != nil { - // ... - } - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. - -Here is a nice example: - - package main - - import ( - "fmt" - "github.com/imdario/mergo" - ) +From my own work-in-progress project: - type Foo struct { - A string - B int64 + type networkConfig struct { + Protocol string + Address string + ServerType string `json: "server_type"` + Port uint16 } - func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} + type FssnConfig struct { + Network networkConfig } -Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time? - - package main - - import ( - "fmt" - "github.com/imdario/mergo" - "reflect" - "time" - ) - - type timeTransformer struct { + var fssnDefault = FssnConfig { + networkConfig { + "tcp", + "127.0.0.1", + "http", + 31560, + }, } - func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil - } - - type Snapshot struct { - Time time.Time - // ... - } + // Inside a function [...] 
- func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } + if err := mergo.Merge(&config, fssnDefault); err != nil { + log.Fatal(err) } -Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario - -About - -Written by Dario Castañé: https://da.rio.hn - -License - -BSD 3-Clause license, as Go language. + // More code [...] */ package mergo diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go index a13a7ee46..6ea38e636 100644 --- a/vendor/github.com/imdario/mergo/map.go +++ b/vendor/github.com/imdario/mergo/map.go @@ -72,7 +72,6 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf case reflect.Struct: srcMap := src.Interface().(map[string]interface{}) for key := range srcMap { - config.overwriteWithEmptyValue = true srcValue := srcMap[key] fieldName := changeInitialCase(key, unicode.ToUpper) dstElement := dst.FieldByName(fieldName) @@ -141,9 +140,6 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { } func _map(dst, src interface{}, opts ...func(*Config)) error { - if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument - } var ( vDst, vSrc reflect.Value err error diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index 8c2a8fcd9..706b22069 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -9,43 +9,25 @@ package mergo import ( - "fmt" "reflect" ) -func hasMergeableFields(dst reflect.Value) (exported bool) { +func hasExportedField(dst reflect.Value) (exported bool) { for i, n := 0, dst.NumField(); i < n; i++ { field := dst.Type().Field(i) if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { - exported = exported || hasMergeableFields(dst.Field(i)) - } else if isExportedComponent(&field) { + exported = exported || hasExportedField(dst.Field(i)) + } else { exported = exported || len(field.PkgPath) == 0 } } return } -func isExportedComponent(field *reflect.StructField) bool { - pkgPath := field.PkgPath - if len(pkgPath) > 0 { - return false - } - c := field.Name[0] - if 'a' <= c && c <= 'z' || c == '_' { - return false - } - return true -} - type Config struct { - Overwrite bool - AppendSlice bool - TypeCheck bool - Transformers Transformers - overwriteWithEmptyValue bool - overwriteSliceWithEmptyValue bool - sliceDeepCopy bool - debug bool + Overwrite bool + AppendSlice bool + Transformers Transformers } type Transformers interface { @@ -57,10 +39,6 @@ type Transformers interface { // short circuiting on recursive types. 
func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { overwrite := config.Overwrite - typeCheck := config.TypeCheck - overwriteWithEmptySrc := config.overwriteWithEmptyValue - overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue - sliceDeepCopy := config.sliceDeepCopy if !src.IsValid() { return @@ -88,34 +66,21 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co switch dst.Kind() { case reflect.Struct: - if hasMergeableFields(dst) { + if hasExportedField(dst) { for i, n := 0, dst.NumField(); i < n; i++ { if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { return } } } else { - if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { + if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { dst.Set(src) } } case reflect.Map: if dst.IsNil() && !src.IsNil() { - if dst.CanSet() { - dst.Set(reflect.MakeMap(dst.Type())) - } else { - dst = src - return - } + dst.Set(reflect.MakeMap(dst.Type())) } - - if src.Kind() != reflect.Map { - if overwrite { - dst.Set(src) - } - return - } - for _, key := range src.MapKeys() { srcElement := src.MapIndex(key) if !srcElement.IsValid() { @@ -125,9 +90,6 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co switch srcElement.Kind() { case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: if srcElement.IsNil() { - if overwrite { - dst.SetMapIndex(key, srcElement) - } continue } fallthrough @@ -162,43 +124,19 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dstSlice = reflect.ValueOf(dstElement.Interface()) } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { - if typeCheck && srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } + if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { dstSlice = srcSlice } else if config.AppendSlice { - if srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } dstSlice = reflect.AppendSlice(dstSlice, srcSlice) - } else if sliceDeepCopy { - i := 0 - for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { - srcElement := srcSlice.Index(i) - dstElement := dstSlice.Index(i) - - if srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - } - if dstElement.CanInterface() { - dstElement = reflect.ValueOf(dstElement.Interface()) - } - - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } - } dst.SetMapIndex(key, dstSlice) } } - if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { + if dstElement.IsValid() && reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map { continue } - if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { + if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) { if dst.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } @@ -209,41 +147,19 @@ func deepMerge(dst, src 
reflect.Value, visited map[uintptr]*visit, depth int, co if !dst.CanSet() { break } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { dst.Set(src) } else if config.AppendSlice { - if src.Type() != dst.Type() { - return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) - } dst.Set(reflect.AppendSlice(dst, src)) - } else if sliceDeepCopy { - for i := 0; i < src.Len() && i < dst.Len(); i++ { - srcElement := src.Index(i) - dstElement := dst.Index(i) - if srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - } - if dstElement.CanInterface() { - dstElement = reflect.ValueOf(dstElement.Interface()) - } - - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } } case reflect.Ptr: fallthrough case reflect.Interface: - if isReflectNil(src) { - if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { - dst.Set(src) - } + if src.IsNil() { break } - if src.Kind() != reflect.Interface { - if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { + if dst.IsNil() || overwrite { if dst.CanSet() && (overwrite || isEmptyValue(dst)) { dst.Set(src) } @@ -260,31 +176,18 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } break } - if dst.IsNil() || overwrite { if dst.CanSet() && (overwrite || isEmptyValue(dst)) { dst.Set(src) } - break - } - - if dst.Elem().Kind() == src.Elem().Kind() { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - break + } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return } default: - mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) - if mustSet { - if dst.CanSet() { - dst.Set(src) - } else { - dst = src - } + if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { + dst.Set(src) } } - return } @@ -296,7 +199,7 @@ func Merge(dst, src interface{}, opts ...func(*Config)) error { return merge(dst, src, opts...) } -// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by +// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overriden by // non-empty src attribute values. // Deprecated: use Merge(…) with WithOverride func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { @@ -315,37 +218,12 @@ func WithOverride(config *Config) { config.Overwrite = true } -// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. -func WithOverwriteWithEmptyValue(config *Config) { - config.Overwrite = true - config.overwriteWithEmptyValue = true -} - -// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice. -func WithOverrideEmptySlice(config *Config) { - config.overwriteSliceWithEmptyValue = true -} - -// WithAppendSlice will make merge append slices instead of overwriting it. +// WithAppendSlice will make merge append slices instead of overwriting it func WithAppendSlice(config *Config) { config.AppendSlice = true } -// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride). 
-func WithTypeCheck(config *Config) { - config.TypeCheck = true -} - -// WithSliceDeepCopy will merge slice element one by one with Overwrite flag. -func WithSliceDeepCopy(config *Config) { - config.sliceDeepCopy = true - config.Overwrite = true -} - func merge(dst, src interface{}, opts ...func(*Config)) error { - if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument - } var ( vDst, vSrc reflect.Value err error @@ -365,16 +243,3 @@ func merge(dst, src interface{}, opts ...func(*Config)) error { } return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) } - -// IsReflectNil is the reflect value provided nil -func isReflectNil(v reflect.Value) bool { - k := v.Kind() - switch k { - case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr: - // Both interface and slice are nil if first word is 0. - // Both are always bigger than a word; assume flagIndir. - return v.IsNil() - default: - return false - } -} diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go index 3cc926c7f..a82fea2fd 100644 --- a/vendor/github.com/imdario/mergo/mergo.go +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -20,7 +20,6 @@ var ( ErrNotSupported = errors.New("only structs and maps are supported") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") - ErrNonPointerAgument = errors.New("dst must be a pointer") ) // During deepMerge, must keep track of checks that are @@ -76,3 +75,23 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { } return } + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... + visited[h] = &visit{addr, typ, seen} + } + return // TODO refactor +} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE b/vendor/github.com/istio-ecosystem/sail-operator/LICENSE similarity index 99% rename from vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE rename to vendor/github.com/istio-ecosystem/sail-operator/LICENSE index 8dada3eda..75bfd113b 100644 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE +++ b/vendor/github.com/istio-ecosystem/sail-operator/LICENSE @@ -1,4 +1,5 @@ - Apache License + + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go b/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/common.go similarity index 78% rename from vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go rename to vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/common.go index c318385cb..2d17e98cb 100644 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go +++ b/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/common.go @@ -1,4 +1,4 @@ -// Copyright 2013 Matt T. 
Proud +// Copyright Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,5 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package pbutil provides record length-delimited Protocol Buffer streaming. -package pbutil +package v1alpha1 + +import ( + "time" +) + +// testTime is only in unit tests to pin the time to a fixed value +var testTime *time.Time diff --git a/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/groupversion_info.go b/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/groupversion_info.go new file mode 100644 index 000000000..d7b705454 --- /dev/null +++ b/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/groupversion_info.go @@ -0,0 +1,34 @@ +// Copyright Istio Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v1alpha1 contains API Schema definitions for the sailoperator.io v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=sailoperator.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "sailoperator.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/istio_types.go b/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/istio_types.go new file mode 100644 index 000000000..ba39fdc06 --- /dev/null +++ b/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/istio_types.go @@ -0,0 +1,277 @@ +// Copyright Istio Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
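Before the `istio_types.go` hunk below: a sketch of how a consumer typically wires up the `AddToScheme` helper exposed by the new `groupversion_info.go` above. The manager setup is an assumption for illustration; it is not part of this diff.

```go
package main

import (
	"log"

	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"

	sailv1alpha1 "github.com/istio-ecosystem/sail-operator/api/v1alpha1"
)

func main() {
	scheme := runtime.NewScheme()

	// Register the built-in Kubernetes types and then the sailoperator.io
	// v1alpha1 types (Istio, IstioCNI, IstioRevision, ...) on one scheme.
	if err := clientgoscheme.AddToScheme(scheme); err != nil {
		log.Fatal(err)
	}
	if err := sailv1alpha1.AddToScheme(scheme); err != nil {
		log.Fatal(err)
	}

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
	if err != nil {
		log.Fatal(err)
	}
	_ = mgr // controllers watching these types would be registered here
}
```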
+ +package v1alpha1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const IstioKind = "Istio" + +type UpdateStrategyType string + +const ( + UpdateStrategyTypeInPlace UpdateStrategyType = "InPlace" + UpdateStrategyTypeRevisionBased UpdateStrategyType = "RevisionBased" + + DefaultRevisionDeletionGracePeriodSeconds = 30 + MinRevisionDeletionGracePeriodSeconds = 30 +) + +// IstioSpec defines the desired state of Istio +// +kubebuilder:validation:XValidation:rule="!has(self.values) || !has(self.values.global) || !has(self.values.global.istioNamespace) || self.values.global.istioNamespace == self.__namespace__",message="spec.values.global.istioNamespace must match spec.namespace" +type IstioSpec struct { + // +sail:version + // Defines the version of Istio to install. + // Must be one of: v1.23.0, v1.22.3, v1.21.5, latest. + // +operator-sdk:csv:customresourcedefinitions:type=spec,order=1,displayName="Istio Version",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:fieldGroup:General", "urn:alm:descriptor:com.tectonic.ui:select:v1.23.0", "urn:alm:descriptor:com.tectonic.ui:select:v1.22.3", "urn:alm:descriptor:com.tectonic.ui:select:v1.21.5", "urn:alm:descriptor:com.tectonic.ui:select:latest"} + // +kubebuilder:validation:Enum=v1.23.0;v1.22.3;v1.21.5;latest + // +kubebuilder:default=v1.23.0 + Version string `json:"version"` + + // Defines the update strategy to use when the version in the Istio CR is updated. + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Update Strategy" + // +kubebuilder:default={type: "InPlace"} + UpdateStrategy *IstioUpdateStrategy `json:"updateStrategy,omitempty"` + + // +sail:profile + // The built-in installation configuration profile to use. + // The 'default' profile is always applied. On OpenShift, the 'openshift' profile is also applied on top of 'default'. + // Must be one of: ambient, default, demo, empty, external, openshift-ambient, openshift, preview, stable. + // +++PROFILES-DROPDOWN-HIDDEN-UNTIL-WE-FULLY-IMPLEMENT-THEM+++operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Profile",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:fieldGroup:General", "urn:alm:descriptor:com.tectonic.ui:select:ambient", "urn:alm:descriptor:com.tectonic.ui:select:default", "urn:alm:descriptor:com.tectonic.ui:select:demo", "urn:alm:descriptor:com.tectonic.ui:select:empty", "urn:alm:descriptor:com.tectonic.ui:select:external", "urn:alm:descriptor:com.tectonic.ui:select:minimal", "urn:alm:descriptor:com.tectonic.ui:select:preview", "urn:alm:descriptor:com.tectonic.ui:select:remote"} + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:hidden"} + // +kubebuilder:validation:Enum=ambient;default;demo;empty;external;openshift-ambient;openshift;preview;stable + Profile string `json:"profile,omitempty"` + + // Namespace to which the Istio components should be installed. + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:io.kubernetes:Namespace"} + // +kubebuilder:default=istio-system + Namespace string `json:"namespace"` + + // Defines the values to be passed to the Helm charts when installing Istio. + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Helm Values" + Values *Values `json:"values,omitempty"` +} + +// IstioUpdateStrategy defines how the control plane should be updated when the version in +// the Istio CR is updated. +type IstioUpdateStrategy struct { + // Type of strategy to use. 
Can be "InPlace" or "RevisionBased". When the "InPlace" strategy + // is used, the existing Istio control plane is updated in-place. The workloads therefore + // don't need to be moved from one control plane instance to another. When the "RevisionBased" + // strategy is used, a new Istio control plane instance is created for every change to the + // Istio.spec.version field. The old control plane remains in place until all workloads have + // been moved to the new control plane instance. + // + // The "InPlace" strategy is the default. TODO: change default to "RevisionBased" + // +operator-sdk:csv:customresourcedefinitions:type=spec,order=1,displayName="Type",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:InPlace", "urn:alm:descriptor:com.tectonic.ui:select:RevisionBased"} + // +kubebuilder:validation:Enum=InPlace;RevisionBased + // +kubebuilder:default=InPlace + Type UpdateStrategyType `json:"type,omitempty"` + + // Defines how many seconds the operator should wait before removing a non-active revision after all + // the workloads have stopped using it. You may want to set this value on the order of minutes. + // The minimum and the default value is 30. + // +operator-sdk:csv:customresourcedefinitions:type=spec,order=2,displayName="Inactive Revision Deletion Grace Period (seconds)",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:number"} + // +kubebuilder:validation:Minimum=30 + InactiveRevisionDeletionGracePeriodSeconds *int64 `json:"inactiveRevisionDeletionGracePeriodSeconds,omitempty"` + + // Defines whether the workloads should be moved from one control plane instance to another + // automatically. If updateWorkloads is true, the operator moves the workloads from the old + // control plane instance to the new one after the new control plane is ready. + // If updateWorkloads is false, the user must move the workloads manually by updating the + // istio.io/rev labels on the namespace and/or the pods. + // Defaults to false. + // +operator-sdk:csv:customresourcedefinitions:type=spec,order=3,displayName="Update Workloads Automatically",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:booleanSwitch"} + UpdateWorkloads bool `json:"updateWorkloads,omitempty"` +} + +// IstioStatus defines the observed state of Istio +type IstioStatus struct { + // ObservedGeneration is the most recent generation observed for this + // Istio object. It corresponds to the object's generation, which is + // updated on mutation by the API Server. The information in the status + // pertains to this particular generation of the object. + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Represents the latest available observations of the object's current state. + Conditions []IstioCondition `json:"conditions,omitempty"` + + // Reports the current state of the object. + State IstioConditionReason `json:"state,omitempty"` + + // The name of the active revision. + ActiveRevisionName string `json:"activeRevisionName,omitempty"` + + // Reports information about the underlying IstioRevisions. + Revisions RevisionSummary `json:"revisions,omitempty"` +} + +// RevisionSummary contains information on the number of IstioRevisions associated with this Istio. +type RevisionSummary struct { + // Total number of IstioRevisions currently associated with this Istio. + Total int32 `json:"total"` + + // Number of IstioRevisions that are Ready. + Ready int32 `json:"ready"` + + // Number of IstioRevisions that are currently in use. 
+ InUse int32 `json:"inUse"` +} + +// GetCondition returns the condition of the specified type +func (s *IstioStatus) GetCondition(conditionType IstioConditionType) IstioCondition { + if s != nil { + for i := range s.Conditions { + if s.Conditions[i].Type == conditionType { + return s.Conditions[i] + } + } + } + return IstioCondition{Type: conditionType, Status: metav1.ConditionUnknown} +} + +// SetCondition sets a specific condition in the list of conditions +func (s *IstioStatus) SetCondition(condition IstioCondition) { + var now time.Time + if testTime == nil { + now = time.Now() + } else { + now = *testTime + } + + // The lastTransitionTime only gets serialized out to the second. This can + // break update skipping, as the time in the resource returned from the client + // may not match the time in our cached status during a reconcile. We truncate + // here to save any problems down the line. + lastTransitionTime := metav1.NewTime(now.Truncate(time.Second)) + + for i, prevCondition := range s.Conditions { + if prevCondition.Type == condition.Type { + if prevCondition.Status != condition.Status { + condition.LastTransitionTime = lastTransitionTime + } else { + condition.LastTransitionTime = prevCondition.LastTransitionTime + } + s.Conditions[i] = condition + return + } + } + + // If the condition does not exist, initialize the lastTransitionTime + condition.LastTransitionTime = lastTransitionTime + s.Conditions = append(s.Conditions, condition) +} + +// IstioCondition represents a specific observation of the IstioCondition object's state. +type IstioCondition struct { + // The type of this condition. + Type IstioConditionType `json:"type,omitempty"` + + // The status of this condition. Can be True, False or Unknown. + Status metav1.ConditionStatus `json:"status,omitempty"` + + // Unique, single-word, CamelCase reason for the condition's last transition. + Reason IstioConditionReason `json:"reason,omitempty"` + + // Human-readable message indicating details about the last transition. + Message string `json:"message,omitempty"` + + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` +} + +// IstioConditionType represents the type of the condition. Condition stages are: +// Installed, Reconciled, Ready +type IstioConditionType string + +// IstioConditionReason represents a short message indicating how the condition came +// to be in its present state. +type IstioConditionReason string + +const ( + // IstioConditionReconciled signifies whether the controller has + // successfully reconciled the resources defined through the CR. + IstioConditionReconciled IstioConditionType = "Reconciled" + + // IstioReasonReconcileError indicates that the reconciliation of the resource has failed, but will be retried. + IstioReasonReconcileError IstioConditionReason = "ReconcileError" +) + +const ( + // IstioConditionReady signifies whether any Deployment, StatefulSet, + // etc. resources are Ready. + IstioConditionReady IstioConditionType = "Ready" + + // IstioReasonRevisionNotFound indicates that the active IstioRevision is not found. 
+ IstioReasonRevisionNotFound IstioConditionReason = "ActiveRevisionNotFound" + + // IstioReasonFailedToGetActiveRevision indicates that a failure occurred when getting the active IstioRevision + IstioReasonFailedToGetActiveRevision IstioConditionReason = "FailedToGetActiveRevision" + + // IstioReasonIstiodNotReady indicates that the control plane is fully reconciled, but istiod is not ready. + IstioReasonIstiodNotReady IstioConditionReason = "IstiodNotReady" + + // IstioReasonReadinessCheckFailed indicates that readiness could not be ascertained. + IstioReasonReadinessCheckFailed IstioConditionReason = "ReadinessCheckFailed" +) + +const ( + // IstioReasonHealthy indicates that the control plane is fully reconciled and that all components are ready. + IstioReasonHealthy IstioConditionReason = "Healthy" +) + +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope=Cluster,categories=istio-io +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Revisions",type="string",JSONPath=".status.revisions.total",description="Total number of IstioRevision objects currently associated with this object." +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.revisions.ready",description="Number of revisions that are ready." +// +kubebuilder:printcolumn:name="In use",type="string",JSONPath=".status.revisions.inUse",description="Number of revisions that are currently being used by workloads." +// +kubebuilder:printcolumn:name="Active Revision",type="string",JSONPath=".status.activeRevisionName",description="The name of the currently active revision." +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.state",description="The current state of the active revision." +// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version",description="The version of the control plane installation." +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="The age of the object" + +// Istio represents an Istio Service Mesh deployment consisting of one or more +// control plane instances (represented by one or more IstioRevision objects). +// To deploy an Istio Service Mesh, a user creates an Istio object with the +// desired Istio version and configuration. The operator then creates +// an IstioRevision object, which in turn creates the underlying Deployment +// objects for istiod and other control plane components, similar to how a +// Deployment object in Kubernetes creates ReplicaSets that create the Pods. 
+type Istio struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +kubebuilder:default={version: "v1.23.0", namespace: "istio-system", updateStrategy: {type:"InPlace"}} + Spec IstioSpec `json:"spec,omitempty"` + + Status IstioStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// IstioList contains a list of Istio +type IstioList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Istio `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Istio{}, &IstioList{}) +} diff --git a/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/istiocni_types.go b/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/istiocni_types.go new file mode 100644 index 000000000..0bceffb8c --- /dev/null +++ b/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/istiocni_types.go @@ -0,0 +1,197 @@ +// Copyright Istio Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + IstioCNIKind = "IstioCNI" +) + +// IstioCNISpec defines the desired state of IstioCNI +type IstioCNISpec struct { + // +sail:version + // Defines the version of Istio to install. + // Must be one of: v1.23.0, v1.22.3, v1.21.5, latest. + // +operator-sdk:csv:customresourcedefinitions:type=spec,order=1,displayName="Istio Version",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:fieldGroup:General", "urn:alm:descriptor:com.tectonic.ui:select:v1.23.0", "urn:alm:descriptor:com.tectonic.ui:select:v1.22.3", "urn:alm:descriptor:com.tectonic.ui:select:v1.21.5", "urn:alm:descriptor:com.tectonic.ui:select:latest"} + // +kubebuilder:validation:Enum=v1.23.0;v1.22.3;v1.21.5;latest + // +kubebuilder:default=v1.23.0 + Version string `json:"version"` + + // +sail:profile + // The built-in installation configuration profile to use. + // The 'default' profile is always applied. On OpenShift, the 'openshift' profile is also applied on top of 'default'. + // Must be one of: ambient, default, demo, empty, external, openshift-ambient, openshift, preview, stable. 
+ // +++PROFILES-DROPDOWN-HIDDEN-UNTIL-WE-FULLY-IMPLEMENT-THEM+++operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Profile",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:fieldGroup:General", "urn:alm:descriptor:com.tectonic.ui:select:ambient", "urn:alm:descriptor:com.tectonic.ui:select:default", "urn:alm:descriptor:com.tectonic.ui:select:demo", "urn:alm:descriptor:com.tectonic.ui:select:empty", "urn:alm:descriptor:com.tectonic.ui:select:external", "urn:alm:descriptor:com.tectonic.ui:select:minimal", "urn:alm:descriptor:com.tectonic.ui:select:preview", "urn:alm:descriptor:com.tectonic.ui:select:remote"} + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:hidden"} + // +kubebuilder:validation:Enum=ambient;default;demo;empty;external;openshift-ambient;openshift;preview;stable + Profile string `json:"profile,omitempty"` + + // Namespace to which the Istio CNI component should be installed. + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:io.kubernetes:Namespace"} + // +kubebuilder:default=istio-cni + Namespace string `json:"namespace"` + + // Defines the values to be passed to the Helm charts when installing Istio CNI. + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Helm Values" + Values *CNIValues `json:"values,omitempty"` +} + +// IstioCNIStatus defines the observed state of IstioCNI +type IstioCNIStatus struct { + // ObservedGeneration is the most recent generation observed for this + // IstioCNI object. It corresponds to the object's generation, which is + // updated on mutation by the API Server. The information in the status + // pertains to this particular generation of the object. + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Represents the latest available observations of the object's current state. + Conditions []IstioCNICondition `json:"conditions,omitempty"` + + // Reports the current state of the object. + State IstioCNIConditionReason `json:"state,omitempty"` +} + +// GetCondition returns the condition of the specified type +func (s *IstioCNIStatus) GetCondition(conditionType IstioCNIConditionType) IstioCNICondition { + if s != nil { + for i := range s.Conditions { + if s.Conditions[i].Type == conditionType { + return s.Conditions[i] + } + } + } + return IstioCNICondition{Type: conditionType, Status: metav1.ConditionUnknown} +} + +// SetCondition sets a specific condition in the list of conditions +func (s *IstioCNIStatus) SetCondition(condition IstioCNICondition) { + var now time.Time + if testTime == nil { + now = time.Now() + } else { + now = *testTime + } + + // The lastTransitionTime only gets serialized out to the second. This can + // break update skipping, as the time in the resource returned from the client + // may not match the time in our cached status during a reconcile. We truncate + // here to save any problems down the line. 
+ lastTransitionTime := metav1.NewTime(now.Truncate(time.Second)) + + for i, prevCondition := range s.Conditions { + if prevCondition.Type == condition.Type { + if prevCondition.Status != condition.Status { + condition.LastTransitionTime = lastTransitionTime + } else { + condition.LastTransitionTime = prevCondition.LastTransitionTime + } + s.Conditions[i] = condition + return + } + } + + // If the condition does not exist, initialize the lastTransitionTime + condition.LastTransitionTime = lastTransitionTime + s.Conditions = append(s.Conditions, condition) +} + +// IstioCNICondition represents a specific observation of the IstioCNI object's state. +type IstioCNICondition struct { + // The type of this condition. + Type IstioCNIConditionType `json:"type,omitempty"` + + // The status of this condition. Can be True, False or Unknown. + Status metav1.ConditionStatus `json:"status,omitempty"` + + // Unique, single-word, CamelCase reason for the condition's last transition. + Reason IstioCNIConditionReason `json:"reason,omitempty"` + + // Human-readable message indicating details about the last transition. + Message string `json:"message,omitempty"` + + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` +} + +// IstioCNIConditionType represents the type of the condition. Condition stages are: +// Installed, Reconciled, Ready +type IstioCNIConditionType string + +// IstioCNIConditionReason represents a short message indicating how the condition came +// to be in its present state. +type IstioCNIConditionReason string + +const ( + // IstioCNIConditionReconciled signifies whether the controller has + // successfully reconciled the resources defined through the CR. + IstioCNIConditionReconciled IstioCNIConditionType = "Reconciled" + + // IstioCNIReasonReconcileError indicates that the reconciliation of the resource has failed, but will be retried. + IstioCNIReasonReconcileError IstioCNIConditionReason = "ReconcileError" +) + +const ( + // IstioCNIConditionReady signifies whether the istio-cni-node DaemonSet is ready. + IstioCNIConditionReady IstioCNIConditionType = "Ready" + + // IstioCNIDaemonSetNotReady indicates that the istio-cni-node DaemonSet is not ready. + IstioCNIDaemonSetNotReady IstioCNIConditionReason = "DaemonSetNotReady" + + // IstioCNIReasonReadinessCheckFailed indicates that the DaemonSet readiness status could not be ascertained. + IstioCNIReasonReadinessCheckFailed IstioCNIConditionReason = "ReadinessCheckFailed" +) + +const ( + // IstioCNIReasonHealthy indicates that the control plane is fully reconciled and that all components are ready. + IstioCNIReasonHealthy IstioCNIConditionReason = "Healthy" +) + +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope=Cluster,categories=istio-io +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="Whether the Istio CNI installation is ready to handle requests." +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.state",description="The current state of this object." +// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version",description="The version of the Istio CNI installation." 
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="The age of the object" +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'default'",message="metadata.name must be 'default'" + +// IstioCNI represents a deployment of the Istio CNI component. +type IstioCNI struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +kubebuilder:default={version: "v1.23.0", namespace: "istio-cni"} + Spec IstioCNISpec `json:"spec,omitempty"` + + Status IstioCNIStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// IstioCNIList contains a list of IstioCNI +type IstioCNIList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IstioCNI `json:"items"` +} + +func init() { + SchemeBuilder.Register(&IstioCNI{}, &IstioCNIList{}) +} diff --git a/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/istiorevision_types.go b/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/istiorevision_types.go new file mode 100644 index 000000000..9500341f3 --- /dev/null +++ b/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/istiorevision_types.go @@ -0,0 +1,224 @@ +// Copyright Istio Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + IstioRevisionKind = "IstioRevision" + DefaultRevision = "default" +) + +// IstioRevisionSpec defines the desired state of IstioRevision +// +kubebuilder:validation:XValidation:rule="self.values.global.istioNamespace == self.__namespace__",message="spec.values.global.istioNamespace must match spec.namespace" +type IstioRevisionSpec struct { + // Type indicates whether this revision represents a local or a remote control plane installation. + // +kubebuilder:default=Local + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + Type IstioRevisionType `json:"type"` + + // +sail:version + // Defines the version of Istio to install. + // Must be one of: v1.23.0, v1.22.3, v1.21.5, latest. + // +operator-sdk:csv:customresourcedefinitions:type=spec,order=1,displayName="Istio Version",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:fieldGroup:General", "urn:alm:descriptor:com.tectonic.ui:select:v1.23.0", "urn:alm:descriptor:com.tectonic.ui:select:v1.22.3", "urn:alm:descriptor:com.tectonic.ui:select:v1.21.5", "urn:alm:descriptor:com.tectonic.ui:select:latest"} + // +kubebuilder:validation:Enum=v1.23.0;v1.22.3;v1.21.5;latest + Version string `json:"version"` + + // Namespace to which the Istio components should be installed. + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:io.kubernetes:Namespace"} + Namespace string `json:"namespace"` + + // Defines the values to be passed to the Helm charts when installing Istio. 
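+	// As an illustrative sketch only (field names follow the json tags
+	// declared in this package), setting spec.values.pilot.traceSampling
+	// to 0.5 here corresponds to `--set pilot.traceSampling=0.5` in plain
+	// Helm terms.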
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Helm Values"
+	Values *Values `json:"values,omitempty"`
+}
+
+// IstioRevisionStatus defines the observed state of IstioRevision
+type IstioRevisionStatus struct {
+	// ObservedGeneration is the most recent generation observed for this
+	// IstioRevision object. It corresponds to the object's generation, which is
+	// updated on mutation by the API Server. The information in the status
+	// pertains to this particular generation of the object.
+	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+	// Represents the latest available observations of the object's current state.
+	Conditions []IstioRevisionCondition `json:"conditions,omitempty"`
+
+	// Reports the current state of the object.
+	State IstioRevisionConditionReason `json:"state,omitempty"`
+}
+
+// GetCondition returns the condition of the specified type
+func (s *IstioRevisionStatus) GetCondition(conditionType IstioRevisionConditionType) IstioRevisionCondition {
+	if s != nil {
+		for i := range s.Conditions {
+			if s.Conditions[i].Type == conditionType {
+				return s.Conditions[i]
+			}
+		}
+	}
+	return IstioRevisionCondition{Type: conditionType, Status: metav1.ConditionUnknown}
+}
+
+// SetCondition sets a specific condition in the list of conditions
+func (s *IstioRevisionStatus) SetCondition(condition IstioRevisionCondition) {
+	var now time.Time
+	if testTime == nil {
+		now = time.Now()
+	} else {
+		now = *testTime
+	}
+
+	// The lastTransitionTime only gets serialized out to the second. This can
+	// break update skipping, as the time in the resource returned from the client
+	// may not match the time in our cached status during a reconcile. We truncate
+	// here to avoid any problems down the line.
+	lastTransitionTime := metav1.NewTime(now.Truncate(time.Second))
+
+	for i, prevCondition := range s.Conditions {
+		if prevCondition.Type == condition.Type {
+			if prevCondition.Status != condition.Status {
+				condition.LastTransitionTime = lastTransitionTime
+			} else {
+				condition.LastTransitionTime = prevCondition.LastTransitionTime
+			}
+			s.Conditions[i] = condition
+			return
+		}
+	}
+
+	// If the condition does not exist, initialize the lastTransitionTime
+	condition.LastTransitionTime = lastTransitionTime
+	s.Conditions = append(s.Conditions, condition)
+}
+
+// IstioRevisionCondition represents a specific observation of the IstioRevision object's state.
+type IstioRevisionCondition struct {
+	// The type of this condition.
+	Type IstioRevisionConditionType `json:"type,omitempty"`
+
+	// The status of this condition. Can be True, False or Unknown.
+	Status metav1.ConditionStatus `json:"status,omitempty"`
+
+	// Unique, single-word, CamelCase reason for the condition's last transition.
+	Reason IstioRevisionConditionReason `json:"reason,omitempty"`
+
+	// Human-readable message indicating details about the last transition.
+	Message string `json:"message,omitempty"`
+
+	// Last time the condition transitioned from one status to another.
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+}
+
+// IstioRevisionConditionType represents the type of the condition. Condition stages are:
+// Installed, Reconciled, Ready
+type IstioRevisionConditionType string
+
+// IstioRevisionConditionReason represents a short message indicating how the condition came
+// to be in its present state.
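+// Reasons also double as the aggregated IstioRevisionStatus.State value
+// reported above, e.g. "Healthy" while everything is ready or
+// "ReconcileError" after a failed reconcile.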
+type IstioRevisionConditionReason string + +const ( + // IstioRevisionConditionReconciled signifies whether the controller has + // successfully reconciled the resources defined through the CR. + IstioRevisionConditionReconciled IstioRevisionConditionType = "Reconciled" + + // IstioRevisionReasonReconcileError indicates that the reconciliation of the resource has failed, but will be retried. + IstioRevisionReasonReconcileError IstioRevisionConditionReason = "ReconcileError" +) + +const ( + // IstioRevisionConditionReady signifies whether any Deployment, StatefulSet, + // etc. resources are Ready. + IstioRevisionConditionReady IstioRevisionConditionType = "Ready" + + // IstioRevisionReasonIstiodNotReady indicates that the control plane is fully reconciled, but istiod is not ready. + IstioRevisionReasonIstiodNotReady IstioRevisionConditionReason = "IstiodNotReady" + + // IstioRevisionReasonRemoteIstiodNotReady indicates that the remote istiod is not ready. + IstioRevisionReasonRemoteIstiodNotReady IstioRevisionConditionReason = "RemoteIstiodNotReady" + + // IstioRevisionReasonReadinessCheckFailed indicates that istiod readiness status could not be ascertained. + IstioRevisionReasonReadinessCheckFailed IstioRevisionConditionReason = "ReadinessCheckFailed" +) + +const ( + // IstioRevisionConditionInUse signifies whether any workload is configured to use the revision. + IstioRevisionConditionInUse IstioRevisionConditionType = "InUse" + + // IstioRevisionReasonReferencedByWorkloads indicates that the revision is referenced by at least one pod or namespace. + IstioRevisionReasonReferencedByWorkloads IstioRevisionConditionReason = "ReferencedByWorkloads" + + // IstioRevisionReasonNotReferenced indicates that the revision is not referenced by any pod or namespace. + IstioRevisionReasonNotReferenced IstioRevisionConditionReason = "NotReferencedByAnything" + + // IstioRevisionReasonUsageCheckFailed indicates that the operator could not check whether any workloads use the revision. + IstioRevisionReasonUsageCheckFailed IstioRevisionConditionReason = "UsageCheckFailed" +) + +const ( + // IstioRevisionReasonHealthy indicates that the control plane is fully reconciled and that all components are ready. + IstioRevisionReasonHealthy IstioRevisionConditionReason = "Healthy" +) + +type IstioRevisionType string + +const ( + // IstioRevisionTypeLocal indicates that the revision represents a local control plane installation. + IstioRevisionTypeLocal IstioRevisionType = "Local" + + // IstioRevisionTypeRemote indicates that the revision represents a remote control plane installation. + IstioRevisionTypeRemote IstioRevisionType = "Remote" +) + +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope=Cluster,shortName=istiorev,categories=istio-io +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".spec.type",description="Whether the control plane is installed locally or in a remote cluster." +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="Whether the control plane installation is ready to handle requests." +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.state",description="The current state of this object." +// +kubebuilder:printcolumn:name="In use",type="string",JSONPath=".status.conditions[?(@.type==\"InUse\")].status",description="Whether the revision is being used by workloads." 
+// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version",description="The version of the control plane installation." +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="The age of the object" + +// IstioRevision represents a single revision of an Istio Service Mesh deployment. +// Users shouldn't create IstioRevision objects directly. Instead, they should +// create an Istio object and allow the operator to create the underlying +// IstioRevision object(s). +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'default' ? (!has(self.spec.values.revision) || size(self.spec.values.revision) == 0) : self.spec.values.revision == self.metadata.name",message="spec.values.revision must match metadata.name" +type IstioRevision struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec IstioRevisionSpec `json:"spec,omitempty"` + Status IstioRevisionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// IstioRevisionList contains a list of IstioRevision +type IstioRevisionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IstioRevision `json:"items"` +} + +func init() { + SchemeBuilder.Register(&IstioRevision{}, &IstioRevisionList{}) +} diff --git a/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/remoteistio_types.go b/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/remoteistio_types.go new file mode 100644 index 000000000..dbfe3707c --- /dev/null +++ b/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/remoteistio_types.go @@ -0,0 +1,217 @@ +// Copyright Istio Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const RemoteIstioKind = "RemoteIstio" + +// RemoteIstioSpec defines the desired state of RemoteIstio +// +kubebuilder:validation:XValidation:rule="!has(self.values) || !has(self.values.global) || !has(self.values.global.istioNamespace) || self.values.global.istioNamespace == self.__namespace__",message="spec.values.global.istioNamespace must match spec.namespace" +type RemoteIstioSpec struct { + // +sail:version + // Defines the version of Istio to install. + // Must be one of: v1.23.0, v1.22.3, v1.21.5, latest. + // +operator-sdk:csv:customresourcedefinitions:type=spec,order=1,displayName="Istio Version",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:fieldGroup:General", "urn:alm:descriptor:com.tectonic.ui:select:v1.23.0", "urn:alm:descriptor:com.tectonic.ui:select:v1.22.3", "urn:alm:descriptor:com.tectonic.ui:select:v1.21.5", "urn:alm:descriptor:com.tectonic.ui:select:latest"} + // +kubebuilder:validation:Enum=v1.23.0;v1.22.3;v1.21.5;latest + // +kubebuilder:default=v1.23.0 + Version string `json:"version"` + + // Defines the update strategy to use when the version in the RemoteIstio CR is updated. 
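+	// (Usage note, not normative: with the default InPlace strategy set by
+	// the marker below, the existing control plane is upgraded in place when
+	// spec.version changes; see the IstioUpdateStrategy type for the
+	// available options.)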
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Update Strategy" + // +kubebuilder:default={type: "InPlace"} + UpdateStrategy *IstioUpdateStrategy `json:"updateStrategy,omitempty"` + + // +sail:profile + // The built-in installation configuration profile to use. + // The 'default' profile is always applied. On OpenShift, the 'openshift' profile is also applied on top of 'default'. + // Must be one of: ambient, default, demo, empty, external, openshift-ambient, openshift, preview, stable. + // +++PROFILES-DROPDOWN-HIDDEN-UNTIL-WE-FULLY-IMPLEMENT-THEM+++operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Profile",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:fieldGroup:General", "urn:alm:descriptor:com.tectonic.ui:select:ambient", "urn:alm:descriptor:com.tectonic.ui:select:default", "urn:alm:descriptor:com.tectonic.ui:select:demo", "urn:alm:descriptor:com.tectonic.ui:select:empty", "urn:alm:descriptor:com.tectonic.ui:select:external", "urn:alm:descriptor:com.tectonic.ui:select:minimal", "urn:alm:descriptor:com.tectonic.ui:select:preview", "urn:alm:descriptor:com.tectonic.ui:select:remote"} + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:hidden"} + // +kubebuilder:validation:Enum=ambient;default;demo;empty;external;openshift-ambient;openshift;preview;stable + Profile string `json:"profile,omitempty"` + + // Namespace to which the Istio components should be installed. + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:io.kubernetes:Namespace"} + // +kubebuilder:default=istio-system + Namespace string `json:"namespace"` + + // Defines the values to be passed to the Helm charts when installing Istio. + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Helm Values" + Values *Values `json:"values,omitempty"` +} + +// RemoteIstioStatus defines the observed state of RemoteIstio +type RemoteIstioStatus struct { + // ObservedGeneration is the most recent generation observed for this + // RemoteIstio object. It corresponds to the object's generation, which is + // updated on mutation by the API Server. The information in the status + // pertains to this particular generation of the object. + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Represents the latest available observations of the object's current state. + Conditions []RemoteIstioCondition `json:"conditions,omitempty"` + + // Reports the current state of the object. + State RemoteIstioConditionReason `json:"state,omitempty"` + + // The name of the active revision. + ActiveRevisionName string `json:"activeRevisionName,omitempty"` + + // Reports information about the underlying IstioRevisions. + Revisions RevisionSummary `json:"revisions,omitempty"` +} + +// GetCondition returns the condition of the specified type +func (s *RemoteIstioStatus) GetCondition(conditionType RemoteIstioConditionType) RemoteIstioCondition { + if s != nil { + for i := range s.Conditions { + if s.Conditions[i].Type == conditionType { + return s.Conditions[i] + } + } + } + return RemoteIstioCondition{Type: conditionType, Status: metav1.ConditionUnknown} +} + +// SetCondition sets a specific condition in the list of conditions +func (s *RemoteIstioStatus) SetCondition(condition RemoteIstioCondition) { + var now time.Time + if testTime == nil { + now = time.Now() + } else { + now = *testTime + } + + // The lastTransitionTime only gets serialized out to the second. 
This can
+	// break update skipping, as the time in the resource returned from the client
+	// may not match the time in our cached status during a reconcile. We truncate
+	// here to avoid any problems down the line.
+	lastTransitionTime := metav1.NewTime(now.Truncate(time.Second))
+
+	for i, prevCondition := range s.Conditions {
+		if prevCondition.Type == condition.Type {
+			if prevCondition.Status != condition.Status {
+				condition.LastTransitionTime = lastTransitionTime
+			} else {
+				condition.LastTransitionTime = prevCondition.LastTransitionTime
+			}
+			s.Conditions[i] = condition
+			return
+		}
+	}
+
+	// If the condition does not exist, initialize the lastTransitionTime
+	condition.LastTransitionTime = lastTransitionTime
+	s.Conditions = append(s.Conditions, condition)
+}
+
+// RemoteIstioCondition represents a specific observation of the RemoteIstio object's state.
+type RemoteIstioCondition struct {
+	// The type of this condition.
+	Type RemoteIstioConditionType `json:"type,omitempty"`
+
+	// The status of this condition. Can be True, False or Unknown.
+	Status metav1.ConditionStatus `json:"status,omitempty"`
+
+	// Unique, single-word, CamelCase reason for the condition's last transition.
+	Reason RemoteIstioConditionReason `json:"reason,omitempty"`
+
+	// Human-readable message indicating details about the last transition.
+	Message string `json:"message,omitempty"`
+
+	// Last time the condition transitioned from one status to another.
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+}
+
+// RemoteIstioConditionType represents the type of the condition. Condition stages are:
+// Installed, Reconciled, Ready
+type RemoteIstioConditionType string
+
+// RemoteIstioConditionReason represents a short message indicating how the condition came
+// to be in its present state.
+type RemoteIstioConditionReason string
+
+const (
+	// RemoteIstioConditionReconciled signifies whether the controller has
+	// successfully reconciled the resources defined through the CR.
+	RemoteIstioConditionReconciled RemoteIstioConditionType = "Reconciled"
+
+	// RemoteIstioReasonReconcileError indicates that the reconciliation of the resource has failed, but will be retried.
+	RemoteIstioReasonReconcileError RemoteIstioConditionReason = "ReconcileError"
+)
+
+const (
+	// RemoteIstioConditionReady signifies whether any Deployment, StatefulSet,
+	// etc. resources are Ready.
+	RemoteIstioConditionReady RemoteIstioConditionType = "Ready"
+
+	// RemoteIstioReasonRevisionNotFound indicates that the active IstioRevision is not found.
+	RemoteIstioReasonRevisionNotFound RemoteIstioConditionReason = "ActiveRevisionNotFound"
+
+	// RemoteIstioReasonFailedToGetActiveRevision indicates that a failure occurred when getting the active IstioRevision.
+	RemoteIstioReasonFailedToGetActiveRevision RemoteIstioConditionReason = "FailedToGetActiveRevision"
+
+	// RemoteIstioReasonIstiodNotReady indicates that the control plane is fully reconciled, but istiod is not ready.
+	RemoteIstioReasonIstiodNotReady RemoteIstioConditionReason = "IstiodNotReady"
+
+	// RemoteIstioReasonReadinessCheckFailed indicates that readiness could not be ascertained.
+	RemoteIstioReasonReadinessCheckFailed RemoteIstioConditionReason = "ReadinessCheckFailed"
+)
+
+const (
+	// RemoteIstioReasonHealthy indicates that the control plane is fully reconciled and that all components are ready.
+ RemoteIstioReasonHealthy RemoteIstioConditionReason = "Healthy" +) + +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope=Cluster,categories=istio-io +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Revisions",type="string",JSONPath=".status.revisions.total",description="Total number of IstioRevision objects currently associated with this object." +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.revisions.ready",description="Number of revisions that are ready." +// +kubebuilder:printcolumn:name="In use",type="string",JSONPath=".status.revisions.inUse",description="Number of revisions that are currently being used by workloads." +// +kubebuilder:printcolumn:name="Active Revision",type="string",JSONPath=".status.activeRevisionName",description="The name of the currently active revision." +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.state",description="The current state of the active revision." +// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version",description="The version of the control plane installation." +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="The age of the object" + +// RemoteIstio represents a remote Istio Service Mesh deployment consisting of one or more +// remote control plane instances (represented by one or more IstioRevision objects). +type RemoteIstio struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +kubebuilder:default={version: "v1.23.0", namespace: "istio-system", updateStrategy: {type:"InPlace"}} + Spec RemoteIstioSpec `json:"spec,omitempty"` + + Status RemoteIstioStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// RemoteIstioList contains a list of RemoteIstio +type RemoteIstioList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RemoteIstio `json:"items"` +} + +func init() { + SchemeBuilder.Register(&RemoteIstio{}, &RemoteIstioList{}) +} diff --git a/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/values_types.gen.go b/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/values_types.gen.go new file mode 100644 index 000000000..276fc28b4 --- /dev/null +++ b/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/values_types.gen.go @@ -0,0 +1,3642 @@ +// Copyright Istio Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by hack/api_transformer/main.go. DO NOT EDIT. + +package v1alpha1 + +import ( + json "encoding/json" + autoscalingv2 "k8s.io/api/autoscaling/v2" + k8sv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// Mode for the ingress controller. +// +kubebuilder:validation:Enum=UNSPECIFIED;DEFAULT;STRICT;OFF +type IngressControllerMode string + +const ( + // Unspecified Istio ingress controller. 
+	IngressControllerModeUnspecified IngressControllerMode = "UNSPECIFIED"
+	// Selects all Ingress resources, with or without the Istio annotation.
+	IngressControllerModeDefault IngressControllerMode = "DEFAULT"
+	// Selects only resources with the Istio annotation.
+	IngressControllerModeStrict IngressControllerMode = "STRICT"
+	// No ingress or sync.
+	IngressControllerModeOff IngressControllerMode = "OFF"
+)
+
+// Specifies which tracer to use.
+// +kubebuilder:validation:Enum=zipkin;lightstep;datadog;stackdriver;openCensusAgent;none
+type Tracer string
+
+const (
+	TracerZipkin Tracer = "zipkin"
+	TracerLightstep Tracer = "lightstep"
+	TracerDatadog Tracer = "datadog"
+	TracerStackdriver Tracer = "stackdriver"
+	TracerOpencensusagent Tracer = "openCensusAgent"
+	TracerNone Tracer = "none"
+)
+
+// Specifies the sidecar's default behavior when handling outbound traffic from the application.
+// +kubebuilder:validation:Enum=ALLOW_ANY;REGISTRY_ONLY
+type OutboundTrafficPolicyConfigMode string
+
+const (
+	// Outbound traffic to unknown destinations will be allowed, in case there are no services or ServiceEntries for the destination port.
+	OutboundTrafficPolicyConfigModeAllowAny OutboundTrafficPolicyConfigMode = "ALLOW_ANY"
+	// Restrict outbound traffic to services defined in the service registry as well as those defined through ServiceEntries.
+	OutboundTrafficPolicyConfigModeRegistryOnly OutboundTrafficPolicyConfigMode = "REGISTRY_ONLY"
+)
+
+// ArchConfig specifies the pod scheduling target architecture (amd64, ppc64le, s390x, arm64)
+// for all the Istio control plane components.
+type ArchConfig struct {
+	// Sets pod scheduling weight for amd64 arch.
+	Amd64 uint32 `json:"amd64,omitempty"`
+	// Sets pod scheduling weight for ppc64le arch.
+	Ppc64Le uint32 `json:"ppc64le,omitempty"`
+	// Sets pod scheduling weight for s390x arch.
+	S390X uint32 `json:"s390x,omitempty"`
+	// Sets pod scheduling weight for arm64 arch.
+	Arm64 uint32 `json:"arm64,omitempty"`
+}
+
+// Configuration for CNI.
+type CNIConfig struct {
+	// Hub to pull the container image from. Image will be `Hub/Image:Tag-Variant`.
+	Hub string `json:"hub,omitempty"`
+	// The container image tag to pull. Image will be `Hub/Image:Tag-Variant`.
+	Tag string `json:"tag,omitempty"`
+	// The container image variant to pull. Options are "debug" or "distroless". Unset will use the default for the given version.
+	Variant string `json:"variant,omitempty"`
+	// Image name to pull from. Image will be `Hub/Image:Tag-Variant`.
+	// If Image contains a "/", it will replace the entire `image` in the pod.
+	Image string `json:"image,omitempty"`
+	// Specifies the image pull policy. One of Always, Never, IfNotPresent.
+	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated.
+	//
+	// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+	// +kubebuilder:validation:Enum=Always;Never;IfNotPresent
+	PullPolicy *k8sv1.PullPolicy `json:"pullPolicy,omitempty"`
+	// The directory path within the cluster node's filesystem where the CNI binaries are to be installed. Typically /var/lib/cni/bin.
+	CniBinDir string `json:"cniBinDir,omitempty"`
+	// The directory path within the cluster node's filesystem where the CNI configuration files are to be installed. Typically /etc/cni/net.d.
+	CniConfDir string `json:"cniConfDir,omitempty"`
+	// The name of the CNI plugin configuration file. Defaults to istio-cni.conf.
+	CniConfFileName string `json:"cniConfFileName,omitempty"`
+	// The directory path within the cluster node's filesystem where network namespaces are located.
+	// Defaults to '/var/run/netns'; in minikube/docker/others it can be '/var/run/docker/netns'.
+	CniNetnsDir string `json:"cniNetnsDir,omitempty"`
+	// List of namespaces that should be ignored by the CNI plugin.
+	ExcludeNamespaces []string `json:"excludeNamespaces,omitempty"`
+	// K8s affinity to set on the istio-cni Pods. Can be used to exclude istio-cni from being scheduled on specified nodes.
+	Affinity *k8sv1.Affinity `json:"affinity,omitempty"`
+	// Additional annotations to apply to the istio-cni Pods.
+	//
+	// Deprecated: Marked as deprecated in pkg/apis/values_types.proto.
+	PodAnnotations map[string]string `json:"podAnnotations,omitempty"`
+	// PodSecurityPolicy cluster role. No longer used anywhere.
+	PspClusterRole string `json:"psp_cluster_role,omitempty"`
+
+	// Same as `global.logging.level`, but will override it if set.
+	Logging *GlobalLoggingConfig `json:"logging,omitempty"`
+	// Configuration for the CNI Repair controller.
+	Repair *CNIRepairConfig `json:"repair,omitempty"`
+	// Configure the plugin as a chained CNI plugin. When true, the configuration is added to the CNI chain; when false,
+	// the configuration is added as a standalone file in the CNI configuration directory.
+	Chained *bool `json:"chained,omitempty"`
+	// The resource quota configuration for the CNI DaemonSet.
+	ResourceQuotas *ResourceQuotas `json:"resource_quotas,omitempty"`
+	// The k8s resource requests and limits for the istio-cni Pods.
+	Resources *k8sv1.ResourceRequirements `json:"resources,omitempty"`
+	// No longer used for CNI. See: https://github.com/istio/istio/issues/49004
+	//
+	// Deprecated: Marked as deprecated in pkg/apis/values_types.proto.
+	Privileged *bool `json:"privileged,omitempty"`
+	// The Container seccompProfile.
+	//
+	// See: https://kubernetes.io/docs/tutorials/security/seccomp/
+	SeccompProfile *k8sv1.SeccompProfile `json:"seccompProfile,omitempty"`
+
+	// Specifies the CNI provider. Can be either "default" or "multus". When set to "multus", an additional
+	// NetworkAttachmentDefinition resource is deployed to the cluster to allow the istio-cni plugin to be
+	// invoked in a cluster using the Multus CNI plugin.
+	Provider string `json:"provider,omitempty"`
+	// The number of pods that can be unavailable during a rolling update of the CNI DaemonSet (see
+	// `updateStrategy.rollingUpdate.maxUnavailable` here:
+	// https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/daemon-set-v1/#DaemonSetSpec).
+	// May be specified as a number of pods or as a percent of the total number
+	// of pods at the start of the update.
+	// +kubebuilder:validation:XIntOrString
+	RollingMaxUnavailable *intstr.IntOrString `json:"rollingMaxUnavailable,omitempty"`
+}
+
+type CNIUsageConfig struct {
+	// Controls whether CNI should be used.
+	Enabled *bool `json:"enabled,omitempty"`
+
+	// Specifies the CNI provider. Can be either "default" or "multus". When set to "multus", an annotation
+	// `k8s.v1.cni.cncf.io/networks` is set on injected pods to point to a NetworkAttachmentDefinition.
+	Provider string `json:"provider,omitempty"`
+}
+
+type CNIAmbientConfig struct {
+	// Controls whether ambient redirection is enabled.
+	Enabled *bool `json:"enabled,omitempty"`
+	// The directory path containing the configuration files for Ambient. Defaults to /etc/ambient-config.
+	ConfigDir string `json:"configDir,omitempty"`
+	// If enabled, and ambient is enabled, DNS redirection will be enabled.
+	DnsCapture *bool `json:"dnsCapture,omitempty"`
+	// UNSTABLE: If enabled, and ambient is enabled, enables IPv6 support.
+	Ipv6 *bool `json:"ipv6,omitempty"`
+}
+
+type CNIRepairConfig struct {
+	// Controls whether repair behavior is enabled.
+	Enabled *bool `json:"enabled,omitempty"`
+	// Hub to pull the container image from. Image will be `Hub/Image:Tag-Variant`.
+	Hub string `json:"hub,omitempty"`
+	// The container image tag to pull. Image will be `Hub/Image:Tag-Variant`.
+	Tag string `json:"tag,omitempty"`
+	// Image name to pull from. Image will be `Hub/Image:Tag-Variant`.
+	// If Image contains a "/", it will replace the entire `image` in the pod.
+	Image string `json:"image,omitempty"`
+	// The Repair controller has 3 modes (labelPods, deletePods, and repairPods). Pick which one meets your use cases. Note only one may be used.
+	// The mode defines the action the controller will take when a pod is detected as broken.
+	// If labelPods is true, the controller will label all broken pods with `brokenPodLabelKey=brokenPodLabelValue`.
+	// This is only capable of identifying broken pods; the user is responsible for fixing them (generally, by deleting them).
+	// Note this gives the DaemonSet a relatively high privilege, as modifying pod metadata/status can have wider impacts.
+	LabelPods bool `json:"labelPods,omitempty"`
+	// The Repair controller has 3 modes (labelPods, deletePods, and repairPods). Pick which one meets your use cases. Note only one may be used.
+	// The mode defines the action the controller will take when a pod is detected as broken.
+	// If repairPods is true, the controller will dynamically repair any broken pod by setting up the pod networking configuration even after it has started.
+	// Note the pod will be crashlooping, so this may take a few minutes to become fully functional based on when the retry occurs.
+	// This requires no RBAC privilege, but will require the CNI agent to run as a privileged pod.
+	RepairPods bool `json:"repairPods,omitempty"`
+	// No longer used.
+	//
+	// Deprecated: Marked as deprecated in pkg/apis/values_types.proto.
+	CreateEvents string `json:"createEvents,omitempty"`
+	// The Repair controller has 3 modes (labelPods, deletePods, and repairPods). Pick which one meets your use cases. Note only one may be used.
+	// The mode defines the action the controller will take when a pod is detected as broken.
+	// If deletePods is true, the controller will delete the broken pod. The pod will then be rescheduled, hopefully onto a node that is fully ready.
+	// Note this gives the DaemonSet a relatively high privilege, as it can delete any Pod.
+	DeletePods bool `json:"deletePods,omitempty"`
+	// The label key to apply to a broken pod when the controller is in labelPods mode.
+	BrokenPodLabelKey string `json:"brokenPodLabelKey,omitempty"`
+	// The label value to apply to a broken pod when the controller is in labelPods mode.
+	BrokenPodLabelValue string `json:"brokenPodLabelValue,omitempty"`
+	// The name of the init container to use for the repairPods mode.
+	InitContainerName string `json:"initContainerName,omitempty"`
+}
+
+// Configuration for the resource quotas for the CNI DaemonSet.
+type ResourceQuotas struct {
+	// Controls whether to create resource quotas or not for the CNI DaemonSet.
+	Enabled *bool `json:"enabled,omitempty"`
+	// The hard limit on the number of pods in the namespace where the CNI DaemonSet is deployed.
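+	//
+	// An illustrative Helm-values fragment (keys follow the json tags in
+	// this type and its parent CNIConfig field; the numbers are placeholders):
+	//
+	//	resource_quotas:
+	//	  enabled: true
+	//	  pods: 5000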
+	Pods int64 `json:"pods,omitempty"`
+}
+
+// Configuration for CPU or memory target utilization for HorizontalPodAutoscaler target.
+type TargetUtilizationConfig struct {
+	// K8s utilization setting for HorizontalPodAutoscaler target.
+	//
+	// See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
+	TargetAverageUtilization int32 `json:"targetAverageUtilization,omitempty"`
+}
+
+// DefaultPodDisruptionBudgetConfig specifies the default pod disruption budget configuration.
+//
+// See https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+type DefaultPodDisruptionBudgetConfig struct {
+	// Controls whether a PodDisruptionBudget with a default minAvailable value of 1 is created for each deployment.
+	Enabled *bool `json:"enabled,omitempty"`
+}
+
+// Global Configuration for Istio components.
+type GlobalConfig struct {
+	// Specifies pod scheduling arch (amd64, ppc64le, s390x, arm64) and weight as follows:
+	//
+	// 0 - Never scheduled
+	// 1 - Least preferred
+	// 2 - No preference
+	// 3 - Most preferred
+	//
+	// Deprecated: replaced by the affinity k8s settings, which allow architecture nodeAffinity configuration of this behavior.
+	//
+	// Deprecated: Marked as deprecated in pkg/apis/values_types.proto.
+	Arch *ArchConfig `json:"arch,omitempty"`
+	// List of certSigners to allow "approve" action in the ClusterRole.
+	CertSigners []string `json:"certSigners,omitempty"`
+	// Controls whether the server-side validation is enabled.
+	ConfigValidation *bool `json:"configValidation,omitempty"`
+	// Default k8s node selector for all the Istio control plane components.
+	//
+	// See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+	//
+	// Deprecated: Marked as deprecated in pkg/apis/values_types.proto.
+	DefaultNodeSelector map[string]string `json:"defaultNodeSelector,omitempty"`
+	// Specifies the default pod disruption budget configuration.
+	//
+	// Deprecated: Marked as deprecated in pkg/apis/values_types.proto.
+	DefaultPodDisruptionBudget *DefaultPodDisruptionBudgetConfig `json:"defaultPodDisruptionBudget,omitempty"`
+	// Default k8s resources settings for all Istio control plane components.
+	//
+	// See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container
+	//
+	// Deprecated: Marked as deprecated in pkg/apis/values_types.proto.
+	DefaultResources *k8sv1.ResourceRequirements `json:"defaultResources,omitempty"`
+	// Default node tolerations to be applied to all deployments so that all pods can be
+	// scheduled to nodes with matching taints. Each component can overwrite
+	// these default values by adding its tolerations block in the relevant section below
+	// and setting the desired values.
+	// Configure this field in case all pods of the Istio control plane are expected to
+	// be scheduled to particular nodes with specified taints.
+	//
+	// Deprecated: Marked as deprecated in pkg/apis/values_types.proto.
+	DefaultTolerations []k8sv1.Toleration `json:"defaultTolerations,omitempty"`
+	// Specifies the docker hub for Istio images.
+	Hub string `json:"hub,omitempty"`
+	// Specifies the image pull policy for the Istio images. One of Always, Never, IfNotPresent.
+	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated.
+	//
+	// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+	// +kubebuilder:validation:Enum=Always;Never;IfNotPresent
+	ImagePullPolicy *k8sv1.PullPolicy `json:"imagePullPolicy,omitempty"`
+	// ImagePullSecrets for the control plane ServiceAccount, list of secrets in the same namespace
+	// to use for pulling any images in pods that reference this ServiceAccount.
+	// Must be set for any cluster configured with a private docker registry.
+	ImagePullSecrets []string `json:"imagePullSecrets,omitempty"`
+	// Specifies the default namespace for the Istio control plane components.
+	IstioNamespace string `json:"istioNamespace,omitempty"`
+	// Specifies whether istio components should output logs in json format by adding --log_as_json argument to each container.
+	LogAsJson *bool `json:"logAsJson,omitempty"`
+	// Specifies the global logging level settings for the Istio control plane components.
+	Logging *GlobalLoggingConfig `json:"logging,omitempty"`
+	// The Mesh Identifier. It should be unique within the scope where
+	// meshes will interact with each other, but it is not required to be
+	// globally/universally unique. For example, if any of the following are true,
+	// then two meshes must have different Mesh IDs:
+	// - Meshes will have their telemetry aggregated in one place
+	// - Meshes will be federated together
+	// - Policy will be written referencing one mesh from the other
+	//
+	// If an administrator expects that any of these conditions may become true in
+	// the future, they should ensure their meshes have different Mesh IDs
+	// assigned.
+	//
+	// Within a multicluster mesh, each cluster must be (manually or auto)
+	// configured to have the same Mesh ID value. If an existing cluster 'joins' a
+	// multicluster mesh, it will need to be migrated to the new mesh ID. Details
+	// of migration TBD, and it may be a disruptive operation to change the Mesh
+	// ID post-install.
+	//
+	// If the mesh admin does not specify a value, Istio will use the value of the
+	// mesh's Trust Domain. The best practice is to select a proper Trust Domain
+	// value.
+	MeshID string `json:"meshID,omitempty"`
+	// Configure the mesh networks to be used by the Split Horizon EDS.
+	//
+	// The following example defines two networks with different endpoint association methods.
+	// For `network1`, all endpoints whose IPs belong to the provided CIDR range will be
+	// mapped to network1. The gateway for this network example is specified by its public IP
+	// address and port.
+	// The second network, `network2`, is defined differently: all endpoints retrieved through
+	// the specified Multi-Cluster registry are mapped to network2. The
+	// gateway is also defined differently, by the name of the gateway service on the remote
+	// cluster. The public IP for the gateway will be determined from that remote service (only
+	// the LoadBalancer gateway service type is currently supported; a NodePort-type gateway
+	// service still needs to be configured manually).
+	//
+	// meshNetworks:
+	//
+	//	network1:
+	//	  endpoints:
+	//	  - fromCidr: "192.168.0.1/24"
+	//	  gateways:
+	//	  - address: 1.1.1.1
+	//	    port: 80
+	//	network2:
+	//	  endpoints:
+	//	  - fromRegistry: reg1
+	//	  gateways:
+	//	  - registryServiceName: istio-ingressgateway.istio-system.svc.cluster.local
+	//	    port: 443
+	MeshNetworks map[string]Network `json:"meshNetworks,omitempty"`
+	// Specifies the Configuration for Istio mesh across multiple clusters through Istio gateways.
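+	// (See the MultiClusterConfig type below; as a minimal illustrative
+	// setup, multiCluster.clusterName names this cluster and the sibling
+	// `network` field maps it into meshNetworks above.)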
+	MultiCluster *MultiClusterConfig `json:"multiCluster,omitempty"`
+	// Network defines the network this cluster belongs to. This name
+	// corresponds to the networks in the map of mesh networks.
+	Network string `json:"network,omitempty"`
+	// Custom DNS config for the pod to resolve names of services in other
+	// clusters. Use this to add additional search domains, and other settings.
+	// See https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#dns-config
+	// This does not apply to gateway pods as they typically need a different
+	// set of DNS settings than the normal application pods (e.g. in multicluster scenarios).
+	PodDNSSearchNamespaces []string `json:"podDNSSearchNamespaces,omitempty"`
+	// Controls whether the creation of the sidecar injector ConfigMap should be skipped.
+	// Defaults to false. When set to true, the sidecar injector ConfigMap will not be created.
+	OmitSidecarInjectorConfigMap *bool `json:"omitSidecarInjectorConfigMap,omitempty"`
+	// Controls whether the WebhookConfiguration resource(s) should be created. The current behavior
+	// of Istiod is to manage its own webhook configurations.
+	// When this option is set to true, Istio Operator, instead of webhooks, manages the
+	// webhook configurations. When this option is set to false, webhooks manage their
+	// own webhook configurations.
+	OperatorManageWebhooks *bool `json:"operatorManageWebhooks,omitempty"`
+	// Specifies the k8s priorityClassName for the istio control plane components.
+	//
+	// See https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
+	//
+	// Deprecated: Marked as deprecated in pkg/apis/values_types.proto.
+	PriorityClassName string `json:"priorityClassName,omitempty"`
+	// Specifies how proxies are configured within Istio.
+	Proxy *ProxyConfig `json:"proxy,omitempty"`
+	// Specifies the Configuration for the proxy_init container, which sets the pods' networking to intercept the inbound/outbound traffic.
+	ProxyInit *ProxyInitConfig `json:"proxy_init,omitempty"`
+	// Specifies the Configuration for the SecretDiscoveryService instead of using K8S secrets to mount the certificates.
+	Sds *SDSConfig `json:"sds,omitempty"`
+	// Specifies the tag for the Istio docker images.
+	Tag string `json:"tag,omitempty"`
+	// The variant of the Istio container images to use. Options are "debug" or "distroless". Unset will use the default for the given version.
+	Variant string `json:"variant,omitempty"`
+	// Specifies the Configuration for each of the supported tracers.
+	Tracer *TracerConfig `json:"tracer,omitempty"`
+	// Specifies the Istio control plane’s pilot Pod IP address or remote cluster DNS resolvable hostname.
+	RemotePilotAddress string `json:"remotePilotAddress,omitempty"`
+	// Specifies the configuration of istiod.
+	Istiod *IstiodConfig `json:"istiod,omitempty"`
+	// Configure the Pilot certificate provider.
+	// Currently, four providers are supported: "kubernetes", "istiod", "custom" and "none".
+	PilotCertProvider string `json:"pilotCertProvider,omitempty"`
+	// Configure the policy for validating JWT.
+	// This is deprecated and has no effect.
+	//
+	// Deprecated: Marked as deprecated in pkg/apis/values_types.proto.
+	JwtPolicy string `json:"jwtPolicy,omitempty"`
+	// Specifies the configuration for Security Token Service.
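+	// (The STSConfig type below currently exposes a single servicePort;
+	// see the token-exchange draft linked on that type.)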
+	Sts *STSConfig `json:"sts,omitempty"`
+	// Configures the revision this control plane is a part of.
+	Revision string `json:"revision,omitempty"`
+	// Controls whether the in-cluster MTLS key and certs are loaded from the secret volume mounts.
+	MountMtlsCerts *bool `json:"mountMtlsCerts,omitempty"`
+	// The address of the CA for CSR.
+	CaAddress string `json:"caAddress,omitempty"`
+	// Controls whether one external istiod is enabled.
+	ExternalIstiod *bool `json:"externalIstiod,omitempty"`
+	// Controls whether a remote cluster is the config cluster for an external istiod.
+	ConfigCluster *bool `json:"configCluster,omitempty"`
+	// The name of the CA for workloads.
+	// For example, when caName=GkeWorkloadCertificate, GKE workload certificates
+	// will be used as the certificates for workloads.
+	// The default value is "" and when caName="", the CA will be configured by other
+	// mechanisms (e.g., environmental variable CA_PROVIDER).
+	CaName string `json:"caName,omitempty"`
+	// Platform in which Istio is deployed. Possible values are: "openshift" and "gcp".
+	// An empty value means it is a vanilla Kubernetes distribution, therefore no special
+	// treatment will be considered.
+	Platform string `json:"platform,omitempty"`
+	// Defines which IP family to use for single stack or the order of IP families for dual-stack.
+	// Valid list items are "IPv4", "IPv6".
+	// More info: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services
+	IpFamilies []string `json:"ipFamilies,omitempty"`
+	// Controls whether Services are configured to use IPv4, IPv6, or both. Valid options
+	// are PreferDualStack, RequireDualStack, and SingleStack.
+	// More info: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services
+	IpFamilyPolicy string `json:"ipFamilyPolicy,omitempty"`
+	// Specifies how waypoints are configured within Istio.
+	Waypoint *WaypointConfig `json:"waypoint,omitempty"` // The next available key is 73
+}
+
+// Configuration for Security Token Service (STS) server.
+//
+// See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16
+type STSConfig struct {
+	ServicePort uint32 `json:"servicePort,omitempty"`
+}
+
+type IstiodConfig struct {
+	// If enabled, istiod will perform config analysis.
+	EnableAnalysis *bool `json:"enableAnalysis,omitempty"`
+}
+
+// GlobalLoggingConfig specifies the global logging level settings for the Istio control plane components.
+type GlobalLoggingConfig struct {
+	// Comma-separated minimum per-scope logging level of messages to output, in the form of `<scope>:<level>,<scope>:<level>`.
+	// The control plane has different scopes depending on component, but can configure a default log level across all components.
+	// If empty, the default scope and level are used, as configured in code.
+	Level string `json:"level,omitempty"`
+}
+
+// MultiClusterConfig specifies the Configuration for Istio mesh across multiple clusters through the istio gateways.
+type MultiClusterConfig struct {
+	// Enables the connection between two kubernetes clusters via their respective ingressgateway services.
+	// Use if the pods in each cluster cannot directly talk to one another.
+	Enabled *bool `json:"enabled,omitempty"`
+	// The name of the cluster this installation will run in. This is required for sidecar injection
+	// to properly label proxies.
+	ClusterName string `json:"clusterName,omitempty"`
+	// The suffix for global service names.
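+	// For example (illustrative, not normative): with a suffix of "global",
+	// service foo in namespace ns would be addressed as foo.ns.global from
+	// the peer cluster.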
+ GlobalDomainSuffix string `json:"globalDomainSuffix,omitempty"` + // Enable envoy filter to translate `globalDomainSuffix` to cluster local suffix for cross cluster communication. + IncludeEnvoyFilter *bool `json:"includeEnvoyFilter,omitempty"` +} + +// OutboundTrafficPolicyConfig controls the default behavior of the sidecar for handling outbound traffic from the application. +type OutboundTrafficPolicyConfig struct { + Mode OutboundTrafficPolicyConfigMode `json:"mode,omitempty"` +} + +// Configuration for Pilot. +type PilotConfig struct { + // Controls whether Pilot is enabled. + Enabled *bool `json:"enabled,omitempty"` + // Controls whether a HorizontalPodAutoscaler is installed for Pilot. + AutoscaleEnabled *bool `json:"autoscaleEnabled,omitempty"` + // Minimum number of replicas in the HorizontalPodAutoscaler for Pilot. + AutoscaleMin uint32 `json:"autoscaleMin,omitempty"` + // Maximum number of replicas in the HorizontalPodAutoscaler for Pilot. + AutoscaleMax uint32 `json:"autoscaleMax,omitempty"` + // See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#configurable-scaling-behavior + AutoscaleBehavior *autoscalingv2.HorizontalPodAutoscalerBehavior `json:"autoscaleBehavior,omitempty"` + // Number of replicas in the Pilot Deployment. + // + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. + ReplicaCount uint32 `json:"replicaCount,omitempty"` + // Image name used for Pilot. + // + // This can be set either to image name if hub is also set, or can be set to the full hub:name string. + // + // Examples: custom-pilot, docker.io/someuser:custom-pilot + Image string `json:"image,omitempty"` + // Trace sampling fraction. + // + // Used to set the fraction of time that traces are sampled. Higher values are more accurate but add CPU overhead. + // + // Allowed values: 0.0 to 1.0 + TraceSampling float64 `json:"traceSampling,omitempty"` + // K8s resources settings. + // + // See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container + // + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. + Resources *k8sv1.ResourceRequirements `json:"resources,omitempty"` + // Target CPU utilization used in HorizontalPodAutoscaler. + // + // See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ + // + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. + Cpu *TargetUtilizationConfig `json:"cpu,omitempty"` + // K8s node selector. + // + // See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + // + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // Maximum duration that a sidecar can be connected to a pilot. + // + // This setting balances out load across pilot instances, but adds some resource overhead. + // + // Examples: 300s, 30m, 1h + KeepaliveMaxServerConnectionAge *metav1.Duration `json:"keepaliveMaxServerConnectionAge,omitempty"` + // Labels that are added to Pilot deployment. + // + // See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + DeploymentLabels map[string]string `json:"deploymentLabels,omitempty"` + // Labels that are added to Pilot pods. + // + // See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + PodLabels map[string]string `json:"podLabels,omitempty"` + // Configuration settings passed to Pilot as a ConfigMap. 
+	//
+	// This controls whether the mesh ConfigMap, generated from values.yaml, is created.
+	// If false, pilot will use default values or user-supplied values, in that order of preference.
+	ConfigMap *bool `json:"configMap,omitempty"`
+	// Environment variables passed to the Pilot container.
+	//
+	// Examples:
+	// env:
+	//
+	//	ENV_VAR_1: value1
+	//	ENV_VAR_2: value2
+	Env map[string]string `json:"env,omitempty"`
+	// K8s affinity to set on the Pilot Pods.
+	Affinity *k8sv1.Affinity `json:"affinity,omitempty"`
+	// K8s rolling update strategy.
+	//
+	// Deprecated: Marked as deprecated in pkg/apis/values_types.proto.
+	// +kubebuilder:validation:XIntOrString
+	RollingMaxSurge *intstr.IntOrString `json:"rollingMaxSurge,omitempty"`
+	// The number of pods that can be unavailable during a rolling update (see
+	// `strategy.rollingUpdate.maxUnavailable` here:
+	// https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/deployment-v1/#DeploymentSpec).
+	// May be specified as a number of pods or as a percent of the total number
+	// of pods at the start of the update.
+	//
+	// Deprecated: Marked as deprecated in pkg/apis/values_types.proto.
+	// +kubebuilder:validation:XIntOrString
+	RollingMaxUnavailable *intstr.IntOrString `json:"rollingMaxUnavailable,omitempty"`
+	// The node tolerations to be applied to the Pilot deployment so that it can be
+	// scheduled to particular nodes with matching taints.
+	// More info: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling
+	//
+	// Deprecated: Marked as deprecated in pkg/apis/values_types.proto.
+	Tolerations []k8sv1.Toleration `json:"tolerations,omitempty"`
+	// K8s annotations for pods.
+	//
+	// See: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+	//
+	// Deprecated: Marked as deprecated in pkg/apis/values_types.proto.
+	PodAnnotations map[string]string `json:"podAnnotations,omitempty"`
+	// K8s annotations for the Service.
+	//
+	// See: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+	ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"`
+	// K8s annotations for the service account.
+	ServiceAccountAnnotations map[string]string `json:"serviceAccountAnnotations,omitempty"`
+	// Specifies an extra root certificate in PEM format. This certificate will be trusted
+	// by pilot when resolving JWKS URIs.
+	JwksResolverExtraRootCA string `json:"jwksResolverExtraRootCA,omitempty"`
+	// Hub to pull the container image from. Image will be `Hub/Image:Tag-Variant`.
+	Hub string `json:"hub,omitempty"`
+	// The container image tag to pull. Image will be `Hub/Image:Tag-Variant`.
+	Tag string `json:"tag,omitempty"`
+	// The container image variant to pull. Options are "debug" or "distroless". Unset will use the default for the given version.
+	Variant string `json:"variant,omitempty"`
+	// The seccompProfile for the Pilot container.
+	//
+	// See: https://kubernetes.io/docs/tutorials/security/seccomp/
+	SeccompProfile *k8sv1.SeccompProfile `json:"seccompProfile,omitempty"`
+	// The k8s topologySpreadConstraints for the Pilot pods.
+	TopologySpreadConstraints []k8sv1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
+	// Additional container arguments for the Pilot container.
+	ExtraContainerArgs []string `json:"extraContainerArgs,omitempty"`
+	// Additional volumeMounts to add to the Pilot container.
+	VolumeMounts []k8sv1.VolumeMount `json:"volumeMounts,omitempty"`
+	// Additional volumes to add to the Pilot Pod.
+	Volumes []k8sv1.Volume `json:"volumes,omitempty"`
+	// Defines which IP family to use for single stack or the order of IP families for dual-stack.
+	// Valid list items are "IPv4", "IPv6".
+	// More info: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services
+	IpFamilies []string `json:"ipFamilies,omitempty"`
+	// Controls whether Services are configured to use IPv4, IPv6, or both. Valid options
+	// are PreferDualStack, RequireDualStack, and SingleStack.
+	// More info: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services
+	IpFamilyPolicy string `json:"ipFamilyPolicy,omitempty"`
+	// Target memory utilization used in HorizontalPodAutoscaler.
+	//
+	// See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
+	//
+	// Deprecated: Marked as deprecated in pkg/apis/values_types.proto.
+	Memory *TargetUtilizationConfig `json:"memory,omitempty"`
+	// Configures whether to use an existing CNI installation for workloads.
+	Cni *CNIUsageConfig `json:"cni,omitempty"`
+	Taint *PilotTaintControllerConfig `json:"taint,omitempty"`
+	// If set, `istiod` will allow connections from trusted node proxy ztunnels
+	// in the provided namespace.
+	TrustedZtunnelNamespace string `json:"trustedZtunnelNamespace,omitempty"`
+}
+
+type PilotTaintControllerConfig struct {
+	// Enable the untaint controller for new nodes. This aims to solve a race for CNI installation on
+	// new nodes. For this to work, the newly added nodes need to have the istio CNI taint as they are
+	// added to the cluster. This is usually done by configuring the cluster infra provider.
+	Enabled bool `json:"enabled,omitempty"`
+	// The namespace of the CNI daemonset, in case it is not the same as istiod.
+	Namespace string `json:"namespace,omitempty"`
+}
+
+// Controls whether Istio policy is applied to Pilot.
+type PilotPolicyConfig struct {
+	// Controls whether Istio policy is applied to Pilot.
+	Enabled *bool `json:"enabled,omitempty"`
+}
+
+// Controls telemetry configuration.
+type TelemetryConfig struct {
+	// Controls whether telemetry is exported for Pilot.
+	Enabled *bool `json:"enabled,omitempty"`
+	// Configuration for Telemetry v2.
+	V2 *TelemetryV2Config `json:"v2,omitempty"`
+}
+
+// Controls whether pilot will configure telemetry v2.
+type TelemetryV2Config struct {
+	// Controls whether pilot will configure telemetry v2.
+	Enabled *bool `json:"enabled,omitempty"`
+	// Telemetry v2 settings for prometheus.
+	Prometheus *TelemetryV2PrometheusConfig `json:"prometheus,omitempty"`
+	// Telemetry v2 settings for stackdriver.
+	Stackdriver *TelemetryV2StackDriverConfig `json:"stackdriver,omitempty"`
+}
+
+// Controls telemetry v2 prometheus settings.
+type TelemetryV2PrometheusConfig struct {
+	// Controls whether the stats EnvoyFilter is enabled.
+	Enabled *bool `json:"enabled,omitempty"`
+}
+
+// TelemetryV2StackDriverConfig controls telemetry v2 stackdriver settings.
+type TelemetryV2StackDriverConfig struct {
+	Enabled *bool `json:"enabled,omitempty"`
+}
+
+// Configuration for a port.
+type PortsConfig struct {
+	// Port name.
+	Name string `json:"name,omitempty"`
+	// Port number.
+	Port int32 `json:"port,omitempty"`
+	// NodePort number.
+	NodePort int32 `json:"nodePort,omitempty"`
+	// Target port number.
+	TargetPort int32 `json:"targetPort,omitempty"`
+	// Protocol name.
+ Protocol string `json:"protocol,omitempty"`
+}
+
+// Configuration for Proxy.
+type ProxyConfig struct {
+ // Controls the 'policy' in the sidecar injector.
+ AutoInject string `json:"autoInject,omitempty"`
+ // Domain for the cluster, default: "cluster.local".
+ //
+ // K8s allows this to be customized, see https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/
+ ClusterDomain string `json:"clusterDomain,omitempty"`
+ // Per-component log level for the proxy, applies to gateways and sidecars.
+ //
+ // If a component level is not set, then the global "logLevel" will be used. If left empty, "misc:error" is used.
+ ComponentLogLevel string `json:"componentLogLevel,omitempty"`
+ // Enables core dumps for newly injected sidecars.
+ //
+ // If set, newly injected sidecars will have core dumps enabled.
+ EnableCoreDump *bool `json:"enableCoreDump,omitempty"`
+ // Specifies the Istio ingress ports not to capture.
+ ExcludeInboundPorts string `json:"excludeInboundPorts,omitempty"`
+ // Lists the excluded IP ranges of Istio egress traffic that the sidecar captures.
+ ExcludeIPRanges string `json:"excludeIPRanges,omitempty"`
+ // Image name or path for the proxy, default: "proxyv2".
+ //
+ // If registry or tag are not specified, global.hub and global.tag are used.
+ //
+ // Examples: my-proxy (uses global.hub/tag), docker.io/myrepo/my-proxy:v1.0.0
+ Image string `json:"image,omitempty"`
+ // Lists the IP ranges of Istio egress traffic that the sidecar captures.
+ //
+ // Example: "172.30.0.0/16,172.20.0.0/16"
+ // This would only capture egress traffic on those two IP ranges; all other outbound traffic would be allowed by the sidecar.
+ IncludeIPRanges string `json:"includeIPRanges,omitempty"`
+ // Log level for the proxy, applies to gateways and sidecars. If left empty, "warning" is used. Expected values are: trace|debug|info|warning|error|critical|off
+ LogLevel string `json:"logLevel,omitempty"`
+ // Path to the file to which the proxy will write outlier detection logs.
+ //
+ // Example: "/dev/stdout"
+ // This would write the logs to standard output.
+ OutlierLogPath string `json:"outlierLogPath,omitempty"`
+ // Enables privileged securityContext for the istio-proxy container.
+ //
+ // See https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ Privileged *bool `json:"privileged,omitempty"`
+ // Sets the initial delay for readiness probes in seconds.
+ ReadinessInitialDelaySeconds uint32 `json:"readinessInitialDelaySeconds,omitempty"`
+ // Sets the interval between readiness probes in seconds.
+ ReadinessPeriodSeconds uint32 `json:"readinessPeriodSeconds,omitempty"`
+ // Sets the number of successive failed probes before indicating readiness failure.
+ ReadinessFailureThreshold uint32 `json:"readinessFailureThreshold,omitempty"`
+ // Configures the startup probe for the istio-proxy container.
+ StartupProbe *StartupProbe `json:"startupProbe,omitempty"`
+ // Default port used for the Pilot agent's health checks.
+ StatusPort uint32 `json:"statusPort,omitempty"`
+ // K8s resources settings.
+ //
+ // See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container
+ //
+ // Deprecated: Marked as deprecated in pkg/apis/values_types.proto.
+ Resources *k8sv1.ResourceRequirements `json:"resources,omitempty"`
+ // Specify which tracer to use. One of: zipkin, lightstep, datadog, stackdriver.
+ // If using the stackdriver tracer outside GCP, set the env GOOGLE_APPLICATION_CREDENTIALS to the GCP credential file.
+ Tracer Tracer `json:"tracer,omitempty"`
+ // A comma-separated list of outbound ports to be excluded from redirection to Envoy.
+ ExcludeOutboundPorts string `json:"excludeOutboundPorts,omitempty"`
+ // The k8s lifecycle hooks definition (pod.spec.containers.lifecycle) for the proxy container.
+ // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ Lifecycle *k8sv1.Lifecycle `json:"lifecycle,omitempty"`
+ // Controls whether the sidecar is injected at the front of the container list and blocks the start of the other containers until the proxy is ready.
+ //
+ // Deprecated: replaced by ProxyConfig setting which allows per-pod configuration of this behavior.
+ //
+ // Deprecated: Marked as deprecated in pkg/apis/values_types.proto.
+ HoldApplicationUntilProxyStarts *bool `json:"holdApplicationUntilProxyStarts,omitempty"`
+ // A comma-separated list of inbound ports for which traffic is to be redirected to Envoy.
+ // The wildcard character '*' can be used to configure redirection for all ports.
+ IncludeInboundPorts string `json:"includeInboundPorts,omitempty"`
+ // A comma-separated list of outbound ports for which traffic is to be redirected to Envoy, regardless of the destination IP.
+ IncludeOutboundPorts string `json:"includeOutboundPorts,omitempty"`
+}
+
+type StartupProbe struct {
+ // Enables or disables a startup probe.
+ // For optimal startup times, changing this should be tied to the readiness probe values.
+ //
+ // If the probe is enabled, it is recommended to have delay=0s,period=15s,failureThreshold=4.
+ // This ensures the pod is marked ready immediately after the startup probe passes (which has a 1s poll interval),
+ // and doesn't spam the readiness endpoint too much.
+ //
+ // If the probe is disabled, it is recommended to have delay=1s,period=2s,failureThreshold=30.
+ // This ensures the startup is reasonably fast (polling every 2s). A 1s delay is used since startup is usually not ready instantly.
+ Enabled *bool `json:"enabled,omitempty"`
+ // Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ FailureThreshold uint32 `json:"failureThreshold,omitempty"`
+}
+
+// Configuration for the proxy_init container, which sets up the pod's networking to intercept inbound/outbound traffic.
+type ProxyInitConfig struct {
+ // Specifies the image for the proxy_init container.
+ Image string `json:"image,omitempty"`
+ // K8s resources settings.
+ //
+ // See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container
+ //
+ // Deprecated: Marked as deprecated in pkg/apis/values_types.proto.
+ Resources *k8sv1.ResourceRequirements `json:"resources,omitempty"`
+}
+
+// Configuration for K8s resource requests.
+type ResourcesRequestsConfig struct {
+ // CPU requests.
+ Cpu string `json:"cpu,omitempty"`
+ // Memory requests.
+ Memory string `json:"memory,omitempty"`
+}
+
+// Configuration for the SecretDiscoveryService instead of using K8S secrets to mount the certificates.
+type SDSConfig struct {
+ // Deprecated: Marked as deprecated in pkg/apis/values_types.proto.
+ Token *SDSConfigToken `json:"token,omitempty"`
+}
+
+// Configuration for secret volume mounts.
+//
+// See https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets.
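+//
+// For illustration only, a hypothetical entry as it might appear under a
+// gateway's secretVolumes list (all names are assumptions, not shipped defaults):
+//
+// - name: ingressgateway-certs
+//   secretName: istio-ingressgateway-certs
+//   mountPath: /etc/istio/ingressgateway-certs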
+type SecretVolume struct {
+ MountPath string `json:"mountPath,omitempty"`
+ Name string `json:"name,omitempty"`
+ SecretName string `json:"secretName,omitempty"`
+}
+
+// SidecarInjectorConfig is described in istio.io documentation.
+type SidecarInjectorConfig struct {
+ // Enables sidecar auto-injection in namespaces by default.
+ EnableNamespacesByDefault *bool `json:"enableNamespacesByDefault,omitempty"`
+ // Setting this to `IfNeeded` will result in the sidecar injector being run again if additional mutations occur. Default: Never
+ ReinvocationPolicy string `json:"reinvocationPolicy,omitempty"`
+ // Instructs Istio to not inject the sidecar on those pods, based on labels present on those pods.
+ //
+ // Annotations in the pods have higher precedence than the label selectors.
+ // Order of evaluation: Pod Annotations → NeverInjectSelector → AlwaysInjectSelector → Default Policy.
+ // See https://istio.io/docs/setup/kubernetes/additional-setup/sidecar-injection/#more-control-adding-exceptions
+ NeverInjectSelector []metav1.LabelSelector `json:"neverInjectSelector,omitempty"`
+ // See NeverInjectSelector.
+ AlwaysInjectSelector []metav1.LabelSelector `json:"alwaysInjectSelector,omitempty"`
+ // If true, the webhook or istioctl injector will rewrite the PodSpec for liveness health checks to redirect requests to the sidecar. This makes liveness checks work even when mTLS is enabled.
+ RewriteAppHTTPProbe *bool `json:"rewriteAppHTTPProbe,omitempty"`
+ // InjectedAnnotations are additional annotations that will be added to the pod spec after injection.
+ // This is primarily to support PSP annotations.
+ InjectedAnnotations map[string]string `json:"injectedAnnotations,omitempty"`
+ // Configure the injection URL for the sidecar injector webhook.
+ InjectionURL string `json:"injectionURL,omitempty"`
+ // Templates defines a set of custom injection templates that can be used. For example, defining:
+ //
+ // templates:
+ //
+ // hello: |
+ // metadata:
+ // labels:
+ // hello: world
+ //
+ // Then starting a pod with the `inject.istio.io/templates: hello` annotation will result in the pod
+ // being injected with the hello=world labels.
+ // This is intended for advanced configuration only; most users should use the built-in template.
+ Templates map[string]string `json:"templates,omitempty"`
+ // Example: defaultTemplates: ["sidecar", "hello"]
+ DefaultTemplates []string `json:"defaultTemplates,omitempty"`
+}
+
+// Configuration for each of the supported tracers.
+type TracerConfig struct {
+ // Configuration for the datadog tracing service.
+ Datadog *TracerDatadogConfig `json:"datadog,omitempty"`
+ // Configuration for the lightstep tracing service.
+ Lightstep *TracerLightStepConfig `json:"lightstep,omitempty"`
+ // Configuration for the zipkin tracing service.
+ Zipkin *TracerZipkinConfig `json:"zipkin,omitempty"`
+ // Configuration for the stackdriver tracing service.
+ Stackdriver *TracerStackdriverConfig `json:"stackdriver,omitempty"`
+}
+
+// Configuration for the datadog tracing service.
+type TracerDatadogConfig struct {
+ // Address in host:port format for reporting trace data to the Datadog agent.
+ Address string `json:"address,omitempty"`
+}
+
+// Configuration for the lightstep tracing service.
+type TracerLightStepConfig struct {
+ // Sets the lightstep satellite pool address in host:port format for reporting trace data.
+ Address string `json:"address,omitempty"`
+ // Sets the lightstep access token.
+ AccessToken string `json:"accessToken,omitempty"`
+}
+
+// Configuration for the zipkin tracing service.
+type TracerZipkinConfig struct {
+ // Address of the zipkin instance in host:port format for reporting trace data.
+ //
+ // Example: <zipkin-collector-service>.<zipkin-collector-namespace>:941
+ Address string `json:"address,omitempty"`
+}
+
+// Configuration for the stackdriver tracing service.
+type TracerStackdriverConfig struct {
+ // Enables trace output to stdout.
+ Debug *bool `json:"debug,omitempty"`
+ // The global default max number of attributes per span.
+ MaxNumberOfAttributes uint32 `json:"maxNumberOfAttributes,omitempty"`
+ // The global default max number of annotation events per span.
+ MaxNumberOfAnnotations uint32 `json:"maxNumberOfAnnotations,omitempty"`
+ // The global default max number of message events per span.
+ MaxNumberOfMessageEvents uint32 `json:"maxNumberOfMessageEvents,omitempty"`
+}
+
+type BaseConfig struct {
+ // URL to use for the validating webhook.
+ ValidationURL string `json:"validationURL,omitempty"`
+
+ // Validation webhook CA bundle.
+ ValidationCABundle string `json:"validationCABundle,omitempty"`
+}
+
+type IstiodRemoteConfig struct {
+ // URL to use for the sidecar injector webhook.
+ InjectionURL string `json:"injectionURL,omitempty"`
+ // Path to use for the sidecar injector webhook service.
+ InjectionPath string `json:"injectionPath,omitempty"`
+ // Injector CA bundle.
+ InjectionCABundle string `json:"injectionCABundle,omitempty"`
+}
+
+type Values struct {
+ // Global configuration for Istio components.
+ Global *GlobalConfig `json:"global,omitempty"`
+ // Configuration for the Pilot component.
+ Pilot *PilotConfig `json:"pilot,omitempty"`
+
+ // Controls whether telemetry is exported for Pilot.
+ Telemetry *TelemetryConfig `json:"telemetry,omitempty"`
+ // Configuration for the sidecar injector webhook.
+ SidecarInjectorWebhook *SidecarInjectorConfig `json:"sidecarInjectorWebhook,omitempty"`
+
+ // Identifies the revision this installation is associated with.
+ Revision string `json:"revision,omitempty"`
+
+ // Defines runtime configuration of components, including Istiod and istio-agent behavior.
+ // See https://istio.io/docs/reference/config/istio.mesh.v1alpha1/ for all available options.
+ // TODO can this import the real mesh config API?
+ MeshConfig *MeshConfig `json:"meshConfig,omitempty"`
+ // Configuration for the base component.
+ Base *BaseConfig `json:"base,omitempty"`
+ // Configuration for istiod-remote.
+ IstiodRemote *IstiodRemoteConfig `json:"istiodRemote,omitempty"`
+ // Specifies the aliases for the Istio control plane revision. A MutatingWebhookConfiguration
+ // is created for each alias.
+ RevisionTags []string `json:"revisionTags,omitempty"`
+ // The name of the default revision in the cluster.
+ DefaultRevision string `json:"defaultRevision,omitempty"`
+ // Specifies which installation configuration profile to apply.
+ Profile string `json:"profile,omitempty"`
+ // Specifies the compatibility version to use. When this is set, the control plane will
+ // be configured with the same defaults as the specified version.
+ CompatibilityVersion string `json:"compatibilityVersion,omitempty"`
+ // Specifies experimental helm fields that could be removed or changed in the future.
+ // +kubebuilder:pruning:PreserveUnknownFields
+ // +kubebuilder:validation:Schemaless
+ Experimental json.RawMessage `json:"experimental,omitempty"`
+}
+
+// ZeroVPNConfig enables cross-cluster access using SNI matching.
+type ZeroVPNConfig struct {
+ // Controls whether ZeroVPN is enabled.
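+ //
+ // For illustration only, a hypothetical values snippet (the suffix value is an assumption):
+ //
+ // zeroVPN:
+ //   enabled: true
+ //   suffix: global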
+ Enabled *bool `json:"enabled,omitempty"`
+ Suffix string `json:"suffix,omitempty"`
+}
+
+// ExperimentalConfig is a placeholder for experimental installation features.
+type ExperimentalConfig struct {
+ // Controls whether the experimental feature is enabled.
+ StableValidationPolicy *bool `json:"stableValidationPolicy,omitempty"`
+}
+
+// Configuration for Waypoint proxies.
+type WaypointConfig struct {
+ // K8s resource settings.
+ //
+ // See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container
+ Resources *k8sv1.ResourceRequirements `json:"resources,omitempty"`
+}
+
+// CNIGlobalConfig is a subset of the Global Configuration used in the Istio CNI chart.
+type CNIGlobalConfig struct {
+ // Default k8s resources settings for all Istio control plane components.
+ //
+ // See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container
+ //
+ // Deprecated: Marked as deprecated in pkg/apis/values_types.proto.
+ DefaultResources *k8sv1.ResourceRequirements `json:"defaultResources,omitempty"`
+
+ // Specifies the docker hub for Istio images.
+ Hub string `json:"hub,omitempty"`
+ // Specifies the image pull policy for the Istio images. One of Always, Never, IfNotPresent.
+ // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated.
+ //
+ // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ // +kubebuilder:validation:Enum=Always;Never;IfNotPresent
+ ImagePullPolicy *k8sv1.PullPolicy `json:"imagePullPolicy,omitempty"`
+ // ImagePullSecrets for the control plane ServiceAccount: a list of secrets in the same namespace
+ // to use for pulling any images in pods that reference this ServiceAccount.
+ // Must be set for any cluster configured with a private docker registry.
+ ImagePullSecrets []string `json:"imagePullSecrets,omitempty"`
+
+ // Specifies whether istio components should output logs in JSON format by adding the --log_as_json argument to each container.
+ LogAsJson *bool `json:"logAsJson,omitempty"`
+ // Specifies the global logging level settings for the Istio control plane components.
+ Logging *GlobalLoggingConfig `json:"logging,omitempty"`
+
+ // Specifies the tag for the Istio docker images.
+ Tag string `json:"tag,omitempty"`
+ // The variant of the Istio container images to use. Options are "debug" or "distroless". Unset will use the default for the given version.
+ Variant string `json:"variant,omitempty"`
+}
+
+// Resource describes the source of configuration.
+// +kubebuilder:validation:Enum=SERVICE_REGISTRY
+type Resource string
+
+const (
+ // Set to only receive service entries that are generated by the platform.
+ // These auto-generated service entries are a combination of services and endpoints
+ // that are generated by a specific platform, e.g. k8s.
+ ResourceServiceRegistry Resource = "SERVICE_REGISTRY"
+)
+
+// +kubebuilder:validation:Enum=UNSPECIFIED;OFF;DEFAULT;STRICT
+type MeshConfigIngressControllerMode string
+
+const (
+ // Unspecified Istio ingress controller.
+ MeshConfigIngressControllerModeUnspecified MeshConfigIngressControllerMode = "UNSPECIFIED"
+ // Disables Istio ingress controller.
+ MeshConfigIngressControllerModeOff MeshConfigIngressControllerMode = "OFF"
+ // Istio ingress controller will act on ingress resources that do not
+ // contain any annotation or whose annotations match the value
+ // specified in the ingress_class parameter described earlier. Use this
+ // mode if Istio ingress controller will be the default ingress
+ // controller for the entire Kubernetes cluster.
+ MeshConfigIngressControllerModeDefault MeshConfigIngressControllerMode = "DEFAULT"
+ // Istio ingress controller will only act on ingress resources whose
+ // annotations match the value specified in the ingress_class parameter
+ // described earlier. Use this mode if Istio ingress controller will be
+ // a secondary ingress controller (e.g., in addition to a
+ // cloud-provided ingress controller).
+ MeshConfigIngressControllerModeStrict MeshConfigIngressControllerMode = "STRICT"
+)
+
+// +hidefromdoc
+// +kubebuilder:validation:Enum=NONE;MUTUAL_TLS
+type MeshConfigAuthPolicy string
+
+const (
+ MeshConfigAuthPolicyNone MeshConfigAuthPolicy = "NONE"
+ MeshConfigAuthPolicyMutualTls MeshConfigAuthPolicy = "MUTUAL_TLS"
+)
+
+// +kubebuilder:validation:Enum=TEXT;JSON
+type MeshConfigAccessLogEncoding string
+
+const (
+ // Text encoding for the proxy access log.
+ MeshConfigAccessLogEncodingText MeshConfigAccessLogEncoding = "TEXT"
+ // JSON encoding for the proxy access log.
+ MeshConfigAccessLogEncodingJson MeshConfigAccessLogEncoding = "JSON"
+)
+
+// Default policy for upgrading http1.1 connections to http2.
+// +kubebuilder:validation:Enum=DO_NOT_UPGRADE;UPGRADE
+type MeshConfigH2UpgradePolicy string
+
+const (
+ // Do not upgrade connections to http2.
+ MeshConfigH2UpgradePolicyDoNotUpgrade MeshConfigH2UpgradePolicy = "DO_NOT_UPGRADE"
+ // Upgrade the connections to http2.
+ MeshConfigH2UpgradePolicyUpgrade MeshConfigH2UpgradePolicy = "UPGRADE"
+)
+
+// +kubebuilder:validation:Enum=REGISTRY_ONLY;ALLOW_ANY
+type MeshConfigOutboundTrafficPolicyMode string
+
+const (
+ // In `REGISTRY_ONLY` mode, unknown outbound traffic will be dropped.
+ // Traffic destinations must be explicitly declared into the service registry through `ServiceEntry` configurations.
+ //
+ // Note: Istio [does not offer an outbound traffic security policy](https://istio.io/latest/docs/ops/best-practices/security/#understand-traffic-capture-limitations).
+ // This option does not act as one, or as any form of an outbound firewall.
+ // Instead, this option exists primarily to offer users a way to detect missing `ServiceEntry` configurations by explicitly failing.
+ MeshConfigOutboundTrafficPolicyModeRegistryOnly MeshConfigOutboundTrafficPolicyMode = "REGISTRY_ONLY"
+ // In `ALLOW_ANY` mode, any traffic to unknown destinations will be allowed.
+ // Unknown destination traffic will have limited functionality, however, such as reduced observability.
+ // This mode allows users that do not have all possible egress destinations registered through `ServiceEntry` configurations to still connect
+ // to arbitrary destinations.
+ MeshConfigOutboundTrafficPolicyModeAllowAny MeshConfigOutboundTrafficPolicyMode = "ALLOW_ANY"
+)
+
+// +kubebuilder:validation:Enum=PASSTHROUGH;LOCALHOST
+type MeshConfigInboundTrafficPolicyMode string
+
+const (
+ // Inbound traffic will be passed through to the destination listening
+ // on Pod IP. This matches the behavior without Istio enabled at all,
+ // allowing the proxy to be transparent.
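+ //
+ // For illustration only, a hypothetical meshConfig snippet selecting this mode:
+ //
+ // inboundTrafficPolicy:
+ //   mode: PASSTHROUGH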
+ MeshConfigInboundTrafficPolicyModePassthrough MeshConfigInboundTrafficPolicyMode = "PASSTHROUGH"
+ // Inbound traffic will be sent to the destinations listening on localhost.
+ MeshConfigInboundTrafficPolicyModeLocalhost MeshConfigInboundTrafficPolicyMode = "LOCALHOST"
+)
+
+// TraceContext selects the context propagation headers used for
+// distributed tracing.
+// +kubebuilder:validation:Enum=UNSPECIFIED;W3C_TRACE_CONTEXT;GRPC_BIN;CLOUD_TRACE_CONTEXT;B3
+type MeshConfigExtensionProviderOpenCensusAgentTracingProviderTraceContext string
+
+const (
+ // +hidefromdoc
+ // Unspecified context. Should not be used for now, but added to reserve
+ // the 0 enum value if TraceContext is used outside of a repeated field.
+ MeshConfigExtensionProviderOpenCensusAgentTracingProviderTraceContextUnspecified MeshConfigExtensionProviderOpenCensusAgentTracingProviderTraceContext = "UNSPECIFIED"
+ // Use W3C Trace Context propagation using the `traceparent` HTTP header.
+ // See the
+ // [Trace Context documentation](https://www.w3.org/TR/trace-context/) for details.
+ MeshConfigExtensionProviderOpenCensusAgentTracingProviderTraceContextW3cTraceContext MeshConfigExtensionProviderOpenCensusAgentTracingProviderTraceContext = "W3C_TRACE_CONTEXT"
+ // Use gRPC binary context propagation using the `grpc-trace-bin` http header.
+ MeshConfigExtensionProviderOpenCensusAgentTracingProviderTraceContextGrpcBin MeshConfigExtensionProviderOpenCensusAgentTracingProviderTraceContext = "GRPC_BIN"
+ // Use Cloud Trace context propagation using the
+ // `X-Cloud-Trace-Context` http header.
+ MeshConfigExtensionProviderOpenCensusAgentTracingProviderTraceContextCloudTraceContext MeshConfigExtensionProviderOpenCensusAgentTracingProviderTraceContext = "CLOUD_TRACE_CONTEXT"
+ // Use multi-header B3 context propagation using the `X-B3-TraceId`,
+ // `X-B3-SpanId`, and `X-B3-Sampled` HTTP headers. See
+ // [B3 header propagation README](https://github.com/openzipkin/b3-propagation)
+ // for details.
+ MeshConfigExtensionProviderOpenCensusAgentTracingProviderTraceContextB3 MeshConfigExtensionProviderOpenCensusAgentTracingProviderTraceContext = "B3"
+)
+
+// +kubebuilder:validation:Enum=DEFAULT;NONE;BASE;MERGE_SLASHES;DECODE_AND_MERGE_SLASHES
+type MeshConfigProxyPathNormalizationNormalizationType string
+
+const (
+ // Apply default normalizations. Currently, this is BASE.
+ MeshConfigProxyPathNormalizationNormalizationTypeDefault MeshConfigProxyPathNormalizationNormalizationType = "DEFAULT"
+ // No normalization, paths are used as is.
+ MeshConfigProxyPathNormalizationNormalizationTypeNone MeshConfigProxyPathNormalizationNormalizationType = "NONE"
+ // Normalize according to [RFC 3986](https://tools.ietf.org/html/rfc3986).
+ // For Envoy proxies, this is the [`normalize_path`](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto.html) option.
+ // For example, `/a/../b` normalizes to `/b`.
+ MeshConfigProxyPathNormalizationNormalizationTypeBase MeshConfigProxyPathNormalizationNormalizationType = "BASE"
+ // In addition to the `BASE` normalization, consecutive slashes are also merged.
+ // For example, `/a//b` normalizes to `/a/b`.
+ MeshConfigProxyPathNormalizationNormalizationTypeMergeSlashes MeshConfigProxyPathNormalizationNormalizationType = "MERGE_SLASHES"
+ // In addition to normalization in `MERGE_SLASHES`, slash characters are UTF-8 decoded (case insensitive) prior to merging.
+ // This means `%2F`, `%2f`, `%5C`, and `%5c` sequences in the request path will be rewritten to `/` or `\`.
+ // For example, `/a%2f/b` normalizes to `/a/b`.
+ MeshConfigProxyPathNormalizationNormalizationTypeDecodeAndMergeSlashes MeshConfigProxyPathNormalizationNormalizationType = "DECODE_AND_MERGE_SLASHES"
+)
+
+// TLS protocol versions.
+// +kubebuilder:validation:Enum=TLS_AUTO;TLSV1_2;TLSV1_3
+type MeshConfigTLSConfigTLSProtocol string
+
+const (
+ // Automatically choose the optimal TLS version.
+ MeshConfigTLSConfigTLSProtocolTlsAuto MeshConfigTLSConfigTLSProtocol = "TLS_AUTO"
+ // TLS version 1.2
+ MeshConfigTLSConfigTLSProtocolTlsv12 MeshConfigTLSConfigTLSProtocol = "TLSV1_2"
+ // TLS version 1.3
+ MeshConfigTLSConfigTLSProtocolTlsv13 MeshConfigTLSConfigTLSProtocol = "TLSV1_3"
+)
+
+// MeshConfig defines mesh-wide settings for the Istio service mesh.
+type MeshConfig struct {
+ // Port on which Envoy should listen for all outbound traffic to other services.
+ // Default port is 15001.
+ ProxyListenPort int32 `json:"proxyListenPort,omitempty"`
+ // Port on which Envoy should listen for all inbound traffic; traffic to the pod/vm is captured to this port.
+ // Default port is 15006.
+ ProxyInboundListenPort int32 `json:"proxyInboundListenPort,omitempty"`
+ // Port on which Envoy should listen for HTTP PROXY requests if set.
+ ProxyHttpPort int32 `json:"proxyHttpPort,omitempty"`
+ // Connection timeout used by Envoy. (MUST BE >=1ms)
+ // Default timeout is 10s.
+ ConnectTimeout *metav1.Duration `json:"connectTimeout,omitempty"`
+ // +hidefromdoc
+ // Automatic protocol detection uses a set of heuristics to
+ // determine whether the connection is using TLS or not (on the
+ // server side), as well as the application protocol being used
+ // (e.g., http vs tcp). These heuristics rely on the client sending
+ // the first bits of data. For server first protocols like MySQL,
+ // MongoDB, etc. Envoy will timeout on the protocol detection after
+ // the specified period, defaulting to non mTLS plain TCP
+ // traffic. Set this field to tweak the period that Envoy will wait
+ // for the client to send the first bits of data. (MUST BE >=1ms or
+ // 0s to disable). Default detection timeout is 0s (no timeout).
+ //
+ // Setting a timeout is neither recommended nor safe. Even high timeouts (>5s) will be hit
+ // occasionally, and when they occur the result is typically broken traffic that may not
+ // recover on its own. Exceptionally high values might solve this, but injecting 60s delays
+ // onto new connections is generally not tenable anyway.
+ ProtocolDetectionTimeout *metav1.Duration `json:"protocolDetectionTimeout,omitempty"`
+ // If set, `SO_KEEPALIVE` is set on the socket to enable TCP keepalives.
+ TcpKeepalive *ConnectionPoolSettingsTCPSettingsTcpKeepalive `json:"tcpKeepalive,omitempty"`
+ // Class of ingress resources to be processed by Istio ingress
+ // controller. This corresponds to the value of the
+ // `kubernetes.io/ingress.class` annotation.
+ IngressClass string `json:"ingressClass,omitempty"`
+ // Name of the Kubernetes service used for the istio ingress controller.
+ // If no ingress controller is specified, the default value `istio-ingressgateway` is used.
+ IngressService string `json:"ingressService,omitempty"`
+ // Defines whether to use Istio ingress controller for annotated or all ingress resources.
+ // Default mode is `STRICT`.
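+ //
+ // For illustration only, a hypothetical meshConfig snippet (the ingressClass value is an assumption):
+ //
+ // ingressControllerMode: STRICT
+ // ingressClass: istio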
+ IngressControllerMode MeshConfigIngressControllerMode `json:"ingressControllerMode,omitempty"`
+ // Defines which gateway deployment to use as the Ingress controller. This field corresponds to
+ // the Gateway.selector field, and will be set as `istio: INGRESS_SELECTOR`.
+ // By default, `ingressgateway` is used, which will select the default IngressGateway as it has the
+ // `istio: ingressgateway` labels.
+ // It is recommended that this is the same value as ingress_service.
+ IngressSelector string `json:"ingressSelector,omitempty"`
+ // Flag to control generation of trace spans and request IDs.
+ // Requires a trace span collector defined in the proxy configuration.
+ EnableTracing bool `json:"enableTracing,omitempty"`
+ // File address for the proxy access log (e.g. /dev/stdout).
+ // Empty value disables access logging.
+ AccessLogFile string `json:"accessLogFile,omitempty"`
+ // Format for the proxy access log.
+ // Empty value results in the proxy's default access log format.
+ AccessLogFormat string `json:"accessLogFormat,omitempty"`
+ // Encoding for the proxy access log (`TEXT` or `JSON`).
+ // Default value is `TEXT`.
+ AccessLogEncoding MeshConfigAccessLogEncoding `json:"accessLogEncoding,omitempty"`
+ // This flag enables Envoy's gRPC Access Log Service.
+ // See [Access Log Service](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/access_loggers/grpc/v3/als.proto)
+ // for details about Envoy's gRPC Access Log Service API.
+ // Default value is `false`.
+ EnableEnvoyAccessLogService bool `json:"enableEnvoyAccessLogService,omitempty"`
+ // This flag disables Envoy Listener logs.
+ // See [Listener Access Log](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/listener/v3/listener.proto#envoy-v3-api-field-config-listener-v3-listener-access-log)
+ // Istio enables Envoy's listener access logs on the "NoRoute" response flag.
+ // Default value is `false`.
+ DisableEnvoyListenerLog bool `json:"disableEnvoyListenerLog,omitempty"`
+ // Default proxy config used by gateways and sidecars.
+ // In the case of Kubernetes, the proxy config is applied once during the injection process,
+ // and remains constant for the duration of the pod. The rest of the mesh config can be changed
+ // at runtime and config gets distributed dynamically.
+ // On Kubernetes, this can be overridden on individual pods with the `proxy.istio.io/config` annotation.
+ DefaultConfig *MeshConfigProxyConfig `json:"defaultConfig,omitempty"`
+ // Set the default behavior of the sidecar for handling outbound
+ // traffic from the application.
+ //
+ // Can be overridden at a Sidecar level by setting the `OutboundTrafficPolicy` in the
+ // [Sidecar API](https://istio.io/docs/reference/config/networking/sidecar/#OutboundTrafficPolicy).
+ //
+ // Default mode is `ALLOW_ANY`, which means outbound traffic to unknown destinations will be allowed.
+ OutboundTrafficPolicy *MeshConfigOutboundTrafficPolicy `json:"outboundTrafficPolicy,omitempty"`
+ // Set the default behavior of the sidecar for handling inbound
+ // traffic to the application. If your application listens on
+ // localhost, you will need to set this to `LOCALHOST`.
+ InboundTrafficPolicy *MeshConfigInboundTrafficPolicy `json:"inboundTrafficPolicy,omitempty"`
+ // ConfigSource describes a source of configuration data for networking
+ // rules, and other Istio configuration artifacts. Multiple data sources
+ // can be configured for a single control plane.
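+ //
+ // For illustration only, a hypothetical entry (the address is an assumption;
+ // the fs:/// scheme is described under ConfigSource below):
+ //
+ // configSources:
+ // - address: fs:///etc/istio/config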
+ ConfigSources []*ConfigSource `json:"configSources,omitempty"`
+ // This flag is used to enable mutual `TLS` automatically for service-to-service communication
+ // within the mesh. Default is true.
+ // If set to true, and a given service does not have a corresponding `DestinationRule` configured,
+ // or its `DestinationRule` does not have ClientTLSSettings specified, Istio configures client side
+ // TLS configuration appropriately. More specifically:
+ // - If the upstream authentication policy is in `STRICT` mode, use the Istio provisioned certificate
+ //   for mutual `TLS` to connect to upstream.
+ // - If the upstream service is in plain text mode, use plain text.
+ // - If the upstream authentication policy is in PERMISSIVE mode, Istio configures clients to use
+ //   mutual `TLS` when server sides are capable of accepting mutual `TLS` traffic.
+ // - If a service `DestinationRule` exists and has `ClientTLSSettings` specified, that is always used instead.
+ EnableAutoMtls *bool `json:"enableAutoMtls,omitempty"`
+ // The trust domain corresponds to the trust root of a system.
+ // Refer to [SPIFFE-ID](https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-ID.md#21-trust-domain)
+ TrustDomain string `json:"trustDomain,omitempty"`
+ // The trust domain aliases represent the aliases of `trust_domain`.
+ // For example, if we have
+ // ```yaml
+ // trustDomain: td1
+ // trustDomainAliases: ["td2", "td3"]
+ // ```
+ // Any service with the identity `td1/ns/foo/sa/a-service-account`, `td2/ns/foo/sa/a-service-account`,
+ // or `td3/ns/foo/sa/a-service-account` will be treated the same in the Istio mesh.
+ TrustDomainAliases []string `json:"trustDomainAliases,omitempty"`
+ // The extra root certificates for workload-to-workload communication.
+ // The plugin certificates (the 'cacerts' secret) or self-signed certificates (the 'istio-ca-secret' secret)
+ // are automatically added by Istiod.
+ // The CA certificate that signs the workload certificates is automatically added by Istio Agent.
+ CaCertificates []*MeshConfigCertificateData `json:"caCertificates,omitempty"`
+ // The default value for the ServiceEntry.export_to field and services
+ // imported through container registry integrations, e.g. this applies to
+ // Kubernetes Service resources. The value is a list of namespace names and
+ // reserved namespace aliases. The allowed namespace aliases are:
+ // ```
+ // * - All Namespaces
+ // . - Current Namespace
+ // ~ - No Namespace
+ // ```
+ // If not set, the system will use "*" as the default value, which implies that
+ // services are exported to all namespaces.
+ //
+ // `All namespaces` is a reasonable default for implementations that don't
+ // need to restrict access or visibility of services across namespace
+ // boundaries. If that requirement is present, it is generally good practice to
+ // make the default `Current namespace` so that services are only visible
+ // within their own namespaces by default. Operators can then expand the
+ // visibility of services to other namespaces as needed. Use of `No Namespace`
+ // is expected to be rare but can have utility for deployments where
+ // dependency management needs to be precise even within the scope of a single
+ // namespace.
+ //
+ // For further discussion see the reference documentation for `ServiceEntry`,
+ // `Sidecar`, and `Gateway`.
+ DefaultServiceExportTo []string `json:"defaultServiceExportTo,omitempty"`
+ // The default value for the VirtualService.export_to field. Has the same
+ // syntax as `default_service_export_to`.
+ //
+ // If not set, the system will use "*" as the default value, which implies that
+ // virtual services are exported to all namespaces.
+ DefaultVirtualServiceExportTo []string `json:"defaultVirtualServiceExportTo,omitempty"`
+ // The default value for the `DestinationRule.export_to` field. Has the same
+ // syntax as `default_service_export_to`.
+ //
+ // If not set, the system will use "*" as the default value, which implies that
+ // destination rules are exported to all namespaces.
+ DefaultDestinationRuleExportTo []string `json:"defaultDestinationRuleExportTo,omitempty"`
+ // The namespace to treat as the administrative root namespace for
+ // Istio configuration. When processing a leaf namespace Istio will search for
+ // declarations in that namespace first and if none are found it will
+ // search in the root namespace. Any matching declaration found in the root
+ // namespace is processed as if it were declared in the leaf namespace.
+ //
+ // The precise semantics of this processing are documented on each resource
+ // type.
+ RootNamespace string `json:"rootNamespace,omitempty"`
+ // Locality based load balancing distribution or failover settings.
+ // If unspecified, locality based load balancing will be enabled by default.
+ // However, this requires outlierDetection to actually take effect for a particular
+ // service; see https://istio.io/latest/docs/tasks/traffic-management/locality-load-balancing/failover/
+ LocalityLbSetting *LocalityLoadBalancerSetting `json:"localityLbSetting,omitempty"`
+ // Configures the DNS refresh rate for Envoy clusters of type `STRICT_DNS`.
+ // Default refresh rate is `60s`.
+ DnsRefreshRate *metav1.Duration `json:"dnsRefreshRate,omitempty"`
+ // Specify if http1.1 connections should be upgraded to http2 by default.
+ // If sidecars are installed on all pods in the mesh, then this should be set to `UPGRADE`.
+ // If one or more services or namespaces do not have sidecar(s), then this should be set to `DO_NOT_UPGRADE`.
+ // It can be enabled by destination using the `destinationRule.trafficPolicy.connectionPool.http.h2UpgradePolicy` override.
+ H2UpgradePolicy MeshConfigH2UpgradePolicy `json:"h2UpgradePolicy,omitempty"`
+ // Name to be used while emitting statistics for inbound clusters. The same pattern is used while computing the stat prefix for
+ // network filters like TCP and Redis.
+ // By default, Istio emits statistics with the pattern `inbound|<port>|<port-name>|<service-FQDN>`.
+ // For example `inbound|7443|grpc-reviews|reviews.prod.svc.cluster.local`. This can be used to override that pattern.
+ //
+ // A pattern can be composed of various pre-defined variables. The following variables are supported.
+ //
+ // - `%SERVICE%` - Will be substituted with the short hostname of the service.
+ // - `%SERVICE_NAME%` - Will be substituted with the name of the service.
+ // - `%SERVICE_FQDN%` - Will be substituted with the FQDN of the service.
+ // - `%SERVICE_PORT%` - Will be substituted with the port of the service.
+ // - `%TARGET_PORT%` - Will be substituted with the target port of the service.
+ // - `%SERVICE_PORT_NAME%` - Will be substituted with the port name of the service.
+ //
+ // Following are some examples of supported patterns for reviews:
+ //
+ // - `%SERVICE_FQDN%_%SERVICE_PORT%` will use reviews.prod.svc.cluster.local_7443 as the stats name.
+ // - `%SERVICE%` will use reviews.prod as the stats name.
+ InboundClusterStatName string `json:"inboundClusterStatName,omitempty"`
+ // Name to be used while emitting statistics for outbound clusters. The same pattern is used while computing the stat prefix for
+ // network filters like TCP and Redis.
+ // By default, Istio emits statistics with the pattern `outbound|<port>|<subsetname>|<service-FQDN>`.
+ // For example `outbound|8080|v2|reviews.prod.svc.cluster.local`. This can be used to override that pattern.
+ //
+ // A pattern can be composed of various pre-defined variables. The following variables are supported.
+ //
+ // - `%SERVICE%` - Will be substituted with the short hostname of the service.
+ // - `%SERVICE_NAME%` - Will be substituted with the name of the service.
+ // - `%SERVICE_FQDN%` - Will be substituted with the FQDN of the service.
+ // - `%SERVICE_PORT%` - Will be substituted with the port of the service.
+ // - `%SERVICE_PORT_NAME%` - Will be substituted with the port name of the service.
+ // - `%SUBSET_NAME%` - Will be substituted with the subset.
+ //
+ // Following are some examples of supported patterns for reviews:
+ //
+ // - `%SERVICE_FQDN%_%SERVICE_PORT%` will use `reviews.prod.svc.cluster.local_7443` as the stats name.
+ // - `%SERVICE%` will use reviews.prod as the stats name.
+ OutboundClusterStatName string `json:"outboundClusterStatName,omitempty"`
+ // +hidefromdoc
+ // Configure the provision of certificates.
+ //
+ // Note: Deprecated, please refer to Cert-Manager or other cert provisioning solutions to sign DNS certificates.
+ //
+ // Deprecated: Marked as deprecated in mesh/v1alpha1/config.proto.
+ Certificates []*Certificate `json:"certificates,omitempty"`
+ // +hidefromdoc
+ // Settings to be applied to select services.
+ ServiceSettings []*MeshConfigServiceSettings `json:"serviceSettings,omitempty"`
+ // If enabled, the Istio agent will merge metrics exposed by the application with metrics from Envoy
+ // and the Istio agent. The sidecar injection will replace `prometheus.io` annotations present on the pod
+ // and redirect them towards the Istio agent, which will then merge metrics from the application with Istio metrics.
+ // This relies on the `prometheus.io/scrape`, `prometheus.io/port`, and
+ // `prometheus.io/path` annotations.
+ // If you are running a separately managed Envoy with an Istio sidecar, this may cause issues, as the metrics will collide.
+ // In this case, it is recommended to disable aggregation on that deployment with the
+ // `prometheus.istio.io/merge-metrics: "false"` annotation.
+ // If not specified, this will be enabled by default.
+ EnablePrometheusMerge *bool `json:"enablePrometheusMerge,omitempty"`
+ // +hidefromdoc
+ // `VerifyCertificateAtClient` sets the mesh global default for peer certificate validation
+ // at the client-side proxy when `SIMPLE` TLS or `MUTUAL` TLS (non `ISTIO_MUTUAL`) origination
+ // modes are used. This setting can be overridden at the host level via the DestinationRule API.
+ // By default, `VerifyCertificateAtClient` is `true`.
+ //
+ // `CaCertificates`: If set, the proxy verifies the CA signature based on the given CaCertificates. If unset,
+ // and `VerifyCertificateAtClient` is true, the proxy uses the default System CA bundle. If unset and
+ // `VerifyCertificateAtClient` is false, the proxy will not verify the CA.
+ //
+ // `SubjectAltNames`: If set, the proxy verifies that the subject alt names are present in the SAN. If unset,
+ // and `VerifyCertificateAtClient` is true, the proxy uses the host in the destination rule to verify the SANs.
+ // If unset, and `VerifyCertificateAtClient` is false, the proxy does not verify SANs.
+ //
+ // For SAN, the client-side proxy will exact match the host in `DestinationRule` as well as one level
+ // wildcard if the specified host in DestinationRule doesn't contain a wildcard.
+ // For example, if the host in `DestinationRule` is `x.y.com`, the client-side proxy will
+ // match either `x.y.com` or `*.y.com` for the SAN in the presented server certificate.
+ // For a wildcard host name in DestinationRule, the client-side proxy will do a suffix match. For example,
+ // if the host is `*.x.y.com`, the client-side proxy will verify the presented server certificate SAN matches
+ // the `.x.y.com` suffix.
+ //
+ // Deprecated: Marked as deprecated in mesh/v1alpha1/config.proto.
+ VerifyCertificateAtClient *bool `json:"verifyCertificateAtClient,omitempty"`
+ // +hidefromdoc
+ // If specified, Istiod will authorize and forward the CSRs from the workloads to the specified external CA
+ // using the Istio CA gRPC API.
+ Ca *MeshConfigCA `json:"ca,omitempty"`
+ // Defines a list of extension providers that extend Istio's functionality. For example, the AuthorizationPolicy
+ // can be used with an extension provider to delegate the authorization decision to a custom authorization system.
+ // +kubebuilder:validation:MaxItems=1000
+ ExtensionProviders []*MeshConfigExtensionProvider `json:"extensionProviders,omitempty"`
+ // Specifies extension providers to use by default in Istio configuration resources.
+ DefaultProviders *MeshConfigDefaultProviders `json:"defaultProviders,omitempty"`
+ // A list of Kubernetes selectors that specify the set of namespaces that Istio considers when
+ // computing configuration updates for sidecars. This can be used to reduce Istio's computational load
+ // by limiting the number of entities (including services, pods, and endpoints) that are watched and processed.
+ // If omitted, Istio will use the default behavior of processing all namespaces in the cluster.
+ // Elements in the list are disjunctive (OR semantics), i.e. a namespace will be included if it matches any selector.
+ // The following example selects any namespace that matches either of the following:
+ // 1. The namespace has both of these labels: `env: prod` and `region: us-east1`
+ // 2. The namespace has label `app` equal to `cassandra` or `spark`.
+ // ```yaml
+ // discoverySelectors:
+ //   - matchLabels:
+ //       env: prod
+ //       region: us-east1
+ //   - matchExpressions:
+ //       - key: app
+ //         operator: In
+ //         values:
+ //           - cassandra
+ //           - spark
+ // ```
+ // Refer to the [Kubernetes selector docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors)
+ // for additional detail on selector semantics.
+ DiscoverySelectors []*metav1.LabelSelector `json:"discoverySelectors,omitempty"`
+ // ProxyPathNormalization configures how URL paths in incoming and outgoing HTTP requests are
+ // normalized by the sidecars and gateways.
+ // The normalized paths will be used in all aspects through the requests' lifetime on the
+ // sidecars and gateways, which includes routing decisions in outbound direction (client proxy),
+ // authorization policy match and enforcement in inbound direction (server proxy), and the URL
+ // path proxied to the upstream service.
+ // If not set, the NormalizationType.DEFAULT configuration will be used.
+ PathNormalization *MeshConfigProxyPathNormalization `json:"pathNormalization,omitempty"`
+ // Configure the default HTTP retry policy.
+ // The default number of retry attempts is set at 2 for these errors:
+ //
+ // "connect-failure,refused-stream,unavailable,cancelled,retriable-status-codes".
+ //
+ // Setting the number of attempts to 0 disables retry policy globally.
+ // This setting can be overridden on a per-host basis using the Virtual Service
+ // API.
+ // All settings in the retry policy except `perTryTimeout` can currently be
+ // configured globally via this field.
+ DefaultHttpRetryPolicy *HTTPRetry `json:"defaultHttpRetryPolicy,omitempty"`
+ // The following configuration parameters can be used to specify TLSConfig for mesh traffic.
+ // For example, a user could enable the min TLS version for ISTIO_MUTUAL traffic and specify a curve for non ISTIO_MUTUAL traffic like below:
+ // ```yaml
+ // meshConfig:
+ //   meshMTLS:
+ //     minProtocolVersion: TLSV1_3
+ //   tlsDefaults:
+ //     # Note: applicable only for non ISTIO_MUTUAL scenarios
+ //     ecdhCurves:
+ //     - P-256
+ //     - P-512
+ // ```
+ // Configuration of mTLS for traffic between workloads using ISTIO_MUTUAL TLS.
+ //
+ // Note: Mesh mTLS does not respect ECDH curves.
+ MeshMTLS *MeshConfigTLSConfig `json:"meshMTLS,omitempty"`
+ // Configuration of TLS for all traffic except for ISTIO_MUTUAL mode.
+ // Currently, this supports configuration of ecdh_curves and cipher_suites only.
+ // For ISTIO_MUTUAL TLS settings, use the meshMTLS configuration.
+ TlsDefaults *MeshConfigTLSConfig `json:"tlsDefaults,omitempty"`
+}
+
+// ConfigSource describes information about a configuration store inside a
+// mesh. A single control plane instance can interact with one or more data
+// sources.
+type ConfigSource struct {
+ // Address of the server implementing the Istio Mesh Configuration
+ // protocol (MCP). Can be an IP address or a fully qualified DNS name.
+ // Use xds:// to specify a grpc-based xds backend, k8s:// to specify a k8s controller or
+ // fs:/// to specify a file-based backend with absolute path to the directory.
+ Address string `json:"address,omitempty"`
+ // Use the tls_settings to specify the tls mode to use. If the MCP server
+ // uses Istio mutual TLS and shares the root CA with Pilot, specify the TLS
+ // mode as `ISTIO_MUTUAL`.
+ TlsSettings *ClientTLSSettings `json:"tlsSettings,omitempty"`
+ // Describes the source of configuration. If nothing is specified, the default is MCP.
+ SubscribedResources []Resource `json:"subscribedResources,omitempty"`
+}
+
+// +hidefromdoc
+// Certificate configures the provision of a certificate and its key.
+// Example 1: key and cert stored in a secret
+// ```
+// { secretName: galley-cert
+//   secretNamespace: istio-system
+//   dnsNames:
+//     - galley.istio-system.svc
+//     - galley.mydomain.com
+// }
+// ```
+// Example 2: key and cert stored in a directory
+// ```
+// { dnsNames:
+//     - pilot.istio-system
+//     - pilot.istio-system.svc
+//     - pilot.mydomain.com
+// }
+// ```
+type Certificate struct {
+ // Name of the secret the certificate and its key will be stored into.
+ // If it is empty, it will not be stored into a secret.
+ // Instead, the certificate and its key will be stored into a hard-coded directory.
+ SecretName string `json:"secretName,omitempty"`
+ // The DNS names for the certificate. A certificate may contain
+ // multiple DNS names.
+ DnsNames []string `json:"dnsNames,omitempty"`
+}
+
+// `OutboundTrafficPolicy` sets the default behavior of the sidecar for
+// handling unknown outbound traffic from the application.
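+//
+// For illustration only, a hypothetical meshConfig snippet (`REGISTRY_ONLY` and
+// `ALLOW_ANY` are the modes defined above):
+//
+// outboundTrafficPolicy:
+//   mode: REGISTRY_ONLY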
+type MeshConfigOutboundTrafficPolicy struct {
+ Mode MeshConfigOutboundTrafficPolicyMode `json:"mode,omitempty"`
+}
+
+type MeshConfigInboundTrafficPolicy struct {
+ Mode MeshConfigInboundTrafficPolicyMode `json:"mode,omitempty"`
+}
+
+// +kubebuilder:validation:XValidation:message="At most one of [pem spiffeBundleUrl] should be set",rule="(has(self.pem)?1:0) + (has(self.spiffeBundleUrl)?1:0) <= 1"
+type MeshConfigCertificateData struct {
+ // The PEM data of the certificate.
+ Pem string `json:"pem,omitempty"`
+
+ // The SPIFFE bundle endpoint URL that complies to:
+ // https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE_Trust_Domain_and_Bundle.md#the-spiffe-trust-domain-and-bundle
+ // The endpoint should support authentication based on Web PKI:
+ // https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE_Trust_Domain_and_Bundle.md#521-web-pki
+ // The certificate is retrieved from the endpoint.
+ SpiffeBundleUrl string `json:"spiffeBundleUrl,omitempty"`
+ // Optional. Specify the kubernetes signers (External CA) that use this trustAnchor
+ // when Istiod is acting as RA (registration authority).
+ // If set, they are used for these signers. Otherwise, this trustAnchor is used for all signers.
+ CertSigners []string `json:"certSigners,omitempty"`
+ // Optional. Specify the list of trust domains to which this trustAnchor data belongs.
+ // If set, they are used for these trust domains. Otherwise, this trustAnchor is used for the default trust domain
+ // and its aliases.
+ // Note that we can have multiple trustAnchor data for the same trust_domain.
+ // In that case, trustAnchors with the same trust domain will be merged and used together to verify peer certificates.
+ // If neither cert_signers nor trust_domains is set, this trustAnchor is used for all trust domains and all signers.
+ // If only trust_domains is set, this trustAnchor is used for these trust_domains and all signers.
+ // If only cert_signers is set, this trustAnchor is used for these cert_signers and all trust domains.
+ // If both cert_signers and trust_domains are set, this trustAnchor is only used for these signers and trust domains.
+ TrustDomains []string `json:"trustDomains,omitempty"`
+}
+
+// +hidefromdoc
+// Settings to be applied to select services.
+//
+// For example, the following configures all services in namespace "foo" as well as the
+// "bar" service in namespace "baz" to be considered cluster-local:
+//
+// ```yaml
+// serviceSettings:
+//   - settings:
+//       cluster_local: true
+//     hosts:
+//     - "*.foo.svc.cluster.local"
+//     - "bar.baz.svc.cluster.local"
+// ```
+type MeshConfigServiceSettings struct {
+ // The settings to apply to the selected services.
+ Settings *MeshConfigServiceSettingsSettings `json:"settings,omitempty"`
+ // The services to which the Settings should be applied. Services are selected using the hostname
+ // matching rules used by DestinationRule.
+ //
+ // For example: foo.bar.svc.cluster.local, *.baz.svc.cluster.local
+ Hosts []string `json:"hosts,omitempty"`
+}
+
+type MeshConfigCA struct {
+ // REQUIRED. Address of the CA server implementing the Istio CA gRPC API.
+ // Can be an IP address or a fully qualified DNS name with port.
+ // E.g.: custom-ca.default.svc.cluster.local:8932, 192.168.23.2:9000
+ // +kubebuilder:validation:Required
+ Address string `json:"address"`
+ // Use the tls_settings to specify the tls mode to use.
+ // Regarding tls_settings:
+ // - DISABLE MODE is legitimate for the case Istiod is making the request via an Envoy sidecar.
+ // DISABLE MODE can also be used for testing.
+ // - TLS MUTUAL mode is on by default. If the CA certificates
+ // (the cert bundle to verify the CA server's certificate) are omitted, Istiod will
+ // use the system root certs to verify the CA server's certificate.
+ TlsSettings *ClientTLSSettings `json:"tlsSettings,omitempty"`
+ // Timeout for forwarding CSR requests from Istiod to the external CA.
+ // Default: 10s
+ RequestTimeout *metav1.Duration `json:"requestTimeout,omitempty"`
+ // Use istiod_side to specify whether the CA server integrates on the Istiod side or the Agent side.
+ // Default: true
+ IstiodSide bool `json:"istiodSide,omitempty"`
+}
+
+// +kubebuilder:validation:XValidation:message="At most one of [envoyExtAuthzHttp envoyExtAuthzGrpc zipkin lightstep datadog stackdriver opencensus skywalking opentelemetry prometheus envoyFileAccessLog envoyHttpAls envoyTcpAls envoyOtelAls] should be set",rule="(has(self.envoyExtAuthzHttp)?1:0) + (has(self.envoyExtAuthzGrpc)?1:0) + (has(self.zipkin)?1:0) + (has(self.lightstep)?1:0) + (has(self.datadog)?1:0) + (has(self.stackdriver)?1:0) + (has(self.opencensus)?1:0) + (has(self.skywalking)?1:0) + (has(self.opentelemetry)?1:0) + (has(self.prometheus)?1:0) + (has(self.envoyFileAccessLog)?1:0) + (has(self.envoyHttpAls)?1:0) + (has(self.envoyTcpAls)?1:0) + (has(self.envoyOtelAls)?1:0) <= 1"
+type MeshConfigExtensionProvider struct {
+ // REQUIRED. A unique name identifying the extension provider.
+ // +kubebuilder:validation:Required
+ Name string `json:"name"`
+
+ // Configures an external authorizer that implements the Envoy ext_authz filter authorization check service using the HTTP API.
+ EnvoyExtAuthzHttp *MeshConfigExtensionProviderEnvoyExternalAuthorizationHttpProvider `json:"envoyExtAuthzHttp,omitempty"`
+
+ // Configures an external authorizer that implements the Envoy ext_authz filter authorization check service using the gRPC API.
+ EnvoyExtAuthzGrpc *MeshConfigExtensionProviderEnvoyExternalAuthorizationGrpcProvider `json:"envoyExtAuthzGrpc,omitempty"`
+
+ // Configures a tracing provider that uses the Zipkin API.
+ Zipkin *MeshConfigExtensionProviderZipkinTracingProvider `json:"zipkin,omitempty"`
+
+ // +hidefromdoc
+ // Configures a Lightstep tracing provider.
+ // Deprecated: For Istio 1.15+, please use an OpenTelemetryTracingProvider instead; more details can be found at https://github.com/istio/istio/issues/40027
+ //
+ // Deprecated: Marked as deprecated in mesh/v1alpha1/config.proto.
+ Lightstep *MeshConfigExtensionProviderLightstepTracingProvider `json:"lightstep,omitempty"`
+
+ // Configures a Datadog tracing provider.
+ Datadog *MeshConfigExtensionProviderDatadogTracingProvider `json:"datadog,omitempty"`
+
+ // Configures a Stackdriver provider.
+ Stackdriver *MeshConfigExtensionProviderStackdriverProvider `json:"stackdriver,omitempty"`
+
+ // +hidefromdoc
+ // Configures an OpenCensusAgent tracing provider.
+ // Deprecated: OpenCensus is deprecated; more details can be found at https://opentelemetry.io/blog/2023/sunsetting-opencensus/
+ //
+ // Deprecated: Marked as deprecated in mesh/v1alpha1/config.proto.
+ Opencensus *MeshConfigExtensionProviderOpenCensusAgentTracingProvider `json:"opencensus,omitempty"`
+
+ // Configures an Apache SkyWalking provider.
+ Skywalking *MeshConfigExtensionProviderSkyWalkingTracingProvider `json:"skywalking,omitempty"`
+
+ // Configures an OpenTelemetry tracing provider.
+ Opentelemetry *MeshConfigExtensionProviderOpenTelemetryTracingProvider `json:"opentelemetry,omitempty"`
+
+ // Configures a Prometheus metrics provider.
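+ //
+ // For illustration only, a hypothetical extensionProviders entry (the provider
+ // name is an assumption; this provider type takes no further configuration):
+ //
+ // extensionProviders:
+ // - name: prometheus
+ //   prometheus: {}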
+ Prometheus *MeshConfigExtensionProviderPrometheusMetricsProvider `json:"prometheus,omitempty"` + + // Configures an Envoy File Access Log provider. + EnvoyFileAccessLog *MeshConfigExtensionProviderEnvoyFileAccessLogProvider `json:"envoyFileAccessLog,omitempty"` + + // Configures an Envoy Access Logging Service provider for HTTP traffic. + EnvoyHttpAls *MeshConfigExtensionProviderEnvoyHttpGrpcV3LogProvider `json:"envoyHttpAls,omitempty"` + + // Configures an Envoy Access Logging Service provider for TCP traffic. + EnvoyTcpAls *MeshConfigExtensionProviderEnvoyTcpGrpcV3LogProvider `json:"envoyTcpAls,omitempty"` + + // Configures an Envoy Open Telemetry Access Logging Service provider. + EnvoyOtelAls *MeshConfigExtensionProviderEnvoyOpenTelemetryLogProvider `json:"envoyOtelAls,omitempty"` +} + +// Holds the name references to the providers that will be used by default +// in other Istio configuration resources if the provider is not specified. +// +// These names must match a provider defined in `extension_providers` that is +// one of the supported tracing providers. +type MeshConfigDefaultProviders struct { + // Name of the default provider(s) for tracing. + Tracing []string `json:"tracing,omitempty"` + // Name of the default provider(s) for metrics. + Metrics []string `json:"metrics,omitempty"` + // Name of the default provider(s) for access logging. + AccessLogging []string `json:"accessLogging,omitempty"` +} + +type MeshConfigProxyPathNormalization struct { + Normalization MeshConfigProxyPathNormalizationNormalizationType `json:"normalization,omitempty"` +} + +type MeshConfigTLSConfig struct { + // Optional: the minimum TLS protocol version. The default minimum + // TLS version will be TLS 1.2. As servers may not be Envoy and be + // set to TLS 1.2 (e.g., workloads using mTLS without sidecars), the + // minimum TLS version for clients may also be TLS 1.2. + // In the current Istio implementation, the maximum TLS protocol version + // is TLS 1.3. + MinProtocolVersion MeshConfigTLSConfigTLSProtocol `json:"minProtocolVersion,omitempty"` + // Optional: If specified, the TLS connection will only support the specified ECDH curves for the DH key exchange. + // If not specified, the default curves enforced by Envoy will be used. For details about the default curves, refer to + // [Ecdh Curves](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/transport_sockets/tls/v3/common.proto). + EcdhCurves []string `json:"ecdhCurves,omitempty"` + // Optional: If specified, the TLS connection will only support the specified cipher list when negotiating TLS 1.0-1.2. + // If not specified, the following cipher suites will be used: + // ``` + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES128-GCM-SHA256 + // ECDHE-RSA-AES128-GCM-SHA256 + // AES256-GCM-SHA384 + // AES128-GCM-SHA256 + // ``` + CipherSuites []string `json:"cipherSuites,omitempty"` +} + +// Settings for the selected services. +type MeshConfigServiceSettingsSettings struct { + // If true, specifies that the client and service endpoints must reside in the same cluster. + // By default, in multi-cluster deployments, the Istio control plane assumes all service + // endpoints to be reachable from any client in any of the clusters which are part of the + // mesh. This configuration option limits the set of service endpoints visible to a client + // to be cluster scoped. 
+    //
+    // There are some common scenarios when this can be useful:
+    //
+    // - A service (or group of services) is inherently local to the cluster and has local storage
+    //   for that cluster. For example, the kube-system namespace (e.g. the Kube API Server).
+    // - A mesh administrator wants to slowly migrate services to Istio. They might start by first
+    //   having services cluster-local and then slowly transition them to mesh-wide. They could do
+    //   this service-by-service (e.g. mysvc.myns.svc.cluster.local) or as a group
+    //   (e.g. *.myns.svc.cluster.local).
+    //
+    // By default Istio will consider kubernetes.default.svc (i.e. the API Server) as well as all
+    // services in the kube-system namespace to be cluster-local, unless explicitly overridden here.
+    ClusterLocal bool `json:"clusterLocal,omitempty"`
+}
+
+type MeshConfigExtensionProviderEnvoyExternalAuthorizationRequestBody struct {
+    // Sets the maximum size of a message body that the ext-authz filter will hold in memory.
+    // If max_request_bytes is reached and allow_partial_message is false, Envoy will return a 413 (Payload Too Large).
+    // Otherwise the request will be sent to the provider with a partial message.
+    // Note that this setting takes precedence over the fail_open field; the 413 will be returned even when
+    // fail_open is set to true.
+    MaxRequestBytes uint32 `json:"maxRequestBytes,omitempty"`
+    // When this field is true, the ext-authz filter will buffer the message until max_request_bytes is reached.
+    // The authorization request will be dispatched and no 413 HTTP error will be returned by the filter.
+    // A "x-envoy-auth-partial-body: false|true" metadata header will be added to the authorization request message,
+    // indicating whether the body data is partial.
+    AllowPartialMessage bool `json:"allowPartialMessage,omitempty"`
+    // If true, the body sent to the external authorization service in the gRPC authorization request is set with raw bytes
+    // in the [raw_body field](https://github.com/envoyproxy/envoy/blame/cffb095d59d7935abda12b9509bcd136808367bb/api/envoy/service/auth/v3/attribute_context.proto#L153).
+    // Otherwise, it will be filled with a UTF-8 string in the [body field](https://github.com/envoyproxy/envoy/blame/cffb095d59d7935abda12b9509bcd136808367bb/api/envoy/service/auth/v3/attribute_context.proto#L147).
+    // This field only works with the envoy_ext_authz_grpc provider and has no effect for the envoy_ext_authz_http provider.
+    PackAsBytes bool `json:"packAsBytes,omitempty"`
+}
+
+type MeshConfigExtensionProviderEnvoyExternalAuthorizationHttpProvider struct {
+    // REQUIRED. Specifies the service that implements the Envoy ext_authz HTTP authorization service.
+    // The format is `[<Namespace>/]<Hostname>`. The specification of `<Namespace>` is required only when it is insufficient
+    // to unambiguously resolve a service in the service registry. The `<Hostname>` is a fully qualified host name of a
+    // service defined by the Kubernetes service or ServiceEntry.
+    //
+    // Example: "my-ext-authz.foo.svc.cluster.local" or "bar/my-ext-authz.example.com".
+    // +kubebuilder:validation:Required
+    Service string `json:"service"`
+    // REQUIRED. Specifies the port of the service.
+    // +kubebuilder:validation:Required
+    Port uint32 `json:"port"`
+    // The maximum duration that the proxy will wait for a response from the provider (default timeout: 600s).
+    // When this timeout condition is met, the proxy marks the communication to the authorization service as failure.
+    // In this situation, the response sent back to the client will depend on the configured `fail_open` field.
+    Timeout *metav1.Duration `json:"timeout,omitempty"`
+    // Sets a prefix to the value of the authorization request header *Path*.
+    // For example, setting this to "/check" for an original user request at path "/admin" will cause the
+    // authorization check request to be sent to the authorization service at the path "/check/admin" instead of "/admin".
+    PathPrefix string `json:"pathPrefix,omitempty"`
+    // If true, the user request will be allowed even if the communication with the authorization service has failed,
+    // or if the authorization service has returned an HTTP 5xx error.
+    // Default is false, and the request will be rejected with a "Forbidden" response.
+    FailOpen bool `json:"failOpen,omitempty"`
+    // Sets the HTTP status that is returned to the client when there is a network error to the authorization service.
+    // The default status is "403" (HTTP Forbidden).
+    StatusOnError string `json:"statusOnError,omitempty"`
+    // DEPRECATED. Use include_request_headers_in_check instead.
+    //
+    // Deprecated: Marked as deprecated in mesh/v1alpha1/config.proto.
+    IncludeHeadersInCheck []string `json:"includeHeadersInCheck,omitempty"`
+    // List of client request headers that should be included in the authorization request sent to the authorization service.
+    // Note that in addition to the headers specified here, the following headers are included by default:
+    // 1. *Host*, *Method*, *Path* and *Content-Length* are automatically sent.
+    // 2. *Content-Length* will be set to 0 and the request will not have a message body. However, the authorization
+    //    request can include the buffered client request body (controlled by the include_request_body_in_check setting);
+    //    consequently, the Content-Length of the authorization request reflects the size of its payload.
+    //
+    // Exact, prefix and suffix matches are supported (similar to the
+    // [authorization policy rule syntax](https://istio.io/latest/docs/reference/config/security/authorization-policy/#Rule)
+    // except the presence match):
+    // - Exact match: "abc" will match on value "abc".
+    // - Prefix match: "abc*" will match on value "abc" and "abcd".
+    // - Suffix match: "*abc" will match on value "abc" and "xabc".
+    IncludeRequestHeadersInCheck []string `json:"includeRequestHeadersInCheck,omitempty"`
+    // Set of additional fixed headers that should be included in the authorization request sent to the authorization service.
+    // Key is the header name and value is the header value.
+    // Note that client request headers with the same key, or headers specified in include_request_headers_in_check, will be overridden.
+    IncludeAdditionalHeadersInCheck map[string]string `json:"includeAdditionalHeadersInCheck,omitempty"`
+    // If set, the client request body will be included in the authorization request sent to the authorization service.
+    IncludeRequestBodyInCheck *MeshConfigExtensionProviderEnvoyExternalAuthorizationRequestBody `json:"includeRequestBodyInCheck,omitempty"`
+    // List of headers from the authorization service that should be added or overridden in the original request and
+    // forwarded to the upstream when the authorization check result is allowed (HTTP code 200).
+    // If not specified, the original request will not be modified and will be forwarded to the backend as-is.
+    // Note, any existing headers will be overridden.
+    //
+    // Exact, prefix and suffix matches are supported (similar to the
+    // [authorization policy rule syntax](https://istio.io/latest/docs/reference/config/security/authorization-policy/#Rule)
+    // except the presence match):
+    // - Exact match: "abc" will match on value "abc".
+    // - Prefix match: "abc*" will match on value "abc" and "abcd".
+    // - Suffix match: "*abc" will match on value "abc" and "xabc".
+    HeadersToUpstreamOnAllow []string `json:"headersToUpstreamOnAllow,omitempty"`
+    // List of headers from the authorization service that should be forwarded to downstream when the authorization
+    // check result is not allowed (HTTP code other than 200).
+    // If not specified, all the authorization response headers, except *Authority (Host)*, will be in the response to
+    // the downstream.
+    // When a header is included in this list, *Path*, *Status*, *Content-Length*, *WWW-Authenticate* and *Location* are
+    // automatically added.
+    // Note, the body from the authorization service is always included in the response to downstream.
+    //
+    // Exact, prefix and suffix matches are supported (similar to the
+    // [authorization policy rule syntax](https://istio.io/latest/docs/reference/config/security/authorization-policy/#Rule)
+    // except the presence match):
+    // - Exact match: "abc" will match on value "abc".
+    // - Prefix match: "abc*" will match on value "abc" and "abcd".
+    // - Suffix match: "*abc" will match on value "abc" and "xabc".
+    HeadersToDownstreamOnDeny []string `json:"headersToDownstreamOnDeny,omitempty"`
+    // List of headers from the authorization service that should be forwarded to downstream when the authorization
+    // check result is allowed (HTTP code 200).
+    // If not specified, the original response will not be modified and will be forwarded to the downstream as-is.
+    // Note, any existing headers will be overridden.
+    //
+    // Exact, prefix and suffix matches are supported (similar to the
+    // [authorization policy rule syntax](https://istio.io/latest/docs/reference/config/security/authorization-policy/#Rule)
+    // except the presence match):
+    // - Exact match: "abc" will match on value "abc".
+    // - Prefix match: "abc*" will match on value "abc" and "abcd".
+    // - Suffix match: "*abc" will match on value "abc" and "xabc".
+    HeadersToDownstreamOnAllow []string `json:"headersToDownstreamOnAllow,omitempty"`
+}
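+
+// An illustrative `meshConfig.extensionProviders` entry for the HTTP authorizer
+// above (a sketch assembled from the JSON tags here, not copied from upstream docs):
+//
+// ```yaml
+// extensionProviders:
+// - name: "my-ext-authz"
+//   envoyExtAuthzHttp:
+//     service: "my-ext-authz.foo.svc.cluster.local"
+//     port: 8000
+//     includeRequestHeadersInCheck: ["x-ext-authz"]
+// ```
+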
+type MeshConfigExtensionProviderEnvoyExternalAuthorizationGrpcProvider struct {
+    // REQUIRED. Specifies the service that implements the Envoy ext_authz gRPC authorization service.
+    // The format is `[<Namespace>/]<Hostname>`. The specification of `<Namespace>` is required only when it is insufficient
+    // to unambiguously resolve a service in the service registry. The `<Hostname>` is a fully qualified host name of a
+    // service defined by the Kubernetes service or ServiceEntry.
+    //
+    // Example: "my-ext-authz.foo.svc.cluster.local" or "bar/my-ext-authz.example.com".
+    // +kubebuilder:validation:Required
+    Service string `json:"service"`
+    // REQUIRED. Specifies the port of the service.
+    // +kubebuilder:validation:Required
+    Port uint32 `json:"port"`
+    // The maximum duration that the proxy will wait for a response from the provider; this is the timeout for a specific request (default timeout: 600s).
+    // When this timeout condition is met, the proxy marks the communication to the authorization service as failure.
+    // In this situation, the response sent back to the client will depend on the configured `fail_open` field.
+    Timeout *metav1.Duration `json:"timeout,omitempty"`
+    // If true, the HTTP request or TCP connection will be allowed even if the communication with the authorization service has failed,
+    // or if the authorization service has returned an HTTP 5xx error.
+    // Default is false. For HTTP requests, the request will be rejected with 403 (HTTP Forbidden). For TCP connections, the connection will be closed immediately.
+    FailOpen bool `json:"failOpen,omitempty"`
+    // Sets the HTTP status that is returned to the client when there is a network error to the authorization service.
+    // The default status is "403" (HTTP Forbidden).
+    StatusOnError string `json:"statusOnError,omitempty"`
+    // If set, the client request body will be included in the authorization request sent to the authorization service.
+    IncludeRequestBodyInCheck *MeshConfigExtensionProviderEnvoyExternalAuthorizationRequestBody `json:"includeRequestBodyInCheck,omitempty"`
+}
+
+// Defines configuration for a Zipkin tracer.
+type MeshConfigExtensionProviderZipkinTracingProvider struct {
+    // REQUIRED. Specifies the service that implements the Zipkin API.
+    // The format is `[<Namespace>/]<Hostname>`. The specification of `<Namespace>` is required only when it is insufficient
+    // to unambiguously resolve a service in the service registry. The `<Hostname>` is a fully qualified host name of a
+    // service defined by the Kubernetes service or ServiceEntry.
+    //
+    // Example: "zipkin.default.svc.cluster.local" or "bar/zipkin.example.com".
+    // +kubebuilder:validation:Required
+    Service string `json:"service"`
+    // REQUIRED. Specifies the port of the service.
+    // +kubebuilder:validation:Required
+    Port uint32 `json:"port"`
+    // Optional. Controls the overall path length allowed in a reported span.
+    // NOTE: currently only controls max length of the path tag.
+    MaxTagLength uint32 `json:"maxTagLength,omitempty"`
+    // Optional. A 128 bit trace id will be used in Istio by default.
+    // If true, a 64 bit trace id will be used instead.
+    Enable64BitTraceId bool `json:"enable64bitTraceId,omitempty"`
+}
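+
+// An illustrative `meshConfig.extensionProviders` entry for a Zipkin tracer
+// (a sketch based on the JSON tags above, not copied from upstream docs):
+//
+// ```yaml
+// extensionProviders:
+// - name: zipkin-tracer
+//   zipkin:
+//     service: zipkin.istio-system.svc.cluster.local
+//     port: 9411
+// ```
+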
+// Defines configuration for a Lightstep tracer.
+// Note: Lightstep has moved to OpenTelemetry-based integrations. Istio 1.15+
+// will generate OpenTelemetry-compatible configuration when using this option.
+type MeshConfigExtensionProviderLightstepTracingProvider struct {
+    // REQUIRED. Specifies the service for the Lightstep collector.
+    // The format is `[<Namespace>/]<Hostname>`. The specification of `<Namespace>` is required only when it is insufficient
+    // to unambiguously resolve a service in the service registry. The `<Hostname>` is a fully qualified host name of a
+    // service defined by the Kubernetes service or ServiceEntry.
+    //
+    // Example: "lightstep.default.svc.cluster.local" or "bar/lightstep.example.com".
+    // +kubebuilder:validation:Required
+    Service string `json:"service"`
+    // REQUIRED. Specifies the port of the service.
+    // +kubebuilder:validation:Required
+    Port uint32 `json:"port"`
+    // The Lightstep access token.
+    AccessToken string `json:"accessToken,omitempty"`
+    // Optional. Controls the overall path length allowed in a reported span.
+    // NOTE: currently only controls max length of the path tag.
+    MaxTagLength uint32 `json:"maxTagLength,omitempty"`
+}
+
+// Defines configuration for a Datadog tracer.
+type MeshConfigExtensionProviderDatadogTracingProvider struct {
+    // REQUIRED. Specifies the service for the Datadog agent.
+    // The format is `[<Namespace>/]<Hostname>`. The specification of `<Namespace>` is required only when it is insufficient
+    // to unambiguously resolve a service in the service registry. The `<Hostname>` is a fully qualified host name of a
+    // service defined by the Kubernetes service or ServiceEntry.
+    //
+    // Example: "datadog.default.svc.cluster.local" or "bar/datadog.example.com".
+    // +kubebuilder:validation:Required
+    Service string `json:"service"`
+    // REQUIRED. Specifies the port of the service.
+    // +kubebuilder:validation:Required
+    Port uint32 `json:"port"`
+    // Optional. Controls the overall path length allowed in a reported span.
+    // NOTE: currently only controls max length of the path tag.
+    MaxTagLength uint32 `json:"maxTagLength,omitempty"`
+}
+
+// Defines configuration for a SkyWalking tracer.
+type MeshConfigExtensionProviderSkyWalkingTracingProvider struct {
+    // REQUIRED. Specifies the service for the SkyWalking receiver.
+    // The format is `[<Namespace>/]<Hostname>`. The specification of `<Namespace>` is required only when it is insufficient
+    // to unambiguously resolve a service in the service registry. The `<Hostname>` is a fully qualified host name of a
+    // service defined by the Kubernetes service or ServiceEntry.
+    //
+    // Example: "skywalking.default.svc.cluster.local" or "bar/skywalking.example.com".
+    // +kubebuilder:validation:Required
+    Service string `json:"service"`
+    // REQUIRED. Specifies the port of the service.
+    // +kubebuilder:validation:Required
+    Port uint32 `json:"port"`
+    // Optional. The SkyWalking OAP access token.
+    AccessToken string `json:"accessToken,omitempty"`
+}
+
+// Defines configuration for Stackdriver.
+//
+// WARNING: Stackdriver tracing uses OpenCensus configuration under the hood and, as a result, cannot be used
+// alongside any OpenCensus provider configuration. This is due to a limitation in the implementation of the
+// OpenCensus driver in Envoy.
+type MeshConfigExtensionProviderStackdriverProvider struct {
+    // Debug enables trace output to stdout.
+    // +hidefromdoc
+    //
+    // Deprecated: Marked as deprecated in mesh/v1alpha1/config.proto.
+    Debug bool `json:"debug,omitempty"`
+    // The global default max number of attributes per span.
+    // Default is 200.
+    // +hidefromdoc
+    //
+    // Deprecated: Marked as deprecated in mesh/v1alpha1/config.proto.
+    MaxNumberOfAttributes *int64 `json:"maxNumberOfAttributes,omitempty"`
+    // The global default max number of annotation events per span.
+    // Default is 200.
+    // +hidefromdoc
+    //
+    // Deprecated: Marked as deprecated in mesh/v1alpha1/config.proto.
+    MaxNumberOfAnnotations *int64 `json:"maxNumberOfAnnotations,omitempty"`
+    // The global default max number of message events per span.
+    // Default is 200.
+    // +hidefromdoc
+    //
+    // Deprecated: Marked as deprecated in mesh/v1alpha1/config.proto.
+    MaxNumberOfMessageEvents *int64 `json:"maxNumberOfMessageEvents,omitempty"`
+    // Optional. Controls the overall path length allowed in a reported span.
+    // NOTE: currently only controls max length of the path tag.
+    MaxTagLength uint32 `json:"maxTagLength,omitempty"`
+    // Optional. Controls Stackdriver logging behavior.
+    Logging *MeshConfigExtensionProviderStackdriverProviderLogging `json:"logging,omitempty"`
+}
+
+// Defines configuration for an OpenCensus tracer writing to an OpenCensus backend.
+//
+// WARNING: OpenCensusAgentTracingProviders should be used with extreme care. Configuration of
+// OpenCensus providers CANNOT be changed during the course of a proxy's lifetime due to a limitation
+// in the implementation of the OpenCensus driver in Envoy.
+// This means that only a single provider configuration
+// may be used for OpenCensus at any given time for a proxy or group of proxies, AND that any change to the provider
+// configuration MUST be accompanied by a restart of all proxies that will use that configuration.
+//
+// NOTE: Stackdriver tracing uses OpenCensus configuration under the hood and, as a result, cannot be used
+// alongside OpenCensus provider configuration.
+type MeshConfigExtensionProviderOpenCensusAgentTracingProvider struct {
+    // REQUIRED. Specifies the service for the OpenCensusAgent.
+    // The format is `[<Namespace>/]<Hostname>`. The specification of `<Namespace>` is required only when it is insufficient
+    // to unambiguously resolve a service in the service registry. The `<Hostname>` is a fully qualified host name of a
+    // service defined by the Kubernetes service or ServiceEntry.
+    //
+    // Example: "ocagent.default.svc.cluster.local" or "bar/ocagent.example.com".
+    // +kubebuilder:validation:Required
+    Service string `json:"service"`
+    // REQUIRED. Specifies the port of the service.
+    // +kubebuilder:validation:Required
+    Port uint32 `json:"port"`
+    // Specifies the set of context propagation headers used for distributed
+    // tracing. Default is `["W3C_TRACE_CONTEXT"]`. If multiple values are specified,
+    // the proxy will attempt to read each header for each request and will
+    // write all headers.
+    Context []MeshConfigExtensionProviderOpenCensusAgentTracingProviderTraceContext `json:"context,omitempty"`
+    // Optional. Controls the overall path length allowed in a reported span.
+    // NOTE: currently only controls max length of the path tag.
+    MaxTagLength uint32 `json:"maxTagLength,omitempty"`
+}
+
+type MeshConfigExtensionProviderPrometheusMetricsProvider struct {
+}
+
+// Defines configuration for Envoy-based access logging that writes to
+// local files (and/or standard streams).
+type MeshConfigExtensionProviderEnvoyFileAccessLogProvider struct {
+    // Path to a local file to write the access log entries.
+    // This may be used to write to streams, via `/dev/stderr` and `/dev/stdout`.
+    // If unspecified, defaults to `/dev/stdout`.
+    Path string `json:"path,omitempty"`
+    // Optional. Allows overriding of the default access log format.
+    LogFormat *MeshConfigExtensionProviderEnvoyFileAccessLogProviderLogFormat `json:"logFormat,omitempty"`
+}
+
+// Defines configuration for an Envoy [Access Logging Service](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/access_loggers/grpc/v3/als.proto#grpc-access-log-service-als)
+// integration for HTTP traffic.
+type MeshConfigExtensionProviderEnvoyHttpGrpcV3LogProvider struct {
+    // REQUIRED. Specifies the service that implements the Envoy ALS gRPC access log service.
+    // The format is `[<Namespace>/]<Hostname>`. The specification of `<Namespace>` is required only when it is insufficient
+    // to unambiguously resolve a service in the service registry. The `<Hostname>` is a fully qualified host name of a
+    // service defined by the Kubernetes service or ServiceEntry.
+    //
+    // Example: "envoy-als.foo.svc.cluster.local" or "bar/envoy-als.example.com".
+    // +kubebuilder:validation:Required
+    Service string `json:"service"`
+    // REQUIRED. Specifies the port of the service.
+    // +kubebuilder:validation:Required
+    Port uint32 `json:"port"`
+    // Optional. The friendly name of the access log.
+    // Defaults:
+    // - "http_envoy_accesslog"
+    // - "listener_envoy_accesslog"
+    LogName string `json:"logName,omitempty"`
+    // Optional. Additional filter state objects to log.
+    FilterStateObjectsToLog []string `json:"filterStateObjectsToLog,omitempty"`
+    // Optional. Additional request headers to log.
+    AdditionalRequestHeadersToLog []string `json:"additionalRequestHeadersToLog,omitempty"`
+    // Optional. Additional response headers to log.
+    AdditionalResponseHeadersToLog []string `json:"additionalResponseHeadersToLog,omitempty"`
+    // Optional. Additional response trailers to log.
+    AdditionalResponseTrailersToLog []string `json:"additionalResponseTrailersToLog,omitempty"`
+}
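+
+// An illustrative `meshConfig.extensionProviders` entry for the HTTP ALS
+// provider above (a sketch based on the JSON tags here, not from upstream docs):
+//
+// ```yaml
+// extensionProviders:
+// - name: envoy-als
+//   envoyHttpAls:
+//     service: envoy-als.foo.svc.cluster.local
+//     port: 9000
+// ```
+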
+// Defines configuration for an Envoy [Access Logging Service](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/access_loggers/grpc/v3/als.proto#grpc-access-log-service-als)
+// integration for TCP traffic.
+type MeshConfigExtensionProviderEnvoyTcpGrpcV3LogProvider struct {
+    // REQUIRED. Specifies the service that implements the Envoy ALS gRPC access log service.
+    // The format is `[<Namespace>/]<Hostname>`. The specification of `<Namespace>` is required only when it is insufficient
+    // to unambiguously resolve a service in the service registry. The `<Hostname>` is a fully qualified host name of a
+    // service defined by the Kubernetes service or ServiceEntry.
+    //
+    // Example: "envoy-als.foo.svc.cluster.local" or "bar/envoy-als.example.com".
+    // +kubebuilder:validation:Required
+    Service string `json:"service"`
+    // REQUIRED. Specifies the port of the service.
+    // +kubebuilder:validation:Required
+    Port uint32 `json:"port"`
+    // Optional. The friendly name of the access log.
+    // Defaults:
+    // - "tcp_envoy_accesslog"
+    // - "listener_envoy_accesslog"
+    LogName string `json:"logName,omitempty"`
+    // Optional. Additional filter state objects to log.
+    FilterStateObjectsToLog []string `json:"filterStateObjectsToLog,omitempty"`
+}
+
+// Defines configuration for an Envoy [OpenTelemetry (gRPC) Access Log](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/access_loggers/open_telemetry/v3/logs_service.proto).
+type MeshConfigExtensionProviderEnvoyOpenTelemetryLogProvider struct {
+    // REQUIRED. Specifies the service that implements the Envoy ALS gRPC access log service.
+    // The format is `[<Namespace>/]<Hostname>`. The specification of `<Namespace>` is required only when it is insufficient
+    // to unambiguously resolve a service in the service registry. The `<Hostname>` is a fully qualified host name of a
+    // service defined by the Kubernetes service or ServiceEntry.
+    //
+    // Example: "envoy-als.foo.svc.cluster.local" or "bar/envoy-als.example.com".
+    // +kubebuilder:validation:Required
+    Service string `json:"service"`
+    // REQUIRED. Specifies the port of the service.
+    // +kubebuilder:validation:Required
+    Port uint32 `json:"port"`
+    // Optional. The friendly name of the access log.
+    // Defaults:
+    // - "otel_envoy_accesslog"
+    LogName string `json:"logName,omitempty"`
+    // Optional. Format for the proxy access log.
+    // An empty value results in the proxy's default access log format, following Envoy access logging formatting.
+    LogFormat *MeshConfigExtensionProviderEnvoyOpenTelemetryLogProviderLogFormat `json:"logFormat,omitempty"`
+}
+
+// Defines configuration for an OpenTelemetry tracing backend. Istio 1.16.1 or higher is needed.
+// +kubebuilder:validation:XValidation:message="At most one of [dynatraceSampler] should be set",rule="(has(self.dynatraceSampler)?1:0) <= 1"
+type MeshConfigExtensionProviderOpenTelemetryTracingProvider struct {
+    // REQUIRED. Specifies the OpenTelemetry endpoint that will receive OTLP traces.
+    // The format is `[<Namespace>/]<Hostname>`.
+    // The specification of `<Namespace>` is required only when it is insufficient
+    // to unambiguously resolve a service in the service registry. The `<Hostname>` is a fully qualified host name of a
+    // service defined by the Kubernetes service or ServiceEntry.
+    //
+    // Example: "otlp.default.svc.cluster.local" or "bar/otlp.example.com".
+    // +kubebuilder:validation:Required
+    Service string `json:"service"`
+    // REQUIRED. Specifies the port of the service.
+    // +kubebuilder:validation:Required
+    Port uint32 `json:"port"`
+    // Optional. Controls the overall path length allowed in a reported span.
+    // NOTE: currently only controls max length of the path tag.
+    MaxTagLength uint32 `json:"maxTagLength,omitempty"`
+    // Optional. Specifies the configuration for exporting OTLP traces via HTTP.
+    // When empty, traces will be exported via gRPC.
+    //
+    // The following example shows how to configure the OpenTelemetry ExtensionProvider to export via HTTP:
+    //
+    // 1. Add/change the OpenTelemetry extension provider in `MeshConfig`:
+    //
+    // ```yaml
+    // - name: otel-tracing
+    //   opentelemetry:
+    //     port: 443
+    //     service: my.olly-backend.com
+    //     http:
+    //       path: "/api/otlp/traces"
+    //       timeout: 10s
+    //       headers:
+    //       - name: "my-custom-header"
+    //         value: "some value"
+    // ```
+    //
+    // 2. Deploy a `ServiceEntry` for the observability back-end:
+    //
+    // ```yaml
+    // apiVersion: networking.istio.io/v1alpha3
+    // kind: ServiceEntry
+    // metadata:
+    //   name: my-olly-backend
+    // spec:
+    //   hosts:
+    //   - my.olly-backend.com
+    //   ports:
+    //   - number: 443
+    //     name: https-port
+    //     protocol: HTTPS
+    //   resolution: DNS
+    //   location: MESH_EXTERNAL
+    // ---
+    // apiVersion: networking.istio.io/v1alpha3
+    // kind: DestinationRule
+    // metadata:
+    //   name: my-olly-backend
+    // spec:
+    //   host: my.olly-backend.com
+    //   trafficPolicy:
+    //     portLevelSettings:
+    //     - port:
+    //         number: 443
+    //       tls:
+    //         mode: SIMPLE
+    // ```
+    Http *MeshConfigExtensionProviderHttpService `json:"http,omitempty"`
+    // Optional. Specifies [Resource Detectors](https://opentelemetry.io/docs/specs/otel/resource/sdk/)
+    // to be used by the OpenTelemetry Tracer. When multiple resources are provided, they are merged
+    // according to the OpenTelemetry [Resource specification](https://opentelemetry.io/docs/specs/otel/resource/sdk/#merge).
+    //
+    // The following example shows how to configure the Environment Resource Detector, which will
+    // read the attributes from the environment variable `OTEL_RESOURCE_ATTRIBUTES`:
+    //
+    // ```yaml
+    // - name: otel-tracing
+    //   opentelemetry:
+    //     port: 443
+    //     service: my.olly-backend.com
+    //     resource_detectors:
+    //       environment: {}
+    // ```
+    ResourceDetectors *MeshConfigExtensionProviderResourceDetectors `json:"resourceDetectors,omitempty"`
+
+    // The Dynatrace adaptive traffic management (ATM) sampler.
+    //
+    // Example configuration:
+    //
+    // ```yaml
+    // - name: otel-tracing
+    //   opentelemetry:
+    //     port: 443
+    //     service: "{your-environment-id}.live.dynatrace.com"
+    //     http:
+    //       path: "/api/v2/otlp/v1/traces"
+    //       timeout: 10s
+    //       headers:
+    //       - name: "Authorization"
+    //         value: "Api-Token dt0c01."
+    //     resource_detectors:
+    //       dynatrace: {}
+    //     dynatrace_sampler:
+    //       tenant: "{your-environment-id}"
+    //       cluster_id: 1234
+    // ```
+    DynatraceSampler *MeshConfigExtensionProviderOpenTelemetryTracingProviderDynatraceSampler `json:"dynatraceSampler,omitempty"`
+}
+
+// Defines configuration for an HTTP service that can be used by an Extension Provider
+// that does communication via HTTP.
+type MeshConfigExtensionProviderHttpService struct {
+    // REQUIRED. Specifies the path on the service.
+    // +kubebuilder:validation:Required
+    Path string `json:"path"`
+    // Optional. Specifies the timeout for the HTTP request.
+    // If not specified, the default is 3s.
+    Timeout *metav1.Duration `json:"timeout,omitempty"`
+    // Optional. Allows specifying custom HTTP headers that will be added
+    // to each HTTP request sent.
+    Headers []*MeshConfigExtensionProviderHttpHeader `json:"headers,omitempty"`
+}
+
+type MeshConfigExtensionProviderHttpHeader struct {
+    // REQUIRED. The HTTP header name.
+    // +kubebuilder:validation:Required
+    Name string `json:"name"`
+    // REQUIRED. The HTTP header value.
+    // +kubebuilder:validation:Required
+    Value string `json:"value"`
+}
+
+type MeshConfigExtensionProviderResourceDetectors struct {
+    Environment *MeshConfigExtensionProviderResourceDetectorsEnvironmentResourceDetector `json:"environment,omitempty"`
+    Dynatrace *MeshConfigExtensionProviderResourceDetectorsDynatraceResourceDetector `json:"dynatrace,omitempty"`
+}
+
+type MeshConfigExtensionProviderStackdriverProviderLogging struct {
+    // Collection of tag names and tag expressions to include in the log
+    // entry. Conflicts are resolved by the tag name by overriding previously
+    // supplied values.
+    //
+    // Example:
+    //
+    //   labels:
+    //     path: request.url_path
+    //     foo: request.headers['x-foo']
+    Labels map[string]string `json:"labels,omitempty"`
+}
+
+// +kubebuilder:validation:XValidation:message="At most one of [text labels] should be set",rule="(has(self.text)?1:0) + (has(self.labels)?1:0) <= 1"
+type MeshConfigExtensionProviderEnvoyFileAccessLogProviderLogFormat struct {
+    // Textual format for the envoy access logs. Envoy [command operators](https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#command-operators) may be
+    // used in the format. The [format string documentation](https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#config-access-log-format-strings)
+    // provides more information.
+    //
+    // NOTE: Istio will insert a newline ('\n') on all formats (if missing).
+    //
+    // Example: `text: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%"`
+    Text string `json:"text,omitempty"`
+
+    // JSON structured format for the envoy access logs. Envoy [command operators](https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#command-operators)
+    // can be used as values for fields within the Struct. Values are rendered
+    // as strings, numbers, or boolean values, as appropriate
+    // (see: [format dictionaries](https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#config-access-log-format-dictionaries)). Nested JSON is
+    // supported for some command operators (e.g. `FILTER_STATE` or `DYNAMIC_METADATA`).
+    // Use `labels: {}` for the default envoy JSON log format.
+    //
+    // Example:
+    // ```
+    // labels:
+    //   status: "%RESPONSE_CODE%"
+    //   message: "%LOCAL_REPLY_BODY%"
+    // ```
+    Labels map[string]string `json:"labels,omitempty"`
+}
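+
+// An illustrative `meshConfig.extensionProviders` entry combining the file
+// access log provider with a JSON log format (a sketch based on the JSON tags
+// above, not copied from upstream docs):
+//
+// ```yaml
+// extensionProviders:
+// - name: file-access-log
+//   envoyFileAccessLog:
+//     path: /dev/stdout
+//     logFormat:
+//       labels:
+//         status: "%RESPONSE_CODE%"
+// ```
+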
+type MeshConfigExtensionProviderEnvoyOpenTelemetryLogProviderLogFormat struct {
+    // Textual format for the envoy access logs. Envoy [command operators](https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#command-operators) may be
+    // used in the format. The [format string documentation](https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#config-access-log-format-strings)
+    // provides more information.
+    // Alias to the `body` field in [Open Telemetry](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/access_loggers/open_telemetry/v3/logs_service.proto).
+    //
+    // Example: `text: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%"`
+    Text string `json:"text,omitempty"`
+    // Optional. Additional attributes that describe the specific event occurrence.
+    // Structured format for the envoy access logs. Envoy [command operators](https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#command-operators)
+    // can be used as values for fields within the Struct. Values are rendered
+    // as strings, numbers, or boolean values, as appropriate
+    // (see: [format dictionaries](https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#config-access-log-format-dictionaries)). Nested JSON is
+    // supported for some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA).
+    // Alias to the `attributes` field in [Open Telemetry](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/access_loggers/open_telemetry/v3/logs_service.proto).
+    //
+    // Example:
+    // ```
+    // labels:
+    //   status: "%RESPONSE_CODE%"
+    //   message: "%LOCAL_REPLY_BODY%"
+    // ```
+    Labels map[string]string `json:"labels,omitempty"`
+}
+
+type MeshConfigExtensionProviderOpenTelemetryTracingProviderDynatraceSampler struct {
+    // REQUIRED. The Dynatrace customer's tenant identifier.
+    //
+    // The value can be obtained from the Istio deployment page in Dynatrace.
+    // +kubebuilder:validation:Required
+    Tenant string `json:"tenant"`
+    // REQUIRED. The identifier of the cluster in the Dynatrace platform.
+    // The cluster here is a Dynatrace-specific concept and is not related to the cluster concept in Istio/Envoy.
+    //
+    // The value can be obtained from the Istio deployment page in Dynatrace.
+    // +kubebuilder:validation:Required
+    ClusterId int32 `json:"clusterId"`
+    // Optional. Number of sampled spans per minute to be used
+    // when the adaptive value cannot be obtained from the Dynatrace API.
+    //
+    // A default value of `1000` is used when:
+    //
+    // - `root_spans_per_minute` is unset
+    // - `root_spans_per_minute` is set to 0
+    RootSpansPerMinute uint32 `json:"rootSpansPerMinute,omitempty"`
+    // Optional. Dynatrace HTTP API to obtain the sampling configuration.
+    //
+    // When not provided, the Dynatrace Sampler will reuse the configuration from the OpenTelemetryTracingProvider HTTP Exporter
+    // (`service`, `port` and `http`), including the access token.
+    HttpService *MeshConfigExtensionProviderOpenTelemetryTracingProviderDynatraceSamplerDynatraceApi `json:"httpService,omitempty"`
+}
+
+type MeshConfigExtensionProviderOpenTelemetryTracingProviderDynatraceSamplerDynatraceApi struct {
+    // REQUIRED. Specifies the Dynatrace environment from which to obtain the sampling configuration.
+    // The format is `<Hostname>`, where `<Hostname>` is the fully qualified Dynatrace environment
+    // host name defined in the ServiceEntry.
+    //
+    // Example: "{your-environment-id}.live.dynatrace.com".
+    // +kubebuilder:validation:Required
+    Service string `json:"service"`
+    // REQUIRED. Specifies the port of the service.
+    // +kubebuilder:validation:Required
+    Port uint32 `json:"port"`
+    // REQUIRED. Specifies the sampling configuration URI.
+    // +kubebuilder:validation:Required
+    Http *MeshConfigExtensionProviderHttpService `json:"http"`
+}
+
+// OpenTelemetry Environment Resource Detector.
+// The resource detector reads attributes from the environment variable `OTEL_RESOURCE_ATTRIBUTES`
+// and adds them to the OpenTelemetry resource.
+//
+// See: [Resource specification](https://opentelemetry.io/docs/specs/otel/resource/sdk/#specifying-resource-information-via-an-environment-variable)
+type MeshConfigExtensionProviderResourceDetectorsEnvironmentResourceDetector struct {
+}
+
+// Dynatrace Resource Detector.
+// The resource detector reads from the Dynatrace enrichment files
+// and adds host/process related attributes to the OpenTelemetry resource.
+//
+// See: [Enrich ingested data with Dynatrace-specific dimensions](https://docs.dynatrace.com/docs/shortlink/enrichment-files)
+type MeshConfigExtensionProviderResourceDetectorsDynatraceResourceDetector struct {
+}
+
+// Network provides information about the endpoints in a routable L3
+// network. A single routable L3 network can have one or more service
+// registries. Note that the network has no relation to the locality of the
+// endpoint. The endpoint locality will be obtained from the service
+// registry.
+type Network struct {
+    // The list of endpoints in the network (obtained through the
+    // constituent service registries or from CIDR ranges). All endpoints in
+    // the network are directly accessible to one another.
+    Endpoints []*NetworkNetworkEndpoints `json:"endpoints,omitempty"`
+    // Set of gateways associated with the network.
+    Gateways []*NetworkIstioNetworkGateway `json:"gateways,omitempty"`
+}
+
+// MeshNetworks (config map) provides information about the set of networks
+// inside a mesh and how to route to endpoints in each network. For example:
+//
+// MeshNetworks (file/config map):
+//
+// ```yaml
+// networks:
+//   network1:
+//     endpoints:
+//     - fromRegistry: registry1 # must match kubeconfig name in Kubernetes secret
+//     - fromCidr: 192.168.100.0/22 # a VM network, for example
+//     gateways:
+//     - registryServiceName: istio-ingressgateway.istio-system.svc.cluster.local
+//       port: 15443
+//       locality: us-east-1a
+//     - address: 192.168.100.1
+//       port: 15443
+//       locality: us-east-1a
+// ```
+type MeshNetworks struct {
+    // The set of networks inside this mesh. Each network should
+    // have a unique name and information about how to infer the endpoints in
+    // the network as well as the gateways associated with the network.
+    Networks map[string]*Network `json:"networks,omitempty"`
+}
+
+// NetworkEndpoints describes how the network associated with an endpoint
+// should be inferred. An endpoint will be assigned to a network based on
+// the following rules:
+//
+// 1. Implicitly: If the registry explicitly provides information about
+//    the network to which the endpoint belongs. In some cases, it's
+//    possible to indicate the network associated with the endpoint by
+//    adding the `ISTIO_META_NETWORK` environment variable to the sidecar.
+//
+// 2. Explicitly:
+//
+//    a. By matching the registry name with one of the `fromRegistry` entries
+//       in the mesh config. A `fromRegistry` can only be assigned to a
+//       single network.
+//
+//    b. By matching the IP against one of the CIDR ranges in a mesh
+//       config network. The CIDR ranges must not overlap, and each range must be
+//       assigned to a single network.
+//
+// (2) will override (1) if both are present.
+// +kubebuilder:validation:XValidation:message="At most one of [fromCidr fromRegistry] should be set",rule="(has(self.fromCidr)?1:0) + (has(self.fromRegistry)?1:0) <= 1"
+type NetworkNetworkEndpoints struct {
+    // A CIDR range for the set of endpoints in this network. The CIDR
+    // ranges for endpoints from different networks must not overlap.
+    FromCidr string `json:"fromCidr,omitempty"`
+
+    // Add all endpoints from the specified registry into this network.
+    // The names of the registries should correspond to the kubeconfig file name
+    // inside the secret that was used to configure the registry (Kubernetes
+    // multicluster) or supplied by the MCP server.
+    FromRegistry string `json:"fromRegistry,omitempty"`
+}
+
+// The gateway associated with this network. Traffic from remote networks
+// will arrive at the specified gateway:port. All incoming traffic must
+// use mTLS.
+// +kubebuilder:validation:XValidation:message="At most one of [registryServiceName address] should be set",rule="(has(self.registryServiceName)?1:0) + (has(self.address)?1:0) <= 1"
+type NetworkIstioNetworkGateway struct {
+    // A fully qualified domain name of the gateway service. Pilot will
+    // look up the service from the service registries in the network and
+    // obtain the endpoint IPs of the gateway from the service
+    // registry. Note that while the service name is a fully qualified
+    // domain name, it need not be resolvable outside the orchestration
+    // platform for the registry. e.g., this could be
+    // istio-ingressgateway.istio-system.svc.cluster.local.
+    RegistryServiceName string `json:"registryServiceName,omitempty"`
+
+    // IP address or externally resolvable DNS address associated with the gateway.
+    Address string `json:"address,omitempty"`
+
+    // The port associated with the gateway.
+    Port uint32 `json:"port,omitempty"`
+    // The locality associated with an explicitly specified gateway (i.e. ip).
+    Locality string `json:"locality,omitempty"`
+}
+
+// AuthenticationPolicy defines how the proxy is authenticated when it connects to the control plane.
+// It can be set at two different scopes: mesh-wide, or on a per-pod basis using the ProxyConfig annotation.
+// Mesh policy cannot be INHERIT.
+// +kubebuilder:validation:Enum=NONE;MUTUAL_TLS;INHERIT
+type AuthenticationPolicy string
+
+const (
+    // Do not encrypt proxy to control plane traffic.
+    AuthenticationPolicyNone AuthenticationPolicy = "NONE"
+    // Proxy to control plane traffic is wrapped into mutual TLS connections.
+    AuthenticationPolicyMutualTls AuthenticationPolicy = "MUTUAL_TLS"
+    // Use the policy defined by the parent scope. Should not be used for mesh
+    // policy.
+    AuthenticationPolicyInherit AuthenticationPolicy = "INHERIT"
+)
+
+// ForwardClientCertDetails controls how the x-forwarded-client-cert (XFCC)
+// header is handled by the gateway proxy.
+// See [Envoy XFCC](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto.html#enum-extensions-filters-network-http-connection-manager-v3-httpconnectionmanager-forwardclientcertdetails)
+// header handling for more details.
+// +kubebuilder:validation:Enum=UNDEFINED;SANITIZE;FORWARD_ONLY;APPEND_FORWARD;SANITIZE_SET;ALWAYS_FORWARD_ONLY
+type ForwardClientCertDetails string
+
+const (
+    // Field is not set.
+    ForwardClientCertDetailsUndefined ForwardClientCertDetails = "UNDEFINED"
+    // Do not send the XFCC header to the next hop. This is the default value.
+    ForwardClientCertDetailsSanitize ForwardClientCertDetails = "SANITIZE"
+    // When the client connection is mTLS (Mutual TLS), forward the XFCC header
+    // in the request.
+    ForwardClientCertDetailsForwardOnly ForwardClientCertDetails = "FORWARD_ONLY"
+    // When the client connection is mTLS, append the client certificate
+    // information to the request's XFCC header and forward it.
+    ForwardClientCertDetailsAppendForward ForwardClientCertDetails = "APPEND_FORWARD"
+    // When the client connection is mTLS, reset the XFCC header with the client
+    // certificate information and send it to the next hop.
+    ForwardClientCertDetailsSanitizeSet ForwardClientCertDetails = "SANITIZE_SET"
+    // Always forward the XFCC header in the request, regardless of whether the
+    // client connection is mTLS.
+    ForwardClientCertDetailsAlwaysForwardOnly ForwardClientCertDetails = "ALWAYS_FORWARD_ONLY"
+)
+
+// TraceContext selects the context propagation headers used for
+// distributed tracing.
+// +kubebuilder:validation:Enum=UNSPECIFIED;W3C_TRACE_CONTEXT;GRPC_BIN;CLOUD_TRACE_CONTEXT;B3
+type TracingOpenCensusAgentTraceContext string
+
+const (
+    // +hidefromdoc
+    // Unspecified context. Should not be used for now, but added to reserve
+    // the 0 enum value if TraceContext is used outside of a repeated field.
+    TracingOpenCensusAgentTraceContextUnspecified TracingOpenCensusAgentTraceContext = "UNSPECIFIED"
+    // Use W3C Trace Context propagation using the `traceparent` HTTP header.
+    // See the
+    // [Trace Context documentation](https://www.w3.org/TR/trace-context/) for details.
+    TracingOpenCensusAgentTraceContextW3cTraceContext TracingOpenCensusAgentTraceContext = "W3C_TRACE_CONTEXT"
+    // Use gRPC binary context propagation using the `grpc-trace-bin` HTTP header.
+    TracingOpenCensusAgentTraceContextGrpcBin TracingOpenCensusAgentTraceContext = "GRPC_BIN"
+    // Use Cloud Trace context propagation using the
+    // `X-Cloud-Trace-Context` HTTP header.
+    TracingOpenCensusAgentTraceContextCloudTraceContext TracingOpenCensusAgentTraceContext = "CLOUD_TRACE_CONTEXT"
+    // Use multi-header B3 context propagation using the `X-B3-TraceId`,
+    // `X-B3-SpanId`, and `X-B3-Sampled` HTTP headers. See the
+    // [B3 header propagation README](https://github.com/openzipkin/b3-propagation)
+    // for details.
+    TracingOpenCensusAgentTraceContextB3 TracingOpenCensusAgentTraceContext = "B3"
+)
+
+// Allows specification of various Istio-supported naming schemes for the
+// Envoy `service_cluster` value. The `service_cluster` value is primarily used
+// by Envoys to provide service names for tracing spans.
+// +kubebuilder:validation:Enum=APP_LABEL_AND_NAMESPACE;CANONICAL_NAME_ONLY;CANONICAL_NAME_AND_NAMESPACE
+type ProxyConfigTracingServiceName string
+
+const (
+    // Default scheme. Uses the `app` label and workload namespace to construct
+    // a cluster name. If the `app` label does not exist, `istio-proxy` is used.
+    ProxyConfigTracingServiceNameAppLabelAndNamespace ProxyConfigTracingServiceName = "APP_LABEL_AND_NAMESPACE"
+    // Uses the canonical name for a workload (*excluding namespace*).
+    ProxyConfigTracingServiceNameCanonicalNameOnly ProxyConfigTracingServiceName = "CANONICAL_NAME_ONLY"
+    // Uses the canonical name and namespace for a workload.
+    ProxyConfigTracingServiceNameCanonicalNameAndNamespace ProxyConfigTracingServiceName = "CANONICAL_NAME_AND_NAMESPACE"
+)
+
+// The mode used to redirect inbound traffic to Envoy.
+// This setting has no effect on outbound traffic: iptables `REDIRECT` is always used for
+// outbound connections.
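+//
+// For example (an illustrative sketch, not from the upstream docs), the mode can
+// be set mesh-wide via `meshConfig.defaultConfig`:
+//
+// ```yaml
+// meshConfig:
+//   defaultConfig:
+//     interceptionMode: TPROXY
+// ```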
+// +kubebuilder:validation:Enum=REDIRECT;TPROXY;NONE
+type ProxyConfigInboundInterceptionMode string
+
+const (
+    // The `REDIRECT` mode uses iptables `REDIRECT` to `NAT` and redirect to Envoy. This mode loses
+    // source IP addresses during redirection. This is the default redirection mode.
+    ProxyConfigInboundInterceptionModeRedirect ProxyConfigInboundInterceptionMode = "REDIRECT"
+    // The `TPROXY` mode uses iptables `TPROXY` to redirect to Envoy. This mode preserves both the
+    // source and destination IP addresses and ports, so that they can be used for advanced
+    // filtering and manipulation. This mode also configures the sidecar to run with the
+    // `CAP_NET_ADMIN` capability, which is required to use `TPROXY`.
+    ProxyConfigInboundInterceptionModeTproxy ProxyConfigInboundInterceptionMode = "TPROXY"
+    // The `NONE` mode does not configure redirect to Envoy at all. This is an advanced
+    // configuration that typically requires changes to user applications.
+    ProxyConfigInboundInterceptionModeNone ProxyConfigInboundInterceptionMode = "NONE"
+)
+
+// +kubebuilder:validation:Enum=UNDEFINED;IN_MESH
+type ProxyConfigProxyHeadersMetadataExchangeMode string
+
+const (
+    // Existing Istio behavior for the metadata exchange headers is unchanged.
+    ProxyConfigProxyHeadersMetadataExchangeModeUndefined ProxyConfigProxyHeadersMetadataExchangeMode = "UNDEFINED"
+    // Only append the Istio metadata exchange headers for services considered in-mesh.
+    // Traffic is considered in-mesh if it is secured with Istio mutual TLS. This means that `MESH_EXTERNAL` services, unmatched passthrough traffic, and requests to workloads without Istio enabled will be considered out of mesh.
+    ProxyConfigProxyHeadersMetadataExchangeModeInMesh ProxyConfigProxyHeadersMetadataExchangeMode = "IN_MESH"
+)
+
+// Tracing defines configuration for the tracing performed by Envoy instances.
+// +kubebuilder:validation:XValidation:message="At most one of [zipkin lightstep datadog stackdriver openCensusAgent] should be set",rule="(has(self.zipkin)?1:0) + (has(self.lightstep)?1:0) + (has(self.datadog)?1:0) + (has(self.stackdriver)?1:0) + (has(self.openCensusAgent)?1:0) <= 1"
+type Tracing struct {
+    // Use a Zipkin tracer.
+    Zipkin *TracingZipkin `json:"zipkin,omitempty"`
+
+    // Use a Lightstep tracer.
+    // NOTE: For Istio 1.15+, this configuration option will result
+    // in using the OpenTelemetry-based Lightstep integration.
+    Lightstep *TracingLightstep `json:"lightstep,omitempty"`
+
+    // Use a Datadog tracer.
+    Datadog *TracingDatadog `json:"datadog,omitempty"`
+
+    // Use a Stackdriver tracer.
+    Stackdriver *TracingStackdriver `json:"stackdriver,omitempty"`
+
+    // Use an OpenCensus tracer exporting to an OpenCensus agent.
+    OpenCensusAgent *TracingOpenCensusAgent `json:"openCensusAgent,omitempty"`
+
+    // Configures the custom tags to be added to active spans by all proxies (i.e. sidecars
+    // and gateways).
+    // The key represents the name of the tag.
+    // Ex:
+    // ```yaml
+    // custom_tags:
+    //   new_tag_name:
+    //     header:
+    //       name: custom-http-header-name
+    //       default_value: defaulted-value-from-custom-header
+    // ```
+    // +hidefromdoc
+    CustomTags map[string]*TracingCustomTag `json:"customTags,omitempty"`
+    // Configures the maximum length of the request path to extract and include in the
+    // HttpUrl tag. Used to truncate lengthy request paths to meet the needs of the tracing
+    // backend. If not set, then a length of 256 will be used.
+    // +hidefromdoc
+    MaxPathTagLength uint32 `json:"maxPathTagLength,omitempty"`
+    // The percentage of requests (0.0 - 100.0) that will be randomly selected for trace generation,
+    // if not requested by the client or not forced. Default is 1.0.
+    Sampling float64 `json:"sampling,omitempty"`
+    // Use the tls_settings to specify the tls mode to use. If the remote tracing service
+    // uses Istio mutual TLS and shares the root CA with Pilot, specify the TLS
+    // mode as `ISTIO_MUTUAL`.
+    TlsSettings *ClientTLSSettings `json:"tlsSettings,omitempty"`
+}
+
+// SDS defines secret discovery service (SDS) configuration to be used by the proxy.
+// For workloads, its values are set in the sidecar injector (passed as arguments to the istio-proxy container).
+// For pilot/mixer, it is passed as arguments to the istio-proxy container in the pilot/mixer deployment yaml files directly.
+// +hidefromdoc
+type SDS struct {
+    // True if SDS is enabled.
+    Enabled bool `json:"enabled,omitempty"`
+    // Path of the k8s service account JWT.
+    K8SSaJwtPath string `json:"k8sSaJwtPath,omitempty"`
+}
+
+// Topology describes the configuration for the relative location of a proxy with
+// respect to intermediate trusted proxies and the client. These settings
+// control how the client attributes are retrieved from the incoming traffic by
+// the gateway proxy and propagated to the upstream services in the cluster.
+type Topology struct {
+    // Number of trusted proxies deployed in front of the Istio gateway proxy.
+    // When this option is set to a value N greater than zero, the trusted client
+    // address is assumed to be the Nth address from the right end of the
+    // X-Forwarded-For (XFF) header from the incoming request. If the
+    // X-Forwarded-For (XFF) header is missing or has fewer than N addresses, the
+    // gateway proxy falls back to using the immediate downstream connection's
+    // source address as the trusted client address.
+    // Note that the gateway proxy will append the downstream connection's source
+    // address to the X-Forwarded-For (XFF) address and set the
+    // X-Envoy-External-Address header to the trusted client address before
+    // forwarding it to the upstream services in the cluster.
+    // The default value of num_trusted_proxies is 0.
+    // See [Envoy XFF](https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_conn_man/headers#config-http-conn-man-headers-x-forwarded-for)
+    // header handling for more details.
+    NumTrustedProxies uint32 `json:"numTrustedProxies,omitempty"`
+    // Configures how the gateway proxy handles the x-forwarded-client-cert (XFCC)
+    // header in the incoming request.
+    ForwardClientCertDetails ForwardClientCertDetails `json:"forwardClientCertDetails,omitempty"`
+    // Enables the [PROXY protocol](http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt) for
+    // downstream connections on a gateway.
+    ProxyProtocol *TopologyProxyProtocolConfiguration `json:"proxyProtocol,omitempty"`
+}
+
+// PrivateKeyProvider defines private key configuration for gateways and sidecars. This can be configured
+// mesh-wide or on an individual per-workload basis.
+// +kubebuilder:validation:XValidation:message="At most one of [cryptomb qat] should be set",rule="(has(self.cryptomb)?1:0) + (has(self.qat)?1:0) <= 1"
+type PrivateKeyProvider struct {
+    // Use the CryptoMb private key provider.
+    Cryptomb *PrivateKeyProviderCryptoMb `json:"cryptomb,omitempty"`
+
+    // Use the QAT private key provider.
+    Qat *PrivateKeyProviderQAT `json:"qat,omitempty"`
+}
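+
+// An illustrative sketch (not from the upstream docs) of enabling the CryptoMb
+// provider mesh-wide. The `pollDelay` field is an assumption taken from the
+// CryptoMb provider's own schema, which is defined elsewhere:
+//
+// ```yaml
+// meshConfig:
+//   defaultConfig:
+//     privateKeyProvider:
+//       cryptomb:
+//         pollDelay: 10ms  # assumed field; see PrivateKeyProviderCryptoMb
+// ```
+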
+// ProxyConfig defines variables for individual Envoy instances. This can be configured on a per-workload basis
+// as well as by the mesh-wide defaults.
+// To set the mesh-wide defaults, configure the `defaultConfig` section of `meshConfig`. For example:
+//
+// ```
+// meshConfig:
+//   defaultConfig:
+//     discoveryAddress: istiod:15012
+// ```
+//
+// This can also be configured on a per-workload basis by configuring the `proxy.istio.io/config` annotation on the pod. For example:
+//
+// ```
+// annotations:
+//   proxy.istio.io/config: |
+//     discoveryAddress: istiod:15012
+// ```
+//
+// If both are configured, the two are merged with per-field semantics; a field set in the annotation will fully replace the field from the mesh config defaults.
+// This is different from a deep merge provided by protobuf.
+// For example, `"tracing": { "sampling": 5 }` would completely override a setting configuring a tracing provider
+// such as `"tracing": { "zipkin": { "address": "..." } }`.
+//
+// Note: fields in ProxyConfig are not dynamically configured; changes will require restart of workloads to take effect.
+// +kubebuilder:validation:XValidation:message="At most one of [serviceCluster tracingServiceName] should be set",rule="(has(self.serviceCluster)?1:0) + (has(self.tracingServiceName)?1:0) <= 1"
+type MeshConfigProxyConfig struct {
+    // Path to the generated configuration file directory.
+    // The proxy agent generates the actual configuration and stores it in this directory.
+    ConfigPath string `json:"configPath,omitempty"`
+    // Path to the proxy binary.
+    BinaryPath string `json:"binaryPath,omitempty"`
+
+    // Service cluster defines the name for the `service_cluster` that is
+    // shared by all Envoy instances. This setting corresponds to the
+    // `--service-cluster` flag in Envoy. In a typical Envoy deployment, the
+    // `service-cluster` flag is used to identify the caller, for
+    // source-based routing scenarios.
+    //
+    // Since Istio does not assign a local `service/service version` to each
+    // Envoy instance, the name is the same for all of them. However, the
+    // source/caller's identity (e.g., IP address) is encoded in the
+    // `--service-node` flag when launching Envoy. When the RDS service
+    // receives API calls from Envoy, it uses the value of the `service-node`
+    // flag to compute routes that are relative to the service instances
+    // located at that IP address.
+    ServiceCluster string `json:"serviceCluster,omitempty"`
+
+    // Used by Envoy proxies to assign the values for the service names in trace
+    // spans.
+    TracingServiceName ProxyConfigTracingServiceName `json:"tracingServiceName,omitempty"`
+
+    // The time in seconds that Envoy will drain connections during a hot
+    // restart. MUST be >=1s (e.g., _1s/1m/1h_).
+    // Default drain duration is `45s`.
+    DrainDuration *metav1.Duration `json:"drainDuration,omitempty"`
+    // Address of the discovery service exposing xDS with mTLS connection.
+    // The inject configuration may override this value.
+    DiscoveryAddress string `json:"discoveryAddress,omitempty"`
+    // +hidefromdoc
+    //
+    // Deprecated: Marked as deprecated in mesh/v1alpha1/proxy.proto.
+    DiscoveryRefreshDelay *metav1.Duration `json:"discoveryRefreshDelay,omitempty"`
+    // Address of the Zipkin service (e.g. _zipkin:9411_).
+    // DEPRECATED: Use [tracing][istio.mesh.v1alpha1.ProxyConfig.tracing] instead.
+    //
+    // Deprecated: Marked as deprecated in mesh/v1alpha1/proxy.proto.
+    ZipkinAddress string `json:"zipkinAddress,omitempty"`
+    // IP address and port of a statsd UDP listener (e.g. `10.75.241.127:9125`).
+    StatsdUdpAddress string `json:"statsdUdpAddress,omitempty"`
+    // +hidefromdoc
+    //
+    // Deprecated: Marked as deprecated in mesh/v1alpha1/proxy.proto.
+    EnvoyMetricsServiceAddress string `json:"envoyMetricsServiceAddress,omitempty"`
+    // Port on which Envoy should listen for administrative commands.
+    // Default port is `15000`.
+    ProxyAdminPort int32 `json:"proxyAdminPort,omitempty"`
+    // +hidefromdoc
+    //
+    // Deprecated: Marked as deprecated in mesh/v1alpha1/proxy.proto.
+    AvailabilityZone string `json:"availabilityZone,omitempty"`
+    // AuthenticationPolicy defines how the proxy is authenticated when it connects to the control plane.
+    // Default is set to `MUTUAL_TLS`.
+    ControlPlaneAuthPolicy AuthenticationPolicy `json:"controlPlaneAuthPolicy,omitempty"`
+    // File path of a custom proxy configuration, currently used by proxies
+    // in front of Mixer and Pilot.
+    CustomConfigFile string `json:"customConfigFile,omitempty"`
+    // Maximum length of the name field in Envoy's metrics. The length of the name field
+    // is determined by the length of a name field in a service and the set of labels that
+    // comprise a particular version of the service. The default value is set to 189 characters.
+    // Envoy's internal metrics take up 67 characters, for a total metric name length of 256 characters.
+    // Increase the value of this field if you find that the metrics from Envoys are truncated.
+    StatNameLength int32 `json:"statNameLength,omitempty"`
+    // The number of worker threads to run.
+    // If unset, which is recommended, this will be automatically determined based on CPU requests/limits.
+    // If set to 0, all cores on the machine will be used, ignoring CPU requests or limits. This can lead to major performance
+    // issues if CPU limits are also set.
+    Concurrency *int32 `json:"concurrency,omitempty"`
+    // Path to the proxy bootstrap template file.
+    ProxyBootstrapTemplatePath string `json:"proxyBootstrapTemplatePath,omitempty"`
+    // The mode used to redirect inbound traffic to Envoy.
+    InterceptionMode ProxyConfigInboundInterceptionMode `json:"interceptionMode,omitempty"`
+    // Tracing configuration to be used by the proxy.
+    Tracing *Tracing `json:"tracing,omitempty"`
+    // Secret Discovery Service (SDS) configuration to be used by the proxy.
+    // +hidefromdoc
+    //
+    // Deprecated: Marked as deprecated in mesh/v1alpha1/proxy.proto.
+    Sds *SDS `json:"sds,omitempty"`
+    // Address of the service to which access logs from Envoys should be
+    // sent (e.g. `accesslog-service:15000`). See [Access Log
+    // Service](https://www.envoyproxy.io/docs/envoy/latest/api-v2/config/accesslog/v2/als.proto)
+    // for details about Envoy's gRPC Access Log Service API.
+    EnvoyAccessLogService *RemoteService `json:"envoyAccessLogService,omitempty"`
+    // Address of the Envoy Metrics Service implementation (e.g. `metrics-service:15000`).
+    // See [Metric Service](https://www.envoyproxy.io/docs/envoy/latest/api-v2/config/metrics/v2/metrics_service.proto)
+    // for details about Envoy's Metrics Service API.
+    EnvoyMetricsService *RemoteService `json:"envoyMetricsService,omitempty"`
+    // Additional environment variables for the proxy.
+    // Names starting with `ISTIO_META_` will be included in the generated bootstrap and sent to the XDS server.
+    ProxyMetadata map[string]string `json:"proxyMetadata,omitempty"`
+    // Envoy [runtime configuration](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/operations/runtime) to set during bootstrapping.
+ // This enables setting experimental, unsafe, unsupported, and deprecated features that should be used with extreme caution.
+ RuntimeValues map[string]string `json:"runtimeValues,omitempty"`
+ // Port on which the agent should listen for administrative commands such as the readiness probe.
+ // Default is set to port `15020`.
+ StatusPort int32 `json:"statusPort,omitempty"`
+ // An additional list of tags to extract from the in-proxy Istio telemetry. These extra tags can be
+ // added by configuring the telemetry extension. Each additional tag needs to be present in this list.
+ // Extra tags emitted by the telemetry extensions must be listed here so that they can be processed
+ // and exposed as Prometheus metrics.
+ // Deprecated: `istio.stats` is a native filter now; this field is no longer needed.
+ ExtraStatTags []string `json:"extraStatTags,omitempty"`
+ // Topology encapsulates the configuration which describes where the proxy is
+ // located, i.e. behind one (or N) trusted proxies or directly exposed
+ // to the internet. This configuration only affects gateways and is applied
+ // to all the gateways in the cluster unless overridden via annotations of the
+ // gateway workloads.
+ GatewayTopology *Topology `json:"gatewayTopology,omitempty"`
+ // The amount of time allowed for connections to complete on proxy shutdown.
+ // On receiving `SIGTERM` or `SIGINT`, `istio-agent` tells the active Envoy to start gracefully draining,
+ // discouraging any new connections and allowing existing connections to complete. It then
+ // sleeps for the `termination_drain_duration` and then kills any remaining active Envoy processes.
+ // If not set, a default of `5s` will be applied.
+ TerminationDrainDuration *metav1.Duration `json:"terminationDrainDuration,omitempty"`
+ // The unique identifier for the [service mesh](https://istio.io/docs/reference/glossary/#service-mesh).
+ // All control planes running in the same service mesh should specify the same mesh ID.
+ // Mesh ID is used to label telemetry reports for cases where telemetry from multiple meshes is mixed together.
+ MeshId string `json:"meshId,omitempty"`
+ // VM health checking readiness probe. This health check config exactly mirrors the
+ // Kubernetes readiness probe configuration in both schema and logic.
+ // Only one of the three health check methods can be set at a time.
+ ReadinessProbe *k8sv1.Probe `json:"readinessProbe,omitempty"`
+ // Proxy stats matcher defines configuration for reporting custom Envoy stats.
+ // To reduce memory and CPU overhead from Envoy's stats system, Istio proxies by
+ // default create and expose only a subset of Envoy stats. This option controls the
+ // creation of additional Envoy stats with prefix, suffix, and regex
+ // expressions matched against the names of the stats. This replaces the stats
+ // inclusion annotations
+ // (`sidecar.istio.io/statsInclusionPrefixes`,
+ // `sidecar.istio.io/statsInclusionRegexps`, and
+ // `sidecar.istio.io/statsInclusionSuffixes`). For example, to enable stats
+ // for circuit breakers, request retries, upstream connections, and request timeouts,
+ // you can specify the stats matcher as follows:
+ // ```yaml
+ // proxyStatsMatcher:
+ //
+ //   inclusionRegexps:
+ //   - .*outlier_detection.*
+ //   - .*upstream_rq_retry.*
+ //   - .*upstream_cx_.*
+ //   inclusionSuffixes:
+ //   - upstream_rq_timeout
+ //
+ // ```
+ // Note that including more Envoy stats might significantly increase the number of time series
+ // collected by Prometheus.
+ // Care needs to be taken with Prometheus resource provisioning and configuration to reduce cardinality.
+ ProxyStatsMatcher *ProxyConfigProxyStatsMatcher `json:"proxyStatsMatcher,omitempty"`
+ // Boolean flag for enabling/disabling the holdApplicationUntilProxyStarts behavior.
+ // This feature adds hooks to delay application startup until the pod proxy
+ // is ready to accept traffic, mitigating some startup race conditions.
+ // Default value is 'false'.
+ HoldApplicationUntilProxyStarts *bool `json:"holdApplicationUntilProxyStarts,omitempty"`
+ // The PEM data of the extra root certificates for workload-to-workload communication.
+ // This includes the certificates defined in MeshConfig and any other certificates that Istiod uses as CA.
+ // The plugin certificates (the 'cacerts' secret) and self-signed certificates (the 'istio-ca-secret' secret)
+ // are added automatically by Istiod.
+ CaCertificatesPem []string `json:"caCertificatesPem,omitempty"`
+ // Specifies the details of the proxy image.
+ Image *ProxyImage `json:"image,omitempty"`
+ // Specifies the details of the Private Key Provider configuration for gateway and sidecar proxies.
+ PrivateKeyProvider *PrivateKeyProvider `json:"privateKeyProvider,omitempty"`
+ // Define the set of headers to add/modify for HTTP request/responses.
+ //
+ // To enable an optional header, simply set the field. If no specific configuration is required, an empty object (`{}`) will enable it.
+ // Note: currently all headers are enabled by default.
+ //
+ // Below shows an example of customizing the `server` header and disabling the `X-Envoy-Attempt-Count` header:
+ //
+ // ```yaml
+ // proxyHeaders:
+ //
+ //   server:
+ //     value: "my-custom-server"
+ //   requestId: {} # Explicitly enable request IDs. As this is the default, this has no effect.
+ //   attemptCount:
+ //     disabled: true
+ //
+ // ```
+ //
+ // Some headers are enabled by default and require explicit disabling. See below for an example of disabling all default-enabled headers:
+ //
+ // ```yaml
+ // proxyHeaders:
+ //
+ //   forwardedClientCert: SANITIZE
+ //   server:
+ //     disabled: true
+ //   requestId:
+ //     disabled: true
+ //   attemptCount:
+ //     disabled: true
+ //   envoyDebugHeaders:
+ //     disabled: true
+ //   metadataExchangeHeaders:
+ //     mode: IN_MESH
+ //
+ // ```
+ ProxyHeaders *ProxyConfigProxyHeaders `json:"proxyHeaders,omitempty"`
+}
+
+type RemoteService struct {
+ // Address of a remote service used for various purposes (access log
+ // receiver, metrics receiver, etc.). Can be an IP address or a fully
+ // qualified DNS name.
+ Address string `json:"address,omitempty"`
+ // Use the `tls_settings` to specify the TLS mode to use. If the remote service
+ // uses Istio mutual TLS and shares the root CA with Pilot, specify the TLS
+ // mode as `ISTIO_MUTUAL`.
+ TlsSettings *ClientTLSSettings `json:"tlsSettings,omitempty"`
+ // If set, `SO_KEEPALIVE` is set on the socket to enable TCP keepalives.
+ TcpKeepalive *ConnectionPoolSettingsTCPSettingsTcpKeepalive `json:"tcpKeepalive,omitempty"`
+}
+
+// Zipkin defines configuration for a Zipkin tracer.
+type TracingZipkin struct {
+ // Address of the Zipkin service (e.g. _zipkin:9411_).
+ Address string `json:"address,omitempty"`
+}
+
+// +hidefromdoc
+// Defines configuration for a Lightstep tracer.
+type TracingLightstep struct {
+ // Address of the Lightstep Satellite pool.
+ Address string `json:"address,omitempty"`
+ // The Lightstep access token.
+ AccessToken string `json:"accessToken,omitempty"`
+}
+
+// Datadog defines configuration for a Datadog tracer.
+type TracingDatadog struct {
+ // Address of the Datadog Agent.
+ Address string `json:"address,omitempty"`
+}
+
+// Stackdriver defines configuration for a Stackdriver tracer.
+// See [Envoy's OpenCensus trace configuration](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/trace/v3/opencensus.proto)
+// and
+// [OpenCensus trace config](https://github.com/census-instrumentation/opencensus-proto/blob/master/src/opencensus/proto/trace/v1/trace_config.proto) for details.
+type TracingStackdriver struct {
+ // debug enables trace output to stdout.
+ // +hidefromdoc
+ Debug bool `json:"debug,omitempty"`
+ // The global default max number of attributes per span.
+ // Default is 200.
+ // +hidefromdoc
+ MaxNumberOfAttributes *int64 `json:"maxNumberOfAttributes,omitempty"`
+ // The global default max number of annotation events per span.
+ // Default is 200.
+ // +hidefromdoc
+ MaxNumberOfAnnotations *int64 `json:"maxNumberOfAnnotations,omitempty"`
+ // The global default max number of message events per span.
+ // Default is 200.
+ // +hidefromdoc
+ MaxNumberOfMessageEvents *int64 `json:"maxNumberOfMessageEvents,omitempty"`
+}
+
+// OpenCensusAgent defines configuration for an OpenCensus tracer writing to
+// an OpenCensus agent backend. See
+// [Envoy's OpenCensus trace configuration](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/trace/v3/opencensus.proto)
+// and
+// [OpenCensus trace config](https://github.com/census-instrumentation/opencensus-proto/blob/master/src/opencensus/proto/trace/v1/trace_config.proto)
+// for details.
+type TracingOpenCensusAgent struct {
+ // gRPC address for the OpenCensus agent (e.g. dns://authority/host:port or
+ // unix:path). See [gRPC naming
+ // docs](https://github.com/grpc/grpc/blob/master/doc/naming.md) for
+ // details.
+ Address string `json:"address,omitempty"`
+ // Specifies the set of context propagation headers used for distributed
+ // tracing. Default is `["W3C_TRACE_CONTEXT"]`. If multiple values are specified,
+ // the proxy will attempt to read each header for each request and will
+ // write all headers.
+ Context []TracingOpenCensusAgentTraceContext `json:"context,omitempty"`
+}
+
+// Configure custom tags that will be added to any active span.
+// Tags can be generated via literals, environment variables or an incoming request header.
+// +hidefromdoc
+// +kubebuilder:validation:XValidation:message="At most one of [literal environment header] should be set",rule="(has(self.literal)?1:0) + (has(self.environment)?1:0) + (has(self.header)?1:0) <= 1"
+type TracingCustomTag struct {
+ // The custom tag's value is the specified literal.
+ Literal *TracingLiteral `json:"literal,omitempty"`
+
+ // The custom tag's value should be populated from an environment
+ // variable.
+ Environment *TracingEnvironment `json:"environment,omitempty"`
+
+ // The custom tag's value is populated by an HTTP header from
+ // an incoming request.
+ Header *TracingRequestHeader `json:"header,omitempty"`
+}
+
+// Literal type represents a static value.
+// +hidefromdoc
+type TracingLiteral struct {
+ // Static literal value used to populate the tag value.
+ Value string `json:"value,omitempty"`
+}
+
+// Environment is the proxy's environment variable to be used for populating the custom span tag.
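+// As an illustrative sketch (assuming a tracer with custom tags enabled, and
+// POD_NAME as a hypothetical variable), such a tag could be configured like:
+//
+// ```yaml
+// custom_tags:
+//
+//   pod_name:
+//     environment:
+//       name: POD_NAME
+//       defaultValue: unknown
+//
+// ```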
+// +hidefromdoc
+type TracingEnvironment struct {
+ // Name of the environment variable used to populate the tag's value.
+ Name string `json:"name,omitempty"`
+ // When the environment variable is not found,
+ // the tag's value will be populated with this default value if specified;
+ // otherwise the tag will not be populated.
+ DefaultValue string `json:"defaultValue,omitempty"`
+}
+
+// RequestHeader is the HTTP request header which will be used to populate the span tag.
+// A default value can be configured if the header does not exist.
+// +hidefromdoc
+type TracingRequestHeader struct {
+ // HTTP header name used to obtain the value used to populate the tag.
+ Name string `json:"name,omitempty"`
+ // Default value to be used for the tag when the named HTTP header does not exist.
+ // The tag will be skipped if no default value is provided.
+ DefaultValue string `json:"defaultValue,omitempty"`
+}
+
+// PROXY protocol configuration.
+type TopologyProxyProtocolConfiguration struct {
+}
+
+// CryptoMb PrivateKeyProvider configuration.
+type PrivateKeyProviderCryptoMb struct {
+ // How long to wait until the per-thread processing queue should be processed. If the processing queue
+ // gets full (eight sign or decrypt requests are received) it is processed immediately.
+ // However, if the queue is not filled before the delay has expired, the requests already in the queue
+ // are processed, even if the queue is not full.
+ // In effect, this value controls the balance between latency and throughput.
+ // The duration needs to be set to a value greater than or equal to 1 millisecond.
+ PollDelay *metav1.Duration `json:"pollDelay,omitempty"`
+ // If the private key provider isn't available (e.g. the required hardware capability doesn't exist),
+ // Envoy will fall back to the BoringSSL default implementation when fallback is true.
+ // The default value is false.
+ Fallback *bool `json:"fallback,omitempty"`
+}
+
+// QAT (QuickAssist Technology) PrivateKeyProvider configuration.
+type PrivateKeyProviderQAT struct {
+ // How long to wait before polling the hardware accelerator after a request has been submitted there.
+ // Having a small value leads to quicker answers from the hardware but causes more polling loop spins,
+ // leading to potentially larger CPU usage.
+ // The duration needs to be set to a value greater than or equal to 1 millisecond.
+ PollDelay *metav1.Duration `json:"pollDelay,omitempty"`
+ // If the private key provider isn't available (e.g. the required hardware capability doesn't exist),
+ // Envoy will fall back to the BoringSSL default implementation when fallback is true.
+ // The default value is false.
+ Fallback *bool `json:"fallback,omitempty"`
+}
+
+// Proxy stats name matchers for stats creation. Note this is in addition to
+// the minimum Envoy stats that Istio generates by default.
+type ProxyConfigProxyStatsMatcher struct {
+ // Proxy stats name prefix matcher for inclusion.
+ InclusionPrefixes []string `json:"inclusionPrefixes,omitempty"`
+ // Proxy stats name suffix matcher for inclusion.
+ InclusionSuffixes []string `json:"inclusionSuffixes,omitempty"`
+ // Proxy stats name regexps matcher for inclusion.
+ InclusionRegexps []string `json:"inclusionRegexps,omitempty"`
+}
+
+type ProxyConfigProxyHeaders struct {
+ // Controls the `X-Forwarded-Client-Cert` header for inbound sidecar requests. To set this on gateways, use the `Topology` setting.
+ // To disable the header, configure either `SANITIZE` (to always remove the header, if present) or `FORWARD_ONLY` (to leave the header as-is).
+ // By default, `APPEND_FORWARD` will be used.
+ ForwardedClientCert ForwardClientCertDetails `json:"forwardedClientCert,omitempty"`
+ // Controls the `X-Request-Id` header. If enabled, a request ID is generated for each request if one is not already set.
+ // This applies to all types of traffic (inbound, outbound, and gateways).
+ // If disabled, no request ID will be generated for the request. If it is already present, it will be preserved.
+ // Warning: request IDs are a critical component of mesh tracing and logging, so disabling this is not recommended.
+ // This header is enabled by default if not configured.
+ RequestId *ProxyConfigProxyHeadersRequestId `json:"requestId,omitempty"`
+ // Controls the `server` header. If enabled, the `Server: istio-envoy` header is set in response headers for inbound traffic (including gateways).
+ // If disabled, the `Server` header is not modified. If it is already present, it will be preserved.
+ Server *ProxyConfigProxyHeadersServer `json:"server,omitempty"`
+ // Controls the `X-Envoy-Attempt-Count` header.
+ // If enabled, this header will be added on outbound request headers (including gateways) that have retries configured.
+ // If disabled, this header will not be set. If it is already present, it will be preserved.
+ // This header is enabled by default if not configured.
+ AttemptCount *ProxyConfigProxyHeadersAttemptCount `json:"attemptCount,omitempty"`
+ // Controls various `X-Envoy-*` headers, such as `X-Envoy-Overloaded` and `X-Envoy-Upstream-Service-Time`. If enabled,
+ // these headers will be included.
+ // If disabled, these headers will not be set. If they are already present, they will be preserved.
+ // See the [Envoy documentation](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/http/router/v3/router.proto#envoy-v3-api-field-extensions-filters-http-router-v3-router-suppress-envoy-headers) for more details.
+ // These headers are enabled by default if not configured.
+ EnvoyDebugHeaders *ProxyConfigProxyHeadersEnvoyDebugHeaders `json:"envoyDebugHeaders,omitempty"`
+ // Controls Istio metadata exchange headers `X-Envoy-Peer-Metadata` and `X-Envoy-Peer-Metadata-Id`.
+ // By default, the behavior is unspecified.
+ // If IN_MESH, these headers will not be appended to outbound requests from sidecars to services not in-mesh.
+ MetadataExchangeHeaders *ProxyConfigProxyHeadersMetadataExchangeHeaders `json:"metadataExchangeHeaders,omitempty"`
+}
+
+type ProxyConfigProxyHeadersServer struct {
+ Disabled *bool `json:"disabled,omitempty"`
+ // If set, and the server header is enabled, this value will be set as the server header. By default, `istio-envoy` will be used.
+ Value string `json:"value,omitempty"`
+}
+
+type ProxyConfigProxyHeadersRequestId struct {
+ Disabled *bool `json:"disabled,omitempty"`
+}
+
+type ProxyConfigProxyHeadersAttemptCount struct {
+ Disabled *bool `json:"disabled,omitempty"`
+}
+
+type ProxyConfigProxyHeadersEnvoyDebugHeaders struct {
+ Disabled *bool `json:"disabled,omitempty"`
+}
+
+type ProxyConfigProxyHeadersMetadataExchangeHeaders struct {
+ Mode ProxyConfigProxyHeadersMetadataExchangeMode `json:"mode,omitempty"`
+}
+
+// The following values are used to construct the proxy image URL.
+// Format: `${hub}/${image_name}:${tag}-${image_type}`,
+// example: `docker.io/istio/proxyv2:1.11.1` or `docker.io/istio/proxyv2:1.11.1-distroless`.
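+// As a sketch, the image type can be selected through the mesh-wide proxy
+// defaults (this assumes the configured hub publishes the distroless variant):
+//
+// ```
+// meshConfig:
+//
+//   defaultConfig:
+//     image:
+//       imageType: distroless
+//
+// ```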
+// This information was previously part of the Values API.
+type ProxyImage struct {
+ // The image type of the image.
+ // Istio publishes default, debug, and distroless images.
+ // Other values are allowed if those image types (example: centos) are published to the specified hub.
+ // Supported values: default, debug, distroless.
+ ImageType string `json:"imageType,omitempty"`
+}
+
+// WorkloadMode allows selection of the role of the underlying workload in
+// network traffic. A workload is considered as acting as a SERVER if it is
+// the destination of the traffic (that is, traffic direction, from the
+// perspective of the workload, is *inbound*). If the workload is the source of
+// the network traffic, it is considered to be in CLIENT mode (traffic is
+// *outbound* from the workload).
+// +kubebuilder:validation:Enum=UNDEFINED;CLIENT;SERVER;CLIENT_AND_SERVER
+type WorkloadMode string
+
+const (
+ // Default value, which will be interpreted by its own usage.
+ WorkloadModeUndefined WorkloadMode = "UNDEFINED"
+ // Selects for scenarios when the workload is the
+ // source of the network traffic. In addition,
+ // this mode is selected if the workload is a gateway.
+ WorkloadModeClient WorkloadMode = "CLIENT"
+ // Selects for scenarios when the workload is the
+ // destination of the network traffic.
+ WorkloadModeServer WorkloadMode = "SERVER"
+ // Selects for scenarios when the workload is either the
+ // source or destination of the network traffic.
+ WorkloadModeClientAndServer WorkloadMode = "CLIENT_AND_SERVER"
+)
+
+// WorkloadSelector specifies the criteria used to determine if a policy can be applied
+// to a proxy. The matching criteria include the metadata associated with a proxy,
+// workload instance info such as labels attached to the pod/VM, or any other info
+// that the proxy provides to Istio during the initial handshake. If multiple conditions are
+// specified, all conditions need to match in order for the workload instance to be
+// selected. Currently, only a label-based selection mechanism is supported.
+type WorkloadSelector struct {
+ // One or more labels that indicate a specific set of pods/VMs
+ // on which a policy should be applied. The scope of label search is restricted to
+ // the configuration namespace in which the resource is present.
+ // +kubebuilder:validation:XValidation:message="wildcard not allowed in label key match",rule="self.all(key, !key.contains('*'))"
+ // +kubebuilder:validation:XValidation:message="key must not be empty",rule="self.all(key, key.size() != 0)"
+ // +kubebuilder:map-value-validation:XValidation:message="wildcard not allowed in label value match",rule="!self.contains('*')"
+ // +kubebuilder:map-value-validation:MaxLength=63
+ // +kubebuilder:validation:MaxProperties=4096
+ MatchLabels map[string]string `json:"matchLabels,omitempty"`
+}
+
+// PortSelector is the criteria for specifying if a policy can be applied to
+// a listener having a specific port.
+type PortSelector struct {
+ // Port number
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=65535
+ Number uint32 `json:"number,omitempty"`
+}
+
+// PolicyTargetReference format as defined by [GEP-2648](https://gateway-api.sigs.k8s.io/geps/gep-2648/#direct-policy-design-rules).
+//
+// PolicyTargetReference specifies the targeted resource which the policy
+// should be applied to. It must only target a single resource at a time, but it
+// can be used to target larger resources such as Gateways that may apply to
+// multiple child resources.
+// The PolicyTargetReference will be used instead of
+// a WorkloadSelector in the RequestAuthentication, AuthorizationPolicy,
+// Telemetry, and WasmPlugin CRDs to target a Kubernetes Gateway.
+//
+// The following is an example of an AuthorizationPolicy bound to a waypoint proxy using
+// a PolicyTargetReference. The example sets `action` to `DENY` to create a deny policy.
+// It denies all requests with the `POST` method on port `8080` directed through the
+// `waypoint` Gateway in the `foo` namespace.
+//
+// ```yaml
+// apiVersion: security.istio.io/v1
+// kind: AuthorizationPolicy
+// metadata:
+//
+//   name: httpbin
+//   namespace: foo
+//
+// spec:
+//
+//   targetRefs:
+//   - name: waypoint
+//     kind: Gateway
+//     group: gateway.networking.k8s.io
+//   action: DENY
+//   rules:
+//   - to:
+//     - operation:
+//         methods: ["POST"]
+//         ports: ["8080"]
+//
+// ```
+// +kubebuilder:validation:XValidation:message="Supported kinds are core/Service, networking.istio.io/ServiceEntry, gateway.networking.k8s.io/Gateway",rule="[self.group, self.kind] in [['core','Service'], ['','Service'], ['gateway.networking.k8s.io','Gateway'], ['networking.istio.io','ServiceEntry']]"
+type PolicyTargetReference struct {
+ // group is the group of the target resource.
+ // +kubebuilder:validation:MaxLength=253
+ // +kubebuilder:validation:Pattern=`^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`
+ Group string `json:"group,omitempty"`
+ // kind is the kind of the target resource.
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=63
+ // +kubebuilder:validation:Pattern=`^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$`
+ Kind string `json:"kind,omitempty"`
+ // name is the name of the target resource.
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=253
+ Name string `json:"name,omitempty"`
+ // namespace is the namespace of the referent. When unspecified, the local
+ // namespace is inferred.
+ // +kubebuilder:validation:XValidation:message="cross namespace referencing is not currently supported",rule="self.size() == 0"
+ Namespace string `json:"namespace,omitempty"`
+}
+
+// TLS connection mode
+// +kubebuilder:validation:Enum=DISABLE;SIMPLE;MUTUAL;ISTIO_MUTUAL
+type ClientTLSSettingsTLSmode string
+
+const (
+ // Do not set up a TLS connection to the upstream endpoint.
+ ClientTLSSettingsTLSmodeDisable ClientTLSSettingsTLSmode = "DISABLE"
+ // Originate a TLS connection to the upstream endpoint.
+ ClientTLSSettingsTLSmodeSimple ClientTLSSettingsTLSmode = "SIMPLE"
+ // Secure connections to the upstream using mutual TLS by presenting
+ // client certificates for authentication.
+ ClientTLSSettingsTLSmodeMutual ClientTLSSettingsTLSmode = "MUTUAL"
+ // Secure connections to the upstream using mutual TLS by presenting
+ // client certificates for authentication.
+ // Compared to Mutual mode, this mode uses certificates generated
+ // automatically by Istio for mTLS authentication. When this mode is
+ // used, all other fields in `ClientTLSSettings` should be empty.
+ ClientTLSSettingsTLSmodeIstioMutual ClientTLSSettingsTLSmode = "ISTIO_MUTUAL"
+)
+
+// SSL/TLS related settings for upstream connections. See Envoy's [TLS
+// context](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/transport_sockets/tls/v3/common.proto.html#common-tls-configuration)
+// for more details. These settings are common to both HTTP and TCP upstreams.
+//
+// For example, the following rule configures a client to use mutual TLS
+// for connections to an upstream database cluster.
+//
+// ```yaml
+// apiVersion: networking.istio.io/v1
+// kind: DestinationRule
+// metadata:
+//
+//   name: db-mtls
+//
+// spec:
+//
+//   host: mydbserver.prod.svc.cluster.local
+//   trafficPolicy:
+//     tls:
+//       mode: MUTUAL
+//       clientCertificate: /etc/certs/myclientcert.pem
+//       privateKey: /etc/certs/client_private_key.pem
+//       caCertificates: /etc/certs/rootcacerts.pem
+//
+// ```
+//
+// The following rule configures a client to use TLS when talking to a
+// foreign service whose domain matches *.foo.com.
+//
+// ```yaml
+// apiVersion: networking.istio.io/v1
+// kind: DestinationRule
+// metadata:
+//
+//   name: tls-foo
+//
+// spec:
+//
+//   host: "*.foo.com"
+//   trafficPolicy:
+//     tls:
+//       mode: SIMPLE
+//
+// ```
+//
+// The following rule configures a client to use Istio mutual TLS when talking
+// to rating services.
+//
+// ```yaml
+// apiVersion: networking.istio.io/v1
+// kind: DestinationRule
+// metadata:
+//
+//   name: ratings-istio-mtls
+//
+// spec:
+//
+//   host: ratings.prod.svc.cluster.local
+//   trafficPolicy:
+//     tls:
+//       mode: ISTIO_MUTUAL
+//
+// ```
+type ClientTLSSettings struct {
+ // Indicates whether connections to this port should be secured
+ // using TLS. The value of this field determines how TLS is enforced.
+ Mode ClientTLSSettingsTLSmode `json:"mode,omitempty"`
+ // REQUIRED if mode is `MUTUAL`. The path to the file holding the
+ // client-side TLS certificate to use.
+ // Should be empty if mode is `ISTIO_MUTUAL`.
+ ClientCertificate string `json:"clientCertificate,omitempty"`
+ // REQUIRED if mode is `MUTUAL`. The path to the file holding the
+ // client's private key.
+ // Should be empty if mode is `ISTIO_MUTUAL`.
+ PrivateKey string `json:"privateKey,omitempty"`
+ // OPTIONAL: The path to the file containing certificate authority
+ // certificates to use in verifying a presented server certificate. If
+ // omitted, the proxy will verify the server's certificate using
+ // the OS CA certificates.
+ // Should be empty if mode is `ISTIO_MUTUAL`.
+ CaCertificates string `json:"caCertificates,omitempty"`
+ // The name of the secret that holds the TLS certs for the
+ // client including the CA certificates. This secret must exist in
+ // the namespace of the proxy using the certificates.
+ // An Opaque secret should contain the following keys and values:
+ // `key: <privateKey>`, `cert: <clientCert>`, `cacert: <CACertificate>`,
+ // `crl: <certificateRevocationList>`.
+ // Here CACertificate is used to verify the server certificate.
+ // For mutual TLS, `cacert: <CACertificate>` can be provided in the
+ // same secret or a separate secret named `<secret>-cacert`.
+ // A TLS secret for client certificates with an additional
+ // `ca.crt` key for CA certificates and a `ca.crl` key for the
+ // certificate revocation list (CRL) is also supported.
+ // Only one of client certificates and CA certificate
+ // or credentialName can be specified.
+ //
+ // **NOTE:** This field is applicable at sidecars only if
+ // `DestinationRule` has a `workloadSelector` specified.
+ // Otherwise the field will be applicable only at gateways, and
+ // sidecars will continue to use the certificate paths.
+ CredentialName string `json:"credentialName,omitempty"`
+ // A list of alternate names to verify the subject identity in the
+ // certificate. If specified, the proxy will verify that the server
+ // certificate's subject alt name matches one of the specified values.
+ // If specified, this list overrides the value of subject_alt_names
+ // from the ServiceEntry.
+ // If unspecified, automatic validation of the upstream
+ // presented certificate for new upstream connections will be done based on the
+ // downstream HTTP host/authority header.
+ SubjectAltNames []string `json:"subjectAltNames,omitempty"`
+ // SNI string to present to the server during the TLS handshake.
+ // If unspecified, SNI will be automatically set based on the downstream HTTP
+ // host/authority header for SIMPLE and MUTUAL TLS modes.
+ Sni string `json:"sni,omitempty"`
+ // `insecureSkipVerify` specifies whether the proxy should skip verifying the
+ // CA signature and SAN for the server certificate corresponding to the host.
+ // The default value of this field is false.
+ InsecureSkipVerify *bool `json:"insecureSkipVerify,omitempty"`
+ // OPTIONAL: The path to the file containing the certificate revocation list (CRL)
+ // to use in verifying a presented server certificate. A `CRL` is a list of certificates
+ // that have been revoked by the CA (Certificate Authority) before their scheduled expiration date.
+ // If specified, the proxy will verify whether the presented certificate is part of the revoked list of certificates.
+ // If omitted, the proxy will not verify the certificate against the `crl`.
+ CaCrl string `json:"caCrl,omitempty"`
+}
+
+// Locality-weighted load balancing allows administrators to control the
+// distribution of traffic to endpoints based on the localities of where the
+// traffic originates and where it will terminate. These localities are
+// specified using arbitrary labels that designate a hierarchy of localities in
+// {region}/{zone}/{sub-zone} form. For additional detail refer to
+// [Locality Weight](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/load_balancing/locality_weight).
+// The following example shows how to set up locality weights mesh-wide.
+//
+// Given a mesh with workloads and their service deployed to "us-west/zone1/*"
+// and "us-west/zone2/*", this example specifies that when traffic accessing a
+// service originates from workloads in "us-west/zone1/*", 80% of the traffic
+// will be sent to endpoints in "us-west/zone1/*", i.e. the same zone, and the
+// remaining 20% will go to endpoints in "us-west/zone2/*". This setup is
+// intended to favor routing traffic to endpoints in the same locality.
+// A similar setting is specified for traffic originating in "us-west/zone2/*".
+//
+// ```yaml
+//
+//   distribute:
+//   - from: us-west/zone1/*
+//     to:
+//       "us-west/zone1/*": 80
+//       "us-west/zone2/*": 20
+//   - from: us-west/zone2/*
+//     to:
+//       "us-west/zone1/*": 20
+//       "us-west/zone2/*": 80
+//
+// ```
+//
+// If the goal of the operator is not to distribute load across zones and
+// regions but rather to restrict the regionality of failover to meet other
+// operational requirements, an operator can set a 'failover' policy instead of
+// a 'distribute' policy.
+//
+// The following example sets up a locality failover policy for regions.
+// Assume a service resides in zones within us-east, us-west and eu-west.
+// This example specifies that when endpoints within us-east become unhealthy,
+// traffic should fail over to endpoints in any zone or sub-zone within eu-west;
+// similarly, us-west should fail over to us-east.
+//
+// ```yaml
+//
+//   failover:
+//   - from: us-east
+//     to: eu-west
+//   - from: us-west
+//     to: us-east
+//
+// ```
+//
+// Locality load balancing settings.
+type LocalityLoadBalancerSetting struct {
+ // Optional: only one of distribute, failover or failoverPriority can be set.
+ // Explicitly specify the load balancing weight across different zones and geographical locations.
+ // Refer to [Locality weighted load balancing](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/load_balancing/locality_weight).
+ // If empty, the locality weight is set according to the number of endpoints within the locality.
+ Distribute []*LocalityLoadBalancerSettingDistribute `json:"distribute,omitempty"`
+ // Optional: only one of distribute, failover or failoverPriority can be set.
+ // Explicitly specify the region traffic will land on when endpoints in the local region become unhealthy.
+ // Should be used together with OutlierDetection to detect unhealthy endpoints.
+ // Note: if no OutlierDetection is specified, this will not take effect.
+ Failover []*LocalityLoadBalancerSettingFailover `json:"failover,omitempty"`
+ // failoverPriority is an ordered list of labels used to sort endpoints for priority-based load balancing.
+ // This is to support traffic failover across different groups of endpoints.
+ // Two kinds of labels can be specified:
+ //
+ // - Specify only label keys `[key1, key2, key3]`; Istio would compare the label values of the client with those of the endpoints.
+ //   Suppose there are a total of N label keys `[key1, key2, key3, ...keyN]` specified:
+ //
+ //   1. Endpoints matching all N labels with the client proxy have priority P(0), i.e. the highest priority.
+ //   2. Endpoints matching the first N-1 labels with the client proxy have priority P(1), i.e. the second highest priority.
+ //   3. By extension of this logic, endpoints matching only the first label with the client proxy have priority P(N-1), i.e. the second lowest priority.
+ //   4. All the other endpoints have priority P(N), i.e. the lowest priority.
+ //
+ // - Specify labels with key and value `[key1=value1, key2=value2, key3=value3]`; Istio would compare the labels with those of the endpoints.
+ //   Suppose there are a total of N labels `[key1=value1, key2=value2, key3=value3, ...keyN=valueN]` specified:
+ //
+ //   1. Endpoints matching all N labels have priority P(0), i.e. the highest priority.
+ //   2. Endpoints matching the first N-1 labels have priority P(1), i.e. the second highest priority.
+ //   3. By extension of this logic, endpoints matching only the first label have priority P(N-1), i.e. the second lowest priority.
+ //   4. All the other endpoints have priority P(N), i.e. the lowest priority.
+ //
+ // Note: for a label to be considered for a match, the previous labels must match, i.e. the nth label would be considered matched only if the first n-1 labels match.
+ //
+ // It can be any label specified on both client and server workloads.
+ // The following labels, which have special semantic meaning, are also supported:
+ //
+ // - `topology.istio.io/network` is used to match the network metadata of an endpoint, which can be specified by pod/namespace label `topology.istio.io/network`, sidecar env `ISTIO_META_NETWORK` or MeshNetworks.
+ // - `topology.istio.io/cluster` is used to match the clusterID of an endpoint, which can be specified by pod label `topology.istio.io/cluster` or pod env `ISTIO_META_CLUSTER_ID`.
+ // - `topology.kubernetes.io/region` is used to match the region metadata of an endpoint, which maps to the Kubernetes node label `topology.kubernetes.io/region` or the deprecated label `failure-domain.beta.kubernetes.io/region`.
+ // - `topology.kubernetes.io/zone` is used to match the zone metadata of an endpoint, which maps to the Kubernetes node label `topology.kubernetes.io/zone` or the deprecated label `failure-domain.beta.kubernetes.io/zone`.
+ // - `topology.istio.io/subzone` is used to match the subzone metadata of an endpoint, which maps to the Istio node label `topology.istio.io/subzone`.
+ // - `kubernetes.io/hostname` is used to match the current node of an endpoint, which maps to the Kubernetes node label `kubernetes.io/hostname`.
+ //
+ // The below topology config indicates the following priority levels:
+ //
+ // ```yaml
+ // failoverPriority:
+ // - "topology.istio.io/network"
+ // - "topology.kubernetes.io/region"
+ // - "topology.kubernetes.io/zone"
+ // - "topology.istio.io/subzone"
+ // ```
+ //
+ // 1. Endpoints that match the [network, region, zone, subzone] labels of the client proxy have the highest priority.
+ // 2. Endpoints that have the same [network, region, zone] labels but a different [subzone] label from the client proxy have the second highest priority.
+ // 3. Endpoints that have the same [network, region] labels but a different [zone] label from the client proxy have the third highest priority.
+ // 4. Endpoints that have the same [network] label but different [region] labels from the client proxy have the fourth highest priority.
+ // 5. All the other endpoints share the same lowest priority.
+ //
+ // Suppose a service's associated endpoints reside in multiple clusters. The below example represents:
+ // 1. Endpoints in `clusterA` that have the `version=v1` label have P(0) priority.
+ // 2. Endpoints not in `clusterA` but with the `version=v1` label have P(1) priority.
+ // 3. All the other endpoints have P(2) priority.
+ //
+ // ```yaml
+ // failoverPriority:
+ // - "version=v1"
+ // - "topology.istio.io/cluster=clusterA"
+ // ```
+ //
+ // Optional: only one of distribute, failover or failoverPriority can be set.
+ // It should be used together with `OutlierDetection` to detect unhealthy endpoints; otherwise it has no effect.
+ FailoverPriority []string `json:"failoverPriority,omitempty"`
+ // Enable locality load balancing. This is DestinationRule-level and will override mesh-wide settings in their entirety;
+ // e.g. `true` turns on locality load balancing for this DestinationRule regardless of the mesh-wide setting.
+ Enabled *bool `json:"enabled,omitempty"`
+}
+
+// TCP keepalive.
+type ConnectionPoolSettingsTCPSettingsTcpKeepalive struct {
+ // Maximum number of keepalive probes to send without a response before
+ // deciding the connection is dead. Default is to use the OS-level configuration
+ // (unless overridden, Linux defaults to 9).
+ Probes uint32 `json:"probes,omitempty"`
+ // The time duration a connection needs to be idle before keep-alive
+ // probes start being sent. Default is to use the OS-level configuration
+ // (unless overridden, Linux defaults to 7200s, i.e. 2 hours).
+ Time *metav1.Duration `json:"time,omitempty"`
+ // The time duration between keep-alive probes.
+ // Default is to use the OS-level configuration
+ // (unless overridden, Linux defaults to 75s).
+ Interval *metav1.Duration `json:"interval,omitempty"`
+}
+
+// Describes how traffic originating in the 'from' zone or sub-zone is
+// distributed over a set of 'to' zones. The syntax for specifying a zone is
+// {region}/{zone}/{sub-zone} and terminal wildcards are allowed on any
+// segment of the specification. Examples:
+//
+// `*` - matches all localities
+//
+// `us-west/*` - all zones and sub-zones within the us-west region
+//
+// `us-west/zone-1/*` - all sub-zones within us-west/zone-1
+type LocalityLoadBalancerSettingDistribute struct {
+ // Originating locality, '/' separated, e.g. 'region/zone/sub_zone'.
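+ // Per the wildcard rules above, terminal wildcards are also accepted here;
+ // e.g. 'us-west/*' covers all zones and sub-zones within us-west.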
+ From string `json:"from,omitempty"`
+ // Map of upstream localities to traffic distribution weights. The sum of
+ // all weights should be 100. Any locality not present will
+ // receive no traffic.
+ To map[string]uint32 `json:"to,omitempty"`
+}
+
+// Specify the traffic failover policy across regions. Since zone and sub-zone
+// failover is supported by default, this only needs to be specified for
+// regions when the operator needs to constrain traffic failover so that
+// the default behavior of failing over to any endpoint globally does not
+// apply. This is useful when failing over traffic across regions would not
+// improve service health or may need to be restricted for other reasons
+// like regulatory controls.
+type LocalityLoadBalancerSettingFailover struct {
+ // Originating region.
+ From string `json:"from,omitempty"`
+ // Destination region the traffic will fail over to when endpoints in
+ // the 'from' region become unhealthy.
+ To string `json:"to,omitempty"`
+}
+
+// Describes the retry policy to use when an HTTP request fails. For
+// example, the following rule sets the maximum number of retries to 3 when
+// calling the ratings:v1 service, with a 2s timeout per retry attempt.
+// A retry will be attempted if there is a connect-failure, refused_stream
+// or when the upstream server responds with Service Unavailable (503).
+//
+// ```yaml
+// apiVersion: networking.istio.io/v1
+// kind: VirtualService
+// metadata:
+//
+//   name: ratings-route
+//
+// spec:
+//
+//   hosts:
+//   - ratings.prod.svc.cluster.local
+//   http:
+//   - route:
+//     - destination:
+//         host: ratings.prod.svc.cluster.local
+//         subset: v1
+//     retries:
+//       attempts: 3
+//       perTryTimeout: 2s
+//       retryOn: gateway-error,connect-failure,refused-stream
+//
+// ```
+type HTTPRetry struct {
+ // Number of retries to be allowed for a given request. The interval
+ // between retries will be determined automatically (25ms+). When the request
+ // `timeout` of the [HTTP route](https://istio.io/docs/reference/config/networking/virtual-service/#HTTPRoute)
+ // or `per_try_timeout` is configured, the actual number of retries attempted also depends on
+ // the specified request `timeout` and `per_try_timeout` values. MUST BE >= 0. If `0`, retries will be disabled.
+ // The maximum possible number of requests made will be 1 + `attempts`.
+ Attempts int32 `json:"attempts,omitempty"`
+ // Timeout per attempt for a given request, including the initial call and any retries. Format: 1h/1m/1s/1ms. MUST BE >=1ms.
+ // Default is the same value as the request
+ // `timeout` of the [HTTP route](https://istio.io/docs/reference/config/networking/virtual-service/#HTTPRoute),
+ // which means no timeout.
+ PerTryTimeout *metav1.Duration `json:"perTryTimeout,omitempty"`
+ // Specifies the conditions under which retry takes place.
+ // One or more policies can be specified using a ',' delimited list.
+ // See the [retry policies](https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/router_filter#x-envoy-retry-on)
+ // and [gRPC retry policies](https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/router_filter#x-envoy-retry-grpc-on) for more details.
+ //
+ // In addition to the policies specified above, a list of HTTP status codes can be passed, such as `retryOn: "503,reset"`.
+ // Note these status codes refer to the actual responses received from the destination.
+ // For example, if a connection is reset, Istio will translate this to 503 for its response.
+ // However, the destination did not return a 503 error, so this would not match `"503"` (it would, however, match `"reset"`). + // + // If not specified, this defaults to `connect-failure,refused-stream,unavailable,cancelled,503`. + RetryOn string `json:"retryOn,omitempty"` + // Flag to specify whether the retries should retry to other localities. + // See the [retry plugin configuration](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/http/http_connection_management#retry-plugin-configuration) for more details. + RetryRemoteLocalities *bool `json:"retryRemoteLocalities,omitempty"` +} + diff --git a/vendor/github.com/go-openapi/swag/post_go18.go b/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/values_types_extra.go similarity index 56% rename from vendor/github.com/go-openapi/swag/post_go18.go rename to vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/values_types_extra.go index f5228b82c..6e3d9ef97 100644 --- a/vendor/github.com/go-openapi/swag/post_go18.go +++ b/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/values_types_extra.go @@ -1,10 +1,10 @@ -// Copyright 2015 go-swagger maintainers +// Copyright Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,13 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build go1.8 -// +build go1.8 +package v1alpha1 -package swag +type SDSConfigToken struct { + Aud string `json:"aud,omitempty"` +} -import "net/url" +type CNIValues struct { + // Configuration for the Istio CNI plugin. + Cni *CNIConfig `json:"cni,omitempty"` -func pathUnescape(path string) (string, error) { - return url.PathUnescape(path) + // Part of the global configuration applicable to the Istio CNI component. + Global *CNIGlobalConfig `json:"global,omitempty"` } diff --git a/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..97e2fda45 --- /dev/null +++ b/vendor/github.com/istio-ecosystem/sail-operator/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,3895 @@ +//go:build !ignore_autogenerated + +// Copyright Istio Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by controller-gen. DO NOT EDIT. 
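+//
+// A minimal usage sketch of these helpers (assuming a CNIConfig value with the
+// pointer field Chained, as declared in this package): the copy shares no
+// mutable state with the original, so mutating one does not affect the other.
+//
+//	orig := &CNIConfig{Chained: new(bool)} // *orig.Chained is false
+//	cp := orig.DeepCopy()
+//	*cp.Chained = true // *orig.Chained remains false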
+ +package v1alpha1 + +import ( + "encoding/json" + "k8s.io/api/autoscaling/v2" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArchConfig) DeepCopyInto(out *ArchConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArchConfig. +func (in *ArchConfig) DeepCopy() *ArchConfig { + if in == nil { + return nil + } + out := new(ArchConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BaseConfig) DeepCopyInto(out *BaseConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseConfig. +func (in *BaseConfig) DeepCopy() *BaseConfig { + if in == nil { + return nil + } + out := new(BaseConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CNIAmbientConfig) DeepCopyInto(out *CNIAmbientConfig) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.DnsCapture != nil { + in, out := &in.DnsCapture, &out.DnsCapture + *out = new(bool) + **out = **in + } + if in.Ipv6 != nil { + in, out := &in.Ipv6, &out.Ipv6 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CNIAmbientConfig. +func (in *CNIAmbientConfig) DeepCopy() *CNIAmbientConfig { + if in == nil { + return nil + } + out := new(CNIAmbientConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
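+// For each pointer, slice, and map field, the `if in.X != nil` blocks below
+// rebind in and out to the field addresses, allocate fresh storage for out,
+// and copy the contents, so the copy shares no data with the receiver.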
+func (in *CNIConfig) DeepCopyInto(out *CNIConfig) { + *out = *in + if in.PullPolicy != nil { + in, out := &in.PullPolicy, &out.PullPolicy + *out = new(v1.PullPolicy) + **out = **in + } + if in.ExcludeNamespaces != nil { + in, out := &in.ExcludeNamespaces, &out.ExcludeNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.PodAnnotations != nil { + in, out := &in.PodAnnotations, &out.PodAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(GlobalLoggingConfig) + **out = **in + } + if in.Repair != nil { + in, out := &in.Repair, &out.Repair + *out = new(CNIRepairConfig) + (*in).DeepCopyInto(*out) + } + if in.Chained != nil { + in, out := &in.Chained, &out.Chained + *out = new(bool) + **out = **in + } + if in.ResourceQuotas != nil { + in, out := &in.ResourceQuotas, &out.ResourceQuotas + *out = new(ResourceQuotas) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Privileged != nil { + in, out := &in.Privileged, &out.Privileged + *out = new(bool) + **out = **in + } + if in.SeccompProfile != nil { + in, out := &in.SeccompProfile, &out.SeccompProfile + *out = new(v1.SeccompProfile) + (*in).DeepCopyInto(*out) + } + if in.RollingMaxUnavailable != nil { + in, out := &in.RollingMaxUnavailable, &out.RollingMaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CNIConfig. +func (in *CNIConfig) DeepCopy() *CNIConfig { + if in == nil { + return nil + } + out := new(CNIConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CNIGlobalConfig) DeepCopyInto(out *CNIGlobalConfig) { + *out = *in + if in.DefaultResources != nil { + in, out := &in.DefaultResources, &out.DefaultResources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.ImagePullPolicy != nil { + in, out := &in.ImagePullPolicy, &out.ImagePullPolicy + *out = new(v1.PullPolicy) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LogAsJson != nil { + in, out := &in.LogAsJson, &out.LogAsJson + *out = new(bool) + **out = **in + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(GlobalLoggingConfig) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CNIGlobalConfig. +func (in *CNIGlobalConfig) DeepCopy() *CNIGlobalConfig { + if in == nil { + return nil + } + out := new(CNIGlobalConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CNIRepairConfig) DeepCopyInto(out *CNIRepairConfig) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CNIRepairConfig. 
+func (in *CNIRepairConfig) DeepCopy() *CNIRepairConfig { + if in == nil { + return nil + } + out := new(CNIRepairConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CNIUsageConfig) DeepCopyInto(out *CNIUsageConfig) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CNIUsageConfig. +func (in *CNIUsageConfig) DeepCopy() *CNIUsageConfig { + if in == nil { + return nil + } + out := new(CNIUsageConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CNIValues) DeepCopyInto(out *CNIValues) { + *out = *in + if in.Cni != nil { + in, out := &in.Cni, &out.Cni + *out = new(CNIConfig) + (*in).DeepCopyInto(*out) + } + if in.Global != nil { + in, out := &in.Global, &out.Global + *out = new(CNIGlobalConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CNIValues. +func (in *CNIValues) DeepCopy() *CNIValues { + if in == nil { + return nil + } + out := new(CNIValues) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Certificate) DeepCopyInto(out *Certificate) { + *out = *in + if in.DnsNames != nil { + in, out := &in.DnsNames, &out.DnsNames + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Certificate. +func (in *Certificate) DeepCopy() *Certificate { + if in == nil { + return nil + } + out := new(Certificate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientTLSSettings) DeepCopyInto(out *ClientTLSSettings) { + *out = *in + if in.SubjectAltNames != nil { + in, out := &in.SubjectAltNames, &out.SubjectAltNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.InsecureSkipVerify != nil { + in, out := &in.InsecureSkipVerify, &out.InsecureSkipVerify + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientTLSSettings. +func (in *ClientTLSSettings) DeepCopy() *ClientTLSSettings { + if in == nil { + return nil + } + out := new(ClientTLSSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigSource) DeepCopyInto(out *ConfigSource) { + *out = *in + if in.TlsSettings != nil { + in, out := &in.TlsSettings, &out.TlsSettings + *out = new(ClientTLSSettings) + (*in).DeepCopyInto(*out) + } + if in.SubscribedResources != nil { + in, out := &in.SubscribedResources, &out.SubscribedResources + *out = make([]Resource, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSource. 
+func (in *ConfigSource) DeepCopy() *ConfigSource { + if in == nil { + return nil + } + out := new(ConfigSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionPoolSettingsTCPSettingsTcpKeepalive) DeepCopyInto(out *ConnectionPoolSettingsTCPSettingsTcpKeepalive) { + *out = *in + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(metav1.Duration) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(metav1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolSettingsTCPSettingsTcpKeepalive. +func (in *ConnectionPoolSettingsTCPSettingsTcpKeepalive) DeepCopy() *ConnectionPoolSettingsTCPSettingsTcpKeepalive { + if in == nil { + return nil + } + out := new(ConnectionPoolSettingsTCPSettingsTcpKeepalive) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultPodDisruptionBudgetConfig) DeepCopyInto(out *DefaultPodDisruptionBudgetConfig) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultPodDisruptionBudgetConfig. +func (in *DefaultPodDisruptionBudgetConfig) DeepCopy() *DefaultPodDisruptionBudgetConfig { + if in == nil { + return nil + } + out := new(DefaultPodDisruptionBudgetConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExperimentalConfig) DeepCopyInto(out *ExperimentalConfig) { + *out = *in + if in.StableValidationPolicy != nil { + in, out := &in.StableValidationPolicy, &out.StableValidationPolicy + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExperimentalConfig. +func (in *ExperimentalConfig) DeepCopy() *ExperimentalConfig { + if in == nil { + return nil + } + out := new(ExperimentalConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalConfig) DeepCopyInto(out *GlobalConfig) { + *out = *in + if in.Arch != nil { + in, out := &in.Arch, &out.Arch + *out = new(ArchConfig) + **out = **in + } + if in.CertSigners != nil { + in, out := &in.CertSigners, &out.CertSigners + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ConfigValidation != nil { + in, out := &in.ConfigValidation, &out.ConfigValidation + *out = new(bool) + **out = **in + } + if in.DefaultNodeSelector != nil { + in, out := &in.DefaultNodeSelector, &out.DefaultNodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.DefaultPodDisruptionBudget != nil { + in, out := &in.DefaultPodDisruptionBudget, &out.DefaultPodDisruptionBudget + *out = new(DefaultPodDisruptionBudgetConfig) + (*in).DeepCopyInto(*out) + } + if in.DefaultResources != nil { + in, out := &in.DefaultResources, &out.DefaultResources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.DefaultTolerations != nil { + in, out := &in.DefaultTolerations, &out.DefaultTolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ImagePullPolicy != nil { + in, out := &in.ImagePullPolicy, &out.ImagePullPolicy + *out = new(v1.PullPolicy) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LogAsJson != nil { + in, out := &in.LogAsJson, &out.LogAsJson + *out = new(bool) + **out = **in + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(GlobalLoggingConfig) + **out = **in + } + if in.MeshNetworks != nil { + in, out := &in.MeshNetworks, &out.MeshNetworks + *out = make(map[string]Network, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.MultiCluster != nil { + in, out := &in.MultiCluster, &out.MultiCluster + *out = new(MultiClusterConfig) + (*in).DeepCopyInto(*out) + } + if in.PodDNSSearchNamespaces != nil { + in, out := &in.PodDNSSearchNamespaces, &out.PodDNSSearchNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OmitSidecarInjectorConfigMap != nil { + in, out := &in.OmitSidecarInjectorConfigMap, &out.OmitSidecarInjectorConfigMap + *out = new(bool) + **out = **in + } + if in.OperatorManageWebhooks != nil { + in, out := &in.OperatorManageWebhooks, &out.OperatorManageWebhooks + *out = new(bool) + **out = **in + } + if in.Proxy != nil { + in, out := &in.Proxy, &out.Proxy + *out = new(ProxyConfig) + (*in).DeepCopyInto(*out) + } + if in.ProxyInit != nil { + in, out := &in.ProxyInit, &out.ProxyInit + *out = new(ProxyInitConfig) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(SDSConfig) + (*in).DeepCopyInto(*out) + } + if in.Tracer != nil { + in, out := &in.Tracer, &out.Tracer + *out = new(TracerConfig) + (*in).DeepCopyInto(*out) + } + if in.Istiod != nil { + in, out := &in.Istiod, &out.Istiod + *out = new(IstiodConfig) + (*in).DeepCopyInto(*out) + } + if in.Sts != nil { + in, out := &in.Sts, &out.Sts + *out = new(STSConfig) + **out = **in + } + if in.MountMtlsCerts != nil { + in, out := &in.MountMtlsCerts, &out.MountMtlsCerts + *out = new(bool) + **out = **in + } + if in.ExternalIstiod != nil { + in, out := &in.ExternalIstiod, &out.ExternalIstiod + *out = new(bool) + **out = **in + } + if in.ConfigCluster != nil { + in, out := &in.ConfigCluster, &out.ConfigCluster + *out = new(bool) + 
+		**out = **in
+	}
+	if in.IpFamilies != nil {
+		in, out := &in.IpFamilies, &out.IpFamilies
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Waypoint != nil {
+		in, out := &in.Waypoint, &out.Waypoint
+		*out = new(WaypointConfig)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalConfig.
+func (in *GlobalConfig) DeepCopy() *GlobalConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(GlobalConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GlobalLoggingConfig) DeepCopyInto(out *GlobalLoggingConfig) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalLoggingConfig.
+func (in *GlobalLoggingConfig) DeepCopy() *GlobalLoggingConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(GlobalLoggingConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPRetry) DeepCopyInto(out *HTTPRetry) {
+	*out = *in
+	if in.PerTryTimeout != nil {
+		in, out := &in.PerTryTimeout, &out.PerTryTimeout
+		*out = new(metav1.Duration)
+		**out = **in
+	}
+	if in.RetryRemoteLocalities != nil {
+		in, out := &in.RetryRemoteLocalities, &out.RetryRemoteLocalities
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRetry.
+func (in *HTTPRetry) DeepCopy() *HTTPRetry {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPRetry)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Istio) DeepCopyInto(out *Istio) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Istio.
+func (in *Istio) DeepCopy() *Istio {
+	if in == nil {
+		return nil
+	}
+	out := new(Istio)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Istio) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IstioCNI) DeepCopyInto(out *IstioCNI) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstioCNI.
+func (in *IstioCNI) DeepCopy() *IstioCNI {
+	if in == nil {
+		return nil
+	}
+	out := new(IstioCNI)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IstioCNI) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
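The DeepCopyObject methods just above are what let Istio and IstioCNI satisfy apimachinery's runtime.Object interface, which controller-runtime's cache and clients rely on. A sketch of the copy-before-mutate rule this enables; the api/v1alpha1 import path is an assumption based on the sail-operator module added in go.mod:

package example

import (
	v1alpha1 "github.com/istio-ecosystem/sail-operator/api/v1alpha1" // assumed path

	"k8s.io/apimachinery/pkg/runtime"
)

// labelCopy returns a labeled copy, leaving the (possibly cache-shared) input untouched.
func labelCopy(obj runtime.Object) runtime.Object {
	copied := obj.DeepCopyObject() // dispatches to the generated DeepCopy above
	if istio, ok := copied.(*v1alpha1.Istio); ok {
		if istio.Labels == nil {
			istio.Labels = map[string]string{}
		}
		istio.Labels["example.io/copied"] = "true" // hypothetical label, for illustration
	}
	return copied
}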
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IstioCNICondition) DeepCopyInto(out *IstioCNICondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstioCNICondition.
+func (in *IstioCNICondition) DeepCopy() *IstioCNICondition {
+	if in == nil {
+		return nil
+	}
+	out := new(IstioCNICondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IstioCNIList) DeepCopyInto(out *IstioCNIList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]IstioCNI, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstioCNIList.
+func (in *IstioCNIList) DeepCopy() *IstioCNIList {
+	if in == nil {
+		return nil
+	}
+	out := new(IstioCNIList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IstioCNIList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IstioCNISpec) DeepCopyInto(out *IstioCNISpec) {
+	*out = *in
+	if in.Values != nil {
+		in, out := &in.Values, &out.Values
+		*out = new(CNIValues)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstioCNISpec.
+func (in *IstioCNISpec) DeepCopy() *IstioCNISpec {
+	if in == nil {
+		return nil
+	}
+	out := new(IstioCNISpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IstioCNIStatus) DeepCopyInto(out *IstioCNIStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]IstioCNICondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstioCNIStatus.
+func (in *IstioCNIStatus) DeepCopy() *IstioCNIStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(IstioCNIStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IstioCondition) DeepCopyInto(out *IstioCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstioCondition.
+func (in *IstioCondition) DeepCopy() *IstioCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(IstioCondition)
+	in.DeepCopyInto(out)
+	return out
+}
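None of this file is written by hand: controller-gen's object generator emits it from markers on the API types, typically via a generate target running something like `controller-gen object paths=./...` in the upstream repo (command shape assumed, not verified against sail-operator's Makefile). A hypothetical root type showing the marker that produces the DeepCopyObject methods seen here:

package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Example is a hypothetical root type, not part of the vendored API. The
// marker below is what makes controller-gen emit DeepCopyObject (and hence
// runtime.Object support) alongside the usual DeepCopy/DeepCopyInto pair.
// +kubebuilder:object:root=true
type Example struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
}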
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IstioList) DeepCopyInto(out *IstioList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Istio, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstioList.
+func (in *IstioList) DeepCopy() *IstioList {
+	if in == nil {
+		return nil
+	}
+	out := new(IstioList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IstioList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IstioRevision) DeepCopyInto(out *IstioRevision) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstioRevision.
+func (in *IstioRevision) DeepCopy() *IstioRevision {
+	if in == nil {
+		return nil
+	}
+	out := new(IstioRevision)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IstioRevision) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IstioRevisionCondition) DeepCopyInto(out *IstioRevisionCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstioRevisionCondition.
+func (in *IstioRevisionCondition) DeepCopy() *IstioRevisionCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(IstioRevisionCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IstioRevisionList) DeepCopyInto(out *IstioRevisionList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]IstioRevision, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstioRevisionList.
+func (in *IstioRevisionList) DeepCopy() *IstioRevisionList {
+	if in == nil {
+		return nil
+	}
+	out := new(IstioRevisionList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IstioRevisionList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IstioRevisionSpec) DeepCopyInto(out *IstioRevisionSpec) {
+	*out = *in
+	if in.Values != nil {
+		in, out := &in.Values, &out.Values
+		*out = new(Values)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstioRevisionSpec.
+func (in *IstioRevisionSpec) DeepCopy() *IstioRevisionSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(IstioRevisionSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IstioRevisionStatus) DeepCopyInto(out *IstioRevisionStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]IstioRevisionCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstioRevisionStatus.
+func (in *IstioRevisionStatus) DeepCopy() *IstioRevisionStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(IstioRevisionStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IstioSpec) DeepCopyInto(out *IstioSpec) {
+	*out = *in
+	if in.UpdateStrategy != nil {
+		in, out := &in.UpdateStrategy, &out.UpdateStrategy
+		*out = new(IstioUpdateStrategy)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Values != nil {
+		in, out := &in.Values, &out.Values
+		*out = new(Values)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstioSpec.
+func (in *IstioSpec) DeepCopy() *IstioSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(IstioSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IstioStatus) DeepCopyInto(out *IstioStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]IstioCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	out.Revisions = in.Revisions
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstioStatus.
+func (in *IstioStatus) DeepCopy() *IstioStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(IstioStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IstioUpdateStrategy) DeepCopyInto(out *IstioUpdateStrategy) {
+	*out = *in
+	if in.InactiveRevisionDeletionGracePeriodSeconds != nil {
+		in, out := &in.InactiveRevisionDeletionGracePeriodSeconds, &out.InactiveRevisionDeletionGracePeriodSeconds
+		*out = new(int64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstioUpdateStrategy.
+func (in *IstioUpdateStrategy) DeepCopy() *IstioUpdateStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(IstioUpdateStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
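The list and status types above show the two slice shapes the generator distinguishes: plain value slices ([]string) get the built-in copy, while element types that carry pointers (the condition types, for instance) get an element-wise DeepCopyInto. A runnable toy demonstrating why the distinction matters; the Item type is illustrative only:

package main

import "fmt"

// Item stands in for an element type that carries a pointer field.
type Item struct {
	Name string
	Flag *bool
}

func (in *Item) DeepCopyInto(out *Item) {
	*out = *in
	if in.Flag != nil {
		out.Flag = new(bool)
		*out.Flag = *in.Flag
	}
}

func main() {
	t := true
	src := []Item{{Name: "a", Flag: &t}}

	// copy() duplicates the structs, but both Flag fields still point at the same bool.
	shallow := make([]Item, len(src))
	copy(shallow, src)

	// Element-wise deep copy, as the generated code does for such slices.
	deep := make([]Item, len(src))
	for i := range src {
		src[i].DeepCopyInto(&deep[i])
	}

	*src[0].Flag = false
	fmt.Println(*shallow[0].Flag, *deep[0].Flag) // false true
}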
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IstiodConfig) DeepCopyInto(out *IstiodConfig) {
+	*out = *in
+	if in.EnableAnalysis != nil {
+		in, out := &in.EnableAnalysis, &out.EnableAnalysis
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstiodConfig.
+func (in *IstiodConfig) DeepCopy() *IstiodConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(IstiodConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IstiodRemoteConfig) DeepCopyInto(out *IstiodRemoteConfig) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstiodRemoteConfig.
+func (in *IstiodRemoteConfig) DeepCopy() *IstiodRemoteConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(IstiodRemoteConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LocalityLoadBalancerSetting) DeepCopyInto(out *LocalityLoadBalancerSetting) {
+	*out = *in
+	if in.Distribute != nil {
+		in, out := &in.Distribute, &out.Distribute
+		*out = make([]*LocalityLoadBalancerSettingDistribute, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(LocalityLoadBalancerSettingDistribute)
+				(*in).DeepCopyInto(*out)
+			}
+		}
+	}
+	if in.Failover != nil {
+		in, out := &in.Failover, &out.Failover
+		*out = make([]*LocalityLoadBalancerSettingFailover, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(LocalityLoadBalancerSettingFailover)
+				**out = **in
+			}
+		}
+	}
+	if in.FailoverPriority != nil {
+		in, out := &in.FailoverPriority, &out.FailoverPriority
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalityLoadBalancerSetting.
+func (in *LocalityLoadBalancerSetting) DeepCopy() *LocalityLoadBalancerSetting {
+	if in == nil {
+		return nil
+	}
+	out := new(LocalityLoadBalancerSetting)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LocalityLoadBalancerSettingDistribute) DeepCopyInto(out *LocalityLoadBalancerSettingDistribute) {
+	*out = *in
+	if in.To != nil {
+		in, out := &in.To, &out.To
+		*out = make(map[string]uint32, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalityLoadBalancerSettingDistribute.
+func (in *LocalityLoadBalancerSettingDistribute) DeepCopy() *LocalityLoadBalancerSettingDistribute {
+	if in == nil {
+		return nil
+	}
+	out := new(LocalityLoadBalancerSettingDistribute)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LocalityLoadBalancerSettingFailover) DeepCopyInto(out *LocalityLoadBalancerSettingFailover) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalityLoadBalancerSettingFailover.
+func (in *LocalityLoadBalancerSettingFailover) DeepCopy() *LocalityLoadBalancerSettingFailover {
+	if in == nil {
+		return nil
+	}
+	out := new(LocalityLoadBalancerSettingFailover)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfig) DeepCopyInto(out *MeshConfig) {
+	*out = *in
+	if in.ConnectTimeout != nil {
+		in, out := &in.ConnectTimeout, &out.ConnectTimeout
+		*out = new(metav1.Duration)
+		**out = **in
+	}
+	if in.ProtocolDetectionTimeout != nil {
+		in, out := &in.ProtocolDetectionTimeout, &out.ProtocolDetectionTimeout
+		*out = new(metav1.Duration)
+		**out = **in
+	}
+	if in.TcpKeepalive != nil {
+		in, out := &in.TcpKeepalive, &out.TcpKeepalive
+		*out = new(ConnectionPoolSettingsTCPSettingsTcpKeepalive)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DefaultConfig != nil {
+		in, out := &in.DefaultConfig, &out.DefaultConfig
+		*out = new(MeshConfigProxyConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.OutboundTrafficPolicy != nil {
+		in, out := &in.OutboundTrafficPolicy, &out.OutboundTrafficPolicy
+		*out = new(MeshConfigOutboundTrafficPolicy)
+		**out = **in
+	}
+	if in.InboundTrafficPolicy != nil {
+		in, out := &in.InboundTrafficPolicy, &out.InboundTrafficPolicy
+		*out = new(MeshConfigInboundTrafficPolicy)
+		**out = **in
+	}
+	if in.ConfigSources != nil {
+		in, out := &in.ConfigSources, &out.ConfigSources
+		*out = make([]*ConfigSource, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(ConfigSource)
+				(*in).DeepCopyInto(*out)
+			}
+		}
+	}
+	if in.EnableAutoMtls != nil {
+		in, out := &in.EnableAutoMtls, &out.EnableAutoMtls
+		*out = new(bool)
+		**out = **in
+	}
+	if in.TrustDomainAliases != nil {
+		in, out := &in.TrustDomainAliases, &out.TrustDomainAliases
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.CaCertificates != nil {
+		in, out := &in.CaCertificates, &out.CaCertificates
+		*out = make([]*MeshConfigCertificateData, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(MeshConfigCertificateData)
+				(*in).DeepCopyInto(*out)
+			}
+		}
+	}
+	if in.DefaultServiceExportTo != nil {
+		in, out := &in.DefaultServiceExportTo, &out.DefaultServiceExportTo
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.DefaultVirtualServiceExportTo != nil {
+		in, out := &in.DefaultVirtualServiceExportTo, &out.DefaultVirtualServiceExportTo
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.DefaultDestinationRuleExportTo != nil {
+		in, out := &in.DefaultDestinationRuleExportTo, &out.DefaultDestinationRuleExportTo
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.LocalityLbSetting != nil {
+		in, out := &in.LocalityLbSetting, &out.LocalityLbSetting
+		*out = new(LocalityLoadBalancerSetting)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DnsRefreshRate != nil {
+		in, out := &in.DnsRefreshRate, &out.DnsRefreshRate
+		*out = new(metav1.Duration)
+		**out = **in
+	}
+	if in.Certificates != nil {
+		in, out := &in.Certificates, &out.Certificates
+		*out = make([]*Certificate, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(Certificate)
+				(*in).DeepCopyInto(*out)
+			}
+		}
+	}
+	if in.ServiceSettings != nil {
+		in, out := &in.ServiceSettings, &out.ServiceSettings
+		*out = make([]*MeshConfigServiceSettings, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(MeshConfigServiceSettings)
+				(*in).DeepCopyInto(*out)
+			}
+		}
+	}
+	if in.EnablePrometheusMerge != nil {
+		in, out := &in.EnablePrometheusMerge, &out.EnablePrometheusMerge
+		*out = new(bool)
+		**out = **in
+	}
+	if in.VerifyCertificateAtClient != nil {
+		in, out := &in.VerifyCertificateAtClient, &out.VerifyCertificateAtClient
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Ca != nil {
+		in, out := &in.Ca, &out.Ca
+		*out = new(MeshConfigCA)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ExtensionProviders != nil {
+		in, out := &in.ExtensionProviders, &out.ExtensionProviders
+		*out = make([]*MeshConfigExtensionProvider, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(MeshConfigExtensionProvider)
+				(*in).DeepCopyInto(*out)
+			}
+		}
+	}
+	if in.DefaultProviders != nil {
+		in, out := &in.DefaultProviders, &out.DefaultProviders
+		*out = new(MeshConfigDefaultProviders)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DiscoverySelectors != nil {
+		in, out := &in.DiscoverySelectors, &out.DiscoverySelectors
+		*out = make([]*metav1.LabelSelector, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(metav1.LabelSelector)
+				(*in).DeepCopyInto(*out)
+			}
+		}
+	}
+	if in.PathNormalization != nil {
+		in, out := &in.PathNormalization, &out.PathNormalization
+		*out = new(MeshConfigProxyPathNormalization)
+		**out = **in
+	}
+	if in.DefaultHttpRetryPolicy != nil {
+		in, out := &in.DefaultHttpRetryPolicy, &out.DefaultHttpRetryPolicy
+		*out = new(HTTPRetry)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.MeshMTLS != nil {
+		in, out := &in.MeshMTLS, &out.MeshMTLS
+		*out = new(MeshConfigTLSConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TlsDefaults != nil {
+		in, out := &in.TlsDefaults, &out.TlsDefaults
+		*out = new(MeshConfigTLSConfig)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfig.
+func (in *MeshConfig) DeepCopy() *MeshConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigCA) DeepCopyInto(out *MeshConfigCA) {
+	*out = *in
+	if in.TlsSettings != nil {
+		in, out := &in.TlsSettings, &out.TlsSettings
+		*out = new(ClientTLSSettings)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.RequestTimeout != nil {
+		in, out := &in.RequestTimeout, &out.RequestTimeout
+		*out = new(metav1.Duration)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigCA.
+func (in *MeshConfigCA) DeepCopy() *MeshConfigCA {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigCA)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigCertificateData) DeepCopyInto(out *MeshConfigCertificateData) {
+	*out = *in
+	if in.CertSigners != nil {
+		in, out := &in.CertSigners, &out.CertSigners
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.TrustDomains != nil {
+		in, out := &in.TrustDomains, &out.TrustDomains
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
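MeshConfig above leans heavily on slices of pointers (ConfigSources, CaCertificates, Certificates, ExtensionProviders), where each non-nil element needs its own allocation and nil elements must stay nil. A self-contained sketch of that shape, with a toy Source type rather than the vendored ones:

package main

import "fmt"

type Source struct{ Address string }

// deepCopySources mirrors the generated []*T pattern: fresh element per
// non-nil pointer, nil entries preserved, nil slice stays nil.
func deepCopySources(in []*Source) []*Source {
	if in == nil {
		return nil
	}
	out := make([]*Source, len(in))
	for i := range in {
		if in[i] != nil {
			out[i] = &Source{} // fresh element
			*out[i] = *in[i]   // value copy
		}
	}
	return out
}

func main() {
	in := []*Source{{Address: "istiod:15012"}, nil}
	out := deepCopySources(in)
	out[0].Address = "changed"
	fmt.Println(in[0].Address, out[0].Address, out[1]) // istiod:15012 changed <nil>
}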
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigCertificateData.
+func (in *MeshConfigCertificateData) DeepCopy() *MeshConfigCertificateData {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigCertificateData)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigDefaultProviders) DeepCopyInto(out *MeshConfigDefaultProviders) {
+	*out = *in
+	if in.Tracing != nil {
+		in, out := &in.Tracing, &out.Tracing
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Metrics != nil {
+		in, out := &in.Metrics, &out.Metrics
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.AccessLogging != nil {
+		in, out := &in.AccessLogging, &out.AccessLogging
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigDefaultProviders.
+func (in *MeshConfigDefaultProviders) DeepCopy() *MeshConfigDefaultProviders {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigDefaultProviders)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProvider) DeepCopyInto(out *MeshConfigExtensionProvider) {
+	*out = *in
+	if in.EnvoyExtAuthzHttp != nil {
+		in, out := &in.EnvoyExtAuthzHttp, &out.EnvoyExtAuthzHttp
+		*out = new(MeshConfigExtensionProviderEnvoyExternalAuthorizationHttpProvider)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.EnvoyExtAuthzGrpc != nil {
+		in, out := &in.EnvoyExtAuthzGrpc, &out.EnvoyExtAuthzGrpc
+		*out = new(MeshConfigExtensionProviderEnvoyExternalAuthorizationGrpcProvider)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Zipkin != nil {
+		in, out := &in.Zipkin, &out.Zipkin
+		*out = new(MeshConfigExtensionProviderZipkinTracingProvider)
+		**out = **in
+	}
+	if in.Lightstep != nil {
+		in, out := &in.Lightstep, &out.Lightstep
+		*out = new(MeshConfigExtensionProviderLightstepTracingProvider)
+		**out = **in
+	}
+	if in.Datadog != nil {
+		in, out := &in.Datadog, &out.Datadog
+		*out = new(MeshConfigExtensionProviderDatadogTracingProvider)
+		**out = **in
+	}
+	if in.Stackdriver != nil {
+		in, out := &in.Stackdriver, &out.Stackdriver
+		*out = new(MeshConfigExtensionProviderStackdriverProvider)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Opencensus != nil {
+		in, out := &in.Opencensus, &out.Opencensus
+		*out = new(MeshConfigExtensionProviderOpenCensusAgentTracingProvider)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Skywalking != nil {
+		in, out := &in.Skywalking, &out.Skywalking
+		*out = new(MeshConfigExtensionProviderSkyWalkingTracingProvider)
+		**out = **in
+	}
+	if in.Opentelemetry != nil {
+		in, out := &in.Opentelemetry, &out.Opentelemetry
+		*out = new(MeshConfigExtensionProviderOpenTelemetryTracingProvider)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Prometheus != nil {
+		in, out := &in.Prometheus, &out.Prometheus
+		*out = new(MeshConfigExtensionProviderPrometheusMetricsProvider)
+		**out = **in
+	}
+	if in.EnvoyFileAccessLog != nil {
+		in, out := &in.EnvoyFileAccessLog, &out.EnvoyFileAccessLog
+		*out = new(MeshConfigExtensionProviderEnvoyFileAccessLogProvider)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.EnvoyHttpAls != nil {
+		in, out := &in.EnvoyHttpAls, &out.EnvoyHttpAls
+		*out = new(MeshConfigExtensionProviderEnvoyHttpGrpcV3LogProvider)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.EnvoyTcpAls != nil {
+		in, out := &in.EnvoyTcpAls, &out.EnvoyTcpAls
+		*out = new(MeshConfigExtensionProviderEnvoyTcpGrpcV3LogProvider)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.EnvoyOtelAls != nil {
+		in, out := &in.EnvoyOtelAls, &out.EnvoyOtelAls
+		*out = new(MeshConfigExtensionProviderEnvoyOpenTelemetryLogProvider)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProvider.
+func (in *MeshConfigExtensionProvider) DeepCopy() *MeshConfigExtensionProvider {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProvider)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderDatadogTracingProvider) DeepCopyInto(out *MeshConfigExtensionProviderDatadogTracingProvider) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderDatadogTracingProvider.
+func (in *MeshConfigExtensionProviderDatadogTracingProvider) DeepCopy() *MeshConfigExtensionProviderDatadogTracingProvider {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderDatadogTracingProvider)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderEnvoyExternalAuthorizationGrpcProvider) DeepCopyInto(out *MeshConfigExtensionProviderEnvoyExternalAuthorizationGrpcProvider) {
+	*out = *in
+	if in.Timeout != nil {
+		in, out := &in.Timeout, &out.Timeout
+		*out = new(metav1.Duration)
+		**out = **in
+	}
+	if in.IncludeRequestBodyInCheck != nil {
+		in, out := &in.IncludeRequestBodyInCheck, &out.IncludeRequestBodyInCheck
+		*out = new(MeshConfigExtensionProviderEnvoyExternalAuthorizationRequestBody)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderEnvoyExternalAuthorizationGrpcProvider.
+func (in *MeshConfigExtensionProviderEnvoyExternalAuthorizationGrpcProvider) DeepCopy() *MeshConfigExtensionProviderEnvoyExternalAuthorizationGrpcProvider {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderEnvoyExternalAuthorizationGrpcProvider)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderEnvoyExternalAuthorizationHttpProvider) DeepCopyInto(out *MeshConfigExtensionProviderEnvoyExternalAuthorizationHttpProvider) {
+	*out = *in
+	if in.Timeout != nil {
+		in, out := &in.Timeout, &out.Timeout
+		*out = new(metav1.Duration)
+		**out = **in
+	}
+	if in.IncludeHeadersInCheck != nil {
+		in, out := &in.IncludeHeadersInCheck, &out.IncludeHeadersInCheck
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.IncludeRequestHeadersInCheck != nil {
+		in, out := &in.IncludeRequestHeadersInCheck, &out.IncludeRequestHeadersInCheck
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.IncludeAdditionalHeadersInCheck != nil {
+		in, out := &in.IncludeAdditionalHeadersInCheck, &out.IncludeAdditionalHeadersInCheck
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.IncludeRequestBodyInCheck != nil {
+		in, out := &in.IncludeRequestBodyInCheck, &out.IncludeRequestBodyInCheck
+		*out = new(MeshConfigExtensionProviderEnvoyExternalAuthorizationRequestBody)
+		**out = **in
+	}
+	if in.HeadersToUpstreamOnAllow != nil {
+		in, out := &in.HeadersToUpstreamOnAllow, &out.HeadersToUpstreamOnAllow
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.HeadersToDownstreamOnDeny != nil {
+		in, out := &in.HeadersToDownstreamOnDeny, &out.HeadersToDownstreamOnDeny
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.HeadersToDownstreamOnAllow != nil {
+		in, out := &in.HeadersToDownstreamOnAllow, &out.HeadersToDownstreamOnAllow
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderEnvoyExternalAuthorizationHttpProvider.
+func (in *MeshConfigExtensionProviderEnvoyExternalAuthorizationHttpProvider) DeepCopy() *MeshConfigExtensionProviderEnvoyExternalAuthorizationHttpProvider {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderEnvoyExternalAuthorizationHttpProvider)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderEnvoyExternalAuthorizationRequestBody) DeepCopyInto(out *MeshConfigExtensionProviderEnvoyExternalAuthorizationRequestBody) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderEnvoyExternalAuthorizationRequestBody.
+func (in *MeshConfigExtensionProviderEnvoyExternalAuthorizationRequestBody) DeepCopy() *MeshConfigExtensionProviderEnvoyExternalAuthorizationRequestBody {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderEnvoyExternalAuthorizationRequestBody)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderEnvoyFileAccessLogProvider) DeepCopyInto(out *MeshConfigExtensionProviderEnvoyFileAccessLogProvider) {
+	*out = *in
+	if in.LogFormat != nil {
+		in, out := &in.LogFormat, &out.LogFormat
+		*out = new(MeshConfigExtensionProviderEnvoyFileAccessLogProviderLogFormat)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderEnvoyFileAccessLogProvider.
+func (in *MeshConfigExtensionProviderEnvoyFileAccessLogProvider) DeepCopy() *MeshConfigExtensionProviderEnvoyFileAccessLogProvider {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderEnvoyFileAccessLogProvider)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderEnvoyFileAccessLogProviderLogFormat) DeepCopyInto(out *MeshConfigExtensionProviderEnvoyFileAccessLogProviderLogFormat) {
+	*out = *in
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderEnvoyFileAccessLogProviderLogFormat.
+func (in *MeshConfigExtensionProviderEnvoyFileAccessLogProviderLogFormat) DeepCopy() *MeshConfigExtensionProviderEnvoyFileAccessLogProviderLogFormat {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderEnvoyFileAccessLogProviderLogFormat)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderEnvoyHttpGrpcV3LogProvider) DeepCopyInto(out *MeshConfigExtensionProviderEnvoyHttpGrpcV3LogProvider) {
+	*out = *in
+	if in.FilterStateObjectsToLog != nil {
+		in, out := &in.FilterStateObjectsToLog, &out.FilterStateObjectsToLog
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.AdditionalRequestHeadersToLog != nil {
+		in, out := &in.AdditionalRequestHeadersToLog, &out.AdditionalRequestHeadersToLog
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.AdditionalResponseHeadersToLog != nil {
+		in, out := &in.AdditionalResponseHeadersToLog, &out.AdditionalResponseHeadersToLog
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.AdditionalResponseTrailersToLog != nil {
+		in, out := &in.AdditionalResponseTrailersToLog, &out.AdditionalResponseTrailersToLog
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderEnvoyHttpGrpcV3LogProvider.
+func (in *MeshConfigExtensionProviderEnvoyHttpGrpcV3LogProvider) DeepCopy() *MeshConfigExtensionProviderEnvoyHttpGrpcV3LogProvider {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderEnvoyHttpGrpcV3LogProvider)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderEnvoyOpenTelemetryLogProvider) DeepCopyInto(out *MeshConfigExtensionProviderEnvoyOpenTelemetryLogProvider) {
+	*out = *in
+	if in.LogFormat != nil {
+		in, out := &in.LogFormat, &out.LogFormat
+		*out = new(MeshConfigExtensionProviderEnvoyOpenTelemetryLogProviderLogFormat)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderEnvoyOpenTelemetryLogProvider.
+func (in *MeshConfigExtensionProviderEnvoyOpenTelemetryLogProvider) DeepCopy() *MeshConfigExtensionProviderEnvoyOpenTelemetryLogProvider {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderEnvoyOpenTelemetryLogProvider)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderEnvoyOpenTelemetryLogProviderLogFormat) DeepCopyInto(out *MeshConfigExtensionProviderEnvoyOpenTelemetryLogProviderLogFormat) {
+	*out = *in
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderEnvoyOpenTelemetryLogProviderLogFormat.
+func (in *MeshConfigExtensionProviderEnvoyOpenTelemetryLogProviderLogFormat) DeepCopy() *MeshConfigExtensionProviderEnvoyOpenTelemetryLogProviderLogFormat {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderEnvoyOpenTelemetryLogProviderLogFormat)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderEnvoyTcpGrpcV3LogProvider) DeepCopyInto(out *MeshConfigExtensionProviderEnvoyTcpGrpcV3LogProvider) {
+	*out = *in
+	if in.FilterStateObjectsToLog != nil {
+		in, out := &in.FilterStateObjectsToLog, &out.FilterStateObjectsToLog
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderEnvoyTcpGrpcV3LogProvider.
+func (in *MeshConfigExtensionProviderEnvoyTcpGrpcV3LogProvider) DeepCopy() *MeshConfigExtensionProviderEnvoyTcpGrpcV3LogProvider {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderEnvoyTcpGrpcV3LogProvider)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderHttpHeader) DeepCopyInto(out *MeshConfigExtensionProviderHttpHeader) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderHttpHeader.
+func (in *MeshConfigExtensionProviderHttpHeader) DeepCopy() *MeshConfigExtensionProviderHttpHeader {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderHttpHeader)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderHttpService) DeepCopyInto(out *MeshConfigExtensionProviderHttpService) {
+	*out = *in
+	if in.Timeout != nil {
+		in, out := &in.Timeout, &out.Timeout
+		*out = new(metav1.Duration)
+		**out = **in
+	}
+	if in.Headers != nil {
+		in, out := &in.Headers, &out.Headers
+		*out = make([]*MeshConfigExtensionProviderHttpHeader, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(MeshConfigExtensionProviderHttpHeader)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderHttpService.
+func (in *MeshConfigExtensionProviderHttpService) DeepCopy() *MeshConfigExtensionProviderHttpService {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderHttpService)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderLightstepTracingProvider) DeepCopyInto(out *MeshConfigExtensionProviderLightstepTracingProvider) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderLightstepTracingProvider.
+func (in *MeshConfigExtensionProviderLightstepTracingProvider) DeepCopy() *MeshConfigExtensionProviderLightstepTracingProvider {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderLightstepTracingProvider)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderOpenCensusAgentTracingProvider) DeepCopyInto(out *MeshConfigExtensionProviderOpenCensusAgentTracingProvider) {
+	*out = *in
+	if in.Context != nil {
+		in, out := &in.Context, &out.Context
+		*out = make([]MeshConfigExtensionProviderOpenCensusAgentTracingProviderTraceContext, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderOpenCensusAgentTracingProvider.
+func (in *MeshConfigExtensionProviderOpenCensusAgentTracingProvider) DeepCopy() *MeshConfigExtensionProviderOpenCensusAgentTracingProvider {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderOpenCensusAgentTracingProvider)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderOpenTelemetryTracingProvider) DeepCopyInto(out *MeshConfigExtensionProviderOpenTelemetryTracingProvider) {
+	*out = *in
+	if in.Http != nil {
+		in, out := &in.Http, &out.Http
+		*out = new(MeshConfigExtensionProviderHttpService)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ResourceDetectors != nil {
+		in, out := &in.ResourceDetectors, &out.ResourceDetectors
+		*out = new(MeshConfigExtensionProviderResourceDetectors)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DynatraceSampler != nil {
+		in, out := &in.DynatraceSampler, &out.DynatraceSampler
+		*out = new(MeshConfigExtensionProviderOpenTelemetryTracingProviderDynatraceSampler)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderOpenTelemetryTracingProvider.
+func (in *MeshConfigExtensionProviderOpenTelemetryTracingProvider) DeepCopy() *MeshConfigExtensionProviderOpenTelemetryTracingProvider {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderOpenTelemetryTracingProvider)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderOpenTelemetryTracingProviderDynatraceSampler) DeepCopyInto(out *MeshConfigExtensionProviderOpenTelemetryTracingProviderDynatraceSampler) {
+	*out = *in
+	if in.HttpService != nil {
+		in, out := &in.HttpService, &out.HttpService
+		*out = new(MeshConfigExtensionProviderOpenTelemetryTracingProviderDynatraceSamplerDynatraceApi)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderOpenTelemetryTracingProviderDynatraceSampler.
+func (in *MeshConfigExtensionProviderOpenTelemetryTracingProviderDynatraceSampler) DeepCopy() *MeshConfigExtensionProviderOpenTelemetryTracingProviderDynatraceSampler {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderOpenTelemetryTracingProviderDynatraceSampler)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderOpenTelemetryTracingProviderDynatraceSamplerDynatraceApi) DeepCopyInto(out *MeshConfigExtensionProviderOpenTelemetryTracingProviderDynatraceSamplerDynatraceApi) {
+	*out = *in
+	if in.Http != nil {
+		in, out := &in.Http, &out.Http
+		*out = new(MeshConfigExtensionProviderHttpService)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderOpenTelemetryTracingProviderDynatraceSamplerDynatraceApi.
+func (in *MeshConfigExtensionProviderOpenTelemetryTracingProviderDynatraceSamplerDynatraceApi) DeepCopy() *MeshConfigExtensionProviderOpenTelemetryTracingProviderDynatraceSamplerDynatraceApi {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderOpenTelemetryTracingProviderDynatraceSamplerDynatraceApi)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderPrometheusMetricsProvider) DeepCopyInto(out *MeshConfigExtensionProviderPrometheusMetricsProvider) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderPrometheusMetricsProvider.
+func (in *MeshConfigExtensionProviderPrometheusMetricsProvider) DeepCopy() *MeshConfigExtensionProviderPrometheusMetricsProvider {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderPrometheusMetricsProvider)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderResourceDetectors) DeepCopyInto(out *MeshConfigExtensionProviderResourceDetectors) {
+	*out = *in
+	if in.Environment != nil {
+		in, out := &in.Environment, &out.Environment
+		*out = new(MeshConfigExtensionProviderResourceDetectorsEnvironmentResourceDetector)
+		**out = **in
+	}
+	if in.Dynatrace != nil {
+		in, out := &in.Dynatrace, &out.Dynatrace
+		*out = new(MeshConfigExtensionProviderResourceDetectorsDynatraceResourceDetector)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderResourceDetectors.
+func (in *MeshConfigExtensionProviderResourceDetectors) DeepCopy() *MeshConfigExtensionProviderResourceDetectors {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderResourceDetectors)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderResourceDetectorsDynatraceResourceDetector) DeepCopyInto(out *MeshConfigExtensionProviderResourceDetectorsDynatraceResourceDetector) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderResourceDetectorsDynatraceResourceDetector.
+func (in *MeshConfigExtensionProviderResourceDetectorsDynatraceResourceDetector) DeepCopy() *MeshConfigExtensionProviderResourceDetectorsDynatraceResourceDetector {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderResourceDetectorsDynatraceResourceDetector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderResourceDetectorsEnvironmentResourceDetector) DeepCopyInto(out *MeshConfigExtensionProviderResourceDetectorsEnvironmentResourceDetector) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderResourceDetectorsEnvironmentResourceDetector.
+func (in *MeshConfigExtensionProviderResourceDetectorsEnvironmentResourceDetector) DeepCopy() *MeshConfigExtensionProviderResourceDetectorsEnvironmentResourceDetector {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderResourceDetectorsEnvironmentResourceDetector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderSkyWalkingTracingProvider) DeepCopyInto(out *MeshConfigExtensionProviderSkyWalkingTracingProvider) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderSkyWalkingTracingProvider.
+func (in *MeshConfigExtensionProviderSkyWalkingTracingProvider) DeepCopy() *MeshConfigExtensionProviderSkyWalkingTracingProvider {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderSkyWalkingTracingProvider)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderStackdriverProvider) DeepCopyInto(out *MeshConfigExtensionProviderStackdriverProvider) {
+	*out = *in
+	if in.MaxNumberOfAttributes != nil {
+		in, out := &in.MaxNumberOfAttributes, &out.MaxNumberOfAttributes
+		*out = new(int64)
+		**out = **in
+	}
+	if in.MaxNumberOfAnnotations != nil {
+		in, out := &in.MaxNumberOfAnnotations, &out.MaxNumberOfAnnotations
+		*out = new(int64)
+		**out = **in
+	}
+	if in.MaxNumberOfMessageEvents != nil {
+		in, out := &in.MaxNumberOfMessageEvents, &out.MaxNumberOfMessageEvents
+		*out = new(int64)
+		**out = **in
+	}
+	if in.Logging != nil {
+		in, out := &in.Logging, &out.Logging
+		*out = new(MeshConfigExtensionProviderStackdriverProviderLogging)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderStackdriverProvider.
+func (in *MeshConfigExtensionProviderStackdriverProvider) DeepCopy() *MeshConfigExtensionProviderStackdriverProvider {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderStackdriverProvider)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderStackdriverProviderLogging) DeepCopyInto(out *MeshConfigExtensionProviderStackdriverProviderLogging) {
+	*out = *in
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderStackdriverProviderLogging.
+func (in *MeshConfigExtensionProviderStackdriverProviderLogging) DeepCopy() *MeshConfigExtensionProviderStackdriverProviderLogging {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderStackdriverProviderLogging)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigExtensionProviderZipkinTracingProvider) DeepCopyInto(out *MeshConfigExtensionProviderZipkinTracingProvider) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigExtensionProviderZipkinTracingProvider.
+func (in *MeshConfigExtensionProviderZipkinTracingProvider) DeepCopy() *MeshConfigExtensionProviderZipkinTracingProvider {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigExtensionProviderZipkinTracingProvider)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigInboundTrafficPolicy) DeepCopyInto(out *MeshConfigInboundTrafficPolicy) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigInboundTrafficPolicy.
+func (in *MeshConfigInboundTrafficPolicy) DeepCopy() *MeshConfigInboundTrafficPolicy {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigInboundTrafficPolicy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigOutboundTrafficPolicy) DeepCopyInto(out *MeshConfigOutboundTrafficPolicy) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigOutboundTrafficPolicy.
+func (in *MeshConfigOutboundTrafficPolicy) DeepCopy() *MeshConfigOutboundTrafficPolicy {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigOutboundTrafficPolicy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigProxyConfig) DeepCopyInto(out *MeshConfigProxyConfig) {
+	*out = *in
+	if in.DrainDuration != nil {
+		in, out := &in.DrainDuration, &out.DrainDuration
+		*out = new(metav1.Duration)
+		**out = **in
+	}
+	if in.DiscoveryRefreshDelay != nil {
+		in, out := &in.DiscoveryRefreshDelay, &out.DiscoveryRefreshDelay
+		*out = new(metav1.Duration)
+		**out = **in
+	}
+	if in.Concurrency != nil {
+		in, out := &in.Concurrency, &out.Concurrency
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Tracing != nil {
+		in, out := &in.Tracing, &out.Tracing
+		*out = new(Tracing)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Sds != nil {
+		in, out := &in.Sds, &out.Sds
+		*out = new(SDS)
+		**out = **in
+	}
+	if in.EnvoyAccessLogService != nil {
+		in, out := &in.EnvoyAccessLogService, &out.EnvoyAccessLogService
+		*out = new(RemoteService)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.EnvoyMetricsService != nil {
+		in, out := &in.EnvoyMetricsService, &out.EnvoyMetricsService
+		*out = new(RemoteService)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ProxyMetadata != nil {
+		in, out := &in.ProxyMetadata, &out.ProxyMetadata
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.RuntimeValues != nil {
+		in, out := &in.RuntimeValues, &out.RuntimeValues
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.ExtraStatTags != nil {
+		in, out := &in.ExtraStatTags, &out.ExtraStatTags
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.GatewayTopology != nil {
+		in, out := &in.GatewayTopology, &out.GatewayTopology
+		*out = new(Topology)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TerminationDrainDuration != nil {
+		in, out := &in.TerminationDrainDuration, &out.TerminationDrainDuration
+		*out = new(metav1.Duration)
+		**out = **in
+	}
+	if in.ReadinessProbe != nil {
+		in, out := &in.ReadinessProbe, &out.ReadinessProbe
+		*out = new(v1.Probe)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ProxyStatsMatcher != nil {
+		in, out := &in.ProxyStatsMatcher, &out.ProxyStatsMatcher
+		*out = new(ProxyConfigProxyStatsMatcher)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.HoldApplicationUntilProxyStarts != nil {
+		in, out := &in.HoldApplicationUntilProxyStarts, &out.HoldApplicationUntilProxyStarts
+		*out = new(bool)
+		**out = **in
+	}
+	if in.CaCertificatesPem != nil {
+		in, out := &in.CaCertificatesPem, &out.CaCertificatesPem
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Image != nil {
+		in, out := &in.Image, &out.Image
+		*out = new(ProxyImage)
+		**out = **in
+	}
+	if in.PrivateKeyProvider != nil {
+		in, out := &in.PrivateKeyProvider, &out.PrivateKeyProvider
+		*out = new(PrivateKeyProvider)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ProxyHeaders != nil {
+		in, out := &in.ProxyHeaders, &out.ProxyHeaders
+		*out = new(ProxyConfigProxyHeaders)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigProxyConfig.
+func (in *MeshConfigProxyConfig) DeepCopy() *MeshConfigProxyConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigProxyConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigProxyPathNormalization) DeepCopyInto(out *MeshConfigProxyPathNormalization) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigProxyPathNormalization.
+func (in *MeshConfigProxyPathNormalization) DeepCopy() *MeshConfigProxyPathNormalization {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigProxyPathNormalization)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigServiceSettings) DeepCopyInto(out *MeshConfigServiceSettings) {
+	*out = *in
+	if in.Settings != nil {
+		in, out := &in.Settings, &out.Settings
+		*out = new(MeshConfigServiceSettingsSettings)
+		**out = **in
+	}
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigServiceSettings.
+func (in *MeshConfigServiceSettings) DeepCopy() *MeshConfigServiceSettings {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigServiceSettings)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigServiceSettingsSettings) DeepCopyInto(out *MeshConfigServiceSettingsSettings) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigServiceSettingsSettings.
+func (in *MeshConfigServiceSettingsSettings) DeepCopy() *MeshConfigServiceSettingsSettings {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigServiceSettingsSettings)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshConfigTLSConfig) DeepCopyInto(out *MeshConfigTLSConfig) {
+	*out = *in
+	if in.EcdhCurves != nil {
+		in, out := &in.EcdhCurves, &out.EcdhCurves
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.CipherSuites != nil {
+		in, out := &in.CipherSuites, &out.CipherSuites
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigTLSConfig.
+func (in *MeshConfigTLSConfig) DeepCopy() *MeshConfigTLSConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshConfigTLSConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MeshNetworks) DeepCopyInto(out *MeshNetworks) {
+	*out = *in
+	if in.Networks != nil {
+		in, out := &in.Networks, &out.Networks
+		*out = make(map[string]*Network, len(*in))
+		for key, val := range *in {
+			var outVal *Network
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(Network)
+				(*in).DeepCopyInto(*out)
+			}
+			(*out)[key] = outVal
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshNetworks.
+func (in *MeshNetworks) DeepCopy() *MeshNetworks {
+	if in == nil {
+		return nil
+	}
+	out := new(MeshNetworks)
+	in.DeepCopyInto(out)
+	return out
+}
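MeshNetworks above is the map-of-pointers case: a fresh allocation per non-nil value, with nil values preserved as nil. The same shape in miniature, using a toy Network type rather than the vendored one:

package main

import "fmt"

type Network struct{ Endpoints []string }

// deepCopyNetworks mirrors the generated map[string]*T pattern.
func deepCopyNetworks(in map[string]*Network) map[string]*Network {
	if in == nil {
		return nil
	}
	out := make(map[string]*Network, len(in))
	for key, val := range in {
		if val == nil {
			out[key] = nil
			continue
		}
		// Copy the struct and its slice so the two maps share nothing.
		out[key] = &Network{Endpoints: append([]string(nil), val.Endpoints...)}
	}
	return out
}

func main() {
	in := map[string]*Network{"net1": {Endpoints: []string{"10.0.0.1"}}, "net2": nil}
	out := deepCopyNetworks(in)
	out["net1"].Endpoints[0] = "changed"
	fmt.Println(in["net1"].Endpoints[0], out["net2"]) // 10.0.0.1 <nil> — original untouched
}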
+func (in *MultiClusterConfig) DeepCopyInto(out *MultiClusterConfig) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.IncludeEnvoyFilter != nil {
+		in, out := &in.IncludeEnvoyFilter, &out.IncludeEnvoyFilter
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiClusterConfig.
+func (in *MultiClusterConfig) DeepCopy() *MultiClusterConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(MultiClusterConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Network) DeepCopyInto(out *Network) {
+	*out = *in
+	if in.Endpoints != nil {
+		in, out := &in.Endpoints, &out.Endpoints
+		*out = make([]*NetworkNetworkEndpoints, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(NetworkNetworkEndpoints)
+				**out = **in
+			}
+		}
+	}
+	if in.Gateways != nil {
+		in, out := &in.Gateways, &out.Gateways
+		*out = make([]*NetworkIstioNetworkGateway, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(NetworkIstioNetworkGateway)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network.
+func (in *Network) DeepCopy() *Network {
+	if in == nil {
+		return nil
+	}
+	out := new(Network)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkIstioNetworkGateway) DeepCopyInto(out *NetworkIstioNetworkGateway) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkIstioNetworkGateway.
+func (in *NetworkIstioNetworkGateway) DeepCopy() *NetworkIstioNetworkGateway {
+	if in == nil {
+		return nil
+	}
+	out := new(NetworkIstioNetworkGateway)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkNetworkEndpoints) DeepCopyInto(out *NetworkNetworkEndpoints) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkNetworkEndpoints.
+func (in *NetworkNetworkEndpoints) DeepCopy() *NetworkNetworkEndpoints {
+	if in == nil {
+		return nil
+	}
+	out := new(NetworkNetworkEndpoints)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OutboundTrafficPolicyConfig) DeepCopyInto(out *OutboundTrafficPolicyConfig) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutboundTrafficPolicyConfig.
+func (in *OutboundTrafficPolicyConfig) DeepCopy() *OutboundTrafficPolicyConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(OutboundTrafficPolicyConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PilotConfig) DeepCopyInto(out *PilotConfig) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.AutoscaleEnabled != nil {
+		in, out := &in.AutoscaleEnabled, &out.AutoscaleEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.AutoscaleBehavior != nil {
+		in, out := &in.AutoscaleBehavior, &out.AutoscaleBehavior
+		*out = new(v2.HorizontalPodAutoscalerBehavior)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = new(v1.ResourceRequirements)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Cpu != nil {
+		in, out := &in.Cpu, &out.Cpu
+		*out = new(TargetUtilizationConfig)
+		**out = **in
+	}
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.KeepaliveMaxServerConnectionAge != nil {
+		in, out := &in.KeepaliveMaxServerConnectionAge, &out.KeepaliveMaxServerConnectionAge
+		*out = new(metav1.Duration)
+		**out = **in
+	}
+	if in.DeploymentLabels != nil {
+		in, out := &in.DeploymentLabels, &out.DeploymentLabels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.PodLabels != nil {
+		in, out := &in.PodLabels, &out.PodLabels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.ConfigMap != nil {
+		in, out := &in.ConfigMap, &out.ConfigMap
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Env != nil {
+		in, out := &in.Env, &out.Env
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Affinity != nil {
+		in, out := &in.Affinity, &out.Affinity
+		*out = new(v1.Affinity)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.RollingMaxSurge != nil {
+		in, out := &in.RollingMaxSurge, &out.RollingMaxSurge
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+	if in.RollingMaxUnavailable != nil {
+		in, out := &in.RollingMaxUnavailable, &out.RollingMaxUnavailable
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+	if in.Tolerations != nil {
+		in, out := &in.Tolerations, &out.Tolerations
+		*out = make([]v1.Toleration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PodAnnotations != nil {
+		in, out := &in.PodAnnotations, &out.PodAnnotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.ServiceAnnotations != nil {
+		in, out := &in.ServiceAnnotations, &out.ServiceAnnotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.ServiceAccountAnnotations != nil {
+		in, out := &in.ServiceAccountAnnotations, &out.ServiceAccountAnnotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.SeccompProfile != nil {
+		in, out := &in.SeccompProfile, &out.SeccompProfile
+		*out = new(v1.SeccompProfile)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TopologySpreadConstraints != nil {
+		in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints
+		*out = make([]v1.TopologySpreadConstraint, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ExtraContainerArgs != nil {
+		in, out := &in.ExtraContainerArgs, &out.ExtraContainerArgs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.VolumeMounts != nil {
+		in, out := &in.VolumeMounts, &out.VolumeMounts
+		*out = make([]v1.VolumeMount, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Volumes != nil {
+		in, out := &in.Volumes, &out.Volumes
+		*out = make([]v1.Volume, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.IpFamilies != nil {
+		in, out := &in.IpFamilies, &out.IpFamilies
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Memory != nil {
+		in, out := &in.Memory, &out.Memory
+		*out = new(TargetUtilizationConfig)
+		**out = **in
+	}
+	if in.Cni != nil {
+		in, out := &in.Cni, &out.Cni
+		*out = new(CNIUsageConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Taint != nil {
+		in, out := &in.Taint, &out.Taint
+		*out = new(PilotTaintControllerConfig)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PilotConfig.
+func (in *PilotConfig) DeepCopy() *PilotConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(PilotConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PilotPolicyConfig) DeepCopyInto(out *PilotPolicyConfig) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PilotPolicyConfig.
+func (in *PilotPolicyConfig) DeepCopy() *PilotPolicyConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(PilotPolicyConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PilotTaintControllerConfig) DeepCopyInto(out *PilotTaintControllerConfig) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PilotTaintControllerConfig.
+func (in *PilotTaintControllerConfig) DeepCopy() *PilotTaintControllerConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(PilotTaintControllerConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PolicyTargetReference) DeepCopyInto(out *PolicyTargetReference) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyTargetReference.
+func (in *PolicyTargetReference) DeepCopy() *PolicyTargetReference {
+	if in == nil {
+		return nil
+	}
+	out := new(PolicyTargetReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PortSelector) DeepCopyInto(out *PortSelector) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortSelector.
+func (in *PortSelector) DeepCopy() *PortSelector {
+	if in == nil {
+		return nil
+	}
+	out := new(PortSelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PortsConfig) DeepCopyInto(out *PortsConfig) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortsConfig.
+func (in *PortsConfig) DeepCopy() *PortsConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(PortsConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrivateKeyProvider) DeepCopyInto(out *PrivateKeyProvider) {
+	*out = *in
+	if in.Cryptomb != nil {
+		in, out := &in.Cryptomb, &out.Cryptomb
+		*out = new(PrivateKeyProviderCryptoMb)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Qat != nil {
+		in, out := &in.Qat, &out.Qat
+		*out = new(PrivateKeyProviderQAT)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateKeyProvider.
+func (in *PrivateKeyProvider) DeepCopy() *PrivateKeyProvider {
+	if in == nil {
+		return nil
+	}
+	out := new(PrivateKeyProvider)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrivateKeyProviderCryptoMb) DeepCopyInto(out *PrivateKeyProviderCryptoMb) {
+	*out = *in
+	if in.PollDelay != nil {
+		in, out := &in.PollDelay, &out.PollDelay
+		*out = new(metav1.Duration)
+		**out = **in
+	}
+	if in.Fallback != nil {
+		in, out := &in.Fallback, &out.Fallback
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateKeyProviderCryptoMb.
+func (in *PrivateKeyProviderCryptoMb) DeepCopy() *PrivateKeyProviderCryptoMb {
+	if in == nil {
+		return nil
+	}
+	out := new(PrivateKeyProviderCryptoMb)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrivateKeyProviderQAT) DeepCopyInto(out *PrivateKeyProviderQAT) {
+	*out = *in
+	if in.PollDelay != nil {
+		in, out := &in.PollDelay, &out.PollDelay
+		*out = new(metav1.Duration)
+		**out = **in
+	}
+	if in.Fallback != nil {
+		in, out := &in.Fallback, &out.Fallback
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateKeyProviderQAT.
+func (in *PrivateKeyProviderQAT) DeepCopy() *PrivateKeyProviderQAT {
+	if in == nil {
+		return nil
+	}
+	out := new(PrivateKeyProviderQAT)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) {
+	*out = *in
+	if in.EnableCoreDump != nil {
+		in, out := &in.EnableCoreDump, &out.EnableCoreDump
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Privileged != nil {
+		in, out := &in.Privileged, &out.Privileged
+		*out = new(bool)
+		**out = **in
+	}
+	if in.StartupProbe != nil {
+		in, out := &in.StartupProbe, &out.StartupProbe
+		*out = new(StartupProbe)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = new(v1.ResourceRequirements)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Lifecycle != nil {
+		in, out := &in.Lifecycle, &out.Lifecycle
+		*out = new(v1.Lifecycle)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.HoldApplicationUntilProxyStarts != nil {
+		in, out := &in.HoldApplicationUntilProxyStarts, &out.HoldApplicationUntilProxyStarts
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig.
+func (in *ProxyConfig) DeepCopy() *ProxyConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ProxyConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProxyConfigProxyHeaders) DeepCopyInto(out *ProxyConfigProxyHeaders) {
+	*out = *in
+	if in.RequestId != nil {
+		in, out := &in.RequestId, &out.RequestId
+		*out = new(ProxyConfigProxyHeadersRequestId)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Server != nil {
+		in, out := &in.Server, &out.Server
+		*out = new(ProxyConfigProxyHeadersServer)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AttemptCount != nil {
+		in, out := &in.AttemptCount, &out.AttemptCount
+		*out = new(ProxyConfigProxyHeadersAttemptCount)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.EnvoyDebugHeaders != nil {
+		in, out := &in.EnvoyDebugHeaders, &out.EnvoyDebugHeaders
+		*out = new(ProxyConfigProxyHeadersEnvoyDebugHeaders)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.MetadataExchangeHeaders != nil {
+		in, out := &in.MetadataExchangeHeaders, &out.MetadataExchangeHeaders
+		*out = new(ProxyConfigProxyHeadersMetadataExchangeHeaders)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfigProxyHeaders.
+func (in *ProxyConfigProxyHeaders) DeepCopy() *ProxyConfigProxyHeaders {
+	if in == nil {
+		return nil
+	}
+	out := new(ProxyConfigProxyHeaders)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProxyConfigProxyHeadersAttemptCount) DeepCopyInto(out *ProxyConfigProxyHeadersAttemptCount) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfigProxyHeadersAttemptCount.
+func (in *ProxyConfigProxyHeadersAttemptCount) DeepCopy() *ProxyConfigProxyHeadersAttemptCount {
+	if in == nil {
+		return nil
+	}
+	out := new(ProxyConfigProxyHeadersAttemptCount)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProxyConfigProxyHeadersEnvoyDebugHeaders) DeepCopyInto(out *ProxyConfigProxyHeadersEnvoyDebugHeaders) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfigProxyHeadersEnvoyDebugHeaders.
+func (in *ProxyConfigProxyHeadersEnvoyDebugHeaders) DeepCopy() *ProxyConfigProxyHeadersEnvoyDebugHeaders {
+	if in == nil {
+		return nil
+	}
+	out := new(ProxyConfigProxyHeadersEnvoyDebugHeaders)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProxyConfigProxyHeadersMetadataExchangeHeaders) DeepCopyInto(out *ProxyConfigProxyHeadersMetadataExchangeHeaders) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfigProxyHeadersMetadataExchangeHeaders.
+func (in *ProxyConfigProxyHeadersMetadataExchangeHeaders) DeepCopy() *ProxyConfigProxyHeadersMetadataExchangeHeaders {
+	if in == nil {
+		return nil
+	}
+	out := new(ProxyConfigProxyHeadersMetadataExchangeHeaders)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProxyConfigProxyHeadersRequestId) DeepCopyInto(out *ProxyConfigProxyHeadersRequestId) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfigProxyHeadersRequestId.
+func (in *ProxyConfigProxyHeadersRequestId) DeepCopy() *ProxyConfigProxyHeadersRequestId {
+	if in == nil {
+		return nil
+	}
+	out := new(ProxyConfigProxyHeadersRequestId)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProxyConfigProxyHeadersServer) DeepCopyInto(out *ProxyConfigProxyHeadersServer) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfigProxyHeadersServer.
+func (in *ProxyConfigProxyHeadersServer) DeepCopy() *ProxyConfigProxyHeadersServer {
+	if in == nil {
+		return nil
+	}
+	out := new(ProxyConfigProxyHeadersServer)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProxyConfigProxyStatsMatcher) DeepCopyInto(out *ProxyConfigProxyStatsMatcher) {
+	*out = *in
+	if in.InclusionPrefixes != nil {
+		in, out := &in.InclusionPrefixes, &out.InclusionPrefixes
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.InclusionSuffixes != nil {
+		in, out := &in.InclusionSuffixes, &out.InclusionSuffixes
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.InclusionRegexps != nil {
+		in, out := &in.InclusionRegexps, &out.InclusionRegexps
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfigProxyStatsMatcher.
+func (in *ProxyConfigProxyStatsMatcher) DeepCopy() *ProxyConfigProxyStatsMatcher {
+	if in == nil {
+		return nil
+	}
+	out := new(ProxyConfigProxyStatsMatcher)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProxyImage) DeepCopyInto(out *ProxyImage) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyImage.
+func (in *ProxyImage) DeepCopy() *ProxyImage {
+	if in == nil {
+		return nil
+	}
+	out := new(ProxyImage)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProxyInitConfig) DeepCopyInto(out *ProxyInitConfig) {
+	*out = *in
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = new(v1.ResourceRequirements)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyInitConfig.
+func (in *ProxyInitConfig) DeepCopy() *ProxyInitConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ProxyInitConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RemoteIstio) DeepCopyInto(out *RemoteIstio) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteIstio.
+func (in *RemoteIstio) DeepCopy() *RemoteIstio {
+	if in == nil {
+		return nil
+	}
+	out := new(RemoteIstio)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RemoteIstio) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RemoteIstioCondition) DeepCopyInto(out *RemoteIstioCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteIstioCondition.
+func (in *RemoteIstioCondition) DeepCopy() *RemoteIstioCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(RemoteIstioCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RemoteIstioList) DeepCopyInto(out *RemoteIstioList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]RemoteIstio, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteIstioList.
+func (in *RemoteIstioList) DeepCopy() *RemoteIstioList {
+	if in == nil {
+		return nil
+	}
+	out := new(RemoteIstioList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RemoteIstioList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RemoteIstioSpec) DeepCopyInto(out *RemoteIstioSpec) {
+	*out = *in
+	if in.UpdateStrategy != nil {
+		in, out := &in.UpdateStrategy, &out.UpdateStrategy
+		*out = new(IstioUpdateStrategy)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Values != nil {
+		in, out := &in.Values, &out.Values
+		*out = new(Values)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteIstioSpec.
+func (in *RemoteIstioSpec) DeepCopy() *RemoteIstioSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(RemoteIstioSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RemoteIstioStatus) DeepCopyInto(out *RemoteIstioStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]RemoteIstioCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	out.Revisions = in.Revisions
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteIstioStatus.
+func (in *RemoteIstioStatus) DeepCopy() *RemoteIstioStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(RemoteIstioStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RemoteService) DeepCopyInto(out *RemoteService) {
+	*out = *in
+	if in.TlsSettings != nil {
+		in, out := &in.TlsSettings, &out.TlsSettings
+		*out = new(ClientTLSSettings)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TcpKeepalive != nil {
+		in, out := &in.TcpKeepalive, &out.TcpKeepalive
+		*out = new(ConnectionPoolSettingsTCPSettingsTcpKeepalive)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteService.
+func (in *RemoteService) DeepCopy() *RemoteService {
+	if in == nil {
+		return nil
+	}
+	out := new(RemoteService)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceQuotas) DeepCopyInto(out *ResourceQuotas) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotas.
+func (in *ResourceQuotas) DeepCopy() *ResourceQuotas {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceQuotas)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourcesRequestsConfig) DeepCopyInto(out *ResourcesRequestsConfig) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesRequestsConfig.
+func (in *ResourcesRequestsConfig) DeepCopy() *ResourcesRequestsConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourcesRequestsConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RevisionSummary) DeepCopyInto(out *RevisionSummary) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RevisionSummary.
+func (in *RevisionSummary) DeepCopy() *RevisionSummary {
+	if in == nil {
+		return nil
+	}
+	out := new(RevisionSummary)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SDS) DeepCopyInto(out *SDS) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SDS.
+func (in *SDS) DeepCopy() *SDS {
+	if in == nil {
+		return nil
+	}
+	out := new(SDS)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SDSConfig) DeepCopyInto(out *SDSConfig) {
+	*out = *in
+	if in.Token != nil {
+		in, out := &in.Token, &out.Token
+		*out = new(SDSConfigToken)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SDSConfig.
+func (in *SDSConfig) DeepCopy() *SDSConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(SDSConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SDSConfigToken) DeepCopyInto(out *SDSConfigToken) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SDSConfigToken.
+func (in *SDSConfigToken) DeepCopy() *SDSConfigToken {
+	if in == nil {
+		return nil
+	}
+	out := new(SDSConfigToken)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *STSConfig) DeepCopyInto(out *STSConfig) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new STSConfig.
+func (in *STSConfig) DeepCopy() *STSConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(STSConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretVolume) DeepCopyInto(out *SecretVolume) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVolume.
+func (in *SecretVolume) DeepCopy() *SecretVolume {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretVolume)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SidecarInjectorConfig) DeepCopyInto(out *SidecarInjectorConfig) {
+	*out = *in
+	if in.EnableNamespacesByDefault != nil {
+		in, out := &in.EnableNamespacesByDefault, &out.EnableNamespacesByDefault
+		*out = new(bool)
+		**out = **in
+	}
+	if in.NeverInjectSelector != nil {
+		in, out := &in.NeverInjectSelector, &out.NeverInjectSelector
+		*out = make([]metav1.LabelSelector, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AlwaysInjectSelector != nil {
+		in, out := &in.AlwaysInjectSelector, &out.AlwaysInjectSelector
+		*out = make([]metav1.LabelSelector, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RewriteAppHTTPProbe != nil {
+		in, out := &in.RewriteAppHTTPProbe, &out.RewriteAppHTTPProbe
+		*out = new(bool)
+		**out = **in
+	}
+	if in.InjectedAnnotations != nil {
+		in, out := &in.InjectedAnnotations, &out.InjectedAnnotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Templates != nil {
+		in, out := &in.Templates, &out.Templates
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.DefaultTemplates != nil {
+		in, out := &in.DefaultTemplates, &out.DefaultTemplates
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SidecarInjectorConfig.
+func (in *SidecarInjectorConfig) DeepCopy() *SidecarInjectorConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(SidecarInjectorConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StartupProbe) DeepCopyInto(out *StartupProbe) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StartupProbe.
+func (in *StartupProbe) DeepCopy() *StartupProbe {
+	if in == nil {
+		return nil
+	}
+	out := new(StartupProbe)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TargetUtilizationConfig) DeepCopyInto(out *TargetUtilizationConfig) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetUtilizationConfig.
+func (in *TargetUtilizationConfig) DeepCopy() *TargetUtilizationConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(TargetUtilizationConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TelemetryConfig) DeepCopyInto(out *TelemetryConfig) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.V2 != nil {
+		in, out := &in.V2, &out.V2
+		*out = new(TelemetryV2Config)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TelemetryConfig.
+func (in *TelemetryConfig) DeepCopy() *TelemetryConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(TelemetryConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TelemetryV2Config) DeepCopyInto(out *TelemetryV2Config) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Prometheus != nil {
+		in, out := &in.Prometheus, &out.Prometheus
+		*out = new(TelemetryV2PrometheusConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Stackdriver != nil {
+		in, out := &in.Stackdriver, &out.Stackdriver
+		*out = new(TelemetryV2StackDriverConfig)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TelemetryV2Config.
+func (in *TelemetryV2Config) DeepCopy() *TelemetryV2Config {
+	if in == nil {
+		return nil
+	}
+	out := new(TelemetryV2Config)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TelemetryV2PrometheusConfig) DeepCopyInto(out *TelemetryV2PrometheusConfig) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TelemetryV2PrometheusConfig.
+func (in *TelemetryV2PrometheusConfig) DeepCopy() *TelemetryV2PrometheusConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(TelemetryV2PrometheusConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TelemetryV2StackDriverConfig) DeepCopyInto(out *TelemetryV2StackDriverConfig) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TelemetryV2StackDriverConfig.
+func (in *TelemetryV2StackDriverConfig) DeepCopy() *TelemetryV2StackDriverConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(TelemetryV2StackDriverConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Topology) DeepCopyInto(out *Topology) {
+	*out = *in
+	if in.ProxyProtocol != nil {
+		in, out := &in.ProxyProtocol, &out.ProxyProtocol
+		*out = new(TopologyProxyProtocolConfiguration)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Topology.
+func (in *Topology) DeepCopy() *Topology {
+	if in == nil {
+		return nil
+	}
+	out := new(Topology)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TopologyProxyProtocolConfiguration) DeepCopyInto(out *TopologyProxyProtocolConfiguration) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologyProxyProtocolConfiguration.
+func (in *TopologyProxyProtocolConfiguration) DeepCopy() *TopologyProxyProtocolConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(TopologyProxyProtocolConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TracerConfig) DeepCopyInto(out *TracerConfig) {
+	*out = *in
+	if in.Datadog != nil {
+		in, out := &in.Datadog, &out.Datadog
+		*out = new(TracerDatadogConfig)
+		**out = **in
+	}
+	if in.Lightstep != nil {
+		in, out := &in.Lightstep, &out.Lightstep
+		*out = new(TracerLightStepConfig)
+		**out = **in
+	}
+	if in.Zipkin != nil {
+		in, out := &in.Zipkin, &out.Zipkin
+		*out = new(TracerZipkinConfig)
+		**out = **in
+	}
+	if in.Stackdriver != nil {
+		in, out := &in.Stackdriver, &out.Stackdriver
+		*out = new(TracerStackdriverConfig)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracerConfig.
+func (in *TracerConfig) DeepCopy() *TracerConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(TracerConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TracerDatadogConfig) DeepCopyInto(out *TracerDatadogConfig) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracerDatadogConfig.
+func (in *TracerDatadogConfig) DeepCopy() *TracerDatadogConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(TracerDatadogConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TracerLightStepConfig) DeepCopyInto(out *TracerLightStepConfig) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracerLightStepConfig.
+func (in *TracerLightStepConfig) DeepCopy() *TracerLightStepConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(TracerLightStepConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TracerStackdriverConfig) DeepCopyInto(out *TracerStackdriverConfig) {
+	*out = *in
+	if in.Debug != nil {
+		in, out := &in.Debug, &out.Debug
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracerStackdriverConfig.
+func (in *TracerStackdriverConfig) DeepCopy() *TracerStackdriverConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(TracerStackdriverConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TracerZipkinConfig) DeepCopyInto(out *TracerZipkinConfig) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracerZipkinConfig.
+func (in *TracerZipkinConfig) DeepCopy() *TracerZipkinConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(TracerZipkinConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Tracing) DeepCopyInto(out *Tracing) {
+	*out = *in
+	if in.Zipkin != nil {
+		in, out := &in.Zipkin, &out.Zipkin
+		*out = new(TracingZipkin)
+		**out = **in
+	}
+	if in.Lightstep != nil {
+		in, out := &in.Lightstep, &out.Lightstep
+		*out = new(TracingLightstep)
+		**out = **in
+	}
+	if in.Datadog != nil {
+		in, out := &in.Datadog, &out.Datadog
+		*out = new(TracingDatadog)
+		**out = **in
+	}
+	if in.Stackdriver != nil {
+		in, out := &in.Stackdriver, &out.Stackdriver
+		*out = new(TracingStackdriver)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.OpenCensusAgent != nil {
+		in, out := &in.OpenCensusAgent, &out.OpenCensusAgent
+		*out = new(TracingOpenCensusAgent)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.CustomTags != nil {
+		in, out := &in.CustomTags, &out.CustomTags
+		*out = make(map[string]*TracingCustomTag, len(*in))
+		for key, val := range *in {
+			var outVal *TracingCustomTag
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(TracingCustomTag)
+				(*in).DeepCopyInto(*out)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.TlsSettings != nil {
+		in, out := &in.TlsSettings, &out.TlsSettings
+		*out = new(ClientTLSSettings)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tracing.
+func (in *Tracing) DeepCopy() *Tracing {
+	if in == nil {
+		return nil
+	}
+	out := new(Tracing)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TracingCustomTag) DeepCopyInto(out *TracingCustomTag) {
+	*out = *in
+	if in.Literal != nil {
+		in, out := &in.Literal, &out.Literal
+		*out = new(TracingLiteral)
+		**out = **in
+	}
+	if in.Environment != nil {
+		in, out := &in.Environment, &out.Environment
+		*out = new(TracingEnvironment)
+		**out = **in
+	}
+	if in.Header != nil {
+		in, out := &in.Header, &out.Header
+		*out = new(TracingRequestHeader)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingCustomTag.
+func (in *TracingCustomTag) DeepCopy() *TracingCustomTag {
+	if in == nil {
+		return nil
+	}
+	out := new(TracingCustomTag)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TracingDatadog) DeepCopyInto(out *TracingDatadog) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingDatadog.
+func (in *TracingDatadog) DeepCopy() *TracingDatadog {
+	if in == nil {
+		return nil
+	}
+	out := new(TracingDatadog)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TracingEnvironment) DeepCopyInto(out *TracingEnvironment) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingEnvironment.
+func (in *TracingEnvironment) DeepCopy() *TracingEnvironment {
+	if in == nil {
+		return nil
+	}
+	out := new(TracingEnvironment)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TracingLightstep) DeepCopyInto(out *TracingLightstep) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingLightstep.
+func (in *TracingLightstep) DeepCopy() *TracingLightstep {
+	if in == nil {
+		return nil
+	}
+	out := new(TracingLightstep)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TracingLiteral) DeepCopyInto(out *TracingLiteral) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingLiteral.
+func (in *TracingLiteral) DeepCopy() *TracingLiteral {
+	if in == nil {
+		return nil
+	}
+	out := new(TracingLiteral)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TracingOpenCensusAgent) DeepCopyInto(out *TracingOpenCensusAgent) {
+	*out = *in
+	if in.Context != nil {
+		in, out := &in.Context, &out.Context
+		*out = make([]TracingOpenCensusAgentTraceContext, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingOpenCensusAgent.
+func (in *TracingOpenCensusAgent) DeepCopy() *TracingOpenCensusAgent {
+	if in == nil {
+		return nil
+	}
+	out := new(TracingOpenCensusAgent)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TracingRequestHeader) DeepCopyInto(out *TracingRequestHeader) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingRequestHeader.
+func (in *TracingRequestHeader) DeepCopy() *TracingRequestHeader {
+	if in == nil {
+		return nil
+	}
+	out := new(TracingRequestHeader)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TracingStackdriver) DeepCopyInto(out *TracingStackdriver) {
+	*out = *in
+	if in.MaxNumberOfAttributes != nil {
+		in, out := &in.MaxNumberOfAttributes, &out.MaxNumberOfAttributes
+		*out = new(int64)
+		**out = **in
+	}
+	if in.MaxNumberOfAnnotations != nil {
+		in, out := &in.MaxNumberOfAnnotations, &out.MaxNumberOfAnnotations
+		*out = new(int64)
+		**out = **in
+	}
+	if in.MaxNumberOfMessageEvents != nil {
+		in, out := &in.MaxNumberOfMessageEvents, &out.MaxNumberOfMessageEvents
+		*out = new(int64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingStackdriver.
+func (in *TracingStackdriver) DeepCopy() *TracingStackdriver {
+	if in == nil {
+		return nil
+	}
+	out := new(TracingStackdriver)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TracingZipkin) DeepCopyInto(out *TracingZipkin) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingZipkin.
+func (in *TracingZipkin) DeepCopy() *TracingZipkin {
+	if in == nil {
+		return nil
+	}
+	out := new(TracingZipkin)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Values) DeepCopyInto(out *Values) {
+	*out = *in
+	if in.Global != nil {
+		in, out := &in.Global, &out.Global
+		*out = new(GlobalConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Pilot != nil {
+		in, out := &in.Pilot, &out.Pilot
+		*out = new(PilotConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Telemetry != nil {
+		in, out := &in.Telemetry, &out.Telemetry
+		*out = new(TelemetryConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SidecarInjectorWebhook != nil {
+		in, out := &in.SidecarInjectorWebhook, &out.SidecarInjectorWebhook
+		*out = new(SidecarInjectorConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.MeshConfig != nil {
+		in, out := &in.MeshConfig, &out.MeshConfig
+		*out = new(MeshConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Base != nil {
+		in, out := &in.Base, &out.Base
+		*out = new(BaseConfig)
+		**out = **in
+	}
+	if in.IstiodRemote != nil {
+		in, out := &in.IstiodRemote, &out.IstiodRemote
+		*out = new(IstiodRemoteConfig)
+		**out = **in
+	}
+	if in.RevisionTags != nil {
+		in, out := &in.RevisionTags, &out.RevisionTags
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Experimental != nil {
+		in, out := &in.Experimental, &out.Experimental
+		*out = make(json.RawMessage, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Values.
+func (in *Values) DeepCopy() *Values {
+	if in == nil {
+		return nil
+	}
+	out := new(Values)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WaypointConfig) DeepCopyInto(out *WaypointConfig) {
+	*out = *in
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = new(v1.ResourceRequirements)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WaypointConfig.
+func (in *WaypointConfig) DeepCopy() *WaypointConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(WaypointConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkloadSelector) DeepCopyInto(out *WorkloadSelector) {
+	*out = *in
+	if in.MatchLabels != nil {
+		in, out := &in.MatchLabels, &out.MatchLabels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadSelector.
+func (in *WorkloadSelector) DeepCopy() *WorkloadSelector {
+	if in == nil {
+		return nil
+	}
+	out := new(WorkloadSelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ZeroVPNConfig) DeepCopyInto(out *ZeroVPNConfig) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZeroVPNConfig.
+func (in *ZeroVPNConfig) DeepCopy() *ZeroVPNConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ZeroVPNConfig)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE
deleted file mode 100644
index 5d8cb5b72..000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE
+++ /dev/null
@@ -1 +0,0 @@
-Copyright 2012 Matt T. Proud (matt.proud@gmail.com)
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore
deleted file mode 100644
index e16fb946b..000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-cover.dat
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile
deleted file mode 100644
index 81be21437..000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-all:
-
-cover:
-	go test -cover -v -coverprofile=cover.dat ./...
-	go tool cover -func cover.dat
-
-.PHONY: cover
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go
deleted file mode 100644
index 7c08e564f..000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2013 Matt T. Proud
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pbutil
-
-import (
-	"encoding/binary"
-	"errors"
-	"io"
-
-	"google.golang.org/protobuf/proto"
-)
-
-// TODO: Give error package name prefix in next minor release.
-var errInvalidVarint = errors.New("invalid varint32 encountered")
-
-// ReadDelimited decodes a message from the provided length-delimited stream,
-// where the length is encoded as 32-bit varint prefix to the message body.
-// It returns the total number of bytes read and any applicable error. This is
-// roughly equivalent to the companion Java API's
-// MessageLite#parseDelimitedFrom. As per the reader contract, this function
-// calls r.Read repeatedly as required until exactly one message including its
-// prefix is read and decoded (or an error has occurred). The function never
-// reads more bytes from the stream than required. The function never returns
-// an error if a message has been read and decoded correctly, even if the end
-// of the stream has been reached in doing so. In that case, any subsequent
-// calls return (0, io.EOF).
-func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
-	// TODO: Consider allowing the caller to specify a decode buffer in the
-	// next major version.
-
-	// TODO: Consider using error wrapping to annotate error state in pass-
-	// through cases in the next minor version.
-
-	// Per AbstractParser#parsePartialDelimitedFrom with
-	// CodedInputStream#readRawVarint32.
-	var headerBuf [binary.MaxVarintLen32]byte
-	var bytesRead, varIntBytes int
-	var messageLength uint64
-	for varIntBytes == 0 { // i.e. no varint has been decoded yet.
-		if bytesRead >= len(headerBuf) {
-			return bytesRead, errInvalidVarint
-		}
-		// We have to read byte by byte here to avoid reading more bytes
-		// than required. Each read byte is appended to what we have
-		// read before.
-		newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
-		if newBytesRead == 0 {
-			if err != nil {
-				return bytesRead, err
-			}
-			// A Reader should not return (0, nil); but if it does, it should
-			// be treated as no-op according to the Reader contract.
-			continue
-		}
-		bytesRead += newBytesRead
-		// Now present everything read so far to the varint decoder and
-		// see if a varint can be decoded already.
-		messageLength, varIntBytes = binary.Uvarint(headerBuf[:bytesRead])
-	}
-
-	messageBuf := make([]byte, messageLength)
-	newBytesRead, err := io.ReadFull(r, messageBuf)
-	bytesRead += newBytesRead
-	if err != nil {
-		return bytesRead, err
-	}
-
-	return bytesRead, proto.Unmarshal(messageBuf, m)
-}
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go
deleted file mode 100644
index e58dd9d29..000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2013 Matt T. Proud
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pbutil
-
-import (
-	"encoding/binary"
-	"io"
-
-	"google.golang.org/protobuf/proto"
-)
-
-// WriteDelimited encodes and dumps a message to the provided writer prefixed
-// with a 32-bit varint indicating the length of the encoded message, producing
-// a length-delimited record stream, which can be used to chain together
-// encoded messages of the same type together in a file. It returns the total
-// number of bytes written and any applicable error. This is roughly
-// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
-func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
-	// TODO: Consider allowing the caller to specify an encode buffer in the
-	// next major version.
-
-	buffer, err := proto.Marshal(m)
-	if err != nil {
-		return 0, err
-	}
-
-	var buf [binary.MaxVarintLen32]byte
-	encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))
-
-	sync, err := w.Write(buf[:encodedLength])
-	if err != nil {
-		return sync, err
-	}
-
-	n, err = w.Write(buffer)
-	return n + sync, err
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go
index dd29cccc3..ea46f38ec 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go
@@ -68,7 +68,7 @@ func (l *Linter) Lint() ([]Problem, error) {
 	var problems []Problem
 
 	if l.r != nil {
-		d := expfmt.NewDecoder(l.r, expfmt.FmtText)
+		d := expfmt.NewDecoder(l.r, expfmt.NewFormat(expfmt.TypeTextPlain))
 		mf := &dto.MetricFamily{}
 
 		for {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go
index 269f56435..9dce15eaf 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go
@@ -265,13 +265,13 @@ func compareMetricFamilies(got, expected []*dto.MetricFamily, metricNames ...str
 // result.
 func compare(got, want []*dto.MetricFamily) error {
 	var gotBuf, wantBuf bytes.Buffer
-	enc := expfmt.NewEncoder(&gotBuf, expfmt.FmtText)
+	enc := expfmt.NewEncoder(&gotBuf, expfmt.NewFormat(expfmt.TypeTextPlain))
 	for _, mf := range got {
 		if err := enc.Encode(mf); err != nil {
 			return fmt.Errorf("encoding gathered metrics failed: %w", err)
 		}
 	}
-	enc = expfmt.NewEncoder(&wantBuf, expfmt.FmtText)
+	enc = expfmt.NewEncoder(&wantBuf, expfmt.NewFormat(expfmt.TypeTextPlain))
 	for _, mf := range want {
 		if err := enc.Encode(mf); err != nil {
 			return fmt.Errorf("encoding expected metrics failed: %w", err)
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
index cee360db7..2f1549075 100644
--- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -483,6 +483,8 @@ type Histogram struct {
 	// histograms.
 	PositiveDelta []int64   `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
 	PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket.
+	// Only used for native histograms. These exemplars MUST have a timestamp.
+	Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars" json:"exemplars,omitempty"`
 }
 
 func (x *Histogram) Reset() {
@@ -622,6 +624,13 @@ func (x *Histogram) GetPositiveCount() []float64 {
 	return nil
 }
 
+func (x *Histogram) GetExemplars() []*Exemplar {
+	if x != nil {
+		return x.Exemplars
+	}
+	return nil
+}
+
 // A Bucket of a conventional histogram, each of which is treated as
 // an individual counter-like time series by Prometheus.
 type Bucket struct {
@@ -923,6 +932,7 @@ type MetricFamily struct {
 	Help   *string     `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
 	Type   *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
 	Metric []*Metric   `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+	Unit   *string     `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"`
 }
 
 func (x *MetricFamily) Reset() {
@@ -985,6 +995,13 @@ func (x *MetricFamily) GetMetric() []*Metric {
 	return nil
 }
 
+func (x *MetricFamily) GetUnit() string {
+	if x != nil && x.Unit != nil {
+		return *x.Unit
+	}
+	return ""
+}
+
 var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor
 
 var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
@@ -1028,7 +1045,7 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
 	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
 	0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74,
 	0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
-	0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xac, 0x05, 0x0a, 0x09, 0x48,
+	0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xea, 0x05, 0x0a, 0x09, 0x48,
 	0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70,
 	0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b,
 	0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73,
@@ -1071,79 +1088,84 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
 	0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c,
 	0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63,
 	0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69,
-	0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75,
-	0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69,
-	0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f,
-	0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12,
-	0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f,
-	0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52,
-	0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74,
-	0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62,
-	0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65,
-	0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c,
-	0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
- 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, - 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, - 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, - 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, - 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, - 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, + 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x09, 0x65, 0x78, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75, + 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, + 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x14, 0x63, + 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, + 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75, + 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, 0x72, 0x42, + 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, + 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, + 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, + 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, + 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91, + 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, + 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 
0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, - 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, - 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, - 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, - 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, - 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, + 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65, + 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, + 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, + 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, + 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, - 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 
0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, - 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, - 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, - 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, - 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, - 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, - 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, - 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, - 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, - 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, - 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, 0x62, 0x0a, 0x0a, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x55, - 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, 0x10, - 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, 0x12, 0x0b, - 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x48, - 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x47, 0x41, - 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x05, 0x42, - 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, - 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, 0x3b, 0x69, - 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, + 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73, + 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 
0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, + 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, + 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, + 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c, + 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x2a, 0x62, 0x0a, + 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, + 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, + 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, + 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, + 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, + 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, + 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, + 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, + 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, } var ( @@ -1185,22 +1207,23 @@ var file_io_prometheus_client_metrics_proto_depIdxs = []int32{ 13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp 9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan 9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan - 10, // 8: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar - 1, // 9: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair - 13, // 10: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp - 1, // 11: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair - 2, // 12: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge - 3, // 13: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter - 5, // 14: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary - 6, // 15: 
io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped - 7, // 16: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram - 0, // 17: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType - 11, // 18: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric - 19, // [19:19] is the sub-list for method output_type - 19, // [19:19] is the sub-list for method input_type - 19, // [19:19] is the sub-list for extension type_name - 19, // [19:19] is the sub-list for extension extendee - 0, // [0:19] is the sub-list for field type_name + 10, // 8: io.prometheus.client.Histogram.exemplars:type_name -> io.prometheus.client.Exemplar + 10, // 9: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar + 1, // 10: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair + 13, // 11: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp + 1, // 12: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair + 2, // 13: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge + 3, // 14: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter + 5, // 15: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary + 6, // 16: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped + 7, // 17: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram + 0, // 18: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType + 11, // 19: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric + 20, // [20:20] is the sub-list for method output_type + 20, // [20:20] is the sub-list for method input_type + 20, // [20:20] is the sub-list for extension type_name + 20, // [20:20] is the sub-list for extension extendee + 0, // [0:20] is the sub-list for field type_name } func init() { file_io_prometheus_client_metrics_proto_init() } diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 0ca86a3dc..25cfaa216 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -14,6 +14,7 @@ package expfmt import ( + "bufio" "fmt" "io" "math" @@ -21,8 +22,8 @@ import ( "net/http" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/encoding/protodelim" - "github.com/matttproud/golang_protobuf_extensions/v2/pbutil" "github.com/prometheus/common/model" ) @@ -44,7 +45,7 @@ func ResponseFormat(h http.Header) Format { mediatype, params, err := mime.ParseMediaType(ct) if err != nil { - return FmtUnknown + return fmtUnknown } const textType = "text/plain" @@ -52,42 +53,44 @@ func ResponseFormat(h http.Header) Format { switch mediatype { case ProtoType: if p, ok := params["proto"]; ok && p != ProtoProtocol { - return FmtUnknown + return fmtUnknown } if e, ok := params["encoding"]; ok && e != "delimited" { - return FmtUnknown + return fmtUnknown } - return FmtProtoDelim + return fmtProtoDelim case textType: if v, ok := params["version"]; ok && v != TextVersion { - return FmtUnknown + return fmtUnknown } - return FmtText + return fmtText } - return FmtUnknown + return fmtUnknown } // NewDecoder returns a new decoder based on the given input format. // If the input format does not imply otherwise, a text format decoder is returned. 
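[A hedged usage sketch for the reworked decoder implemented just below: formats are now constructed with NewFormat instead of the removed exported Fmt* constants. The scrape payload here is invented for illustration:]

package main

import (
	"fmt"
	"io"
	"strings"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	in := strings.NewReader("# TYPE requests_total counter\nrequests_total 42\n")
	dec := expfmt.NewDecoder(in, expfmt.NewFormat(expfmt.TypeTextPlain))
	for {
		mf := &dto.MetricFamily{}
		if err := dec.Decode(mf); err != nil {
			if err == io.EOF {
				break // legitimate end of the input stream
			}
			panic(err)
		}
		fmt.Println(mf.GetName(), mf.GetMetric()[0].GetCounter().GetValue())
	}
}
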
func NewDecoder(r io.Reader, format Format) Decoder { - switch format { - case FmtProtoDelim: - return &protoDecoder{r: r} + switch format.FormatType() { + case TypeProtoDelim: + return &protoDecoder{r: bufio.NewReader(r)} } return &textDecoder{r: r} } // protoDecoder implements the Decoder interface for protocol buffers. type protoDecoder struct { - r io.Reader + r protodelim.Reader } // Decode implements the Decoder interface. func (d *protoDecoder) Decode(v *dto.MetricFamily) error { - _, err := pbutil.ReadDelimited(d.r, v) - if err != nil { + opts := protodelim.UnmarshalOptions{ + MaxSize: -1, + } + if err := opts.UnmarshalFrom(d.r, v); err != nil { return err } if !model.IsValidMetricName(model.LabelValue(v.GetName())) { diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index ca2140600..ff5ef7a9d 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,10 +18,13 @@ import ( "io" "net/http" - "github.com/matttproud/golang_protobuf_extensions/v2/pbutil" - "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + "google.golang.org/protobuf/encoding/protodelim" "google.golang.org/protobuf/encoding/prototext" + "github.com/prometheus/common/model" + + "github.com/munnerz/goautoneg" + dto "github.com/prometheus/client_model/go" ) @@ -60,23 +63,32 @@ func (ec encoderCloser) Close() error { // as the support is still experimental. To include the option to negotiate // FmtOpenMetrics, use NegotiateOpenMetrics. func Negotiate(h http.Header) Format { + escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { + switch Format(escapeParam) { + case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: + escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + default: + // If the escaping parameter is unknown, ignore it. + } + } ver := ac.Params["version"] if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return FmtProtoDelim + return fmtProtoDelim + escapingScheme case "text": - return FmtProtoText + return fmtProtoText + escapingScheme case "compact-text": - return FmtProtoCompact + return fmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText + return fmtText + escapingScheme } } - return FmtText + return fmtText + escapingScheme } // NegotiateIncludingOpenMetrics works like Negotiate but includes @@ -84,29 +96,40 @@ func Negotiate(h http.Header) Format { // temporary and will disappear once FmtOpenMetrics is fully supported and as // such may be negotiated by the normal Negotiate function. func NegotiateIncludingOpenMetrics(h http.Header) Format { + escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { + switch Format(escapeParam) { + case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: + escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + default: + // If the escaping parameter is unknown, ignore it. 
+ } + } ver := ac.Params["version"] if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return FmtProtoDelim + return fmtProtoDelim + escapingScheme case "text": - return FmtProtoText + return fmtProtoText + escapingScheme case "compact-text": - return FmtProtoCompact + return fmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText + return fmtText + escapingScheme } if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { - if ver == OpenMetricsVersion_1_0_0 { - return FmtOpenMetrics_1_0_0 + switch ver { + case OpenMetricsVersion_1_0_0: + return fmtOpenMetrics_1_0_0 + escapingScheme + default: + return fmtOpenMetrics_0_0_1 + escapingScheme } - return FmtOpenMetrics_0_0_1 } } - return FmtText + return fmtText + escapingScheme } // NewEncoder returns a new encoder based on content type negotiation. All @@ -115,44 +138,54 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { // for FmtOpenMetrics, but a future (breaking) release will add the Close method // to the Encoder interface directly. The current version of the Encoder // interface is kept for backwards compatibility. -func NewEncoder(w io.Writer, format Format) Encoder { - switch format { - case FmtProtoDelim: +// In cases where the Format does not allow for UTF-8 names, the global +// NameEscapingScheme will be applied. +// +// NewEncoder can be called with additional options to customize the OpenMetrics text output. +// For example: +// NewEncoder(w, FmtOpenMetrics_1_0_0, WithCreatedLines()) +// +// Extra options are ignored for all other formats. +func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder { + escapingScheme := format.ToEscapingScheme() + + switch format.FormatType() { + case TypeProtoDelim: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := pbutil.WriteDelimited(w, v) + _, err := protodelim.MarshalTo(w, v) return err }, close: func() error { return nil }, } - case FmtProtoCompact: + case TypeProtoCompact: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, v.String()) + _, err := fmt.Fprintln(w, model.EscapeMetricFamily(v, escapingScheme).String()) return err }, close: func() error { return nil }, } - case FmtProtoText: + case TypeProtoText: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, prototext.Format(v)) + _, err := fmt.Fprintln(w, prototext.Format(model.EscapeMetricFamily(v, escapingScheme))) return err }, close: func() error { return nil }, } - case FmtText: + case TypeTextPlain: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToText(w, v) + _, err := MetricFamilyToText(w, model.EscapeMetricFamily(v, escapingScheme)) return err }, close: func() error { return nil }, } - case FmtOpenMetrics_0_0_1, FmtOpenMetrics_1_0_0: + case TypeOpenMetrics: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToOpenMetrics(w, v) + _, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme), options...) 
 			return err
 		},
 		close: func() error {
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
index c4cb20f0d..051b38cd1 100644
--- a/vendor/github.com/prometheus/common/expfmt/expfmt.go
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -14,30 +14,164 @@
 // Package expfmt contains tools for reading and writing Prometheus metrics.
 package expfmt
 
+import (
+	"fmt"
+	"strings"
+
+	"github.com/prometheus/common/model"
+)
+
 // Format specifies the HTTP content type of the different wire protocols.
 type Format string
 
-// Constants to assemble the Content-Type values for the different wire protocols.
+// Constants to assemble the Content-Type values for the different wire
+// protocols. The Content-Type strings here are all for the legacy exposition
+// formats, where valid characters for metric names and label names are limited.
+// Support for arbitrary UTF-8 characters in those names is already partially
+// implemented in this module (see model.ValidationScheme), but to actually use
+// it on the wire, new content-type strings will have to be agreed upon and
+// added here.
 const (
 	TextVersion              = "0.0.4"
 	ProtoType                = `application/vnd.google.protobuf`
 	ProtoProtocol            = `io.prometheus.client.MetricFamily`
-	ProtoFmt                 = ProtoType + "; proto=" + ProtoProtocol + ";"
+	protoFmt                 = ProtoType + "; proto=" + ProtoProtocol + ";"
 	OpenMetricsType          = `application/openmetrics-text`
 	OpenMetricsVersion_0_0_1 = "0.0.1"
 	OpenMetricsVersion_1_0_0 = "1.0.0"
 
-	// The Content-Type values for the different wire protocols.
-	FmtUnknown      Format = ``
-	FmtText         Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
-	FmtProtoDelim   Format = ProtoFmt + ` encoding=delimited`
-	FmtProtoText    Format = ProtoFmt + ` encoding=text`
-	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
-	FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
-	FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
+	// The Content-Type values for the different wire protocols. Note that these
+	// values are now unexported. If code was relying on comparisons to these
+	// constants, instead use FormatType().
+	fmtUnknown           Format = ``
+	fmtText              Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
+	fmtProtoDelim        Format = protoFmt + ` encoding=delimited`
+	fmtProtoText         Format = protoFmt + ` encoding=text`
+	fmtProtoCompact      Format = protoFmt + ` encoding=compact-text`
+	fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
+	fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
 )
 
 const (
 	hdrContentType = "Content-Type"
 	hdrAccept      = "Accept"
 )
+
+// FormatType is a Go enum representing the overall category for the given
+// Format. As the number of Format permutations increases, doing basic string
+// comparisons is not feasible, so this enum captures the most useful
+// high-level attribute of the Format string.
+type FormatType int
+
+const (
+	TypeUnknown FormatType = iota
+	TypeProtoCompact
+	TypeProtoDelim
+	TypeProtoText
+	TypeTextPlain
+	TypeOpenMetrics
+)
+
+// NewFormat generates a new Format from the type provided. Mostly used for
+// tests; most Formats should be generated as part of content negotiation in
+// encode.go. If a type has more than one version, the latest version will be
+// returned.
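[As the comment above suggests, callers that used to compare Format strings against the exported constants can switch on FormatType() instead, and the variadic EncoderOption parameters introduced in encode.go only take effect for OpenMetrics output. A hedged sketch under those assumptions; the metric family literal is invented:]

package main

import (
	"fmt"
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func main() {
	f := expfmt.NewFormat(expfmt.TypeOpenMetrics)

	// Previously: if f == expfmt.FmtOpenMetrics_1_0_0 { ... }. That constant
	// is unexported now, so inspect the high-level type instead.
	if f.FormatType() == expfmt.TypeOpenMetrics {
		fmt.Println("negotiated an OpenMetrics format")
	}

	// EncoderOptions are new in this version and only affect OpenMetrics output.
	enc := expfmt.NewEncoder(os.Stdout, f, expfmt.WithCreatedLines(), expfmt.WithUnit())
	mf := &dto.MetricFamily{
		Name:   proto.String("queue_depth"),
		Type:   dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{{Gauge: &dto.Gauge{Value: proto.Float64(7)}}},
	}
	if err := enc.Encode(mf); err != nil {
		panic(err)
	}
	// OpenMetrics requires a trailing `# EOF`; encoders that need it
	// implement expfmt.Closer.
	if c, ok := enc.(expfmt.Closer); ok {
		_ = c.Close()
	}
}
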
+func NewFormat(t FormatType) Format {
+	switch t {
+	case TypeProtoCompact:
+		return fmtProtoCompact
+	case TypeProtoDelim:
+		return fmtProtoDelim
+	case TypeProtoText:
+		return fmtProtoText
+	case TypeTextPlain:
+		return fmtText
+	case TypeOpenMetrics:
+		return fmtOpenMetrics_1_0_0
+	default:
+		return fmtUnknown
+	}
+}
+
+// NewOpenMetricsFormat generates a new OpenMetrics format matching the
+// specified version number.
+func NewOpenMetricsFormat(version string) (Format, error) {
+	if version == OpenMetricsVersion_0_0_1 {
+		return fmtOpenMetrics_0_0_1, nil
+	}
+	if version == OpenMetricsVersion_1_0_0 {
+		return fmtOpenMetrics_1_0_0, nil
+	}
+	return fmtUnknown, fmt.Errorf("unknown open metrics version string")
+}
+
+// FormatType deduces an overall FormatType for the given format.
+func (f Format) FormatType() FormatType {
+	toks := strings.Split(string(f), ";")
+	params := make(map[string]string)
+	for i, t := range toks {
+		if i == 0 {
+			continue
+		}
+		args := strings.Split(t, "=")
+		if len(args) != 2 {
+			continue
+		}
+		params[strings.TrimSpace(args[0])] = strings.TrimSpace(args[1])
+	}
+
+	switch strings.TrimSpace(toks[0]) {
+	case ProtoType:
+		if params["proto"] != ProtoProtocol {
+			return TypeUnknown
+		}
+		switch params["encoding"] {
+		case "delimited":
+			return TypeProtoDelim
+		case "text":
+			return TypeProtoText
+		case "compact-text":
+			return TypeProtoCompact
+		default:
+			return TypeUnknown
+		}
+	case OpenMetricsType:
+		if params["charset"] != "utf-8" {
+			return TypeUnknown
+		}
+		return TypeOpenMetrics
+	case "text/plain":
+		v, ok := params["version"]
+		if !ok {
+			return TypeTextPlain
+		}
+		if v == TextVersion {
+			return TypeTextPlain
+		}
+		return TypeUnknown
+	default:
+		return TypeUnknown
+	}
+}
+
+// ToEscapingScheme returns an EscapingScheme depending on the Format. If the
+// Format contains an escaping=allow-utf-8 term, it will select NoEscaping. If a
+// valid "escaping" term exists, that will be used. Otherwise, the global default
+// will be returned.
+func (format Format) ToEscapingScheme() model.EscapingScheme {
+	for _, p := range strings.Split(string(format), ";") {
+		toks := strings.Split(p, "=")
+		if len(toks) != 2 {
+			continue
+		}
+		key, value := strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1])
+		if key == model.EscapingKey {
+			scheme, err := model.ToEscapingScheme(value)
+			if err != nil {
+				return model.NameEscapingScheme
+			}
+			return scheme
+		}
+	}
+	return model.NameEscapingScheme
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
index 21cdddcf0..353c5e93f 100644
--- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
@@ -22,11 +22,47 @@ import (
 	"strconv"
 	"strings"
 
+	"google.golang.org/protobuf/types/known/timestamppb"
+
 	"github.com/prometheus/common/model"
 
 	dto "github.com/prometheus/client_model/go"
 )
 
+type encoderOption struct {
+	withCreatedLines bool
+	withUnit         bool
+}
+
+type EncoderOption func(*encoderOption)
+
+// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder
+// to include _created lines (see
+// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1).
+// Created timestamps can improve the accuracy of series reset detection, but
+// come with a bandwidth cost.
+//
+// At the time of writing, created timestamp ingestion is still experimental in
+// Prometheus and needs to be enabled with the feature flag
+// `--feature-flag=created-timestamp-zero-ingestion`, and breaking changes are
+// still possible. Therefore, it is recommended to use this feature with caution.
+func WithCreatedLines() EncoderOption {
+	return func(t *encoderOption) {
+		t.withCreatedLines = true
+	}
+}
+
+// WithUnit is an EncoderOption enabling a set unit to be written to the output
+// and to be added to the metric name, if it's not there already, as a suffix.
+// Without opting in this way, the unit will not be added to the metric name and,
+// on top of that, the unit will not be passed onto the output, even if it
+// were declared in the *dto.MetricFamily struct, i.e. even if in.Unit != nil.
+func WithUnit() EncoderOption {
+	return func(t *encoderOption) {
+		t.withUnit = true
+	}
+}
+
 // MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
 // OpenMetrics text format and writes the resulting lines to 'out'. It returns
 // the number of bytes written and any error encountered. The output will have
@@ -35,6 +71,18 @@ import (
 // sanity checks. If the input contains duplicate metrics or invalid metric or
 // label names, the conversion will result in invalid text format output.
 //
+// If metric names conform to the legacy validation pattern, they will be placed
+// outside the brackets in the traditional way, like `foo{}`. If the metric name
+// fails the legacy validation check, it will be placed quoted inside the
+// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized and
+// no error will be thrown in this case.
+//
+// Similar to metric names, if label names conform to the legacy validation
+// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
+// name fails the legacy validation check, it will be quoted:
+// `foo{"bar"="baz"}`. As stated above, the input is assumed to be sanitized and
+// no error will be thrown in this case.
+//
 // This function fulfills the type 'expfmt.encoder'.
 //
 // Note that OpenMetrics requires a final `# EOF` line. Since this function acts
@@ -47,20 +95,34 @@ import (
 // Prometheus to OpenMetrics or vice versa:
 //
 // - Counters are expected to have the `_total` suffix in their metric name. In
-// the output, the suffix will be truncated from the `# TYPE` and `# HELP`
-// line. A counter with a missing `_total` suffix is not an error. However,
+// the output, the suffix will be truncated from the `# TYPE`, `# HELP` and `# UNIT`
+// lines. A counter with a missing `_total` suffix is not an error. However,
 // its type will be set to `unknown` in that case to avoid invalid OpenMetrics
 // output.
 //
-// - No support for the following (optional) features: `# UNIT` line, `_created`
-// line, info type, stateset type, gaugehistogram type.
+// - According to the OM specs, the `# UNIT` line is optional, but if populated,
+// the unit has to be present in the metric name as its suffix
+// (see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit).
+// However, in order to accommodate any potential scenario where such a change
+// in the metric name is not desirable, users are given the choice to either
+// explicitly opt in, in case they wish for the unit to be included in the
+// output AND in the metric name as a suffix (see the description of the
+// WithUnit function above), or not to opt in, in case they don't want any of
+// that to happen.
+//
+// - No support for the following (optional) features: info type,
+// stateset type, gaugehistogram type.
+//
+// - The size of exemplar labels is not checked (i.e. it's possible to create
+// exemplars that are larger than allowed by the OpenMetrics specification).
+//
+// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
+// with a `NaN` value.)
-func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
+func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...EncoderOption) (written int, err error) {
+	toOM := encoderOption{}
+	for _, option := range options {
+		option(&toOM)
+	}
+
 	name := in.GetName()
 	if name == "" {
 		return 0, fmt.Errorf("MetricFamily has no name: %s", in)
@@ -83,12 +145,15 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
 	}
 
 	var (
-		n          int
-		metricType = in.GetType()
-		shortName  = name
+		n             int
+		metricType    = in.GetType()
+		compliantName = name
 	)
-	if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") {
-		shortName = name[:len(name)-6]
+	if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") {
+		compliantName = name[:len(name)-6]
+	}
+	if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) {
+		compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit)
 	}
 
 	// Comments, first HELP, then TYPE.
@@ -98,7 +163,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
 	if err != nil {
 		return
 	}
-	n, err = w.WriteString(shortName)
+	n, err = writeName(w, compliantName)
 	written += n
 	if err != nil {
 		return
@@ -124,7 +189,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
 	if err != nil {
 		return
 	}
-	n, err = w.WriteString(shortName)
+	n, err = writeName(w, compliantName)
 	written += n
 	if err != nil {
 		return
@@ -151,55 +216,89 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
 	if err != nil {
 		return
 	}
+	if toOM.withUnit && in.Unit != nil {
+		n, err = w.WriteString("# UNIT ")
+		written += n
+		if err != nil {
+			return
+		}
+		n, err = writeName(w, compliantName)
+		written += n
+		if err != nil {
+			return
+		}
+
+		err = w.WriteByte(' ')
+		written++
+		if err != nil {
+			return
+		}
+		n, err = writeEscapedString(w, *in.Unit, true)
+		written += n
+		if err != nil {
+			return
+		}
+		err = w.WriteByte('\n')
+		written++
+		if err != nil {
+			return
+		}
+	}
+
+	var createdTsBytesWritten int
 
 	// Finally the samples, one line for each.
+	if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") {
+		compliantName = compliantName + "_total"
+	}
 	for _, metric := range in.Metric {
 		switch metricType {
 		case dto.MetricType_COUNTER:
 			if metric.Counter == nil {
 				return written, fmt.Errorf(
-					"expected counter in metric %s %s", name, metric,
+					"expected counter in metric %s %s", compliantName, metric,
 				)
 			}
-			// Note that we have ensured above that either the name
-			// ends on `_total` or that the rendered type is
-			// `unknown`. Therefore, no `_total` must be added here.
n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, + w, compliantName, "", metric, "", 0, metric.Counter.GetValue(), 0, false, metric.Counter.Exemplar, ) + if toOM.withCreatedLines && metric.Counter.CreatedTimestamp != nil { + createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "_total", metric, "", 0, metric.Counter.GetCreatedTimestamp()) + n += createdTsBytesWritten + } case dto.MetricType_GAUGE: if metric.Gauge == nil { return written, fmt.Errorf( - "expected gauge in metric %s %s", name, metric, + "expected gauge in metric %s %s", compliantName, metric, ) } n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, + w, compliantName, "", metric, "", 0, metric.Gauge.GetValue(), 0, false, nil, ) case dto.MetricType_UNTYPED: if metric.Untyped == nil { return written, fmt.Errorf( - "expected untyped in metric %s %s", name, metric, + "expected untyped in metric %s %s", compliantName, metric, ) } n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, + w, compliantName, "", metric, "", 0, metric.Untyped.GetValue(), 0, false, nil, ) case dto.MetricType_SUMMARY: if metric.Summary == nil { return written, fmt.Errorf( - "expected summary in metric %s %s", name, metric, + "expected summary in metric %s %s", compliantName, metric, ) } for _, q := range metric.Summary.Quantile { n, err = writeOpenMetricsSample( - w, name, "", metric, + w, compliantName, "", metric, model.QuantileLabel, q.GetQuantile(), q.GetValue(), 0, false, nil, @@ -210,7 +309,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int } } n, err = writeOpenMetricsSample( - w, name, "_sum", metric, "", 0, + w, compliantName, "_sum", metric, "", 0, metric.Summary.GetSampleSum(), 0, false, nil, ) @@ -219,20 +318,24 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int return } n, err = writeOpenMetricsSample( - w, name, "_count", metric, "", 0, + w, compliantName, "_count", metric, "", 0, 0, metric.Summary.GetSampleCount(), true, nil, ) + if toOM.withCreatedLines && metric.Summary.CreatedTimestamp != nil { + createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp()) + n += createdTsBytesWritten + } case dto.MetricType_HISTOGRAM: if metric.Histogram == nil { return written, fmt.Errorf( - "expected histogram in metric %s %s", name, metric, + "expected histogram in metric %s %s", compliantName, metric, ) } infSeen := false for _, b := range metric.Histogram.Bucket { n, err = writeOpenMetricsSample( - w, name, "_bucket", metric, + w, compliantName, "_bucket", metric, model.BucketLabel, b.GetUpperBound(), 0, b.GetCumulativeCount(), true, b.Exemplar, @@ -247,7 +350,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int } if !infSeen { n, err = writeOpenMetricsSample( - w, name, "_bucket", metric, + w, compliantName, "_bucket", metric, model.BucketLabel, math.Inf(+1), 0, metric.Histogram.GetSampleCount(), true, nil, @@ -258,7 +361,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int } } n, err = writeOpenMetricsSample( - w, name, "_sum", metric, "", 0, + w, compliantName, "_sum", metric, "", 0, metric.Histogram.GetSampleSum(), 0, false, nil, ) @@ -267,13 +370,17 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int return } n, err = writeOpenMetricsSample( - w, name, "_count", metric, "", 0, + w, compliantName, "_count", metric, "", 0, 0, metric.Histogram.GetSampleCount(), true, 
nil, ) + if toOM.withCreatedLines && metric.Histogram.CreatedTimestamp != nil { + createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Histogram.GetCreatedTimestamp()) + n += createdTsBytesWritten + } default: return written, fmt.Errorf( - "unexpected type in metric %s %s", name, metric, + "unexpected type in metric %s %s", compliantName, metric, ) } written += n @@ -303,21 +410,9 @@ func writeOpenMetricsSample( floatValue float64, intValue uint64, useIntValue bool, exemplar *dto.Exemplar, ) (int, error) { - var written int - n, err := w.WriteString(name) - written += n - if err != nil { - return written, err - } - if suffix != "" { - n, err = w.WriteString(suffix) - written += n - if err != nil { - return written, err - } - } - n, err = writeOpenMetricsLabelPairs( - w, metric.Label, additionalLabelName, additionalLabelValue, + written := 0 + n, err := writeOpenMetricsNameAndLabelPairs( + w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue, ) written += n if err != nil { @@ -350,7 +445,7 @@ func writeOpenMetricsSample( return written, err } } - if exemplar != nil { + if exemplar != nil && len(exemplar.Label) > 0 { n, err = writeExemplar(w, exemplar) written += n if err != nil { @@ -365,27 +460,58 @@ func writeOpenMetricsSample( return written, nil } -// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float -// in OpenMetrics style. -func writeOpenMetricsLabelPairs( +// writeOpenMetricsNameAndLabelPairs works like writeOpenMetricsSample but +// formats the float in OpenMetrics style. +func writeOpenMetricsNameAndLabelPairs( w enhancedWriter, + name string, in []*dto.LabelPair, additionalLabelName string, additionalLabelValue float64, ) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } var ( - written int - separator byte = '{' + written int + separator byte = '{' + metricInsideBraces = false ) + + if name != "" { + // If the name does not pass the legacy validity check, we must put the + // metric name inside the braces, quoted. + if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + metricInsideBraces = true + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + separator = ',' + } + + n, err := writeName(w, name) + written += n + if err != nil { + return written, err + } + } + + if len(in) == 0 && additionalLabelName == "" { + if metricInsideBraces { + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + } + return written, nil + } + for _, lp := range in { err := w.WriteByte(separator) written++ if err != nil { return written, err } - n, err := w.WriteString(lp.GetName()) + n, err := writeName(w, lp.GetName()) written += n if err != nil { return written, err @@ -442,6 +568,49 @@ func writeOpenMetricsLabelPairs( return written, nil } +// writeOpenMetricsCreated writes the created timestamp for a single time series +// following OpenMetrics text format to w, given the metric name, the metric proto +// message itself, optionally a suffix to be removed, e.g. '_total' for counters, +// an additional label name with a float64 value (use empty string as label name if +// not required) and the timestamp that represents the created timestamp. +// The function returns the number of bytes written and any error encountered. 
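[Putting the new options together: a hedged sketch of what MetricFamilyToOpenMetrics emits for a counter with a unit and a created timestamp, exercising the writeOpenMetricsCreated helper implemented just below. The family literal is invented, and exact float formatting may differ:]

package main

import (
	"os"
	"time"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("request_duration_seconds_total"),
		Type: dto.MetricType_COUNTER.Enum(),
		Unit: proto.String("seconds"),
		Metric: []*dto.Metric{{
			Counter: &dto.Counter{
				Value:            proto.Float64(42),
				CreatedTimestamp: timestamppb.New(time.Unix(1712000000, 0)),
			},
		}},
	}
	// Expected shape of the output (illustrative):
	//
	//	# TYPE request_duration_seconds counter
	//	# UNIT request_duration_seconds seconds
	//	request_duration_seconds_total 42.0
	//	request_duration_seconds_created 1.712e+09
	if _, err := expfmt.MetricFamilyToOpenMetrics(os.Stdout, mf,
		expfmt.WithCreatedLines(), expfmt.WithUnit()); err != nil {
		panic(err)
	}
}
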
+func writeOpenMetricsCreated(w enhancedWriter,
+	name, suffixToTrim string, metric *dto.Metric,
+	additionalLabelName string, additionalLabelValue float64,
+	createdTimestamp *timestamppb.Timestamp,
+) (int, error) {
+	written := 0
+	n, err := writeOpenMetricsNameAndLabelPairs(
+		w, strings.TrimSuffix(name, suffixToTrim)+"_created", metric.Label, additionalLabelName, additionalLabelValue,
+	)
+	written += n
+	if err != nil {
+		return written, err
+	}
+
+	err = w.WriteByte(' ')
+	written++
+	if err != nil {
+		return written, err
+	}
+
+	// TODO(beorn7): Format this directly from components of ts to
+	// avoid overflow/underflow and precision issues of the float
+	// conversion.
+	n, err = writeOpenMetricsFloat(w, float64(createdTimestamp.AsTime().UnixNano())/1e9)
+	written += n
+	if err != nil {
+		return written, err
+	}
+
+	err = w.WriteByte('\n')
+	written++
+	if err != nil {
+		return written, err
+	}
+	return written, nil
+}
+
 // writeExemplar writes the provided exemplar in OpenMetrics format to w. The
 // function returns the number of bytes written and any error encountered.
 func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
@@ -451,7 +620,7 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
 	if err != nil {
 		return written, err
 	}
-	n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0)
+	n, err = writeOpenMetricsNameAndLabelPairs(w, "", e.Label, "", 0)
 	written += n
 	if err != nil {
 		return written, err
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
index 2946b8f1a..f9b8265a9 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -62,6 +62,18 @@ var (
 // contains duplicate metrics or invalid metric or label names, the conversion
 // will result in invalid text format output.
 //
+// If metric names conform to the legacy validation pattern, they will be placed
+// outside the brackets in the traditional way, like `foo{}`. If the metric name
+// fails the legacy validation check, it will be placed quoted inside the
+// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized and
+// no error will be thrown in this case.
+//
+// Similar to metric names, if label names conform to the legacy validation
+// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
+// name fails the legacy validation check, it will be quoted:
+// `foo{"bar"="baz"}`. As stated above, the input is assumed to be sanitized and
+// no error will be thrown in this case.
+//
 // This method fulfills the type 'prometheus.encoder'.
 func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
 	// Fail-fast checks.
@@ -98,7 +110,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e if err != nil { return } - n, err = w.WriteString(name) + n, err = writeName(w, name) written += n if err != nil { return @@ -124,7 +136,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e if err != nil { return } - n, err = w.WriteString(name) + n, err = writeName(w, name) written += n if err != nil { return @@ -280,21 +292,9 @@ func writeSample( additionalLabelName string, additionalLabelValue float64, value float64, ) (int, error) { - var written int - n, err := w.WriteString(name) - written += n - if err != nil { - return written, err - } - if suffix != "" { - n, err = w.WriteString(suffix) - written += n - if err != nil { - return written, err - } - } - n, err = writeLabelPairs( - w, metric.Label, additionalLabelName, additionalLabelValue, + written := 0 + n, err := writeNameAndLabelPairs( + w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue, ) written += n if err != nil { @@ -330,32 +330,64 @@ func writeSample( return written, nil } -// writeLabelPairs converts a slice of LabelPair proto messages plus the -// explicitly given additional label pair into text formatted as required by the -// text format and writes it to 'w'. An empty slice in combination with an empty -// string 'additionalLabelName' results in nothing being written. Otherwise, the -// label pairs are written, escaped as required by the text format, and enclosed -// in '{...}'. The function returns the number of bytes written and any error -// encountered. -func writeLabelPairs( +// writeNameAndLabelPairs converts a slice of LabelPair proto messages plus the +// explicitly given metric name and additional label pair into text formatted as +// required by the text format and writes it to 'w'. An empty slice in +// combination with an empty string 'additionalLabelName' results in nothing +// being written. Otherwise, the label pairs are written, escaped as required by +// the text format, and enclosed in '{...}'. The function returns the number of +// bytes written and any error encountered. If the metric name is not +// legacy-valid, it will be put inside the brackets as well. Legacy-invalid +// label names will also be quoted. +func writeNameAndLabelPairs( w enhancedWriter, + name string, in []*dto.LabelPair, additionalLabelName string, additionalLabelValue float64, ) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } var ( - written int - separator byte = '{' + written int + separator byte = '{' + metricInsideBraces = false ) + + if name != "" { + // If the name does not pass the legacy validity check, we must put the + // metric name inside the braces. 
+ if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + metricInsideBraces = true + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + separator = ',' + } + n, err := writeName(w, name) + written += n + if err != nil { + return written, err + } + } + + if len(in) == 0 && additionalLabelName == "" { + if metricInsideBraces { + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + } + return written, nil + } + for _, lp := range in { err := w.WriteByte(separator) written++ if err != nil { return written, err } - n, err := w.WriteString(lp.GetName()) + n, err := writeName(w, lp.GetName()) written += n if err != nil { return written, err @@ -462,3 +494,27 @@ func writeInt(w enhancedWriter, i int64) (int, error) { numBufPool.Put(bp) return written, err } + +// writeName writes a string as-is if it complies with the legacy naming +// scheme, or escapes it in double quotes if not. +func writeName(w enhancedWriter, name string) (int, error) { + if model.IsValidLegacyMetricName(model.LabelValue(name)) { + return w.WriteString(name) + } + var written int + var err error + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + var n int + n, err = writeEscapedString(w, name, true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + return written, err +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 35db1cc9d..26490211a 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -16,6 +16,7 @@ package expfmt import ( "bufio" "bytes" + "errors" "fmt" "io" "math" @@ -24,8 +25,9 @@ import ( dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/model" "google.golang.org/protobuf/proto" + + "github.com/prometheus/common/model" ) // A stateFn is a function that represents a state in a state machine. By @@ -112,7 +114,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF // stream. Turn this error into something nicer and more // meaningful. (io.EOF is often used as a signal for the legitimate end // of an input stream.) - if p.err == io.EOF { + if p.err != nil && errors.Is(p.err, io.EOF) { p.parseError("unexpected end of input stream") } return p.metricFamiliesByName, p.err @@ -146,7 +148,7 @@ func (p *TextParser) startOfLine() stateFn { // which is not an error but the signal that we are done. // Any other error that happens to align with the start of // a line is still an error. - if p.err == io.EOF { + if errors.Is(p.err, io.EOF) { p.err = nil } return nil diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt deleted file mode 100644 index 7723656d5..000000000 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt +++ /dev/null @@ -1,67 +0,0 @@ -PACKAGE - -package goautoneg -import "bitbucket.org/ww/goautoneg" - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -FUNCTIONS - -func Negotiate(header string, alternatives []string) (content_type string) -Negotiate the most appropriate content_type given the accept header -and a list of alternatives. - -func ParseAccept(header string) (accept []Accept) -Parse an Accept Header string returning a sorted list -of clauses - - -TYPES - -type Accept struct { - Type, SubType string - Q float32 - Params map[string]string -} -Structure to represent a clause in an HTTP Accept Header - - -SUBDIRECTORIES - - .hg diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go deleted file mode 100644 index a21b9d15d..000000000 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ /dev/null @@ -1,160 +0,0 @@ -/* -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ -package goautoneg - -import ( - "sort" - "strconv" - "strings" -) - -// Structure to represent a clause in an HTTP Accept Header -type Accept struct { - Type, SubType string - Q float64 - Params map[string]string -} - -// For internal use, so that we can use the sort interface -type accept_slice []Accept - -func (accept accept_slice) Len() int { - slice := []Accept(accept) - return len(slice) -} - -func (accept accept_slice) Less(i, j int) bool { - slice := []Accept(accept) - ai, aj := slice[i], slice[j] - if ai.Q > aj.Q { - return true - } - if ai.Type != "*" && aj.Type == "*" { - return true - } - if ai.SubType != "*" && aj.SubType == "*" { - return true - } - return false -} - -func (accept accept_slice) Swap(i, j int) { - slice := []Accept(accept) - slice[i], slice[j] = slice[j], slice[i] -} - -// Parse an Accept Header string returning a sorted list -// of clauses -func ParseAccept(header string) (accept []Accept) { - parts := strings.Split(header, ",") - accept = make([]Accept, 0, len(parts)) - for _, part := range parts { - part := strings.Trim(part, " ") - - a := Accept{} - a.Params = make(map[string]string) - a.Q = 1.0 - - mrp := strings.Split(part, ";") - - media_range := mrp[0] - sp := strings.Split(media_range, "/") - a.Type = strings.Trim(sp[0], " ") - - switch { - case len(sp) == 1 && a.Type == "*": - a.SubType = "*" - case len(sp) == 2: - a.SubType = strings.Trim(sp[1], " ") - default: - continue - } - - if len(mrp) == 1 { - accept = append(accept, a) - continue - } - - for _, param := range mrp[1:] { - sp := strings.SplitN(param, "=", 2) - if len(sp) != 2 { - continue - } - token := strings.Trim(sp[0], " ") - if token == "q" { - a.Q, _ = strconv.ParseFloat(sp[1], 32) - } else { - a.Params[token] = strings.Trim(sp[1], " ") - } - } - - accept = append(accept, a) - } - - slice := accept_slice(accept) - sort.Sort(slice) - - return -} - -// Negotiate the most appropriate content_type given the accept header -// and a list of alternatives. -func Negotiate(header string, alternatives []string) (content_type string) { - asp := make([][]string, 0, len(alternatives)) - for _, ctype := range alternatives { - asp = append(asp, strings.SplitN(ctype, "/", 2)) - } - for _, clause := range ParseAccept(header) { - for i, ctsp := range asp { - if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { - content_type = alternatives[i] - return - } - if clause.Type == ctsp[0] && clause.SubType == "*" { - content_type = alternatives[i] - return - } - if clause.Type == "*" && clause.SubType == "*" { - content_type = alternatives[i] - return - } - } - } - return -} diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index 35e739c7a..80d1fe944 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -75,7 +75,12 @@ func (a *Alert) ResolvedAt(ts time.Time) bool { // Status returns the status of the alert. 
func (a *Alert) Status() AlertStatus { - if a.Resolved() { + return a.StatusAt(time.Now()) +} + +// StatusAt returns the status of the alert at the given timestamp. +func (a *Alert) StatusAt(ts time.Time) AlertStatus { + if a.ResolvedAt(ts) { return AlertResolved } return AlertFiring @@ -90,13 +95,13 @@ func (a *Alert) Validate() error { return fmt.Errorf("start time must be before end time") } if err := a.Labels.Validate(); err != nil { - return fmt.Errorf("invalid label set: %s", err) + return fmt.Errorf("invalid label set: %w", err) } if len(a.Labels) == 0 { return fmt.Errorf("at least one label pair required") } if err := a.Annotations.Validate(); err != nil { - return fmt.Errorf("invalid annotations: %s", err) + return fmt.Errorf("invalid annotations: %w", err) } return nil } @@ -127,6 +132,17 @@ func (as Alerts) HasFiring() bool { return false } +// HasFiringAt returns true iff one of the alerts is not resolved +// at the time ts. +func (as Alerts) HasFiringAt(ts time.Time) bool { + for _, a := range as { + if !a.ResolvedAt(ts) { + return true + } + } + return false +} + // Status returns StatusFiring iff at least one of the alerts is firing. func (as Alerts) Status() AlertStatus { if as.HasFiring() { @@ -134,3 +150,12 @@ func (as Alerts) Status() AlertStatus { } return AlertResolved } + +// StatusAt returns StatusFiring iff at least one of the alerts is firing +// at the time ts. +func (as Alerts) StatusAt(ts time.Time) AlertStatus { + if as.HasFiringAt(ts) { + return AlertFiring + } + return AlertResolved +} diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index ef8956335..3317ce22f 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -97,17 +97,25 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") // therewith. type LabelName string -// IsValid is true iff the label name matches the pattern of LabelNameRE. This -// method, however, does not use LabelNameRE for the check but a much faster -// hardcoded implementation. +// IsValid returns true iff name matches the pattern of LabelNameRE for legacy +// names, and iff it's valid UTF-8 if NameValidationScheme is set to +// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the +// check but a much faster hardcoded implementation. func (ln LabelName) IsValid() bool { if len(ln) == 0 { return false } - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false + switch NameValidationScheme { + case LegacyValidation: + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } } + case UTF8Validation: + return utf8.ValidString(string(ln)) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) } return true } @@ -164,7 +172,7 @@ func (l LabelNames) String() string { // A LabelValue is an associated value for a LabelName. type LabelValue string -// IsValid returns true iff the string is a valid UTF8. +// IsValid returns true iff the string is a valid UTF-8. 
func (lv LabelValue) IsValid() bool { return utf8.ValidString(string(lv)) } diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go index 6eda08a73..d0ad88da3 100644 --- a/vendor/github.com/prometheus/common/model/labelset.go +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -17,7 +17,6 @@ import ( "encoding/json" "fmt" "sort" - "strings" ) // A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet @@ -129,16 +128,6 @@ func (l LabelSet) Merge(other LabelSet) LabelSet { return result } -func (l LabelSet) String() string { - lstrs := make([]string, 0, len(l)) - for l, v := range l { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) - } - - sort.Strings(lstrs) - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} - // Fingerprint returns the LabelSet's fingerprint. func (ls LabelSet) Fingerprint() Fingerprint { return labelSetToFingerprint(ls) diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go new file mode 100644 index 000000000..481c47b46 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labelset_string.go @@ -0,0 +1,45 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.21 + +package model + +import ( + "bytes" + "slices" + "strconv" +) + +// String will look like `{foo="bar", more="less"}`. Names are sorted alphabetically. +func (l LabelSet) String() string { + var lna [32]string // On stack to avoid memory allocation for sorting names. + labelNames := lna[:0] + for name := range l { + labelNames = append(labelNames, string(name)) + } + slices.Sort(labelNames) + var bytea [1024]byte // On stack to avoid memory allocation while building the output. + b := bytes.NewBuffer(bytea[:0]) + b.WriteByte('{') + for i, name := range labelNames { + if i > 0 { + b.WriteString(", ") + } + b.WriteString(name) + b.WriteByte('=') + b.Write(strconv.AppendQuote(b.AvailableBuffer(), string(l[LabelName(name)]))) + } + b.WriteByte('}') + return b.String() +} diff --git a/vendor/github.com/prometheus/common/model/labelset_string_go120.go b/vendor/github.com/prometheus/common/model/labelset_string_go120.go new file mode 100644 index 000000000..c4212685e --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labelset_string_go120.go @@ -0,0 +1,39 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !go1.21 + +package model + +import ( + "fmt" + "sort" + "strings" +) + +// String was optimized using functions not available for go 1.20 +// or lower. We keep the old implementation for compatibility with client_golang. +// Once client golang drops support for go 1.20 (scheduled for August 2024), this +// file can be removed. +func (l LabelSet) String() string { + labelNames := make([]string, 0, len(l)) + for name := range l { + labelNames = append(labelNames, string(name)) + } + sort.Strings(labelNames) + lstrs := make([]string, 0, len(l)) + for _, name := range labelNames { + lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)])) + } + return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) +} diff --git a/vendor/github.com/prometheus/common/model/metadata.go b/vendor/github.com/prometheus/common/model/metadata.go new file mode 100644 index 000000000..447ab8ad6 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metadata.go @@ -0,0 +1,28 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +// MetricType represents metric type values. +type MetricType string + +const ( + MetricTypeCounter = MetricType("counter") + MetricTypeGauge = MetricType("gauge") + MetricTypeHistogram = MetricType("histogram") + MetricTypeGaugeHistogram = MetricType("gaugehistogram") + MetricTypeSummary = MetricType("summary") + MetricTypeInfo = MetricType("info") + MetricTypeStateset = MetricType("stateset") + MetricTypeUnknown = MetricType("unknown") +) diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index 00804b7fe..eb865e5a5 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -18,15 +18,84 @@ import ( "regexp" "sort" "strings" + "unicode/utf8" + + dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" ) var ( - // MetricNameRE is a regular expression matching valid metric - // names. Note that the IsValidMetricName function performs the same - // check but faster than a match with this regular expression. - MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) + // NameValidationScheme determines the method of name validation to be used by + // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 mode + // in isolation from other components that don't support UTF-8 may result in + // bugs or other undefined behavior. This value is intended to be set by + // UTF-8-aware binaries as part of their startup. To avoid need for locking, + // this value should be set once, ideally in an init(), before multiple + // goroutines are started. + NameValidationScheme = LegacyValidation + + // NameEscapingScheme defines the default way that names will be + // escaped when presented to systems that do not support UTF-8 names. If the + // Content-Type "escaping" term is specified, that will override this value. 
+	NameEscapingScheme = ValueEncodingEscaping
 )
+
+// ValidationScheme is a Go enum for determining how metric and label names will
+// be validated by this library.
+type ValidationScheme int
+
+const (
+	// LegacyValidation is a setting that requires that metric and label names
+	// conform to the original Prometheus character requirements described by
+	// MetricNameRE and LabelNameRE.
+	LegacyValidation ValidationScheme = iota
+
+	// UTF8Validation only requires that metric and label names be valid UTF-8
+	// strings.
+	UTF8Validation
+)
+
+type EscapingScheme int
+
+const (
+	// NoEscaping indicates that a name will not be escaped. Unescaped names that
+	// do not conform to the legacy validity check will use a new exposition
+	// format syntax that will be officially standardized in future versions.
+	NoEscaping EscapingScheme = iota
+
+	// UnderscoreEscaping replaces all legacy-invalid characters with underscores.
+	UnderscoreEscaping
+
+	// DotsEscaping is similar to UnderscoreEscaping, except that dots are
+	// converted to `_dot_` and pre-existing underscores are converted to `__`.
+	DotsEscaping
+
+	// ValueEncodingEscaping prepends the name with `U__` and replaces all invalid
+	// characters with the unicode value, surrounded by underscores. Single
+	// underscores are replaced with double underscores.
+	ValueEncodingEscaping
+)
+
+const (
+	// EscapingKey is the key in an Accept or Content-Type header that defines how
+	// metric and label names that do not conform to the legacy character
+	// requirements should be escaped when being scraped by a legacy prometheus
+	// system. If a system does not explicitly pass an escaping parameter in the
+	// Accept header, the default NameEscapingScheme will be used.
+	EscapingKey = "escaping"
+
+	// Possible values for Escaping Key:
+	AllowUTF8         = "allow-utf-8" // No escaping required.
+	EscapeUnderscores = "underscores"
+	EscapeDots        = "dots"
+	EscapeValues      = "values"
+)
+
+// MetricNameRE is a regular expression matching valid metric
+// names. Note that the IsValidMetricName function performs the same
+// check but faster than a match with this regular expression.
+var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
+
 // A Metric is similar to a LabelSet, but the key difference is that a Metric is
 // a singleton and refers to one and only one stream of samples.
 type Metric LabelSet
@@ -86,17 +155,303 @@ func (m Metric) FastFingerprint() Fingerprint {
 	return LabelSet(m).FastFingerprint()
 }
 
-// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
+// IsValidMetricName returns true iff name matches the pattern of MetricNameRE
+// for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is
+// selected.
+func IsValidMetricName(n LabelValue) bool {
+	switch NameValidationScheme {
+	case LegacyValidation:
+		return IsValidLegacyMetricName(n)
+	case UTF8Validation:
+		if len(n) == 0 {
+			return false
+		}
+		return utf8.ValidString(string(n))
+	default:
+		panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
+	}
+}
+
+// IsValidLegacyMetricName is similar to IsValidMetricName but always uses the
+// legacy validation scheme regardless of the value of NameValidationScheme.
 // This function, however, does not use MetricNameRE for the check but a much
 // faster hardcoded implementation.
-func IsValidMetricName(n LabelValue) bool {
+func IsValidLegacyMetricName(n LabelValue) bool {
 	if len(n) == 0 {
 		return false
 	}
 	for i, b := range n {
-		if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
+		if !isValidLegacyRune(b, i) {
 			return false
 		}
 	}
 	return true
 }
+
+// EscapeMetricFamily escapes the given metric names and labels with the given
+// escaping scheme. Returns a new object that uses the same pointers to fields
+// when possible and creates new escaped versions so as not to mutate the
+// input.
+func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricFamily {
+	if v == nil {
+		return nil
+	}
+
+	if scheme == NoEscaping {
+		return v
+	}
+
+	out := &dto.MetricFamily{
+		Help: v.Help,
+		Type: v.Type,
+		Unit: v.Unit,
+	}
+
+	// If the name is nil, copy as-is, don't try to escape.
+	if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) {
+		out.Name = v.Name
+	} else {
+		out.Name = proto.String(EscapeName(v.GetName(), scheme))
+	}
+	for _, m := range v.Metric {
+		if !metricNeedsEscaping(m) {
+			out.Metric = append(out.Metric, m)
+			continue
+		}
+
+		escaped := &dto.Metric{
+			Gauge:       m.Gauge,
+			Counter:     m.Counter,
+			Summary:     m.Summary,
+			Untyped:     m.Untyped,
+			Histogram:   m.Histogram,
+			TimestampMs: m.TimestampMs,
+		}
+
+		for _, l := range m.Label {
+			if l.GetName() == MetricNameLabel {
+				if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) {
+					escaped.Label = append(escaped.Label, l)
+					continue
+				}
+				escaped.Label = append(escaped.Label, &dto.LabelPair{
+					Name:  proto.String(MetricNameLabel),
+					Value: proto.String(EscapeName(l.GetValue(), scheme)),
+				})
+				continue
+			}
+			if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) {
+				escaped.Label = append(escaped.Label, l)
+				continue
+			}
+			escaped.Label = append(escaped.Label, &dto.LabelPair{
+				Name:  proto.String(EscapeName(l.GetName(), scheme)),
+				Value: l.Value,
+			})
+		}
+		out.Metric = append(out.Metric, escaped)
+	}
+	return out
+}
+
+func metricNeedsEscaping(m *dto.Metric) bool {
+	for _, l := range m.Label {
+		if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) {
+			return true
+		}
+		if !IsValidLegacyMetricName(LabelValue(l.GetName())) {
+			return true
+		}
+	}
+	return false
+}
+
+const (
+	lowerhex = "0123456789abcdef"
+)
+
+// EscapeName escapes the incoming name according to the provided escaping
+// scheme. Depending on the rules of escaping, this may cause no change in the
+// string that is returned. (Especially NoEscaping, which by definition is a
+// noop). This function does not do any validation of the name.
+func EscapeName(name string, scheme EscapingScheme) string {
+	if len(name) == 0 {
+		return name
+	}
+	var escaped strings.Builder
+	switch scheme {
+	case NoEscaping:
+		return name
+	case UnderscoreEscaping:
+		if IsValidLegacyMetricName(LabelValue(name)) {
+			return name
+		}
+		for i, b := range name {
+			if isValidLegacyRune(b, i) {
+				escaped.WriteRune(b)
+			} else {
+				escaped.WriteRune('_')
+			}
+		}
+		return escaped.String()
+	case DotsEscaping:
+		// Do not early return for legacy valid names, we still escape underscores.
+		for i, b := range name {
+			if b == '_' {
+				escaped.WriteString("__")
+			} else if b == '.' {
+				escaped.WriteString("_dot_")
+			} else if isValidLegacyRune(b, i) {
+				escaped.WriteRune(b)
+			} else {
+				escaped.WriteRune('_')
+			}
+		}
+		return escaped.String()
+	case ValueEncodingEscaping:
+		if IsValidLegacyMetricName(LabelValue(name)) {
+			return name
+		}
+		escaped.WriteString("U__")
+		for i, b := range name {
+			if isValidLegacyRune(b, i) {
+				escaped.WriteRune(b)
+			} else if !utf8.ValidRune(b) {
+				escaped.WriteString("_FFFD_")
+			} else if b < 0x100 {
+				escaped.WriteRune('_')
+				for s := 4; s >= 0; s -= 4 {
+					escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
+				}
+				escaped.WriteRune('_')
+			} else if b < 0x10000 {
+				escaped.WriteRune('_')
+				for s := 12; s >= 0; s -= 4 {
+					escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
+				}
+				escaped.WriteRune('_')
+			}
+		}
+		return escaped.String()
+	default:
+		panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
+	}
+}
+
+// lower function taken from strconv.atoi
+func lower(c byte) byte {
+	return c | ('x' - 'X')
+}
+
+// UnescapeName unescapes the incoming name according to the provided escaping
+// scheme if possible. Some schemes are partially or totally non-roundtripable.
+// If any error is encountered, returns the original input.
+func UnescapeName(name string, scheme EscapingScheme) string {
+	if len(name) == 0 {
+		return name
+	}
+	switch scheme {
+	case NoEscaping:
+		return name
+	case UnderscoreEscaping:
+		// It is not possible to unescape from underscore replacement.
+		return name
+	case DotsEscaping:
+		name = strings.ReplaceAll(name, "_dot_", ".")
+		name = strings.ReplaceAll(name, "__", "_")
+		return name
+	case ValueEncodingEscaping:
+		escapedName, found := strings.CutPrefix(name, "U__")
+		if !found {
+			return name
+		}
+
+		var unescaped strings.Builder
+	TOP:
+		for i := 0; i < len(escapedName); i++ {
+			// All non-underscores are treated normally.
+			if escapedName[i] != '_' {
+				unescaped.WriteByte(escapedName[i])
+				continue
+			}
+			i++
+			if i >= len(escapedName) {
+				return name
+			}
+			// A double underscore is a single underscore.
+			if escapedName[i] == '_' {
+				unescaped.WriteByte('_')
+				continue
+			}
+			// We think we are in a UTF-8 code, process it.
+			var utf8Val uint
+			for j := 0; i < len(escapedName); j++ {
+				// This is too many characters for a utf8 value.
+				if j > 4 {
+					return name
+				}
+				// Found a closing underscore, convert to a rune, check validity, and append.
+				if escapedName[i] == '_' {
+					utf8Rune := rune(utf8Val)
+					if !utf8.ValidRune(utf8Rune) {
+						return name
+					}
+					unescaped.WriteRune(utf8Rune)
+					continue TOP
+				}
+				r := lower(escapedName[i])
+				utf8Val *= 16
+				if r >= '0' && r <= '9' {
+					utf8Val += uint(r) - '0'
+				} else if r >= 'a' && r <= 'f' {
+					utf8Val += uint(r) - 'a' + 10
+				} else {
+					return name
+				}
+				i++
+			}
+			// Didn't find closing underscore, invalid.
+ return name + } + return unescaped.String() + default: + panic(fmt.Sprintf("invalid escaping scheme %d", scheme)) + } +} + +func isValidLegacyRune(b rune, i int) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0) +} + +func (e EscapingScheme) String() string { + switch e { + case NoEscaping: + return AllowUTF8 + case UnderscoreEscaping: + return EscapeUnderscores + case DotsEscaping: + return EscapeDots + case ValueEncodingEscaping: + return EscapeValues + default: + panic(fmt.Sprintf("unknown format scheme %d", e)) + } +} + +func ToEscapingScheme(s string) (EscapingScheme, error) { + if s == "" { + return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme") + } + switch s { + case AllowUTF8: + return NoEscaping, nil + case EscapeUnderscores: + return UnderscoreEscaping, nil + case EscapeDots: + return DotsEscaping, nil + case EscapeValues: + return ValueEncodingEscaping, nil + default: + return NoEscaping, fmt.Errorf("unknown format scheme " + s) + } +} diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go index 8762b13c6..dc8a0026c 100644 --- a/vendor/github.com/prometheus/common/model/signature.go +++ b/vendor/github.com/prometheus/common/model/signature.go @@ -22,10 +22,8 @@ import ( // when calculating their combined hash value (aka signature aka fingerprint). const SeparatorByte byte = 255 -var ( - // cache the signature of an empty label set. - emptyLabelSignature = hashNew() -) +// cache the signature of an empty label set. +var emptyLabelSignature = hashNew() // LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a // given label set. (Collisions are possible but unlikely if the number of label diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go index bb99889d2..910b0b71f 100644 --- a/vendor/github.com/prometheus/common/model/silence.go +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -81,7 +81,7 @@ func (s *Silence) Validate() error { } for _, m := range s.Matchers { if err := m.Validate(); err != nil { - return fmt.Errorf("invalid matcher: %s", err) + return fmt.Errorf("invalid matcher: %w", err) } } if s.StartsAt.IsZero() { diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index 9eb440413..8050637d8 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -21,14 +21,12 @@ import ( "strings" ) -var ( - // ZeroSample is the pseudo zero-value of Sample used to signal a - // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, - // and metric nil. Note that the natural zero value of Sample has a timestamp - // of 0, which is possible to appear in a real Sample and thus not suitable - // to signal a non-existing Sample. - ZeroSample = Sample{Timestamp: Earliest} -) +// ZeroSample is the pseudo zero-value of Sample used to signal a +// non-existing sample. It is a Sample with timestamp Earliest, value 0.0, +// and metric nil. Note that the natural zero value of Sample has a timestamp +// of 0, which is possible to appear in a real Sample and thus not suitable +// to signal a non-existing Sample. +var ZeroSample = Sample{Timestamp: Earliest} // Sample is a sample pair associated with a metric. A single sample must either // define Value or Histogram but not both. 
Histogram == nil implies the Value @@ -274,7 +272,7 @@ func (s *Scalar) UnmarshalJSON(b []byte) error { value, err := strconv.ParseFloat(f, 64) if err != nil { - return fmt.Errorf("error parsing sample value: %s", err) + return fmt.Errorf("error parsing sample value: %w", err) } s.Value = SampleValue(value) return nil diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go index 0f615a705..ae35cc2ab 100644 --- a/vendor/github.com/prometheus/common/model/value_float.go +++ b/vendor/github.com/prometheus/common/model/value_float.go @@ -20,14 +20,12 @@ import ( "strconv" ) -var ( - // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a - // non-existing sample pair. It is a SamplePair with timestamp Earliest and - // value 0.0. Note that the natural zero value of SamplePair has a timestamp - // of 0, which is possible to appear in a real SamplePair and thus not - // suitable to signal a non-existing SamplePair. - ZeroSamplePair = SamplePair{Timestamp: Earliest} -) +// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a +// non-existing sample pair. It is a SamplePair with timestamp Earliest and +// value 0.0. Note that the natural zero value of SamplePair has a timestamp +// of 0, which is possible to appear in a real SamplePair and thus not +// suitable to signal a non-existing SamplePair. +var ZeroSamplePair = SamplePair{Timestamp: Earliest} // A SampleValue is a representation of a value for a given sample at a given // time. diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index c24864a92..126df9e67 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -1,9 +1,16 @@ --- linters: enable: + - errcheck - godot + - gosimple + - govet + - ineffassign - misspell - revive + - staticcheck + - testifylint + - unused linter-settings: godot: diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md index 56ba67d3e..e00f3b365 100644 --- a/vendor/github.com/prometheus/procfs/MAINTAINERS.md +++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md @@ -1,2 +1,3 @@ * Johannes 'fish' Ziemke @discordianfish -* Paul Gier @pgier +* Paul Gier @pgier +* Ben Kochie @SuperQ diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 062a28185..161729235 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -49,23 +49,23 @@ endif GOTEST := $(GO) test GOTEST_DIR := ifneq ($(CIRCLE_JOB),) -ifneq ($(shell command -v gotestsum > /dev/null),) +ifneq ($(shell command -v gotestsum 2> /dev/null),) GOTEST_DIR := test-results GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- endif endif -PROMU_VERSION ?= 0.15.0 +PROMU_VERSION ?= 0.17.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.54.2 -# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. +GOLANGCI_LINT_VERSION ?= v1.59.0 +# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. 
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) - ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) + ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64)) # If we're in CI and there is an Actions file, that means the linter # is being run in Actions, so we don't need to run it here. ifneq (,$(SKIP_GOLANGCI_LINT)) @@ -169,16 +169,20 @@ common-vet: common-lint: $(GOLANGCI_LINT) ifdef GOLANGCI_LINT @echo ">> running golangci-lint" -# 'go list' needs to be executed before staticcheck to prepopulate the modules cache. -# Otherwise staticcheck might fail randomly for some reason not yet explained. - $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) endif +.PHONY: common-lint-fix +common-lint-fix: $(GOLANGCI_LINT) +ifdef GOLANGCI_LINT + @echo ">> running golangci-lint fix" + $(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs) +endif + .PHONY: common-yamllint common-yamllint: @echo ">> running yamllint on all YAML files in the repository" -ifeq (, $(shell command -v yamllint > /dev/null)) +ifeq (, $(shell command -v yamllint 2> /dev/null)) @echo "yamllint not installed so skipping" else yamllint . @@ -204,6 +208,10 @@ common-tarball: promu @echo ">> building release tarball" $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) +.PHONY: common-docker-repo-name +common-docker-repo-name: + @echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)" + .PHONY: common-docker $(BUILD_DOCKER_ARCHS) common-docker: $(BUILD_DOCKER_ARCHS) $(BUILD_DOCKER_ARCHS): common-docker-%: diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go index 28783e2dd..cdcc8a7cc 100644 --- a/vendor/github.com/prometheus/procfs/arp.go +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -55,7 +55,7 @@ type ARPEntry struct { func (fs FS) GatherARPEntries() ([]ARPEntry, error) { data, err := os.ReadFile(fs.proc.Path("net/arp")) if err != nil { - return nil, fmt.Errorf("%s: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err) + return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err) } return parseARPEntries(data) @@ -78,11 +78,11 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) { } else if width == expectedDataWidth { entry, err := parseARPEntry(columns) if err != nil { - return []ARPEntry{}, fmt.Errorf("%s: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err) + return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err) } entries = append(entries, entry) } else { - return []ARPEntry{}, fmt.Errorf("%s: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err) + return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err) } } diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go index 4a173636c..838075009 100644 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -58,8 +58,8 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts) } - node := strings.TrimRight(parts[1], ",") - zone := strings.TrimRight(parts[3], ",") + node := strings.TrimSuffix(parts[1], ",") + zone := strings.TrimSuffix(parts[3], ",") arraySize := len(parts[4:]) if bucketCount == -1 { @@ -74,7 +74,7 @@ func 
parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { for i := 0; i < arraySize; i++ { sizes[i], err = strconv.ParseFloat(parts[i+4], 64) if err != nil { - return nil, fmt.Errorf("%s: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err) + return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err) } } diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index f4f5501c6..f0950bb49 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -194,7 +194,7 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) { firstLine := firstNonEmptyLine(scanner) match, err := regexp.MatchString("^[Pp]rocessor", firstLine) if !match || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("%s: Cannot parse line: %q: %w", ErrFileParse, firstLine, err) + return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err) } field := strings.SplitN(firstLine, ": ", 2) @@ -386,7 +386,7 @@ func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) { // find the first "processor" line firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { - return nil, errors.New("invalid cpuinfo file: " + firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go index 9a73e2639..5f2a37a78 100644 --- a/vendor/github.com/prometheus/procfs/crypto.go +++ b/vendor/github.com/prometheus/procfs/crypto.go @@ -55,13 +55,13 @@ func (fs FS) Crypto() ([]Crypto, error) { path := fs.proc.Path("crypto") b, err := util.ReadFileNoStat(path) if err != nil { - return nil, fmt.Errorf("%s: Cannot read file %v: %w", ErrFileRead, b, err) + return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err) } crypto, err := parseCrypto(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, crypto, err) + return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err) } return crypto, nil @@ -84,7 +84,7 @@ func parseCrypto(r io.Reader) ([]Crypto, error) { kv := strings.Split(text, ":") if len(kv) != 2 { - return nil, fmt.Errorf("%w: Cannot parae line: %q", ErrFileParse, text) + return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text) } k := strings.TrimSpace(kv[0]) diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go index f560a8db3..cf2e3eaa0 100644 --- a/vendor/github.com/prometheus/procfs/fscache.go +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) { m, err := parseFscacheinfo(bytes.NewReader(b)) if err != nil { - return Fscacheinfo{}, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, m, err) + return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, m, err) } return *m, nil @@ -245,7 +245,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) { func setFSCacheFields(fields []string, setFields ...*uint64) error { var err error if len(fields) < len(setFields) { - return fmt.Errorf("%s: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err) + return fmt.Errorf("%w: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err) } for i := range setFields { diff 
--git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go index 5a145bbfe..bc3a20c93 100644 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -221,16 +221,16 @@ func parseIPPort(s string) (net.IP, uint16, error) { case 46: ip = net.ParseIP(s[1:40]) if ip == nil { - return nil, 0, fmt.Errorf("%s: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err) + return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err) } default: - return nil, 0, fmt.Errorf("%s: Unexpected IP:Port %s: %w", ErrFileParse, s, err) + return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err) } portString := s[len(s)-4:] if len(portString) != 4 { return nil, 0, - fmt.Errorf("%s: Unexpected port string format %s: %w", ErrFileParse, portString, err) + fmt.Errorf("%w: Unexpected port string format %s: %w", ErrFileParse, portString, err) } port, err := strconv.ParseUint(portString, 16, 16) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go index 59465c5bb..332e76c17 100644 --- a/vendor/github.com/prometheus/procfs/loadavg.go +++ b/vendor/github.com/prometheus/procfs/loadavg.go @@ -51,7 +51,7 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { for i, load := range parts[0:3] { loads[i], err = strconv.ParseFloat(load, 64) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse load: %f: %w", ErrFileParse, loads[i], err) + return nil, fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err) } } return &LoadAvg{ diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index fdd4b9544..67a9d2b44 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -23,7 +23,7 @@ import ( var ( statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`) - recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`) + recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`) recoveryLinePctRE = regexp.MustCompile(`= (.+)%`) recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`) recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`) @@ -50,6 +50,8 @@ type MDStat struct { BlocksTotal int64 // Number of blocks on the device that are in sync. BlocksSynced int64 + // Number of blocks on the device that need to be synced. 
+ BlocksToBeSynced int64 // progress percentage of current sync BlocksSyncedPct float64 // estimated finishing time for current sync (in minutes) @@ -70,7 +72,7 @@ func (fs FS) MDStat() ([]MDStat, error) { } mdstat, err := parseMDStat(data) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err) + return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err) } return mdstat, nil } @@ -90,7 +92,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { deviceFields := strings.Fields(line) if len(deviceFields) < 3 { - return nil, fmt.Errorf("%s: Expected 3+ lines, got %q", ErrFileParse, line) + return nil, fmt.Errorf("%w: Expected 3+ lines, got %q", ErrFileParse, line) } mdName := deviceFields[0] // mdx state := deviceFields[2] // active or inactive @@ -105,7 +107,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { active, total, down, size, err := evalStatusLine(lines[i], lines[i+1]) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse md device lines: %v: %w", ErrFileParse, active, err) + return nil, fmt.Errorf("%w: Cannot parse md device lines: %v: %w", ErrFileParse, active, err) } syncLineIdx := i + 2 @@ -115,7 +117,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { // If device is syncing at the moment, get the number of currently // synced bytes, otherwise that number equals the size of the device. - syncedBlocks := size + blocksSynced := size + blocksToBeSynced := size speed := float64(0) finish := float64(0) pct := float64(0) @@ -136,11 +139,11 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { // Handle case when resync=PENDING or resync=DELAYED. if strings.Contains(lines[syncLineIdx], "PENDING") || strings.Contains(lines[syncLineIdx], "DELAYED") { - syncedBlocks = 0 + blocksSynced = 0 } else { - syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) + blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err) + return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err) } } } @@ -154,7 +157,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { DisksSpare: spare, DisksTotal: total, BlocksTotal: size, - BlocksSynced: syncedBlocks, + BlocksSynced: blocksSynced, + BlocksToBeSynced: blocksToBeSynced, BlocksSyncedPct: pct, BlocksSyncedFinishTime: finish, BlocksSyncedSpeed: speed, @@ -168,13 +172,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { statusFields := strings.Fields(statusLine) if len(statusFields) < 1 { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } sizeStr := statusFields[0] size, err = strconv.ParseInt(sizeStr, 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { @@ -189,65 +193,71 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in matches := statusLineRE.FindStringSubmatch(statusLine) if 
len(matches) != 5 { - return 0, 0, 0, 0, fmt.Errorf("%s: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err) } total, err = strconv.ParseInt(matches[2], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } active, err = strconv.ParseInt(matches[3], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected active %d: %w", ErrFileParse, active, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err) } down = int64(strings.Count(matches[4], "_")) return active, total, down, size, nil } -func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) { +func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) { matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err) + return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err) } - syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) + blocks := strings.Split(matches[1], "/") + blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err) + return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err) + } + + blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64) + if err != nil { + return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery to be synced blocks %q: %w", ErrFileParse, matches[2], err) } // Get percentage complete matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine) } pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64) if err != nil { - return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine) } // Get time expected left to complete matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. 
finish time: %s", ErrFileParse, recoveryLine) } finish, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine) } // Get recovery speed matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine) } speed, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, finish, 0, fmt.Errorf("%s: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err) + return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err) } - return syncedBlocks, pct, finish, speed, nil + return blocksSynced, blocksToBeSynced, pct, finish, speed, nil } func evalComponentDevices(deviceFields []string) []string { diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go index eaf00e224..4b2c4050a 100644 --- a/vendor/github.com/prometheus/procfs/meminfo.go +++ b/vendor/github.com/prometheus/procfs/meminfo.go @@ -126,6 +126,7 @@ type Meminfo struct { VmallocUsed *uint64 // largest contiguous block of vmalloc area which is free VmallocChunk *uint64 + Percpu *uint64 HardwareCorrupted *uint64 AnonHugePages *uint64 ShmemHugePages *uint64 @@ -140,6 +141,55 @@ type Meminfo struct { DirectMap4k *uint64 DirectMap2M *uint64 DirectMap1G *uint64 + + // The struct fields below are the byte-normalized counterparts to the + // existing struct fields. Values are normalized using the optional + // unit field in the meminfo line. + MemTotalBytes *uint64 + MemFreeBytes *uint64 + MemAvailableBytes *uint64 + BuffersBytes *uint64 + CachedBytes *uint64 + SwapCachedBytes *uint64 + ActiveBytes *uint64 + InactiveBytes *uint64 + ActiveAnonBytes *uint64 + InactiveAnonBytes *uint64 + ActiveFileBytes *uint64 + InactiveFileBytes *uint64 + UnevictableBytes *uint64 + MlockedBytes *uint64 + SwapTotalBytes *uint64 + SwapFreeBytes *uint64 + DirtyBytes *uint64 + WritebackBytes *uint64 + AnonPagesBytes *uint64 + MappedBytes *uint64 + ShmemBytes *uint64 + SlabBytes *uint64 + SReclaimableBytes *uint64 + SUnreclaimBytes *uint64 + KernelStackBytes *uint64 + PageTablesBytes *uint64 + NFSUnstableBytes *uint64 + BounceBytes *uint64 + WritebackTmpBytes *uint64 + CommitLimitBytes *uint64 + CommittedASBytes *uint64 + VmallocTotalBytes *uint64 + VmallocUsedBytes *uint64 + VmallocChunkBytes *uint64 + PercpuBytes *uint64 + HardwareCorruptedBytes *uint64 + AnonHugePagesBytes *uint64 + ShmemHugePagesBytes *uint64 + ShmemPmdMappedBytes *uint64 + CmaTotalBytes *uint64 + CmaFreeBytes *uint64 + HugepagesizeBytes *uint64 + DirectMap4kBytes *uint64 + DirectMap2MBytes *uint64 + DirectMap1GBytes *uint64 } // Meminfo returns an information about current kernel/system memory statistics. 
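Aside: the Meminfo changes in the hunks above and below add byte-normalized counterparts to every kB-reported field, derived from the optional unit column of /proc/meminfo (unitless lines are taken as bytes as-is, "kB" lines are multiplied by 1024). A minimal usage sketch, not part of the diff itself, assuming the vendored procfs API and a readable /proc mount:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	mi, err := fs.Meminfo()
	if err != nil {
		panic(err)
	}
	// Prefer the new normalized field; it is nil only when the kernel
	// omits the MemTotal line entirely.
	if mi.MemTotalBytes != nil {
		fmt.Printf("MemTotal: %d bytes\n", *mi.MemTotalBytes)
	} else if mi.MemTotal != nil {
		// Legacy field: the raw value as printed, which is kB for this line.
		fmt.Printf("MemTotal: %d kB\n", *mi.MemTotal)
	}
}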
@@ -152,7 +202,7 @@ func (fs FS) Meminfo() (Meminfo, error) {
 	m, err := parseMemInfo(bytes.NewReader(b))
 	if err != nil {
-		return Meminfo{}, fmt.Errorf("%s: %w", ErrFileParse, err)
+		return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err)
 	}
 
 	return *m, nil
@@ -162,114 +212,176 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
 	var m Meminfo
 	s := bufio.NewScanner(r)
 	for s.Scan() {
-		// Each line has at least a name and value; we ignore the unit.
 		fields := strings.Fields(s.Text())
-		if len(fields) < 2 {
-			return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
-		}
+		var val, valBytes uint64
 
-		v, err := strconv.ParseUint(fields[1], 0, 64)
+		val, err := strconv.ParseUint(fields[1], 0, 64)
 		if err != nil {
 			return nil, err
 		}
 
+		switch len(fields) {
+		case 2:
+			// No unit present, use the parsed value as bytes directly.
+			valBytes = val
+		case 3:
+			// Unit present in optional 3rd field, convert it to
+			// bytes. The only unit supported within the Linux
+			// kernel is `kB`.
+			if fields[2] != "kB" {
+				return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2])
+			}
+
+			valBytes = 1024 * val
+
+		default:
+			return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
+		}
+
 		switch fields[0] {
 		case "MemTotal:":
-			m.MemTotal = &v
+			m.MemTotal = &val
+			m.MemTotalBytes = &valBytes
 		case "MemFree:":
-			m.MemFree = &v
+			m.MemFree = &val
+			m.MemFreeBytes = &valBytes
 		case "MemAvailable:":
-			m.MemAvailable = &v
+			m.MemAvailable = &val
+			m.MemAvailableBytes = &valBytes
 		case "Buffers:":
-			m.Buffers = &v
+			m.Buffers = &val
+			m.BuffersBytes = &valBytes
 		case "Cached:":
-			m.Cached = &v
+			m.Cached = &val
+			m.CachedBytes = &valBytes
 		case "SwapCached:":
-			m.SwapCached = &v
+			m.SwapCached = &val
+			m.SwapCachedBytes = &valBytes
 		case "Active:":
-			m.Active = &v
+			m.Active = &val
+			m.ActiveBytes = &valBytes
 		case "Inactive:":
-			m.Inactive = &v
+			m.Inactive = &val
+			m.InactiveBytes = &valBytes
 		case "Active(anon):":
-			m.ActiveAnon = &v
+			m.ActiveAnon = &val
+			m.ActiveAnonBytes = &valBytes
 		case "Inactive(anon):":
-			m.InactiveAnon = &v
+			m.InactiveAnon = &val
+			m.InactiveAnonBytes = &valBytes
 		case "Active(file):":
-			m.ActiveFile = &v
+			m.ActiveFile = &val
+			m.ActiveFileBytes = &valBytes
 		case "Inactive(file):":
-			m.InactiveFile = &v
+			m.InactiveFile = &val
+			m.InactiveFileBytes = &valBytes
 		case "Unevictable:":
-			m.Unevictable = &v
+			m.Unevictable = &val
+			m.UnevictableBytes = &valBytes
 		case "Mlocked:":
-			m.Mlocked = &v
+			m.Mlocked = &val
+			m.MlockedBytes = &valBytes
 		case "SwapTotal:":
-			m.SwapTotal = &v
+			m.SwapTotal = &val
+			m.SwapTotalBytes = &valBytes
 		case "SwapFree:":
-			m.SwapFree = &v
+			m.SwapFree = &val
+			m.SwapFreeBytes = &valBytes
 		case "Dirty:":
-			m.Dirty = &v
+			m.Dirty = &val
+			m.DirtyBytes = &valBytes
 		case "Writeback:":
-			m.Writeback = &v
+			m.Writeback = &val
+			m.WritebackBytes = &valBytes
 		case "AnonPages:":
-			m.AnonPages = &v
+			m.AnonPages = &val
+			m.AnonPagesBytes = &valBytes
 		case "Mapped:":
-			m.Mapped = &v
+			m.Mapped = &val
+			m.MappedBytes = &valBytes
 		case "Shmem:":
-			m.Shmem = &v
+			m.Shmem = &val
+			m.ShmemBytes = &valBytes
 		case "Slab:":
-			m.Slab = &v
+			m.Slab = &val
+			m.SlabBytes = &valBytes
 		case "SReclaimable:":
-			m.SReclaimable = &v
+			m.SReclaimable = &val
+			m.SReclaimableBytes = &valBytes
 		case "SUnreclaim:":
-			m.SUnreclaim = &v
+			m.SUnreclaim = &val
+			m.SUnreclaimBytes = &valBytes
 		case "KernelStack:":
-			m.KernelStack = &v
+			m.KernelStack = &val
+			m.KernelStackBytes = &valBytes
 		case "PageTables:":
-			m.PageTables = &v
+			m.PageTables = &val
+			m.PageTablesBytes = &valBytes
 		case "NFS_Unstable:":
-			m.NFSUnstable = &v
+			m.NFSUnstable = &val
+			m.NFSUnstableBytes = &valBytes
 		case "Bounce:":
-			m.Bounce = &v
+			m.Bounce = &val
+			m.BounceBytes = &valBytes
 		case "WritebackTmp:":
-			m.WritebackTmp = &v
+			m.WritebackTmp = &val
+			m.WritebackTmpBytes = &valBytes
 		case "CommitLimit:":
-			m.CommitLimit = &v
+			m.CommitLimit = &val
+			m.CommitLimitBytes = &valBytes
 		case "Committed_AS:":
-			m.CommittedAS = &v
+			m.CommittedAS = &val
+			m.CommittedASBytes = &valBytes
 		case "VmallocTotal:":
-			m.VmallocTotal = &v
+			m.VmallocTotal = &val
+			m.VmallocTotalBytes = &valBytes
 		case "VmallocUsed:":
-			m.VmallocUsed = &v
+			m.VmallocUsed = &val
+			m.VmallocUsedBytes = &valBytes
 		case "VmallocChunk:":
-			m.VmallocChunk = &v
+			m.VmallocChunk = &val
+			m.VmallocChunkBytes = &valBytes
+		case "Percpu:":
+			m.Percpu = &val
+			m.PercpuBytes = &valBytes
 		case "HardwareCorrupted:":
-			m.HardwareCorrupted = &v
+			m.HardwareCorrupted = &val
+			m.HardwareCorruptedBytes = &valBytes
 		case "AnonHugePages:":
-			m.AnonHugePages = &v
+			m.AnonHugePages = &val
+			m.AnonHugePagesBytes = &valBytes
 		case "ShmemHugePages:":
-			m.ShmemHugePages = &v
+			m.ShmemHugePages = &val
+			m.ShmemHugePagesBytes = &valBytes
 		case "ShmemPmdMapped:":
-			m.ShmemPmdMapped = &v
+			m.ShmemPmdMapped = &val
+			m.ShmemPmdMappedBytes = &valBytes
 		case "CmaTotal:":
-			m.CmaTotal = &v
+			m.CmaTotal = &val
+			m.CmaTotalBytes = &valBytes
 		case "CmaFree:":
-			m.CmaFree = &v
+			m.CmaFree = &val
+			m.CmaFreeBytes = &valBytes
 		case "HugePages_Total:":
-			m.HugePagesTotal = &v
+			m.HugePagesTotal = &val
 		case "HugePages_Free:":
-			m.HugePagesFree = &v
+			m.HugePagesFree = &val
 		case "HugePages_Rsvd:":
-			m.HugePagesRsvd = &v
+			m.HugePagesRsvd = &val
 		case "HugePages_Surp:":
-			m.HugePagesSurp = &v
+			m.HugePagesSurp = &val
 		case "Hugepagesize:":
-			m.Hugepagesize = &v
+			m.Hugepagesize = &val
+			m.HugepagesizeBytes = &valBytes
 		case "DirectMap4k:":
-			m.DirectMap4k = &v
+			m.DirectMap4k = &val
+			m.DirectMap4kBytes = &valBytes
 		case "DirectMap2M:":
-			m.DirectMap2M = &v
+			m.DirectMap2M = &val
+			m.DirectMap2MBytes = &valBytes
 		case "DirectMap1G:":
-			m.DirectMap1G = &v
+			m.DirectMap1G = &val
+			m.DirectMap1GBytes = &valBytes
 		}
 	}
diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go
index 388ebf396..a704c5e73 100644
--- a/vendor/github.com/prometheus/procfs/mountinfo.go
+++ b/vendor/github.com/prometheus/procfs/mountinfo.go
@@ -109,7 +109,7 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
 	if mountInfo[6] != "" {
 		mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
 		if err != nil {
-			return nil, fmt.Errorf("%s: %w", ErrFileParse, err)
+			return nil, fmt.Errorf("%w: %w", ErrFileParse, err)
 		}
 	}
 	return mount, nil
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
index 9d8af6db7..75a3b6c81 100644
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -88,7 +88,7 @@ type MountStatsNFS struct {
 	// Statistics broken down by filesystem operation.
 	Operations []NFSOperationStats
 	// Statistics about the NFS RPC transport.
-	Transport NFSTransportStats
+	Transport []NFSTransportStats
 }
 
 // mountStats implements MountStats.
@@ -194,8 +194,6 @@ type NFSOperationStats struct {
 	CumulativeTotalResponseMilliseconds uint64
 	// Duration from when a request was enqueued to when it was completely handled.
 	CumulativeTotalRequestMilliseconds uint64
-	// The average time from the point the client sends RPC requests until it receives the response.
-	AverageRTTMilliseconds float64
 	// The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
 	Errors uint64
 }
@@ -434,7 +432,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
 			return nil, err
 		}
 
-		stats.Transport = *tstats
+		stats.Transport = append(stats.Transport, *tstats)
 	}
 
 	// When encountering "per-operation statistics", we must break this
@@ -582,9 +580,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
 			CumulativeTotalResponseMilliseconds: ns[6],
 			CumulativeTotalRequestMilliseconds:  ns[7],
 		}
-		if ns[0] != 0 {
-			opStats.AverageRTTMilliseconds = float64(ns[6]) / float64(ns[0])
-		}
 
 		if len(ns) > 8 {
 			opStats.Errors = ns[8]
@@ -632,7 +627,7 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
 			return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol)
 		}
 	default:
-		return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
+		return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
 	}
 
 	// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
index fdfa45611..316df5fbb 100644
--- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go
+++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
@@ -58,7 +58,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
 	stat, err := parseConntrackStat(bytes.NewReader(b))
 	if err != nil {
-		return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, path, err)
+		return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err)
 	}
 
 	return stat, nil
@@ -86,7 +86,7 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
 func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
 	entries, err := util.ParseHexUint64s(fields)
 	if err != nil {
-		return nil, fmt.Errorf("%s: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
+		return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
 	}
 	numEntries := len(entries)
 	if numEntries < 16 || numEntries > 17 {
diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go
index 4da81ea57..b70f1fc7a 100644
--- a/vendor/github.com/prometheus/procfs/net_ip_socket.go
+++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go
@@ -50,10 +50,13 @@ type (
 		// UsedSockets shows the total number of parsed lines representing the
 		// number of used sockets.
 		UsedSockets uint64
+		// Drops shows the total number of dropped packets of all UDP sockets.
+		Drops *uint64
 	}
 
 	// netIPSocketLine represents the fields parsed from a single line
 	// in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
+	// Drops is non-nil for udp{,6}, but nil for tcp{,6}.
 	// For the proc file format details, see https://linux.die.net/man/5/proc.
netIPSocketLine struct { Sl uint64 @@ -66,6 +69,7 @@ type ( RxQueue uint64 UID uint64 Inode uint64 + Drops *uint64 } ) @@ -77,13 +81,14 @@ func newNetIPSocket(file string) (NetIPSocket, error) { defer f.Close() var netIPSocket NetIPSocket + isUDP := strings.Contains(file, "udp") lr := io.LimitReader(f, readLimit) s := bufio.NewScanner(lr) s.Scan() // skip first line with headers for s.Scan() { fields := strings.Fields(s.Text()) - line, err := parseNetIPSocketLine(fields) + line, err := parseNetIPSocketLine(fields, isUDP) if err != nil { return nil, err } @@ -104,19 +109,25 @@ func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) { defer f.Close() var netIPSocketSummary NetIPSocketSummary + var udpPacketDrops uint64 + isUDP := strings.Contains(file, "udp") lr := io.LimitReader(f, readLimit) s := bufio.NewScanner(lr) s.Scan() // skip first line with headers for s.Scan() { fields := strings.Fields(s.Text()) - line, err := parseNetIPSocketLine(fields) + line, err := parseNetIPSocketLine(fields, isUDP) if err != nil { return nil, err } netIPSocketSummary.TxQueueLength += line.TxQueue netIPSocketSummary.RxQueueLength += line.RxQueue netIPSocketSummary.UsedSockets++ + if isUDP { + udpPacketDrops += *line.Drops + netIPSocketSummary.Drops = &udpPacketDrops + } } if err := s.Err(); err != nil { return nil, err @@ -130,7 +141,7 @@ func parseIP(hexIP string) (net.IP, error) { var byteIP []byte byteIP, err := hex.DecodeString(hexIP) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err) + return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err) } switch len(byteIP) { case 4: @@ -144,12 +155,12 @@ func parseIP(hexIP string) (net.IP, error) { } return i, nil default: - return nil, fmt.Errorf("%s: Unable to parse IP %s: %w", ErrFileParse, hexIP, nil) + return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil) } } // parseNetIPSocketLine parses a single line, represented by a list of fields. 
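Because `Drops` stays nil for the tcp{,6} files, callers must nil-check before dereferencing. A sketch of reading the new counter through procfs's existing summary accessor for /proc/net/udp (illustrative; `fs` is a `procfs.FS` as in the earlier sketch):

```go
// NetUDPSummary parses /proc/net/udp, so isUDP is true and the
// summary's Drops pointer is accumulated from per-socket drop counts.
sum, err := fs.NetUDPSummary()
if err != nil {
	log.Fatal(err)
}
if sum.Drops != nil {
	fmt.Printf("udp sockets: %d, dropped packets: %d\n",
		sum.UsedSockets, *sum.Drops)
}
```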
-func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { +func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) { line := &netIPSocketLine{} if len(fields) < 10 { return nil, fmt.Errorf( @@ -167,7 +178,7 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { } if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { - return nil, fmt.Errorf("%s: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err) + return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err) } // local_address l := strings.Split(fields[1], ":") @@ -178,7 +189,7 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { return nil, err } if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err) + return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err) } // remote_address @@ -190,12 +201,12 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { return nil, err } if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err) + return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err) } // st if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse st value in %q: %w", ErrFileParse, line.St, err) + return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err) } // tx_queue and rx_queue @@ -208,20 +219,29 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { ) } if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err) + return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err) } if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err) + return nil, fmt.Errorf("%w: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err) } // uid if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err) + return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err) } // inode if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err) + return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err) + } + + // drops + if isUDP { + drops, err := strconv.ParseUint(fields[12], 0, 64) + if err != nil { + return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, drops, err) + } + line.Drops = &drops } return line, nil diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go index 360e36af7..fae62b13d 100644 --- a/vendor/github.com/prometheus/procfs/net_sockstat.go +++ b/vendor/github.com/prometheus/procfs/net_sockstat.go @@ -69,7 +69,7 @@ func readSockstat(name string) (*NetSockstat, error) { stat, err := 
parseSockstat(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: sockstats from %q: %w", ErrFileRead, name, err) + return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err) } return stat, nil @@ -89,7 +89,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) { // The remaining fields are key/value pairs. kvs, err := parseSockstatKVs(fields[1:]) if err != nil { - return nil, fmt.Errorf("%s: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err) + return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err) } // The first field is the protocol. We must trim its colon suffix. diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index c77085291..71c8059f4 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -64,7 +64,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { entries, err := parseSoftnet(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: /proc/net/softnet_stat: %w", ErrFileParse, err) + return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err) } return entries, nil diff --git a/vendor/github.com/prometheus/procfs/net_tls_stat.go b/vendor/github.com/prometheus/procfs/net_tls_stat.go new file mode 100644 index 000000000..13994c178 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_tls_stat.go @@ -0,0 +1,119 @@ +// Copyright 2023 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// TLSStat struct represents data in /proc/net/tls_stat. +// See https://docs.kernel.org/networking/tls.html#statistics +type TLSStat struct { + // number of TX sessions currently installed where host handles cryptography + TLSCurrTxSw int + // number of RX sessions currently installed where host handles cryptography + TLSCurrRxSw int + // number of TX sessions currently installed where NIC handles cryptography + TLSCurrTxDevice int + // number of RX sessions currently installed where NIC handles cryptography + TLSCurrRxDevice int + //number of TX sessions opened with host cryptography + TLSTxSw int + //number of RX sessions opened with host cryptography + TLSRxSw int + // number of TX sessions opened with NIC cryptography + TLSTxDevice int + // number of RX sessions opened with NIC cryptography + TLSRxDevice int + // record decryption failed (e.g. due to incorrect authentication tag) + TLSDecryptError int + // number of RX resyncs sent to NICs handling cryptography + TLSRxDeviceResync int + // number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records. + TLSDecryptRetry int + // number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. 
+ TLSRxNoPadViolation int +} + +// NewTLSStat reads the tls_stat statistics. +func NewTLSStat() (TLSStat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return TLSStat{}, err + } + + return fs.NewTLSStat() +} + +// NewTLSStat reads the tls_stat statistics. +func (fs FS) NewTLSStat() (TLSStat, error) { + file, err := os.Open(fs.proc.Path("net/tls_stat")) + if err != nil { + return TLSStat{}, err + } + defer file.Close() + + var ( + tlsstat = TLSStat{} + s = bufio.NewScanner(file) + ) + + for s.Scan() { + fields := strings.Fields(s.Text()) + + if len(fields) != 2 { + return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text()) + } + + name := fields[0] + value, err := strconv.Atoi(fields[1]) + if err != nil { + return TLSStat{}, err + } + + switch name { + case "TlsCurrTxSw": + tlsstat.TLSCurrTxSw = value + case "TlsCurrRxSw": + tlsstat.TLSCurrRxSw = value + case "TlsCurrTxDevice": + tlsstat.TLSCurrTxDevice = value + case "TlsCurrRxDevice": + tlsstat.TLSCurrRxDevice = value + case "TlsTxSw": + tlsstat.TLSTxSw = value + case "TlsRxSw": + tlsstat.TLSRxSw = value + case "TlsTxDevice": + tlsstat.TLSTxDevice = value + case "TlsRxDevice": + tlsstat.TLSRxDevice = value + case "TlsDecryptError": + tlsstat.TLSDecryptError = value + case "TlsRxDeviceResync": + tlsstat.TLSRxDeviceResync = value + case "TlsDecryptRetry": + tlsstat.TLSDecryptRetry = value + case "TlsRxNoPadViolation": + tlsstat.TLSRxNoPadViolation = value + } + + } + + return tlsstat, s.Err() +} diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go index acbbc57ea..d868cebda 100644 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) { line := s.Text() item, err := nu.parseLine(line, hasInode, minFields) if err != nil { - return nil, fmt.Errorf("%s: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err) + return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err) } nu.Rows = append(nu.Rows, item) } if err := s.Err(); err != nil { - return nil, fmt.Errorf("%s: /proc/net/unix encountered data: %w", ErrFileParse, err) + return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err) } return &nu, nil @@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, users, err := u.parseUsers(fields[1]) if err != nil { - return nil, fmt.Errorf("%s: ref count %q: %w", ErrFileParse, fields[1], err) + return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err) } flags, err := u.parseFlags(fields[3]) if err != nil { - return nil, fmt.Errorf("%s: Unable to parse flags %q: %w", ErrFileParse, fields[3], err) + return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err) } typ, err := u.parseType(fields[4]) if err != nil { - return nil, fmt.Errorf("%s: Failed to parse type %q: %w", ErrFileParse, fields[4], err) + return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err) } state, err := u.parseState(fields[5]) if err != nil { - return nil, fmt.Errorf("%s: Failed to parse state %q: %w", ErrFileParse, fields[5], err) + return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err) } var inode uint64 if hasInode { inode, err = u.parseInode(fields[6]) if err != nil { - return nil, fmt.Errorf("%s failed to parse inode %q: %w", 
ErrFileParse, fields[6], err) + return nil, fmt.Errorf("%w failed to parse inode %q: %w", ErrFileParse, fields[6], err) } } diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go index 7443edca9..7c597bc87 100644 --- a/vendor/github.com/prometheus/procfs/net_wireless.go +++ b/vendor/github.com/prometheus/procfs/net_wireless.go @@ -68,7 +68,7 @@ func (fs FS) Wireless() ([]*Wireless, error) { m, err := parseWireless(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: wireless: %w", ErrFileParse, err) + return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err) } return m, nil @@ -114,47 +114,47 @@ func parseWireless(r io.Reader) ([]*Wireless, error) { qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], ".")) if err != nil { - return nil, fmt.Errorf("%s: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err) + return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err) } qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], ".")) if err != nil { - return nil, fmt.Errorf("%s: Quality:level as integer %q: %w", ErrFileParse, qlevel, err) + return nil, fmt.Errorf("%w: Quality:level as integer %q: %w", ErrFileParse, qlevel, err) } qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], ".")) if err != nil { - return nil, fmt.Errorf("%s: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err) + return nil, fmt.Errorf("%w: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err) } dnwid, err := strconv.Atoi(stats[4]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err) + return nil, fmt.Errorf("%w: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err) } dcrypt, err := strconv.Atoi(stats[5]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err) + return nil, fmt.Errorf("%w: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err) } dfrag, err := strconv.Atoi(stats[6]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err) + return nil, fmt.Errorf("%w: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err) } dretry, err := strconv.Atoi(stats[7]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err) + return nil, fmt.Errorf("%w: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err) } dmisc, err := strconv.Atoi(stats[8]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err) + return nil, fmt.Errorf("%w: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err) } mbeacon, err := strconv.Atoi(stats[9]) if err != nil { - return nil, fmt.Errorf("%s: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err) + return nil, fmt.Errorf("%w: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err) } w := &Wireless{ @@ -175,7 +175,7 @@ func parseWireless(r io.Reader) ([]*Wireless, error) { } if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("%s: Failed to scan /proc/net/wireless: %w", ErrFileRead, err) + return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err) } return interfaces, nil diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index d1f71caa5..142796368 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -111,7 
+111,7 @@ func (fs FS) AllProcs() (Procs, error) { names, err := d.Readdirnames(-1) if err != nil { - return Procs{}, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err) + return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err) } p := Procs{} @@ -137,7 +137,7 @@ func (p Proc) CmdLine() ([]string, error) { return []string{}, nil } - return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil + return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil } // Wchan returns the wchan (wait channel) of a process. @@ -212,7 +212,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) { for i, n := range names { fd, err := strconv.ParseInt(n, 10, 32) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse line: %v: %w", ErrFileParse, i, err) + return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err) } fds[i] = uintptr(fd) } @@ -297,7 +297,7 @@ func (p Proc) fileDescriptors() ([]string, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err) + return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err) } return names, nil diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index c86d815d7..9530b14bc 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -154,7 +154,7 @@ func parseUint(s string) (uint64, error) { } i, err := strconv.ParseUint(s, 10, 64) if err != nil { - return 0, fmt.Errorf("%s: couldn't parse value %q: %w", ErrFileParse, s, err) + return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err) } return i, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go index c22666750..0f8f847f9 100644 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("%s: failed to read contents of ns dir: %w", ErrFileRead, err) + return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err) } ns := make(Namespaces, len(names)) @@ -58,7 +58,7 @@ func (p Proc) Namespaces() (Namespaces, error) { typ := fields[0] inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) if err != nil { - return nil, fmt.Errorf("%s: inode from %q: %w", ErrFileParse, fields[1], err) + return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err) } ns[name] = Namespace{typ, uint32(inode)} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go index fe9dbb425..ccd35f153 100644 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -61,7 +61,7 @@ type PSIStats struct { func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) if err != nil { - return PSIStats{}, fmt.Errorf("%s: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err) + return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err) } return parsePSIStats(bytes.NewReader(data)) diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go 
b/vendor/github.com/prometheus/procfs/proc_smaps.go index ad8785a40..09060e820 100644 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -127,7 +127,7 @@ func (s *ProcSMapsRollup) parseLine(line string) error { } v := strings.TrimSpace(kv[1]) - v = strings.TrimRight(v, " kB") + v = strings.TrimSuffix(v, " kB") vKBytes, err := strconv.ParseUint(v, 10, 64) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 923e55005..06a8d931c 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -110,6 +110,11 @@ type ProcStat struct { Policy uint // Aggregated block I/O delays, measured in clock ticks (centiseconds). DelayAcctBlkIOTicks uint64 + // Guest time of the process (time spent running a virtual CPU for a guest + // operating system), measured in clock ticks. + GuestTime int + // Guest time of the process's children, measured in clock ticks. + CGuestTime int proc FS } @@ -189,6 +194,8 @@ func (p Proc) Stat() (ProcStat, error) { &s.RTPriority, &s.Policy, &s.DelayAcctBlkIOTicks, + &s.GuestTime, + &s.CGuestTime, ) if err != nil { return ProcStat{}, err diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index 46307f572..a055197c6 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -15,6 +15,7 @@ package procfs import ( "bytes" + "math/bits" "sort" "strconv" "strings" @@ -76,9 +77,9 @@ type ProcStatus struct { NonVoluntaryCtxtSwitches uint64 // UIDs of the process (Real, effective, saved set, and filesystem UIDs) - UIDs [4]string + UIDs [4]uint64 // GIDs of the process (Real, effective, saved set, and filesystem GIDs) - GIDs [4]string + GIDs [4]uint64 // CpusAllowedList: List of cpu cores processes are allowed to run on. CpusAllowedList []uint64 @@ -113,22 +114,37 @@ func (p Proc) NewStatus() (ProcStatus, error) { // convert kB to B vBytes := vKBytes * 1024 - s.fillStatus(k, v, vKBytes, vBytes) + err = s.fillStatus(k, v, vKBytes, vBytes) + if err != nil { + return ProcStatus{}, err + } } return s, nil } -func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) { +func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error { switch k { case "Tgid": s.TGID = int(vUint) case "Name": s.Name = vString case "Uid": - copy(s.UIDs[:], strings.Split(vString, "\t")) + var err error + for i, v := range strings.Split(vString, "\t") { + s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize) + if err != nil { + return err + } + } case "Gid": - copy(s.GIDs[:], strings.Split(vString, "\t")) + var err error + for i, v := range strings.Split(vString, "\t") { + s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize) + if err != nil { + return err + } + } case "NSpid": s.NSpids = calcNSPidsList(vString) case "VmPeak": @@ -173,6 +189,7 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt s.CpusAllowedList = calcCpusAllowedList(vString) } + return nil } // TotalCtxtSwitches returns the total context switch. 
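With `UIDs`/`GIDs` now `[4]uint64`, callers no longer parse the strings themselves, and the new `GuestTime`/`CGuestTime` fields land at the end of the /proc/[pid]/stat scan. A short sketch, again assuming a `procfs.FS` named `fs`:

```go
p, err := fs.Self() // Proc for the calling process
if err != nil {
	log.Fatal(err)
}
status, err := p.NewStatus()
if err != nil {
	log.Fatal(err)
}
// Index order follows /proc/[pid]/status: real, effective,
// saved set, filesystem.
fmt.Printf("real uid=%d effective uid=%d\n", status.UIDs[0], status.UIDs[1])

stat, err := p.Stat()
if err != nil {
	log.Fatal(err)
}
// Guest time of the process and of its children, in clock ticks.
fmt.Println(stat.GuestTime, stat.CGuestTime)
```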
diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go index 12c5bf05b..5eefbe2ef 100644 --- a/vendor/github.com/prometheus/procfs/proc_sys.go +++ b/vendor/github.com/prometheus/procfs/proc_sys.go @@ -44,7 +44,7 @@ func (fs FS) SysctlInts(sysctl string) ([]int, error) { vp := util.NewValueParser(f) values[i] = vp.Int() if err := vp.Err(); err != nil { - return nil, fmt.Errorf("%s: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err) + return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err) } } return values, nil diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go index b8fad677d..28708e074 100644 --- a/vendor/github.com/prometheus/procfs/softirqs.go +++ b/vendor/github.com/prometheus/procfs/softirqs.go @@ -74,7 +74,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Hi = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "TIMER:": @@ -82,7 +82,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Timer = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "NET_TX:": @@ -90,7 +90,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.NetTx = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "NET_RX:": @@ -98,7 +98,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.NetRx = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "BLOCK:": @@ -106,7 +106,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Block = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "IRQ_POLL:": @@ -114,7 +114,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.IRQPoll = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q 
(IRQ_POLL%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "TASKLET:": @@ -122,7 +122,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Tasklet = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "SCHED:": @@ -130,7 +130,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Sched = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "HRTIMER:": @@ -138,7 +138,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.HRTimer = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "RCU:": @@ -146,14 +146,14 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.RCU = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err) } } } } if err := scanner.Err(); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse softirqs: %w", ErrFileParse, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err) } return softirqs, scanner.Err() diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 34fc3ee21..e36b41c18 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -93,7 +93,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) { &cpuStat.Guest, &cpuStat.GuestNice) if err != nil && err != io.EOF { - return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu): %w", ErrFileParse, line, err) + return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err) } if count == 0 { return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line) @@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) { cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) if err != nil { - return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err) + return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err) } return cpuStat, cpuID, nil @@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { &softIRQStat.Hrtimer, &softIRQStat.Rcu) if err != nil { - return SoftIRQStat{}, 0, fmt.Errorf("%s: couldn't parse %q (softirq): %w", ErrFileParse, line, err) + return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err) } return 
softIRQStat, total, nil @@ -201,34 +201,34 @@ func parseStat(r io.Reader, fileName string) (Stat, error) { switch { case parts[0] == "btime": if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err) } case parts[0] == "intr": if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err) } numberedIRQs := parts[2:] stat.IRQ = make([]uint64, len(numberedIRQs)) for i, count := range numberedIRQs { if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "ctxt": if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err) } case parts[0] == "processes": if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err) } case parts[0] == "procs_running": if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err) } case parts[0] == "procs_blocked": if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err) } case parts[0] == "softirq": softIRQStats, total, err := parseSoftIRQStat(line) @@ -251,7 +251,7 @@ func parseStat(r io.Reader, fileName string) (Stat, error) { } if err := scanner.Err(); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q: %w", ErrFileParse, fileName, err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err) } return stat, nil diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go index fa00f555d..65fec834b 100644 --- a/vendor/github.com/prometheus/procfs/swaps.go +++ b/vendor/github.com/prometheus/procfs/swaps.go @@ -74,15 +74,15 @@ func parseSwapString(swapString string) (*Swap, error) { swap.Size, err = strconv.Atoi(swapFields[2]) if err != nil { - return nil, fmt.Errorf("%s: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err) + return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err) } swap.Used, err = strconv.Atoi(swapFields[3]) if err != nil { - return nil, fmt.Errorf("%s: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err) + return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err) } swap.Priority, err = 
strconv.Atoi(swapFields[4]) if err != nil { - return nil, fmt.Errorf("%s: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err) + return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err) } return swap, nil diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go index df2215ece..80e0e947b 100644 --- a/vendor/github.com/prometheus/procfs/thread.go +++ b/vendor/github.com/prometheus/procfs/thread.go @@ -45,7 +45,7 @@ func (fs FS) AllThreads(pid int) (Procs, error) { names, err := d.Readdirnames(-1) if err != nil { - return Procs{}, fmt.Errorf("%s: could not read %q: %w", ErrFileRead, d.Name(), err) + return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err) } t := Procs{} diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go index ce5fefa5b..e54d94b09 100644 --- a/vendor/github.com/prometheus/procfs/zoneinfo.go +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -75,11 +75,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) func (fs FS) Zoneinfo() ([]Zoneinfo, error) { data, err := os.ReadFile(fs.proc.Path("zoneinfo")) if err != nil { - return nil, fmt.Errorf("%s: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err) } zoneinfo, err := parseZoneinfo(data) if err != nil { - return nil, fmt.Errorf("%s: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err) } return zoneinfo, nil } diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md index b042c896f..d1d4a85fd 100644 --- a/vendor/github.com/sirupsen/logrus/README.md +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -9,7 +9,7 @@ the last thing you want from your Logging library (again...). This does not mean Logrus is dead. Logrus will continue to be maintained for security, (backwards compatible) bug fixes, and performance (where we are -limited by the interface). +limited by the interface). I believe Logrus' biggest contribution is to have played a part in today's widespread use of structured logging in Golang. There doesn't seem to be a @@ -43,7 +43,7 @@ plain text): With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash or Splunk: -```json +```text {"animal":"walrus","level":"info","msg":"A group of walrus emerges from the ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} @@ -99,7 +99,7 @@ time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcr ``` Note that this does add measurable overhead - the cost will depend on the version of Go, but is between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your -environment via benchmarks: +environment via benchmarks: ``` go test -bench=.*CallerTracing ``` @@ -317,6 +317,8 @@ log.SetLevel(log.InfoLevel) It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose environment if your application has that. +Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging). 
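The logrus writer.go hunk further below caps each scanned token at bufio.MaxScanTokenSize (64KB) and splits longer input into chunks instead of erroring out. A usage sketch of the affected public API (illustrative only, not part of the diff):

```go
// WriterLevel returns an io.PipeWriter that feeds the logger; with the
// chunking split function below, a single write larger than 64KB is
// emitted as multiple log entries rather than failing the scanner
// goroutine with bufio.ErrTooLong.
logger := logrus.New()
w := logger.WriterLevel(logrus.WarnLevel)
defer w.Close()
fmt.Fprintln(w, strings.Repeat("x", 100_000)) // logged as two chunks
```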
+ #### Entries Besides the fields added with `WithField` or `WithFields` some fields are diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go index 72e8e3a1b..074fd4b8b 100644 --- a/vendor/github.com/sirupsen/logrus/writer.go +++ b/vendor/github.com/sirupsen/logrus/writer.go @@ -4,6 +4,7 @@ import ( "bufio" "io" "runtime" + "strings" ) // Writer at INFO level. See WriterLevel for details. @@ -20,15 +21,18 @@ func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { return NewEntry(logger).WriterLevel(level) } +// Writer returns an io.Writer that writes to the logger at the info log level func (entry *Entry) Writer() *io.PipeWriter { return entry.WriterLevel(InfoLevel) } +// WriterLevel returns an io.Writer that writes to the logger at the given log level func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { reader, writer := io.Pipe() var printFunc func(args ...interface{}) + // Determine which log function to use based on the specified log level switch level { case TraceLevel: printFunc = entry.Trace @@ -48,23 +52,51 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { printFunc = entry.Print } + // Start a new goroutine to scan the input and write it to the logger using the specified print function. + // It splits the input into chunks of up to 64KB to avoid buffer overflows. go entry.writerScanner(reader, printFunc) + + // Set a finalizer function to close the writer when it is garbage collected runtime.SetFinalizer(writer, writerFinalizer) return writer } +// writerScanner scans the input from the reader and writes it to the logger func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { scanner := bufio.NewScanner(reader) + + // Set the buffer size to the maximum token size to avoid buffer overflows + scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize) + + // Define a split function to split the input into chunks of up to 64KB + chunkSize := bufio.MaxScanTokenSize // 64KB + splitFunc := func(data []byte, atEOF bool) (int, []byte, error) { + if len(data) >= chunkSize { + return chunkSize, data[:chunkSize], nil + } + + return bufio.ScanLines(data, atEOF) + } + + // Use the custom split function to split the input + scanner.Split(splitFunc) + + // Scan the input and write it to the logger using the specified print function for scanner.Scan() { - printFunc(scanner.Text()) + printFunc(strings.TrimRight(scanner.Text(), "\r\n")) } + + // If there was an error while scanning the input, log an error if err := scanner.Err(); err != nil { entry.Errorf("Error while reading from Writer: %s", err) } + + // Close the reader when we are done reader.Close() } +// WriterFinalizer is a finalizer function that closes then given writer when it is garbage collected func writerFinalizer(writer *io.PipeWriter) { writer.Close() } diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index 2578d94b5..2c8f4808c 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -19,44 +19,39 @@ linters: disable-all: true enable: #- bodyclose - - deadcode + # - deadcode ! 
deprecated since v1.49.0; replaced by 'unused' #- depguard #- dogsled #- dupl - errcheck #- exhaustive #- funlen - - gas #- gochecknoinits - goconst - #- gocritic + - gocritic #- gocyclo - #- gofmt + - gofmt - goimports - - golint #- gomnd #- goprintffuncname - #- gosec - #- gosimple + - gosec + - gosimple - govet - ineffassign - - interfacer #- lll - - maligned - - megacheck - #- misspell + - misspell #- nakedret #- noctx - #- nolintlint + - nolintlint #- rowserrcheck #- scopelint - #- staticcheck - - structcheck - #- stylecheck + - staticcheck + #- structcheck ! deprecated since v1.49.0; replaced by 'unused' + - stylecheck #- typecheck - unconvert #- unparam - #- unused - - varcheck + - unused + # - varcheck ! deprecated since v1.49.0; replaced by 'unused' #- whitespace fast: false diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index 592c0b8ab..6444f4b7f 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -4,7 +4,7 @@ Cobra is a library for creating powerful modern CLI applications. Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/), [Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to -name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra. +name a few. [This list](site/content/projects_using_cobra.md) contains a more extensive list of projects using Cobra. [![](https://img.shields.io/github/actions/workflow/status/spf13/cobra/test.yml?branch=main&longCache=true&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest) [![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra) @@ -80,7 +80,7 @@ which maintains the same interface while adding POSIX compliance. # Installing Using Cobra is easy. First, use `go get` to install the latest version -of the library. +of the library. ``` go get -u github.com/spf13/cobra@latest @@ -105,8 +105,8 @@ go install github.com/spf13/cobra-cli@latest For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md) -For complete details on using the Cobra library, please read the [The Cobra User Guide](user_guide.md). +For complete details on using the Cobra library, please read the [The Cobra User Guide](site/content/user_guide.md). # License -Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) +Cobra is released under the Apache 2.0 license. See [LICENSE.txt](LICENSE.txt) diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go index 2d0239437..25c30e3cc 100644 --- a/vendor/github.com/spf13/cobra/active_help.go +++ b/vendor/github.com/spf13/cobra/active_help.go @@ -17,15 +17,14 @@ package cobra import ( "fmt" "os" - "strings" ) const ( activeHelpMarker = "_activeHelp_ " // The below values should not be changed: programs will be using them explicitly // in their user documentation, and users will be using them explicitly. 
- activeHelpEnvVarSuffix = "_ACTIVE_HELP" - activeHelpGlobalEnvVar = "COBRA_ACTIVE_HELP" + activeHelpEnvVarSuffix = "ACTIVE_HELP" + activeHelpGlobalEnvVar = configEnvVarGlobalPrefix + "_" + activeHelpEnvVarSuffix activeHelpGlobalDisable = "0" ) @@ -42,7 +41,7 @@ func AppendActiveHelp(compArray []string, activeHelpStr string) []string { // GetActiveHelpConfig returns the value of the ActiveHelp environment variable // _ACTIVE_HELP where is the name of the root command in upper -// case, with all - replaced by _. +// case, with all non-ASCII-alphanumeric characters replaced by `_`. // It will always return "0" if the global environment variable COBRA_ACTIVE_HELP // is set to "0". func GetActiveHelpConfig(cmd *Command) string { @@ -55,9 +54,7 @@ func GetActiveHelpConfig(cmd *Command) string { // activeHelpEnvVar returns the name of the program-specific ActiveHelp environment // variable. It has the format _ACTIVE_HELP where is the name of the -// root command in upper case, with all - replaced by _. +// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`. func activeHelpEnvVar(name string) string { - // This format should not be changed: users will be using it explicitly. - activeHelpEnvVar := strings.ToUpper(fmt.Sprintf("%s%s", name, activeHelpEnvVarSuffix)) - return strings.ReplaceAll(activeHelpEnvVar, "-", "_") + return configEnvVar(name, activeHelpEnvVarSuffix) } diff --git a/vendor/github.com/spf13/cobra/active_help.md b/vendor/github.com/spf13/cobra/active_help.md deleted file mode 100644 index 5e7f59af3..000000000 --- a/vendor/github.com/spf13/cobra/active_help.md +++ /dev/null @@ -1,157 +0,0 @@ -# Active Help - -Active Help is a framework provided by Cobra which allows a program to define messages (hints, warnings, etc) that will be printed during program usage. It aims to make it easier for your users to learn how to use your program. If configured by the program, Active Help is printed when the user triggers shell completion. - -For example, -``` -bash-5.1$ helm repo add [tab] -You must choose a name for the repo you are adding. - -bash-5.1$ bin/helm package [tab] -Please specify the path to the chart to package - -bash-5.1$ bin/helm package [tab][tab] -bin/ internal/ scripts/ pkg/ testdata/ -``` - -**Hint**: A good place to use Active Help messages is when the normal completion system does not provide any suggestions. In such cases, Active Help nicely supplements the normal shell completions to guide the user in knowing what is expected by the program. -## Supported shells - -Active Help is currently only supported for the following shells: -- Bash (using [bash completion V2](shell_completions.md#bash-completion-v2) only). Note that bash 4.4 or higher is required for the prompt to appear when an Active Help message is printed. -- Zsh - -## Adding Active Help messages - -As Active Help uses the shell completion system, the implementation of Active Help messages is done by enhancing custom dynamic completions. If you are not familiar with dynamic completions, please refer to [Shell Completions](shell_completions.md). - -Adding Active Help is done through the use of the `cobra.AppendActiveHelp(...)` function, where the program repeatedly adds Active Help messages to the list of completions. Keep reading for details. - -### Active Help for nouns - -Adding Active Help when completing a noun is done within the `ValidArgsFunction(...)` of a command. 
Please notice the use of `cobra.AppendActiveHelp(...)` in the following example: - -```go -cmd := &cobra.Command{ - Use: "add [NAME] [URL]", - Short: "add a chart repository", - Args: require.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - return addRepo(args) - }, - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - var comps []string - if len(args) == 0 { - comps = cobra.AppendActiveHelp(comps, "You must choose a name for the repo you are adding") - } else if len(args) == 1 { - comps = cobra.AppendActiveHelp(comps, "You must specify the URL for the repo you are adding") - } else { - comps = cobra.AppendActiveHelp(comps, "This command does not take any more arguments") - } - return comps, cobra.ShellCompDirectiveNoFileComp - }, -} -``` -The example above defines the completions (none, in this specific example) as well as the Active Help messages for the `helm repo add` command. It yields the following behavior: -``` -bash-5.1$ helm repo add [tab] -You must choose a name for the repo you are adding - -bash-5.1$ helm repo add grafana [tab] -You must specify the URL for the repo you are adding - -bash-5.1$ helm repo add grafana https://grafana.github.io/helm-charts [tab] -This command does not take any more arguments -``` -**Hint**: As can be seen in the above example, a good place to use Active Help messages is when the normal completion system does not provide any suggestions. In such cases, Active Help nicely supplements the normal shell completions. - -### Active Help for flags - -Providing Active Help for flags is done in the same fashion as for nouns, but using the completion function registered for the flag. For example: -```go -_ = cmd.RegisterFlagCompletionFunc("version", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - if len(args) != 2 { - return cobra.AppendActiveHelp(nil, "You must first specify the chart to install before the --version flag can be completed"), cobra.ShellCompDirectiveNoFileComp - } - return compVersionFlag(args[1], toComplete) - }) -``` -The example above prints an Active Help message when not enough information was given by the user to complete the `--version` flag. -``` -bash-5.1$ bin/helm install myrelease --version 2.0.[tab] -You must first specify the chart to install before the --version flag can be completed - -bash-5.1$ bin/helm install myrelease bitnami/solr --version 2.0.[tab][tab] -2.0.1 2.0.2 2.0.3 -``` - -## User control of Active Help - -You may want to allow your users to disable Active Help or choose between different levels of Active Help. It is entirely up to the program to define the type of configurability of Active Help that it wants to offer, if any. -Allowing to configure Active Help is entirely optional; you can use Active Help in your program without doing anything about Active Help configuration. - -The way to configure Active Help is to use the program's Active Help environment -variable. That variable is named `_ACTIVE_HELP` where `` is the name of your -program in uppercase with any `-` replaced by an `_`. The variable should be set by the user to whatever -Active Help configuration values are supported by the program. - -For example, say `helm` has chosen to support three levels for Active Help: `on`, `off`, `local`. Then a user -would set the desired behavior to `local` by doing `export HELM_ACTIVE_HELP=local` in their shell. 
- -For simplicity, when in `cmd.ValidArgsFunction(...)` or a flag's completion function, the program should read the -Active Help configuration using the `cobra.GetActiveHelpConfig(cmd)` function and select what Active Help messages -should or should not be added (instead of reading the environment variable directly). - -For example: -```go -ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - activeHelpLevel := cobra.GetActiveHelpConfig(cmd) - - var comps []string - if len(args) == 0 { - if activeHelpLevel != "off" { - comps = cobra.AppendActiveHelp(comps, "You must choose a name for the repo you are adding") - } - } else if len(args) == 1 { - if activeHelpLevel != "off" { - comps = cobra.AppendActiveHelp(comps, "You must specify the URL for the repo you are adding") - } - } else { - if activeHelpLevel == "local" { - comps = cobra.AppendActiveHelp(comps, "This command does not take any more arguments") - } - } - return comps, cobra.ShellCompDirectiveNoFileComp -}, -``` -**Note 1**: If the `<PROGRAM>_ACTIVE_HELP` environment variable is set to the string "0", Cobra will automatically disable all Active Help output (even if some output was specified by the program using the `cobra.AppendActiveHelp(...)` function). Using "0" can simplify your code in situations where you want to blindly disable Active Help without having to call `cobra.GetActiveHelpConfig(cmd)` explicitly. - -**Note 2**: If a user wants to disable Active Help for every single program based on Cobra, she can set the environment variable `COBRA_ACTIVE_HELP` to "0". In this case `cobra.GetActiveHelpConfig(cmd)` will return "0" no matter what the variable `<PROGRAM>_ACTIVE_HELP` is set to. - -**Note 3**: If the user does not set `<PROGRAM>_ACTIVE_HELP` or `COBRA_ACTIVE_HELP` (which will be a common case), the default value for the Active Help configuration returned by `cobra.GetActiveHelpConfig(cmd)` will be the empty string. -## Active Help with Cobra's default completion command - -Cobra provides a default `completion` command for programs that wish to use it. -When using the default `completion` command, Active Help is configurable in the same -fashion as described above using environment variables. You may wish to document this in more -detail for your users. - -## Debugging Active Help - -Debugging your Active Help code is done in the same way as debugging your dynamic completion code, which is with Cobra's hidden `__complete` command. Please refer to [debugging shell completion](shell_completions.md#debugging) for details. - -When debugging with the `__complete` command, if you want to specify different Active Help configurations, you should use the active help environment variable. That variable is named `<PROGRAM>_ACTIVE_HELP` where any `-` is replaced by an `_`.
For example, we can test deactivating some Active Help as shown below: -``` -$ HELM_ACTIVE_HELP=1 bin/helm __complete install wordpress bitnami/h -bitnami/haproxy -bitnami/harbor -_activeHelp_ WARNING: cannot re-use a name that is still in use -:0 -Completion ended with directive: ShellCompDirectiveDefault - -$ HELM_ACTIVE_HELP=0 bin/helm __complete install wordpress bitnami/h -bitnami/haproxy -bitnami/harbor -:0 -Completion ended with directive: ShellCompDirectiveDefault -``` diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go index e79ec33a8..ed1e70cea 100644 --- a/vendor/github.com/spf13/cobra/args.go +++ b/vendor/github.com/spf13/cobra/args.go @@ -52,9 +52,9 @@ func OnlyValidArgs(cmd *Command, args []string) error { if len(cmd.ValidArgs) > 0 { // Remove any description that may be included in ValidArgs. // A description is following a tab character. - var validArgs []string + validArgs := make([]string, 0, len(cmd.ValidArgs)) for _, v := range cmd.ValidArgs { - validArgs = append(validArgs, strings.Split(v, "\t")[0]) + validArgs = append(validArgs, strings.SplitN(v, "\t", 2)[0]) } for _, v := range args { if !stringInSlice(v, validArgs) { diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 10c78847d..f4d198cbc 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -85,7 +85,7 @@ __%[1]s_handle_go_custom_completion() local out requestComp lastParam lastChar comp directive args # Prepare the command to request completions for the program. - # Calling ${words[0]} instead of directly %[1]s allows to handle aliases + # Calling ${words[0]} instead of directly %[1]s allows handling aliases args=("${words[@]:1}") # Disable ActiveHelp which is not supported for bash completion v1 requestComp="%[8]s=0 ${words[0]} %[2]s ${args[*]}" @@ -597,19 +597,16 @@ func writeRequiredFlag(buf io.StringWriter, cmd *Command) { if nonCompletableFlag(flag) { return } - for key := range flag.Annotations { - switch key { - case BashCompOneRequiredFlag: - format := " must_have_one_flag+=(\"--%s" - if flag.Value.Type() != "bool" { - format += "=" - } - format += cbn - WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) - - if len(flag.Shorthand) > 0 { - WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) - } + if _, ok := flag.Annotations[BashCompOneRequiredFlag]; ok { + format := " must_have_one_flag+=(\"--%s" + if flag.Value.Type() != "bool" { + format += "=" + } + format += cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) + + if len(flag.Shorthand) > 0 { + WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) } } }) @@ -621,7 +618,7 @@ func writeRequiredNouns(buf io.StringWriter, cmd *Command) { for _, value := range cmd.ValidArgs { // Remove any description that may be included following a tab character. // Descriptions are not supported by bash completion. 
- value = strings.Split(value, "\t")[0] + value = strings.SplitN(value, "\t", 2)[0] WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) } if cmd.ValidArgsFunction != nil { diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md deleted file mode 100644 index 52919b2fa..000000000 --- a/vendor/github.com/spf13/cobra/bash_completions.md +++ /dev/null @@ -1,93 +0,0 @@ -# Generating Bash Completions For Your cobra.Command - -Please refer to [Shell Completions](shell_completions.md) for details. - -## Bash legacy dynamic completions - -For backward compatibility, Cobra still supports its legacy dynamic completion solution (described below). Unlike the `ValidArgsFunction` solution, the legacy solution will only work for Bash shell-completion and not for other shells. This legacy solution can be used alongside `ValidArgsFunction` and `RegisterFlagCompletionFunc()`, as long as both solutions are not used for the same command. This provides a path to gradually migrate from the legacy solution to the new solution. - -**Note**: Cobra's default `completion` command uses bash completion V2. If you are currently using Cobra's legacy dynamic completion solution, you should not use the default `completion` command but continue using your own. - -The legacy solution allows you to inject bash functions into the bash completion script. Those bash functions are responsible for providing the completion choices for your own completions. - -Some code that works in kubernetes: - -```go -const ( - bash_completion_func = `__kubectl_parse_get() -{ - local kubectl_output out - if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then - out=($(echo "${kubectl_output}" | awk '{print $1}')) - COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) ) - fi -} - -__kubectl_get_resource() -{ - if [[ ${#nouns[@]} -eq 0 ]]; then - return 1 - fi - __kubectl_parse_get ${nouns[${#nouns[@]} -1]} - if [[ $? -eq 0 ]]; then - return 0 - fi -} - -__kubectl_custom_func() { - case ${last_command} in - kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop) - __kubectl_get_resource - return - ;; - *) - ;; - esac -} -`) -``` - -And then I set that in my command definition: - -```go -cmds := &cobra.Command{ - Use: "kubectl", - Short: "kubectl controls the Kubernetes cluster manager", - Long: `kubectl controls the Kubernetes cluster manager. - -Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`, - Run: runHelp, - BashCompletionFunction: bash_completion_func, -} -``` - -The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__kubectl_custom_func()` (`__<command-use>_custom_func()`) to be called when the built-in processor was unable to find a solution. In the case of kubernetes, a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__kubectl_custom_func()` will run because the cobra.Command only understood "kubectl" and "get." `__kubectl_custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods!
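For contrast with the injected-bash approach above, the same noun completion in the non-legacy API is a plain Go callback. A hypothetical sketch, with `getPodNames` standing in for the real cluster lookup:

```go
getCmd := &cobra.Command{
	Use:   "get",
	Short: "Display one or many resources",
	ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		// getPodNames is a hypothetical stand-in for querying the cluster,
		// e.g. running `kubectl get --no-headers pod` and collecting the
		// first column, as the bash function above does.
		return getPodNames(toComplete), cobra.ShellCompDirectiveNoFileComp
	},
}
```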
- -Similarly, for flags: - -```go - annotation := make(map[string][]string) - annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"} - - flag := &pflag.Flag{ - Name: "namespace", - Usage: usage, - Annotations: annotation, - } - cmd.Flags().AddFlag(flag) -``` - -In addition add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction` -value, e.g.: - -```bash -__kubectl_get_namespaces() -{ - local template - template="{{ range .items }}{{ .metadata.name }} {{ end }}" - local kubectl_out - if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then - COMPREPLY=( $( compgen -W "${kubectl_out}[*]" -- "$cur" ) ) - fi -} -``` diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go index 19b09560c..1cce5c329 100644 --- a/vendor/github.com/spf13/cobra/bash_completionsV2.go +++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go @@ -57,7 +57,7 @@ __%[1]s_get_completion_results() { local requestComp lastParam lastChar args # Prepare the command to request completions for the program. - # Calling ${words[0]} instead of directly %[1]s allows to handle aliases + # Calling ${words[0]} instead of directly %[1]s allows handling aliases args=("${words[@]:1}") requestComp="${words[0]} %[2]s ${args[*]}" diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index b07b44a0c..e0b0947b0 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -43,12 +43,13 @@ var initializers []func() var finalizers []func() const ( - defaultPrefixMatching = false - defaultCommandSorting = true - defaultCaseInsensitive = false + defaultPrefixMatching = false + defaultCommandSorting = true + defaultCaseInsensitive = false + defaultTraverseRunHooks = false ) -// EnablePrefixMatching allows to set automatic prefix matching. Automatic prefix matching can be a dangerous thing +// EnablePrefixMatching allows setting automatic prefix matching. Automatic prefix matching can be a dangerous thing // to automatically enable in CLI tools. // Set this to true to enable it. var EnablePrefixMatching = defaultPrefixMatching @@ -60,6 +61,10 @@ var EnableCommandSorting = defaultCommandSorting // EnableCaseInsensitive allows case-insensitive commands names. (case sensitive by default) var EnableCaseInsensitive = defaultCaseInsensitive +// EnableTraverseRunHooks executes persistent pre-run and post-run hooks from all parents. +// By default this is disabled, which means only the first run hook to be found is executed. +var EnableTraverseRunHooks = defaultTraverseRunHooks + // MousetrapHelpText enables an information splash screen on Windows // if the CLI is started from explorer.exe. // To disable the mousetrap, just set this variable to blank string (""). 
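The `EnableTraverseRunHooks` global added in the hunk above changes how persistent pre-run and post-run hooks chain across a command tree. A minimal sketch of the difference, under the assumption of a two-level command tree with illustrative names:

```go
cobra.EnableTraverseRunHooks = true // opt in once, e.g. in main()

rootCmd := &cobra.Command{
	Use: "root",
	PersistentPreRun: func(cmd *cobra.Command, args []string) {
		fmt.Println("root pre-run")
	},
}
childCmd := &cobra.Command{
	Use: "child",
	Run: func(cmd *cobra.Command, args []string) {},
	PersistentPreRun: func(cmd *cobra.Command, args []string) {
		fmt.Println("child pre-run")
	},
}
rootCmd.AddCommand(childCmd)

// With the default (false), executing `root child` runs only the child's
// hook, the first one found walking up from the executed command. With
// traversal enabled, "root pre-run" prints before "child pre-run", and
// persistent post-run hooks execute in the reverse order.
```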
@@ -188,8 +193,6 @@ func ld(s, t string, ignoreCase bool) int { d := make([][]int, len(s)+1) for i := range d { d[i] = make([]int, len(t)+1) - } - for i := range d { d[i][0] = i } for j := range d[0] { diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 01f7c6f1c..54748fc67 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -30,7 +30,10 @@ import ( flag "github.com/spf13/pflag" ) -const FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra" +const ( + FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra" + CommandDisplayNameAnnotation = "cobra_annotation_command_display_name" +) // FParseErrWhitelist configures Flag parse errors to be ignored type FParseErrWhitelist flag.ParseErrorsWhitelist @@ -99,7 +102,7 @@ type Command struct { Deprecated string // Annotations are key/value pairs that can be used by applications to identify or - // group commands. + // group commands or set special options. Annotations map[string]string // Version defines the version for this command. If this value is non-empty and the command does not @@ -115,6 +118,8 @@ // * PostRun() // * PersistentPostRun() // All functions get the same args, the arguments after the command name. + // The *PreRun and *PostRun functions will only be executed if the Run function of the current + // command has been declared. // // PersistentPreRun: children of this command will inherit and execute. PersistentPreRun func(cmd *Command, args []string) @@ -149,8 +154,10 @@ type Command struct { // pflags contains persistent flags. pflags *flag.FlagSet // lflags contains local flags. + // This field does not represent internal state, it's used as a cache to optimise the LocalFlags function call lflags *flag.FlagSet // iflags contains inherited flags. + // This field does not represent internal state, it's used as a cache to optimise the InheritedFlags function call iflags *flag.FlagSet // parentsPflags is all persistent flags of cmd's parents. parentsPflags *flag.FlagSet @@ -181,6 +188,9 @@ type Command struct { // versionTemplate is the version template defined by user. versionTemplate string + // errPrefix is the error message prefix defined by user. + errPrefix string + // inReader is a reader defined by the user that replaces stdin inReader io.Reader // outWriter is a writer defined by the user that replaces stdout @@ -346,6 +356,11 @@ func (c *Command) SetVersionTemplate(s string) { c.versionTemplate = s } +// SetErrPrefix sets the error message prefix to be used. Applications can use it to set a custom prefix. +func (c *Command) SetErrPrefix(s string) { + c.errPrefix = s +} + // SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. // The user should not have a cyclic dependency on commands. func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { @@ -595,6 +610,18 @@ func (c *Command) VersionTemplate() string { ` } +// ErrPrefix returns the error message prefix for the command +func (c *Command) ErrPrefix() string { + if c.errPrefix != "" { + return c.errPrefix + } + + if c.HasParent() { + return c.parent.ErrPrefix() + } + return "Error:" +} + func hasNoOptDefVal(name string, fs *flag.FlagSet) bool { flag := fs.Lookup(name) if flag == nil { @@ -681,7 +708,7 @@ Loop: // This is not a flag or a flag value.
Check to see if it matches what we're looking for, and if so, // return the args, excluding the one at this position. if s == x { - ret := []string{} + ret := make([]string, 0, len(args)-1) ret = append(ret, args[:pos]...) ret = append(ret, args[pos+1:]...) return ret @@ -729,14 +756,14 @@ func (c *Command) findSuggestions(arg string) string { if c.SuggestionsMinimumDistance <= 0 { c.SuggestionsMinimumDistance = 2 } - suggestionsString := "" + var sb strings.Builder if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 { - suggestionsString += "\n\nDid you mean this?\n" + sb.WriteString("\n\nDid you mean this?\n") for _, s := range suggestions { - suggestionsString += fmt.Sprintf("\t%v\n", s) + _, _ = fmt.Fprintf(&sb, "\t%v\n", s) } } - return suggestionsString + return sb.String() } func (c *Command) findNext(next string) *Command { @@ -752,7 +779,9 @@ func (c *Command) findNext(next string) *Command { } if len(matches) == 1 { - return matches[0] + // Temporarily disable gosec G602, which produces a false positive. + // See https://github.com/securego/gosec/issues/1005. + return matches[0] // #nosec G602 } return nil @@ -846,7 +875,7 @@ func (c *Command) ArgsLenAtDash() int { func (c *Command) execute(a []string) (err error) { if c == nil { - return fmt.Errorf("Called Execute() on a nil Command") + return fmt.Errorf("called Execute() on a nil Command") } if len(c.Deprecated) > 0 { @@ -910,15 +939,31 @@ func (c *Command) execute(a []string) (err error) { return err } + parents := make([]*Command, 0, 5) for p := c; p != nil; p = p.Parent() { + if EnableTraverseRunHooks { + // When EnableTraverseRunHooks is set: + // - Execute all persistent pre-runs from the root parent till this command. + // - Execute all persistent post-runs from this command till the root parent. + parents = append([]*Command{p}, parents...) + } else { + // Otherwise, execute only the first found persistent hook. 
+ parents = append(parents, p) + } + } + for _, p := range parents { if p.PersistentPreRunE != nil { if err := p.PersistentPreRunE(c, argWoFlags); err != nil { return err } - break + if !EnableTraverseRunHooks { + break + } } else if p.PersistentPreRun != nil { p.PersistentPreRun(c, argWoFlags) - break + if !EnableTraverseRunHooks { + break + } } } if c.PreRunE != nil { @@ -955,10 +1000,14 @@ func (c *Command) execute(a []string) (err error) { if err := p.PersistentPostRunE(c, argWoFlags); err != nil { return err } - break + if !EnableTraverseRunHooks { + break + } } else if p.PersistentPostRun != nil { p.PersistentPostRun(c, argWoFlags) - break + if !EnableTraverseRunHooks { + break + } } } @@ -1048,7 +1097,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { c = cmd } if !c.SilenceErrors { - c.PrintErrln("Error:", err.Error()) + c.PrintErrln(c.ErrPrefix(), err.Error()) c.PrintErrf("Run '%v --help' for usage.\n", c.CommandPath()) } return c, err @@ -1077,7 +1126,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { // If root command has SilenceErrors flagged, // all subcommands should respect it if !cmd.SilenceErrors && !c.SilenceErrors { - c.PrintErrln("Error:", err.Error()) + c.PrintErrln(cmd.ErrPrefix(), err.Error()) } // If root command has SilenceUsage flagged, @@ -1140,10 +1189,11 @@ func (c *Command) InitDefaultHelpFlag() { c.mergePersistentFlags() if c.Flags().Lookup("help") == nil { usage := "help for " - if c.Name() == "" { + name := c.displayName() + if name == "" { usage += "this command" } else { - usage += c.Name() + usage += name } c.Flags().BoolP("help", "h", false, usage) _ = c.Flags().SetAnnotation("help", FlagSetByCobraAnnotation, []string{"true"}) @@ -1189,7 +1239,7 @@ func (c *Command) InitDefaultHelpCmd() { Use: "help [command]", Short: "Help about any command", Long: `Help provides help for any command in the application. -Simply type ` + c.Name() + ` help [path to command] for full details.`, +Simply type ` + c.displayName() + ` help [path to command] for full details.`, ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) { var completions []string cmd, _, e := c.Root().Find(args) @@ -1380,16 +1430,24 @@ func (c *Command) CommandPath() string { if c.HasParent() { return c.Parent().CommandPath() + " " + c.Name() } + return c.displayName() +} + +func (c *Command) displayName() string { + if displayName, ok := c.Annotations[CommandDisplayNameAnnotation]; ok { + return displayName + } return c.Name() } // UseLine puts out the full usage for a given command (including parents). func (c *Command) UseLine() string { var useline string + use := strings.Replace(c.Use, c.Name(), c.displayName(), 1) if c.HasParent() { - useline = c.parent.CommandPath() + " " + c.Use + useline = c.parent.CommandPath() + " " + use } else { - useline = c.Use + useline = use } if c.DisableFlagsInUseLine { return useline @@ -1591,7 +1649,7 @@ func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) f // to this command (local and persistent declared here and by all parents). func (c *Command) Flags() *flag.FlagSet { if c.flags == nil { - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1602,10 +1660,11 @@ func (c *Command) Flags() *flag.FlagSet { } // LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands. 
+// This function does not modify the flags of the current command, its purpose is to return the current state. func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { persistentFlags := c.PersistentFlags() - out := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + out := flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.LocalFlags().VisitAll(func(f *flag.Flag) { if persistentFlags.Lookup(f.Name) == nil { out.AddFlag(f) @@ -1615,11 +1674,12 @@ } // LocalFlags returns the local FlagSet specifically set in the current command. +// This function does not modify the flags of the current command, its purpose is to return the current state. func (c *Command) LocalFlags() *flag.FlagSet { c.mergePersistentFlags() if c.lflags == nil { - c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.lflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1642,11 +1702,12 @@ } // InheritedFlags returns all flags which were inherited from parent commands. +// This function does not modify the flags of the current command, its purpose is to return the current state. func (c *Command) InheritedFlags() *flag.FlagSet { c.mergePersistentFlags() if c.iflags == nil { - c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.iflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1667,6 +1728,7 @@ } // NonInheritedFlags returns all flags which were not inherited from parent commands. +// This function does not modify the flags of the current command, its purpose is to return the current state. func (c *Command) NonInheritedFlags() *flag.FlagSet { return c.LocalFlags() } @@ -1674,7 +1736,7 @@ // PersistentFlags returns the persistent FlagSet specifically set in the current command. func (c *Command) PersistentFlags() *flag.FlagSet { if c.pflags == nil { - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1687,9 +1749,9 @@ func (c *Command) ResetFlags() { c.flagErrorBuf = new(bytes.Buffer) c.flagErrorBuf.Reset() - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.flags.SetOutput(c.flagErrorBuf) - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.pflags.SetOutput(c.flagErrorBuf) c.lflags = nil @@ -1806,7 +1868,7 @@ func (c *Command) mergePersistentFlags() { // If c.parentsPflags == nil, it makes new.
func (c *Command) updateParentsPflags() { if c.parentsPflags == nil { - c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.parentsPflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.parentsPflags.SetOutput(c.flagErrorBuf) c.parentsPflags.SortFlags = false } diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go index ee38c4d0b..c0c08b057 100644 --- a/vendor/github.com/spf13/cobra/completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -17,6 +17,8 @@ package cobra import ( "fmt" "os" + "regexp" + "strconv" "strings" "sync" @@ -145,6 +147,20 @@ func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Comman return nil } +// GetFlagCompletionFunc returns the completion function for the given flag of the command, if available. +func (c *Command) GetFlagCompletionFunc(flagName string) (func(*Command, []string, string) ([]string, ShellCompDirective), bool) { + flag := c.Flag(flagName) + if flag == nil { + return nil, false + } + + flagCompletionMutex.RLock() + defer flagCompletionMutex.RUnlock() + + completionFunc, exists := flagCompletionFunctions[flag] + return completionFunc, exists +} + // Returns a string listing the different directive enabled in the specified parameter func (d ShellCompDirective) string() string { var directives []string @@ -197,24 +213,29 @@ func (c *Command) initCompleteCmd(args []string) { // 2- Even without completions, we need to print the directive } - noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd) + noDescriptions := cmd.CalledAs() == ShellCompNoDescRequestCmd + if !noDescriptions { + if doDescriptions, err := strconv.ParseBool(getEnvConfig(cmd, configEnvVarSuffixDescriptions)); err == nil { + noDescriptions = !doDescriptions + } + } + noActiveHelp := GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable + out := finalCmd.OutOrStdout() for _, comp := range completions { - if GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable { - // Remove all activeHelp entries in this case - if strings.HasPrefix(comp, activeHelpMarker) { - continue - } + if noActiveHelp && strings.HasPrefix(comp, activeHelpMarker) { + // Remove all activeHelp entries if it's disabled. + continue } if noDescriptions { // Remove any description that may be included following a tab character. - comp = strings.Split(comp, "\t")[0] + comp = strings.SplitN(comp, "\t", 2)[0] } // Make sure we only write the first line to the output. // This is needed if a description contains a linebreak. // Otherwise the shell scripts will interpret the other lines as new flags // and could therefore provide a wrong completion. - comp = strings.Split(comp, "\n")[0] + comp = strings.SplitN(comp, "\n", 2)[0] // Finally trim the completion. This is especially important to get rid // of a trailing tab when there are no description following it. @@ -223,14 +244,14 @@ func (c *Command) initCompleteCmd(args []string) { // although there is no description). comp = strings.TrimSpace(comp) - // Print each possible completion to stdout for the completion script to consume. - fmt.Fprintln(finalCmd.OutOrStdout(), comp) + // Print each possible completion to the output for the completion script to consume. + fmt.Fprintln(out, comp) } // As the last printout, print the completion directive for the completion script to parse. // The directive integer must be that last character following a single colon (:). 
// The completion script expects :<directive> - fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive) + fmt.Fprintf(out, ":%d\n", directive) // Print some helpful info to stderr for the user to understand. // Output from stderr must be ignored by the completion script. @@ -277,15 +298,19 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi } if err != nil { // Unable to find the real command. E.g., someInvalidCmd - return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs) + return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("unable to find a command for arguments: %v", trimmedArgs) } finalCmd.ctx = c.ctx // These flags are normally added when `execute()` is called on `finalCmd`, // however, when doing completion, we don't call `finalCmd.execute()`. - // Let's add the --help and --version flag ourselves. - finalCmd.InitDefaultHelpFlag() - finalCmd.InitDefaultVersionFlag() + // Let's add the --help and --version flag ourselves but only if the finalCmd + // has not disabled flag parsing; if flag parsing is disabled, it is up to the + // finalCmd itself to handle the completion of *all* flags. + if !finalCmd.DisableFlagParsing { + finalCmd.InitDefaultHelpFlag() + finalCmd.InitDefaultVersionFlag() + } // Check if we are doing flag value completion before parsing the flags. // This is important because if we are completing a flag value, we need to also @@ -389,6 +414,11 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { doCompleteFlags(flag) }) + // Try to complete non-inherited flags even if DisableFlagParsing==true. + // This allows programs to tell Cobra about flags for completion even + // if the actual parsing of flags is not done by Cobra. + // For instance, Helm uses this to provide flag name completion for + // some of its plugins. finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { doCompleteFlags(flag) }) @@ -876,3 +906,34 @@ func CompError(msg string) { func CompErrorln(msg string) { CompError(fmt.Sprintf("%s\n", msg)) } + +// These values should not be changed: users will be using them explicitly. +const ( + configEnvVarGlobalPrefix = "COBRA" + configEnvVarSuffixDescriptions = "COMPLETION_DESCRIPTIONS" +) + +var configEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`) + +// configEnvVar returns the name of the program-specific configuration environment +// variable. It has the format <PROGRAM>_<SUFFIX> where <PROGRAM> is the name of the +// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`. +func configEnvVar(name, suffix string) string { + // This format should not be changed: users will be using it explicitly. + v := strings.ToUpper(fmt.Sprintf("%s_%s", name, suffix)) + v = configEnvVarPrefixSubstRegexp.ReplaceAllString(v, "_") + return v +} + +// getEnvConfig returns the value of the configuration environment variable +// <PROGRAM>_<SUFFIX> where <PROGRAM> is the name of the root command in upper +// case, with all non-ASCII-alphanumeric characters replaced by `_`. +// If the value is empty or not set, the value of the environment variable +// COBRA_<SUFFIX> is returned instead.
+func getEnvConfig(cmd *Command, suffix string) string { + v := os.Getenv(configEnvVar(cmd.Root().Name(), suffix)) + if v == "" { + v = os.Getenv(configEnvVar(configEnvVarGlobalPrefix, suffix)) + } + return v +} diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go index 12ca0d2b1..12d61b691 100644 --- a/vendor/github.com/spf13/cobra/fish_completions.go +++ b/vendor/github.com/spf13/cobra/fish_completions.go @@ -113,7 +113,7 @@ function __%[1]s_clear_perform_completion_once_result __%[1]s_debug "" __%[1]s_debug "========= clearing previously set __%[1]s_perform_completion_once_result variable ==========" set --erase __%[1]s_perform_completion_once_result - __%[1]s_debug "Succesfully erased the variable __%[1]s_perform_completion_once_result" + __%[1]s_debug "Successfully erased the variable __%[1]s_perform_completion_once_result" end function __%[1]s_requires_order_preservation diff --git a/vendor/github.com/spf13/cobra/fish_completions.md b/vendor/github.com/spf13/cobra/fish_completions.md deleted file mode 100644 index 19b2ed129..000000000 --- a/vendor/github.com/spf13/cobra/fish_completions.md +++ /dev/null @@ -1,4 +0,0 @@ -## Generating Fish Completions For Your cobra.Command - -Please refer to [Shell Completions](shell_completions.md) for details. - diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go index b35fde155..560612fd3 100644 --- a/vendor/github.com/spf13/cobra/flag_groups.go +++ b/vendor/github.com/spf13/cobra/flag_groups.go @@ -23,8 +23,9 @@ import ( ) const ( - requiredAsGroup = "cobra_annotation_required_if_others_set" - mutuallyExclusive = "cobra_annotation_mutually_exclusive" + requiredAsGroupAnnotation = "cobra_annotation_required_if_others_set" + oneRequiredAnnotation = "cobra_annotation_one_required" + mutuallyExclusiveAnnotation = "cobra_annotation_mutually_exclusive" ) // MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors @@ -36,7 +37,23 @@ func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) { if f == nil { panic(fmt.Sprintf("Failed to find flag %q and mark it as being required in a flag group", v)) } - if err := c.Flags().SetAnnotation(v, requiredAsGroup, append(f.Annotations[requiredAsGroup], strings.Join(flagNames, " "))); err != nil { + if err := c.Flags().SetAnnotation(v, requiredAsGroupAnnotation, append(f.Annotations[requiredAsGroupAnnotation], strings.Join(flagNames, " "))); err != nil { + // Only errs if the flag isn't found. + panic(err) + } + } +} + +// MarkFlagsOneRequired marks the given flags with annotations so that Cobra errors +// if the command is invoked without at least one flag from the given set of flags. +func (c *Command) MarkFlagsOneRequired(flagNames ...string) { + c.mergePersistentFlags() + for _, v := range flagNames { + f := c.Flags().Lookup(v) + if f == nil { + panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a one-required flag group", v)) + } + if err := c.Flags().SetAnnotation(v, oneRequiredAnnotation, append(f.Annotations[oneRequiredAnnotation], strings.Join(flagNames, " "))); err != nil { // Only errs if the flag isn't found. panic(err) } @@ -53,13 +70,13 @@ func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) { panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a mutually exclusive flag group", v)) } // Each time this is called is a single new entry; this allows it to be a member of multiple groups if needed. 
- if err := c.Flags().SetAnnotation(v, mutuallyExclusive, append(f.Annotations[mutuallyExclusive], strings.Join(flagNames, " "))); err != nil { + if err := c.Flags().SetAnnotation(v, mutuallyExclusiveAnnotation, append(f.Annotations[mutuallyExclusiveAnnotation], strings.Join(flagNames, " "))); err != nil { panic(err) } } } -// ValidateFlagGroups validates the mutuallyExclusive/requiredAsGroup logic and returns the +// ValidateFlagGroups validates the mutuallyExclusive/oneRequired/requiredAsGroup logic and returns the // first error encountered. func (c *Command) ValidateFlagGroups() error { if c.DisableFlagParsing { @@ -71,15 +88,20 @@ func (c *Command) ValidateFlagGroups() error { // groupStatus format is the list of flags as a unique ID, // then a map of each flag name and whether it is set or not. groupStatus := map[string]map[string]bool{} + oneRequiredGroupStatus := map[string]map[string]bool{} mutuallyExclusiveGroupStatus := map[string]map[string]bool{} flags.VisitAll(func(pflag *flag.Flag) { - processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) - processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) + processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus) + processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus) }) if err := validateRequiredFlagGroups(groupStatus); err != nil { return err } + if err := validateOneRequiredFlagGroups(oneRequiredGroupStatus); err != nil { + return err + } if err := validateExclusiveFlagGroups(mutuallyExclusiveGroupStatus); err != nil { return err } @@ -108,7 +130,7 @@ func processFlagForGroupAnnotation(flags *flag.FlagSet, pflag *flag.Flag, annota continue } - groupStatus[group] = map[string]bool{} + groupStatus[group] = make(map[string]bool, len(flagnames)) for _, name := range flagnames { groupStatus[group][name] = false } @@ -142,6 +164,27 @@ func validateRequiredFlagGroups(data map[string]map[string]bool) error { return nil } +func validateOneRequiredFlagGroups(data map[string]map[string]bool) error { + keys := sortedKeys(data) + for _, flagList := range keys { + flagnameAndStatus := data[flagList] + var set []string + for flagname, isSet := range flagnameAndStatus { + if isSet { + set = append(set, flagname) + } + } + if len(set) >= 1 { + continue + } + + // Sort values, so they can be tested/scripted against consistently. 
+ sort.Strings(set) + return fmt.Errorf("at least one of the flags in the group [%v] is required", flagList) + } + return nil +} + func validateExclusiveFlagGroups(data map[string]map[string]bool) error { keys := sortedKeys(data) for _, flagList := range keys { @@ -176,6 +219,7 @@ func sortedKeys(m map[string]map[string]bool) []string { // enforceFlagGroupsForCompletion will do the following: // - when a flag in a group is present, other flags in the group will be marked required +// - when none of the flags in a one-required group are present, all flags in the group will be marked required // - when a flag in a mutually exclusive group is present, other flags in the group will be marked as hidden // This allows the standard completion logic to behave appropriately for flag groups func (c *Command) enforceFlagGroupsForCompletion() { @@ -185,10 +229,12 @@ func (c *Command) enforceFlagGroupsForCompletion() { flags := c.Flags() groupStatus := map[string]map[string]bool{} + oneRequiredGroupStatus := map[string]map[string]bool{} mutuallyExclusiveGroupStatus := map[string]map[string]bool{} c.Flags().VisitAll(func(pflag *flag.Flag) { - processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) - processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) + processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus) + processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus) }) // If a flag that is part of a group is present, we make all the other flags @@ -204,6 +250,26 @@ func (c *Command) enforceFlagGroupsForCompletion() { } } + // If none of the flags of a one-required group are present, we make all the flags + // of that group required so that the shell completion suggests them automatically + for flagList, flagnameAndStatus := range oneRequiredGroupStatus { + isSet := false + + for _, isSet = range flagnameAndStatus { + if isSet { + break + } + } + + // None of the flags of the group are set, mark all flags in the group + // as required + if !isSet { + for _, fName := range strings.Split(flagList, " ") { + _ = c.MarkFlagRequired(fName) + } + } + } + // If a flag that is mutually exclusive to others is present, we hide the other // flags of that group so the shell completion does not suggest them for flagList, flagnameAndStatus := range mutuallyExclusiveGroupStatus { diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go index 177d2755f..a830b7bca 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -28,8 +28,8 @@ import ( func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) { // Variables should not contain a '-' or ':' character nameForVar := name - nameForVar = strings.Replace(nameForVar, "-", "_", -1) - nameForVar = strings.Replace(nameForVar, ":", "_", -1) + nameForVar = strings.ReplaceAll(nameForVar, "-", "_") + nameForVar = strings.ReplaceAll(nameForVar, ":", "_") compCmd := ShellCompRequestCmd if !includeDesc { @@ -47,7 +47,7 @@ filter __%[1]s_escapeStringWithSpecialChars { `+" $_ -replace '\\s|#|@|\\$|;|,|''|\\{|\\}|\\(|\\)|\"|`|\\||<|>|&','`$&'"+` } -[scriptblock]$__%[2]sCompleterBlock = { +[scriptblock]${__%[2]sCompleterBlock} = { param( $WordToComplete, $CommandAst, @@ -122,7 +122,7 @@ filter 
__%[1]s_escapeStringWithSpecialChars { __%[1]s_debug "Calling $RequestComp" # First disable ActiveHelp which is not supported for Powershell - $env:%[10]s=0 + ${env:%[10]s}=0 #call the command store the output in $out and redirect stderr and stdout to null # $Out is an array contains each line per element @@ -279,7 +279,7 @@ filter __%[1]s_escapeStringWithSpecialChars { } } -Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock $__%[2]sCompleterBlock +Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock ${__%[2]sCompleterBlock} `, name, nameForVar, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name))) diff --git a/vendor/github.com/spf13/cobra/powershell_completions.md b/vendor/github.com/spf13/cobra/powershell_completions.md deleted file mode 100644 index c449f1e5c..000000000 --- a/vendor/github.com/spf13/cobra/powershell_completions.md +++ /dev/null @@ -1,3 +0,0 @@ -# Generating PowerShell Completions For Your Own cobra.Command - -Please refer to [Shell Completions](shell_completions.md#powershell-completions) for details. diff --git a/vendor/github.com/spf13/cobra/projects_using_cobra.md b/vendor/github.com/spf13/cobra/projects_using_cobra.md deleted file mode 100644 index 8a291eb20..000000000 --- a/vendor/github.com/spf13/cobra/projects_using_cobra.md +++ /dev/null @@ -1,64 +0,0 @@ -## Projects using Cobra - -- [Allero](https://github.com/allero-io/allero) -- [Arewefastyet](https://benchmark.vitess.io) -- [Arduino CLI](https://github.com/arduino/arduino-cli) -- [Bleve](https://blevesearch.com/) -- [Cilium](https://cilium.io/) -- [CloudQuery](https://github.com/cloudquery/cloudquery) -- [CockroachDB](https://www.cockroachlabs.com/) -- [Constellation](https://github.com/edgelesssys/constellation) -- [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) -- [Datree](https://github.com/datreeio/datree) -- [Delve](https://github.com/derekparker/delve) -- [Docker (distribution)](https://github.com/docker/distribution) -- [Etcd](https://etcd.io/) -- [Gardener](https://github.com/gardener/gardenctl) -- [Giant Swarm's gsctl](https://github.com/giantswarm/gsctl) -- [Git Bump](https://github.com/erdaltsksn/git-bump) -- [GitHub CLI](https://github.com/cli/cli) -- [GitHub Labeler](https://github.com/erdaltsksn/gh-label) -- [Golangci-lint](https://golangci-lint.run) -- [GopherJS](https://github.com/gopherjs/gopherjs) -- [GoReleaser](https://goreleaser.com) -- [Helm](https://helm.sh) -- [Hugo](https://gohugo.io) -- [Infracost](https://github.com/infracost/infracost) -- [Istio](https://istio.io) -- [Kool](https://github.com/kool-dev/kool) -- [Kubernetes](https://kubernetes.io/) -- [Kubescape](https://github.com/kubescape/kubescape) -- [KubeVirt](https://github.com/kubevirt/kubevirt) -- [Linkerd](https://linkerd.io/) -- [Mattermost-server](https://github.com/mattermost/mattermost-server) -- [Mercure](https://mercure.rocks/) -- [Meroxa CLI](https://github.com/meroxa/cli) -- [Metal Stack CLI](https://github.com/metal-stack/metalctl) -- [Moby (former Docker)](https://github.com/moby/moby) -- [Moldy](https://github.com/Moldy-Community/moldy) -- [Multi-gitter](https://github.com/lindell/multi-gitter) -- [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) -- [nFPM](https://nfpm.goreleaser.com) -- [Okteto](https://github.com/okteto/okteto) -- [OpenShift](https://www.openshift.com/) -- [Ory 
Hydra](https://github.com/ory/hydra) -- [Ory Kratos](https://github.com/ory/kratos) -- [Pixie](https://github.com/pixie-io/pixie) -- [Polygon Edge](https://github.com/0xPolygon/polygon-edge) -- [Pouch](https://github.com/alibaba/pouch) -- [ProjectAtomic (enterprise)](https://www.projectatomic.io/) -- [Prototool](https://github.com/uber/prototool) -- [Pulumi](https://www.pulumi.com) -- [QRcp](https://github.com/claudiodangelis/qrcp) -- [Random](https://github.com/erdaltsksn/random) -- [Rclone](https://rclone.org/) -- [Scaleway CLI](https://github.com/scaleway/scaleway-cli) -- [Sia](https://github.com/SiaFoundation/siad) -- [Skaffold](https://skaffold.dev/) -- [Tendermint](https://github.com/tendermint/tendermint) -- [Twitch CLI](https://github.com/twitchdev/twitch-cli) -- [UpCloud CLI (`upctl`)](https://github.com/UpCloudLtd/upcloud-cli) -- [Vitess](https://vitess.io) -- VMware's [Tanzu Community Edition](https://github.com/vmware-tanzu/community-edition) & [Tanzu Framework](https://github.com/vmware-tanzu/tanzu-framework) -- [Werf](https://werf.io/) -- [ZITADEL](https://github.com/zitadel/zitadel) diff --git a/vendor/github.com/spf13/cobra/shell_completions.md b/vendor/github.com/spf13/cobra/shell_completions.md deleted file mode 100644 index 065c0621d..000000000 --- a/vendor/github.com/spf13/cobra/shell_completions.md +++ /dev/null @@ -1,576 +0,0 @@ -# Generating shell completions - -Cobra can generate shell completions for multiple shells. -The currently supported shells are: -- Bash -- Zsh -- fish -- PowerShell - -Cobra will automatically provide your program with a fully functional `completion` command, -similarly to how it provides the `help` command. - -## Creating your own completion command - -If you do not wish to use the default `completion` command, you can choose to -provide your own, which will take precedence over the default one. (This also provides -backwards-compatibility with programs that already have their own `completion` command.) - -If you are using the `cobra-cli` generator, -which can be found at [spf13/cobra-cli](https://github.com/spf13/cobra-cli), -you can create a completion command by running - -```bash -cobra-cli add completion -``` -and then modifying the generated `cmd/completion.go` file to look something like this -(writing the shell script to stdout allows the most flexible use): - -```go -var completionCmd = &cobra.Command{ - Use: "completion [bash|zsh|fish|powershell]", - Short: "Generate completion script", - Long: fmt.Sprintf(`To load completions: - -Bash: - - $ source <(%[1]s completion bash) - - # To load completions for each session, execute once: - # Linux: - $ %[1]s completion bash > /etc/bash_completion.d/%[1]s - # macOS: - $ %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s - -Zsh: - - # If shell completion is not already enabled in your environment, - # you will need to enable it. You can execute the following once: - - $ echo "autoload -U compinit; compinit" >> ~/.zshrc - - # To load completions for each session, execute once: - $ %[1]s completion zsh > "${fpath[1]}/_%[1]s" - - # You will need to start a new shell for this setup to take effect. 
- -fish: - - $ %[1]s completion fish | source - - # To load completions for each session, execute once: - $ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish - -PowerShell: - - PS> %[1]s completion powershell | Out-String | Invoke-Expression - - # To load completions for every new session, run: - PS> %[1]s completion powershell > %[1]s.ps1 - # and source this file from your PowerShell profile. -`,cmd.Root().Name()), - DisableFlagsInUseLine: true, - ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, - Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs), - Run: func(cmd *cobra.Command, args []string) { - switch args[0] { - case "bash": - cmd.Root().GenBashCompletion(os.Stdout) - case "zsh": - cmd.Root().GenZshCompletion(os.Stdout) - case "fish": - cmd.Root().GenFishCompletion(os.Stdout, true) - case "powershell": - cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout) - } - }, -} -``` - -**Note:** The cobra generator may include messages printed to stdout, for example, if the config file is loaded; this will break the auto-completion script so must be removed. - -## Adapting the default completion command - -Cobra provides a few options for the default `completion` command. To configure such options you must set -the `CompletionOptions` field on the *root* command. - -To tell Cobra *not* to provide the default `completion` command: -``` -rootCmd.CompletionOptions.DisableDefaultCmd = true -``` - -To tell Cobra to mark the default `completion` command as *hidden*: -``` -rootCmd.CompletionOptions.HiddenDefaultCmd = true -``` - -To tell Cobra *not* to provide the user with the `--no-descriptions` flag to the completion sub-commands: -``` -rootCmd.CompletionOptions.DisableNoDescFlag = true -``` - -To tell Cobra to completely disable descriptions for completions: -``` -rootCmd.CompletionOptions.DisableDescriptions = true -``` - -# Customizing completions - -The generated completion scripts will automatically handle completing commands and flags. However, you can make your completions much more powerful by providing information to complete your program's nouns and flag values. - -## Completion of nouns - -### Static completion of nouns - -Cobra allows you to provide a pre-defined list of completion choices for your nouns using the `ValidArgs` field. -For example, if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. -Some simplified code from `kubectl get` looks like: - -```go -validArgs = []string{ "pod", "node", "service", "replicationcontroller" } - -cmd := &cobra.Command{ - Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)", - Short: "Display one or many resources", - Long: get_long, - Example: get_example, - Run: func(cmd *cobra.Command, args []string) { - cobra.CheckErr(RunGet(f, out, cmd, args)) - }, - ValidArgs: validArgs, -} -``` - -Notice we put the `ValidArgs` field on the `get` sub-command. Doing so will give results like: - -```bash -$ kubectl get [tab][tab] -node pod replicationcontroller service -``` - -#### Aliases for nouns - -If your nouns have aliases, you can define them alongside `ValidArgs` using `ArgAliases`: - -```go -argAliases = []string { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" } - -cmd := &cobra.Command{ - ... - ValidArgs: validArgs, - ArgAliases: argAliases -} -``` - -The aliases are shown to the user on tab completion only if no completions were found within sub-commands or `ValidArgs`. 
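Before moving on to dynamic completions, note that `ValidArgs` does double duty: it also drives argument validation through `cobra.OnlyValidArgs`, and, per the `args.go` change earlier in this diff, anything after a tab character is treated as a description and stripped before matching. A short sketch combining the two:

```go
cmd := &cobra.Command{
	Use:  "get RESOURCE",
	Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
	// The text after each \t is shown as a completion description but is
	// ignored by OnlyValidArgs, which matches only the part before the tab.
	ValidArgs: []string{"pod\tList pods", "node\tList nodes"},
	Run:       func(cmd *cobra.Command, args []string) {},
}
```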
- -### Dynamic completion of nouns - -In some cases it is not possible to provide a list of completions in advance. Instead, the list of completions must be determined at execution-time. In a similar fashion as for static completions, you can use the `ValidArgsFunction` field to provide a Go function that Cobra will execute when it needs the list of completion choices for the nouns of a command. Note that either `ValidArgs` or `ValidArgsFunction` can be used for a single cobra command, but not both. -Simplified code from `helm status` looks like: - -```go -cmd := &cobra.Command{ - Use: "status RELEASE_NAME", - Short: "Display the status of the named release", - Long: status_long, - RunE: func(cmd *cobra.Command, args []string) error { - return RunGet(args[0]) - }, - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - if len(args) != 0 { - return nil, cobra.ShellCompDirectiveNoFileComp - } - return getReleasesFromCluster(toComplete), cobra.ShellCompDirectiveNoFileComp - }, -} -``` -Where `getReleasesFromCluster()` is a Go function that obtains the list of current Helm releases running on the Kubernetes cluster. -Notice we put the `ValidArgsFunction` on the `status` sub-command. Let's assume the Helm releases on the cluster are: `harbor`, `notary`, `rook` and `thanos`; then this dynamic completion will give results like: - -```bash -$ helm status [tab][tab] -harbor notary rook thanos -``` -You may have noticed the use of `cobra.ShellCompDirective`. These directives are bit fields allowing you to control some shell completion behaviors for your particular completion. You can combine them with the bit-or operator such as `cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp` -```go -// Indicates that the shell will perform its default behavior after completions -// have been provided (this implies none of the other directives). -ShellCompDirectiveDefault - -// Indicates an error occurred and completions should be ignored. -ShellCompDirectiveError - -// Indicates that the shell should not add a space after the completion, -// even if there is a single completion provided. -ShellCompDirectiveNoSpace - -// Indicates that the shell should not provide file completion even when -// no completion is provided. -ShellCompDirectiveNoFileComp - -// Indicates that the returned completions should be used as file extension filters. -// For example, to complete only files of the form *.json or *.yaml: -// return []string{"yaml", "json"}, ShellCompDirectiveFilterFileExt -// For flags, using MarkFlagFilename() and MarkPersistentFlagFilename() -// is a shortcut to using this directive explicitly. -// -ShellCompDirectiveFilterFileExt - -// Indicates that only directory names should be provided in file completion. -// For example: -// return nil, ShellCompDirectiveFilterDirs -// For flags, using MarkFlagDirname() is a shortcut to using this directive explicitly. -// -// To request directory names within another directory, the returned completions -// should specify a single directory name within which to search.
For example, -// to complete directories within "themes/": -// return []string{"themes"}, ShellCompDirectiveFilterDirs -// -ShellCompDirectiveFilterDirs - -// ShellCompDirectiveKeepOrder indicates that the shell should preserve the order -// in which the completions are provided -ShellCompDirectiveKeepOrder -``` - -***Note***: When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line. You therefore don't need to do this parsing yourself. For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function. - -#### Debugging - -Cobra achieves dynamic completion through the use of a hidden command called by the completion script. To debug your Go completion code, you can call this hidden command directly: -```bash -$ helm __complete status har -harbor -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -***Important:*** If the noun to complete is empty (when the user has not yet typed any letters of that noun), you must pass an empty parameter to the `__complete` command: -```bash -$ helm __complete status "" -harbor -notary -rook -thanos -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -Calling the `__complete` command directly allows you to run the Go debugger to troubleshoot your code. You can also add printouts to your code; Cobra provides the following functions to use for printouts in Go completion code: -```go -// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE -// is set to a file path) and optionally prints to stderr. -cobra.CompDebug(msg string, printToStdErr bool) -cobra.CompDebugln(msg string, printToStdErr bool) - -// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE -// is set to a file path) and to stderr. -cobra.CompError(msg string) -cobra.CompErrorln(msg string) -``` -***Important:*** You should **not** leave traces that print directly to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging trace functions mentioned above. - -## Completions for flags - -### Mark flags as required - -Most of the time completions will only show sub-commands. But if a flag is required to make a sub-command work, you probably want it to show up when the user types [tab][tab]. You can mark a flag as 'Required' like so: - -```go -cmd.MarkFlagRequired("pod") -cmd.MarkFlagRequired("container") -``` - -and you'll get something like - -```bash -$ kubectl exec [tab][tab] --c --container= -p --pod= -``` - -### Specify dynamic flag completion - -As for nouns, Cobra provides a way of defining dynamic completion of flags. To provide a Go function that Cobra will execute when it needs the list of completion choices for a flag, you must register the function using the `command.RegisterFlagCompletionFunc()` function. - -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"json", "table", "yaml"}, cobra.ShellCompDirectiveDefault -}) -``` -Notice that calling `RegisterFlagCompletionFunc()` is done through the `command` with which the flag is associated.
In our example this dynamic completion will give results like so: - -```bash -$ helm status --output [tab][tab] -json table yaml -``` - -#### Debugging - -You can also easily debug your Go completion code for flags: -```bash -$ helm __complete status --output "" -json -table -yaml -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -***Important:*** You should **not** leave traces that print to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging traces functions mentioned further above. - -### Specify valid filename extensions for flags that take a filename - -To limit completions of flag values to file names with certain extensions you can either use the different `MarkFlagFilename()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterFileExt`, like so: -```go -flagName := "output" -cmd.MarkFlagFilename(flagName, "yaml", "json") -``` -or -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"yaml", "json"}, ShellCompDirectiveFilterFileExt}) -``` - -### Limit flag completions to directory names - -To limit completions of flag values to directory names you can either use the `MarkFlagDirname()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs`, like so: -```go -flagName := "output" -cmd.MarkFlagDirname(flagName) -``` -or -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return nil, cobra.ShellCompDirectiveFilterDirs -}) -``` -To limit completions of flag values to directory names *within another directory* you can use a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs` like so: -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"themes"}, cobra.ShellCompDirectiveFilterDirs -}) -``` -### Descriptions for completions - -Cobra provides support for completion descriptions. Such descriptions are supported for each shell -(however, for bash, it is only available in the [completion V2 version](#bash-completion-v2)). -For commands and flags, Cobra will provide the descriptions automatically, based on usage information. -For example, using zsh: -``` -$ helm s[tab] -search -- search for a keyword in charts -show -- show information of a chart -status -- displays the status of the named release -``` -while using fish: -``` -$ helm s[tab] -search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) -``` - -Cobra allows you to add descriptions to your own completions. Simply add the description text after each completion, following a `\t` separator. This technique applies to completions returned by `ValidArgs`, `ValidArgsFunction` and `RegisterFlagCompletionFunc()`. 
For example: -```go -ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"harbor\tAn image registry", "thanos\tLong-term metrics"}, cobra.ShellCompDirectiveNoFileComp -}} -``` -or -```go -ValidArgs: []string{"bash\tCompletions for bash", "zsh\tCompletions for zsh"} -``` - -If you don't want to show descriptions in the completions, you can add `--no-descriptions` to the default `completion` command to disable them, like: - -```bash -$ source <(helm completion bash) -$ helm completion [tab][tab] -bash (generate autocompletion script for bash) powershell (generate autocompletion script for powershell) -fish (generate autocompletion script for fish) zsh (generate autocompletion script for zsh) - -$ source <(helm completion bash --no-descriptions) -$ helm completion [tab][tab] -bash fish powershell zsh -``` -## Bash completions - -### Dependencies - -The bash completion script generated by Cobra requires the `bash_completion` package. You should update the help text of your completion command to show how to install the `bash_completion` package ([Kubectl docs](https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion)) - -### Aliases - -You can also configure `bash` aliases for your program and they will also support completions. - -```bash -alias aliasname=origcommand -complete -o default -F __start_origcommand aliasname - -# and now when you run `aliasname` completion will make -# suggestions as it did for `origcommand`. - -$ aliasname -completion firstcommand secondcommand -``` -### Bash legacy dynamic completions - -For backward compatibility, Cobra still supports its bash legacy dynamic completion solution. -Please refer to [Bash Completions](bash_completions.md) for details. - -### Bash completion V2 - -Cobra provides two versions for bash completion. The original bash completion (which started it all!) can be used by calling -`GenBashCompletion()` or `GenBashCompletionFile()`. - -A new V2 bash completion version is also available. This version can be used by calling `GenBashCompletionV2()` or -`GenBashCompletionFileV2()`. The V2 version does **not** support the legacy dynamic completion -(see [Bash Completions](bash_completions.md)) but instead works only with the Go dynamic completion -solution described in this document. -Unless your program already uses the legacy dynamic completion solution, it is recommended that you use the bash -completion V2 solution which provides the following extra features: -- Supports completion descriptions (like the other shells) -- Small completion script of less than 300 lines (v1 generates scripts of thousands of lines; `kubectl` for example has a bash v1 completion script of over 13K lines) -- Streamlined user experience thanks to a completion behavior aligned with the other shells - -`Bash` completion V2 supports descriptions for completions. When calling `GenBashCompletionV2()` or `GenBashCompletionFileV2()` -you must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra -will provide the description automatically based on usage information. You can choose to make this option configurable by -your users. 
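For instance, a minimal sketch of a custom bash completion sub-command that exposes this choice to users (the `--no-descriptions` flag name is illustrative; assumes the usual `os` and `cobra` imports):

```go
var noDesc bool

bashCompCmd := &cobra.Command{
	Use:   "bash",
	Short: "Generate the autocompletion script for bash",
	RunE: func(cmd *cobra.Command, args []string) error {
		// The boolean parameter controls whether the generated V2 script
		// annotates completions with descriptions.
		return cmd.Root().GenBashCompletionV2(os.Stdout, !noDesc)
	},
}
bashCompCmd.Flags().BoolVar(&noDesc, "no-descriptions", false, "disable completion descriptions")
```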
-
-```
-# With descriptions
-$ helm s[tab][tab]
-search (search for a keyword in charts)    status (display the status of the named release)
-show (show information of a chart)
-
-# Without descriptions
-$ helm s[tab][tab]
-search  show  status
-```
-**Note**: Cobra's default `completion` command uses bash completion V2. If for some reason you need to use bash completion V1, you will need to implement your own `completion` command.
-## Zsh completions
-
-Cobra supports native zsh completion generated from the root `cobra.Command`.
-The generated completion script should be put somewhere in your `$fpath` and be named
-`_<programName>`. You will need to start a new shell for the completions to become available.
-
-Zsh supports descriptions for completions. Cobra will provide the description automatically,
-based on usage information. Cobra provides a way to completely disable such descriptions by
-using `GenZshCompletionNoDesc()` or `GenZshCompletionFileNoDesc()`. You can choose to make
-this a configurable option for your users.
-```
-# With descriptions
-$ helm s[tab]
-search  --  search for a keyword in charts
-show    --  show information of a chart
-status  --  displays the status of the named release
-
-# Without descriptions
-$ helm s[tab]
-search  show  status
-```
-*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`.
-
-### Limitations
-
-* Custom completions implemented in Bash scripting (legacy) are not supported and will be ignored for `zsh` (including the use of the `BashCompCustom` flag annotation).
-  * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`).
-* The function `MarkFlagCustom()` is not supported and will be ignored for `zsh`.
-  * You should instead use `RegisterFlagCompletionFunc()`.
-
-### Zsh completions standardization
-
-Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backward-compatible, some small changes in behavior were introduced.
-Please refer to [Zsh Completions](zsh_completions.md) for details.
-
-## fish completions
-
-Cobra supports native fish completions generated from the root `cobra.Command`. You can use the `command.GenFishCompletion()` or `command.GenFishCompletionFile()` functions. You must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users.
-```
-# With descriptions
-$ helm s[tab]
-search (search for a keyword in charts)  show (show information of a chart)  status (displays the status of the named release)
-
-# Without descriptions
-$ helm s[tab]
-search  show  status
-```
-*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`.
-
-### Limitations
-
-* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `fish` (including the use of the `BashCompCustom` flag annotation).
-  * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`).
-* The function `MarkFlagCustom()` is not supported and will be ignored for `fish`.
-  * You should instead use `RegisterFlagCompletionFunc()`.
-* The following flag completion annotations are not supported and will be ignored for `fish`:
-  * `BashCompFilenameExt` (filtering by file extension)
-  * `BashCompSubdirsInDir` (filtering by directory)
-* The functions corresponding to the above annotations are consequently not supported and will be ignored for `fish`:
-  * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension)
-  * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory)
-* Similarly, the following completion directives are not supported and will be ignored for `fish`:
-  * `ShellCompDirectiveFilterFileExt` (filtering by file extension)
-  * `ShellCompDirectiveFilterDirs` (filtering by directory)
-
-## PowerShell completions
-
-Cobra supports native PowerShell completions generated from the root `cobra.Command`. You can use the `command.GenPowerShellCompletion()` or `command.GenPowerShellCompletionFile()` functions. To include descriptions use `command.GenPowerShellCompletionWithDesc()` and `command.GenPowerShellCompletionFileWithDesc()`. Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users.
-
-The script is designed to support all three PowerShell completion modes:
-
-* TabCompleteNext (default Windows style - on each key press the next option is displayed)
-* Complete (works like bash)
-* MenuComplete (works like zsh)
-
-You set the mode with `Set-PSReadLineKeyHandler -Key Tab -Function <mode>`. Descriptions are only displayed when using the `Complete` or `MenuComplete` mode.
-
-Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` automatic variable. See `Get-Help about_Profiles` for more info about PowerShell profiles.
-
-```
-# With descriptions and Mode 'Complete'
-$ helm s[tab]
-search (search for a keyword in charts)  show (show information of a chart)  status (displays the status of the named release)
-
-# With descriptions and Mode 'MenuComplete'. The description of the currently selected value will be displayed below the suggestions.
-$ helm s[tab]
-search    show    status
-
-search for a keyword in charts
-
-# Without descriptions
-$ helm s[tab]
-search  show  status
-```
-### Aliases
-
-You can also configure `powershell` aliases for your program and they will also support completions.
-
-```
-$ sal aliasname origcommand
-$ Register-ArgumentCompleter -CommandName 'aliasname' -ScriptBlock $__origcommandCompleterBlock
-
-# and now when you run `aliasname` completion will make
-# suggestions as it did for `origcommand`.
-
-$ aliasname [tab]
-completion  firstcommand  secondcommand
-```
-The name of the completer block variable is of the form `$__<programName>CompleterBlock` where every `-` and `:` in the program name have been replaced with `_`, to respect powershell naming syntax.
-
-### Limitations
-
-* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `powershell` (including the use of the `BashCompCustom` flag annotation).
-  * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`).
-* The function `MarkFlagCustom()` is not supported and will be ignored for `powershell`.
- * You should instead use `RegisterFlagCompletionFunc()`. -* The following flag completion annotations are not supported and will be ignored for `powershell`: - * `BashCompFilenameExt` (filtering by file extension) - * `BashCompSubdirsInDir` (filtering by directory) -* The functions corresponding to the above annotations are consequently not supported and will be ignored for `powershell`: - * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension) - * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory) -* Similarly, the following completion directives are not supported and will be ignored for `powershell`: - * `ShellCompDirectiveFilterFileExt` (filtering by file extension) - * `ShellCompDirectiveFilterDirs` (filtering by directory) diff --git a/vendor/github.com/spf13/cobra/user_guide.md b/vendor/github.com/spf13/cobra/user_guide.md deleted file mode 100644 index 85201d840..000000000 --- a/vendor/github.com/spf13/cobra/user_guide.md +++ /dev/null @@ -1,726 +0,0 @@ -# User Guide - -While you are welcome to provide your own organization, typically a Cobra-based -application will follow the following organizational structure: - -``` - ▾ appName/ - ▾ cmd/ - add.go - your.go - commands.go - here.go - main.go -``` - -In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra. - -```go -package main - -import ( - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -## Using the Cobra Generator - -Cobra-CLI is its own program that will create your application and add any -commands you want. It's the easiest way to incorporate Cobra into your application. - -For complete details on using the Cobra generator, please refer to [The Cobra-CLI Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md) - -## Using the Cobra Library - -To manually implement Cobra you need to create a bare main.go file and a rootCmd file. -You will optionally provide additional commands as you see fit. - -### Create rootCmd - -Cobra doesn't require any special constructors. Simply create your commands. - -Ideally you place this in app/cmd/root.go: - -```go -var rootCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - Complete documentation is available at https://gohugo.io/documentation/`, - Run: func(cmd *cobra.Command, args []string) { - // Do Stuff Here - }, -} - -func Execute() { - if err := rootCmd.Execute(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} -``` - -You will additionally define flags and handle configuration in your init() function. - -For example cmd/root.go: - -```go -package cmd - -import ( - "fmt" - "os" - - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var ( - // Used for flags. - cfgFile string - userLicense string - - rootCmd = &cobra.Command{ - Use: "cobra-cli", - Short: "A generator for Cobra based Applications", - Long: `Cobra is a CLI library for Go that empowers applications. -This application is a tool to generate the needed files -to quickly create a Cobra application.`, - } -) - -// Execute executes the root command. 
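-// It is called by main.main() and only needs to happen once.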
-func Execute() error { - return rootCmd.Execute() -} - -func init() { - cobra.OnInitialize(initConfig) - - rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") - rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution") - rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project") - rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) - viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper")) - viper.SetDefault("author", "NAME HERE ") - viper.SetDefault("license", "apache") - - rootCmd.AddCommand(addCmd) - rootCmd.AddCommand(initCmd) -} - -func initConfig() { - if cfgFile != "" { - // Use config file from the flag. - viper.SetConfigFile(cfgFile) - } else { - // Find home directory. - home, err := os.UserHomeDir() - cobra.CheckErr(err) - - // Search config in home directory with name ".cobra" (without extension). - viper.AddConfigPath(home) - viper.SetConfigType("yaml") - viper.SetConfigName(".cobra") - } - - viper.AutomaticEnv() - - if err := viper.ReadInConfig(); err == nil { - fmt.Println("Using config file:", viper.ConfigFileUsed()) - } -} -``` - -### Create your main.go - -With the root command you need to have your main function execute it. -Execute should be run on the root for clarity, though it can be called on any command. - -In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra. - -```go -package main - -import ( - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -### Create additional commands - -Additional commands can be defined and typically are each given their own file -inside of the cmd/ directory. - -If you wanted to create a version command you would create cmd/version.go and -populate it with the following: - -```go -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(versionCmd) -} - -var versionCmd = &cobra.Command{ - Use: "version", - Short: "Print the version number of Hugo", - Long: `All software has versions. This is Hugo's`, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") - }, -} -``` - -### Organizing subcommands - -A command may have subcommands which in turn may have other subcommands. This is achieved by using -`AddCommand`. In some cases, especially in larger applications, each subcommand may be defined in -its own go package. - -The suggested approach is for the parent command to use `AddCommand` to add its most immediate -subcommands. For example, consider the following directory structure: - -```text -├── cmd -│   ├── root.go -│   └── sub1 -│   ├── sub1.go -│   └── sub2 -│   ├── leafA.go -│   ├── leafB.go -│   └── sub2.go -└── main.go -``` - -In this case: - -* The `init` function of `root.go` adds the command defined in `sub1.go` to the root command. -* The `init` function of `sub1.go` adds the command defined in `sub2.go` to the sub1 command. -* The `init` function of `sub2.go` adds the commands defined in `leafA.go` and `leafB.go` to the - sub2 command. - -This approach ensures the subcommands are always included at compile time while avoiding cyclic -references. - -### Returning and handling errors - -If you wish to return an error to the caller of a command, `RunE` can be used. 
- -```go -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(tryCmd) -} - -var tryCmd = &cobra.Command{ - Use: "try", - Short: "Try and possibly fail at something", - RunE: func(cmd *cobra.Command, args []string) error { - if err := someFunc(); err != nil { - return err - } - return nil - }, -} -``` - -The error can then be caught at the execute function call. - -## Working with Flags - -Flags provide modifiers to control how the action command operates. - -### Assign flags to a command - -Since the flags are defined and used in different locations, we need to -define a variable outside with the correct scope to assign the flag to -work with. - -```go -var Verbose bool -var Source string -``` - -There are two different approaches to assign a flag. - -### Persistent Flags - -A flag can be 'persistent', meaning that this flag will be available to the -command it's assigned to as well as every command under that command. For -global flags, assign a flag as a persistent flag on the root. - -```go -rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") -``` - -### Local Flags - -A flag can also be assigned locally, which will only apply to that specific command. - -```go -localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") -``` - -### Local Flag on Parent Commands - -By default, Cobra only parses local flags on the target command, and any local flags on -parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will -parse local flags on each command before executing the target command. - -```go -command := cobra.Command{ - Use: "print [OPTIONS] [COMMANDS]", - TraverseChildren: true, -} -``` - -### Bind Flags with Config - -You can also bind your flags with [viper](https://github.com/spf13/viper): -```go -var author string - -func init() { - rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) -} -``` - -In this example, the persistent flag `author` is bound with `viper`. -**Note**: the variable `author` will not be set to the value from config, -when the `--author` flag is provided by user. - -More in [viper documentation](https://github.com/spf13/viper#working-with-flags). - -### Required flags - -Flags are optional by default. If instead you wish your command to report an error -when a flag has not been set, mark it as required: -```go -rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkFlagRequired("region") -``` - -Or, for persistent flags: -```go -rootCmd.PersistentFlags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkPersistentFlagRequired("region") -``` - -### Flag Groups - -If you have different flags that must be provided together (e.g. 
if they provide the `--username` flag they MUST provide the `--password` flag as well) then -Cobra can enforce that requirement: -```go -rootCmd.Flags().StringVarP(&u, "username", "u", "", "Username (required if password is set)") -rootCmd.Flags().StringVarP(&pw, "password", "p", "", "Password (required if username is set)") -rootCmd.MarkFlagsRequiredTogether("username", "password") -``` - -You can also prevent different flags from being provided together if they represent mutually -exclusive options such as specifying an output format as either `--json` or `--yaml` but never both: -```go -rootCmd.Flags().BoolVar(&ofJson, "json", false, "Output in JSON") -rootCmd.Flags().BoolVar(&ofYaml, "yaml", false, "Output in YAML") -rootCmd.MarkFlagsMutuallyExclusive("json", "yaml") -``` - -In both of these cases: - - both local and persistent flags can be used - - **NOTE:** the group is only enforced on commands where every flag is defined - - a flag may appear in multiple groups - - a group may contain any number of flags - -## Positional and Custom Arguments - -Validation of positional arguments can be specified using the `Args` field of `Command`. -The following validators are built in: - -- Number of arguments: - - `NoArgs` - report an error if there are any positional args. - - `ArbitraryArgs` - accept any number of args. - - `MinimumNArgs(int)` - report an error if less than N positional args are provided. - - `MaximumNArgs(int)` - report an error if more than N positional args are provided. - - `ExactArgs(int)` - report an error if there are not exactly N positional args. - - `RangeArgs(min, max)` - report an error if the number of args is not between `min` and `max`. -- Content of the arguments: - - `OnlyValidArgs` - report an error if there are any positional args not specified in the `ValidArgs` field of `Command`, which can optionally be set to a list of valid values for positional args. - -If `Args` is undefined or `nil`, it defaults to `ArbitraryArgs`. - -Moreover, `MatchAll(pargs ...PositionalArgs)` enables combining existing checks with arbitrary other checks. -For instance, if you want to report an error if there are not exactly N positional args OR if there are any positional -args that are not in the `ValidArgs` field of `Command`, you can call `MatchAll` on `ExactArgs` and `OnlyValidArgs`, as -shown below: - -```go -var cmd = &cobra.Command{ - Short: "hello", - Args: cobra.MatchAll(cobra.ExactArgs(2), cobra.OnlyValidArgs), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hello, World!") - }, -} -``` - -It is possible to set any custom validator that satisfies `func(cmd *cobra.Command, args []string) error`. -For example: - -```go -var cmd = &cobra.Command{ - Short: "hello", - Args: func(cmd *cobra.Command, args []string) error { - // Optionally run one of the validators provided by cobra - if err := cobra.MinimumNArgs(1)(cmd, args); err != nil { - return err - } - // Run the custom validation logic - if myapp.IsValidColor(args[0]) { - return nil - } - return fmt.Errorf("invalid color specified: %s", args[0]) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hello, World!") - }, -} -``` - -## Example - -In the example below, we have defined three commands. Two are at the top level -and one (cmdTimes) is a child of one of the top commands. In this case the root -is not executable, meaning that a subcommand is required. This is accomplished -by not providing a 'Run' for the 'rootCmd'. - -We have only defined one flag for a single command. 
- -More documentation about flags is available at https://github.com/spf13/pflag - -```go -package main - -import ( - "fmt" - "strings" - - "github.com/spf13/cobra" -) - -func main() { - var echoTimes int - - var cmdPrint = &cobra.Command{ - Use: "print [string to print]", - Short: "Print anything to the screen", - Long: `print is for printing anything back to the screen. -For many years people have printed back to the screen.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdEcho = &cobra.Command{ - Use: "echo [string to echo]", - Short: "Echo anything to the screen", - Long: `echo is for echoing anything back. -Echo works a lot like print, except it has a child command.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Echo: " + strings.Join(args, " ")) - }, - } - - var cmdTimes = &cobra.Command{ - Use: "times [string to echo]", - Short: "Echo anything to the screen more times", - Long: `echo things multiple times back to the user by providing -a count and a string.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - for i := 0; i < echoTimes; i++ { - fmt.Println("Echo: " + strings.Join(args, " ")) - } - }, - } - - cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") - - var rootCmd = &cobra.Command{Use: "app"} - rootCmd.AddCommand(cmdPrint, cmdEcho) - cmdEcho.AddCommand(cmdTimes) - rootCmd.Execute() -} -``` - -For a more complete example of a larger application, please checkout [Hugo](https://gohugo.io/). - -## Help Command - -Cobra automatically adds a help command to your application when you have subcommands. -This will be called when a user runs 'app help'. Additionally, help will also -support all other commands as input. Say, for instance, you have a command called -'create' without any additional configuration; Cobra will work when 'app help -create' is called. Every command will automatically have the '--help' flag added. - -### Example - -The following output is automatically generated by Cobra. Nothing beyond the -command and flag definitions are needed. - - $ cobra-cli help - - Cobra is a CLI library for Go that empowers applications. - This application is a tool to generate the needed files - to quickly create a Cobra application. - - Usage: - cobra-cli [command] - - Available Commands: - add Add a command to a Cobra Application - completion Generate the autocompletion script for the specified shell - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra-cli - -l, --license string name of license for the project - --viper use Viper for configuration - - Use "cobra-cli [command] --help" for more information about a command. - - -Help is just a command like any other. There is no special logic or behavior -around it. In fact, you can provide your own if you want. - -### Grouping commands in help - -Cobra supports grouping of available commands in the help output. To group commands, each group must be explicitly -defined using `AddGroup()` on the parent command. Then a subcommand can be added to a group using the `GroupID` element -of that subcommand. The groups will appear in the help output in the same order as they are defined using different -calls to `AddGroup()`. 
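A minimal sketch (the group ID, title and command names are illustrative):

```go
rootCmd.AddGroup(&cobra.Group{ID: "management", Title: "Management Commands:"})

serveCmd := &cobra.Command{
	Use:     "serve",
	Short:   "Start the server",
	GroupID: "management", // listed under "Management Commands:" in help output
	Run:     func(cmd *cobra.Command, args []string) {},
}
rootCmd.AddCommand(serveCmd)
```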
If you use the generated `help` or `completion` commands, you can set their group ids using -`SetHelpCommandGroupId()` and `SetCompletionCommandGroupId()` on the root command, respectively. - -### Defining your own help - -You can provide your own Help command or your own template for the default command to use -with the following functions: - -```go -cmd.SetHelpCommand(cmd *Command) -cmd.SetHelpFunc(f func(*Command, []string)) -cmd.SetHelpTemplate(s string) -``` - -The latter two will also apply to any children commands. - -## Usage Message - -When the user provides an invalid flag or invalid command, Cobra responds by -showing the user the 'usage'. - -### Example -You may recognize this from the help above. That's because the default help -embeds the usage as part of its output. - - $ cobra-cli --invalid - Error: unknown flag: --invalid - Usage: - cobra-cli [command] - - Available Commands: - add Add a command to a Cobra Application - completion Generate the autocompletion script for the specified shell - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra-cli - -l, --license string name of license for the project - --viper use Viper for configuration - - Use "cobra [command] --help" for more information about a command. - -### Defining your own usage -You can provide your own usage function or template for Cobra to use. -Like help, the function and template are overridable through public methods: - -```go -cmd.SetUsageFunc(f func(*Command) error) -cmd.SetUsageTemplate(s string) -``` - -## Version Flag - -Cobra adds a top-level '--version' flag if the Version field is set on the root command. -Running an application with the '--version' flag will print the version to stdout using -the version template. The template can be customized using the -`cmd.SetVersionTemplate(s string)` function. - -## PreRun and PostRun Hooks - -It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: - -- `PersistentPreRun` -- `PreRun` -- `Run` -- `PostRun` -- `PersistentPostRun` - -An example of two commands which use all of these features is below. 
When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: - -```go -package main - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func main() { - - var rootCmd = &cobra.Command{ - Use: "root [sub]", - Short: "My root command", - PersistentPreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) - }, - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) - }, - } - - var subCmd = &cobra.Command{ - Use: "sub [no options!]", - Short: "My subcommand", - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) - }, - } - - rootCmd.AddCommand(subCmd) - - rootCmd.SetArgs([]string{""}) - rootCmd.Execute() - fmt.Println() - rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) - rootCmd.Execute() -} -``` - -Output: -``` -Inside rootCmd PersistentPreRun with args: [] -Inside rootCmd PreRun with args: [] -Inside rootCmd Run with args: [] -Inside rootCmd PostRun with args: [] -Inside rootCmd PersistentPostRun with args: [] - -Inside rootCmd PersistentPreRun with args: [arg1 arg2] -Inside subCmd PreRun with args: [arg1 arg2] -Inside subCmd Run with args: [arg1 arg2] -Inside subCmd PostRun with args: [arg1 arg2] -Inside subCmd PersistentPostRun with args: [arg1 arg2] -``` - -## Suggestions when "unknown command" happens - -Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example: - -``` -$ hugo srever -Error: unknown command "srever" for "hugo" - -Did you mean this? - server - -Run 'hugo --help' for usage. -``` - -Suggestions are automatically generated based on existing subcommands and use an implementation of [Levenshtein distance](https://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. - -If you need to disable suggestions or tweak the string distance in your command, use: - -```go -command.DisableSuggestions = true -``` - -or - -```go -command.SuggestionsMinimumDistance = 1 -``` - -You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but make sense in your set of commands but for which -you don't want aliases. Example: - -``` -$ kubectl remove -Error: unknown command "remove" for "kubectl" - -Did you mean this? - delete - -Run 'kubectl help' for usage. -``` - -## Generating documentation for your command - -Cobra can generate documentation based on subcommands, flags, etc. 
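For instance, a minimal sketch using the companion `cobra/doc` package (the output directory is illustrative):

```go
import (
	"log"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func genDocs(rootCmd *cobra.Command) {
	// GenMarkdownTree writes one markdown file per command in the tree.
	if err := doc.GenMarkdownTree(rootCmd, "./docs"); err != nil {
		log.Fatal(err)
	}
}
```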
Read more about it in the [docs generation documentation](doc/README.md). - -## Generating shell completions - -Cobra can generate a shell-completion file for the following shells: bash, zsh, fish, PowerShell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md). - -## Providing Active Help - -Cobra makes use of the shell-completion system to define a framework allowing you to provide Active Help to your users. Active Help are messages (hints, warnings, etc) printed as the program is being used. Read more about it in [Active Help](active_help.md). diff --git a/vendor/github.com/spf13/cobra/zsh_completions.md b/vendor/github.com/spf13/cobra/zsh_completions.md deleted file mode 100644 index 7cff61787..000000000 --- a/vendor/github.com/spf13/cobra/zsh_completions.md +++ /dev/null @@ -1,48 +0,0 @@ -## Generating Zsh Completion For Your cobra.Command - -Please refer to [Shell Completions](shell_completions.md) for details. - -## Zsh completions standardization - -Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backwards-compatible, some small changes in behavior were introduced. - -### Deprecation summary - -See further below for more details on these deprecations. - -* `cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` is no longer needed. It is therefore **deprecated** and silently ignored. -* `cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` is **deprecated** and silently ignored. - * Instead use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt`. -* `cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored. - * Instead use `ValidArgsFunction`. 
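As an illustration, a sketch of migrating a deprecated zsh-only call to its portable equivalent (the completion words are illustrative):

```go
// Before (zsh-only, now deprecated and silently ignored):
//   cmd.MarkZshCompPositionalArgumentWords(1, "json", "yaml")

// After (portable to bash, zsh, fish and powershell):
cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	if len(args) != 0 {
		return nil, cobra.ShellCompDirectiveNoFileComp
	}
	return []string{"json", "yaml"}, cobra.ShellCompDirectiveNoFileComp
}
```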
- -### Behavioral changes - -**Noun completion** -|Old behavior|New behavior| -|---|---| -|No file completion by default (opposite of bash)|File completion by default; use `ValidArgsFunction` with `ShellCompDirectiveNoFileComp` to turn off file completion on a per-argument basis| -|Completion of flag names without the `-` prefix having been typed|Flag names are only completed if the user has typed the first `-`| -`cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` used to turn on file completion on a per-argument position basis|File completion for all arguments by default; `cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored| -|`cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` used to turn on file completion **with glob filtering** on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored; use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt` for file **extension** filtering (not full glob filtering)| -|`cmd.MarkZshCompPositionalArgumentWords(pos, words[])` used to provide completion choices on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored; use `ValidArgsFunction` to achieve the same behavior| - -**Flag-value completion** - -|Old behavior|New behavior| -|---|---| -|No file completion by default (opposite of bash)|File completion by default; use `RegisterFlagCompletionFunc()` with `ShellCompDirectiveNoFileComp` to turn off file completion| -|`cmd.MarkFlagFilename(flag, []string{})` and similar used to turn on file completion|File completion by default; `cmd.MarkFlagFilename(flag, []string{})` no longer needed in this context and silently ignored| -|`cmd.MarkFlagFilename(flag, glob[])` used to turn on file completion **with glob filtering** (syntax of `[]string{"*.yaml", "*.yml"}` incompatible with bash)|Will continue to work, however, support for bash syntax is added and should be used instead so as to work for all shells (`[]string{"yaml", "yml"}`)| -|`cmd.MarkFlagDirname(flag)` only completes directories (zsh-specific)|Has been added for all shells| -|Completion of a flag name does not repeat, unless flag is of type `*Array` or `*Slice` (not supported by bash)|Retained for `zsh` and added to `fish`| -|Completion of a flag name does not provide the `=` form (unlike bash)|Retained for `zsh` and added to `fish`| - -**Improvements** - -* Custom completion support (`ValidArgsFunction` and `RegisterFlagCompletionFunc()`) -* File completion by default if no other completions found -* Handling of required flags -* File extension filtering no longer mutually exclusive with bash usage -* Completion of directory names *within* another directory -* Support for `=` form of flags diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index b774da88d..4d4b4aad6 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -28,6 +28,8 @@ var ( uint32Type = reflect.TypeOf(uint32(1)) uint64Type = reflect.TypeOf(uint64(1)) + uintptrType = reflect.TypeOf(uintptr(1)) + float32Type = reflect.TypeOf(float32(1)) float64Type = reflect.TypeOf(float64(1)) @@ -308,11 +310,11 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { case reflect.Struct: { // All structs enter here. We're not interested in most types. 
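// Only structs that are convertible to time.Time are handled here;
// any other struct kind falls through and is reported as not comparable.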
- if !canConvert(obj1Value, timeType) { + if !obj1Value.CanConvert(timeType) { break } - // time.Time can compared! + // time.Time can be compared! timeObj1, ok := obj1.(time.Time) if !ok { timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time) @@ -328,7 +330,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { case reflect.Slice: { // We only care about the []byte type. - if !canConvert(obj1Value, bytesType) { + if !obj1Value.CanConvert(bytesType) { break } @@ -345,6 +347,26 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true } + case reflect.Uintptr: + { + uintptrObj1, ok := obj1.(uintptr) + if !ok { + uintptrObj1 = obj1Value.Convert(uintptrType).Interface().(uintptr) + } + uintptrObj2, ok := obj2.(uintptr) + if !ok { + uintptrObj2 = obj2Value.Convert(uintptrType).Interface().(uintptr) + } + if uintptrObj1 > uintptrObj2 { + return compareGreater, true + } + if uintptrObj1 == uintptrObj2 { + return compareEqual, true + } + if uintptrObj1 < uintptrObj2 { + return compareLess, true + } + } } return compareEqual, false diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go deleted file mode 100644 index da867903e..000000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build go1.17 -// +build go1.17 - -// TODO: once support for Go 1.16 is dropped, this file can be -// merged/removed with assertion_compare_go1.17_test.go and -// assertion_compare_legacy.go - -package assert - -import "reflect" - -// Wrapper around reflect.Value.CanConvert, for compatibility -// reasons. -func canConvert(value reflect.Value, to reflect.Type) bool { - return value.CanConvert(to) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go deleted file mode 100644 index 1701af2a3..000000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !go1.17 -// +build !go1.17 - -// TODO: once support for Go 1.16 is dropped, this file can be -// merged/removed with assertion_compare_go1.17_test.go and -// assertion_compare_can_convert.go - -package assert - -import "reflect" - -// Older versions of Go does not have the reflect.Value.CanConvert -// method. -func canConvert(value reflect.Value, to reflect.Type) bool { - return false -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 84dbd6c79..3ddab109a 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package assert @@ -107,7 +104,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. 
// // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") @@ -616,6 +613,16 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...) } +// NotImplementsf asserts that an object does not implement the specified interface. +// +// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotImplements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) +} + // NotNilf asserts that the specified object is not nil. // // assert.NotNilf(t, err, "error message %s", "formatted") @@ -660,10 +667,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -747,10 +756,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg return Same(t, expected, actual, append([]interface{}{msg}, args...)...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index b1d94aec5..a84e09bd4 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package assert @@ -189,7 +186,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface return EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. 
// // a.EqualValues(uint32(123), int32(123)) @@ -200,7 +197,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn return EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") @@ -1221,6 +1218,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in return NotErrorIsf(a.t, err, target, msg, args...) } +// NotImplements asserts that an object does not implement the specified interface. +// +// a.NotImplements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotImplements(a.t, interfaceObject, object, msgAndArgs...) +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotImplementsf(a.t, interfaceObject, object, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -1309,10 +1326,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri return NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// a.NotSubset([1, 3, 4], [1, 2]) +// a.NotSubset({"x": 1, "y": 2}, {"z": 3}) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1320,10 +1339,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs return NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1483,10 +1504,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, return Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) 
or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// a.Subset([1, 2, 3], [1, 2]) +// a.Subset({"x": 1, "y": 2}, {"x": 1}) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1494,10 +1516,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... return Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index a55d1bba9..0b7570f21 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -19,7 +19,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" - yaml "gopkg.in/yaml.v3" + "gopkg.in/yaml.v3" ) //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" @@ -110,7 +110,12 @@ func copyExportedFields(expected interface{}) interface{} { return result.Interface() case reflect.Array, reflect.Slice: - result := reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) + var result reflect.Value + if expectedKind == reflect.Array { + result = reflect.New(reflect.ArrayOf(expectedValue.Len(), expectedType.Elem())).Elem() + } else { + result = reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) + } for i := 0; i < expectedValue.Len(); i++ { index := expectedValue.Index(i) if isNil(index) { @@ -140,6 +145,8 @@ func copyExportedFields(expected interface{}) interface{} { // structures. // // This function does no assertion of any kind. +// +// Deprecated: Use [EqualExportedValues] instead. 
func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool { expectedCleaned := copyExportedFields(expected) actualCleaned := copyExportedFields(actual) @@ -153,17 +160,40 @@ func ObjectsAreEqualValues(expected, actual interface{}) bool { return true } - actualType := reflect.TypeOf(actual) - if actualType == nil { + expectedValue := reflect.ValueOf(expected) + actualValue := reflect.ValueOf(actual) + if !expectedValue.IsValid() || !actualValue.IsValid() { return false } - expectedValue := reflect.ValueOf(expected) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { + + expectedType := expectedValue.Type() + actualType := actualValue.Type() + if !expectedType.ConvertibleTo(actualType) { + return false + } + + if !isNumericType(expectedType) || !isNumericType(actualType) { // Attempt comparison after type conversion - return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) + return reflect.DeepEqual( + expectedValue.Convert(actualType).Interface(), actual, + ) } - return false + // If BOTH values are numeric, there are chances of false positives due + // to overflow or underflow. So, we need to make sure to always convert + // the smaller type to a larger type before comparing. + if expectedType.Size() >= actualType.Size() { + return actualValue.Convert(expectedType).Interface() == expected + } + + return expectedValue.Convert(actualType).Interface() == actual +} + +// isNumericType returns true if the type is one of: +// int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, +// float32, float64, complex64, complex128 +func isNumericType(t reflect.Type) bool { + return t.Kind() >= reflect.Int && t.Kind() <= reflect.Complex128 } /* CallerInfo is necessary because the assert functions use the testing object @@ -266,7 +296,7 @@ func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { // Aligns the provided message so that all lines after the first line start at the same location as the first line. // Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). -// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the +// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the // basis on which the alignment occurs). func indentMessageLines(message string, longestLabelLen int) string { outBuf := new(bytes.Buffer) @@ -382,6 +412,25 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg return true } +// NotImplements asserts that an object does not implement the specified interface. +// +// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + interfaceType := reflect.TypeOf(interfaceObject).Elem() + + if object == nil { + return Fail(t, fmt.Sprintf("Cannot check if nil does not implement %v", interfaceType), msgAndArgs...) + } + if reflect.TypeOf(object).Implements(interfaceType) { + return Fail(t, fmt.Sprintf("%T implements %v", object, interfaceType), msgAndArgs...) + } + + return true +} + // IsType asserts that the specified objects are of the same type. 
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -496,7 +545,7 @@ func samePointers(first, second interface{}) bool { // representations appropriate to be presented to the user. // // If the values are not of like type, the returned strings will be prefixed -// with the type name, and the value will be enclosed in parenthesis similar +// with the type name, and the value will be enclosed in parentheses similar // to a type conversion in the Go grammar. func formatUnequalValues(expected, actual interface{}) (e string, a string) { if reflect.TypeOf(expected) != reflect.TypeOf(actual) { @@ -523,7 +572,7 @@ func truncatingFormat(data interface{}) string { return value } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValues(t, uint32(123), int32(123)) @@ -566,12 +615,19 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs .. return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) } + if aType.Kind() == reflect.Ptr { + aType = aType.Elem() + } + if bType.Kind() == reflect.Ptr { + bType = bType.Elem() + } + if aType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) + return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) } if bType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) + return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) } expected = copyExportedFields(expected) @@ -620,17 +676,6 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { return Fail(t, "Expected value not to be nil.", msgAndArgs...) } -// containsKind checks if a specified kind in the slice of kinds. -func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool { - for i := 0; i < len(kinds); i++ { - if kind == kinds[i] { - return true - } - } - - return false -} - // isNil checks if a specified object is nil or not, without Failing. func isNil(object interface{}) bool { if object == nil { @@ -638,16 +683,13 @@ func isNil(object interface{}) bool { } value := reflect.ValueOf(object) - kind := value.Kind() - isNilableKind := containsKind( - []reflect.Kind{ - reflect.Chan, reflect.Func, - reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice, reflect.UnsafePointer}, - kind) - - if isNilableKind && value.IsNil() { - return true + switch value.Kind() { + case + reflect.Chan, reflect.Func, + reflect.Interface, reflect.Map, + reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + + return value.IsNil() } return false @@ -731,16 +773,14 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } -// getLen try to get length of object. -// return (false, 0) if impossible. -func getLen(x interface{}) (ok bool, length int) { +// getLen tries to get the length of an object. +// It returns (0, false) if impossible. 
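The EqualExportedValues change above now dereferences pointer types before the struct-kind check; a hedged sketch with a hypothetical user type:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

type user struct {
	Name  string // exported: compared
	token string // unexported: ignored
}

func TestEqualExportedValuesAcceptsPointers(t *testing.T) {
	// Pointers to structs are now accepted; only exported fields are
	// compared, so this passes even though the tokens differ.
	assert.EqualExportedValues(t, &user{Name: "ada", token: "a"}, &user{Name: "ada", token: "b"})
}
```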
+func getLen(x interface{}) (length int, ok bool) { v := reflect.ValueOf(x) defer func() { - if e := recover(); e != nil { - ok = false - } + ok = recover() == nil }() - return true, v.Len() + return v.Len(), true } // Len asserts that the specified object has specific length. @@ -751,13 +791,13 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - ok, l := getLen(object) + l, ok := getLen(object) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%v\" could not be applied builtin len()", object), msgAndArgs...) } if l != length { - return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%v\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) } return true } @@ -919,10 +959,11 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// assert.Subset(t, [1, 2, 3], [1, 2]) +// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -975,10 +1016,12 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// assert.NotSubset(t, [1, 3, 4], [1, 2]) +// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1439,7 +1482,7 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd h.Helper() } if math.IsNaN(epsilon) { - return Fail(t, "epsilon must not be NaN") + return Fail(t, "epsilon must not be NaN", msgAndArgs...) } actualEpsilon, err := calcRelativeError(expected, actual) if err != nil { @@ -1458,19 +1501,26 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m if h, ok := t.(tHelper); ok { h.Helper() } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { + + if expected == nil || actual == nil { return Fail(t, "Parameters must be slice", msgAndArgs...) } - actualSlice := reflect.ValueOf(actual) expectedSlice := reflect.ValueOf(expected) + actualSlice := reflect.ValueOf(actual) - for i := 0; i < actualSlice.Len(); i++ { - result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) - if !result { - return result + if expectedSlice.Type().Kind() != reflect.Slice { + return Fail(t, "Expected value must be slice", msgAndArgs...) 
+ } + + expectedLen := expectedSlice.Len() + if !IsType(t, expected, actual) || !Len(t, actual, expectedLen) { + return false + } + + for i := 0; i < expectedLen; i++ { + if !InEpsilon(t, expectedSlice.Index(i).Interface(), actualSlice.Index(i).Interface(), epsilon, "at index %d", i) { + return false } } @@ -1870,23 +1920,18 @@ func (c *CollectT) Errorf(format string, args ...interface{}) { } // FailNow panics. -func (c *CollectT) FailNow() { +func (*CollectT) FailNow() { panic("Assertion failed") } -// Reset clears the collected errors. -func (c *CollectT) Reset() { - c.errors = nil +// Deprecated: That was a method for internal usage that should not have been published. Now just panics. +func (*CollectT) Reset() { + panic("Reset() is deprecated") } -// Copy copies the collected errors to the supplied t. -func (c *CollectT) Copy(t TestingT) { - if tt, ok := t.(tHelper); ok { - tt.Helper() - } - for _, err := range c.errors { - t.Errorf("%v", err) - } +// Deprecated: That was a method for internal usage that should not have been published. Now just panics. +func (*CollectT) Copy(TestingT) { + panic("Copy() is deprecated") } // EventuallyWithT asserts that given condition will be met in waitFor time, @@ -1912,8 +1957,8 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time h.Helper() } - collect := new(CollectT) - ch := make(chan bool, 1) + var lastFinishedTickErrs []error + ch := make(chan []error, 1) timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1924,19 +1969,25 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time for tick := ticker.C; ; { select { case <-timer.C: - collect.Copy(t) + for _, err := range lastFinishedTickErrs { + t.Errorf("%v", err) + } return Fail(t, "Condition never satisfied", msgAndArgs...) case <-tick: tick = nil - collect.Reset() go func() { + collect := new(CollectT) + defer func() { + ch <- collect.errors + }() condition(collect) - ch <- len(collect.errors) == 0 }() - case v := <-ch: - if v { + case errs := <-ch: + if len(errs) == 0 { return true } + // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. + lastFinishedTickErrs = errs tick = ticker.C } } diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go index d8038c28a..861ed4b7c 100644 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -12,7 +12,7 @@ import ( // an error if building a new request fails. func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { w := httptest.NewRecorder() - req, err := http.NewRequest(method, url, nil) + req, err := http.NewRequest(method, url, http.NoBody) if err != nil { return -1, err } @@ -32,12 +32,12 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent if !isSuccessCode { - Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) 
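A plausible usage sketch for the reworked EventuallyWithT (condition, durations, and test name are invented for illustration):

```go
package example

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestEventuallyWithT(t *testing.T) {
	started := time.Now()
	// Each tick runs the condition in its own goroutine with a fresh
	// CollectT; if the timeout is reached, only the errors from the last
	// completed attempt are copied to t.
	assert.EventuallyWithT(t, func(c *assert.CollectT) {
		assert.True(c, time.Since(started) > 50*time.Millisecond)
	}, time.Second, 10*time.Millisecond)
}
```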
} return isSuccessCode @@ -54,12 +54,12 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect if !isRedirectCode { - Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isRedirectCode @@ -76,12 +76,12 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isErrorCode := code >= http.StatusBadRequest if !isErrorCode { - Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isErrorCode @@ -98,12 +98,12 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } successful := code == statuscode if !successful { - Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code), msgAndArgs...) } return successful @@ -113,7 +113,10 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va // empty string if building a new request fails. func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { w := httptest.NewRecorder() - req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if len(values) > 0 { + url += "?" + values.Encode() + } + req, err := http.NewRequest(method, url, http.NoBody) if err != nil { return "" } @@ -135,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, contains := strings.Contains(body, fmt.Sprint(str)) if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) } return contains @@ -155,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin contains := strings.Contains(body, fmt.Sprint(str)) if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) 
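The HTTP helpers now forward msgAndArgs to Fail, and HTTPBody omits the empty query string; an illustrative test with a made-up handler:

```go
package example

import (
	"net/http"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestHealthEndpoint(t *testing.T) {
	handler := func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}
	// The custom message is now propagated into the failure output.
	assert.HTTPSuccess(t, handler, "GET", "/health", nil, "expected the health endpoint to be up")
	// With no query values, HTTPBody no longer appends a stray "?".
	assert.Empty(t, assert.HTTPBody(handler, "GET", "/health", nil))
}
```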
} return !contains diff --git a/vendor/go.uber.org/zap/.golangci.yml b/vendor/go.uber.org/zap/.golangci.yml index fbc6df790..2346df135 100644 --- a/vendor/go.uber.org/zap/.golangci.yml +++ b/vendor/go.uber.org/zap/.golangci.yml @@ -17,7 +17,7 @@ linters: - unused # Our own extras: - - gofmt + - gofumpt - nolintlint # lints nolint directives - revive diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl index 92aa65d66..4fea3027a 100644 --- a/vendor/go.uber.org/zap/.readme.tmpl +++ b/vendor/go.uber.org/zap/.readme.tmpl @@ -1,7 +1,15 @@ # :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] +<div align="center">
+ Blazing fast, structured, leveled logging in Go. +![Zap logo](assets/logo.png) + +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +</div>
+ ## Installation `go get -u go.uber.org/zap` @@ -92,7 +100,7 @@ standard. <hr>
-Released under the [MIT License](LICENSE.txt). +Released under the [MIT License](LICENSE). 1 In particular, keep in mind that we may be benchmarking against slightly older versions of other packages. Versions are diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index 11b465976..6d6cd5f4d 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -3,14 +3,30 @@ All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## 1.27.0 (20 Feb 2024) +Enhancements: +* [#1378][]: Add `WithLazy` method for `SugaredLogger`. +* [#1399][]: zaptest: Add `NewTestingWriter` for customizing TestingWriter with more flexibility than `NewLogger`. +* [#1406][]: Add `Log`, `Logw`, `Logln` methods for `SugaredLogger`. +* [#1416][]: Add `WithPanicHook` option for testing panic logs. + +Thanks to @defval, @dimmo, @arxeiss, and @MKrupauskas for their contributions to this release. + +[#1378]: https://github.com/uber-go/zap/pull/1378 +[#1399]: https://github.com/uber-go/zap/pull/1399 +[#1406]: https://github.com/uber-go/zap/pull/1406 +[#1416]: https://github.com/uber-go/zap/pull/1416 + ## 1.26.0 (14 Sep 2023) Enhancements: +* [#1297][]: Add Dict as a Field. * [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured context. * [#1350][]: String encoding is much (~50%) faster now. -Thanks to @jquirke, @cdvr1993 for their contributions to this release. +Thanks to @hhk7734, @jquirke, and @cdvr1993 for their contributions to this release. +[#1297]: https://github.com/uber-go/zap/pull/1297 [#1319]: https://github.com/uber-go/zap/pull/1319 [#1350]: https://github.com/uber-go/zap/pull/1350 @@ -25,7 +41,7 @@ Enhancements: * [#1273][]: Add `Name` to `Logger` which returns the Logger's name if one is set. * [#1281][]: Add `zap/exp/expfield` package which contains helper methods `Str` and `Strs` for constructing String-like zap.Fields. -* [#1310][]: Reduce stack size on `Any`. +* [#1310][]: Reduce stack size on `Any`. Thanks to @knight42, @dzakaammar, @bcspragu, and @rexywork for their contributions to this release. @@ -352,7 +368,7 @@ to this release. [#675]: https://github.com/uber-go/zap/pull/675 [#704]: https://github.com/uber-go/zap/pull/704 -## v1.9.1 (06 Aug 2018) +## 1.9.1 (06 Aug 2018) Bugfixes: @@ -360,7 +376,7 @@ Bugfixes: [#614]: https://github.com/uber-go/zap/pull/614 -## v1.9.0 (19 Jul 2018) +## 1.9.0 (19 Jul 2018) Enhancements: * [#602][]: Reduce number of allocations when logging with reflection. @@ -373,7 +389,7 @@ Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and [#572]: https://github.com/uber-go/zap/pull/572 [#606]: https://github.com/uber-go/zap/pull/606 -## v1.8.0 (13 Apr 2018) +## 1.8.0 (13 Apr 2018) Enhancements: * [#508][]: Make log level configurable when redirecting the standard @@ -391,14 +407,14 @@ Thanks to @DiSiqueira and @djui for their contributions to this release. [#577]: https://github.com/uber-go/zap/pull/577 [#574]: https://github.com/uber-go/zap/pull/574 -## v1.7.1 (25 Sep 2017) +## 1.7.1 (25 Sep 2017) Bugfixes: * [#504][]: Store strings when using AddByteString with the map encoder. 
[#504]: https://github.com/uber-go/zap/pull/504 -## v1.7.0 (21 Sep 2017) +## 1.7.0 (21 Sep 2017) Enhancements: @@ -407,7 +423,7 @@ Enhancements: [#487]: https://github.com/uber-go/zap/pull/487 -## v1.6.0 (30 Aug 2017) +## 1.6.0 (30 Aug 2017) Enhancements: @@ -418,7 +434,7 @@ Enhancements: [#490]: https://github.com/uber-go/zap/pull/490 [#491]: https://github.com/uber-go/zap/pull/491 -## v1.5.0 (22 Jul 2017) +## 1.5.0 (22 Jul 2017) Enhancements: @@ -436,7 +452,7 @@ Thanks to @richard-tunein and @pavius for their contributions to this release. [#460]: https://github.com/uber-go/zap/pull/460 [#470]: https://github.com/uber-go/zap/pull/470 -## v1.4.1 (08 Jun 2017) +## 1.4.1 (08 Jun 2017) This release fixes two bugs. @@ -448,7 +464,7 @@ Bugfixes: [#435]: https://github.com/uber-go/zap/pull/435 [#444]: https://github.com/uber-go/zap/pull/444 -## v1.4.0 (12 May 2017) +## 1.4.0 (12 May 2017) This release adds a few small features and is fully backward-compatible. @@ -464,7 +480,7 @@ Enhancements: [#425]: https://github.com/uber-go/zap/pull/425 [#431]: https://github.com/uber-go/zap/pull/431 -## v1.3.0 (25 Apr 2017) +## 1.3.0 (25 Apr 2017) This release adds an enhancement to zap's testing helpers as well as the ability to marshal an AtomicLevel. It is fully backward-compatible. @@ -478,7 +494,7 @@ Enhancements: [#415]: https://github.com/uber-go/zap/pull/415 [#416]: https://github.com/uber-go/zap/pull/416 -## v1.2.0 (13 Apr 2017) +## 1.2.0 (13 Apr 2017) This release adds a gRPC compatibility wrapper. It is fully backward-compatible. @@ -489,7 +505,7 @@ Enhancements: [#402]: https://github.com/uber-go/zap/pull/402 -## v1.1.0 (31 Mar 2017) +## 1.1.0 (31 Mar 2017) This release fixes two bugs and adds some enhancements to zap's testing helpers. It is fully backward-compatible. @@ -510,7 +526,7 @@ Thanks to @moitias for contributing to this release. [#396]: https://github.com/uber-go/zap/pull/396 [#386]: https://github.com/uber-go/zap/pull/386 -## v1.0.0 (14 Mar 2017) +## 1.0.0 (14 Mar 2017) This is zap's first stable release. All exported APIs are now final, and no further breaking changes will be made in the 1.x release series. Anyone using a @@ -569,7 +585,7 @@ contributions to this release. [#365]: https://github.com/uber-go/zap/pull/365 [#372]: https://github.com/uber-go/zap/pull/372 -## v1.0.0-rc.3 (7 Mar 2017) +## 1.0.0-rc.3 (7 Mar 2017) This is the third release candidate for zap's stable release. There are no breaking changes. @@ -595,7 +611,7 @@ Thanks to @ansel1 and @suyash for their contributions to this release. [#353]: https://github.com/uber-go/zap/pull/353 [#311]: https://github.com/uber-go/zap/pull/311 -## v1.0.0-rc.2 (21 Feb 2017) +## 1.0.0-rc.2 (21 Feb 2017) This is the second release candidate for zap's stable release. It includes two breaking changes. @@ -641,7 +657,7 @@ Thanks to @skipor and @chapsuk for their contributions to this release. [#326]: https://github.com/uber-go/zap/pull/326 [#300]: https://github.com/uber-go/zap/pull/300 -## v1.0.0-rc.1 (14 Feb 2017) +## 1.0.0-rc.1 (14 Feb 2017) This is the first release candidate for zap's stable release. There are multiple breaking changes and improvements from the pre-release version. Most notably: @@ -661,7 +677,7 @@ breaking changes and improvements from the pre-release version. Most notably: * Sampling is more accurate, and doesn't depend on the standard library's shared timer heap. 
-## v0.1.0-beta.1 (6 Feb 2017) +## 0.1.0-beta.1 (6 Feb 2017) This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and upgrade at their leisure. Since this is the first tagged release, there are no diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE similarity index 100% rename from vendor/go.uber.org/zap/LICENSE.txt rename to vendor/go.uber.org/zap/LICENSE diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md index 9de08927b..a17035cb6 100644 --- a/vendor/go.uber.org/zap/README.md +++ b/vendor/go.uber.org/zap/README.md @@ -1,7 +1,16 @@ -# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] +# :zap: zap + + +<div align="center">
Blazing fast, structured, leveled logging in Go. +![Zap logo](assets/logo.png) + +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +</div>
+ ## Installation `go get -u go.uber.org/zap` @@ -66,41 +75,44 @@ Log a message and 10 fields: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 1744 ns/op | +0% | 5 allocs/op -| :zap: zap (sugared) | 2483 ns/op | +42% | 10 allocs/op -| zerolog | 918 ns/op | -47% | 1 allocs/op -| go-kit | 5590 ns/op | +221% | 57 allocs/op -| slog | 5640 ns/op | +223% | 40 allocs/op -| apex/log | 21184 ns/op | +1115% | 63 allocs/op -| logrus | 24338 ns/op | +1296% | 79 allocs/op -| log15 | 26054 ns/op | +1394% | 74 allocs/op +| :zap: zap | 656 ns/op | +0% | 5 allocs/op +| :zap: zap (sugared) | 935 ns/op | +43% | 10 allocs/op +| zerolog | 380 ns/op | -42% | 1 allocs/op +| go-kit | 2249 ns/op | +243% | 57 allocs/op +| slog (LogAttrs) | 2479 ns/op | +278% | 40 allocs/op +| slog | 2481 ns/op | +278% | 42 allocs/op +| apex/log | 9591 ns/op | +1362% | 63 allocs/op +| log15 | 11393 ns/op | +1637% | 75 allocs/op +| logrus | 11654 ns/op | +1677% | 79 allocs/op Log a message with a logger that already has 10 fields of context: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 193 ns/op | +0% | 0 allocs/op -| :zap: zap (sugared) | 227 ns/op | +18% | 1 allocs/op -| zerolog | 81 ns/op | -58% | 0 allocs/op -| slog | 322 ns/op | +67% | 0 allocs/op -| go-kit | 5377 ns/op | +2686% | 56 allocs/op -| apex/log | 19518 ns/op | +10013% | 53 allocs/op -| log15 | 19812 ns/op | +10165% | 70 allocs/op -| logrus | 21997 ns/op | +11297% | 68 allocs/op +| :zap: zap | 67 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 84 ns/op | +25% | 1 allocs/op +| zerolog | 35 ns/op | -48% | 0 allocs/op +| slog | 193 ns/op | +188% | 0 allocs/op +| slog (LogAttrs) | 200 ns/op | +199% | 0 allocs/op +| go-kit | 2460 ns/op | +3572% | 56 allocs/op +| log15 | 9038 ns/op | +13390% | 70 allocs/op +| apex/log | 9068 ns/op | +13434% | 53 allocs/op +| logrus | 10521 ns/op | +15603% | 68 allocs/op Log a static string, without any context or `printf`-style templating: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 165 ns/op | +0% | 0 allocs/op -| :zap: zap (sugared) | 212 ns/op | +28% | 1 allocs/op -| zerolog | 95 ns/op | -42% | 0 allocs/op -| slog | 296 ns/op | +79% | 0 allocs/op -| go-kit | 415 ns/op | +152% | 9 allocs/op -| standard library | 422 ns/op | +156% | 2 allocs/op -| apex/log | 1601 ns/op | +870% | 5 allocs/op -| logrus | 3017 ns/op | +1728% | 23 allocs/op -| log15 | 3469 ns/op | +2002% | 20 allocs/op +| :zap: zap | 63 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 81 ns/op | +29% | 1 allocs/op +| zerolog | 32 ns/op | -49% | 0 allocs/op +| standard library | 124 ns/op | +97% | 1 allocs/op +| slog | 196 ns/op | +211% | 0 allocs/op +| slog (LogAttrs) | 200 ns/op | +217% | 0 allocs/op +| go-kit | 213 ns/op | +238% | 9 allocs/op +| apex/log | 771 ns/op | +1124% | 5 allocs/op +| logrus | 1439 ns/op | +2184% | 23 allocs/op +| log15 | 2069 ns/op | +3184% | 20 allocs/op ## Development Status: Stable @@ -120,7 +132,7 @@ standard.
-Released under the [MIT License](LICENSE.txt). +Released under the [MIT License](LICENSE). 1 In particular, keep in mind that we may be benchmarking against slightly older versions of other packages. Versions are diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go index 27fb5cd5d..0b8540c21 100644 --- a/vendor/go.uber.org/zap/buffer/buffer.go +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -42,7 +42,7 @@ func (b *Buffer) AppendByte(v byte) { b.bs = append(b.bs, v) } -// AppendBytes writes a single byte to the Buffer. +// AppendBytes writes the given slice of bytes to the Buffer. func (b *Buffer) AppendBytes(v []byte) { b.bs = append(b.bs, v...) } diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go index c8dd3358a..6743930b8 100644 --- a/vendor/go.uber.org/zap/field.go +++ b/vendor/go.uber.org/zap/field.go @@ -460,6 +460,8 @@ func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error { // - https://github.com/uber-go/zap/pull/1304 // - https://github.com/uber-go/zap/pull/1305 // - https://github.com/uber-go/zap/pull/1308 +// +// See https://github.com/golang/go/issues/62077 for upstream issue. type anyFieldC[T any] func(string, T) Field func (f anyFieldC[T]) Any(key string, val any) Field { diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go index 6205fe48a..c4d300323 100644 --- a/vendor/go.uber.org/zap/logger.go +++ b/vendor/go.uber.org/zap/logger.go @@ -43,6 +43,7 @@ type Logger struct { development bool addCaller bool + onPanic zapcore.CheckWriteHook // default is WriteThenPanic onFatal zapcore.CheckWriteHook // default is WriteThenFatal name string @@ -345,27 +346,12 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // Set up any required terminal behavior. switch ent.Level { case zapcore.PanicLevel: - ce = ce.After(ent, zapcore.WriteThenPanic) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic)) case zapcore.FatalLevel: - onFatal := log.onFatal - // nil or WriteThenNoop will lead to continued execution after - // a Fatal log entry, which is unexpected. For example, - // - // f, err := os.Open(..) - // if err != nil { - // log.Fatal("cannot open", zap.Error(err)) - // } - // fmt.Println(f.Name()) - // - // The f.Name() will panic if we continue execution after the - // log.Fatal. - if onFatal == nil || onFatal == zapcore.WriteThenNoop { - onFatal = zapcore.WriteThenFatal - } - ce = ce.After(ent, onFatal) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenFatal, log.onFatal)) case zapcore.DPanicLevel: if log.development { - ce = ce.After(ent, zapcore.WriteThenPanic) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic)) } } @@ -430,3 +416,20 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { return ce } + +func terminalHookOverride(defaultHook, override zapcore.CheckWriteHook) zapcore.CheckWriteHook { + // A nil or WriteThenNoop hook will lead to continued execution after + // a Panic or Fatal log entry, which is unexpected. For example, + // + // f, err := os.Open(..) + // if err != nil { + // log.Fatal("cannot open", zap.Error(err)) + // } + // fmt.Println(f.Name()) + // + // The f.Name() will panic if we continue execution after the log.Fatal. 
+ if override == nil || override == zapcore.WriteThenNoop { + return defaultHook + } + return override +} diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go index c4f3bca3d..43d357ac9 100644 --- a/vendor/go.uber.org/zap/options.go +++ b/vendor/go.uber.org/zap/options.go @@ -132,6 +132,21 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option { }) } +// WithPanicHook sets a CheckWriteHook to run on Panic/DPanic logs. +// Zap will call this hook after writing a log statement with a Panic/DPanic level. +// +// For example, the following builds a logger that will exit the current +// goroutine after writing a Panic/DPanic log message, but it will not start a panic. +// +// zap.New(core, zap.WithPanicHook(zapcore.WriteThenGoexit)) +// +// This is useful for testing Panic/DPanic log output. +func WithPanicHook(hook zapcore.CheckWriteHook) Option { + return optionFunc(func(log *Logger) { + log.onPanic = hook + }) +} + // OnFatal sets the action to take on fatal logs. // // Deprecated: Use [WithFatalHook] instead. diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go index 00ac5fe3a..8904cd087 100644 --- a/vendor/go.uber.org/zap/sugar.go +++ b/vendor/go.uber.org/zap/sugar.go @@ -115,6 +115,21 @@ func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} } +// WithLazy adds a variadic number of fields to the logging context lazily. +// The fields are evaluated only if the logger is further chained with [With] +// or is written to with any of the log level methods. +// Until that occurs, the logger may retain references to objects inside the fields, +// and logging will reflect the state of an object at the time of logging, +// not the time of WithLazy(). +// +// Similar to [With], fields added to the child don't affect the parent, +// and vice versa. Also, the keys in key-value pairs should be strings. In development, +// passing a non-string key panics, while in production it logs an error and skips the pair. +// Passing an orphaned key has the same behavior. +func (s *SugaredLogger) WithLazy(args ...interface{}) *SugaredLogger { + return &SugaredLogger{base: s.base.WithLazy(s.sweetenFields(args)...)} +} + // Level reports the minimum enabled level for this logger. // // For NopLoggers, this is [zapcore.InvalidLevel]. @@ -122,6 +137,12 @@ func (s *SugaredLogger) Level() zapcore.Level { return zapcore.LevelOf(s.base.core) } +// Log logs the provided arguments at provided level. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Log(lvl zapcore.Level, args ...interface{}) { + s.log(lvl, "", args, nil) +} + // Debug logs the provided arguments at [DebugLevel]. // Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Debug(args ...interface{}) { @@ -165,6 +186,12 @@ func (s *SugaredLogger) Fatal(args ...interface{}) { s.log(FatalLevel, "", args, nil) } +// Logf formats the message according to the format specifier +// and logs it at provided level. +func (s *SugaredLogger) Logf(lvl zapcore.Level, template string, args ...interface{}) { + s.log(lvl, template, args, nil) +} + // Debugf formats the message according to the format specifier // and logs it at [DebugLevel]. 
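A short sketch combining the new WithPanicHook option and SugaredLogger.WithLazy introduced above (construction via NewExample is just for brevity):

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// WithPanicHook: end the goroutine instead of panicking on Panic/DPanic
	// entries, which makes panic-level output testable.
	logger := zap.NewExample(zap.WithPanicHook(zapcore.WriteThenGoexit))

	// SugaredLogger.WithLazy: the field is captured here but only evaluated
	// once the logger is written to or chained with With.
	sugar := logger.Sugar().WithLazy("component", "ingress")
	sugar.Info("started")
}
```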
func (s *SugaredLogger) Debugf(template string, args ...interface{}) { @@ -208,6 +235,12 @@ func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { s.log(FatalLevel, template, args, nil) } +// Logw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Logw(lvl zapcore.Level, msg string, keysAndValues ...interface{}) { + s.log(lvl, msg, nil, keysAndValues) +} + // Debugw logs a message with some additional context. The variadic key-value // pairs are treated as they are in With. // @@ -255,6 +288,12 @@ func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { s.log(FatalLevel, msg, nil, keysAndValues) } +// Logln logs a message at provided level. +// Spaces are always added between arguments. +func (s *SugaredLogger) Logln(lvl zapcore.Level, args ...interface{}) { + s.logln(lvl, args, nil) +} + // Debugln logs a message at [DebugLevel]. // Spaces are always added between arguments. func (s *SugaredLogger) Debugln(args ...interface{}) { diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go index 8ca0bfaf5..cc2b4e07b 100644 --- a/vendor/go.uber.org/zap/zapcore/console_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -77,7 +77,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, // If this ever becomes a performance bottleneck, we can implement // ArrayEncoder for our plain-text format. arr := getSliceEncoder() - if c.TimeKey != "" && c.EncodeTime != nil { + if c.TimeKey != "" && c.EncodeTime != nil && !ent.Time.IsZero() { c.EncodeTime(ent.Time, arr) } if c.LevelKey != "" && c.EncodeLevel != nil { diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go index 5769ff3e4..044625415 100644 --- a/vendor/go.uber.org/zap/zapcore/encoder.go +++ b/vendor/go.uber.org/zap/zapcore/encoder.go @@ -37,6 +37,9 @@ const DefaultLineEnding = "\n" const OmitKey = "" // A LevelEncoder serializes a Level to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type LevelEncoder func(Level, PrimitiveArrayEncoder) // LowercaseLevelEncoder serializes a Level to a lowercase string. For example, @@ -90,6 +93,9 @@ func (e *LevelEncoder) UnmarshalText(text []byte) error { } // A TimeEncoder serializes a time.Time to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type TimeEncoder func(time.Time, PrimitiveArrayEncoder) // EpochTimeEncoder serializes a time.Time to a floating-point number of seconds @@ -219,6 +225,9 @@ func (e *TimeEncoder) UnmarshalJSON(data []byte) error { } // A DurationEncoder serializes a time.Duration to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) // SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed. @@ -262,6 +271,9 @@ func (e *DurationEncoder) UnmarshalText(text []byte) error { } // A CallerEncoder serializes an EntryCaller to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. 
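The new level-parameterized Log/Logf/Logw/Logln methods let call sites pick the level at runtime; the levelFor helper below is a hypothetical illustration:

```go
package main

import (
	"errors"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// levelFor is an invented helper: error results are logged at ErrorLevel,
// everything else at InfoLevel.
func levelFor(err error) zapcore.Level {
	if err != nil {
		return zapcore.ErrorLevel
	}
	return zapcore.InfoLevel
}

func main() {
	sugar := zap.NewExample().Sugar()
	err := errors.New("upstream timeout")
	// The level is now a parameter instead of being baked into the method name.
	sugar.Logw(levelFor(err), "request finished", "status", 504)
	sugar.Logf(levelFor(nil), "handled %d requests", 42)
}
```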
type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder) // FullCallerEncoder serializes a caller in /full/path/to/package/file:line @@ -292,6 +304,9 @@ func (e *CallerEncoder) UnmarshalText(text []byte) error { // A NameEncoder serializes a period-separated logger name to a primitive // type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type NameEncoder func(string, PrimitiveArrayEncoder) // FullNameEncoder serializes the logger name as-is. diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go index 95bdb0a12..308c9781e 100644 --- a/vendor/go.uber.org/zap/zapcore/field.go +++ b/vendor/go.uber.org/zap/zapcore/field.go @@ -47,7 +47,7 @@ const ( ByteStringType // Complex128Type indicates that the field carries a complex128. Complex128Type - // Complex64Type indicates that the field carries a complex128. + // Complex64Type indicates that the field carries a complex64. Complex64Type // DurationType indicates that the field carries a time.Duration. DurationType diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go index c8ab86979..9685169b2 100644 --- a/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -372,7 +372,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, final.AppendString(ent.Level.String()) } } - if final.TimeKey != "" { + if final.TimeKey != "" && !ent.Time.IsZero() { final.AddTime(final.TimeKey, ent.Time) } if ent.LoggerName != "" && final.NameKey != "" { diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s index 66aebae25..c672ccf69 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s @@ -33,6 +33,9 @@ #define CONSTBASE R16 #define BLOCKS R17 +// for VPERMXOR +#define MASK R18 + DATA consts<>+0x00(SB)/8, $0x3320646e61707865 DATA consts<>+0x08(SB)/8, $0x6b20657479622d32 DATA consts<>+0x10(SB)/8, $0x0000000000000001 @@ -53,7 +56,11 @@ DATA consts<>+0x80(SB)/8, $0x6b2065746b206574 DATA consts<>+0x88(SB)/8, $0x6b2065746b206574 DATA consts<>+0x90(SB)/8, $0x0000000100000000 DATA consts<>+0x98(SB)/8, $0x0000000300000002 -GLOBL consts<>(SB), RODATA, $0xa0 +DATA consts<>+0xa0(SB)/8, $0x5566774411223300 +DATA consts<>+0xa8(SB)/8, $0xddeeffcc99aabb88 +DATA consts<>+0xb0(SB)/8, $0x6677445522330011 +DATA consts<>+0xb8(SB)/8, $0xeeffccddaabb8899 +GLOBL consts<>(SB), RODATA, $0xc0 //func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 @@ -70,6 +77,9 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 MOVD $48, R10 MOVD $64, R11 SRD $6, LEN, BLOCKS + // for VPERMXOR + MOVD $consts<>+0xa0(SB), MASK + MOVD $16, R20 // V16 LXVW4X (CONSTBASE)(R0), VS48 ADD $80,CONSTBASE @@ -87,6 +97,10 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 // V28 LXVW4X (CONSTBASE)(R11), VS60 + // Load mask constants for VPERMXOR + LXVW4X (MASK)(R0), V20 + LXVW4X (MASK)(R20), V21 + // splat slot from V19 -> V26 VSPLTW $0, V19, V26 @@ -97,7 +111,7 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 MOVD $10, R14 MOVD R14, CTR - + PCALIGN $16 loop_outer_vsx: // V0, V1, V2, V3 LXVW4X (R0)(CONSTBASE), VS32 @@ -128,22 +142,17 @@ loop_outer_vsx: VSPLTISW $12, V28 VSPLTISW $8, V29 VSPLTISW $7, V30 - + PCALIGN $16 loop_vsx: VADDUWM V0, V4, V0 VADDUWM V1, V5, V1 VADDUWM V2, V6, V2 VADDUWM V3, V7, V3 
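The encoder doc comments added above pin down a contract: exactly one Append* call per invocation. A conforming custom TimeEncoder might look like this (name and format are illustrative):

```go
package main

import (
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// rfc3339NanoEncoder honors the documented contract: it makes exactly one
// Append* call on the PrimitiveArrayEncoder per invocation.
func rfc3339NanoEncoder(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
	enc.AppendString(t.UTC().Format(time.RFC3339Nano))
}

func main() {
	cfg := zap.NewProductionConfig()
	cfg.EncoderConfig.EncodeTime = rfc3339NanoEncoder
	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	logger.Info("custom time encoding")
}
```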
- VXOR V12, V0, V12 - VXOR V13, V1, V13 - VXOR V14, V2, V14 - VXOR V15, V3, V15 - - VRLW V12, V27, V12 - VRLW V13, V27, V13 - VRLW V14, V27, V14 - VRLW V15, V27, V15 + VPERMXOR V12, V0, V21, V12 + VPERMXOR V13, V1, V21, V13 + VPERMXOR V14, V2, V21, V14 + VPERMXOR V15, V3, V21, V15 VADDUWM V8, V12, V8 VADDUWM V9, V13, V9 @@ -165,15 +174,10 @@ loop_vsx: VADDUWM V2, V6, V2 VADDUWM V3, V7, V3 - VXOR V12, V0, V12 - VXOR V13, V1, V13 - VXOR V14, V2, V14 - VXOR V15, V3, V15 - - VRLW V12, V29, V12 - VRLW V13, V29, V13 - VRLW V14, V29, V14 - VRLW V15, V29, V15 + VPERMXOR V12, V0, V20, V12 + VPERMXOR V13, V1, V20, V13 + VPERMXOR V14, V2, V20, V14 + VPERMXOR V15, V3, V20, V15 VADDUWM V8, V12, V8 VADDUWM V9, V13, V9 @@ -195,15 +199,10 @@ loop_vsx: VADDUWM V2, V7, V2 VADDUWM V3, V4, V3 - VXOR V15, V0, V15 - VXOR V12, V1, V12 - VXOR V13, V2, V13 - VXOR V14, V3, V14 - - VRLW V15, V27, V15 - VRLW V12, V27, V12 - VRLW V13, V27, V13 - VRLW V14, V27, V14 + VPERMXOR V15, V0, V21, V15 + VPERMXOR V12, V1, V21, V12 + VPERMXOR V13, V2, V21, V13 + VPERMXOR V14, V3, V21, V14 VADDUWM V10, V15, V10 VADDUWM V11, V12, V11 @@ -225,15 +224,10 @@ loop_vsx: VADDUWM V2, V7, V2 VADDUWM V3, V4, V3 - VXOR V15, V0, V15 - VXOR V12, V1, V12 - VXOR V13, V2, V13 - VXOR V14, V3, V14 - - VRLW V15, V29, V15 - VRLW V12, V29, V12 - VRLW V13, V29, V13 - VRLW V14, V29, V14 + VPERMXOR V15, V0, V20, V15 + VPERMXOR V12, V1, V20, V12 + VPERMXOR V13, V2, V20, V13 + VPERMXOR V14, V3, V20, V14 VADDUWM V10, V15, V10 VADDUWM V11, V12, V11 @@ -249,48 +243,48 @@ loop_vsx: VRLW V6, V30, V6 VRLW V7, V30, V7 VRLW V4, V30, V4 - BC 16, LT, loop_vsx + BDNZ loop_vsx VADDUWM V12, V26, V12 - WORD $0x13600F8C // VMRGEW V0, V1, V27 - WORD $0x13821F8C // VMRGEW V2, V3, V28 + VMRGEW V0, V1, V27 + VMRGEW V2, V3, V28 - WORD $0x10000E8C // VMRGOW V0, V1, V0 - WORD $0x10421E8C // VMRGOW V2, V3, V2 + VMRGOW V0, V1, V0 + VMRGOW V2, V3, V2 - WORD $0x13A42F8C // VMRGEW V4, V5, V29 - WORD $0x13C63F8C // VMRGEW V6, V7, V30 + VMRGEW V4, V5, V29 + VMRGEW V6, V7, V30 XXPERMDI VS32, VS34, $0, VS33 XXPERMDI VS32, VS34, $3, VS35 XXPERMDI VS59, VS60, $0, VS32 XXPERMDI VS59, VS60, $3, VS34 - WORD $0x10842E8C // VMRGOW V4, V5, V4 - WORD $0x10C63E8C // VMRGOW V6, V7, V6 + VMRGOW V4, V5, V4 + VMRGOW V6, V7, V6 - WORD $0x13684F8C // VMRGEW V8, V9, V27 - WORD $0x138A5F8C // VMRGEW V10, V11, V28 + VMRGEW V8, V9, V27 + VMRGEW V10, V11, V28 XXPERMDI VS36, VS38, $0, VS37 XXPERMDI VS36, VS38, $3, VS39 XXPERMDI VS61, VS62, $0, VS36 XXPERMDI VS61, VS62, $3, VS38 - WORD $0x11084E8C // VMRGOW V8, V9, V8 - WORD $0x114A5E8C // VMRGOW V10, V11, V10 + VMRGOW V8, V9, V8 + VMRGOW V10, V11, V10 - WORD $0x13AC6F8C // VMRGEW V12, V13, V29 - WORD $0x13CE7F8C // VMRGEW V14, V15, V30 + VMRGEW V12, V13, V29 + VMRGEW V14, V15, V30 XXPERMDI VS40, VS42, $0, VS41 XXPERMDI VS40, VS42, $3, VS43 XXPERMDI VS59, VS60, $0, VS40 XXPERMDI VS59, VS60, $3, VS42 - WORD $0x118C6E8C // VMRGOW V12, V13, V12 - WORD $0x11CE7E8C // VMRGOW V14, V15, V14 + VMRGOW V12, V13, V12 + VMRGOW V14, V15, V14 VSPLTISW $4, V27 VADDUWM V26, V27, V26 @@ -431,7 +425,7 @@ tail_vsx: ADD $-1, R11, R12 ADD $-1, INP ADD $-1, OUT - + PCALIGN $16 looptail_vsx: // Copying the result to OUT // in bytes. 
@@ -439,7 +433,7 @@ looptail_vsx: MOVBZU 1(INP), TMP XOR KEY, TMP, KEY MOVBU KEY, 1(OUT) - BC 16, LT, looptail_vsx + BDNZ looptail_vsx // Clear the stack values STXVW4X VS48, (R11)(R0) diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go index 93da7322b..8cf5d8112 100644 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go @@ -5,7 +5,7 @@ // Package chacha20poly1305 implements the ChaCha20-Poly1305 AEAD and its // extended nonce variant XChaCha20-Poly1305, as specified in RFC 8439 and // draft-irtf-cfrg-xchacha-01. -package chacha20poly1305 // import "golang.org/x/crypto/chacha20poly1305" +package chacha20poly1305 import ( "crypto/cipher" diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go index cda8e3edf..90ef6a241 100644 --- a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go @@ -4,7 +4,7 @@ // Package asn1 contains supporting types for parsing and building ASN.1 // messages with the cryptobyte package. -package asn1 // import "golang.org/x/crypto/cryptobyte/asn1" +package asn1 // Tag represents an ASN.1 identifier octet, consisting of a tag number // (indicating a type) and class (such as context-specific or constructed). diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go index 10692a8a3..4b0f8097f 100644 --- a/vendor/golang.org/x/crypto/cryptobyte/string.go +++ b/vendor/golang.org/x/crypto/cryptobyte/string.go @@ -15,7 +15,7 @@ // // See the documentation and examples for the Builder and String types to get // started. -package cryptobyte // import "golang.org/x/crypto/cryptobyte" +package cryptobyte // String represents a string of bytes. It provides methods for parsing // fixed-length and length-prefixed values from it. diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf.go b/vendor/golang.org/x/crypto/hkdf/hkdf.go index f4ded5fee..3bee66294 100644 --- a/vendor/golang.org/x/crypto/hkdf/hkdf.go +++ b/vendor/golang.org/x/crypto/hkdf/hkdf.go @@ -8,7 +8,7 @@ // HKDF is a cryptographic key derivation function (KDF) with the goal of // expanding limited input keying material into one or more cryptographically // strong secret keys. -package hkdf // import "golang.org/x/crypto/hkdf" +package hkdf import ( "crypto/hmac" diff --git a/vendor/golang.org/x/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go index decd8cf9b..7e0230907 100644 --- a/vendor/golang.org/x/crypto/sha3/doc.go +++ b/vendor/golang.org/x/crypto/sha3/doc.go @@ -59,4 +59,4 @@ // They produce output of the same length, with the same security strengths // against all attacks. This means, in particular, that SHA3-256 only has // 128-bit collision resistance, because its output length is 32 bytes. -package sha3 // import "golang.org/x/crypto/sha3" +package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go index 0d8043fd2..c544b29e5 100644 --- a/vendor/golang.org/x/crypto/sha3/hashes.go +++ b/vendor/golang.org/x/crypto/sha3/hashes.go @@ -9,6 +9,7 @@ package sha3 // bytes. import ( + "crypto" "hash" ) @@ -16,39 +17,50 @@ import ( // Its generic security strength is 224 bits against preimage attacks, // and 112 bits against collision attacks. 
func New224() hash.Hash { - if h := new224Asm(); h != nil { - return h - } - return &state{rate: 144, outputLen: 28, dsbyte: 0x06} + return new224() } // New256 creates a new SHA3-256 hash. // Its generic security strength is 256 bits against preimage attacks, // and 128 bits against collision attacks. func New256() hash.Hash { - if h := new256Asm(); h != nil { - return h - } - return &state{rate: 136, outputLen: 32, dsbyte: 0x06} + return new256() } // New384 creates a new SHA3-384 hash. // Its generic security strength is 384 bits against preimage attacks, // and 192 bits against collision attacks. func New384() hash.Hash { - if h := new384Asm(); h != nil { - return h - } - return &state{rate: 104, outputLen: 48, dsbyte: 0x06} + return new384() } // New512 creates a new SHA3-512 hash. // Its generic security strength is 512 bits against preimage attacks, // and 256 bits against collision attacks. func New512() hash.Hash { - if h := new512Asm(); h != nil { - return h - } + return new512() +} + +func init() { + crypto.RegisterHash(crypto.SHA3_224, New224) + crypto.RegisterHash(crypto.SHA3_256, New256) + crypto.RegisterHash(crypto.SHA3_384, New384) + crypto.RegisterHash(crypto.SHA3_512, New512) +} + +func new224Generic() *state { + return &state{rate: 144, outputLen: 28, dsbyte: 0x06} +} + +func new256Generic() *state { + return &state{rate: 136, outputLen: 32, dsbyte: 0x06} +} + +func new384Generic() *state { + return &state{rate: 104, outputLen: 48, dsbyte: 0x06} +} + +func new512Generic() *state { return &state{rate: 72, outputLen: 64, dsbyte: 0x06} } diff --git a/vendor/golang.org/x/crypto/sha3/hashes_generic.go b/vendor/golang.org/x/crypto/sha3/hashes_generic.go deleted file mode 100644 index fe8c84793..000000000 --- a/vendor/golang.org/x/crypto/sha3/hashes_generic.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !gc || purego || !s390x - -package sha3 - -import ( - "hash" -) - -// new224Asm returns an assembly implementation of SHA3-224 if available, -// otherwise it returns nil. -func new224Asm() hash.Hash { return nil } - -// new256Asm returns an assembly implementation of SHA3-256 if available, -// otherwise it returns nil. -func new256Asm() hash.Hash { return nil } - -// new384Asm returns an assembly implementation of SHA3-384 if available, -// otherwise it returns nil. -func new384Asm() hash.Hash { return nil } - -// new512Asm returns an assembly implementation of SHA3-512 if available, -// otherwise it returns nil. -func new512Asm() hash.Hash { return nil } diff --git a/vendor/golang.org/x/crypto/sha3/hashes_noasm.go b/vendor/golang.org/x/crypto/sha3/hashes_noasm.go new file mode 100644 index 000000000..9d85fb621 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/hashes_noasm.go @@ -0,0 +1,23 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !gc || purego || !s390x + +package sha3 + +func new224() *state { + return new224Generic() +} + +func new256() *state { + return new256Generic() +} + +func new384() *state { + return new384Generic() +} + +func new512() *state { + return new512Generic() +} diff --git a/vendor/golang.org/x/crypto/sha3/register.go b/vendor/golang.org/x/crypto/sha3/register.go deleted file mode 100644 index addfd5049..000000000 --- a/vendor/golang.org/x/crypto/sha3/register.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.4 - -package sha3 - -import ( - "crypto" -) - -func init() { - crypto.RegisterHash(crypto.SHA3_224, New224) - crypto.RegisterHash(crypto.SHA3_256, New256) - crypto.RegisterHash(crypto.SHA3_384, New384) - crypto.RegisterHash(crypto.SHA3_512, New512) -} diff --git a/vendor/golang.org/x/crypto/sha3/sha3.go b/vendor/golang.org/x/crypto/sha3/sha3.go index 4884d172a..afedde5ab 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3.go +++ b/vendor/golang.org/x/crypto/sha3/sha3.go @@ -23,7 +23,6 @@ const ( type state struct { // Generic sponge components. a [25]uint64 // main state of the hash - buf []byte // points into storage rate int // the number of bytes of state to use // dsbyte contains the "domain separation" bits and the first bit of @@ -40,7 +39,8 @@ type state struct { // Extendable-Output Functions (May 2014)" dsbyte byte - storage storageBuf + i, n int // storage[i:n] is the buffer, i is only used while squeezing + storage [maxRate]byte // Specific to SHA-3 and SHAKE. outputLen int // the default output size in bytes @@ -54,24 +54,18 @@ func (d *state) BlockSize() int { return d.rate } func (d *state) Size() int { return d.outputLen } // Reset clears the internal state by zeroing the sponge state and -// the byte buffer, and setting Sponge.state to absorbing. +// the buffer indexes, and setting Sponge.state to absorbing. func (d *state) Reset() { // Zero the permutation's state. for i := range d.a { d.a[i] = 0 } d.state = spongeAbsorbing - d.buf = d.storage.asBytes()[:0] + d.i, d.n = 0, 0 } func (d *state) clone() *state { ret := *d - if ret.state == spongeAbsorbing { - ret.buf = ret.storage.asBytes()[:len(ret.buf)] - } else { - ret.buf = ret.storage.asBytes()[d.rate-cap(d.buf) : d.rate] - } - return &ret } @@ -82,43 +76,40 @@ func (d *state) permute() { case spongeAbsorbing: // If we're absorbing, we need to xor the input into the state // before applying the permutation. - xorIn(d, d.buf) - d.buf = d.storage.asBytes()[:0] + xorIn(d, d.storage[:d.rate]) + d.n = 0 keccakF1600(&d.a) case spongeSqueezing: // If we're squeezing, we need to apply the permutation before // copying more output. keccakF1600(&d.a) - d.buf = d.storage.asBytes()[:d.rate] - copyOut(d, d.buf) + d.i = 0 + copyOut(d, d.storage[:d.rate]) } } // pads appends the domain separation bits in dsbyte, applies // the multi-bitrate 10..1 padding rule, and permutes the state. -func (d *state) padAndPermute(dsbyte byte) { - if d.buf == nil { - d.buf = d.storage.asBytes()[:0] - } +func (d *state) padAndPermute() { // Pad with this instance's domain-separator bits. We know that there's // at least one byte of space in d.buf because, if it were full, // permute would have been called to empty it. dsbyte also contains the // first one bit for the padding. See the comment in the state struct. 
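With registration moved from the deleted register.go into hashes.go's init, importing the package is enough to reach SHA-3 through the standard crypto.Hash constants; a minimal sketch:

```go
package main

import (
	"crypto"
	"fmt"

	_ "golang.org/x/crypto/sha3" // init() now registers the SHA-3 hashes
)

func main() {
	// SHA3-224/256/384/512 are registered unconditionally, so the stdlib
	// crypto.Hash front door works without touching the sha3 API directly.
	h := crypto.SHA3_256.New()
	h.Write([]byte("hello"))
	fmt.Printf("%x\n", h.Sum(nil))
}
```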
- d.buf = append(d.buf, dsbyte) - zerosStart := len(d.buf) - d.buf = d.storage.asBytes()[:d.rate] - for i := zerosStart; i < d.rate; i++ { - d.buf[i] = 0 + d.storage[d.n] = d.dsbyte + d.n++ + for d.n < d.rate { + d.storage[d.n] = 0 + d.n++ } // This adds the final one bit for the padding. Because of the way that // bits are numbered from the LSB upwards, the final bit is the MSB of // the last byte. - d.buf[d.rate-1] ^= 0x80 + d.storage[d.rate-1] ^= 0x80 // Apply the permutation d.permute() d.state = spongeSqueezing - d.buf = d.storage.asBytes()[:d.rate] - copyOut(d, d.buf) + d.n = d.rate + copyOut(d, d.storage[:d.rate]) } // Write absorbs more data into the hash's state. It panics if any @@ -127,28 +118,25 @@ func (d *state) Write(p []byte) (written int, err error) { if d.state != spongeAbsorbing { panic("sha3: Write after Read") } - if d.buf == nil { - d.buf = d.storage.asBytes()[:0] - } written = len(p) for len(p) > 0 { - if len(d.buf) == 0 && len(p) >= d.rate { + if d.n == 0 && len(p) >= d.rate { // The fast path; absorb a full "rate" bytes of input and apply the permutation. xorIn(d, p[:d.rate]) p = p[d.rate:] keccakF1600(&d.a) } else { // The slow path; buffer the input until we can fill the sponge, and then xor it in. - todo := d.rate - len(d.buf) + todo := d.rate - d.n if todo > len(p) { todo = len(p) } - d.buf = append(d.buf, p[:todo]...) + d.n += copy(d.storage[d.n:], p[:todo]) p = p[todo:] // If the sponge is full, apply the permutation. - if len(d.buf) == d.rate { + if d.n == d.rate { d.permute() } } @@ -161,19 +149,19 @@ func (d *state) Write(p []byte) (written int, err error) { func (d *state) Read(out []byte) (n int, err error) { // If we're still absorbing, pad and apply the permutation. if d.state == spongeAbsorbing { - d.padAndPermute(d.dsbyte) + d.padAndPermute() } n = len(out) // Now, do the squeezing. for len(out) > 0 { - n := copy(out, d.buf) - d.buf = d.buf[n:] + n := copy(out, d.storage[d.i:d.n]) + d.i += n out = out[n:] // Apply the permutation if we've squeezed the sponge dry. - if len(d.buf) == 0 { + if d.i == d.rate { d.permute() } } diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go index d861bca52..00d8034ae 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go @@ -143,6 +143,12 @@ func (s *asmState) Write(b []byte) (int, error) { // Read squeezes an arbitrary number of bytes from the sponge. func (s *asmState) Read(out []byte) (n int, err error) { + // The 'compute last message digest' instruction only stores the digest + // at the first operand (dst) for SHAKE functions. + if s.function != shake_128 && s.function != shake_256 { + panic("sha3: can only call Read for SHAKE functions") + } + n = len(out) // need to pad if we were absorbing @@ -202,8 +208,17 @@ func (s *asmState) Sum(b []byte) []byte { // Hash the buffer. Note that we don't clear it because we // aren't updating the state. - klmd(s.function, &a, nil, s.buf) - return append(b, a[:s.outputLen]...) + switch s.function { + case sha3_224, sha3_256, sha3_384, sha3_512: + klmd(s.function, &a, nil, s.buf) + return append(b, a[:s.outputLen]...) + case shake_128, shake_256: + d := make([]byte, s.outputLen, 64) + klmd(s.function, &a, d, s.buf) + return append(b, d[:s.outputLen]...) + default: + panic("sha3: unknown function") + } } // Reset resets the Hash to its initial state. 
@@ -233,56 +248,56 @@ func (s *asmState) Clone() ShakeHash { return s.clone() } -// new224Asm returns an assembly implementation of SHA3-224 if available, -// otherwise it returns nil. -func new224Asm() hash.Hash { +// new224 returns an assembly implementation of SHA3-224 if available, +// otherwise it returns a generic implementation. +func new224() hash.Hash { if cpu.S390X.HasSHA3 { return newAsmState(sha3_224) } - return nil + return new224Generic() } -// new256Asm returns an assembly implementation of SHA3-256 if available, -// otherwise it returns nil. -func new256Asm() hash.Hash { +// new256 returns an assembly implementation of SHA3-256 if available, +// otherwise it returns a generic implementation. +func new256() hash.Hash { if cpu.S390X.HasSHA3 { return newAsmState(sha3_256) } - return nil + return new256Generic() } -// new384Asm returns an assembly implementation of SHA3-384 if available, -// otherwise it returns nil. -func new384Asm() hash.Hash { +// new384 returns an assembly implementation of SHA3-384 if available, +// otherwise it returns a generic implementation. +func new384() hash.Hash { if cpu.S390X.HasSHA3 { return newAsmState(sha3_384) } - return nil + return new384Generic() } -// new512Asm returns an assembly implementation of SHA3-512 if available, -// otherwise it returns nil. -func new512Asm() hash.Hash { +// new512 returns an assembly implementation of SHA3-512 if available, +// otherwise it returns a generic implementation. +func new512() hash.Hash { if cpu.S390X.HasSHA3 { return newAsmState(sha3_512) } - return nil + return new512Generic() } -// newShake128Asm returns an assembly implementation of SHAKE-128 if available, -// otherwise it returns nil. -func newShake128Asm() ShakeHash { +// newShake128 returns an assembly implementation of SHAKE-128 if available, +// otherwise it returns a generic implementation. +func newShake128() ShakeHash { if cpu.S390X.HasSHA3 { return newAsmState(shake_128) } - return nil + return newShake128Generic() } -// newShake256Asm returns an assembly implementation of SHAKE-256 if available, -// otherwise it returns nil. -func newShake256Asm() ShakeHash { +// newShake256 returns an assembly implementation of SHAKE-256 if available, +// otherwise it returns a generic implementation. +func newShake256() ShakeHash { if cpu.S390X.HasSHA3 { return newAsmState(shake_256) } - return nil + return newShake256Generic() } diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go index bb6998402..1ea9275b8 100644 --- a/vendor/golang.org/x/crypto/sha3/shake.go +++ b/vendor/golang.org/x/crypto/sha3/shake.go @@ -115,19 +115,21 @@ func (c *state) Clone() ShakeHash { // Its generic security strength is 128 bits against all attacks if at // least 32 bytes of its output are used. func NewShake128() ShakeHash { - if h := newShake128Asm(); h != nil { - return h - } - return &state{rate: rate128, outputLen: 32, dsbyte: dsbyteShake} + return newShake128() } // NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. // Its generic security strength is 256 bits against all attacks if // at least 64 bytes of its output are used. 
func NewShake256() ShakeHash { - if h := newShake256Asm(); h != nil { - return h - } + return newShake256() +} + +func newShake128Generic() *state { + return &state{rate: rate128, outputLen: 32, dsbyte: dsbyteShake} +} + +func newShake256Generic() *state { return &state{rate: rate256, outputLen: 64, dsbyte: dsbyteShake} } diff --git a/vendor/golang.org/x/crypto/sha3/shake_generic.go b/vendor/golang.org/x/crypto/sha3/shake_generic.go deleted file mode 100644 index 8d31cf5be..000000000 --- a/vendor/golang.org/x/crypto/sha3/shake_generic.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !gc || purego || !s390x - -package sha3 - -// newShake128Asm returns an assembly implementation of SHAKE-128 if available, -// otherwise it returns nil. -func newShake128Asm() ShakeHash { - return nil -} - -// newShake256Asm returns an assembly implementation of SHAKE-256 if available, -// otherwise it returns nil. -func newShake256Asm() ShakeHash { - return nil -} diff --git a/vendor/golang.org/x/crypto/sha3/shake_noasm.go b/vendor/golang.org/x/crypto/sha3/shake_noasm.go new file mode 100644 index 000000000..4276ba4ab --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/shake_noasm.go @@ -0,0 +1,15 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !gc || purego || !s390x + +package sha3 + +func newShake128() *state { + return newShake128Generic() +} + +func newShake256() *state { + return newShake256Generic() +} diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go index 7337cca88..6ada5c957 100644 --- a/vendor/golang.org/x/crypto/sha3/xor.go +++ b/vendor/golang.org/x/crypto/sha3/xor.go @@ -2,22 +2,39 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (!amd64 && !386 && !ppc64le) || purego - package sha3 -// A storageBuf is an aligned array of maxRate bytes. -type storageBuf [maxRate]byte - -func (b *storageBuf) asBytes() *[maxRate]byte { - return (*[maxRate]byte)(b) -} +import ( + "crypto/subtle" + "encoding/binary" + "unsafe" -var ( - xorIn = xorInGeneric - copyOut = copyOutGeneric - xorInUnaligned = xorInGeneric - copyOutUnaligned = copyOutGeneric + "golang.org/x/sys/cpu" ) -const xorImplementationUnaligned = "generic" +// xorIn xors the bytes in buf into the state. +func xorIn(d *state, buf []byte) { + if cpu.IsBigEndian { + for i := 0; len(buf) >= 8; i++ { + a := binary.LittleEndian.Uint64(buf) + d.a[i] ^= a + buf = buf[8:] + } + } else { + ab := (*[25 * 64 / 8]byte)(unsafe.Pointer(&d.a)) + subtle.XORBytes(ab[:], ab[:], buf) + } +} + +// copyOut copies uint64s to a byte buffer. +func copyOut(d *state, b []byte) { + if cpu.IsBigEndian { + for i := 0; len(b) >= 8; i++ { + binary.LittleEndian.PutUint64(b, d.a[i]) + b = b[8:] + } + } else { + ab := (*[25 * 64 / 8]byte)(unsafe.Pointer(&d.a)) + copy(b, ab[:]) + } +} diff --git a/vendor/golang.org/x/crypto/sha3/xor_generic.go b/vendor/golang.org/x/crypto/sha3/xor_generic.go deleted file mode 100644 index 8d9477112..000000000 --- a/vendor/golang.org/x/crypto/sha3/xor_generic.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
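Editor's note: the rewritten xor.go above replaces the per-architecture aligned/unaligned variants with one portable pair: big-endian targets decode lanes via encoding/binary, while little-endian targets reinterpret the state as bytes and defer to crypto/subtle.XORBytes (available since Go 1.20). A small demo of the XORBytes semantics the fast path relies on:

```go
package main

import (
    "crypto/subtle"
    "fmt"
)

func main() {
    state := []byte{0xff, 0x00, 0xff, 0x00} // stand-in for the sponge's rate block
    input := []byte{0x0f, 0x0f, 0x0f, 0x0f}
    // dst[i] = x[i] ^ y[i] for the shorter of x and y; returns bytes written.
    subtle.XORBytes(state, state, input)
    fmt.Printf("%x\n", state) // f00ff00f
}
```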
- -package sha3 - -import "encoding/binary" - -// xorInGeneric xors the bytes in buf into the state; it -// makes no non-portable assumptions about memory layout -// or alignment. -func xorInGeneric(d *state, buf []byte) { - n := len(buf) / 8 - - for i := 0; i < n; i++ { - a := binary.LittleEndian.Uint64(buf) - d.a[i] ^= a - buf = buf[8:] - } -} - -// copyOutGeneric copies uint64s to a byte buffer. -func copyOutGeneric(d *state, b []byte) { - for i := 0; len(b) >= 8; i++ { - binary.LittleEndian.PutUint64(b, d.a[i]) - b = b[8:] - } -} diff --git a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go deleted file mode 100644 index 870e2d16e..000000000 --- a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (amd64 || 386 || ppc64le) && !purego - -package sha3 - -import "unsafe" - -// A storageBuf is an aligned array of maxRate bytes. -type storageBuf [maxRate / 8]uint64 - -func (b *storageBuf) asBytes() *[maxRate]byte { - return (*[maxRate]byte)(unsafe.Pointer(b)) -} - -// xorInUnaligned uses unaligned reads and writes to update d.a to contain d.a -// XOR buf. -func xorInUnaligned(d *state, buf []byte) { - n := len(buf) - bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8] - if n >= 72 { - d.a[0] ^= bw[0] - d.a[1] ^= bw[1] - d.a[2] ^= bw[2] - d.a[3] ^= bw[3] - d.a[4] ^= bw[4] - d.a[5] ^= bw[5] - d.a[6] ^= bw[6] - d.a[7] ^= bw[7] - d.a[8] ^= bw[8] - } - if n >= 104 { - d.a[9] ^= bw[9] - d.a[10] ^= bw[10] - d.a[11] ^= bw[11] - d.a[12] ^= bw[12] - } - if n >= 136 { - d.a[13] ^= bw[13] - d.a[14] ^= bw[14] - d.a[15] ^= bw[15] - d.a[16] ^= bw[16] - } - if n >= 144 { - d.a[17] ^= bw[17] - } - if n >= 168 { - d.a[18] ^= bw[18] - d.a[19] ^= bw[19] - d.a[20] ^= bw[20] - } -} - -func copyOutUnaligned(d *state, buf []byte) { - ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0])) - copy(buf, ab[:]) -} - -var ( - xorIn = xorInUnaligned - copyOut = copyOutUnaligned -) - -const xorImplementationUnaligned = "unaligned" diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE index 6a66aea5e..2a7cf70da 100644 --- a/vendor/golang.org/x/exp/LICENSE +++ b/vendor/golang.org/x/exp/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go new file mode 100644 index 000000000..fbf1934a0 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/cmp.go @@ -0,0 +1,44 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package slices + +import "golang.org/x/exp/constraints" + +// min is a version of the predeclared function from the Go 1.21 release. +func min[T constraints.Ordered](a, b T) T { + if a < b || isNaN(a) { + return a + } + return b +} + +// max is a version of the predeclared function from the Go 1.21 release. +func max[T constraints.Ordered](a, b T) T { + if a > b || isNaN(a) { + return a + } + return b +} + +// cmpLess is a copy of cmp.Less from the Go 1.21 release. +func cmpLess[T constraints.Ordered](x, y T) bool { + return (isNaN(x) && !isNaN(y)) || x < y +} + +// cmpCompare is a copy of cmp.Compare from the Go 1.21 release. +func cmpCompare[T constraints.Ordered](x, y T) int { + xNaN := isNaN(x) + yNaN := isNaN(y) + if xNaN && yNaN { + return 0 + } + if xNaN || x < y { + return -1 + } + if yNaN || x > y { + return +1 + } + return 0 +} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index a17b3cf69..46ceac343 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -3,23 +3,20 @@ // license that can be found in the LICENSE file. // Package slices defines various functions useful with slices of any type. -// Unless otherwise specified, these functions all apply to the elements -// of a slice at index 0 <= i < len(s). -// -// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a -// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings), -// or the sorting may fail to sort correctly. A common case is when sorting slices of -// floating-point numbers containing NaN values. package slices -import "golang.org/x/exp/constraints" +import ( + "unsafe" + + "golang.org/x/exp/constraints" +) // Equal reports whether two slices are equal: the same length and all // elements equal. If the lengths are different, Equal returns false. // Otherwise, the elements are compared in increasing index order, and the // comparison stops at the first unequal pair. // Floating point NaNs are not considered equal. -func Equal[E comparable](s1, s2 []E) bool { +func Equal[S ~[]E, E comparable](s1, s2 S) bool { if len(s1) != len(s2) { return false } @@ -31,12 +28,12 @@ func Equal[E comparable](s1, s2 []E) bool { return true } -// EqualFunc reports whether two slices are equal using a comparison +// EqualFunc reports whether two slices are equal using an equality // function on each pair of elements. If the lengths are different, // EqualFunc returns false. Otherwise, the elements are compared in // increasing index order, and the comparison stops at the first index // for which eq returns false. -func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { +func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool { if len(s1) != len(s2) { return false } @@ -49,45 +46,37 @@ func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { return true } -// Compare compares the elements of s1 and s2. -// The elements are compared sequentially, starting at index 0, +// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair +// of elements. The elements are compared sequentially, starting at index 0, // until one element is not equal to the other. // The result of comparing the first non-matching elements is returned. // If both slices are equal until one of them ends, the shorter slice is // considered less than the longer one. 
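Editor's note: the vendored cmp.go above backports the Go 1.21 cmp package semantics, and the subtle part is NaN ordering: NaNs sort before every other value and compare equal to each other. A standalone float64 rendering of cmpCompare for illustration:

```go
package main

import (
    "fmt"
    "math"
)

// cmpCompare, specialized to float64: a NaN is treated as less than any
// non-NaN value and equal to another NaN, matching the vendored generic copy.
func cmpCompare(x, y float64) int {
    xNaN, yNaN := math.IsNaN(x), math.IsNaN(y)
    if xNaN && yNaN {
        return 0
    }
    if xNaN || x < y {
        return -1
    }
    if yNaN || x > y {
        return +1
    }
    return 0
}

func main() {
    nan := math.NaN()
    fmt.Println(cmpCompare(nan, 1))   // -1: NaN orders first
    fmt.Println(cmpCompare(1, nan))   // +1
    fmt.Println(cmpCompare(nan, nan)) // 0
}
```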
// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. -// Comparisons involving floating point NaNs are ignored. -func Compare[E constraints.Ordered](s1, s2 []E) int { - s2len := len(s2) +func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int { for i, v1 := range s1 { - if i >= s2len { + if i >= len(s2) { return +1 } v2 := s2[i] - switch { - case v1 < v2: - return -1 - case v1 > v2: - return +1 + if c := cmpCompare(v1, v2); c != 0 { + return c } } - if len(s1) < s2len { + if len(s1) < len(s2) { return -1 } return 0 } -// CompareFunc is like Compare but uses a comparison function -// on each pair of elements. The elements are compared in increasing -// index order, and the comparisons stop after the first time cmp -// returns non-zero. +// CompareFunc is like [Compare] but uses a custom comparison function on each +// pair of elements. // The result is the first non-zero result of cmp; if cmp always // returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), // and +1 if len(s1) > len(s2). -func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { - s2len := len(s2) +func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int { for i, v1 := range s1 { - if i >= s2len { + if i >= len(s2) { return +1 } v2 := s2[i] @@ -95,7 +84,7 @@ func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { return c } } - if len(s1) < s2len { + if len(s1) < len(s2) { return -1 } return 0 @@ -103,9 +92,9 @@ func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { // Index returns the index of the first occurrence of v in s, // or -1 if not present. -func Index[E comparable](s []E, v E) int { - for i, vs := range s { - if v == vs { +func Index[S ~[]E, E comparable](s S, v E) int { + for i := range s { + if v == s[i] { return i } } @@ -114,9 +103,9 @@ func Index[E comparable](s []E, v E) int { // IndexFunc returns the first index i satisfying f(s[i]), // or -1 if none do. -func IndexFunc[E any](s []E, f func(E) bool) int { - for i, v := range s { - if f(v) { +func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { + for i := range s { + if f(s[i]) { return i } } @@ -124,42 +113,237 @@ func IndexFunc[E any](s []E, f func(E) bool) int { } // Contains reports whether v is present in s. -func Contains[E comparable](s []E, v E) bool { +func Contains[S ~[]E, E comparable](s S, v E) bool { return Index(s, v) >= 0 } +// ContainsFunc reports whether at least one +// element e of s satisfies f(e). +func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { + return IndexFunc(s, f) >= 0 +} + // Insert inserts the values v... into s at index i, // returning the modified slice. -// In the returned slice r, r[i] == v[0]. +// The elements at s[i:] are shifted up to make room. +// In the returned slice r, r[i] == v[0], +// and r[i+len(v)] == value originally at r[i]. // Insert panics if i is out of range. // This function is O(len(s) + len(v)). func Insert[S ~[]E, E any](s S, i int, v ...E) S { - tot := len(s) + len(v) - if tot <= cap(s) { - s2 := s[:tot] - copy(s2[i+len(v):], s[i:]) + m := len(v) + if m == 0 { + return s + } + n := len(s) + if i == n { + return append(s, v...) + } + if n+m > cap(s) { + // Use append rather than make so that we bump the size of + // the slice up to the next storage class. + // This is what Grow does but we don't call Grow because + // that might copy the values twice. + s2 := append(s[:i], make(S, n+m-i)...) 
copy(s2[i:], v) + copy(s2[i+m:], s[i:]) return s2 } - s2 := make(S, tot) - copy(s2, s[:i]) - copy(s2[i:], v) - copy(s2[i+len(v):], s[i:]) - return s2 + s = s[:n+m] + + // before: + // s: aaaaaaaabbbbccccccccdddd + // ^ ^ ^ ^ + // i i+m n n+m + // after: + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // + // a are the values that don't move in s. + // v are the values copied in from v. + // b and c are the values from s that are shifted up in index. + // d are the values that get overwritten, never to be seen again. + + if !overlaps(v, s[i+m:]) { + // Easy case - v does not overlap either the c or d regions. + // (It might be in some of a or b, or elsewhere entirely.) + // The data we copy up doesn't write to v at all, so just do it. + + copy(s[i+m:], s[i:]) + + // Now we have + // s: aaaaaaaabbbbbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // Note the b values are duplicated. + + copy(s[i:], v) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s + } + + // The hard case - v overlaps c or d. We can't just shift up + // the data because we'd move or clobber the values we're trying + // to insert. + // So instead, write v on top of d, then rotate. + copy(s[n:], v) + + // Now we have + // s: aaaaaaaabbbbccccccccvvvv + // ^ ^ ^ ^ + // i i+m n n+m + + rotateRight(s[i:], m) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s +} + +// clearSlice sets all elements up to the length of s to the zero value of E. +// We may use the builtin clear func instead, and remove clearSlice, when upgrading +// to Go 1.21+. +func clearSlice[S ~[]E, E any](s S) { + var zero E + for i := range s { + s[i] = zero + } } // Delete removes the elements s[i:j] from s, returning the modified slice. -// Delete panics if s[i:j] is not a valid slice of s. -// Delete modifies the contents of the slice s; it does not create a new slice. -// Delete is O(len(s)-j), so if many items must be deleted, it is better to +// Delete panics if j > len(s) or s[i:j] is not a valid slice of s. +// Delete is O(len(s)-i), so if many items must be deleted, it is better to // make a single call deleting them all together than to delete one at a time. -// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those -// elements contain pointers you might consider zeroing those elements so that -// objects they reference can be garbage collected. +// Delete zeroes the elements s[len(s)-(j-i):len(s)]. func Delete[S ~[]E, E any](s S, i, j int) S { - _ = s[i:j] // bounds check + _ = s[i:j:len(s)] // bounds check + + if i == j { + return s + } + + oldlen := len(s) + s = append(s[:i], s[j:]...) + clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC + return s +} + +// DeleteFunc removes any elements from s for which del returns true, +// returning the modified slice. +// DeleteFunc zeroes the elements between the new length and the original length. +func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { + i := IndexFunc(s, del) + if i == -1 { + return s + } + // Don't start copying elements until we find one to delete. + for j := i + 1; j < len(s); j++ { + if v := s[j]; !del(v) { + s[i] = v + i++ + } + } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC + return s[:i] +} + +// Replace replaces the elements s[i:j] by the given v, and returns the +// modified slice. Replace panics if s[i:j] is not a valid slice of s. 
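Editor's note: the overlap handling above is what makes it safe to pass a view of the destination slice as the values to insert. A small example that exercises the rotate-based "hard case" (v aliases the region that must shift up):

```go
package main

import (
    "fmt"

    "golang.org/x/exp/slices"
)

func main() {
    s := make([]int, 0, 8)
    s = append(s, 0, 1, 2, 3)
    // s[2:4] shares s's backing array and overlaps the elements being
    // shifted, so Insert takes the copy-to-tail-then-rotate path above.
    s = slices.Insert(s, 1, s[2:4]...)
    fmt.Println(s) // [0 2 3 1 2 3]
}
```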
+// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length. +func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { + _ = s[i:j] // verify that i:j is a valid subslice + + if i == j { + return Insert(s, i, v...) + } + if j == len(s) { + return append(s[:i], v...) + } + + tot := len(s[:i]) + len(v) + len(s[j:]) + if tot > cap(s) { + // Too big to fit, allocate and copy over. + s2 := append(s[:i], make(S, tot-i)...) // See Insert + copy(s2[i:], v) + copy(s2[i+len(v):], s[j:]) + return s2 + } + + r := s[:tot] + + if i+len(v) <= j { + // Easy, as v fits in the deleted portion. + copy(r[i:], v) + if i+len(v) != j { + copy(r[i+len(v):], s[j:]) + } + clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC + return r + } + + // We are expanding (v is bigger than j-i). + // The situation is something like this: + // (example has i=4,j=8,len(s)=16,len(v)=6) + // s: aaaaxxxxbbbbbbbbyy + // ^ ^ ^ ^ + // i j len(s) tot + // a: prefix of s + // x: deleted range + // b: more of s + // y: area to expand into + + if !overlaps(r[i+len(v):], v) { + // Easy, as v is not clobbered by the first copy. + copy(r[i+len(v):], s[j:]) + copy(r[i:], v) + return r + } + + // This is a situation where we don't have a single place to which + // we can copy v. Parts of it need to go to two different places. + // We want to copy the prefix of v into y and the suffix into x, then + // rotate |y| spots to the right. + // + // v[2:] v[:2] + // | | + // s: aaaavvvvbbbbbbbbvv + // ^ ^ ^ ^ + // i j len(s) tot + // + // If either of those two destinations don't alias v, then we're good. + y := len(v) - (j - i) // length of y portion - return append(s[:i], s[j:]...) + if !overlaps(r[i:j], v) { + copy(r[i:j], v[y:]) + copy(r[len(s):], v[:y]) + rotateRight(r[i:], y) + return r + } + if !overlaps(r[len(s):], v) { + copy(r[len(s):], v[:y]) + copy(r[i:j], v[y:]) + rotateRight(r[i:], y) + return r + } + + // Now we know that v overlaps both x and y. + // That means that the entirety of b is *inside* v. + // So we don't need to preserve b at all; instead we + // can copy v first, then copy the b part of v out of + // v to the right destination. + k := startIdx(v, s[j:]) + copy(r[i:], v) + copy(r[i+len(v):], r[i+k:]) + return r } // Clone returns a copy of the slice. @@ -174,50 +358,158 @@ func Clone[S ~[]E, E any](s S) S { // Compact replaces consecutive runs of equal elements with a single copy. // This is like the uniq command found on Unix. -// Compact modifies the contents of the slice s; it does not create a new slice. +// Compact modifies the contents of the slice s and returns the modified slice, +// which may have a smaller length. +// Compact zeroes the elements between the new length and the original length. func Compact[S ~[]E, E comparable](s S) S { - if len(s) == 0 { + if len(s) < 2 { return s } i := 1 - last := s[0] - for _, v := range s[1:] { - if v != last { - s[i] = v + for k := 1; k < len(s); k++ { + if s[k] != s[k-1] { + if i != k { + s[i] = s[k] + } i++ - last = v } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } -// CompactFunc is like Compact but uses a comparison function. +// CompactFunc is like [Compact] but uses an equality function to compare elements. +// For runs of elements that compare equal, CompactFunc keeps the first one. +// CompactFunc zeroes the elements between the new length and the original length. 
func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { - if len(s) == 0 { + if len(s) < 2 { return s } i := 1 - last := s[0] - for _, v := range s[1:] { - if !eq(v, last) { - s[i] = v + for k := 1; k < len(s); k++ { + if !eq(s[k], s[k-1]) { + if i != k { + s[i] = s[k] + } i++ - last = v } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } // Grow increases the slice's capacity, if necessary, to guarantee space for // another n elements. After Grow(n), at least n elements can be appended -// to the slice without another allocation. Grow may modify elements of the -// slice between the length and the capacity. If n is negative or too large to +// to the slice without another allocation. If n is negative or too large to // allocate the memory, Grow panics. func Grow[S ~[]E, E any](s S, n int) S { - return append(s, make(S, n)...)[:len(s)] + if n < 0 { + panic("cannot be negative") + } + if n -= cap(s) - len(s); n > 0 { + // TODO(https://go.dev/issue/53888): Make using []E instead of S + // to workaround a compiler bug where the runtime.growslice optimization + // does not take effect. Revert when the compiler is fixed. + s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] + } + return s } // Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. func Clip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] } + +// Rotation algorithm explanation: +// +// rotate left by 2 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join first parts +// 89234567 01 +// recursively rotate first left part by 2 +// 23456789 01 +// join at the end +// 2345678901 +// +// rotate left by 8 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join last parts +// 89 23456701 +// recursively rotate second part left by 6 +// 89 01234567 +// join at the end +// 8901234567 + +// TODO: There are other rotate algorithms. +// This algorithm has the desirable property that it moves each element exactly twice. +// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes. +// The follow-cycles algorithm can be 1-write but it is not very cache friendly. + +// rotateLeft rotates b left by n spaces. +// s_final[i] = s_orig[i+r], wrapping around. +func rotateLeft[E any](s []E, r int) { + for r != 0 && r != len(s) { + if r*2 <= len(s) { + swap(s[:r], s[len(s)-r:]) + s = s[:len(s)-r] + } else { + swap(s[:len(s)-r], s[r:]) + s, r = s[len(s)-r:], r*2-len(s) + } + } +} +func rotateRight[E any](s []E, r int) { + rotateLeft(s, len(s)-r) +} + +// swap swaps the contents of x and y. x and y must be equal length and disjoint. +func swap[E any](x, y []E) { + for i := 0; i < len(x); i++ { + x[i], y[i] = y[i], x[i] + } +} + +// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap. +func overlaps[E any](a, b []E) bool { + if len(a) == 0 || len(b) == 0 { + return false + } + elemSize := unsafe.Sizeof(a[0]) + if elemSize == 0 { + return false + } + // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445. + // Also see crypto/internal/alias/alias.go:AnyOverlap + return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) && + uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1) +} + +// startIdx returns the index in haystack where the needle starts. 
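Editor's note: the rotation helper above is small enough to trace end to end. Copied here with a driver so the worked example from the comment block (rotate 0123456789 left by 2) can be run directly:

```go
package main

import "fmt"

// rotateLeft, as vendored: repeatedly swap a prefix against an equally
// sized suffix, shrinking the unrotated region until it disappears.
// Each element moves at most twice.
func rotateLeft[E any](s []E, r int) {
    for r != 0 && r != len(s) {
        if r*2 <= len(s) {
            swap(s[:r], s[len(s)-r:])
            s = s[:len(s)-r]
        } else {
            swap(s[:len(s)-r], s[r:])
            s, r = s[len(s)-r:], r*2-len(s)
        }
    }
}

// swap exchanges the contents of two equal-length, disjoint slices.
func swap[E any](x, y []E) {
    for i := range x {
        x[i], y[i] = y[i], x[i]
    }
}

func main() {
    s := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
    rotateLeft(s, 2)
    fmt.Println(s) // [2 3 4 5 6 7 8 9 0 1]
}
```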
+// prerequisite: the needle must be aliased entirely inside the haystack. +func startIdx[E any](haystack, needle []E) int { + p := &needle[0] + for i := range haystack { + if p == &haystack[i] { + return i + } + } + // TODO: what if the overlap is by a non-integral number of Es? + panic("needle not found") +} + +// Reverse reverses the elements of the slice in place. +func Reverse[S ~[]E, E any](s S) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index c22e74bd1..f58bbc7ba 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp + package slices import ( @@ -11,97 +13,159 @@ import ( ) // Sort sorts a slice of any ordered type in ascending order. -// Sort may fail to sort correctly when sorting slices of floating-point -// numbers containing Not-a-number (NaN) values. -// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))}) -// instead if the input may contain NaNs. -func Sort[E constraints.Ordered](x []E) { +// When sorting floating-point numbers, NaNs are ordered before other values. +func Sort[S ~[]E, E constraints.Ordered](x S) { n := len(x) pdqsortOrdered(x, 0, n, bits.Len(uint(n))) } -// SortFunc sorts the slice x in ascending order as determined by the less function. -// This sort is not guaranteed to be stable. +// SortFunc sorts the slice x in ascending order as determined by the cmp +// function. This sort is not guaranteed to be stable. +// cmp(a, b) should return a negative number when a < b, a positive number when +// a > b and zero when a == b or when a is not comparable to b in the sense +// of the formal definition of Strict Weak Ordering. // -// SortFunc requires that less is a strict weak ordering. +// SortFunc requires that cmp is a strict weak ordering. // See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. -func SortFunc[E any](x []E, less func(a, b E) bool) { +// To indicate 'uncomparable', return 0 from the function. +func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { n := len(x) - pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) + pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) } -// SortStable sorts the slice x while keeping the original order of equal -// elements, using less to compare elements. -func SortStableFunc[E any](x []E, less func(a, b E) bool) { - stableLessFunc(x, len(x), less) +// SortStableFunc sorts the slice x while keeping the original order of equal +// elements, using cmp to compare elements in the same way as [SortFunc]. +func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { + stableCmpFunc(x, len(x), cmp) } // IsSorted reports whether x is sorted in ascending order. -func IsSorted[E constraints.Ordered](x []E) bool { +func IsSorted[S ~[]E, E constraints.Ordered](x S) bool { for i := len(x) - 1; i > 0; i-- { - if x[i] < x[i-1] { + if cmpLess(x[i], x[i-1]) { return false } } return true } -// IsSortedFunc reports whether x is sorted in ascending order, with less as the -// comparison function. -func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool { +// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the +// comparison function as defined by [SortFunc]. 
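Editor's note: the less-to-cmp migration above changes the caller contract: cmp returns a negative, zero, or positive int instead of a bool. A usage sketch against the new SortFunc signature:

```go
package main

import (
    "fmt"

    "golang.org/x/exp/slices"
)

func main() {
    words := []string{"banana", "kiwi", "apple"}
    // cmp must return <0, 0, or >0; returning 0 also covers the
    // "uncomparable" case called out in the doc comment.
    slices.SortFunc(words, func(a, b string) int {
        return len(a) - len(b) // order by length
    })
    fmt.Println(words) // [kiwi apple banana]
}
```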
+func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool { for i := len(x) - 1; i > 0; i-- { - if less(x[i], x[i-1]) { + if cmp(x[i], x[i-1]) < 0 { return false } } return true } +// Min returns the minimal value in x. It panics if x is empty. +// For floating-point numbers, Min propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Min[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Min: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = min(m, x[i]) + } + return m +} + +// MinFunc returns the minimal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one minimal element +// according to the cmp function, MinFunc returns the first one. +func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MinFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) < 0 { + m = x[i] + } + } + return m +} + +// Max returns the maximal value in x. It panics if x is empty. +// For floating-point E, Max propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Max[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Max: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = max(m, x[i]) + } + return m +} + +// MaxFunc returns the maximal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one maximal element +// according to the cmp function, MaxFunc returns the first one. +func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MaxFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) > 0 { + m = x[i] + } + } + return m +} + // BinarySearch searches for target in a sorted slice and returns the position // where target is found, or the position where target would appear in the // sort order; it also returns a bool saying whether the target is really found // in the slice. The slice must be sorted in increasing order. -func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { - // search returns the leftmost position where f returns true, or len(x) if f - // returns false for all x. This is the insertion position for target in x, - // and could point to an element that's either == target or not. - pos := search(len(x), func(i int) bool { return x[i] >= target }) - if pos >= len(x) || x[pos] != target { - return pos, false - } else { - return pos, true - } -} - -// BinarySearchFunc works like BinarySearch, but uses a custom comparison -// function. The slice must be sorted in increasing order, where "increasing" is -// defined by cmp. cmp(a, b) is expected to return an integer comparing the two -// parameters: 0 if a == b, a negative number if a < b and a positive number if -// a > b. -func BinarySearchFunc[E any](x []E, target E, cmp func(E, E) int) (int, bool) { - pos := search(len(x), func(i int) bool { return cmp(x[i], target) >= 0 }) - if pos >= len(x) || cmp(x[pos], target) != 0 { - return pos, false - } else { - return pos, true +func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) { + // Inlining is faster than calling BinarySearchFunc with a lambda. + n := len(x) + // Define x[-1] < target and x[n] >= target. + // Invariant: x[i-1] < target, x[j] >= target. 
+ i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if cmpLess(x[h], target) { + i = h + 1 // preserves x[i-1] < target + } else { + j = h // preserves x[j] >= target + } } + // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. + return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target))) } -func search(n int, f func(int) bool) int { - // Define f(-1) == false and f(n) == true. - // Invariant: f(i-1) == false, f(j) == true. +// BinarySearchFunc works like [BinarySearch], but uses a custom comparison +// function. The slice must be sorted in increasing order, where "increasing" +// is defined by cmp. cmp should return 0 if the slice element matches +// the target, a negative number if the slice element precedes the target, +// or a positive number if the slice element follows the target. +// cmp must implement the same ordering as the slice, such that if +// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. +func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) { + n := len(x) + // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . + // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. i, j := 0, n for i < j { h := int(uint(i+j) >> 1) // avoid overflow when computing h // i ≤ h < j - if !f(h) { - i = h + 1 // preserves f(i-1) == false + if cmp(x[h], target) < 0 { + i = h + 1 // preserves cmp(x[i - 1], target) < 0 } else { - j = h // preserves f(j) == true + j = h // preserves cmp(x[j], target) >= 0 } } - // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. - return i + // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i. + return i, i < n && cmp(x[i], target) == 0 } type sortedHint int // hint for pdqsort when choosing the pivot @@ -125,3 +189,9 @@ func (r *xorshift) Next() uint64 { func nextPowerOfTwo(length int) uint { return 1 << bits.Len(uint(length)) } + +// isNaN reports whether x is a NaN without requiring the math package. +// This will always return false if T is not floating-point. +func isNaN[T constraints.Ordered](x T) bool { + return x != x +} diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go similarity index 64% rename from vendor/golang.org/x/exp/slices/zsortfunc.go rename to vendor/golang.org/x/exp/slices/zsortanyfunc.go index 2a632476c..06f2c7a24 100644 --- a/vendor/golang.org/x/exp/slices/zsortfunc.go +++ b/vendor/golang.org/x/exp/slices/zsortanyfunc.go @@ -6,28 +6,28 @@ package slices -// insertionSortLessFunc sorts data[a:b] using insertion sort. -func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +// insertionSortCmpFunc sorts data[a:b] using insertion sort. +func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { for i := a + 1; i < b; i++ { - for j := i; j > a && less(data[j], data[j-1]); j-- { + for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- { data[j], data[j-1] = data[j-1], data[j] } } } -// siftDownLessFunc implements the heap property on data[lo:hi]. +// siftDownCmpFunc implements the heap property on data[lo:hi]. // first is an offset into the array where the root of the heap lies. 
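Editor's note: BinarySearch now inlines the loop rather than going through a closure, and a NaN element is considered to match a NaN target. Typical usage returns the leftmost insertion point plus a found flag:

```go
package main

import (
    "fmt"

    "golang.org/x/exp/slices"
)

func main() {
    s := []int{1, 3, 3, 7, 9}
    i, ok := slices.BinarySearch(s, 3)
    fmt.Println(i, ok) // 1 true  (leftmost match)
    i, ok = slices.BinarySearch(s, 4)
    fmt.Println(i, ok) // 3 false (insertion point before 7)
}
```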
-func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) { +func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) { root := lo for { child := 2*root + 1 if child >= hi { break } - if child+1 < hi && less(data[first+child], data[first+child+1]) { + if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) { child++ } - if !less(data[first+root], data[first+child]) { + if !(cmp(data[first+root], data[first+child]) < 0) { return } data[first+root], data[first+child] = data[first+child], data[first+root] @@ -35,30 +35,30 @@ func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool } } -func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { first := a lo := 0 hi := b - a // Build heap with greatest element at top. for i := (hi - 1) / 2; i >= 0; i-- { - siftDownLessFunc(data, i, hi, first, less) + siftDownCmpFunc(data, i, hi, first, cmp) } // Pop elements, largest first, into end of data. for i := hi - 1; i >= 0; i-- { data[first], data[first+i] = data[first+i], data[first] - siftDownLessFunc(data, lo, i, first, less) + siftDownCmpFunc(data, lo, i, first, cmp) } } -// pdqsortLessFunc sorts data[a:b]. +// pdqsortCmpFunc sorts data[a:b]. // The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. // pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf // C++ implementation: https://github.com/orlp/pdqsort // Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ // limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. -func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { +func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) { const maxInsertion = 12 var ( @@ -70,25 +70,25 @@ func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { length := b - a if length <= maxInsertion { - insertionSortLessFunc(data, a, b, less) + insertionSortCmpFunc(data, a, b, cmp) return } // Fall back to heapsort if too many bad choices were made. if limit == 0 { - heapSortLessFunc(data, a, b, less) + heapSortCmpFunc(data, a, b, cmp) return } // If the last partitioning was imbalanced, we need to breaking patterns. if !wasBalanced { - breakPatternsLessFunc(data, a, b, less) + breakPatternsCmpFunc(data, a, b, cmp) limit-- } - pivot, hint := choosePivotLessFunc(data, a, b, less) + pivot, hint := choosePivotCmpFunc(data, a, b, cmp) if hint == decreasingHint { - reverseRangeLessFunc(data, a, b, less) + reverseRangeCmpFunc(data, a, b, cmp) // The chosen pivot was pivot-a elements after the start of the array. // After reversing it is pivot-a elements before the end of the array. // The idea came from Rust's implementation. @@ -98,48 +98,48 @@ func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { // The slice is likely already sorted. if wasBalanced && wasPartitioned && hint == increasingHint { - if partialInsertionSortLessFunc(data, a, b, less) { + if partialInsertionSortCmpFunc(data, a, b, cmp) { return } } // Probably the slice contains many duplicate elements, partition the slice into // elements equal to and elements greater than the pivot. 
- if a > 0 && !less(data[a-1], data[pivot]) { - mid := partitionEqualLessFunc(data, a, b, pivot, less) + if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) { + mid := partitionEqualCmpFunc(data, a, b, pivot, cmp) a = mid continue } - mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less) + mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp) wasPartitioned = alreadyPartitioned leftLen, rightLen := mid-a, b-mid balanceThreshold := length / 8 if leftLen < rightLen { wasBalanced = leftLen >= balanceThreshold - pdqsortLessFunc(data, a, mid, limit, less) + pdqsortCmpFunc(data, a, mid, limit, cmp) a = mid + 1 } else { wasBalanced = rightLen >= balanceThreshold - pdqsortLessFunc(data, mid+1, b, limit, less) + pdqsortCmpFunc(data, mid+1, b, limit, cmp) b = mid } } } -// partitionLessFunc does one quicksort partition. +// partitionCmpFunc does one quicksort partition. // Let p = data[pivot] // Moves elements in data[a:b] around, so that data[i]

<p and data[j]>=p for i<newpivot and j>newpivot. // On return, data[newpivot] = p -func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) { +func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) { data[a], data[pivot] = data[pivot], data[a] i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - for i <= j && less(data[i], data[a]) { + for i <= j && (cmp(data[i], data[a]) < 0) { i++ } - for i <= j && !less(data[j], data[a]) { + for i <= j && !(cmp(data[j], data[a]) < 0) { j-- } if i > j { @@ -151,10 +151,10 @@ func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) j-- for { - for i <= j && less(data[i], data[a]) { + for i <= j && (cmp(data[i], data[a]) < 0) { i++ } - for i <= j && !less(data[j], data[a]) { + for i <= j && !(cmp(data[j], data[a]) < 0) { j-- } if i > j { @@ -168,17 +168,17 @@ func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) return j, false } -// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. // It assumed that data[a:b] does not contain elements smaller than the data[pivot]. -func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) { +func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) { data[a], data[pivot] = data[pivot], data[a] i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned for { - for i <= j && !less(data[a], data[i]) { + for i <= j && !(cmp(data[a], data[i]) < 0) { i++ } - for i <= j && less(data[a], data[j]) { + for i <= j && (cmp(data[a], data[j]) < 0) { j-- } if i > j { @@ -191,15 +191,15 @@ func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) return i } -// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end. -func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool { +// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end. +func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool { const ( maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted shortestShifting = 50 // don't shift any elements on short arrays ) i := a + 1 for j := 0; j < maxSteps; j++ { - for i < b && !less(data[i], data[i-1]) { + for i < b && !(cmp(data[i], data[i-1]) < 0) { i++ } @@ -216,7 +216,7 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b // Shift the smaller one to the left. if i-a >= 2 { for j := i - 1; j >= 1; j-- { - if !less(data[j], data[j-1]) { + if !(cmp(data[j], data[j-1]) < 0) { break } data[j], data[j-1] = data[j-1], data[j] @@ -225,7 +225,7 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b // Shift the greater one to the right. 
if b-i >= 2 { for j := i + 1; j < b; j++ { - if !less(data[j], data[j-1]) { + if !(cmp(data[j], data[j-1]) < 0) { break } data[j], data[j-1] = data[j-1], data[j] @@ -235,9 +235,9 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b return false } -// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns +// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns // that might cause imbalanced partitions in quicksort. -func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { length := b - a if length >= 8 { random := xorshift(length) @@ -253,12 +253,12 @@ func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { } } -// choosePivotLessFunc chooses a pivot in data[a:b]. +// choosePivotCmpFunc chooses a pivot in data[a:b]. // // [0,8): chooses a static pivot. // [8,shortestNinther): uses the simple median-of-three method. // [shortestNinther,∞): uses the Tukey ninther method. -func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) { +func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) { const ( shortestNinther = 50 maxSwaps = 4 * 3 @@ -276,12 +276,12 @@ func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (piv if l >= 8 { if l >= shortestNinther { // Tukey ninther method, the idea came from Rust's implementation. - i = medianAdjacentLessFunc(data, i, &swaps, less) - j = medianAdjacentLessFunc(data, j, &swaps, less) - k = medianAdjacentLessFunc(data, k, &swaps, less) + i = medianAdjacentCmpFunc(data, i, &swaps, cmp) + j = medianAdjacentCmpFunc(data, j, &swaps, cmp) + k = medianAdjacentCmpFunc(data, k, &swaps, cmp) } // Find the median among i, j, k and stores it into j. - j = medianLessFunc(data, i, j, k, &swaps, less) + j = medianCmpFunc(data, i, j, k, &swaps, cmp) } switch swaps { @@ -294,29 +294,29 @@ func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (piv } } -// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. -func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) { - if less(data[b], data[a]) { +// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) { + if cmp(data[b], data[a]) < 0 { *swaps++ return b, a } return a, b } -// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. -func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int { - a, b = order2LessFunc(data, a, b, swaps, less) - b, c = order2LessFunc(data, b, c, swaps, less) - a, b = order2LessFunc(data, a, b, swaps, less) +// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int { + a, b = order2CmpFunc(data, a, b, swaps, cmp) + b, c = order2CmpFunc(data, b, c, swaps, cmp) + a, b = order2CmpFunc(data, a, b, swaps, cmp) return b } -// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. 
-func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int { - return medianLessFunc(data, a-1, a, a+1, swaps, less) +// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int { + return medianCmpFunc(data, a-1, a, a+1, swaps, cmp) } -func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { i := a j := b - 1 for i < j { @@ -326,37 +326,37 @@ func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { } } -func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) { +func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) { for i := 0; i < n; i++ { data[a+i], data[b+i] = data[b+i], data[a+i] } } -func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { +func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) { blockSize := 20 // must be > 0 a, b := 0, blockSize for b <= n { - insertionSortLessFunc(data, a, b, less) + insertionSortCmpFunc(data, a, b, cmp) a = b b += blockSize } - insertionSortLessFunc(data, a, n, less) + insertionSortCmpFunc(data, a, n, cmp) for blockSize < n { a, b = 0, 2*blockSize for b <= n { - symMergeLessFunc(data, a, a+blockSize, b, less) + symMergeCmpFunc(data, a, a+blockSize, b, cmp) a = b b += 2 * blockSize } if m := a + blockSize; m < n { - symMergeLessFunc(data, a, m, n, less) + symMergeCmpFunc(data, a, m, n, cmp) } blockSize *= 2 } } -// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using // the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum // Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz // Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in @@ -375,7 +375,7 @@ func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { // symMerge assumes non-degenerate arguments: a < m && m < b. // Having the caller check this condition eliminates many leaf recursion calls, // which improves performance. -func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { +func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { // Avoid unnecessary recursions of symMerge // by direct insertion of data[a] into data[m:b] // if data[a:m] only contains one element. 
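Editor's note: stableCmpFunc above implements stability with block insertion sort followed by SymMerge, so equal elements keep their input order without an auxiliary buffer. A usage sketch of the public entry point:

```go
package main

import (
    "fmt"

    "golang.org/x/exp/slices"
)

type user struct {
    name string
    age  int
}

func main() {
    us := []user{{"b", 30}, {"a", 30}, {"c", 20}}
    slices.SortStableFunc(us, func(x, y user) int { return x.age - y.age })
    // The two 30-year-olds keep their original relative order.
    fmt.Println(us) // [{c 20} {b 30} {a 30}]
}
```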
@@ -387,7 +387,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { j := b for i < j { h := int(uint(i+j) >> 1) - if less(data[h], data[a]) { + if cmp(data[h], data[a]) < 0 { i = h + 1 } else { j = h @@ -411,7 +411,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { j := m for i < j { h := int(uint(i+j) >> 1) - if !less(data[m], data[h]) { + if !(cmp(data[m], data[h]) < 0) { i = h + 1 } else { j = h @@ -438,7 +438,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { for start < r { c := int(uint(start+r) >> 1) - if !less(data[p-c], data[c]) { + if !(cmp(data[p-c], data[c]) < 0) { start = c + 1 } else { r = c @@ -447,33 +447,33 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { end := n - start if start < m && m < end { - rotateLessFunc(data, start, m, end, less) + rotateCmpFunc(data, start, m, end, cmp) } if a < start && start < mid { - symMergeLessFunc(data, a, start, mid, less) + symMergeCmpFunc(data, a, start, mid, cmp) } if mid < end && end < b { - symMergeLessFunc(data, mid, end, b, less) + symMergeCmpFunc(data, mid, end, b, cmp) } } -// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: // Data of the form 'x u v y' is changed to 'x v u y'. // rotate performs at most b-a many calls to data.Swap, // and it assumes non-degenerate arguments: a < m && m < b. -func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { +func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { i := m - a j := b - m for i != j { if i > j { - swapRangeLessFunc(data, m-i, m, j, less) + swapRangeCmpFunc(data, m-i, m, j, cmp) i -= j } else { - swapRangeLessFunc(data, m-i, m+j-i, i, less) + swapRangeCmpFunc(data, m-i, m+j-i, i, cmp) j -= i } } // i == j - swapRangeLessFunc(data, m-i, m, i, less) + swapRangeCmpFunc(data, m-i, m, i, cmp) } diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go index efaa1c8b7..99b47c398 100644 --- a/vendor/golang.org/x/exp/slices/zsortordered.go +++ b/vendor/golang.org/x/exp/slices/zsortordered.go @@ -11,7 +11,7 @@ import "golang.org/x/exp/constraints" // insertionSortOrdered sorts data[a:b] using insertion sort. func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { for i := a + 1; i < b; i++ { - for j := i; j > a && (data[j] < data[j-1]); j-- { + for j := i; j > a && cmpLess(data[j], data[j-1]); j-- { data[j], data[j-1] = data[j-1], data[j] } } @@ -26,10 +26,10 @@ func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { if child >= hi { break } - if child+1 < hi && (data[first+child] < data[first+child+1]) { + if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) { child++ } - if !(data[first+root] < data[first+child]) { + if !cmpLess(data[first+root], data[first+child]) { return } data[first+root], data[first+child] = data[first+child], data[first+root] @@ -107,7 +107,7 @@ func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { // Probably the slice contains many duplicate elements, partition the slice into // elements equal to and elements greater than the pivot. 
- if a > 0 && !(data[a-1] < data[pivot]) { + if a > 0 && !cmpLess(data[a-1], data[pivot]) { mid := partitionEqualOrdered(data, a, b, pivot) a = mid continue @@ -138,10 +138,10 @@ func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivo data[a], data[pivot] = data[pivot], data[a] i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - for i <= j && (data[i] < data[a]) { + for i <= j && cmpLess(data[i], data[a]) { i++ } - for i <= j && !(data[j] < data[a]) { + for i <= j && !cmpLess(data[j], data[a]) { j-- } if i > j { @@ -153,10 +153,10 @@ func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivo j-- for { - for i <= j && (data[i] < data[a]) { + for i <= j && cmpLess(data[i], data[a]) { i++ } - for i <= j && !(data[j] < data[a]) { + for i <= j && !cmpLess(data[j], data[a]) { j-- } if i > j { @@ -177,10 +177,10 @@ func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (ne i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned for { - for i <= j && !(data[a] < data[i]) { + for i <= j && !cmpLess(data[a], data[i]) { i++ } - for i <= j && (data[a] < data[j]) { + for i <= j && cmpLess(data[a], data[j]) { j-- } if i > j { @@ -201,7 +201,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool ) i := a + 1 for j := 0; j < maxSteps; j++ { - for i < b && !(data[i] < data[i-1]) { + for i < b && !cmpLess(data[i], data[i-1]) { i++ } @@ -218,7 +218,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool // Shift the smaller one to the left. if i-a >= 2 { for j := i - 1; j >= 1; j-- { - if !(data[j] < data[j-1]) { + if !cmpLess(data[j], data[j-1]) { break } data[j], data[j-1] = data[j-1], data[j] @@ -227,7 +227,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool // Shift the greater one to the right. if b-i >= 2 { for j := i + 1; j < b; j++ { - if !(data[j] < data[j-1]) { + if !cmpLess(data[j], data[j-1]) { break } data[j], data[j-1] = data[j-1], data[j] @@ -298,7 +298,7 @@ func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, h // order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { - if data[b] < data[a] { + if cmpLess(data[b], data[a]) { *swaps++ return b, a } @@ -389,7 +389,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { j := b for i < j { h := int(uint(i+j) >> 1) - if data[h] < data[a] { + if cmpLess(data[h], data[a]) { i = h + 1 } else { j = h @@ -413,7 +413,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { j := m for i < j { h := int(uint(i+j) >> 1) - if !(data[m] < data[h]) { + if !cmpLess(data[m], data[h]) { i = h + 1 } else { j = h @@ -440,7 +440,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { for start < r { c := int(uint(start+r) >> 1) - if !(data[p-c] < data[c]) { + if !cmpLess(data[p-c], data[c]) { start = c + 1 } else { r = c diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go index 2466ae3d9..3a7e5ab17 100644 --- a/vendor/golang.org/x/net/html/doc.go +++ b/vendor/golang.org/x/net/html/doc.go @@ -104,7 +104,7 @@ tokenization, and tokenization and tree construction stages of the WHATWG HTML parsing specification respectively. 
While the tokenizer parses and normalizes individual HTML tokens, only the parser constructs the DOM tree from the tokenized HTML, as described in the tree construction stage of the -specification, dynamically modifying or extending the docuemnt's DOM tree. +specification, dynamically modifying or extending the document's DOM tree. If your use case requires semantically well-formed HTML documents, as defined by the WHATWG specification, the parser should be used rather than the tokenizer. diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go index 6e071e852..9b4de9401 100644 --- a/vendor/golang.org/x/net/http/httpguts/httplex.go +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -12,7 +12,7 @@ import ( "golang.org/x/net/idna" ) -var isTokenTable = [127]bool{ +var isTokenTable = [256]bool{ '!': true, '#': true, '$': true, @@ -93,12 +93,7 @@ var isTokenTable = [127]bool{ } func IsTokenRune(r rune) bool { - i := int(r) - return i < len(isTokenTable) && isTokenTable[i] -} - -func isNotToken(r rune) bool { - return !IsTokenRune(r) + return r < utf8.RuneSelf && isTokenTable[byte(r)] } // HeaderValuesContainsToken reports whether any string in values @@ -202,8 +197,8 @@ func ValidHeaderFieldName(v string) bool { if len(v) == 0 { return false } - for _, r := range v { - if !IsTokenRune(r) { + for i := 0; i < len(v); i++ { + if !isTokenTable[v[i]] { return false } } diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 43557ab7e..105c3b279 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -490,6 +490,9 @@ func terminalReadFrameError(err error) bool { // returned error is ErrFrameTooLarge. Other errors may be of type // ConnectionError, StreamError, or anything else from the underlying // reader. +// +// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID +// indicates the stream responsible for the error. func (fr *Framer) ReadFrame() (Frame, error) { fr.errDetail = nil if fr.lastFrame != nil { @@ -1521,7 +1524,7 @@ func (fr *Framer) maxHeaderStringLen() int { // readMetaFrame returns 0 or more CONTINUATION frames from fr and // merge them into the provided hf and returns a MetaHeadersFrame // with the decoded hpack values. -func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { +func (fr *Framer) readMetaFrame(hf *HeadersFrame) (Frame, error) { if fr.AllowIllegalReads { return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") } @@ -1592,7 +1595,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { } // It would be nice to send a RST_STREAM before sending the GOAWAY, // but the structure of the server's frame writer makes this difficult. - return nil, ConnectionError(ErrCodeProtocol) + return mh, ConnectionError(ErrCodeProtocol) } // Also close the connection after any CONTINUATION frame following an @@ -1604,11 +1607,11 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { } // It would be nice to send a RST_STREAM before sending the GOAWAY, // but the structure of the server's frame writer makes this difficult. 
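Editor's note: growing isTokenTable from 127 to 256 entries lets both IsTokenRune and the byte loop in ValidHeaderFieldName index it by byte without a bounds guard. A self-contained rendering of the check (table entries trimmed to what this demo needs; the real table marks every tchar from RFC 9110):

```go
package main

import (
    "fmt"
    "unicode/utf8"
)

// isTokenTable sized [256] as in the patch, so byte(r) can never index
// out of range once the rune is known to be ASCII.
var isTokenTable = [256]bool{'a': true, '-': true}

func isTokenRune(r rune) bool {
    return r < utf8.RuneSelf && isTokenTable[byte(r)]
}

func main() {
    fmt.Println(isTokenRune('a'))  // true
    fmt.Println(isTokenRune('\t')) // false: separators are not tchars
    fmt.Println(isTokenRune('é'))  // false: non-ASCII short-circuits
}
```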
- return nil, ConnectionError(ErrCodeProtocol) + return mh, ConnectionError(ErrCodeProtocol) } if _, err := hdec.Write(frag); err != nil { - return nil, ConnectionError(ErrCodeCompression) + return mh, ConnectionError(ErrCodeCompression) } if hc.HeadersEnded() { @@ -1625,7 +1628,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { mh.HeadersFrame.invalidate() if err := hdec.Close(); err != nil { - return nil, ConnectionError(ErrCodeCompression) + return mh, ConnectionError(ErrCodeCompression) } if invalid != nil { fr.errDetail = invalid diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 6f2df2818..003e649f3 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -17,6 +17,7 @@ package http2 // import "golang.org/x/net/http2" import ( "bufio" + "context" "crypto/tls" "fmt" "io" @@ -26,6 +27,7 @@ import ( "strconv" "strings" "sync" + "time" "golang.org/x/net/http/httpguts" ) @@ -210,12 +212,6 @@ type stringWriter interface { WriteString(s string) (n int, err error) } -// A gate lets two goroutines coordinate their activities. -type gate chan struct{} - -func (g gate) Done() { g <- struct{}{} } -func (g gate) Wait() { <-g } - // A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). type closeWaiter chan struct{} @@ -383,3 +379,14 @@ func validPseudoPath(v string) bool { // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). type incomparable [0]func() + +// synctestGroupInterface is the methods of synctestGroup used by Server and Transport. +// It's defined as an interface here to let us keep synctestGroup entirely test-only +// and not a part of non-test builds. +type synctestGroupInterface interface { + Join() + Now() time.Time + NewTimer(d time.Duration) timer + AfterFunc(d time.Duration, f func()) timer + ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index ce2e8b40e..6c349f3ec 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -154,6 +154,39 @@ type Server struct { // so that we don't embed a Mutex in this struct, which will make the // struct non-copyable, which might break some callers. state *serverInternalState + + // Synchronization group used for testing. + // Outside of tests, this is nil. + group synctestGroupInterface +} + +func (s *Server) markNewGoroutine() { + if s.group != nil { + s.group.Join() + } +} + +func (s *Server) now() time.Time { + if s.group != nil { + return s.group.Now() + } + return time.Now() +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (s *Server) newTimer(d time.Duration) timer { + if s.group != nil { + return s.group.NewTimer(d) + } + return timeTimer{time.NewTimer(d)} +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (s *Server) afterFunc(d time.Duration, f func()) timer { + if s.group != nil { + return s.group.AfterFunc(d, f) + } + return timeTimer{time.AfterFunc(d, f)} } func (s *Server) initialConnRecvWindowSize() int32 { @@ -400,6 +433,10 @@ func (o *ServeConnOpts) handler() http.Handler { // // The opts parameter is optional. If nil, default values are used. 
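The frame.go hunks above change readMetaFrame to hand back the partially-decoded frame alongside a ConnectionError, backing the new ReadFrame doc contract: an error accompanied by a non-nil Frame carries the StreamID of the stream responsible. A self-contained sketch of a caller using that header (the happy path here; after this patch the returned Frame may be non-nil on error as well):

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, &buf)

	// Write one DATA frame on stream 7, then read it back.
	if err := fr.WriteData(7, true, []byte("payload")); err != nil {
		panic(err)
	}
	f, err := fr.ReadFrame()
	if err != nil {
		// With this patch, f can be non-nil even on error; its
		// Header().StreamID identifies the offending stream, which the
		// server uses to pick a correct Last-Stream-ID for GOAWAY.
		if f != nil {
			fmt.Println("failing stream:", f.Header().StreamID)
		}
		panic(err)
	}
	fmt.Println("stream:", f.Header().StreamID) // stream: 7
}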
func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + s.serveConn(c, opts, nil) +} + +func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverConn)) { baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() @@ -426,6 +463,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { pushEnabled: true, sawClientPreface: opts.SawClientPreface, } + if newf != nil { + newf(sc) + } s.state.registerConn(sc) defer s.state.unregisterConn(sc) @@ -599,8 +639,8 @@ type serverConn struct { inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write goAwayCode ErrCode - shutdownTimer *time.Timer // nil until used - idleTimer *time.Timer // nil if unused + shutdownTimer timer // nil until used + idleTimer timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -649,12 +689,12 @@ type stream struct { flow outflow // limits writing from Handler to client inflow inflow // what the client is allowed to POST/etc to us state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - readDeadline *time.Timer // nil if unused - writeDeadline *time.Timer // nil if unused - closeErr error // set before cw is closed + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + readDeadline timer // nil if unused + writeDeadline timer // nil if unused + closeErr error // set before cw is closed trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -732,11 +772,7 @@ func isClosedConnError(err error) bool { return false } - // TODO: remove this string search and be more like the Windows - // case below. That might involve modifying the standard library - // to return better error types. - str := err.Error() - if strings.Contains(str, "use of closed network connection") { + if errors.Is(err, net.ErrClosed) { return true } @@ -815,8 +851,9 @@ type readFrameResult struct { // consumer is done with the frame. // It's run on its own goroutine. func (sc *serverConn) readFrames() { - gate := make(gate) - gateDone := gate.Done + sc.srv.markNewGoroutine() + gate := make(chan struct{}) + gateDone := func() { gate <- struct{}{} } for { f, err := sc.framer.ReadFrame() select { @@ -847,6 +884,7 @@ type frameWriteResult struct { // At most one goroutine can be running writeFrameAsync at a time per // serverConn. 
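readFrames above now spells out its one-frame-in-flight handshake with a plain unbuffered channel rather than the deleted gate type. A toy reduction of the pattern, with all names invented for the sketch: the producer parks after each hand-off until the consumer signals it is done, so at most one item is ever outstanding.

package main

import "fmt"

func main() {
	items := make(chan int)
	gate := make(chan struct{})
	gateDone := func() { gate <- struct{}{} }

	go func() {
		for i := 1; i <= 3; i++ {
			items <- i // hand one item to the consumer
			<-gate     // block until the consumer is done with it
		}
		close(items)
	}()

	for v := range items {
		fmt.Println("consumed", v)
		gateDone() // release the producer for the next item
	}
}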
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) { + sc.srv.markNewGoroutine() var err error if wd == nil { err = wr.write.writeFrame(sc) @@ -926,13 +964,13 @@ func (sc *serverConn) serve() { sc.setConnState(http.StateIdle) if sc.srv.IdleTimeout > 0 { - sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } go sc.readFrames() // closed by defer sc.conn.Close above - settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) + settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() loopNum := 0 @@ -1061,10 +1099,10 @@ func (sc *serverConn) readPreface() error { errc <- nil } }() - timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? + timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server? defer timer.Stop() select { - case <-timer.C: + case <-timer.C(): return errPrefaceTimeout case err := <-errc: if err == nil { @@ -1429,7 +1467,7 @@ func (sc *serverConn) goAway(code ErrCode) { func (sc *serverConn) shutDownIn(d time.Duration) { sc.serveG.check() - sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) + sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer) } func (sc *serverConn) resetStream(se StreamError) { @@ -1482,6 +1520,11 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { sc.goAway(ErrCodeFlowControl) return true case ConnectionError: + if res.f != nil { + if id := res.f.Header().StreamID; id > sc.maxClientStreamID { + sc.maxClientStreamID = id + } + } sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) sc.goAway(ErrCode(ev)) return true // goAway will handle shutdown @@ -1638,7 +1681,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { delete(sc.streams, st.id) if len(sc.streams) == 0 { sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout > 0 { + if sc.srv.IdleTimeout > 0 && sc.idleTimer != nil { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { @@ -1660,6 +1703,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { } } st.closeErr = err + st.cancelCtx() st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc sc.writeSched.CloseStream(st.id) } @@ -2020,7 +2064,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // (in Go 1.8), though. That's a more sane option anyway. if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) - st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) + st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } return sc.scheduleHandler(id, rw, req, handler) @@ -2118,7 +2162,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.srv.initialStreamRecvWindowSize()) if sc.hs.WriteTimeout > 0 { - st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } sc.streams[id] = st @@ -2342,6 +2386,7 @@ func (sc *serverConn) handlerDone() { // Run on its own goroutine. 
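The isClosedConnError rewrite earlier in server.go drops the substring match on "use of closed network connection" in favor of errors.Is(err, net.ErrClosed). A quick illustration of why the sentinel form is sturdier: errors.Is walks the wrapped *net.OpError chain that network operations return after Close, so no error text is consulted.

package main

import (
	"errors"
	"fmt"
	"net"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	ln.Close()

	_, err = ln.Accept()                       // fails: listener already closed
	fmt.Println(errors.Is(err, net.ErrClosed)) // true, no string matching
}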
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + sc.srv.markNewGoroutine() defer sc.sendServeMsg(handlerDoneMsg) didPanic := true defer func() { @@ -2638,7 +2683,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { var date string if _, ok := rws.snapHeader["Date"]; !ok { // TODO(bradfitz): be faster here, like net/http? measure. - date = time.Now().UTC().Format(http.TimeFormat) + date = rws.conn.srv.now().UTC().Format(http.TimeFormat) } for _, v := range rws.snapHeader["Trailer"] { @@ -2760,7 +2805,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() { func (w *responseWriter) SetReadDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(time.Now()) { + if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onReadTimeout() @@ -2776,9 +2821,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { if deadline.IsZero() { st.readDeadline = nil } else if st.readDeadline == nil { - st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout) + st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout) } else { - st.readDeadline.Reset(deadline.Sub(time.Now())) + st.readDeadline.Reset(deadline.Sub(sc.srv.now())) } }) return nil @@ -2786,7 +2831,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(time.Now()) { + if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onWriteTimeout() @@ -2802,9 +2847,9 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { if deadline.IsZero() { st.writeDeadline = nil } else if st.writeDeadline == nil { - st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout) + st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout) } else { - st.writeDeadline.Reset(deadline.Sub(time.Now())) + st.writeDeadline.Reset(deadline.Sub(sc.srv.now())) } }) return nil diff --git a/vendor/golang.org/x/net/http2/testsync.go b/vendor/golang.org/x/net/http2/testsync.go deleted file mode 100644 index 61075bd16..000000000 --- a/vendor/golang.org/x/net/http2/testsync.go +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package http2 - -import ( - "context" - "sync" - "time" -) - -// testSyncHooks coordinates goroutines in tests. -// -// For example, a call to ClientConn.RoundTrip involves several goroutines, including: -// - the goroutine running RoundTrip; -// - the clientStream.doRequest goroutine, which writes the request; and -// - the clientStream.readLoop goroutine, which reads the response. -// -// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines -// are blocked waiting for some condition such as reading the Request.Body or waiting for -// flow control to become available. -// -// The testSyncHooks also manage timers and synthetic time in tests. 
-// This permits us to, for example, start a request and cause it to time out waiting for -// response headers without resorting to time.Sleep calls. -type testSyncHooks struct { - // active/inactive act as a mutex and condition variable. - // - // - neither chan contains a value: testSyncHooks is locked. - // - active contains a value: unlocked, and at least one goroutine is not blocked - // - inactive contains a value: unlocked, and all goroutines are blocked - active chan struct{} - inactive chan struct{} - - // goroutine counts - total int // total goroutines - condwait map[*sync.Cond]int // blocked in sync.Cond.Wait - blocked []*testBlockedGoroutine // otherwise blocked - - // fake time - now time.Time - timers []*fakeTimer - - // Transport testing: Report various events. - newclientconn func(*ClientConn) - newstream func(*clientStream) -} - -// testBlockedGoroutine is a blocked goroutine. -type testBlockedGoroutine struct { - f func() bool // blocked until f returns true - ch chan struct{} // closed when unblocked -} - -func newTestSyncHooks() *testSyncHooks { - h := &testSyncHooks{ - active: make(chan struct{}, 1), - inactive: make(chan struct{}, 1), - condwait: map[*sync.Cond]int{}, - } - h.inactive <- struct{}{} - h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) - return h -} - -// lock acquires the testSyncHooks mutex. -func (h *testSyncHooks) lock() { - select { - case <-h.active: - case <-h.inactive: - } -} - -// waitInactive waits for all goroutines to become inactive. -func (h *testSyncHooks) waitInactive() { - for { - <-h.inactive - if !h.unlock() { - break - } - } -} - -// unlock releases the testSyncHooks mutex. -// It reports whether any goroutines are active. -func (h *testSyncHooks) unlock() (active bool) { - // Look for a blocked goroutine which can be unblocked. - blocked := h.blocked[:0] - unblocked := false - for _, b := range h.blocked { - if !unblocked && b.f() { - unblocked = true - close(b.ch) - } else { - blocked = append(blocked, b) - } - } - h.blocked = blocked - - // Count goroutines blocked on condition variables. - condwait := 0 - for _, count := range h.condwait { - condwait += count - } - - if h.total > condwait+len(blocked) { - h.active <- struct{}{} - return true - } else { - h.inactive <- struct{}{} - return false - } -} - -// goRun starts a new goroutine. -func (h *testSyncHooks) goRun(f func()) { - h.lock() - h.total++ - h.unlock() - go func() { - defer func() { - h.lock() - h.total-- - h.unlock() - }() - f() - }() -} - -// blockUntil indicates that a goroutine is blocked waiting for some condition to become true. -// It waits until f returns true before proceeding. -// -// Example usage: -// -// h.blockUntil(func() bool { -// // Is the context done yet? -// select { -// case <-ctx.Done(): -// default: -// return false -// } -// return true -// }) -// // Wait for the context to become done. -// <-ctx.Done() -// -// The function f passed to blockUntil must be non-blocking and idempotent. -func (h *testSyncHooks) blockUntil(f func() bool) { - if f() { - return - } - ch := make(chan struct{}) - h.lock() - h.blocked = append(h.blocked, &testBlockedGoroutine{ - f: f, - ch: ch, - }) - h.unlock() - <-ch -} - -// broadcast is sync.Cond.Broadcast. -func (h *testSyncHooks) condBroadcast(cond *sync.Cond) { - h.lock() - delete(h.condwait, cond) - h.unlock() - cond.Broadcast() -} - -// broadcast is sync.Cond.Wait. -func (h *testSyncHooks) condWait(cond *sync.Cond) { - h.lock() - h.condwait[cond]++ - h.unlock() -} - -// newTimer creates a new fake timer. 
-func (h *testSyncHooks) newTimer(d time.Duration) timer { - h.lock() - defer h.unlock() - t := &fakeTimer{ - hooks: h, - when: h.now.Add(d), - c: make(chan time.Time), - } - h.timers = append(h.timers, t) - return t -} - -// afterFunc creates a new fake AfterFunc timer. -func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer { - h.lock() - defer h.unlock() - t := &fakeTimer{ - hooks: h, - when: h.now.Add(d), - f: f, - } - h.timers = append(h.timers, t) - return t -} - -func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - ctx, cancel := context.WithCancel(ctx) - t := h.afterFunc(d, cancel) - return ctx, func() { - t.Stop() - cancel() - } -} - -func (h *testSyncHooks) timeUntilEvent() time.Duration { - h.lock() - defer h.unlock() - var next time.Time - for _, t := range h.timers { - if next.IsZero() || t.when.Before(next) { - next = t.when - } - } - if d := next.Sub(h.now); d > 0 { - return d - } - return 0 -} - -// advance advances time and causes synthetic timers to fire. -func (h *testSyncHooks) advance(d time.Duration) { - h.lock() - defer h.unlock() - h.now = h.now.Add(d) - timers := h.timers[:0] - for _, t := range h.timers { - t := t // remove after go.mod depends on go1.22 - t.mu.Lock() - switch { - case t.when.After(h.now): - timers = append(timers, t) - case t.when.IsZero(): - // stopped timer - default: - t.when = time.Time{} - if t.c != nil { - close(t.c) - } - if t.f != nil { - h.total++ - go func() { - defer func() { - h.lock() - h.total-- - h.unlock() - }() - t.f() - }() - } - } - t.mu.Unlock() - } - h.timers = timers -} - -// A timer wraps a time.Timer, or a synthetic equivalent in tests. -// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires. -type timer interface { - C() <-chan time.Time - Stop() bool - Reset(d time.Duration) bool -} - -// timeTimer implements timer using real time. -type timeTimer struct { - t *time.Timer - c chan time.Time -} - -// newTimeTimer creates a new timer using real time. -func newTimeTimer(d time.Duration) timer { - ch := make(chan time.Time) - t := time.AfterFunc(d, func() { - close(ch) - }) - return &timeTimer{t, ch} -} - -// newTimeAfterFunc creates an AfterFunc timer using real time. -func newTimeAfterFunc(d time.Duration, f func()) timer { - return &timeTimer{ - t: time.AfterFunc(d, f), - } -} - -func (t timeTimer) C() <-chan time.Time { return t.c } -func (t timeTimer) Stop() bool { return t.t.Stop() } -func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) } - -// fakeTimer implements timer using fake time. 
-type fakeTimer struct { - hooks *testSyncHooks - - mu sync.Mutex - when time.Time // when the timer will fire - c chan time.Time // closed when the timer fires; mutually exclusive with f - f func() // called when the timer fires; mutually exclusive with c -} - -func (t *fakeTimer) C() <-chan time.Time { return t.c } - -func (t *fakeTimer) Stop() bool { - t.mu.Lock() - defer t.mu.Unlock() - stopped := t.when.IsZero() - t.when = time.Time{} - return stopped -} - -func (t *fakeTimer) Reset(d time.Duration) bool { - if t.c != nil || t.f == nil { - panic("fakeTimer only supports Reset on AfterFunc timers") - } - t.mu.Lock() - defer t.mu.Unlock() - t.hooks.lock() - defer t.hooks.unlock() - active := !t.when.IsZero() - t.when = t.hooks.now.Add(d) - if !active { - t.hooks.timers = append(t.hooks.timers, t) - } - return active -} diff --git a/vendor/golang.org/x/net/http2/timer.go b/vendor/golang.org/x/net/http2/timer.go new file mode 100644 index 000000000..0b1c17b81 --- /dev/null +++ b/vendor/golang.org/x/net/http2/timer.go @@ -0,0 +1,20 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package http2 + +import "time" + +// A timer is a time.Timer, as an interface which can be replaced in tests. +type timer = interface { + C() <-chan time.Time + Reset(d time.Duration) bool + Stop() bool +} + +// timeTimer adapts a time.Timer to the timer interface. +type timeTimer struct { + *time.Timer +} + +func (t timeTimer) C() <-chan time.Time { return t.Timer.C } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index ce375c8c7..61f511f97 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -185,7 +185,45 @@ type Transport struct { connPoolOnce sync.Once connPoolOrDef ClientConnPool // non-nil version of ConnPool - syncHooks *testSyncHooks + *transportTestHooks +} + +// Hook points used for testing. +// Outside of tests, t.transportTestHooks is nil and these all have minimal implementations. +// Inside tests, see the testSyncHooks function docs. + +type transportTestHooks struct { + newclientconn func(*ClientConn) + group synctestGroupInterface +} + +func (t *Transport) markNewGoroutine() { + if t != nil && t.transportTestHooks != nil { + t.transportTestHooks.group.Join() + } +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (t *Transport) newTimer(d time.Duration) timer { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.NewTimer(d) + } + return timeTimer{time.NewTimer(d)} +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (t *Transport) afterFunc(d time.Duration, f func()) timer { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.AfterFunc(d, f) + } + return timeTimer{time.AfterFunc(d, f)} +} + +func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.ContextWithTimeout(ctx, d) + } + return context.WithTimeout(ctx, d) } func (t *Transport) maxHeaderListSize() uint32 { @@ -352,60 +390,6 @@ type ClientConn struct { werr error // first write error that has occurred hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder - - syncHooks *testSyncHooks // can be nil -} - -// Hook points used for testing. 
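The new timer.go above declares timer as an interface alias precisely so the test-only synctestGroup that replaces the deleted testsync.go can substitute synthetic clocks. Any type with C, Reset, and Stop satisfies it; a hypothetical manualTimer, invented for this sketch, shows how small a fake can be.

package main

import (
	"fmt"
	"time"
)

// timer mirrors the alias introduced in timer.go above.
type timer = interface {
	C() <-chan time.Time
	Reset(d time.Duration) bool
	Stop() bool
}

// manualTimer fires only when the test tells it to.
type manualTimer struct{ ch chan time.Time }

func (t *manualTimer) C() <-chan time.Time        { return t.ch }
func (t *manualTimer) Reset(d time.Duration) bool { return true }
func (t *manualTimer) Stop() bool                 { return true }

// fire makes the timer appear to expire at the given instant.
func (t *manualTimer) fire(now time.Time) { t.ch <- now }

func main() {
	mt := &manualTimer{ch: make(chan time.Time, 1)}
	var t timer = mt // compiles: manualTimer satisfies the alias
	mt.fire(time.Unix(0, 0))
	fmt.Println("fired at", (<-t.C()).UTC())
}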
-// Outside of tests, cc.syncHooks is nil and these all have minimal implementations. -// Inside tests, see the testSyncHooks function docs. - -// goRun starts a new goroutine. -func (cc *ClientConn) goRun(f func()) { - if cc.syncHooks != nil { - cc.syncHooks.goRun(f) - return - } - go f() -} - -// condBroadcast is cc.cond.Broadcast. -func (cc *ClientConn) condBroadcast() { - if cc.syncHooks != nil { - cc.syncHooks.condBroadcast(cc.cond) - } - cc.cond.Broadcast() -} - -// condWait is cc.cond.Wait. -func (cc *ClientConn) condWait() { - if cc.syncHooks != nil { - cc.syncHooks.condWait(cc.cond) - } - cc.cond.Wait() -} - -// newTimer creates a new time.Timer, or a synthetic timer in tests. -func (cc *ClientConn) newTimer(d time.Duration) timer { - if cc.syncHooks != nil { - return cc.syncHooks.newTimer(d) - } - return newTimeTimer(d) -} - -// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. -func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer { - if cc.syncHooks != nil { - return cc.syncHooks.afterFunc(d, f) - } - return newTimeAfterFunc(d, f) -} - -func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - if cc.syncHooks != nil { - return cc.syncHooks.contextWithTimeout(ctx, d) - } - return context.WithTimeout(ctx, d) } // clientStream is the state for a single HTTP/2 stream. One of these @@ -487,7 +471,7 @@ func (cs *clientStream) abortStreamLocked(err error) { // TODO(dneil): Clean up tests where cs.cc.cond is nil. if cs.cc.cond != nil { // Wake up writeRequestBody if it is waiting on flow control. - cs.cc.condBroadcast() + cs.cc.cond.Broadcast() } } @@ -497,7 +481,7 @@ func (cs *clientStream) abortRequestBodyWrite() { defer cc.mu.Unlock() if cs.reqBody != nil && cs.reqBodyClosed == nil { cs.closeReqBodyLocked() - cc.condBroadcast() + cc.cond.Broadcast() } } @@ -507,10 +491,11 @@ func (cs *clientStream) closeReqBodyLocked() { } cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed - cs.cc.goRun(func() { + go func() { + cs.cc.t.markNewGoroutine() cs.reqBody.Close() close(reqBodyClosed) - }) + }() } type stickyErrWriter struct { @@ -626,21 +611,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - var tm timer - if t.syncHooks != nil { - tm = t.syncHooks.newTimer(d) - t.syncHooks.blockUntil(func() bool { - select { - case <-tm.C(): - case <-req.Context().Done(): - default: - return false - } - return true - }) - } else { - tm = newTimeTimer(d) - } + tm := t.newTimer(d) select { case <-tm.C(): t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) @@ -725,8 +696,8 @@ func canRetryError(err error) bool { } func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { - if t.syncHooks != nil { - return t.newClientConn(nil, singleUse, t.syncHooks) + if t.transportTestHooks != nil { + return t.newClientConn(nil, singleUse) } host, _, err := net.SplitHostPort(addr) if err != nil { @@ -736,7 +707,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - return t.newClientConn(tconn, singleUse, nil) + return t.newClientConn(tconn, singleUse) } func (t *Transport) newTLSConfig(host string) *tls.Config { @@ -802,10 +773,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 { } func (t 
*Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, t.disableKeepAlives(), nil) + return t.newClientConn(c, t.disableKeepAlives()) } -func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) { +func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { cc := &ClientConn{ t: t, tconn: c, @@ -820,16 +791,12 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo wantSettingsAck: true, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), - syncHooks: hooks, } - if hooks != nil { - hooks.newclientconn(cc) + if t.transportTestHooks != nil { + t.markNewGoroutine() + t.transportTestHooks.newclientconn(cc) c = cc.tconn } - if d := t.idleConnTimeout(); d != 0 { - cc.idleTimeout = d - cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout) - } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) } @@ -860,10 +827,6 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) cc.peerMaxHeaderTableSize = initialHeaderTableSize - if t.AllowHTTP { - cc.nextStreamID = 3 - } - if cs, ok := c.(connectionStater); ok { state := cs.ConnectionState() cc.tlsState = &state @@ -893,7 +856,13 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo return nil, cc.werr } - cc.goRun(cc.readLoop) + // Start the idle timer after the connection is fully initialized. + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout) + } + + go cc.readLoop() return cc, nil } @@ -901,7 +870,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.t.pingTimeout() // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. - ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout) + ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -936,7 +905,20 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) { } last := f.LastStreamID for streamID, cs := range cc.streams { - if streamID > last { + if streamID <= last { + // The server's GOAWAY indicates that it received this stream. + // It will either finish processing it, or close the connection + // without doing so. Either way, leave the stream alone for now. + continue + } + if streamID == 1 && cc.goAway.ErrCode != ErrCodeNo { + // Don't retry the first stream on a connection if we get a non-NO error. + // If the server is sending an error on a new connection, + // retrying the request on a new one probably isn't going to work. + cs.abortStreamLocked(fmt.Errorf("http2: Transport received GOAWAY from server ErrCode:%v", cc.goAway.ErrCode)) + } else { + // Aborting the stream with errClentConnGotGoAway indicates that + // the request should be retried on a new connection. 
cs.abortStreamLocked(errClientConnGotGoAway) } } @@ -1131,7 +1113,8 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { // Wait for all in-flight streams to complete or connection to close done := make(chan struct{}) cancelled := false // guarded by cc.mu - cc.goRun(func() { + go func() { + cc.t.markNewGoroutine() cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1143,9 +1126,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { if cancelled { break } - cc.condWait() + cc.cond.Wait() } - }) + }() shutdownEnterWaitStateHook() select { case <-done: @@ -1155,7 +1138,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { cc.mu.Lock() // Free the goroutine above cancelled = true - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() return ctx.Err() } @@ -1193,7 +1176,7 @@ func (cc *ClientConn) closeForError(err error) { for _, cs := range cc.streams { cs.abortStreamLocked(err) } - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() cc.closeConn() } @@ -1308,23 +1291,30 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) respHeaderRecv: make(chan struct{}), donec: make(chan struct{}), } - cc.goRun(func() { - cs.doRequest(req) - }) + + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + !cs.isHead { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: https://zlib.net/zlib_faq.html#faq39 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + cs.requestedGzip = true + } + + go cs.doRequest(req, streamf) waitDone := func() error { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.donec: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.donec: return nil @@ -1385,24 +1375,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) return err } - if streamf != nil { - streamf(cs) - } - for { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.respHeaderRecv: - case <-cs.abort: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.respHeaderRecv: return handleResponseHeaders() @@ -1432,8 +1405,9 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) // doRequest runs for the duration of the request lifetime. // // It sends the request and performs post-request cleanup (closing Request.Body, etc.). -func (cs *clientStream) doRequest(req *http.Request) { - err := cs.writeRequest(req) +func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) { + cs.cc.t.markNewGoroutine() + err := cs.writeRequest(req, streamf) cs.cleanupWriteRequest(err) } @@ -1444,7 +1418,7 @@ func (cs *clientStream) doRequest(req *http.Request) { // // It returns non-nil if the request ends otherwise. // If the returned error is StreamError, the error Code may be used in resetting the stream. 
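The Accept-Encoding block that roundTrip gains above (moved out of writeRequest, where a later hunk deletes it) encodes four conditions. Restated as a standalone predicate; shouldRequestGzip is a name invented for this sketch.

package main

import (
	"fmt"
	"net/http"
)

// shouldRequestGzip mirrors the conditions in the hunk above: compression
// not disabled on the Transport, no caller-supplied Accept-Encoding, not a
// Range request (partially decoding a gzipped body fails anyway), and not
// HEAD (a workaround for an nginx bug).
func shouldRequestGzip(disableCompression bool, req *http.Request) bool {
	return !disableCompression &&
		req.Header.Get("Accept-Encoding") == "" &&
		req.Header.Get("Range") == "" &&
		req.Method != http.MethodHead
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://example.com/", nil)
	fmt.Println(shouldRequestGzip(false, req)) // true

	req.Header.Set("Range", "bytes=0-99")
	fmt.Println(shouldRequestGzip(false, req)) // false
}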
-func (cs *clientStream) writeRequest(req *http.Request) (err error) { +func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStream)) (err error) { cc := cs.cc ctx := cs.ctx @@ -1458,21 +1432,6 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } - var newStreamHook func(*clientStream) - if cc.syncHooks != nil { - newStreamHook = cc.syncHooks.newstream - cc.syncHooks.blockUntil(func() bool { - select { - case cc.reqHeaderMu <- struct{}{}: - <-cc.reqHeaderMu - case <-cs.reqCancel: - case <-ctx.Done(): - default: - return false - } - return true - }) - } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1497,28 +1456,8 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { } cc.mu.Unlock() - if newStreamHook != nil { - newStreamHook(cs) - } - - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - !cs.isHead { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: https://zlib.net/zlib_faq.html#faq39 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. See https://golang.org/issue/8923 - cs.requestedGzip = true + if streamf != nil { + streamf(cs) } continueTimeout := cc.t.expectContinueTimeout() @@ -1581,7 +1520,7 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := cc.newTimer(d) + timer := cc.t.newTimer(d) defer timer.Stop() respHeaderTimer = timer.C() respHeaderRecv = cs.respHeaderRecv @@ -1590,21 +1529,6 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { // or until the request is aborted (via context, error, or otherwise), // whichever comes first. for { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.peerClosed: - case <-respHeaderTimer: - case <-respHeaderRecv: - case <-cs.abort: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.peerClosed: return nil @@ -1753,7 +1677,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { return nil } cc.pendingRequests++ - cc.condWait() + cc.cond.Wait() cc.pendingRequests-- select { case <-cs.abort: @@ -2015,7 +1939,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) cs.flow.take(take) return take, nil } - cc.condWait() + cc.cond.Wait() } } @@ -2298,7 +2222,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) { } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. - cc.condBroadcast() + cc.cond.Broadcast() closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { @@ -2320,6 +2244,7 @@ type clientConnReadLoop struct { // readLoop runs in its own goroutine and reads and dispatches frames. 
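With the hooks gone, the transport above is back to calling cc.cond.Wait and cc.cond.Broadcast directly (see awaitOpenSlotForStreamLocked, awaitFlowControl, and forgetStreamID). The underlying pattern, reduced to a runnable toy: check the condition in a loop while holding the lock, Wait while it fails, Broadcast after changing it.

package main

import (
	"fmt"
	"sync"
)

func main() {
	var (
		mu    sync.Mutex
		cond  = sync.NewCond(&mu)
		avail int // toy stand-in for a flow-control window
	)

	go func() {
		mu.Lock()
		avail = 42 // e.g. a WINDOW_UPDATE arrived
		mu.Unlock()
		cond.Broadcast() // wake every goroutine parked in Wait
	}()

	mu.Lock()
	for avail == 0 {
		cond.Wait() // releases mu while parked, reacquires on wakeup
	}
	fmt.Println("window available:", avail)
	mu.Unlock()
}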
func (cc *ClientConn) readLoop() { + cc.t.markNewGoroutine() rl := &clientConnReadLoop{cc: cc} defer rl.cleanup() cc.readerErr = rl.run() @@ -2386,7 +2311,7 @@ func (rl *clientConnReadLoop) cleanup() { cs.abortStreamLocked(err) } } - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() } @@ -2423,7 +2348,7 @@ func (rl *clientConnReadLoop) run() error { readIdleTimeout := cc.t.ReadIdleTimeout var t timer if readIdleTimeout != 0 { - t = cc.afterFunc(readIdleTimeout, cc.healthCheck) + t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -3021,7 +2946,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { for _, cs := range cc.streams { cs.flow.add(delta) } - cc.condBroadcast() + cc.cond.Broadcast() cc.initialWindowSize = s.Val case SettingHeaderTableSize: @@ -3076,7 +3001,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { return ConnectionError(ErrCodeFlowControl) } - cc.condBroadcast() + cc.cond.Broadcast() return nil } @@ -3120,7 +3045,8 @@ func (cc *ClientConn) Ping(ctx context.Context) error { } var pingError error errc := make(chan struct{}) - cc.goRun(func() { + go func() { + cc.t.markNewGoroutine() cc.wmu.Lock() defer cc.wmu.Unlock() if pingError = cc.fr.WritePing(false, p); pingError != nil { @@ -3131,20 +3057,7 @@ func (cc *ClientConn) Ping(ctx context.Context) error { close(errc) return } - }) - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-c: - case <-errc: - case <-ctx.Done(): - case <-cc.readerDone: - default: - return false - } - return true - }) - } + }() select { case <-c: return nil diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go index 0a242c669..f6783339d 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -443,8 +443,8 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max } func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { - for k := n.kids; k != nil; k = k.next { - k.setParent(n.parent) + for n.kids != nil { + n.kids.setParent(n.parent) } n.setParent(nil) delete(ws.nodes, n.id) diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go index 573fe79e8..d7d4b8b6e 100644 --- a/vendor/golang.org/x/net/proxy/per_host.go +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -137,9 +137,7 @@ func (p *PerHost) AddNetwork(net *net.IPNet) { // AddZone specifies a DNS suffix that will use the bypass proxy. A zone of // "example.com" matches "example.com" and all of its subdomains. func (p *PerHost) AddZone(zone string) { - if strings.HasSuffix(zone, ".") { - zone = zone[:len(zone)-1] - } + zone = strings.TrimSuffix(zone, ".") if !strings.HasPrefix(zone, ".") { zone = "." + zone } @@ -148,8 +146,6 @@ func (p *PerHost) AddZone(zone string) { // AddHost specifies a host name that will use the bypass proxy. 
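The removeNode fix above deserves a note: setParent unlinks a node from its current parent's kid list and prepends it to the new parent's, so the old walk (k := n.kids; k != nil; k = k.next) would, after the first reparenting, be following siblings in the new parent's list and could leave later kids orphaned. Consuming the head until n.kids is empty avoids that. A toy reconstruction, with node standing in for priorityNode:

package main

import "fmt"

type node struct {
	id           int
	parent, kids *node
	prev, next   *node
}

// setParent unlinks n from its old parent's kid list and prepends it to
// the new parent's, mutating both lists, just like the real scheduler.
func (n *node) setParent(p *node) {
	if n.parent != nil {
		if n.prev == nil {
			n.parent.kids = n.next
		} else {
			n.prev.next = n.next
		}
		if n.next != nil {
			n.next.prev = n.prev
		}
	}
	n.parent, n.prev, n.next = p, nil, nil
	if p != nil {
		n.next = p.kids
		if p.kids != nil {
			p.kids.prev = n
		}
		p.kids = n
	}
}

func kidIDs(p *node) (ids []int) {
	for k := p.kids; k != nil; k = k.next {
		ids = append(ids, k.id)
	}
	return ids
}

func main() {
	root := &node{id: 0}
	mid := &node{id: 1}
	mid.setParent(root)
	for _, id := range []int{2, 3, 4} {
		(&node{id: id}).setParent(mid)
	}

	// The pattern from the fix: always reparent the current head.
	for mid.kids != nil {
		mid.kids.setParent(mid.parent)
	}
	fmt.Println(kidIDs(root)) // [2 3 4 1]: every kid made it across
}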
func (p *PerHost) AddHost(host string) { - if strings.HasSuffix(host, ".") { - host = host[:len(host)-1] - } + host = strings.TrimSuffix(host, ".") p.bypassHosts = append(p.bypassHosts, host) } diff --git a/vendor/golang.org/x/net/websocket/hybi.go b/vendor/golang.org/x/net/websocket/hybi.go index 48a069e19..dda743466 100644 --- a/vendor/golang.org/x/net/websocket/hybi.go +++ b/vendor/golang.org/x/net/websocket/hybi.go @@ -16,7 +16,6 @@ import ( "encoding/binary" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" @@ -279,7 +278,7 @@ func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, er } } if header := frame.HeaderReader(); header != nil { - io.Copy(ioutil.Discard, header) + io.Copy(io.Discard, header) } switch frame.PayloadType() { case ContinuationFrame: @@ -294,7 +293,7 @@ func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, er if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { return nil, err } - io.Copy(ioutil.Discard, frame) + io.Copy(io.Discard, frame) if frame.PayloadType() == PingFrame { if _, err := handler.WritePong(b[:n]); err != nil { return nil, err diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go index 90a2257cd..923a5780e 100644 --- a/vendor/golang.org/x/net/websocket/websocket.go +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -17,7 +17,6 @@ import ( "encoding/json" "errors" "io" - "io/ioutil" "net" "net/http" "net/url" @@ -208,7 +207,7 @@ again: n, err = ws.frameReader.Read(msg) if err == io.EOF { if trailer := ws.frameReader.TrailerReader(); trailer != nil { - io.Copy(ioutil.Discard, trailer) + io.Copy(io.Discard, trailer) } ws.frameReader = nil goto again @@ -330,7 +329,7 @@ func (cd Codec) Receive(ws *Conn, v interface{}) (err error) { ws.rio.Lock() defer ws.rio.Unlock() if ws.frameReader != nil { - _, err = io.Copy(ioutil.Discard, ws.frameReader) + _, err = io.Copy(io.Discard, ws.frameReader) if err != nil { return err } @@ -362,7 +361,7 @@ again: return ErrFrameTooLarge } payloadType := frame.PayloadType() - data, err := ioutil.ReadAll(frame) + data, err := io.ReadAll(frame) if err != nil { return err } diff --git a/vendor/golang.org/x/oauth2/deviceauth.go b/vendor/golang.org/x/oauth2/deviceauth.go new file mode 100644 index 000000000..e99c92f39 --- /dev/null +++ b/vendor/golang.org/x/oauth2/deviceauth.go @@ -0,0 +1,198 @@ +package oauth2 + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2/internal" +) + +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 +const ( + errAuthorizationPending = "authorization_pending" + errSlowDown = "slow_down" + errAccessDenied = "access_denied" + errExpiredToken = "expired_token" +) + +// DeviceAuthResponse describes a successful RFC 8628 Device Authorization Response +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 +type DeviceAuthResponse struct { + // DeviceCode + DeviceCode string `json:"device_code"` + // UserCode is the code the user should enter at the verification uri + UserCode string `json:"user_code"` + // VerificationURI is where user should enter the user code + VerificationURI string `json:"verification_uri"` + // VerificationURIComplete (if populated) includes the user code in the verification URI. This is typically shown to the user in non-textual form, such as a QR code. 
+ VerificationURIComplete string `json:"verification_uri_complete,omitempty"` + // Expiry is when the device code and user code expire + Expiry time.Time `json:"expires_in,omitempty"` + // Interval is the duration in seconds that Poll should wait between requests + Interval int64 `json:"interval,omitempty"` +} + +func (d DeviceAuthResponse) MarshalJSON() ([]byte, error) { + type Alias DeviceAuthResponse + var expiresIn int64 + if !d.Expiry.IsZero() { + expiresIn = int64(time.Until(d.Expiry).Seconds()) + } + return json.Marshal(&struct { + ExpiresIn int64 `json:"expires_in,omitempty"` + *Alias + }{ + ExpiresIn: expiresIn, + Alias: (*Alias)(&d), + }) + +} + +func (c *DeviceAuthResponse) UnmarshalJSON(data []byte) error { + type Alias DeviceAuthResponse + aux := &struct { + ExpiresIn int64 `json:"expires_in"` + // workaround misspelling of verification_uri + VerificationURL string `json:"verification_url"` + *Alias + }{ + Alias: (*Alias)(c), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + if aux.ExpiresIn != 0 { + c.Expiry = time.Now().UTC().Add(time.Second * time.Duration(aux.ExpiresIn)) + } + if c.VerificationURI == "" { + c.VerificationURI = aux.VerificationURL + } + return nil +} + +// DeviceAuth returns a device auth struct which contains a device code +// and authorization information provided for users to enter on another device. +func (c *Config) DeviceAuth(ctx context.Context, opts ...AuthCodeOption) (*DeviceAuthResponse, error) { + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.1 + v := url.Values{ + "client_id": {c.ClientID}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + return retrieveDeviceAuth(ctx, c, v) +} + +func retrieveDeviceAuth(ctx context.Context, c *Config, v url.Values) (*DeviceAuthResponse, error) { + if c.Endpoint.DeviceAuthURL == "" { + return nil, errors.New("endpoint missing DeviceAuthURL") + } + + req, err := http.NewRequest("POST", c.Endpoint.DeviceAuthURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("Accept", "application/json") + + t := time.Now() + r, err := internal.ContextClient(ctx).Do(req) + if err != nil { + return nil, err + } + + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot auth device: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, &RetrieveError{ + Response: r, + Body: body, + } + } + + da := &DeviceAuthResponse{} + err = json.Unmarshal(body, &da) + if err != nil { + return nil, fmt.Errorf("unmarshal %s", err) + } + + if !da.Expiry.IsZero() { + // Make a small adjustment to account for time taken by the request + da.Expiry = da.Expiry.Add(-time.Since(t)) + } + + return da, nil +} + +// DeviceAccessToken polls the server to exchange a device code for a token. 
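Putting this new file's two halves together (DeviceAuth above obtains the codes, DeviceAccessToken just below polls for the token), an RFC 8628 client reads as follows; the client ID and endpoint URLs are placeholders.

package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID: "your-client-id",
		Scopes:   []string{"openid"},
		Endpoint: oauth2.Endpoint{
			DeviceAuthURL: "https://example.com/oauth2/device/code",
			TokenURL:      "https://example.com/oauth2/token",
		},
	}

	ctx := context.Background()
	da, err := conf.DeviceAuth(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Printf("visit %s and enter code %s\n", da.VerificationURI, da.UserCode)

	// Blocks, polling per da.Interval (default 5s) until the user approves,
	// the code expires, or ctx is done.
	tok, err := conf.DeviceAccessToken(ctx, da)
	if err != nil {
		panic(err)
	}
	fmt.Println("access token acquired, expires:", tok.Expiry)
}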
+func (c *Config) DeviceAccessToken(ctx context.Context, da *DeviceAuthResponse, opts ...AuthCodeOption) (*Token, error) { + if !da.Expiry.IsZero() { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, da.Expiry) + defer cancel() + } + + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.4 + v := url.Values{ + "client_id": {c.ClientID}, + "grant_type": {"urn:ietf:params:oauth:grant-type:device_code"}, + "device_code": {da.DeviceCode}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + + // "If no value is provided, clients MUST use 5 as the default." + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 + interval := da.Interval + if interval == 0 { + interval = 5 + } + + ticker := time.NewTicker(time.Duration(interval) * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-ticker.C: + tok, err := retrieveToken(ctx, c, v) + if err == nil { + return tok, nil + } + + e, ok := err.(*RetrieveError) + if !ok { + return nil, err + } + switch e.ErrorCode { + case errSlowDown: + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 + // "the interval MUST be increased by 5 seconds for this and all subsequent requests" + interval += 5 + ticker.Reset(time.Duration(interval) * time.Second) + case errAuthorizationPending: + // Do nothing. + case errAccessDenied, errExpiredToken: + fallthrough + default: + return tok, err + } + } + } +} diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go index feb1157b1..564920bd4 100644 --- a/vendor/golang.org/x/oauth2/google/appengine.go +++ b/vendor/golang.org/x/oauth2/google/appengine.go @@ -6,16 +6,13 @@ package google import ( "context" - "time" + "log" + "sync" "golang.org/x/oauth2" ) -// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. -var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) - -// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. -var appengineAppIDFunc func(c context.Context) string +var logOnce sync.Once // only spam about deprecation once // AppEngineTokenSource returns a token source that fetches tokens from either // the current application's service account or from the metadata server, @@ -23,8 +20,10 @@ var appengineAppIDFunc func(c context.Context) string // details. If you are implementing a 3-legged OAuth 2.0 flow on App Engine that // involves user accounts, see oauth2.Config instead. // -// First generation App Engine runtimes (<= Go 1.9): -// AppEngineTokenSource returns a token source that fetches tokens issued to the +// The current version of this library requires at least Go 1.17 to build, +// so first generation App Engine runtimes (<= Go 1.9) are unsupported. +// Previously, on first generation App Engine runtimes, AppEngineTokenSource +// returned a token source that fetches tokens issued to the // current App Engine application's service account. The provided context must have // come from appengine.NewContext. // @@ -34,5 +33,8 @@ var appengineAppIDFunc func(c context.Context) string // context and scopes are not used. Please use DefaultTokenSource (or ComputeTokenSource, // which DefaultTokenSource will use in this case) instead. 
func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - return appEngineTokenSource(ctx, scope...) + logOnce.Do(func() { + log.Print("google: AppEngineTokenSource is deprecated on App Engine standard second generation runtimes (>= Go 1.11) and App Engine flexible. Please use DefaultTokenSource or ComputeTokenSource.") + }) + return ComputeTokenSource("") } diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go deleted file mode 100644 index e61587945..000000000 --- a/vendor/golang.org/x/oauth2/google/appengine_gen1.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build appengine - -// This file applies to App Engine first generation runtimes (<= Go 1.9). - -package google - -import ( - "context" - "sort" - "strings" - "sync" - - "golang.org/x/oauth2" - "google.golang.org/appengine" -) - -func init() { - appengineTokenFunc = appengine.AccessToken - appengineAppIDFunc = appengine.AppID -} - -// See comment on AppEngineTokenSource in appengine.go. -func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - scopes := append([]string{}, scope...) - sort.Strings(scopes) - return &gaeTokenSource{ - ctx: ctx, - scopes: scopes, - key: strings.Join(scopes, " "), - } -} - -// aeTokens helps the fetched tokens to be reused until their expiration. -var ( - aeTokensMu sync.Mutex - aeTokens = make(map[string]*tokenLock) // key is space-separated scopes -) - -type tokenLock struct { - mu sync.Mutex // guards t; held while fetching or updating t - t *oauth2.Token -} - -type gaeTokenSource struct { - ctx context.Context - scopes []string - key string // to aeTokens map; space-separated scopes -} - -func (ts *gaeTokenSource) Token() (*oauth2.Token, error) { - aeTokensMu.Lock() - tok, ok := aeTokens[ts.key] - if !ok { - tok = &tokenLock{} - aeTokens[ts.key] = tok - } - aeTokensMu.Unlock() - - tok.mu.Lock() - defer tok.mu.Unlock() - if tok.t.Valid() { - return tok.t, nil - } - access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) - if err != nil { - return nil, err - } - tok.t = &oauth2.Token{ - AccessToken: access, - Expiry: exp, - } - return tok.t, nil -} diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go deleted file mode 100644 index 9c79aa0a0..000000000 --- a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !appengine - -// This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. - -package google - -import ( - "context" - "log" - "sync" - - "golang.org/x/oauth2" -) - -var logOnce sync.Once // only spam about deprecation once - -// See comment on AppEngineTokenSource in appengine.go. -func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - logOnce.Do(func() { - log.Print("google: AppEngineTokenSource is deprecated on App Engine standard second generation runtimes (>= Go 1.11) and App Engine flexible. 
Please use DefaultTokenSource or ComputeTokenSource.") - }) - return ComputeTokenSource("") -} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index 2cf71f0f9..df958359a 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -12,6 +12,7 @@ import ( "os" "path/filepath" "runtime" + "sync" "time" "cloud.google.com/go/compute/metadata" @@ -19,7 +20,10 @@ import ( "golang.org/x/oauth2/authhandler" ) -const adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" +const ( + adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" + defaultUniverseDomain = "googleapis.com" +) // Credentials holds Google credentials, including "Application Default Credentials". // For more details, see: @@ -37,6 +41,64 @@ type Credentials struct { // environment and not with a credentials file, e.g. when code is // running on Google Cloud Platform. JSON []byte + + // UniverseDomainProvider returns the default service domain for a given + // Cloud universe. Optional. + // + // On GCE, UniverseDomainProvider should return the universe domain value + // from Google Compute Engine (GCE)'s metadata server. See also [The attached service + // account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa). + // If the GCE metadata server returns a 404 error, the default universe + // domain value should be returned. If the GCE metadata server returns an + // error other than 404, the error should be returned. + UniverseDomainProvider func() (string, error) + + udMu sync.Mutex // guards universeDomain + // universeDomain is the default service domain for a given Cloud universe. + universeDomain string +} + +// UniverseDomain returns the default service domain for a given Cloud universe. +// +// The default value is "googleapis.com". +// +// Deprecated: Use instead (*Credentials).GetUniverseDomain(), which supports +// obtaining the universe domain when authenticating via the GCE metadata server. +// Unlike GetUniverseDomain, this method, UniverseDomain, will always return the +// default value when authenticating via the GCE metadata server. +// See also [The attached service account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa). +func (c *Credentials) UniverseDomain() string { + if c.universeDomain == "" { + return defaultUniverseDomain + } + return c.universeDomain +} + +// GetUniverseDomain returns the default service domain for a given Cloud +// universe. If present, UniverseDomainProvider will be invoked and its return +// value will be cached. +// +// The default value is "googleapis.com". +func (c *Credentials) GetUniverseDomain() (string, error) { + c.udMu.Lock() + defer c.udMu.Unlock() + if c.universeDomain == "" && c.UniverseDomainProvider != nil { + // On Google Compute Engine, an App Engine standard second generation + // runtime, or App Engine flexible, use an externally provided function + // to request the universe domain from the metadata server. + ud, err := c.UniverseDomainProvider() + if err != nil { + return "", err + } + c.universeDomain = ud + } + // If no UniverseDomainProvider (meaning not on Google Compute Engine), or + // in case of any (non-error) empty return value from + // UniverseDomainProvider, set the default universe domain. 
+ if c.universeDomain == "" { + c.universeDomain = defaultUniverseDomain + } + return c.universeDomain, nil } // DefaultCredentials is the old name of Credentials. @@ -76,6 +138,12 @@ type CredentialsParams struct { // Note: This option is currently only respected when using credentials // fetched from the GCE metadata server. EarlyTokenRefresh time.Duration + + // UniverseDomain is the default service domain for a given Cloud universe. + // Only supported in authentication flows that support universe domains. + // This value takes precedence over a universe domain explicitly specified + // in a credentials config file or by the GCE metadata server. Optional. + UniverseDomain string } func (params CredentialsParams) deepCopy() CredentialsParams { @@ -120,9 +188,7 @@ func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSourc // 2. A JSON file in a location known to the gcloud command-line tool. // On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. // On other systems, $HOME/.config/gcloud/application_default_credentials.json. -// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses -// the appengine.AccessToken function. -// 4. On Google Compute Engine, Google App Engine standard second generation runtimes +// 3. On Google Compute Engine, Google App Engine standard second generation runtimes // (>= Go 1.11), and Google App Engine flexible environment, it fetches // credentials from the metadata server. func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsParams) (*Credentials, error) { @@ -145,23 +211,27 @@ func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsPar return CredentialsFromJSONWithParams(ctx, b, params) } - // Third, if we're on a Google App Engine standard first generation runtime (<= Go 1.9) - // use those credentials. App Engine standard second generation runtimes (>= Go 1.11) - // and App Engine flexible use ComputeTokenSource and the metadata server. - if appengineTokenFunc != nil { - return &Credentials{ - ProjectID: appengineAppIDFunc(ctx), - TokenSource: AppEngineTokenSource(ctx, params.Scopes...), - }, nil - } - - // Fourth, if we're on Google Compute Engine, an App Engine standard second generation runtime, + // Third, if we're on Google Compute Engine, an App Engine standard second generation runtime, // or App Engine flexible, use the metadata server. if metadata.OnGCE() { id, _ := metadata.ProjectID() + universeDomainProvider := func() (string, error) { + universeDomain, err := metadata.Get("universe/universe_domain") + if err != nil { + if _, ok := err.(metadata.NotDefinedError); ok { + // http.StatusNotFound (404) + return defaultUniverseDomain, nil + } else { + return "", err + } + } + return universeDomain, nil + } return &Credentials{ - ProjectID: id, - TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...), + ProjectID: id, + TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...), + UniverseDomainProvider: universeDomainProvider, + universeDomain: params.UniverseDomain, }, nil } @@ -200,15 +270,26 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params if err := json.Unmarshal(jsonData, &f); err != nil { return nil, err } + + universeDomain := f.UniverseDomain + if params.UniverseDomain != "" { + universeDomain = params.UniverseDomain + } + // Authorized user credentials are only supported in the googleapis.com universe. 
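Callers reach the universe-domain plumbing added above through Credentials.GetUniverseDomain, which consults UniverseDomainProvider (backed by the GCE metadata server when running on GCE) and otherwise falls back to "googleapis.com". A minimal sketch; running it requires ambient Application Default Credentials.

package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	creds, err := google.FindDefaultCredentials(ctx,
		"https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		panic(err)
	}
	ud, err := creds.GetUniverseDomain()
	if err != nil {
		panic(err)
	}
	fmt.Println("universe domain:", ud) // "googleapis.com" unless overridden
}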
+ if f.Type == userCredentialsKey { + universeDomain = defaultUniverseDomain + } + ts, err := f.tokenSource(ctx, params) if err != nil { return nil, err } ts = newErrWrappingTokenSource(ts) return &Credentials{ - ProjectID: f.ProjectID, - TokenSource: ts, - JSON: jsonData, + ProjectID: f.ProjectID, + TokenSource: ts, + JSON: jsonData, + universeDomain: universeDomain, }, nil } diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go index ca717634a..830d268c1 100644 --- a/vendor/golang.org/x/oauth2/google/doc.go +++ b/vendor/golang.org/x/oauth2/google/doc.go @@ -22,89 +22,9 @@ // the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or // create an http.Client. // -// # Workload Identity Federation +// # Workload and Workforce Identity Federation // -// Using workload identity federation, your application can access Google Cloud -// resources from Amazon Web Services (AWS), Microsoft Azure or any identity -// provider that supports OpenID Connect (OIDC) or SAML 2.0. -// Traditionally, applications running outside Google Cloud have used service -// account keys to access Google Cloud resources. Using identity federation, -// you can allow your workload to impersonate a service account. -// This lets you access Google Cloud resources directly, eliminating the -// maintenance and security burden associated with service account keys. -// -// Follow the detailed instructions on how to configure Workload Identity Federation -// in various platforms: -// -// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#aws -// Microsoft Azure: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#azure -// OIDC identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#oidc -// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#saml -// -// For OIDC and SAML providers, the library can retrieve tokens in three ways: -// from a local file location (file-sourced credentials), from a server -// (URL-sourced credentials), or from a local executable (executable-sourced -// credentials). -// For file-sourced credentials, a background process needs to be continuously -// refreshing the file location with a new OIDC/SAML token prior to expiration. -// For tokens with one hour lifetimes, the token needs to be updated in the file -// every hour. The token can be stored directly as plain text or in JSON format. -// For URL-sourced credentials, a local server needs to host a GET endpoint to -// return the OIDC/SAML token. The response can be in plain text or JSON. -// Additional required request headers can also be specified. -// For executable-sourced credentials, an application needs to be available to -// output the OIDC/SAML token and other information in a JSON format. -// For more information on how these work (and how to implement -// executable-sourced credentials), please check out: -// https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#create_a_credential_configuration -// -// Note that this library does not perform any validation on the token_url, token_info_url, -// or service_account_impersonation_url fields of the credential configuration. 
-// It is not recommended to use a credential configuration that you did not generate with -// the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. -// -// # Workforce Identity Federation -// -// Workforce identity federation lets you use an external identity provider (IdP) to -// authenticate and authorize a workforce—a group of users, such as employees, partners, -// and contractors—using IAM, so that the users can access Google Cloud services. -// Workforce identity federation extends Google Cloud's identity capabilities to support -// syncless, attribute-based single sign on. -// -// With workforce identity federation, your workforce can access Google Cloud resources -// using an external identity provider (IdP) that supports OpenID Connect (OIDC) or -// SAML 2.0 such as Azure Active Directory (Azure AD), Active Directory Federation -// Services (AD FS), Okta, and others. -// -// Follow the detailed instructions on how to configure Workload Identity Federation -// in various platforms: -// -// Azure AD: https://cloud.google.com/iam/docs/workforce-sign-in-azure-ad -// Okta: https://cloud.google.com/iam/docs/workforce-sign-in-okta -// OIDC identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#oidc -// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#saml -// -// For workforce identity federation, the library can retrieve tokens in three ways: -// from a local file location (file-sourced credentials), from a server -// (URL-sourced credentials), or from a local executable (executable-sourced -// credentials). -// For file-sourced credentials, a background process needs to be continuously -// refreshing the file location with a new OIDC/SAML token prior to expiration. -// For tokens with one hour lifetimes, the token needs to be updated in the file -// every hour. The token can be stored directly as plain text or in JSON format. -// For URL-sourced credentials, a local server needs to host a GET endpoint to -// return the OIDC/SAML token. The response can be in plain text or JSON. -// Additional required request headers can also be specified. -// For executable-sourced credentials, an application needs to be available to -// output the OIDC/SAML token and other information in a JSON format. -// For more information on how these work (and how to implement -// executable-sourced credentials), please check out: -// https://cloud.google.com/iam/docs/workforce-obtaining-short-lived-credentials#generate_a_configuration_file_for_non-interactive_sign-in -// -// Note that this library does not perform any validation on the token_url, token_info_url, -// or service_account_impersonation_url fields of the credential configuration. -// It is not recommended to use a credential configuration that you did not generate with -// the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. +// For information on how to use Workload and Workforce Identity Federation, see [golang.org/x/oauth2/google/externalaccount]. 
// // # Credentials // diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/externalaccount/aws.go similarity index 77% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go rename to vendor/golang.org/x/oauth2/google/externalaccount/aws.go index 2bf3202b2..ca27c2e98 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go +++ b/vendor/golang.org/x/oauth2/google/externalaccount/aws.go @@ -26,22 +26,28 @@ import ( "golang.org/x/oauth2" ) -type awsSecurityCredentials struct { - AccessKeyID string `json:"AccessKeyID"` +// AwsSecurityCredentials models AWS security credentials. +type AwsSecurityCredentials struct { + // AccessKeyId is the AWS Access Key ID - Required. + AccessKeyID string `json:"AccessKeyID"` + // SecretAccessKey is the AWS Secret Access Key - Required. SecretAccessKey string `json:"SecretAccessKey"` - SecurityToken string `json:"Token"` + // SessionToken is the AWS Session token. This should be provided for temporary AWS security credentials - Optional. + SessionToken string `json:"Token"` } // awsRequestSigner is a utility class to sign http requests using a AWS V4 signature. type awsRequestSigner struct { RegionName string - AwsSecurityCredentials awsSecurityCredentials + AwsSecurityCredentials *AwsSecurityCredentials } // getenv aliases os.Getenv for testing var getenv = os.Getenv const ( + defaultRegionalCredentialVerificationUrl = "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15" + // AWS Signature Version 4 signing algorithm identifier. awsAlgorithm = "AWS4-HMAC-SHA256" @@ -197,8 +203,8 @@ func (rs *awsRequestSigner) SignRequest(req *http.Request) error { signedRequest.Header.Add("host", requestHost(req)) - if rs.AwsSecurityCredentials.SecurityToken != "" { - signedRequest.Header.Add(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SecurityToken) + if rs.AwsSecurityCredentials.SessionToken != "" { + signedRequest.Header.Add(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SessionToken) } if signedRequest.Header.Get("date") == "" { @@ -251,16 +257,18 @@ func (rs *awsRequestSigner) generateAuthentication(req *http.Request, timestamp } type awsCredentialSource struct { - EnvironmentID string - RegionURL string - RegionalCredVerificationURL string - CredVerificationURL string - IMDSv2SessionTokenURL string - TargetResource string - requestSigner *awsRequestSigner - region string - ctx context.Context - client *http.Client + environmentID string + regionURL string + regionalCredVerificationURL string + credVerificationURL string + imdsv2SessionTokenURL string + targetResource string + requestSigner *awsRequestSigner + region string + ctx context.Context + client *http.Client + awsSecurityCredentialsSupplier AwsSecurityCredentialsSupplier + supplierOptions SupplierOptions } type awsRequestHeader struct { @@ -274,49 +282,6 @@ type awsRequest struct { Headers []awsRequestHeader `json:"headers"` } -func (cs awsCredentialSource) validateMetadataServers() error { - if err := cs.validateMetadataServer(cs.RegionURL, "region_url"); err != nil { - return err - } - if err := cs.validateMetadataServer(cs.CredVerificationURL, "url"); err != nil { - return err - } - return cs.validateMetadataServer(cs.IMDSv2SessionTokenURL, "imdsv2_session_token_url") -} - -var validHostnames []string = []string{"169.254.169.254", "fd00:ec2::254"} - -func (cs awsCredentialSource) isValidMetadataServer(metadataUrl string) bool { - if metadataUrl == "" { - // Zero 
value means use default, which is valid. - return true - } - - u, err := url.Parse(metadataUrl) - if err != nil { - // Unparseable URL means invalid - return false - } - - for _, validHostname := range validHostnames { - if u.Hostname() == validHostname { - // If it's one of the valid hostnames, everything is good - return true - } - } - - // hostname not found in our allowlist, so not valid - return false -} - -func (cs awsCredentialSource) validateMetadataServer(metadataUrl, urlName string) error { - if !cs.isValidMetadataServer(metadataUrl) { - return fmt.Errorf("oauth2/google: invalid hostname %s for %s", metadataUrl, urlName) - } - - return nil -} - func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, error) { if cs.client == nil { cs.client = oauth2.NewClient(cs.ctx, nil) @@ -335,14 +300,25 @@ func canRetrieveSecurityCredentialFromEnvironment() bool { return getenv(awsAccessKeyId) != "" && getenv(awsSecretAccessKey) != "" } -func shouldUseMetadataServer() bool { - return !canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment() +func (cs awsCredentialSource) shouldUseMetadataServer() bool { + return cs.awsSecurityCredentialsSupplier == nil && (!canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment()) +} + +func (cs awsCredentialSource) credentialSourceType() string { + if cs.awsSecurityCredentialsSupplier != nil { + return "programmatic" + } + return "aws" } func (cs awsCredentialSource) subjectToken() (string, error) { + // Set Defaults + if cs.regionalCredVerificationURL == "" { + cs.regionalCredVerificationURL = defaultRegionalCredentialVerificationUrl + } if cs.requestSigner == nil { headers := make(map[string]string) - if shouldUseMetadataServer() { + if cs.shouldUseMetadataServer() { awsSessionToken, err := cs.getAWSSessionToken() if err != nil { return "", err @@ -357,8 +333,8 @@ func (cs awsCredentialSource) subjectToken() (string, error) { if err != nil { return "", err } - - if cs.region, err = cs.getRegion(headers); err != nil { + cs.region, err = cs.getRegion(headers) + if err != nil { return "", err } @@ -370,7 +346,7 @@ func (cs awsCredentialSource) subjectToken() (string, error) { // Generate the signed request to AWS STS GetCallerIdentity API. // Use the required regional endpoint. Otherwise, the request will fail. - req, err := http.NewRequest("POST", strings.Replace(cs.RegionalCredVerificationURL, "{region}", cs.region, 1), nil) + req, err := http.NewRequest("POST", strings.Replace(cs.regionalCredVerificationURL, "{region}", cs.region, 1), nil) if err != nil { return "", err } @@ -378,8 +354,8 @@ func (cs awsCredentialSource) subjectToken() (string, error) { // provider, with or without the HTTPS prefix. // Including this header as part of the signature is recommended to // ensure data integrity. 
- if cs.TargetResource != "" { - req.Header.Add("x-goog-cloud-target-resource", cs.TargetResource) + if cs.targetResource != "" { + req.Header.Add("x-goog-cloud-target-resource", cs.targetResource) } cs.requestSigner.SignRequest(req) @@ -426,11 +402,11 @@ func (cs awsCredentialSource) subjectToken() (string, error) { } func (cs *awsCredentialSource) getAWSSessionToken() (string, error) { - if cs.IMDSv2SessionTokenURL == "" { + if cs.imdsv2SessionTokenURL == "" { return "", nil } - req, err := http.NewRequest("PUT", cs.IMDSv2SessionTokenURL, nil) + req, err := http.NewRequest("PUT", cs.imdsv2SessionTokenURL, nil) if err != nil { return "", err } @@ -449,25 +425,29 @@ func (cs *awsCredentialSource) getAWSSessionToken() (string, error) { } if resp.StatusCode != 200 { - return "", fmt.Errorf("oauth2/google: unable to retrieve AWS session token - %s", string(respBody)) + return "", fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS session token - %s", string(respBody)) } return string(respBody), nil } func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, error) { + if cs.awsSecurityCredentialsSupplier != nil { + return cs.awsSecurityCredentialsSupplier.AwsRegion(cs.ctx, cs.supplierOptions) + } if canRetrieveRegionFromEnvironment() { if envAwsRegion := getenv(awsRegion); envAwsRegion != "" { + cs.region = envAwsRegion return envAwsRegion, nil } return getenv("AWS_DEFAULT_REGION"), nil } - if cs.RegionURL == "" { - return "", errors.New("oauth2/google: unable to determine AWS region") + if cs.regionURL == "" { + return "", errors.New("oauth2/google/externalaccount: unable to determine AWS region") } - req, err := http.NewRequest("GET", cs.RegionURL, nil) + req, err := http.NewRequest("GET", cs.regionURL, nil) if err != nil { return "", err } @@ -488,7 +468,7 @@ func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, err } if resp.StatusCode != 200 { - return "", fmt.Errorf("oauth2/google: unable to retrieve AWS region - %s", string(respBody)) + return "", fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS region - %s", string(respBody)) } // This endpoint will return the region in format: us-east-2b. 
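The region lookup above resolves in a fixed order: a configured AwsSecurityCredentialsSupplier wins, then the AWS_REGION and AWS_DEFAULT_REGION environment variables, then the region_url metadata endpoint. A sketch of the environment-variable path (the region value is a placeholder):

	import "os"

	func init() {
		// AWS_REGION is consulted before AWS_DEFAULT_REGION; when either is set
		// (and no supplier is configured), the region metadata endpoint is skipped.
		os.Setenv("AWS_REGION", "us-west-2")
	}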
@@ -500,12 +480,15 @@ func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, err return string(respBody[:respBodyEnd]), nil } -func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) (result awsSecurityCredentials, err error) { +func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) (result *AwsSecurityCredentials, err error) { + if cs.awsSecurityCredentialsSupplier != nil { + return cs.awsSecurityCredentialsSupplier.AwsSecurityCredentials(cs.ctx, cs.supplierOptions) + } if canRetrieveSecurityCredentialFromEnvironment() { - return awsSecurityCredentials{ + return &AwsSecurityCredentials{ AccessKeyID: getenv(awsAccessKeyId), SecretAccessKey: getenv(awsSecretAccessKey), - SecurityToken: getenv(awsSessionToken), + SessionToken: getenv(awsSessionToken), }, nil } @@ -520,24 +503,23 @@ func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) } if credentials.AccessKeyID == "" { - return result, errors.New("oauth2/google: missing AccessKeyId credential") + return result, errors.New("oauth2/google/externalaccount: missing AccessKeyId credential") } if credentials.SecretAccessKey == "" { - return result, errors.New("oauth2/google: missing SecretAccessKey credential") + return result, errors.New("oauth2/google/externalaccount: missing SecretAccessKey credential") } - return credentials, nil + return &credentials, nil } -func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, headers map[string]string) (awsSecurityCredentials, error) { - var result awsSecurityCredentials +func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, headers map[string]string) (AwsSecurityCredentials, error) { + var result AwsSecurityCredentials - req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", cs.CredVerificationURL, roleName), nil) + req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", cs.credVerificationURL, roleName), nil) if err != nil { return result, err } - req.Header.Add("Content-Type", "application/json") for name, value := range headers { req.Header.Add(name, value) @@ -555,7 +537,7 @@ func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, h } if resp.StatusCode != 200 { - return result, fmt.Errorf("oauth2/google: unable to retrieve AWS security credentials - %s", string(respBody)) + return result, fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS security credentials - %s", string(respBody)) } err = json.Unmarshal(respBody, &result) @@ -563,11 +545,11 @@ func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, h } func (cs *awsCredentialSource) getMetadataRoleName(headers map[string]string) (string, error) { - if cs.CredVerificationURL == "" { - return "", errors.New("oauth2/google: unable to determine the AWS metadata server security credentials endpoint") + if cs.credVerificationURL == "" { + return "", errors.New("oauth2/google/externalaccount: unable to determine the AWS metadata server security credentials endpoint") } - req, err := http.NewRequest("GET", cs.CredVerificationURL, nil) + req, err := http.NewRequest("GET", cs.credVerificationURL, nil) if err != nil { return "", err } @@ -588,7 +570,7 @@ func (cs *awsCredentialSource) getMetadataRoleName(headers map[string]string) (s } if resp.StatusCode != 200 { - return "", fmt.Errorf("oauth2/google: unable to retrieve AWS role name - %s", string(respBody)) + return "", fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS role name - 
%s", string(respBody)) } return string(respBody), nil diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go new file mode 100644 index 000000000..6c81a6872 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go @@ -0,0 +1,485 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package externalaccount provides support for creating workload identity +federation and workforce identity federation token sources that can be +used to access Google Cloud resources from external identity providers. + +# Workload Identity Federation + +Using workload identity federation, your application can access Google Cloud +resources from Amazon Web Services (AWS), Microsoft Azure or any identity +provider that supports OpenID Connect (OIDC) or SAML 2.0. +Traditionally, applications running outside Google Cloud have used service +account keys to access Google Cloud resources. Using identity federation, +you can allow your workload to impersonate a service account. +This lets you access Google Cloud resources directly, eliminating the +maintenance and security burden associated with service account keys. + +Follow the detailed instructions on how to configure Workload Identity Federation +in various platforms: + +Amazon Web Services (AWS): https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#aws +Microsoft Azure: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#azure +OIDC identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#oidc +SAML 2.0 identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#saml + +For OIDC and SAML providers, the library can retrieve tokens in fours ways: +from a local file location (file-sourced credentials), from a server +(URL-sourced credentials), from a local executable (executable-sourced +credentials), or from a user defined function that returns an OIDC or SAML token. +For file-sourced credentials, a background process needs to be continuously +refreshing the file location with a new OIDC/SAML token prior to expiration. +For tokens with one hour lifetimes, the token needs to be updated in the file +every hour. The token can be stored directly as plain text or in JSON format. +For URL-sourced credentials, a local server needs to host a GET endpoint to +return the OIDC/SAML token. The response can be in plain text or JSON. +Additional required request headers can also be specified. +For executable-sourced credentials, an application needs to be available to +output the OIDC/SAML token and other information in a JSON format. +For more information on how these work (and how to implement +executable-sourced credentials), please check out: +https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#create_a_credential_configuration + +To use a custom function to supply the token, define a struct that implements the [SubjectTokenSupplier] interface for OIDC/SAML providers, +or one that implements [AwsSecurityCredentialsSupplier] for AWS providers. This can then be used when building a [Config]. +The [golang.org/x/oauth2.TokenSource] created from the config using [NewTokenSource] can then be used to access Google +Cloud resources. 
For instance, you can create a new client from the +[cloud.google.com/go/storage] package and pass in option.WithTokenSource(yourTokenSource) + +Note that this library does not perform any validation on the token_url, token_info_url, +or service_account_impersonation_url fields of the credential configuration. +It is not recommended to use a credential configuration that you did not generate with +the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. + +# Workforce Identity Federation + +Workforce identity federation lets you use an external identity provider (IdP) to +authenticate and authorize a workforce—a group of users, such as employees, partners, +and contractors—using IAM, so that the users can access Google Cloud services. +Workforce identity federation extends Google Cloud's identity capabilities to support +syncless, attribute-based single sign on. + +With workforce identity federation, your workforce can access Google Cloud resources +using an external identity provider (IdP) that supports OpenID Connect (OIDC) or +SAML 2.0 such as Azure Active Directory (Azure AD), Active Directory Federation +Services (AD FS), Okta, and others. + +Follow the detailed instructions on how to configure Workforce Identity Federation +in various platforms: + +Azure AD: https://cloud.google.com/iam/docs/workforce-sign-in-azure-ad +Okta: https://cloud.google.com/iam/docs/workforce-sign-in-okta +OIDC identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#oidc +SAML 2.0 identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#saml + +For workforce identity federation, the library can retrieve tokens in four ways: +from a local file location (file-sourced credentials), from a server +(URL-sourced credentials), from a local executable (executable-sourced +credentials), or from a user supplied function that returns an OIDC or SAML token. +For file-sourced credentials, a background process needs to be continuously +refreshing the file location with a new OIDC/SAML token prior to expiration. +For tokens with one hour lifetimes, the token needs to be updated in the file +every hour. The token can be stored directly as plain text or in JSON format. +For URL-sourced credentials, a local server needs to host a GET endpoint to +return the OIDC/SAML token. The response can be in plain text or JSON. +Additional required request headers can also be specified. +For executable-sourced credentials, an application needs to be available to +output the OIDC/SAML token and other information in a JSON format. +For more information on how these work (and how to implement +executable-sourced credentials), please check out: +https://cloud.google.com/iam/docs/workforce-obtaining-short-lived-credentials#generate_a_configuration_file_for_non-interactive_sign-in + +To use a custom function to supply the token, define a struct that implements the [SubjectTokenSupplier] interface for OIDC/SAML providers. +This can then be used when building a [Config]. +The [golang.org/x/oauth2.TokenSource] created from the config using [NewTokenSource] can then be used to access Google +Cloud resources. For instance, you can create a new client from the +[cloud.google.com/go/storage] package and pass in option.WithTokenSource(yourTokenSource) + +# Security considerations + +Note that this library does not perform any validation on the token_url, token_info_url, +or service_account_impersonation_url fields of the credential configuration.
+It is not recommended to use a credential configuration that you did not generate with +the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. +*/ +package externalaccount + +import ( + "context" + "fmt" + "net/http" + "regexp" + "strconv" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google/internal/impersonate" + "golang.org/x/oauth2/google/internal/stsexchange" +) + +const ( + universeDomainPlaceholder = "UNIVERSE_DOMAIN" + defaultTokenURL = "https://sts.UNIVERSE_DOMAIN/v1/token" + defaultUniverseDomain = "googleapis.com" +) + +// now aliases time.Now for testing +var now = func() time.Time { + return time.Now().UTC() +} + +// Config stores the configuration for fetching tokens with external credentials. +type Config struct { + // Audience is the Secure Token Service (STS) audience which contains the resource name for the workload + // identity pool or the workforce pool and the provider identifier in that pool. Required. + Audience string + // SubjectTokenType is the STS token type based on the Oauth2.0 token exchange spec. + // Expected values include: + // “urn:ietf:params:oauth:token-type:jwt” + // “urn:ietf:params:oauth:token-type:id-token” + // “urn:ietf:params:oauth:token-type:saml2” + // “urn:ietf:params:aws:token-type:aws4_request” + // Required. + SubjectTokenType string + // TokenURL is the STS token exchange endpoint. If not provided, will default to + // https://sts.UNIVERSE_DOMAIN/v1/token, with UNIVERSE_DOMAIN set to the + // default service domain googleapis.com unless UniverseDomain is set. + // Optional. + TokenURL string + // TokenInfoURL is the token_info endpoint used to retrieve the account related information ( + // user attributes like account identifier, eg. email, username, uid, etc). This is + // needed for gCloud session account identification. Optional. + TokenInfoURL string + // ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only + // required for workload identity pools when APIs to be accessed have not integrated with UberMint. Optional. + ServiceAccountImpersonationURL string + // ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation + // token will be valid for. If not provided, it will default to 3600. Optional. + ServiceAccountImpersonationLifetimeSeconds int + // ClientSecret is currently only required if token_info endpoint also + // needs to be called with the generated GCP access token. When provided, STS will be + // called with additional basic authentication using ClientId as username and ClientSecret as password. Optional. + ClientSecret string + // ClientID is only required in conjunction with ClientSecret, as described above. Optional. + ClientID string + // CredentialSource contains the necessary information to retrieve the token itself, as well + // as some environmental information. One of SubjectTokenSupplier, AWSSecurityCredentialSupplier or + // CredentialSource must be provided. Optional. + CredentialSource *CredentialSource + // QuotaProjectID is injected by gCloud. If the value is non-empty, the Auth libraries + // will set the x-goog-user-project header which overrides the project associated with the credentials. Optional. + QuotaProjectID string + // Scopes contains the desired scopes for the returned access token. Optional. 
+ Scopes []string + // WorkforcePoolUserProject is the workforce pool user project number when the credential + // corresponds to a workforce pool and not a workload identity pool. + // The underlying principal must still have serviceusage.services.use IAM + // permission to use the project for billing/quota. Optional. + WorkforcePoolUserProject string + // SubjectTokenSupplier is an optional token supplier for OIDC/SAML credentials. + // One of SubjectTokenSupplier, AWSSecurityCredentialSupplier or CredentialSource must be provided. Optional. + SubjectTokenSupplier SubjectTokenSupplier + // AwsSecurityCredentialsSupplier is an AWS Security Credential supplier for AWS credentials. + // One of SubjectTokenSupplier, AWSSecurityCredentialSupplier or CredentialSource must be provided. Optional. + AwsSecurityCredentialsSupplier AwsSecurityCredentialsSupplier + // UniverseDomain is the default service domain for a given Cloud universe. + // This value will be used in the default STS token URL. The default value + // is "googleapis.com". It will not be used if TokenURL is set. Optional. + UniverseDomain string +} + +var ( + validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`) +) + +func validateWorkforceAudience(input string) bool { + return validWorkforceAudiencePattern.MatchString(input) +} + +// NewTokenSource Returns an external account TokenSource using the provided external account config. +func NewTokenSource(ctx context.Context, conf Config) (oauth2.TokenSource, error) { + if conf.Audience == "" { + return nil, fmt.Errorf("oauth2/google/externalaccount: Audience must be set") + } + if conf.SubjectTokenType == "" { + return nil, fmt.Errorf("oauth2/google/externalaccount: Subject token type must be set") + } + if conf.WorkforcePoolUserProject != "" { + valid := validateWorkforceAudience(conf.Audience) + if !valid { + return nil, fmt.Errorf("oauth2/google/externalaccount: Workforce pool user project should not be set for non-workforce pool credentials") + } + } + count := 0 + if conf.CredentialSource != nil { + count++ + } + if conf.SubjectTokenSupplier != nil { + count++ + } + if conf.AwsSecurityCredentialsSupplier != nil { + count++ + } + if count == 0 { + return nil, fmt.Errorf("oauth2/google/externalaccount: One of CredentialSource, SubjectTokenSupplier, or AwsSecurityCredentialsSupplier must be set") + } + if count > 1 { + return nil, fmt.Errorf("oauth2/google/externalaccount: Only one of CredentialSource, SubjectTokenSupplier, or AwsSecurityCredentialsSupplier must be set") + } + return conf.tokenSource(ctx, "https") +} + +// tokenSource is a private function that's directly called by some of the tests, +// because the unit test URLs are mocked, and would otherwise fail the +// validity check. +func (c *Config) tokenSource(ctx context.Context, scheme string) (oauth2.TokenSource, error) { + + ts := tokenSource{ + ctx: ctx, + conf: c, + } + if c.ServiceAccountImpersonationURL == "" { + return oauth2.ReuseTokenSource(nil, ts), nil + } + scopes := c.Scopes + ts.conf.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"} + imp := impersonate.ImpersonateTokenSource{ + Ctx: ctx, + URL: c.ServiceAccountImpersonationURL, + Scopes: scopes, + Ts: oauth2.ReuseTokenSource(nil, ts), + TokenLifetimeSeconds: c.ServiceAccountImpersonationLifetimeSeconds, + } + return oauth2.ReuseTokenSource(nil, imp), nil +} + +// Subject token file types. 
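To ground the Config and NewTokenSource surface above, a sketch of a programmatic workload identity setup with a caller-supplied subject token (the audience and token values are placeholders):

	import (
		"context"

		"golang.org/x/oauth2"
		"golang.org/x/oauth2/google/externalaccount"
	)

	// staticSupplier implements SubjectTokenSupplier with a fixed token; a real
	// supplier should fetch and cache a short-lived OIDC/SAML token itself.
	type staticSupplier struct{ token string }

	func (s staticSupplier) SubjectToken(ctx context.Context, opts externalaccount.SupplierOptions) (string, error) {
		return s.token, nil
	}

	func newFederatedTokenSource(ctx context.Context) (oauth2.TokenSource, error) {
		conf := externalaccount.Config{
			// Placeholder workload identity pool audience.
			Audience:             "//iam.googleapis.com/projects/000/locations/global/workloadIdentityPools/pool/providers/provider",
			SubjectTokenType:     "urn:ietf:params:oauth:token-type:jwt",
			SubjectTokenSupplier: staticSupplier{token: "opaque-oidc-token"},
			Scopes:               []string{"https://www.googleapis.com/auth/cloud-platform"},
		}
		// NewTokenSource enforces that exactly one of CredentialSource,
		// SubjectTokenSupplier, or AwsSecurityCredentialsSupplier is set.
		return externalaccount.NewTokenSource(ctx, conf)
	}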
+const ( + fileTypeText = "text" + fileTypeJSON = "json" +) + +// Format contains information needed to retrieve a subject token for URL or File sourced credentials. +type Format struct { + // Type should be either "text" or "json". This determines whether the file or URL sourced credentials + // expect a simple text subject token or if the subject token will be contained in a JSON object. + // When not provided "text" type is assumed. + Type string `json:"type"` + // SubjectTokenFieldName is only required for JSON format. This is the field name that the credentials will check + // for the subject token in the file or URL response. This would be "access_token" for Azure. + SubjectTokenFieldName string `json:"subject_token_field_name"` +} + +// CredentialSource stores the information necessary to retrieve the credentials for the STS exchange. +type CredentialSource struct { + // File is the location for file sourced credentials. + // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + File string `json:"file"` + + // URL is the URL to call for URL sourced credentials. + // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + URL string `json:"url"` + // Headers are the headers to attach to the request for URL sourced credentials. + Headers map[string]string `json:"headers"` + + // Executable is the configuration object for executable sourced credentials. + // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + Executable *ExecutableConfig `json:"executable"` + + // EnvironmentID is the EnvironmentID used for AWS sourced credentials. This should start with "AWS". + // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + EnvironmentID string `json:"environment_id"` + // RegionURL is the metadata URL to retrieve the region from for EC2 AWS credentials. + RegionURL string `json:"region_url"` + // RegionalCredVerificationURL is the AWS regional credential verification URL, will default to + // "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15" if not provided. + RegionalCredVerificationURL string `json:"regional_cred_verification_url"` + // IMDSv2SessionTokenURL is the URL to retrieve the session token when using IMDSv2 in AWS. + IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"` + // Format is the format type for the subject token. Used for File and URL sourced credentials. Expected values are "text" or "json". + Format Format `json:"format"` +} + +// ExecutableConfig contains information needed for executable sourced credentials. +type ExecutableConfig struct { + // Command is the full command to run to retrieve the subject token. + // This can include arguments. Must be an absolute path for the program. Required. + Command string `json:"command"` + // TimeoutMillis is the timeout duration, in milliseconds. Defaults to 30000 milliseconds when not provided. Optional. + TimeoutMillis *int `json:"timeout_millis"` + // OutputFile is the absolute path to the output file where the executable will cache the response. + // If specified the auth libraries will first check this location before running the executable. Optional.
+ OutputFile string `json:"output_file"` +} + +// SubjectTokenSupplier can be used to supply a subject token to exchange for a GCP access token. +type SubjectTokenSupplier interface { + // SubjectToken should return a valid subject token or an error. + // The external account token source does not cache the returned subject token, so caching + // logic should be implemented in the supplier to prevent multiple requests for the same subject token. + SubjectToken(ctx context.Context, options SupplierOptions) (string, error) +} + +// AwsSecurityCredentialsSupplier can be used to supply AwsSecurityCredentials and an AWS Region to +// exchange for a GCP access token. +type AwsSecurityCredentialsSupplier interface { + // AwsRegion should return the AWS region or an error. + AwsRegion(ctx context.Context, options SupplierOptions) (string, error) + // AwsSecurityCredentials should return a valid set of AwsSecurityCredentials or an error. + // The external account token source does not cache the returned security credentials, so caching + // logic should be implemented in the supplier to prevent multiple requests for the same security credentials. + AwsSecurityCredentials(ctx context.Context, options SupplierOptions) (*AwsSecurityCredentials, error) +} + +// SupplierOptions contains information about the requested subject token or AWS security credentials from the +// Google external account credential. +type SupplierOptions struct { + // Audience is the requested audience for the external account credential. + Audience string + // SubjectTokenType is the requested subject token type for the external account credential. Expected values include: + // “urn:ietf:params:oauth:token-type:jwt” + // “urn:ietf:params:oauth:token-type:id-token” + // “urn:ietf:params:oauth:token-type:saml2” + // “urn:ietf:params:aws:token-type:aws4_request” + SubjectTokenType string +} + +// tokenURL returns the default STS token endpoint with the configured universe +// domain. +func (c *Config) tokenURL() string { + if c.UniverseDomain == "" { + return strings.Replace(defaultTokenURL, universeDomainPlaceholder, defaultUniverseDomain, 1) + } + return strings.Replace(defaultTokenURL, universeDomainPlaceholder, c.UniverseDomain, 1) +} + +// parse determines the type of CredentialSource needed.
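Analogously for AWS, a sketch of a custom AwsSecurityCredentialsSupplier (the environment variable names are placeholders); pair it with SubjectTokenType "urn:ietf:params:aws:token-type:aws4_request" in the Config:

	import (
		"context"
		"os"

		"golang.org/x/oauth2/google/externalaccount"
	)

	type envAwsSupplier struct{}

	func (envAwsSupplier) AwsRegion(ctx context.Context, opts externalaccount.SupplierOptions) (string, error) {
		return "us-east-1", nil // placeholder region
	}

	// AwsSecurityCredentials is invoked on every token refresh; cache here if
	// the underlying lookup is expensive.
	func (envAwsSupplier) AwsSecurityCredentials(ctx context.Context, opts externalaccount.SupplierOptions) (*externalaccount.AwsSecurityCredentials, error) {
		return &externalaccount.AwsSecurityCredentials{
			AccessKeyID:     os.Getenv("EXAMPLE_ACCESS_KEY_ID"),     // placeholder
			SecretAccessKey: os.Getenv("EXAMPLE_SECRET_ACCESS_KEY"), // placeholder
			SessionToken:    os.Getenv("EXAMPLE_SESSION_TOKEN"),     // optional
		}, nil
	}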
+func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { + //set Defaults + if c.TokenURL == "" { + c.TokenURL = c.tokenURL() + } + supplierOptions := SupplierOptions{Audience: c.Audience, SubjectTokenType: c.SubjectTokenType} + + if c.AwsSecurityCredentialsSupplier != nil { + awsCredSource := awsCredentialSource{ + awsSecurityCredentialsSupplier: c.AwsSecurityCredentialsSupplier, + targetResource: c.Audience, + supplierOptions: supplierOptions, + ctx: ctx, + } + return awsCredSource, nil + } else if c.SubjectTokenSupplier != nil { + return programmaticRefreshCredentialSource{subjectTokenSupplier: c.SubjectTokenSupplier, supplierOptions: supplierOptions, ctx: ctx}, nil + } else if len(c.CredentialSource.EnvironmentID) > 3 && c.CredentialSource.EnvironmentID[:3] == "aws" { + if awsVersion, err := strconv.Atoi(c.CredentialSource.EnvironmentID[3:]); err == nil { + if awsVersion != 1 { + return nil, fmt.Errorf("oauth2/google/externalaccount: aws version '%d' is not supported in the current build", awsVersion) + } + + awsCredSource := awsCredentialSource{ + environmentID: c.CredentialSource.EnvironmentID, + regionURL: c.CredentialSource.RegionURL, + regionalCredVerificationURL: c.CredentialSource.RegionalCredVerificationURL, + credVerificationURL: c.CredentialSource.URL, + targetResource: c.Audience, + ctx: ctx, + } + if c.CredentialSource.IMDSv2SessionTokenURL != "" { + awsCredSource.imdsv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL + } + + return awsCredSource, nil + } + } else if c.CredentialSource.File != "" { + return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format}, nil + } else if c.CredentialSource.URL != "" { + return urlCredentialSource{URL: c.CredentialSource.URL, Headers: c.CredentialSource.Headers, Format: c.CredentialSource.Format, ctx: ctx}, nil + } else if c.CredentialSource.Executable != nil { + return createExecutableCredential(ctx, c.CredentialSource.Executable, c) + } + return nil, fmt.Errorf("oauth2/google/externalaccount: unable to parse credential source") +} + +type baseCredentialSource interface { + credentialSourceType() string + subjectToken() (string, error) +} + +// tokenSource is the source that handles external credentials. It is used to retrieve Tokens. +type tokenSource struct { + ctx context.Context + conf *Config +} + +func getMetricsHeaderValue(conf *Config, credSource baseCredentialSource) string { + return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t", + goVersion(), + "unknown", + credSource.credentialSourceType(), + conf.ServiceAccountImpersonationURL != "", + conf.ServiceAccountImpersonationLifetimeSeconds != 0) +} + +// Token allows tokenSource to conform to the oauth2.TokenSource interface. 
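For the config-file path through parse, the Go equivalent of a file-sourced credential configuration (the path and audience are placeholders; per Format's doc comment, "text" is also the default when Format is empty):

	import "golang.org/x/oauth2/google/externalaccount"

	var fileConf = externalaccount.Config{
		Audience:         "//iam.googleapis.com/projects/000/locations/global/workloadIdentityPools/pool/providers/provider",
		SubjectTokenType: "urn:ietf:params:oauth:token-type:jwt",
		CredentialSource: &externalaccount.CredentialSource{
			File:   "/var/run/secrets/oidc/token",
			Format: externalaccount.Format{Type: "text"},
		},
	}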
+func (ts tokenSource) Token() (*oauth2.Token, error) { + conf := ts.conf + + credSource, err := conf.parse(ts.ctx) + if err != nil { + return nil, err + } + subjectToken, err := credSource.subjectToken() + + if err != nil { + return nil, err + } + stsRequest := stsexchange.TokenExchangeRequest{ + GrantType: "urn:ietf:params:oauth:grant-type:token-exchange", + Audience: conf.Audience, + Scope: conf.Scopes, + RequestedTokenType: "urn:ietf:params:oauth:token-type:access_token", + SubjectToken: subjectToken, + SubjectTokenType: conf.SubjectTokenType, + } + header := make(http.Header) + header.Add("Content-Type", "application/x-www-form-urlencoded") + header.Add("x-goog-api-client", getMetricsHeaderValue(conf, credSource)) + clientAuth := stsexchange.ClientAuthentication{ + AuthStyle: oauth2.AuthStyleInHeader, + ClientID: conf.ClientID, + ClientSecret: conf.ClientSecret, + } + var options map[string]interface{} + // Do not pass workforce_pool_user_project when client authentication is used. + // The client ID is sufficient for determining the user project. + if conf.WorkforcePoolUserProject != "" && conf.ClientID == "" { + options = map[string]interface{}{ + "userProject": conf.WorkforcePoolUserProject, + } + } + stsResp, err := stsexchange.ExchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, options) + if err != nil { + return nil, err + } + + accessToken := &oauth2.Token{ + AccessToken: stsResp.AccessToken, + TokenType: stsResp.TokenType, + } + + // The RFC8693 doesn't define the explicit 0 of "expires_in" field behavior. + if stsResp.ExpiresIn <= 0 { + return nil, fmt.Errorf("oauth2/google/externalaccount: got invalid expiry from security token service") + } + accessToken.Expiry = now().Add(time.Duration(stsResp.ExpiresIn) * time.Second) + + if stsResp.RefreshToken != "" { + accessToken.RefreshToken = stsResp.RefreshToken + } + return accessToken, nil +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/executablecredsource.go similarity index 83% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go rename to vendor/golang.org/x/oauth2/google/externalaccount/executablecredsource.go index 579bcce5f..dca5681a4 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go +++ b/vendor/golang.org/x/oauth2/google/externalaccount/executablecredsource.go @@ -19,7 +19,7 @@ import ( "time" ) -var serviceAccountImpersonationRE = regexp.MustCompile("https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken") +var serviceAccountImpersonationRE = regexp.MustCompile("https://iamcredentials\\..+/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken") const ( executableSupportedMaxVersion = 1 @@ -39,51 +39,51 @@ func (nce nonCacheableError) Error() string { } func missingFieldError(source, field string) error { - return fmt.Errorf("oauth2/google: %v missing `%q` field", source, field) + return fmt.Errorf("oauth2/google/externalaccount: %v missing `%q` field", source, field) } func jsonParsingError(source, data string) error { - return fmt.Errorf("oauth2/google: unable to parse %v\nResponse: %v", source, data) + return fmt.Errorf("oauth2/google/externalaccount: unable to parse %v\nResponse: %v", source, data) } func malformedFailureError() error { - return nonCacheableError{"oauth2/google: response must include `error` and `message` fields when unsuccessful"} + return 
nonCacheableError{"oauth2/google/externalaccount: response must include `error` and `message` fields when unsuccessful"} } func userDefinedError(code, message string) error { - return nonCacheableError{fmt.Sprintf("oauth2/google: response contains unsuccessful response: (%v) %v", code, message)} + return nonCacheableError{fmt.Sprintf("oauth2/google/externalaccount: response contains unsuccessful response: (%v) %v", code, message)} } func unsupportedVersionError(source string, version int) error { - return fmt.Errorf("oauth2/google: %v contains unsupported version: %v", source, version) + return fmt.Errorf("oauth2/google/externalaccount: %v contains unsupported version: %v", source, version) } func tokenExpiredError() error { - return nonCacheableError{"oauth2/google: the token returned by the executable is expired"} + return nonCacheableError{"oauth2/google/externalaccount: the token returned by the executable is expired"} } func tokenTypeError(source string) error { - return fmt.Errorf("oauth2/google: %v contains unsupported token type", source) + return fmt.Errorf("oauth2/google/externalaccount: %v contains unsupported token type", source) } func exitCodeError(exitCode int) error { - return fmt.Errorf("oauth2/google: executable command failed with exit code %v", exitCode) + return fmt.Errorf("oauth2/google/externalaccount: executable command failed with exit code %v", exitCode) } func executableError(err error) error { - return fmt.Errorf("oauth2/google: executable command failed: %v", err) + return fmt.Errorf("oauth2/google/externalaccount: executable command failed: %v", err) } func executablesDisallowedError() error { - return errors.New("oauth2/google: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run") + return errors.New("oauth2/google/externalaccount: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run") } func timeoutRangeError() error { - return errors.New("oauth2/google: invalid `timeout_millis` field — executable timeout must be between 5 and 120 seconds") + return errors.New("oauth2/google/externalaccount: invalid `timeout_millis` field — executable timeout must be between 5 and 120 seconds") } func commandMissingError() error { - return errors.New("oauth2/google: missing `command` field — executable command must be provided") + return errors.New("oauth2/google/externalaccount: missing `command` field — executable command must be provided") } type environment interface { @@ -146,7 +146,7 @@ type executableCredentialSource struct { // CreateExecutableCredential creates an executableCredentialSource given an ExecutableConfig. // It also performs defaulting and type conversions. 
-func CreateExecutableCredential(ctx context.Context, ec *ExecutableConfig, config *Config) (executableCredentialSource, error) { +func createExecutableCredential(ctx context.Context, ec *ExecutableConfig, config *Config) (executableCredentialSource, error) { if ec.Command == "" { return executableCredentialSource{}, commandMissingError() } @@ -233,6 +233,10 @@ func (cs executableCredentialSource) parseSubjectTokenFromSource(response []byte return "", tokenTypeError(source) } +func (cs executableCredentialSource) credentialSourceType() string { + return "executable" +} + func (cs executableCredentialSource) subjectToken() (string, error) { if token, err := cs.getTokenFromOutputFile(); token != "" || err != nil { return token, err diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/filecredsource.go similarity index 57% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go rename to vendor/golang.org/x/oauth2/google/externalaccount/filecredsource.go index e953ddb47..33766b972 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go +++ b/vendor/golang.org/x/oauth2/google/externalaccount/filecredsource.go @@ -16,18 +16,22 @@ import ( type fileCredentialSource struct { File string - Format format + Format Format +} + +func (cs fileCredentialSource) credentialSourceType() string { + return "file" } func (cs fileCredentialSource) subjectToken() (string, error) { tokenFile, err := os.Open(cs.File) if err != nil { - return "", fmt.Errorf("oauth2/google: failed to open credential file %q", cs.File) + return "", fmt.Errorf("oauth2/google/externalaccount: failed to open credential file %q", cs.File) } defer tokenFile.Close() tokenBytes, err := ioutil.ReadAll(io.LimitReader(tokenFile, 1<<20)) if err != nil { - return "", fmt.Errorf("oauth2/google: failed to read credential file: %v", err) + return "", fmt.Errorf("oauth2/google/externalaccount: failed to read credential file: %v", err) } tokenBytes = bytes.TrimSpace(tokenBytes) switch cs.Format.Type { @@ -35,15 +39,15 @@ func (cs fileCredentialSource) subjectToken() (string, error) { jsonData := make(map[string]interface{}) err = json.Unmarshal(tokenBytes, &jsonData) if err != nil { - return "", fmt.Errorf("oauth2/google: failed to unmarshal subject token file: %v", err) + return "", fmt.Errorf("oauth2/google/externalaccount: failed to unmarshal subject token file: %v", err) } val, ok := jsonData[cs.Format.SubjectTokenFieldName] if !ok { - return "", errors.New("oauth2/google: provided subject_token_field_name not found in credentials") + return "", errors.New("oauth2/google/externalaccount: provided subject_token_field_name not found in credentials") } token, ok := val.(string) if !ok { - return "", errors.New("oauth2/google: improperly formatted subject token") + return "", errors.New("oauth2/google/externalaccount: improperly formatted subject token") } return token, nil case "text": @@ -51,7 +55,7 @@ func (cs fileCredentialSource) subjectToken() (string, error) { case "": return string(tokenBytes), nil default: - return "", errors.New("oauth2/google: invalid credential_source file format type") + return "", errors.New("oauth2/google/externalaccount: invalid credential_source file format type") } } diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/header.go b/vendor/golang.org/x/oauth2/google/externalaccount/header.go new file mode 100644 index 000000000..1d5aad2e2 --- /dev/null 
+++ b/vendor/golang.org/x/oauth2/google/externalaccount/header.go @@ -0,0 +1,64 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import ( + "runtime" + "strings" + "unicode" +) + +var ( + // version is a package internal global variable for testing purposes. + version = runtime.Version +) + +// versionUnknown is only used when the runtime version cannot be determined. +const versionUnknown = "UNKNOWN" + +// goVersion returns a Go runtime version derived from the runtime environment +// that is modified to be suitable for reporting in a header, meaning it has no +// whitespace. If it is unable to determine the Go runtime version, it returns +// versionUnknown. +func goVersion() string { + const develPrefix = "devel +" + + s := version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + // Some release candidates already have a dash in them. + if !strings.HasPrefix(prerelease, "-") { + prerelease = "-" + prerelease + } + s += prerelease + } + return s + } + return "UNKNOWN" +} diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/programmaticrefreshcredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/programmaticrefreshcredsource.go new file mode 100644 index 000000000..6c1abdf2d --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/programmaticrefreshcredsource.go @@ -0,0 +1,21 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
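Worked examples of the normalization goVersion performs, derived from the code above:

	// version() == "go1.21.0"                  -> "1.21.0"
	// version() == "go1.22rc1"                 -> "1.22.0-rc1" (".0" padding, dash added to the prerelease)
	// version() == "devel +abc123 linux/amd64" -> "abc123"
	// anything unrecognized                    -> "UNKNOWN"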
+ +package externalaccount + +import "context" + +type programmaticRefreshCredentialSource struct { + supplierOptions SupplierOptions + subjectTokenSupplier SubjectTokenSupplier + ctx context.Context +} + +func (cs programmaticRefreshCredentialSource) credentialSourceType() string { + return "programmatic" +} + +func (cs programmaticRefreshCredentialSource) subjectToken() (string, error) { + return cs.subjectTokenSupplier.SubjectToken(cs.ctx, cs.supplierOptions) +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/urlcredsource.go similarity index 57% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go rename to vendor/golang.org/x/oauth2/google/externalaccount/urlcredsource.go index 16dca6541..71a7184e0 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go +++ b/vendor/golang.org/x/oauth2/google/externalaccount/urlcredsource.go @@ -19,15 +19,19 @@ import ( type urlCredentialSource struct { URL string Headers map[string]string - Format format + Format Format ctx context.Context } +func (cs urlCredentialSource) credentialSourceType() string { + return "url" +} + func (cs urlCredentialSource) subjectToken() (string, error) { client := oauth2.NewClient(cs.ctx, nil) req, err := http.NewRequest("GET", cs.URL, nil) if err != nil { - return "", fmt.Errorf("oauth2/google: HTTP request for URL-sourced credential failed: %v", err) + return "", fmt.Errorf("oauth2/google/externalaccount: HTTP request for URL-sourced credential failed: %v", err) } req = req.WithContext(cs.ctx) @@ -36,16 +40,16 @@ func (cs urlCredentialSource) subjectToken() (string, error) { } resp, err := client.Do(req) if err != nil { - return "", fmt.Errorf("oauth2/google: invalid response when retrieving subject token: %v", err) + return "", fmt.Errorf("oauth2/google/externalaccount: invalid response when retrieving subject token: %v", err) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) if err != nil { - return "", fmt.Errorf("oauth2/google: invalid body in subject token URL query: %v", err) + return "", fmt.Errorf("oauth2/google/externalaccount: invalid body in subject token URL query: %v", err) } if c := resp.StatusCode; c < 200 || c > 299 { - return "", fmt.Errorf("oauth2/google: status code %d: %s", c, respBody) + return "", fmt.Errorf("oauth2/google/externalaccount: status code %d: %s", c, respBody) } switch cs.Format.Type { @@ -53,15 +57,15 @@ func (cs urlCredentialSource) subjectToken() (string, error) { jsonData := make(map[string]interface{}) err = json.Unmarshal(respBody, &jsonData) if err != nil { - return "", fmt.Errorf("oauth2/google: failed to unmarshal subject token file: %v", err) + return "", fmt.Errorf("oauth2/google/externalaccount: failed to unmarshal subject token file: %v", err) } val, ok := jsonData[cs.Format.SubjectTokenFieldName] if !ok { - return "", errors.New("oauth2/google: provided subject_token_field_name not found in credentials") + return "", errors.New("oauth2/google/externalaccount: provided subject_token_field_name not found in credentials") } token, ok := val.(string) if !ok { - return "", errors.New("oauth2/google: improperly formatted subject token") + return "", errors.New("oauth2/google/externalaccount: improperly formatted subject token") } return token, nil case "text": @@ -69,7 +73,7 @@ func (cs urlCredentialSource) subjectToken() (string, error) { case "": return string(respBody), nil 
default: - return "", errors.New("oauth2/google: invalid credential_source file format type") + return "", errors.New("oauth2/google/externalaccount: invalid credential_source file format type") } } diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index cc1223889..7b82e7a08 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -15,15 +15,18 @@ import ( "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" - "golang.org/x/oauth2/google/internal/externalaccount" + "golang.org/x/oauth2/google/externalaccount" + "golang.org/x/oauth2/google/internal/externalaccountauthorizeduser" + "golang.org/x/oauth2/google/internal/impersonate" "golang.org/x/oauth2/jwt" ) // Endpoint is Google's OAuth 2.0 default endpoint. var Endpoint = oauth2.Endpoint{ - AuthURL: "https://accounts.google.com/o/oauth2/auth", - TokenURL: "https://oauth2.googleapis.com/token", - AuthStyle: oauth2.AuthStyleInParams, + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://oauth2.googleapis.com/token", + DeviceAuthURL: "https://oauth2.googleapis.com/device/code", + AuthStyle: oauth2.AuthStyleInParams, } // MTLSTokenURL is Google's OAuth 2.0 default mTLS endpoint. @@ -95,10 +98,11 @@ func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { // JSON key file types. const ( - serviceAccountKey = "service_account" - userCredentialsKey = "authorized_user" - externalAccountKey = "external_account" - impersonatedServiceAccount = "impersonated_service_account" + serviceAccountKey = "service_account" + userCredentialsKey = "authorized_user" + externalAccountKey = "external_account" + externalAccountAuthorizedUserKey = "external_account_authorized_user" + impersonatedServiceAccount = "impersonated_service_account" ) // credentialsFile is the unmarshalled representation of a credentials file. @@ -106,12 +110,13 @@ type credentialsFile struct { Type string `json:"type"` // Service Account fields - ClientEmail string `json:"client_email"` - PrivateKeyID string `json:"private_key_id"` - PrivateKey string `json:"private_key"` - AuthURL string `json:"auth_uri"` - TokenURL string `json:"token_uri"` - ProjectID string `json:"project_id"` + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + AuthURL string `json:"auth_uri"` + TokenURL string `json:"token_uri"` + ProjectID string `json:"project_id"` + UniverseDomain string `json:"universe_domain"` // User Credential fields // (These typically come from gcloud auth.) 
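With DeviceAuthURL now populated, google.Endpoint works with the oauth2 package's device authorization grant. A sketch with a placeholder client ID (some Google client types also require ClientSecret for the token poll):

	import (
		"context"
		"fmt"

		"golang.org/x/oauth2"
		"golang.org/x/oauth2/google"
	)

	func deviceFlow(ctx context.Context) (*oauth2.Token, error) {
		cfg := &oauth2.Config{
			ClientID: "example-client-id", // placeholder
			Endpoint: google.Endpoint,     // now carries DeviceAuthURL
			Scopes:   []string{"openid"},
		}
		resp, err := cfg.DeviceAuth(ctx) // POSTs to Endpoint.DeviceAuthURL
		if err != nil {
			return nil, err
		}
		fmt.Printf("visit %s and enter code %s\n", resp.VerificationURI, resp.UserCode)
		return cfg.DeviceAccessToken(ctx, resp) // polls until the user approves
	}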
@@ -131,6 +136,9 @@ type credentialsFile struct { QuotaProjectID string `json:"quota_project_id"` WorkforcePoolUserProject string `json:"workforce_pool_user_project"` + // External Account Authorized User fields + RevokeURL string `json:"revoke_url"` + // Service account impersonation SourceCredentials *credentialsFile `json:"source_credentials"` } @@ -193,11 +201,24 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar ServiceAccountImpersonationLifetimeSeconds: f.ServiceAccountImpersonation.TokenLifetimeSeconds, ClientSecret: f.ClientSecret, ClientID: f.ClientID, - CredentialSource: f.CredentialSource, + CredentialSource: &f.CredentialSource, QuotaProjectID: f.QuotaProjectID, Scopes: params.Scopes, WorkforcePoolUserProject: f.WorkforcePoolUserProject, } + return externalaccount.NewTokenSource(ctx, *cfg) + case externalAccountAuthorizedUserKey: + cfg := &externalaccountauthorizeduser.Config{ + Audience: f.Audience, + RefreshToken: f.RefreshToken, + TokenURL: f.TokenURLExternal, + TokenInfoURL: f.TokenInfoURL, + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + RevokeURL: f.RevokeURL, + QuotaProjectID: f.QuotaProjectID, + Scopes: params.Scopes, + } return cfg.TokenSource(ctx) case impersonatedServiceAccount: if f.ServiceAccountImpersonationURL == "" || f.SourceCredentials == nil { @@ -208,7 +229,7 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar if err != nil { return nil, err } - imp := externalaccount.ImpersonateTokenSource{ + imp := impersonate.ImpersonateTokenSource{ Ctx: ctx, URL: f.ServiceAccountImpersonationURL, Scopes: params.Scopes, @@ -231,7 +252,10 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar // Further information about retrieving access tokens from the GCE metadata // server can be found at https://cloud.google.com/compute/docs/authentication. func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource { - return computeTokenSource(account, 0, scope...) + // refresh 3 minutes and 45 seconds early. The shortest MDS cache is currently 4 minutes, so any + // refreshes earlier are a waste of compute. + earlyExpirySecs := 225 * time.Second + return computeTokenSource(account, earlyExpirySecs, scope...) } func computeTokenSource(account string, earlyExpiry time.Duration, scope ...string) oauth2.TokenSource { diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go deleted file mode 100644 index dcd252a61..000000000 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package externalaccount - -import ( - "context" - "fmt" - "net/http" - "net/url" - "regexp" - "strconv" - "strings" - "time" - - "golang.org/x/oauth2" -) - -// now aliases time.Now for testing -var now = func() time.Time { - return time.Now().UTC() -} - -// Config stores the configuration for fetching tokens with external credentials. -type Config struct { - // Audience is the Secure Token Service (STS) audience which contains the resource name for the workload - // identity pool or the workforce pool and the provider identifier in that pool. - Audience string - // SubjectTokenType is the STS token type based on the Oauth2.0 token exchange spec - // e.g. 
`urn:ietf:params:oauth:token-type:jwt`. - SubjectTokenType string - // TokenURL is the STS token exchange endpoint. - TokenURL string - // TokenInfoURL is the token_info endpoint used to retrieve the account related information ( - // user attributes like account identifier, eg. email, username, uid, etc). This is - // needed for gCloud session account identification. - TokenInfoURL string - // ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only - // required for workload identity pools when APIs to be accessed have not integrated with UberMint. - ServiceAccountImpersonationURL string - // ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation - // token will be valid for. - ServiceAccountImpersonationLifetimeSeconds int - // ClientSecret is currently only required if token_info endpoint also - // needs to be called with the generated GCP access token. When provided, STS will be - // called with additional basic authentication using client_id as username and client_secret as password. - ClientSecret string - // ClientID is only required in conjunction with ClientSecret, as described above. - ClientID string - // CredentialSource contains the necessary information to retrieve the token itself, as well - // as some environmental information. - CredentialSource CredentialSource - // QuotaProjectID is injected by gCloud. If the value is non-empty, the Auth libraries - // will set the x-goog-user-project which overrides the project associated with the credentials. - QuotaProjectID string - // Scopes contains the desired scopes for the returned access token. - Scopes []string - // The optional workforce pool user project number when the credential - // corresponds to a workforce pool and not a workload identity pool. - // The underlying principal must still have serviceusage.services.use IAM - // permission to use the project for billing/quota. - WorkforcePoolUserProject string -} - -// Each element consists of a list of patterns. validateURLs checks for matches -// that include all elements in a given list, in that order. - -var ( - validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`) -) - -func validateURL(input string, patterns []*regexp.Regexp, scheme string) bool { - parsed, err := url.Parse(input) - if err != nil { - return false - } - if !strings.EqualFold(parsed.Scheme, scheme) { - return false - } - toTest := parsed.Host - - for _, pattern := range patterns { - if pattern.MatchString(toTest) { - return true - } - } - return false -} - -func validateWorkforceAudience(input string) bool { - return validWorkforceAudiencePattern.MatchString(input) -} - -// TokenSource Returns an external account TokenSource struct. This is to be called by package google to construct a google.Credentials. -func (c *Config) TokenSource(ctx context.Context) (oauth2.TokenSource, error) { - return c.tokenSource(ctx, "https") -} - -// tokenSource is a private function that's directly called by some of the tests, -// because the unit test URLs are mocked, and would otherwise fail the -// validity check. 
-func (c *Config) tokenSource(ctx context.Context, scheme string) (oauth2.TokenSource, error) { - if c.WorkforcePoolUserProject != "" { - valid := validateWorkforceAudience(c.Audience) - if !valid { - return nil, fmt.Errorf("oauth2/google: workforce_pool_user_project should not be set for non-workforce pool credentials") - } - } - - ts := tokenSource{ - ctx: ctx, - conf: c, - } - if c.ServiceAccountImpersonationURL == "" { - return oauth2.ReuseTokenSource(nil, ts), nil - } - scopes := c.Scopes - ts.conf.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"} - imp := ImpersonateTokenSource{ - Ctx: ctx, - URL: c.ServiceAccountImpersonationURL, - Scopes: scopes, - Ts: oauth2.ReuseTokenSource(nil, ts), - TokenLifetimeSeconds: c.ServiceAccountImpersonationLifetimeSeconds, - } - return oauth2.ReuseTokenSource(nil, imp), nil -} - -// Subject token file types. -const ( - fileTypeText = "text" - fileTypeJSON = "json" -) - -type format struct { - // Type is either "text" or "json". When not provided "text" type is assumed. - Type string `json:"type"` - // SubjectTokenFieldName is only required for JSON format. This would be "access_token" for azure. - SubjectTokenFieldName string `json:"subject_token_field_name"` -} - -// CredentialSource stores the information necessary to retrieve the credentials for the STS exchange. -// One field amongst File, URL, and Executable should be filled, depending on the kind of credential in question. -// The EnvironmentID should start with AWS if being used for an AWS credential. -type CredentialSource struct { - File string `json:"file"` - - URL string `json:"url"` - Headers map[string]string `json:"headers"` - - Executable *ExecutableConfig `json:"executable"` - - EnvironmentID string `json:"environment_id"` - RegionURL string `json:"region_url"` - RegionalCredVerificationURL string `json:"regional_cred_verification_url"` - CredVerificationURL string `json:"cred_verification_url"` - IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"` - Format format `json:"format"` -} - -type ExecutableConfig struct { - Command string `json:"command"` - TimeoutMillis *int `json:"timeout_millis"` - OutputFile string `json:"output_file"` -} - -// parse determines the type of CredentialSource needed. 
-func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { - if len(c.CredentialSource.EnvironmentID) > 3 && c.CredentialSource.EnvironmentID[:3] == "aws" { - if awsVersion, err := strconv.Atoi(c.CredentialSource.EnvironmentID[3:]); err == nil { - if awsVersion != 1 { - return nil, fmt.Errorf("oauth2/google: aws version '%d' is not supported in the current build", awsVersion) - } - - awsCredSource := awsCredentialSource{ - EnvironmentID: c.CredentialSource.EnvironmentID, - RegionURL: c.CredentialSource.RegionURL, - RegionalCredVerificationURL: c.CredentialSource.RegionalCredVerificationURL, - CredVerificationURL: c.CredentialSource.URL, - TargetResource: c.Audience, - ctx: ctx, - } - if c.CredentialSource.IMDSv2SessionTokenURL != "" { - awsCredSource.IMDSv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL - } - - if err := awsCredSource.validateMetadataServers(); err != nil { - return nil, err - } - - return awsCredSource, nil - } - } else if c.CredentialSource.File != "" { - return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format}, nil - } else if c.CredentialSource.URL != "" { - return urlCredentialSource{URL: c.CredentialSource.URL, Headers: c.CredentialSource.Headers, Format: c.CredentialSource.Format, ctx: ctx}, nil - } else if c.CredentialSource.Executable != nil { - return CreateExecutableCredential(ctx, c.CredentialSource.Executable, c) - } - return nil, fmt.Errorf("oauth2/google: unable to parse credential source") -} - -type baseCredentialSource interface { - subjectToken() (string, error) -} - -// tokenSource is the source that handles external credentials. It is used to retrieve Tokens. -type tokenSource struct { - ctx context.Context - conf *Config -} - -// Token allows tokenSource to conform to the oauth2.TokenSource interface. -func (ts tokenSource) Token() (*oauth2.Token, error) { - conf := ts.conf - - credSource, err := conf.parse(ts.ctx) - if err != nil { - return nil, err - } - subjectToken, err := credSource.subjectToken() - - if err != nil { - return nil, err - } - stsRequest := stsTokenExchangeRequest{ - GrantType: "urn:ietf:params:oauth:grant-type:token-exchange", - Audience: conf.Audience, - Scope: conf.Scopes, - RequestedTokenType: "urn:ietf:params:oauth:token-type:access_token", - SubjectToken: subjectToken, - SubjectTokenType: conf.SubjectTokenType, - } - header := make(http.Header) - header.Add("Content-Type", "application/x-www-form-urlencoded") - clientAuth := clientAuthentication{ - AuthStyle: oauth2.AuthStyleInHeader, - ClientID: conf.ClientID, - ClientSecret: conf.ClientSecret, - } - var options map[string]interface{} - // Do not pass workforce_pool_user_project when client authentication is used. - // The client ID is sufficient for determining the user project. 
- if conf.WorkforcePoolUserProject != "" && conf.ClientID == "" { - options = map[string]interface{}{ - "userProject": conf.WorkforcePoolUserProject, - } - } - stsResp, err := exchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, options) - if err != nil { - return nil, err - } - - accessToken := &oauth2.Token{ - AccessToken: stsResp.AccessToken, - TokenType: stsResp.TokenType, - } - if stsResp.ExpiresIn < 0 { - return nil, fmt.Errorf("oauth2/google: got invalid expiry from security token service") - } else if stsResp.ExpiresIn >= 0 { - accessToken.Expiry = now().Add(time.Duration(stsResp.ExpiresIn) * time.Second) - } - - if stsResp.RefreshToken != "" { - accessToken.RefreshToken = stsResp.RefreshToken - } - return accessToken, nil -} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/err.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/err.go deleted file mode 100644 index 233a78cef..000000000 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/err.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package externalaccount - -import "fmt" - -// Error for handling OAuth related error responses as stated in rfc6749#5.2. -type Error struct { - Code string - URI string - Description string -} - -func (err *Error) Error() string { - return fmt.Sprintf("got error code %s from %s: %s", err.Code, err.URI, err.Description) -} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go b/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go new file mode 100644 index 000000000..cb5820707 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go @@ -0,0 +1,114 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccountauthorizeduser + +import ( + "context" + "errors" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google/internal/stsexchange" +) + +// now aliases time.Now for testing. +var now = func() time.Time { + return time.Now().UTC() +} + +var tokenValid = func(token oauth2.Token) bool { + return token.Valid() +} + +type Config struct { + // Audience is the Secure Token Service (STS) audience which contains the resource name for the workforce pool and + // the provider identifier in that pool. + Audience string + // RefreshToken is the optional OAuth 2.0 refresh token. If specified, credentials can be refreshed. + RefreshToken string + // TokenURL is the optional STS token exchange endpoint for refresh. Must be specified for refresh, can be left as + // None if the token can not be refreshed. + TokenURL string + // TokenInfoURL is the optional STS endpoint URL for token introspection. + TokenInfoURL string + // ClientID is only required in conjunction with ClientSecret, as described above. + ClientID string + // ClientSecret is currently only required if token_info endpoint also needs to be called with the generated GCP + // access token. When provided, STS will be called with additional basic authentication using client_id as username + // and client_secret as password. + ClientSecret string + // Token is the OAuth2.0 access token. 
Can be nil if refresh information is provided. + Token string + // Expiry is the optional expiration datetime of the OAuth 2.0 access token. + Expiry time.Time + // RevokeURL is the optional STS endpoint URL for revoking tokens. + RevokeURL string + // QuotaProjectID is the optional project ID used for quota and billing. This project may be different from the + // project used to create the credentials. + QuotaProjectID string + Scopes []string +} + +func (c *Config) canRefresh() bool { + return c.ClientID != "" && c.ClientSecret != "" && c.RefreshToken != "" && c.TokenURL != "" +} + +func (c *Config) TokenSource(ctx context.Context) (oauth2.TokenSource, error) { + var token oauth2.Token + if c.Token != "" && !c.Expiry.IsZero() { + token = oauth2.Token{ + AccessToken: c.Token, + Expiry: c.Expiry, + TokenType: "Bearer", + } + } + if !tokenValid(token) && !c.canRefresh() { + return nil, errors.New("oauth2/google: Token should be created with fields to make it valid (`token` and `expiry`), or fields to allow it to refresh (`refresh_token`, `token_url`, `client_id`, `client_secret`).") + } + + ts := tokenSource{ + ctx: ctx, + conf: c, + } + + return oauth2.ReuseTokenSource(&token, ts), nil +} + +type tokenSource struct { + ctx context.Context + conf *Config +} + +func (ts tokenSource) Token() (*oauth2.Token, error) { + conf := ts.conf + if !conf.canRefresh() { + return nil, errors.New("oauth2/google: The credentials do not contain the necessary fields need to refresh the access token. You must specify refresh_token, token_url, client_id, and client_secret.") + } + + clientAuth := stsexchange.ClientAuthentication{ + AuthStyle: oauth2.AuthStyleInHeader, + ClientID: conf.ClientID, + ClientSecret: conf.ClientSecret, + } + + stsResponse, err := stsexchange.RefreshAccessToken(ts.ctx, conf.TokenURL, conf.RefreshToken, clientAuth, nil) + if err != nil { + return nil, err + } + if stsResponse.ExpiresIn < 0 { + return nil, errors.New("oauth2/google: got invalid expiry from security token service") + } + + if stsResponse.RefreshToken != "" { + conf.RefreshToken = stsResponse.RefreshToken + } + + token := &oauth2.Token{ + AccessToken: stsResponse.AccessToken, + Expiry: now().Add(time.Duration(stsResponse.ExpiresIn) * time.Second), + TokenType: "Bearer", + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go b/vendor/golang.org/x/oauth2/google/internal/impersonate/impersonate.go similarity index 99% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go rename to vendor/golang.org/x/oauth2/google/internal/impersonate/impersonate.go index 54c8f209f..6bc3af110 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go +++ b/vendor/golang.org/x/oauth2/google/internal/impersonate/impersonate.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
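Not part of the patch: the externalaccountauthorizeduser package whose diff ends above is reached through the new external_account_authorized_user branch in google.go's tokenSource. A hedged end-to-end sketch of loading such a credentials file through the public API; every value in the JSON below is fabricated, and the scope is only an example:

    package main

    import (
        "context"
        "fmt"

        "golang.org/x/oauth2/google"
    )

    // credsJSON is a fake external_account_authorized_user credentials file;
    // the field names follow the credentialsFile struct in google.go.
    const credsJSON = `{
      "type": "external_account_authorized_user",
      "audience": "//iam.googleapis.com/locations/global/workforcePools/pool-id/providers/provider-id",
      "refresh_token": "fake-refresh-token",
      "token_url": "https://sts.googleapis.com/v1/oauthtoken",
      "token_info_url": "https://sts.googleapis.com/v1/introspect",
      "client_id": "fake-client-id",
      "client_secret": "fake-client-secret"
    }`

    func main() {
        ctx := context.Background()
        creds, err := google.CredentialsFromJSON(ctx, []byte(credsJSON),
            "https://www.googleapis.com/auth/cloud-platform")
        if err != nil {
            panic(err)
        }
        // Tokens are minted via the refresh_token grant implemented above and
        // cached by oauth2.ReuseTokenSource.
        tok, err := creds.TokenSource.Token()
        fmt.Println(tok, err)
    }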
-package externalaccount +package impersonate import ( "bytes" diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go b/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go similarity index 88% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go rename to vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go index 99987ce29..ebd520eac 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go +++ b/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package externalaccount +package stsexchange import ( "encoding/base64" @@ -12,8 +12,8 @@ import ( "golang.org/x/oauth2" ) -// clientAuthentication represents an OAuth client ID and secret and the mechanism for passing these credentials as stated in rfc6749#2.3.1. -type clientAuthentication struct { +// ClientAuthentication represents an OAuth client ID and secret and the mechanism for passing these credentials as stated in rfc6749#2.3.1. +type ClientAuthentication struct { // AuthStyle can be either basic or request-body AuthStyle oauth2.AuthStyle ClientID string @@ -23,7 +23,7 @@ type clientAuthentication struct { // InjectAuthentication is used to add authentication to a Secure Token Service exchange // request. It modifies either the passed url.Values or http.Header depending on the desired // authentication format. -func (c *clientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { +func (c *ClientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil { return } diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go b/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go similarity index 68% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go rename to vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go index e6fcae5fc..1a0bebd15 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go +++ b/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package externalaccount +package stsexchange import ( "context" @@ -18,14 +18,17 @@ import ( "golang.org/x/oauth2" ) -// exchangeToken performs an oauth2 token exchange with the provided endpoint. +func defaultHeader() http.Header { + header := make(http.Header) + header.Add("Content-Type", "application/x-www-form-urlencoded") + return header +} + +// ExchangeToken performs an oauth2 token exchange with the provided endpoint. // The first 4 fields are all mandatory. headers can be used to pass additional // headers beyond the bare minimum required by the token exchange. options can // be used to pass additional JSON-structured options to the remote server. 
-func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchangeRequest, authentication clientAuthentication, headers http.Header, options map[string]interface{}) (*stsTokenExchangeResponse, error) { - - client := oauth2.NewClient(ctx, nil) - +func ExchangeToken(ctx context.Context, endpoint string, request *TokenExchangeRequest, authentication ClientAuthentication, headers http.Header, options map[string]interface{}) (*Response, error) { data := url.Values{} data.Set("audience", request.Audience) data.Set("grant_type", "urn:ietf:params:oauth:grant-type:token-exchange") @@ -41,13 +44,28 @@ func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchan data.Set("options", string(opts)) } + return makeRequest(ctx, endpoint, data, authentication, headers) +} + +func RefreshAccessToken(ctx context.Context, endpoint string, refreshToken string, authentication ClientAuthentication, headers http.Header) (*Response, error) { + data := url.Values{} + data.Set("grant_type", "refresh_token") + data.Set("refresh_token", refreshToken) + + return makeRequest(ctx, endpoint, data, authentication, headers) +} + +func makeRequest(ctx context.Context, endpoint string, data url.Values, authentication ClientAuthentication, headers http.Header) (*Response, error) { + if headers == nil { + headers = defaultHeader() + } + client := oauth2.NewClient(ctx, nil) authentication.InjectAuthentication(data, headers) encodedData := data.Encode() req, err := http.NewRequest("POST", endpoint, strings.NewReader(encodedData)) if err != nil { return nil, fmt.Errorf("oauth2/google: failed to properly build http request: %v", err) - } req = req.WithContext(ctx) for key, list := range headers { @@ -71,7 +89,7 @@ func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchan if c := resp.StatusCode; c < 200 || c > 299 { return nil, fmt.Errorf("oauth2/google: status code %d: %s", c, body) } - var stsResp stsTokenExchangeResponse + var stsResp Response err = json.Unmarshal(body, &stsResp) if err != nil { return nil, fmt.Errorf("oauth2/google: failed to unmarshal response body from Secure Token Server: %v", err) @@ -81,8 +99,8 @@ func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchan return &stsResp, nil } -// stsTokenExchangeRequest contains fields necessary to make an oauth2 token exchange. -type stsTokenExchangeRequest struct { +// TokenExchangeRequest contains fields necessary to make an oauth2 token exchange. +type TokenExchangeRequest struct { ActingParty struct { ActorToken string ActorTokenType string @@ -96,8 +114,8 @@ type stsTokenExchangeRequest struct { SubjectTokenType string } -// stsTokenExchangeResponse is used to decode the remote server response during an oauth2 token exchange. -type stsTokenExchangeResponse struct { +// Response is used to decode the remote server response during an oauth2 token exchange. +type Response struct { AccessToken string `json:"access_token"` IssuedTokenType string `json:"issued_token_type"` TokenType string `json:"token_type"` diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go deleted file mode 100644 index d28140f78..000000000 --- a/vendor/golang.org/x/oauth2/internal/client_appengine.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build appengine - -package internal - -import "google.golang.org/appengine/urlfetch" - -func init() { - appengineClientHook = urlfetch.Client -} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go index 572074a63..b9db01ddf 100644 --- a/vendor/golang.org/x/oauth2/internal/transport.go +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -18,16 +18,11 @@ var HTTPClient ContextKey // because nobody else can create a ContextKey, being unexported. type ContextKey struct{} -var appengineClientHook func(context.Context) *http.Client - func ContextClient(ctx context.Context) *http.Client { if ctx != nil { if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { return hc } } - if appengineClientHook != nil { - return appengineClientHook(ctx) - } return http.DefaultClient } diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index cc7c98c25..09f6a49b8 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -75,8 +75,9 @@ type TokenSource interface { // Endpoint represents an OAuth 2.0 provider's authorization and token // endpoint URLs. type Endpoint struct { - AuthURL string - TokenURL string + AuthURL string + DeviceAuthURL string + TokenURL string // AuthStyle optionally specifies how the endpoint wants the // client ID & client secret sent. The zero value means to @@ -143,15 +144,19 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // AuthCodeURL returns a URL to OAuth 2.0 provider's consent page // that asks for permissions for the required scopes explicitly. // -// State is a token to protect the user from CSRF attacks. You must -// always provide a non-empty string and validate that it matches the -// state query parameter on your redirect callback. -// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. +// State is an opaque value used by the client to maintain state between the +// request and callback. The authorization server includes this value when +// redirecting the user agent back to the client. // // Opts may include AccessTypeOnline or AccessTypeOffline, as well // as ApprovalForce. -// It can also be used to pass the PKCE challenge. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +// +// To protect against CSRF attacks, opts should include a PKCE challenge +// (S256ChallengeOption). Not all servers support PKCE. An alternative is to +// generate a random state parameter and verify it after exchange. +// See https://datatracker.ietf.org/doc/html/rfc6749#section-10.12 (predating +// PKCE), https://www.oauth.com/oauth2-servers/pkce/ and +// https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-09.html#name-cross-site-request-forgery (describing both approaches) func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { var buf bytes.Buffer buf.WriteString(c.Endpoint.AuthURL) @@ -166,7 +171,6 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { v.Set("scope", strings.Join(c.Scopes, " ")) } if state != "" { - // TODO(light): Docs say never to omit state; don't allow empty. v.Set("state", state) } for _, opt := range opts { @@ -211,10 +215,11 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor // The provided context optionally controls which HTTP client is used. See the HTTPClient variable. // // The code will be in the *http.Request.FormValue("code"). 
Before -// calling Exchange, be sure to validate FormValue("state"). +// calling Exchange, be sure to validate FormValue("state") if you are +// using it to protect against CSRF attacks. // -// Opts may include the PKCE verifier code if previously used in AuthCodeURL. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +// If using PKCE to protect against CSRF attacks, opts should include a +// VerifierOption. func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) { v := url.Values{ "grant_type": {"authorization_code"}, @@ -388,7 +393,7 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource { } } -// ReuseTokenSource returns a TokenSource that acts in the same manner as the +// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the // TokenSource returned by ReuseTokenSource, except the expiry buffer is // configurable. The expiration time of a token is calculated as // t.Expiry.Add(-earlyExpiry). diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go new file mode 100644 index 000000000..50593b6df --- /dev/null +++ b/vendor/golang.org/x/oauth2/pkce.go @@ -0,0 +1,68 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package oauth2 + +import ( + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "net/url" +) + +const ( + codeChallengeKey = "code_challenge" + codeChallengeMethodKey = "code_challenge_method" + codeVerifierKey = "code_verifier" +) + +// GenerateVerifier generates a PKCE code verifier with 32 octets of randomness. +// This follows recommendations in RFC 7636. +// +// A fresh verifier should be generated for each authorization. +// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL +// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange +// (or Config.DeviceAccessToken). +func GenerateVerifier() string { + // "RECOMMENDED that the output of a suitable random number generator be + // used to create a 32-octet sequence. The octet sequence is then + // base64url-encoded to produce a 43-octet URL-safe string to use as the + // code verifier." + // https://datatracker.ietf.org/doc/html/rfc7636#section-4.1 + data := make([]byte, 32) + if _, err := rand.Read(data); err != nil { + panic(err) + } + return base64.RawURLEncoding.EncodeToString(data) +} + +// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be +// passed to Config.Exchange or Config.DeviceAccessToken only. +func VerifierOption(verifier string) AuthCodeOption { + return setParam{k: codeVerifierKey, v: verifier} +} + +// S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256. +// +// Prefer to use S256ChallengeOption where possible. +func S256ChallengeFromVerifier(verifier string) string { + sha := sha256.Sum256([]byte(verifier)) + return base64.RawURLEncoding.EncodeToString(sha[:]) +} + +// S256ChallengeOption derives a PKCE code challenge derived from verifier with +// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess +// only. 
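// Not part of the vendored file: an illustrative sketch of the intended use of
// the helpers in this file, assuming conf is an *oauth2.Config, ctx a
// context.Context, and code the authorization code from the redirect callback:
//
//	verifier := oauth2.GenerateVerifier()
//	authURL := conf.AuthCodeURL("state", oauth2.S256ChallengeOption(verifier))
//	// ...the user authorizes at authURL; the callback delivers code...
//	tok, err := conf.Exchange(ctx, code, oauth2.VerifierOption(verifier))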
+func S256ChallengeOption(verifier string) AuthCodeOption { + return challengeOption{ + challenge_method: "S256", + challenge: S256ChallengeFromVerifier(verifier), + } +} + +type challengeOption struct{ challenge_method, challenge string } + +func (p challengeOption) setValue(m url.Values) { + m.Set(codeChallengeMethodKey, p.challenge_method) + m.Set(codeChallengeKey, p.challenge) +} diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index fdcaa974d..4ed2e488b 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -263,6 +263,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -549,6 +550,7 @@ ccflags="$@" $2 !~ "NLA_TYPE_MASK" && $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || + $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go index fd45fe529..3a5e776f8 100644 --- a/vendor/golang.org/x/sys/unix/mremap.go +++ b/vendor/golang.org/x/sys/unix/mremap.go @@ -50,3 +50,8 @@ func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data [ func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { return mapper.Mremap(oldData, newLength, flags) } + +func MremapPtr(oldAddr unsafe.Pointer, oldSize uintptr, newAddr unsafe.Pointer, newSize uintptr, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mremap(uintptr(oldAddr), oldSize, newSize, flags, uintptr(newAddr)) + return unsafe.Pointer(xaddr), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 59542a897..4cc7b0059 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -542,6 +542,18 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { } } +//sys pthread_chdir_np(path string) (err error) + +func PthreadChdir(path string) (err error) { + return pthread_chdir_np(path) +} + +//sys pthread_fchdir_np(fd int) (err error) + +func PthreadFchdir(fd int) (err error) { + return pthread_fchdir_np(fd) +} + //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 77081de8c..4e92e5aa4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -154,6 +154,15 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), err +} + +func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 93a38a97d..877a62b47 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -502,6 +502,7 @@ const ( 
BPF_IMM = 0x0 BPF_IND = 0x40 BPF_JA = 0x0 + BPF_JCOND = 0xe0 BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 @@ -657,6 +658,9 @@ const ( CAN_NPROTO = 0x8 CAN_RAW = 0x1 CAN_RAW_FILTER_MAX = 0x200 + CAN_RAW_XL_VCID_RX_FILTER = 0x4 + CAN_RAW_XL_VCID_TX_PASS = 0x2 + CAN_RAW_XL_VCID_TX_SET = 0x1 CAN_RTR_FLAG = 0x40000000 CAN_SFF_ID_BITS = 0xb CAN_SFF_MASK = 0x7ff @@ -1339,6 +1343,7 @@ const ( F_OFD_SETLK = 0x25 F_OFD_SETLKW = 0x26 F_OK = 0x0 + F_SEAL_EXEC = 0x20 F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 @@ -1627,6 +1632,7 @@ const ( IP_FREEBIND = 0xf IP_HDRINCL = 0x3 IP_IPSEC_POLICY = 0x10 + IP_LOCAL_PORT_RANGE = 0x33 IP_MAXPACKET = 0xffff IP_MAX_MEMBERSHIPS = 0x14 IP_MF = 0x2000 @@ -1653,6 +1659,7 @@ const ( IP_PMTUDISC_OMIT = 0x5 IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 + IP_PROTOCOL = 0x34 IP_RECVERR = 0xb IP_RECVERR_RFC4884 = 0x1a IP_RECVFRAGSIZE = 0x19 @@ -2169,7 +2176,7 @@ const ( NFT_SECMARK_CTX_MAXLEN = 0x100 NFT_SET_MAXNAMELEN = 0x100 NFT_SOCKET_MAX = 0x3 - NFT_TABLE_F_MASK = 0x3 + NFT_TABLE_F_MASK = 0x7 NFT_TABLE_MAXNAMELEN = 0x100 NFT_TRACETYPE_MAX = 0x3 NFT_TUNNEL_F_MASK = 0x7 @@ -2403,6 +2410,7 @@ const ( PERF_RECORD_MISC_USER = 0x2 PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 + PID_FS_MAGIC = 0x50494446 PIPEFS_MAGIC = 0x50495045 PPPIOCGNPMODE = 0xc008744c PPPIOCNEWUNIT = 0xc004743e @@ -2896,8 +2904,9 @@ const ( RWF_APPEND = 0x10 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 + RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x1f + RWF_SUPPORTED = 0x3f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 @@ -2918,7 +2927,9 @@ const ( SCHED_RESET_ON_FORK = 0x40000000 SCHED_RR = 0x2 SCM_CREDENTIALS = 0x2 + SCM_PIDFD = 0x4 SCM_RIGHTS = 0x1 + SCM_SECURITY = 0x3 SCM_TIMESTAMP = 0x1d SC_LOG_FLUSH = 0x100000 SECCOMP_ADDFD_FLAG_SEND = 0x2 @@ -3051,6 +3062,8 @@ const ( SIOCSMIIREG = 0x8949 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SK_DIAG_BPF_STORAGE_MAX = 0x3 + SK_DIAG_BPF_STORAGE_REQ_MAX = 0x1 SMACK_MAGIC = 0x43415d53 SMART_AUTOSAVE = 0xd2 SMART_AUTO_OFFLINE = 0xdb @@ -3071,6 +3084,8 @@ const ( SOCKFS_MAGIC = 0x534f434b SOCK_BUF_LOCK_MASK = 0x3 SOCK_DCCP = 0x6 + SOCK_DESTROY = 0x15 + SOCK_DIAG_BY_FAMILY = 0x14 SOCK_IOC_TYPE = 0x89 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -3260,6 +3275,7 @@ const ( TCP_MAX_WINSHIFT = 0xe TCP_MD5SIG = 0xe TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_IFINDEX = 0x2 TCP_MD5SIG_FLAG_PREFIX = 0x1 TCP_MD5SIG_MAXKEYLEN = 0x50 TCP_MSS = 0x200 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 42ff8c3c1..e4bc0bd57 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -118,6 +118,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 MAP_32BIT = 0x40 + MAP_ABOVE4G = 0x80 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 MAP_DENYWRITE = 0x800 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index dca436004..689317afd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -118,6 +118,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 MAP_32BIT = 0x40 + MAP_ABOVE4G = 0x80 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 MAP_DENYWRITE = 0x800 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index d8cae6d15..14270508b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go 
@@ -87,6 +87,7 @@ const ( FICLONE = 0x40049409 FICLONERANGE = 0x4020940d FLUSHO = 0x1000 + FPMR_MAGIC = 0x46504d52 FPSIMD_MAGIC = 0x46508001 FS_IOC_ENABLE_VERITY = 0x40806685 FS_IOC_GETFLAGS = 0x80086601 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index ccb02f240..07642c308 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -760,6 +760,39 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 8b8bb2840..923e08cb7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -228,6 +228,16 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 1b40b997b..7d73dda64 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -760,6 +760,39 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { 
+ err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 08362c1ab..057700111 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -228,6 +228,16 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 0036746ea..4740b8348 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -4605,7 +4605,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x149 + NL80211_ATTR_MAX = 0x14a NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5209,7 +5209,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1f + NL80211_FREQUENCY_ATTR_MAX = 0x20 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5703,7 +5703,7 @@ const ( NL80211_STA_FLAG_ASSOCIATED = 0x7 NL80211_STA_FLAG_AUTHENTICATED = 0x5 NL80211_STA_FLAG_AUTHORIZED = 0x1 - NL80211_STA_FLAG_MAX = 0x7 + NL80211_STA_FLAG_MAX = 0x8 NL80211_STA_FLAG_MAX_OLD_API = 0x6 NL80211_STA_FLAG_MFP = 0x4 NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2 @@ -6001,3 +6001,34 @@ type CachestatRange struct { Off uint64 Len uint64 } + +const ( + SK_MEMINFO_RMEM_ALLOC = 0x0 + SK_MEMINFO_RCVBUF = 0x1 + SK_MEMINFO_WMEM_ALLOC = 0x2 + SK_MEMINFO_SNDBUF = 0x3 + SK_MEMINFO_FWD_ALLOC = 0x4 + SK_MEMINFO_WMEM_QUEUED = 0x5 + SK_MEMINFO_OPTMEM = 0x6 + SK_MEMINFO_BACKLOG = 0x7 + SK_MEMINFO_DROPS = 0x8 + 
SK_MEMINFO_VARS = 0x9 + SKNLGRP_NONE = 0x0 + SKNLGRP_INET_TCP_DESTROY = 0x1 + SKNLGRP_INET_UDP_DESTROY = 0x2 + SKNLGRP_INET6_TCP_DESTROY = 0x3 + SKNLGRP_INET6_UDP_DESTROY = 0x4 + SK_DIAG_BPF_STORAGE_REQ_NONE = 0x0 + SK_DIAG_BPF_STORAGE_REQ_MAP_FD = 0x1 + SK_DIAG_BPF_STORAGE_REP_NONE = 0x0 + SK_DIAG_BPF_STORAGE = 0x1 + SK_DIAG_BPF_STORAGE_NONE = 0x0 + SK_DIAG_BPF_STORAGE_PAD = 0x1 + SK_DIAG_BPF_STORAGE_MAP_ID = 0x2 + SK_DIAG_BPF_STORAGE_MAP_VALUE = 0x3 +) + +type SockDiagReq struct { + Family uint8 + Protocol uint8 +} diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 26be94a8a..97651b5bd 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -68,6 +68,7 @@ type UserInfo10 struct { //sys NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo //sys NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation //sys NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree +//sys NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) = netapi32.NetUserEnum const ( // do not reorder @@ -893,7 +894,7 @@ type ACL struct { aclRevision byte sbz1 byte aclSize uint16 - aceCount uint16 + AceCount uint16 sbz2 uint16 } @@ -1086,6 +1087,27 @@ type EXPLICIT_ACCESS struct { Trustee TRUSTEE } +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header +type ACE_HEADER struct { + AceType uint8 + AceFlags uint8 + AceSize uint16 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-access_allowed_ace +type ACCESS_ALLOWED_ACE struct { + Header ACE_HEADER + Mask ACCESS_MASK + SidStart uint32 +} + +const ( + // Constants for AceType + // https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header + ACCESS_ALLOWED_ACE_TYPE = 0 + ACCESS_DENIED_ACE_TYPE = 1 +) + // This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions. type TrusteeValue uintptr @@ -1157,6 +1179,7 @@ type OBJECTS_AND_NAME struct { //sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD //sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW +//sys GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) = advapi32.GetAce // Control returns the security descriptor control bits. 
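Not part of the patch: the exported AceCount field and the new GetAce wrapper above are what make DACL inspection possible from this package. A hedged, Windows-only sketch; dacl is assumed to be a *windows.ACL obtained elsewhere (for example via GetNamedSecurityInfo):

    package winacl

    import (
        "fmt"
        "unsafe"

        "golang.org/x/sys/windows"
    )

    // dumpAllowedACEs walks a DACL using the exported AceCount and GetAce.
    func dumpAllowedACEs(dacl *windows.ACL) error {
        for i := uint32(0); i < uint32(dacl.AceCount); i++ {
            var ace *windows.ACCESS_ALLOWED_ACE
            if err := windows.GetAce(dacl, i, &ace); err != nil {
                return err
            }
            if ace.Header.AceType != windows.ACCESS_ALLOWED_ACE_TYPE {
                continue // only ACCESS_ALLOWED ACEs carry this layout
            }
            // The trustee SID is stored inline, starting at SidStart.
            sid := (*windows.SID)(unsafe.Add(unsafe.Pointer(ace), unsafe.Offsetof(ace.SidStart)))
            fmt.Printf("mask %#x granted to %s\n", ace.Mask, sid.String())
        }
        return nil
    }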
func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 5c6035ddf..eba761018 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -91,6 +91,7 @@ var ( procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") procEqualSid = modadvapi32.NewProc("EqualSid") procFreeSid = modadvapi32.NewProc("FreeSid") + procGetAce = modadvapi32.NewProc("GetAce") procGetLengthSid = modadvapi32.NewProc("GetLengthSid") procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") @@ -401,6 +402,7 @@ var ( procTransmitFile = modmswsock.NewProc("TransmitFile") procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") + procNetUserEnum = modnetapi32.NewProc("NetUserEnum") procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") procNtCreateFile = modntdll.NewProc("NtCreateFile") procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") @@ -1223,6 +1225,14 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE return } +func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) { + r0, _, _ := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + if r0 == 0 { + ret = GetLastError() + } + return +} + func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) { r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) if r1 == 0 { @@ -3486,6 +3496,14 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (nete return } +func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) { + r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) if r0 != 0 { diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index f0e0cf3cb..8f6c7f493 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -52,6 +52,8 @@ func Every(interval time.Duration) Limit { // or its associated context.Context is canceled. // // The methods AllowN, ReserveN, and WaitN consume n tokens. +// +// Limiter is safe for simultaneous use by multiple goroutines. 
type Limiter struct { mu sync.Mutex limit Limit diff --git a/vendor/golang.org/x/xerrors/LICENSE b/vendor/golang.org/x/xerrors/LICENSE index e4a47e17f..c3c8b7d2b 100644 --- a/vendor/golang.org/x/xerrors/LICENSE +++ b/vendor/golang.org/x/xerrors/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2019 The Go Authors. All rights reserved. +Copyright 2019 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go index cc5d52fbc..bd46edbe7 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v4.24.4 // source: google/rpc/code.proto package code diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index 7bd161e48..3e5621827 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v4.24.4 // source: google/rpc/error_details.proto package errdetails diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index a6b508188..6ad1b1c1d 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v4.24.4 // source: google/rpc/status.proto package status diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 608aa6e1a..0854d298e 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -66,7 +66,7 @@ How to get your contributions merged smoothly and quickly. 
- **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. - - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors + - `./scripts/vet.sh` to catch vet errors - `go test -cpu 1,4 -timeout 7m ./...` to run the tests - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md index c6672c0a3..6a8a07781 100644 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -9,6 +9,7 @@ for general contribution guidelines. ## Maintainers (in alphabetical order) +- [atollena](https://github.com/atollena), Datadog, Inc. - [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index 1f8960922..be38384ff 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -30,17 +30,20 @@ testdeps: GO111MODULE=on go get -d -v -t google.golang.org/grpc/... vet: vetdeps - ./vet.sh + ./scripts/vet.sh vetdeps: - ./vet.sh -install + ./scripts/vet.sh -install .PHONY: \ all \ build \ clean \ + deps \ proto \ test \ + testsubmodule \ testrace \ + testdeps \ vet \ vetdeps diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 1bc92248c..b572707c6 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -1,8 +1,8 @@ # gRPC-Go -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) +[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go) The [Go][] implementation of [gRPC][]: A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the @@ -10,7 +10,7 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the ## Prerequisites -- **[Go][]**: any one of the **three latest major** [releases][go-releases]. +- **[Go][]**: any one of the **two latest major** [releases][go-releases]. 
## Installation diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 712fef4d0..52d530d7a 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -121,9 +121,9 @@ func (a *Attributes) String() string { return sb.String() } -func str(x any) string { +func str(x any) (s string) { if v, ok := x.(fmt.Stringer); ok { - return v.String() + return fmt.Sprint(v) } else if v, ok := x.(string); ok { return v } diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index b6377f445..f391744f7 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -39,6 +40,8 @@ import ( var ( // m is a map from name to balancer builder. m = make(map[string]Builder) + + logger = grpclog.Component("balancer") ) // Register registers the balancer builder to the balancer map. b.Name @@ -51,7 +54,14 @@ var ( // an init() function), and is not thread-safe. If multiple Balancers are // registered with the same name, the one registered last will take effect. func Register(b Builder) { - m[strings.ToLower(b.Name())] = b + name := strings.ToLower(b.Name()) + if name != b.Name() { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name()) + } + m[name] = b } // unregisterForTesting deletes the balancer with the given name from the @@ -70,6 +80,12 @@ func init() { // Note that the compare is done in a case-insensitive fashion. // If no builder is registered with the name, nil will be returned. func Get(name string) Builder { + if strings.ToLower(name) != name { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name) + } if b, ok := m[strings.ToLower(name)]; ok { return b } @@ -217,8 +233,8 @@ type BuildOptions struct { // implementations which do not communicate with a remote load balancer // server can ignore this field. Authority string - // ChannelzParentID is the parent ClientConn's channelz ID. - ChannelzParentID *channelz.Identifier + // ChannelzParent is the parent ClientConn's channelz channel. + ChannelzParent channelz.Identifier // CustomUserAgent is the custom user agent set on the parent ClientConn. // The balancer should set the same custom user agent if it creates a // ClientConn.
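The Register/Get changes above only log a warning for mixed-case names for now; lookups still go through strings.ToLower. A minimal sketch of that behavior (noopBuilder is a hypothetical stand-in, not part of this diff):

package main

import "google.golang.org/grpc/balancer"

// noopBuilder is a hypothetical builder used only to trigger the new warnings.
type noopBuilder struct{}

func (noopBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
	return nil // a real builder would construct and return a balancer here
}

func (noopBuilder) Name() string { return "My-LB" } // mixed case

func main() {
	balancer.Register(noopBuilder{}) // logs the "case sensitive balancer registries" warning
	_ = balancer.Get("My-LB")        // logs the retrieval warning; still resolves via strings.ToLower
	_ = balancer.Get("my-lb")        // no warning; matches the lowercased registry key
}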
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go similarity index 71% rename from vendor/google.golang.org/grpc/pickfirst.go rename to vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index 2e9cf66b4..07527603f 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -16,68 +16,60 @@ * */ -package grpc +// Package pickfirst contains the pick_first load balancing policy. +package pickfirst import ( "encoding/json" "errors" "fmt" + "math/rand" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" internalgrpclog "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) +func init() { + balancer.Register(pickfirstBuilder{}) + internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } +} + +var logger = grpclog.Component("pick-first-lb") + const ( - // PickFirstBalancerName is the name of the pick_first balancer. - PickFirstBalancerName = "pick_first" - logPrefix = "[pick-first-lb %p] " + // Name is the name of the pick_first balancer. + Name = "pick_first" + logPrefix = "[pick-first-lb %p] " ) -func newPickfirstBuilder() balancer.Builder { - return &pickfirstBuilder{} -} - type pickfirstBuilder struct{} -func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { b := &pickfirstBalancer{cc: cc} b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b } -func (*pickfirstBuilder) Name() string { - return PickFirstBalancerName +func (pickfirstBuilder) Name() string { + return Name } type pfConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` // If set to true, instructs the LB policy to shuffle the order of the list - // of addresses received from the name resolver before attempting to + // of endpoints received from the name resolver before attempting to // connect to them. ShuffleAddressList bool `json:"shuffleAddressList"` } -func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - if !envconfig.PickFirstLBConfig { - // Prior to supporting loadbalancing configuration, the pick_first LB - // policy did not implement the balancer.ConfigParser interface. This - // meant that if a non-empty configuration was passed to it, the service - // config unmarshaling code would throw a warning log, but would - // continue using the pick_first LB policy. The code below ensures the - // same behavior is retained if the env var is not set. 
- if string(js) != "{}" { - logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js)) - } - return nil, nil - } - +func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { var cfg pfConfig if err := json.Unmarshal(js, &cfg); err != nil { return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) @@ -111,9 +103,14 @@ func (b *pickfirstBalancer) ResolverError(err error) { }) } +type Shuffler interface { + ShuffleAddressListForTesting(n int, swap func(i, j int)) +} + +func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } + func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { - addrs := state.ResolverState.Addresses - if len(addrs) == 0 { + if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { // The resolver reported an empty address list. Treat it like an error by // calling b.ResolverError. if b.subConn != nil { @@ -125,22 +122,49 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - // We don't have to guard this block with the env var because ParseConfig // already does so. cfg, ok := state.BalancerConfig.(pfConfig) if state.BalancerConfig != nil && !ok { return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) } - if cfg.ShuffleAddressList { - addrs = append([]resolver.Address{}, addrs...) - grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) - } if b.logger.V(2) { b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) } + var addrs []resolver.Address + if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 { + // Perform the optional shuffling described in gRFC A62. The shuffling will + // change the order of endpoints but not touch the order of the addresses + // within each endpoint. - A61 + if cfg.ShuffleAddressList { + endpoints = append([]resolver.Endpoint{}, endpoints...) + internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + } + + // "Flatten the list by concatenating the ordered list of addresses for each + // of the endpoints, in order." - A61 + for _, endpoint := range endpoints { + // "In the flattened list, interleave addresses from the two address + // families, as per RFC-8304 section 4." - A61 + // TODO: support the above language. + addrs = append(addrs, endpoint.Addresses...) + } + } else { + // Endpoints not set, process addresses until we migrate resolver + // emissions fully to Endpoints. The top channel does wrap emitted + // addresses with endpoints, however some balancers such as weighted + // target do not forward the corresponding correct endpoints down/split + // endpoints properly. Once all balancers correctly forward endpoints + // down, can delete this else conditional. + addrs = state.ResolverState.Addresses + if cfg.ShuffleAddressList { + addrs = append([]resolver.Address{}, addrs...)
+ rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + } + } + if b.subConn != nil { b.cc.UpdateAddresses(b.subConn, addrs) return nil @@ -257,7 +281,3 @@ func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { i.subConn.Connect() return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } - -func init() { - balancer.Register(newPickfirstBuilder()) -} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index f7031ad22..260255d31 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,12 +22,12 @@ package roundrobin import ( + "math/rand" "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcrand" ) // Name is the name of round_robin balancer. @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. - next: uint32(grpcrand.Intn(len(scs))), + next: uint32(rand.Intn(len(scs))), } } diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go deleted file mode 100644 index a4411c22b..000000000 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ /dev/null @@ -1,454 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "fmt" - "strings" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/balancer/gracefulswitch" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/resolver" -) - -type ccbMode int - -const ( - ccbModeActive = iota - ccbModeIdle - ccbModeClosed - ccbModeExitingIdle -) - -// ccBalancerWrapper sits between the ClientConn and the Balancer. -// -// ccBalancerWrapper implements methods corresponding to the ones on the -// balancer.Balancer interface. The ClientConn is free to call these methods -// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn -// to the Balancer happen synchronously and in order. -// -// ccBalancerWrapper also implements the balancer.ClientConn interface and is -// passed to the Balancer implementations. It invokes unexported methods on the -// ClientConn to handle these calls from the Balancer. -// -// It uses the gracefulswitch.Balancer internally to ensure that balancer -// switches happen in a graceful manner. 
-type ccBalancerWrapper struct { - // The following fields are initialized when the wrapper is created and are - // read-only afterwards, and therefore can be accessed without a mutex. - cc *ClientConn - opts balancer.BuildOptions - - // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a - // mutually exclusive manner as they are scheduled in the serializer. Fields - // accessed *only* in these serializer callbacks, can therefore be accessed - // without a mutex. - balancer *gracefulswitch.Balancer - curBalancerName string - - // mu guards access to the below fields. Access to the serializer and its - // cancel function needs to be mutex protected because they are overwritten - // when the wrapper exits idle mode. - mu sync.Mutex - serializer *grpcsync.CallbackSerializer // To serialize all outoing calls. - serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time. - mode ccbMode // Tracks the current mode of the wrapper. -} - -// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer -// is not created until the switchTo() method is invoked. -func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { - ctx, cancel := context.WithCancel(context.Background()) - ccb := &ccBalancerWrapper{ - cc: cc, - opts: bopts, - serializer: grpcsync.NewCallbackSerializer(ctx), - serializerCancel: cancel, - } - ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) - return ccb -} - -// updateClientConnState is invoked by grpc to push a ClientConnState update to -// the underlying balancer. -func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.mu.Lock() - errCh := make(chan error, 1) - // Here and everywhere else where Schedule() is called, it is done with the - // lock held. But the lock guards only the scheduling part. The actual - // callback is called asynchronously without the lock being held. - ok := ccb.serializer.Schedule(func(_ context.Context) { - errCh <- ccb.balancer.UpdateClientConnState(*ccs) - }) - if !ok { - // If we are unable to schedule a function with the serializer, it - // indicates that it has been closed. A serializer is only closed when - // the wrapper is closed or is in idle. - ccb.mu.Unlock() - return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer") - } - ccb.mu.Unlock() - - // We get here only if the above call to Schedule succeeds, in which case it - // is guaranteed that the scheduled function will run. Therefore it is safe - // to block on this channel. - err := <-errCh - if logger.V(2) && err != nil { - logger.Infof("error from balancer.UpdateClientConnState: %v", err) - } - return err -} - -// updateSubConnState is invoked by grpc to push a subConn state update to the -// underlying balancer. -func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { - // Even though it is optional for balancers, gracefulswitch ensures - // opts.StateListener is set, so this cannot ever be nil. 
- sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) - }) - ccb.mu.Unlock() -} - -func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { - ccb.balancer.ResolverError(err) - }) - ccb.mu.Unlock() -} - -// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the -// LB policy identified by name. -// -// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the -// first good update from the name resolver, it determines the LB policy to use -// and invokes the switchTo() method. Upon receipt of every subsequent update -// from the name resolver, it invokes this method. -// -// the ccBalancerWrapper keeps track of the current LB policy name, and skips -// the graceful balancer switching process if the name does not change. -func (ccb *ccBalancerWrapper) switchTo(name string) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { - // TODO: Other languages use case-sensitive balancer registries. We should - // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. - if strings.EqualFold(ccb.curBalancerName, name) { - return - } - ccb.buildLoadBalancingPolicy(name) - }) - ccb.mu.Unlock() -} - -// buildLoadBalancingPolicy performs the following: -// - retrieve a balancer builder for the given name. Use the default LB -// policy, pick_first, if no LB policy with name is found in the registry. -// - instruct the gracefulswitch balancer to switch to the above builder. This -// will actually build the new balancer. -// - update the `curBalancerName` field -// -// Must be called from a serializer callback. -func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { - builder := balancer.Get(name) - if builder == nil { - channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) - builder = newPickfirstBuilder() - } else { - channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) - } - - if err := ccb.balancer.SwitchTo(builder); err != nil { - channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) - return - } - ccb.curBalancerName = builder.Name() -} - -func (ccb *ccBalancerWrapper) close() { - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") - ccb.closeBalancer(ccbModeClosed) -} - -// enterIdleMode is invoked by grpc when the channel enters idle mode upon -// expiry of idle_timeout. This call blocks until the balancer is closed. -func (ccb *ccBalancerWrapper) enterIdleMode() { - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode") - ccb.closeBalancer(ccbModeIdle) -} - -// closeBalancer is invoked when the channel is being closed or when it enters -// idle mode upon expiry of idle_timeout. -func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { - ccb.mu.Lock() - if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle { - ccb.mu.Unlock() - return - } - - ccb.mode = m - done := ccb.serializer.Done() - b := ccb.balancer - ok := ccb.serializer.Schedule(func(_ context.Context) { - // Close the serializer to ensure that no more calls from gRPC are sent - // to the balancer. 
- ccb.serializerCancel() - // Empty the current balancer name because we don't have a balancer - // anymore and also so that we act on the next call to switchTo by - // creating a new balancer specified by the new resolver. - ccb.curBalancerName = "" - }) - if !ok { - ccb.mu.Unlock() - return - } - ccb.mu.Unlock() - - // Give enqueued callbacks a chance to finish before closing the balancer. - <-done - b.Close() -} - -// exitIdleMode is invoked by grpc when the channel exits idle mode either -// because of an RPC or because of an invocation of the Connect() API. This -// recreates the balancer that was closed previously when entering idle mode. -// -// If the channel is not in idle mode, we know for a fact that we are here as a -// result of the user calling the Connect() method on the ClientConn. In this -// case, we can simply forward the call to the underlying balancer, instructing -// it to reconnect to the backends. -func (ccb *ccBalancerWrapper) exitIdleMode() { - ccb.mu.Lock() - if ccb.mode == ccbModeClosed { - // Request to exit idle is a no-op when wrapper is already closed. - ccb.mu.Unlock() - return - } - - if ccb.mode == ccbModeIdle { - // Recreate the serializer which was closed when we entered idle. - ctx, cancel := context.WithCancel(context.Background()) - ccb.serializer = grpcsync.NewCallbackSerializer(ctx) - ccb.serializerCancel = cancel - } - - // The ClientConn guarantees that mutual exclusion between close() and - // exitIdleMode(), and since we just created a new serializer, we can be - // sure that the below function will be scheduled. - done := make(chan struct{}) - ccb.serializer.Schedule(func(_ context.Context) { - defer close(done) - - ccb.mu.Lock() - defer ccb.mu.Unlock() - - if ccb.mode != ccbModeIdle { - ccb.balancer.ExitIdle() - return - } - - // Gracefulswitch balancer does not support a switchTo operation after - // being closed. Hence we need to create a new one here. - ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) - ccb.mode = ccbModeActive - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode") - - }) - ccb.mu.Unlock() - - <-done -} - -func (ccb *ccBalancerWrapper) isIdleOrClosed() bool { - ccb.mu.Lock() - defer ccb.mu.Unlock() - return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed -} - -func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - if ccb.isIdleOrClosed() { - return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle") - } - - if len(addrs) == 0 { - return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") - } - ac, err := ccb.cc.newAddrConn(addrs, opts) - if err != nil { - channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) - return nil, err - } - acbw := &acBalancerWrapper{ - ccb: ccb, - ac: ac, - producers: make(map[balancer.ProducerBuilder]*refCountedProducer), - stateListener: opts.StateListener, - } - ac.acbw = acbw - return acbw, nil -} - -func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - // The graceful switch balancer will never call this. 
- logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") -} - -func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { - if ccb.isIdleOrClosed() { - return - } - - acbw, ok := sc.(*acBalancerWrapper) - if !ok { - return - } - acbw.UpdateAddresses(addrs) -} - -func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - if ccb.isIdleOrClosed() { - return - } - - // Update picker before updating state. Even though the ordering here does - // not matter, it can lead to multiple calls of Pick in the common start-up - // case where we wait for ready and then perform an RPC. If the picker is - // updated later, we could call the "connecting" picker when the state is - // updated, and then call the "ready" picker after the picker gets updated. - ccb.cc.blockingpicker.updatePicker(s.Picker) - ccb.cc.csMgr.updateState(s.ConnectivityState) -} - -func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { - if ccb.isIdleOrClosed() { - return - } - - ccb.cc.resolveNow(o) -} - -func (ccb *ccBalancerWrapper) Target() string { - return ccb.cc.target -} - -// acBalancerWrapper is a wrapper on top of ac for balancers. -// It implements balancer.SubConn interface. -type acBalancerWrapper struct { - ac *addrConn // read-only - ccb *ccBalancerWrapper // read-only - stateListener func(balancer.SubConnState) - - mu sync.Mutex - producers map[balancer.ProducerBuilder]*refCountedProducer -} - -func (acbw *acBalancerWrapper) String() string { - return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) -} - -func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { - acbw.ac.updateAddrs(addrs) -} - -func (acbw *acBalancerWrapper) Connect() { - go acbw.ac.connect() -} - -func (acbw *acBalancerWrapper) Shutdown() { - ccb := acbw.ccb - if ccb.isIdleOrClosed() { - // It it safe to ignore this call when the balancer is closed or in idle - // because the ClientConn takes care of closing the connections. - // - // Not returning early from here when the balancer is closed or in idle - // leads to a deadlock though, because of the following sequence of - // calls when holding cc.mu: - // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> - // ccb.RemoveAddrConn --> cc.removeAddrConn - return - } - - ccb.cc.removeAddrConn(acbw.ac, errConnDrain) -} - -// NewStream begins a streaming RPC on the addrConn. If the addrConn is not -// ready, blocks until it is or ctx expires. Returns an error when the context -// expires or the addrConn is shut down. -func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - transport, err := acbw.ac.getTransport(ctx) - if err != nil { - return nil, err - } - return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) -} - -// Invoke performs a unary RPC. If the addrConn is not ready, returns -// errSubConnNotReady. -func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error { - cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) 
- if err != nil { - return err - } - if err := cs.SendMsg(args); err != nil { - return err - } - return cs.RecvMsg(reply) -} - -type refCountedProducer struct { - producer balancer.Producer - refs int // number of current refs to the producer - close func() // underlying producer's close function -} - -func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { - acbw.mu.Lock() - defer acbw.mu.Unlock() - - // Look up existing producer from this builder. - pData := acbw.producers[pb] - if pData == nil { - // Not found; create a new one and add it to the producers map. - p, close := pb.Build(acbw) - pData = &refCountedProducer{producer: p, close: close} - acbw.producers[pb] = pData - } - // Account for this new reference. - pData.refs++ - - // Return a cleanup function wrapped in a OnceFunc to remove this reference - // and delete the refCountedProducer from the map if the total reference - // count goes to zero. - unref := func() { - acbw.mu.Lock() - pData.refs-- - if pData.refs == 0 { - defer pData.close() // Run outside the acbw mutex - delete(acbw.producers, pb) - } - acbw.mu.Unlock() - } - return pData.producer, grpcsync.OnceFunc(unref) -} diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go new file mode 100644 index 000000000..4161fdf47 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -0,0 +1,341 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" +) + +// ccBalancerWrapper sits between the ClientConn and the Balancer. +// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. The ClientConn is free to call these methods +// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn +// to the Balancer happen in order by performing them in the serializer, without +// any mutexes held. +// +// ccBalancerWrapper also implements the balancer.ClientConn interface and is +// passed to the Balancer implementations. It invokes unexported methods on the +// ClientConn to handle these calls from the Balancer. +// +// It uses the gracefulswitch.Balancer internally to ensure that balancer +// switches happen in a graceful manner. +type ccBalancerWrapper struct { + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. 
+ cc *ClientConn + opts balancer.BuildOptions + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + // The following fields are only accessed within the serializer or during + // initialization. + curBalancerName string + balancer *gracefulswitch.Balancer + + // The following field is protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + closed bool +} + +// newCCBalancerWrapper creates a new balancer wrapper in idle state. The +// underlying balancer is not created until the updateClientConnState() method +// is invoked. +func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(cc.ctx) + ccb := &ccBalancerWrapper{ + cc: cc, + opts: balancer.BuildOptions{ + DialCreds: cc.dopts.copts.TransportCredentials, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParent: cc.channelz, + Target: cc.parsedTarget, + }, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } + ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) + return ccb +} + +// updateClientConnState is invoked by grpc to push a ClientConnState update to +// the underlying balancer. This is always executed from the serializer, so +// it is safe to call into the balancer here. +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + errCh := make(chan error) + ok := ccb.serializer.Schedule(func(ctx context.Context) { + defer close(errCh) + if ctx.Err() != nil || ccb.balancer == nil { + return + } + name := gracefulswitch.ChildName(ccs.BalancerConfig) + if ccb.curBalancerName != name { + ccb.curBalancerName = name + channelz.Infof(logger, ccb.cc.channelz, "Channel switches to new LB policy %q", name) + } + err := ccb.balancer.UpdateClientConnState(*ccs) + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) + } + errCh <- err + }) + if !ok { + return nil + } + return <-errCh +} + +// resolverError is invoked by grpc to push a resolver error to the underlying +// balancer. The call to the balancer is executed from the serializer. +func (ccb *ccBalancerWrapper) resolverError(err error) { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } + ccb.balancer.ResolverError(err) + }) +} + +// close initiates async shutdown of the wrapper. cc.mu must be held when +// calling this function. To determine the wrapper has finished shutting down, +// the channel should block on ccb.serializer.Done() without cc.mu held. +func (ccb *ccBalancerWrapper) close() { + ccb.mu.Lock() + ccb.closed = true + ccb.mu.Unlock() + channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing") + ccb.serializer.Schedule(func(context.Context) { + if ccb.balancer == nil { + return + } + ccb.balancer.Close() + ccb.balancer = nil + }) + ccb.serializerCancel() +} + +// exitIdle invokes the balancer's exitIdle method in the serializer. 
+func (ccb *ccBalancerWrapper) exitIdle() { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } + ccb.balancer.ExitIdle() + }) +} + +func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + ccb.cc.mu.Lock() + defer ccb.cc.mu.Unlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() + return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed") + } + ccb.mu.Unlock() + + if len(addrs) == 0 { + return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") + } + ac, err := ccb.cc.newAddrConnLocked(addrs, opts) + if err != nil { + channelz.Warningf(logger, ccb.cc.channelz, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) + return nil, err + } + acbw := &acBalancerWrapper{ + ccb: ccb, + ac: ac, + producers: make(map[balancer.ProducerBuilder]*refCountedProducer), + stateListener: opts.StateListener, + } + ac.acbw = acbw + return acbw, nil +} + +func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { + // The graceful switch balancer will never call this. + logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly", sc) +} + +func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + acbw.UpdateAddresses(addrs) +} + +func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { + ccb.cc.mu.Lock() + defer ccb.cc.mu.Unlock() + if ccb.cc.conns == nil { + // The CC has been closed; ignore this update. + return + } + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() + return + } + ccb.mu.Unlock() + // Update picker before updating state. Even though the ordering here does + // not matter, it can lead to multiple calls of Pick in the common start-up + // case where we wait for ready and then perform an RPC. If the picker is + // updated later, we could call the "connecting" picker when the state is + // updated, and then call the "ready" picker after the picker gets updated. + + // Note that there is no need to check if the balancer wrapper was closed, + // as we know the graceful switch LB policy will not call cc if it has been + // closed. + ccb.cc.pickerWrapper.updatePicker(s.Picker) + ccb.cc.csMgr.updateState(s.ConnectivityState) +} + +func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { + ccb.cc.mu.RLock() + defer ccb.cc.mu.RUnlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() + return + } + ccb.mu.Unlock() + ccb.cc.resolveNowLocked(o) +} + +func (ccb *ccBalancerWrapper) Target() string { + return ccb.cc.target +} + +// acBalancerWrapper is a wrapper on top of ac for balancers. +// It implements balancer.SubConn interface. +type acBalancerWrapper struct { + ac *addrConn // read-only + ccb *ccBalancerWrapper // read-only + stateListener func(balancer.SubConnState) + + mu sync.Mutex + producers map[balancer.ProducerBuilder]*refCountedProducer +} + +// updateState is invoked by grpc to push a subConn state update to the +// underlying balancer. +func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { + acbw.ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + // Even though it is optional for balancers, gracefulswitch ensures + // opts.StateListener is set, so this cannot ever be nil. + // TODO: delete this comment when UpdateSubConnState is removed.
+ acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + }) +} + +func (acbw *acBalancerWrapper) String() string { + return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelz.ID) +} + +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.ac.updateAddrs(addrs) +} + +func (acbw *acBalancerWrapper) Connect() { + go acbw.ac.connect() +} + +func (acbw *acBalancerWrapper) Shutdown() { + acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain) +} + +// NewStream begins a streaming RPC on the addrConn. If the addrConn is not +// ready, blocks until it is or ctx expires. Returns an error when the context +// expires or the addrConn is shut down. +func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + transport, err := acbw.ac.getTransport(ctx) + if err != nil { + return nil, err + } + return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) +} + +// Invoke performs a unary RPC. If the addrConn is not ready, returns +// errSubConnNotReady. +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error { + cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(args); err != nil { + return err + } + return cs.RecvMsg(reply) +} + +type refCountedProducer struct { + producer balancer.Producer + refs int // number of current refs to the producer + close func() // underlying producer's close function +} + +func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + + // Look up existing producer from this builder. + pData := acbw.producers[pb] + if pData == nil { + // Not found; create a new one and add it to the producers map. + p, close := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: close} + acbw.producers[pb] = pData + } + // Account for this new reference. + pData.refs++ + + // Return a cleanup function wrapped in a OnceFunc to remove this reference + // and delete the refCountedProducer from the map if the total reference + // count goes to zero. + unref := func() { + acbw.mu.Lock() + pData.refs-- + if pData.refs == 0 { + defer pData.close() // Run outside the acbw mutex + delete(acbw.producers, pb) + } + acbw.mu.Unlock() + } + return pData.producer, grpcsync.OnceFunc(unref) +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 595480112..63c639e4f 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.22.0 +// protoc-gen-go v1.34.1 +// protoc v4.25.2 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 @@ -430,7 +430,7 @@ type ClientHeader struct { MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` // A single process may be used to run multiple virtual // servers with different identities. - // The authority is the name of such a server identitiy. + // The authority is the name of such a server identity. // It is typically a portion of the URI in the form of // <host> or <host>:<port> .
Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index ff7fea102..423be7b43 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -31,15 +31,13 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" - "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -48,9 +46,9 @@ import ( "google.golang.org/grpc/status" _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. - _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. + _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. ) const ( @@ -69,12 +67,14 @@ var ( errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. errConnClosing = errors.New("grpc: the connection is closing") - // errConnIdling indicates the the connection is being closed as the channel + // errConnIdling indicates the connection is being closed as the channel // is moving to an idle mode due to inactivity. errConnIdling = errors.New("grpc: the connection is closing due to channel idleness") // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" + // PickFirstBalancerName is the name of the pick_first balancer. + PickFirstBalancerName = pickfirst.Name ) // The following errors are returned from Dial and DialContext @@ -103,11 +103,6 @@ const ( defaultReadBufSize = 32 * 1024 ) -// Dial creates a client connection to the given target. -func Dial(target string, opts ...DialOption) (*ClientConn, error) { - return DialContext(context.Background(), target, opts...) -} - type defaultConfigSelector struct { sc *ServiceConfig } @@ -119,47 +114,30 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires }, nil } -// DialContext creates a client connection to the given target. By default, it's -// a non-blocking dial (the function won't wait for connections to be -// established, and connecting happens in the background). To make it a blocking -// dial, use WithBlock() dial option. -// -// In the non-blocking case, the ctx does not act against the connection. It -// only controls the setup steps. -// -// In the blocking case, ctx can be used to cancel or expire the pending -// connection. Once this function returns, the cancellation and expiration of -// ctx will be noop. Users should call ClientConn.Close to terminate all the -// pending operations after this function returns. +// NewClient creates a new gRPC "channel" for the target URI provided. No I/O +// is performed. 
Use of the ClientConn for RPCs will automatically cause it to +// connect. Connect may be used to manually create a connection, but for most +// users this is unnecessary. // // The target name syntax is defined in -// https://github.com/grpc/grpc/blob/master/doc/naming.md. -// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. -func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { +// https://github.com/grpc/grpc/blob/master/doc/naming.md. e.g. to use dns +// resolver, a "dns:///" prefix should be applied to the target. +// +// The DialOptions returned by WithBlock, WithTimeout, +// WithReturnConnectionError, and FailOnNonTempDialError are ignored by this +// function. +func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, conns: make(map[*addrConn]struct{}), dopts: defaultDialOptions(), - czData: new(channelzData), } - // We start the channel off in idle mode, but kick it out of idle at the end - // of this method, instead of waiting for the first RPC. Other gRPC - // implementations do wait for the first RPC to kick the channel out of - // idle. But doing so would be a major behavior change for our users who are - // used to seeing the channel active after Dial. - // - // Taking this approach of kicking it out of idle at the end of this method - // allows us to share the code between channel creation and exiting idle - // mode. This will also make it easy for us to switch to starting the - // channel off in idle, if at all we ever get to do that. - cc.idlenessState = ccIdlenessStateIdle - cc.retryThrottler.Store((*retryThrottler)(nil)) cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) - cc.exitIdleCond = sync.NewCond(&cc.mu) + // Apply dial options. disableGlobalOpts := false for _, opt := range opts { if _, ok := opt.(*disableGlobalDialOptions); ok { @@ -178,26 +156,24 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * opt.apply(&cc.dopts) } - chainUnaryClientInterceptors(cc) - chainStreamClientInterceptors(cc) - - defer func() { - if err != nil { - cc.Close() - } - }() + // Determine the resolver to use. + if err := cc.initParsedTargetAndResolverBuilder(); err != nil { + return nil, err + } - // Register ClientConn with channelz. - cc.channelzRegistration(target) + for _, opt := range globalPerTargetDialOptions { + opt.DialOptionForTarget(cc.parsedTarget.URL).apply(&cc.dopts) + } - cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) + chainUnaryClientInterceptors(cc) + chainStreamClientInterceptors(cc) if err := cc.validateTransportCredentials(); err != nil { return nil, err } if cc.dopts.defaultServiceConfigRawJSON != nil { - scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON, cc.dopts.maxCallAttempts) if scpr.Err != nil { return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) } @@ -205,10 +181,68 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } cc.mkp = cc.dopts.copts.KeepaliveParams - if cc.dopts.copts.UserAgent != "" { - cc.dopts.copts.UserAgent += " " + grpcUA - } else { - cc.dopts.copts.UserAgent = grpcUA + if err = cc.initAuthority(); err != nil { + return nil, err + } + + // Register ClientConn with channelz. 
Note that this is only done after + // channel creation cannot fail. + cc.channelzRegistration(target) + channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", cc.parsedTarget) + channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) + + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) + cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. + cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + return cc, nil +} + +// Dial calls DialContext(context.Background(), target, opts...). +// +// Deprecated: use NewClient instead. Will be supported throughout 1.x. +func Dial(target string, opts ...DialOption) (*ClientConn, error) { + return DialContext(context.Background(), target, opts...) +} + +// DialContext calls NewClient and then exits idle mode. If WithBlock(true) is +// used, it calls Connect and WaitForStateChange until either the context +// expires or the state of the ClientConn is Ready. +// +// One subtle difference between NewClient and Dial and DialContext is that the +// former uses "dns" as the default name resolver, while the latter use +// "passthrough" for backward compatibility. This distinction should not matter +// to most users, but could matter to legacy users that specify a custom dialer +// and expect it to receive the target string directly. +// +// Deprecated: use NewClient instead. Will be supported throughout 1.x. +func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + // At the end of this method, we kick the channel out of idle, rather than + // waiting for the first rpc. + opts = append([]DialOption{withDefaultScheme("passthrough")}, opts...) + cc, err := NewClient(target, opts...) + if err != nil { + return nil, err + } + + // We start the channel off in idle mode, but kick it out of idle now, + // instead of waiting for the first RPC. This is the legacy behavior of + // Dial. + defer func() { + if err != nil { + cc.Close() + } + }() + + // This creates the name resolver, load balancer, etc. + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + return nil, err + } + + // Return now for non-blocking dials. + if !cc.dopts.block { + return cc, nil } if cc.dopts.timeout > 0 { @@ -231,49 +265,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if cc.dopts.bs == nil { - cc.dopts.bs = backoff.DefaultExponential - } - - // Determine the resolver to use. - if err := cc.parseTargetAndFindResolver(); err != nil { - return nil, err - } - if err = cc.determineAuthority(); err != nil { - return nil, err - } - - if cc.dopts.scChan != nil { - // Blocking wait for the initial service config. - select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - } - case <-ctx.Done(): - return nil, ctx.Err() - } - } - if cc.dopts.scChan != nil { - go cc.scWatcher() - } - - // This creates the name resolver, load balancer, blocking picker etc. - if err := cc.exitIdleMode(); err != nil { - return nil, err - } - - // Configure idleness support with configured idle timeout or default idle - // timeout duration. Idleness can be explicitly disabled by the user, by - // setting the dial option to 0. 
- cc.idlenessMgr = idle.NewManager(idle.ManagerOptions{Enforcer: (*idler)(cc), Timeout: cc.dopts.idleTimeout, Logger: logger}) - - // Return early for non-blocking dials. - if !cc.dopts.block { - return cc, nil - } - // A blocking dial blocks until the clientConn is ready. for { s := cc.GetState() @@ -305,23 +296,23 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * // addTraceEvent is a helper method to add a trace event on the channel. If the // channel is a nested one, the same event is also added on the parent channel. func (cc *ClientConn) addTraceEvent(msg string) { - ted := &channelz.TraceEventDesc{ + ted := &channelz.TraceEvent{ Desc: fmt.Sprintf("Channel %s", msg), Severity: channelz.CtInfo, } - if cc.dopts.channelzParentID != nil { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg), + if cc.dopts.channelzParent != nil { + ted.Parent = &channelz.TraceEvent{ + Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelz.ID, msg), Severity: channelz.CtInfo, } } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + channelz.AddTraceEvent(logger, cc.channelz, 0, ted) } type idler ClientConn -func (i *idler) EnterIdleMode() error { - return (*ClientConn)(i).enterIdleMode() +func (i *idler) EnterIdleMode() { + (*ClientConn)(i).enterIdleMode() } func (i *idler) ExitIdleMode() error { @@ -329,117 +320,71 @@ func (i *idler) ExitIdleMode() error { } // exitIdleMode moves the channel out of idle mode by recreating the name -// resolver and load balancer. -func (cc *ClientConn) exitIdleMode() error { +// resolver and load balancer. This should never be called directly; use +// cc.idlenessMgr.ExitIdleMode instead. +func (cc *ClientConn) exitIdleMode() (err error) { cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return errConnClosing } - if cc.idlenessState != ccIdlenessStateIdle { - cc.mu.Unlock() - channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState) - return nil - } - - defer func() { - // When Close() and exitIdleMode() race against each other, one of the - // following two can happen: - // - Close() wins the race and runs first. exitIdleMode() runs after, and - // sees that the ClientConn is already closed and hence returns early. - // - exitIdleMode() wins the race and runs first and recreates the balancer - // and releases the lock before recreating the resolver. If Close() runs - // in this window, it will wait for exitIdleMode to complete. - // - // We achieve this synchronization using the below condition variable. 
- cc.mu.Lock() - cc.idlenessState = ccIdlenessStateActive - cc.exitIdleCond.Signal() - cc.mu.Unlock() - }() - - cc.idlenessState = ccIdlenessStateExitingIdle - exitedIdle := false - if cc.blockingpicker == nil { - cc.blockingpicker = newPickerWrapper(cc.dopts.copts.StatsHandlers) - } else { - cc.blockingpicker.exitIdleMode() - exitedIdle = true - } - - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() - } - if cc.balancerWrapper == nil { - cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, - }) - } else { - cc.balancerWrapper.exitIdleMode() - } - cc.firstResolveEvent = grpcsync.NewEvent() cc.mu.Unlock() // This needs to be called without cc.mu because this builds a new resolver - // which might update state or report error inline which needs to be handled - // by cc.updateResolverState() which also grabs cc.mu. - if err := cc.initResolverWrapper(credsClone); err != nil { + // which might update state or report error inline, which would then need to + // acquire cc.mu. + if err := cc.resolverWrapper.start(); err != nil { return err } - if exitedIdle { - cc.addTraceEvent("exiting idle mode") - } + cc.addTraceEvent("exiting idle mode") return nil } +// initIdleStateLocked initializes common state to how it should be while idle. +func (cc *ClientConn) initIdleStateLocked() { + cc.resolverWrapper = newCCResolverWrapper(cc) + cc.balancerWrapper = newCCBalancerWrapper(cc) + cc.firstResolveEvent = grpcsync.NewEvent() + // cc.conns == nil is a proxy for the ClientConn being closed. So, instead + // of setting it to nil here, we recreate the map. This also means that we + // don't have to do this when exiting idle mode. + cc.conns = make(map[*addrConn]struct{}) +} + // enterIdleMode puts the channel in idle mode, and as part of it shuts down the -// name resolver, load balancer and any subchannels. -func (cc *ClientConn) enterIdleMode() error { +// name resolver, load balancer, and any subchannels. This should never be +// called directly; use cc.idlenessMgr.EnterIdleMode instead. +func (cc *ClientConn) enterIdleMode() { cc.mu.Lock() + if cc.conns == nil { cc.mu.Unlock() - return ErrClientConnClosing - } - if cc.idlenessState != ccIdlenessStateActive { - channelz.Errorf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState) - cc.mu.Unlock() - return nil + return } - // cc.conns == nil is a proxy for the ClientConn being closed. So, instead - // of setting it to nil here, we recreate the map. This also means that we - // don't have to do this when exiting idle mode. conns := cc.conns - cc.conns = make(map[*addrConn]struct{}) - // TODO: Currently, we close the resolver wrapper upon entering idle mode - // and create a new one upon exiting idle mode. This means that the - // `cc.resolverWrapper` field would be overwritten everytime we exit idle - // mode. While this means that we need to hold `cc.mu` when accessing - // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should - // try to do the same for the balancer and picker wrappers too. 
- cc.resolverWrapper.close() - cc.blockingpicker.enterIdleMode() - cc.balancerWrapper.enterIdleMode() + rWrapper := cc.resolverWrapper + rWrapper.close() + cc.pickerWrapper.reset() + bWrapper := cc.balancerWrapper + bWrapper.close() cc.csMgr.updateState(connectivity.Idle) - cc.idlenessState = ccIdlenessStateIdle + cc.addTraceEvent("entering idle mode") + + cc.initIdleStateLocked() + cc.mu.Unlock() - go func() { - cc.addTraceEvent("entering idle mode") - for ac := range conns { - ac.tearDown(errConnIdling) - } - }() - return nil + // Block until the name resolver and LB policy are closed. + <-rWrapper.serializer.Done() + <-bWrapper.serializer.Done() + + // Close all subchannels after the LB policy is closed. + for ac := range conns { + ac.tearDown(errConnIdling) + } } // validateTransportCredentials performs a series of checks on the configured @@ -478,14 +423,15 @@ func (cc *ClientConn) validateTransportCredentials() error { } // channelzRegistration registers the newly created ClientConn with channelz and -// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`. -// A channelz trace event is emitted for ClientConn creation. If the newly -// created ClientConn is a nested one, i.e a valid parent ClientConn ID is -// specified via a dial option, the trace event is also added to the parent. +// stores the returned identifier in `cc.channelz`. A channelz trace event is +// emitted for ClientConn creation. If the newly created ClientConn is a nested +// one, i.e a valid parent ClientConn ID is specified via a dial option, the +// trace event is also added to the parent. // // Doesn't grab cc.mu as this method is expected to be called only at Dial time. func (cc *ClientConn) channelzRegistration(target string) { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + parentChannel, _ := cc.dopts.channelzParent.(*channelz.Channel) + cc.channelz = channelz.RegisterChannel(parentChannel, target) cc.addTraceEvent("created") } @@ -552,11 +498,11 @@ func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStr } // newConnectivityStateManager creates an connectivityStateManager with -// the specified id. -func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager { +// the specified channel. +func newConnectivityStateManager(ctx context.Context, channel *channelz.Channel) *connectivityStateManager { return &connectivityStateManager{ - channelzID: id, - pubSub: grpcsync.NewPubSub(ctx), + channelz: channel, + pubSub: grpcsync.NewPubSub(ctx), } } @@ -570,7 +516,7 @@ type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} - channelzID *channelz.Identifier + channelz *channelz.Channel pubSub *grpcsync.PubSub } @@ -587,9 +533,10 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state + csm.channelz.ChannelMetrics.State.Store(&state) csm.pubSub.Publish(state) - channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) + channelz.Infof(logger, csm.channelz, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. close(csm.notifyChan) @@ -643,72 +590,40 @@ type ClientConn struct { cancel context.CancelFunc // Cancelled on close. // The following are initialized at dial time, and are read-only after that. - target string // User's dial target. 
- parsedTarget resolver.Target // See parseTargetAndFindResolver(). - authority string // See determineAuthority(). - dopts dialOptions // Default and user specified dial options. - channelzID *channelz.Identifier // Channelz identifier for the channel. - resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). - balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. - idlenessMgr idle.Manager + target string // User's dial target. + parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). + authority string // See initAuthority(). + dopts dialOptions // Default and user specified dial options. + channelz *channelz.Channel // Channelz object. + resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). + idlenessMgr *idle.Manager // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. csMgr *connectivityStateManager - blockingpicker *pickerWrapper + pickerWrapper *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector - czData *channelzData retryThrottler atomic.Value // Updated from service config. - // firstResolveEvent is used to track whether the name resolver sent us at - // least one update. RPCs block on this event. - firstResolveEvent *grpcsync.Event - // mu protects the following fields. // TODO: split mu so the same mutex isn't used for everything. mu sync.RWMutex - resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + resolverWrapper *ccResolverWrapper // Always recreated whenever entering idle to simplify Close. + balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close. sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. - idlenessState ccIdlenessState // Tracks idleness state of the channel. - exitIdleCond *sync.Cond // Signalled when channel exits idle. + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. May be accessed without mu + // if we know we cannot be asked to enter idle mode while accessing it (e.g. + // when the idle manager has already been closed, or if we are already + // entering idle mode). + firstResolveEvent *grpcsync.Event lceMu sync.Mutex // protects lastConnectionError lastConnectionError error } -// ccIdlenessState tracks the idleness state of the channel. -// -// Channels start off in `active` and move to `idle` after a period of -// inactivity. When moving back to `active` upon an incoming RPC, they -// transition through `exiting_idle`. This state is useful for synchronization -// with Close(). -// -// This state tracking is mostly for self-protection. The idlenessManager is -// expected to keep track of the state as well, and is expected not to call into -// the ClientConn unnecessarily. -type ccIdlenessState int8 - -const ( - ccIdlenessStateActive ccIdlenessState = iota - ccIdlenessStateIdle - ccIdlenessStateExitingIdle -) - -func (s ccIdlenessState) String() string { - switch s { - case ccIdlenessStateActive: - return "active" - case ccIdlenessStateIdle: - return "idle" - case ccIdlenessStateExitingIdle: - return "exitingIdle" - default: - return "unknown" - } -} - // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. 
A true value is returned in former case and false in latter. // @@ -748,29 +663,15 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - cc.exitIdleMode() + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + cc.addTraceEvent(err.Error()) + return + } // If the ClientConn was not in idle mode, we need to call ExitIdle on the // LB policy so that connections can be created. - cc.balancerWrapper.exitIdleMode() -} - -func (cc *ClientConn) scWatcher() { - for { - select { - case sc, ok := <-cc.dopts.scChan: - if !ok { - return - } - cc.mu.Lock() - // TODO: load balance policy runtime change is ignored. - // We may revisit this decision in the future. - cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - cc.mu.Unlock() - case <-cc.ctx.Done(): - return - } - } + cc.mu.Lock() + cc.balancerWrapper.exitIdle() + cc.mu.Unlock() } // waitForResolvedAddrs blocks until the resolver has provided addresses or the @@ -795,7 +696,7 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { var emptyServiceConfig *ServiceConfig func init() { - cfg := parseServiceConfig("{}") + cfg := parseServiceConfig("{}", defaultMaxCallAttempts) if cfg.Err != nil { panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) } @@ -804,23 +705,28 @@ func init() { internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { return cc.csMgr.pubSub.Subscribe(s) } + internal.EnterIdleModeForTesting = func(cc *ClientConn) { + cc.idlenessMgr.EnterIdleModeForTesting() + } + internal.ExitIdleModeForTesting = func(cc *ClientConn) error { + return cc.idlenessMgr.ExitIdleMode() + } } -func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { +func (cc *ClientConn) maybeApplyDefaultServiceConfig() { if cc.sc != nil { - cc.applyServiceConfigAndBalancer(cc.sc, nil, addrs) + cc.applyServiceConfigAndBalancer(cc.sc, nil) return } if cc.dopts.defaultServiceConfig != nil { - cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}, addrs) + cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}) } else { - cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}, addrs) + cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}) } } -func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { +func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error { defer cc.firstResolveEvent.Fire() - cc.mu.Lock() // Check if the ClientConn is already closed. Some fields (e.g. // balancerWrapper) are set to nil when closing the ClientConn, and could // cause nil pointer panic if we don't have this check. @@ -833,7 +739,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { // May need to apply the initial service config in case the resolver // doesn't support service configs, or doesn't provide a service config // with the new addresses. 
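
Note: the GetState/WaitForStateChange contract documented above composes into the usual readiness loop; a small sketch:

    package example

    import (
    	"context"

    	"google.golang.org/grpc"
    	"google.golang.org/grpc/connectivity"
    )

    // waitReady blocks until the channel reports READY, or returns ctx.Err()
    // when the context expires first (WaitForStateChange returns false then).
    func waitReady(ctx context.Context, conn *grpc.ClientConn) error {
    	for {
    		s := conn.GetState()
    		if s == connectivity.Ready {
    			return nil
    		}
    		if !conn.WaitForStateChange(ctx, s) {
    			return ctx.Err()
    		}
    	}
    }
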
- cc.maybeApplyDefaultServiceConfig(nil) + cc.maybeApplyDefaultServiceConfig() cc.balancerWrapper.resolverError(err) @@ -844,10 +750,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { var ret error if cc.dopts.disableServiceConfig { - channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) - cc.maybeApplyDefaultServiceConfig(s.Addresses) + channelz.Infof(logger, cc.channelz, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) + cc.maybeApplyDefaultServiceConfig() } else if s.ServiceConfig == nil { - cc.maybeApplyDefaultServiceConfig(s.Addresses) + cc.maybeApplyDefaultServiceConfig() // TODO: do we need to apply a failing LB policy if there is no // default, per the error handling design? } else { @@ -855,18 +761,18 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { configSelector := iresolver.GetConfigSelector(s) if configSelector != nil { if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 { - channelz.Infof(logger, cc.channelzID, "method configs in service config will be ignored due to presence of config selector") + channelz.Infof(logger, cc.channelz, "method configs in service config will be ignored due to presence of config selector") } } else { configSelector = &defaultConfigSelector{sc} } - cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) + cc.applyServiceConfigAndBalancer(sc, configSelector) } else { ret = balancer.ErrBadResolverState if cc.sc == nil { // Apply the failing LB only if we haven't received valid service config // from the name resolver in the past. - cc.applyFailingLB(s.ServiceConfig) + cc.applyFailingLBLocked(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -875,7 +781,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { var balCfg serviceconfig.LoadBalancingConfig if cc.sc != nil && cc.sc.lbConfig != nil { - balCfg = cc.sc.lbConfig.cfg + balCfg = cc.sc.lbConfig } bw := cc.balancerWrapper cc.mu.Unlock() @@ -888,15 +794,13 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } -// applyFailingLB is akin to configuring an LB policy on the channel which +// applyFailingLBLocked is akin to configuring an LB policy on the channel which // always fails RPCs. Here, an actual LB policy is not configured, but an always // erroring picker is configured, which returns errors with information about // what was invalid in the received service config. A config selector with no // service config is configured, and the connectivity state of the channel is // set to TransientFailure. -// -// Caller must hold cc.mu. 
-func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { +func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { var err error if sc.Err != nil { err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) @@ -904,14 +808,10 @@ func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.pickerWrapper.updatePicker(base.NewErrPicker(err)) cc.csMgr.updateState(connectivity.TransientFailure) } -func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.balancerWrapper.updateSubConnState(sc, s, err) -} - // Makes a copy of the input addresses slice and clears out the balancer // attributes field. Addresses are passed during subconn creation and address // update operations. In both cases, we will clear the balancer attributes by @@ -926,42 +826,39 @@ func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Ad return out } -// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns. // // Caller needs to make sure len(addrs) > 0. -func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { +func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { + if cc.conns == nil { + return nil, ErrClientConnClosing + } + ac := &addrConn{ state: connectivity.Idle, cc: cc, addrs: copyAddressesWithoutBalancerAttributes(addrs), scopts: opts, dopts: cc.dopts, - czData: new(channelzData), + channelz: channelz.RegisterSubChannel(cc.channelz, ""), resetBackoff: make(chan struct{}), stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) - // Track ac in cc. This needs to be done before any getTransport(...) is called. - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.conns == nil { - return nil, ErrClientConnClosing - } + // Start with our address set to the first address; this may be updated if + // we connect to different addresses. + ac.channelz.ChannelMetrics.Target.Store(&addrs[0].Addr) - var err error - ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") - if err != nil { - return nil, err - } - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{ Desc: "Subchannel created", Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), + Parent: &channelz.TraceEvent{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelz.ID), Severity: channelz.CtInfo, }, }) + // Track ac in cc. This needs to be done before any getTransport(...) is called. 
cc.conns[ac] = struct{}{} return ac, nil } @@ -979,38 +876,27 @@ func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { ac.tearDown(err) } -func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { - return &channelz.ChannelInternalMetric{ - State: cc.GetState(), - Target: cc.target, - CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)), - } -} - // Target returns the target string of the ClientConn. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func (cc *ClientConn) Target() string { return cc.target } +// CanonicalTarget returns the canonical target string of the ClientConn. +func (cc *ClientConn) CanonicalTarget() string { + return cc.parsedTarget.String() +} + func (cc *ClientConn) incrCallsStarted() { - atomic.AddInt64(&cc.czData.callsStarted, 1) - atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano()) + cc.channelz.ChannelMetrics.CallsStarted.Add(1) + cc.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano()) } func (cc *ClientConn) incrCallsSucceeded() { - atomic.AddInt64(&cc.czData.callsSucceeded, 1) + cc.channelz.ChannelMetrics.CallsSucceeded.Add(1) } func (cc *ClientConn) incrCallsFailed() { - atomic.AddInt64(&cc.czData.callsFailed, 1) + cc.channelz.ChannelMetrics.CallsFailed.Add(1) } // connect starts creating a transport. @@ -1053,10 +939,14 @@ func equalAddresses(a, b []resolver.Address) bool { // updateAddrs updates ac.addrs with the new addresses list and handles active // connections or connection attempts. func (ac *addrConn) updateAddrs(addrs []resolver.Address) { - ac.mu.Lock() - channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) - addrs = copyAddressesWithoutBalancerAttributes(addrs) + limit := len(addrs) + if limit > 5 { + limit = 5 + } + channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit]) + + ac.mu.Lock() if equalAddresses(ac.addrs, addrs) { ac.mu.Unlock() return @@ -1168,13 +1058,13 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { - return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) } -func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { +func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector) { if sc == nil { // should never reach here. return @@ -1195,27 +1085,16 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel } else { cc.retryThrottler.Store((*retryThrottler)(nil)) } - - var newBalancerName string - if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) { - // No service config or no LB policy specified in config. 
- newBalancerName = PickFirstBalancerName - } else if cc.sc.lbConfig != nil { - newBalancerName = cc.sc.lbConfig.name - } else { // cc.sc.LB != nil - newBalancerName = *cc.sc.LB - } - cc.balancerWrapper.switchTo(newBalancerName) } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { cc.mu.RLock() - r := cc.resolverWrapper + cc.resolverWrapper.resolveNow(o) cc.mu.RUnlock() - if r == nil { - return - } - go r.resolveNow(o) +} + +func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) { + cc.resolverWrapper.resolveNow(o) } // ResetConnectBackoff wakes up all subchannels in transient failure and causes @@ -1247,40 +1126,32 @@ func (cc *ClientConn) Close() error { <-cc.csMgr.pubSub.Done() }() + // Prevent calls to enter/exit idle immediately, and ensure we are not + // currently entering/exiting idle mode. + cc.idlenessMgr.Close() + cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return ErrClientConnClosing } - for cc.idlenessState == ccIdlenessStateExitingIdle { - cc.exitIdleCond.Wait() - } - conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) - pWrapper := cc.blockingpicker - rWrapper := cc.resolverWrapper - bWrapper := cc.balancerWrapper - idlenessMgr := cc.idlenessMgr + // We can safely unlock and continue to access all fields now as + // cc.conns==nil, preventing any further operations on cc. cc.mu.Unlock() + cc.resolverWrapper.close() // The order of closing matters here since the balancer wrapper assumes the // picker is closed before it is closed. - if pWrapper != nil { - pWrapper.close() - } - if bWrapper != nil { - bWrapper.close() - } - if rWrapper != nil { - rWrapper.close() - } - if idlenessMgr != nil { - idlenessMgr.Close() - } + cc.pickerWrapper.close() + cc.balancerWrapper.close() + + <-cc.resolverWrapper.serializer.Done() + <-cc.balancerWrapper.serializer.Done() for ac := range conns { ac.tearDown(ErrClientConnClosing) @@ -1289,7 +1160,7 @@ func (cc *ClientConn) Close() error { // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from being // deleted right away. - channelz.RemoveEntry(cc.channelzID) + channelz.RemoveEntry(cc.channelz.ID) return nil } @@ -1301,7 +1172,7 @@ type addrConn struct { cc *ClientConn dopts dialOptions - acbw balancer.SubConn + acbw *acBalancerWrapper scopts balancer.NewSubConnOptions // transport is set when there's a viable transport (note: ac state may not be READY as LB channel @@ -1310,6 +1181,10 @@ type addrConn struct { // is received, transport is closed, ac has been torn down). transport transport.ClientTransport // The current transport. + // This mutex is used on the RPC path, so its usage should be minimized as + // much as possible. + // TODO: Find a lock-free way to retrieve the transport and state from the + // addrConn. mu sync.Mutex curAddr resolver.Address // The current address. addrs []resolver.Address // All addresses that the resolver resolved to. @@ -1321,8 +1196,7 @@ type addrConn struct { backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} - channelzID *channelz.Identifier - czData *channelzData + channelz *channelz.SubChannel } // Note: this requires a lock on ac.mu. 
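
Note: the incrCalls* helpers above move from atomic.AddInt64 on raw czData int64 fields (snapshotted by a hand-written ChannelzMetric method) to typed atomics on the channelz ChannelMetrics. The same pattern in isolation, as a sketch using Go 1.19+ sync/atomic types:

    package example

    import (
    	"sync/atomic"
    	"time"
    )

    // callMetrics uses typed atomics, making each counter individually
    // readable without a snapshot method or extra locking.
    type callMetrics struct {
    	CallsStarted             atomic.Int64
    	CallsSucceeded           atomic.Int64
    	CallsFailed              atomic.Int64
    	LastCallStartedTimestamp atomic.Int64 // unix nanoseconds
    }

    func (m *callMetrics) started() {
    	m.CallsStarted.Add(1)
    	m.LastCallStartedTimestamp.Store(time.Now().UnixNano())
    }
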
@@ -1334,12 +1208,13 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) close(ac.stateChan) ac.stateChan = make(chan struct{}) ac.state = s + ac.channelz.ChannelMetrics.State.Store(&s) if lastErr == nil { - channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) + channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v", s) } else { - channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) + channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } - ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) + ac.acbw.updateState(s, lastErr) } // adjustParams updates parameters used to create transports upon @@ -1435,6 +1310,7 @@ func (ac *addrConn) resetTransport() { func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { var firstConnErr error for _, addr := range addrs { + ac.channelz.ChannelMetrics.Target.Store(&addr.Addr) if ctx.Err() != nil { return errConnClosing } @@ -1450,7 +1326,7 @@ func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, c } ac.mu.Unlock() - channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) + channelz.Infof(logger, ac.channelz, "Subchannel picks a new address %q to connect", addr.Addr) err := ac.createTransport(ctx, addr, copts, connectDeadline) if err == nil { @@ -1503,7 +1379,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, connectCtx, cancel := context.WithDeadline(ctx, connectDeadline) defer cancel() - copts.ChannelzParentID = ac.channelzID + copts.ChannelzParent = ac.channelz newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) if err != nil { @@ -1512,7 +1388,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, } // newTr is either nil, or closed. hcancel() - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) + channelz.Warningf(logger, ac.channelz, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) return err } @@ -1584,7 +1460,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { // The health package is not imported to set health check function. // // TODO: add a link to the health check doc in the error message. 
- channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.") + channelz.Error(logger, ac.channelz, "Health check is requested but health check function is not set.") return } @@ -1614,9 +1490,9 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) if err != nil { if status.Code(err) == codes.Unimplemented { - channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") + channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled") } else { - channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err) + channelz.Errorf(logger, ac.channelz, "Health checking failed: %v", err) } } }() @@ -1681,18 +1557,18 @@ func (ac *addrConn) tearDown(err error) { ac.cancel() ac.curAddr = resolver.Address{} - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{ Desc: "Subchannel deleted", Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), + Parent: &channelz.TraceEvent{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelz.ID), Severity: channelz.CtInfo, }, }) // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from // being deleted right away. - channelz.RemoveEntry(ac.channelzID) + channelz.RemoveEntry(ac.channelz.ID) ac.mu.Unlock() // We have to release the lock before the call to GracefulClose/Close here @@ -1719,39 +1595,6 @@ func (ac *addrConn) tearDown(err error) { } } -func (ac *addrConn) getState() connectivity.State { - ac.mu.Lock() - defer ac.mu.Unlock() - return ac.state -} - -func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric { - ac.mu.Lock() - addr := ac.curAddr.Addr - ac.mu.Unlock() - return &channelz.ChannelInternalMetric{ - State: ac.getState(), - Target: addr, - CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)), - } -} - -func (ac *addrConn) incrCallsStarted() { - atomic.AddInt64(&ac.czData.callsStarted, 1) - atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano()) -} - -func (ac *addrConn) incrCallsSucceeded() { - atomic.AddInt64(&ac.czData.callsSucceeded, 1) -} - -func (ac *addrConn) incrCallsFailed() { - atomic.AddInt64(&ac.czData.callsFailed, 1) -} - type retryThrottler struct { max float64 thresh float64 @@ -1789,12 +1632,17 @@ func (rt *retryThrottler) successfulRPC() { } } -type channelzChannel struct { - cc *ClientConn +func (ac *addrConn) incrCallsStarted() { + ac.channelz.ChannelMetrics.CallsStarted.Add(1) + ac.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano()) +} + +func (ac *addrConn) incrCallsSucceeded() { + ac.channelz.ChannelMetrics.CallsSucceeded.Add(1) } -func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { - return c.cc.channelzMetric() +func (ac *addrConn) incrCallsFailed() { + ac.channelz.ChannelMetrics.CallsFailed.Add(1) } // ErrClientConnTimeout indicates that the ClientConn cannot establish the @@ 
-1828,22 +1676,19 @@ func (cc *ClientConn) connectionError() error { return cc.lastConnectionError } -// parseTargetAndFindResolver parses the user's dial target and stores the -// parsed target in `cc.parsedTarget`. +// initParsedTargetAndResolverBuilder parses the user's dial target and stores +// the parsed target in `cc.parsedTarget`. // // The resolver to use is determined based on the scheme in the parsed target // and the same is stored in `cc.resolverBuilder`. // // Doesn't grab cc.mu as this method is expected to be called only at Dial time. -func (cc *ClientConn) parseTargetAndFindResolver() error { - channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) +func (cc *ClientConn) initParsedTargetAndResolverBuilder() error { + logger.Infof("original dial target is: %q", cc.target) var rb resolver.Builder parsedTarget, err := parseTarget(cc.target) - if err != nil { - channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) - } else { - channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + if err == nil { rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget @@ -1855,17 +1700,19 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { // We are here because the user's dial target did not contain a scheme or // specified an unregistered scheme. We should fallback to the default // scheme, except when a custom dialer is specified in which case, we should - // always use passthrough scheme. - defScheme := resolver.GetDefaultScheme() - channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme) + // always use passthrough scheme. For either case, we need to respect any overridden + // global defaults set by the user. + defScheme := cc.dopts.defaultScheme + if internal.UserSetDefaultScheme { + defScheme = resolver.GetDefaultScheme() + } + canonicalTarget := defScheme + ":///" + cc.target parsedTarget, err = parseTarget(canonicalTarget) if err != nil { - channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) return err } - channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) @@ -1887,6 +1734,8 @@ func parseTarget(target string) (resolver.Target, error) { return resolver.Target{URL: *u}, nil } +// encodeAuthority escapes the authority string based on valid chars defined in +// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2. func encodeAuthority(authority string) string { const upperhex = "0123456789ABCDEF" @@ -1903,7 +1752,7 @@ func encodeAuthority(authority string) string { return false case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters return false - case ':', '[', ']', '@': // Authority related delimeters + case ':', '[', ']', '@': // Authority related delimiters return false } // Everything else must be escaped. @@ -1953,7 +1802,7 @@ func encodeAuthority(authority string) string { // credentials do not match the authority configured through the dial option. // // Doesn't grab cc.mu as this method is expected to be called only at Dial time. -func (cc *ClientConn) determineAuthority() error { +func (cc *ClientConn) initAuthority() error { dopts := cc.dopts // Historically, we had two options for users to specify the serverName or // authority for a channel. 
One was through the transport credentials @@ -1975,58 +1824,16 @@ func (cc *ClientConn) determineAuthority() error { } endpoint := cc.parsedTarget.Endpoint() - target := cc.target - switch { - case authorityFromDialOption != "": + if authorityFromDialOption != "" { cc.authority = authorityFromDialOption - case authorityFromCreds != "": + } else if authorityFromCreds != "" { cc.authority = authorityFromCreds - case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): - // TODO: remove when the unix resolver implements optional interface to - // return channel authority. - cc.authority = "localhost" - case strings.HasPrefix(endpoint, ":"): + } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok { + cc.authority = auth.OverrideAuthority(cc.parsedTarget) + } else if strings.HasPrefix(endpoint, ":") { cc.authority = "localhost" + endpoint - default: - // TODO: Define an optional interface on the resolver builder to return - // the channel authority given the user's dial target. For resolvers - // which don't implement this interface, we will use the endpoint from - // "scheme://authority/endpoint" as the default authority. - // Escape the endpoint to handle use cases where the endpoint - // might not be a valid authority by default. - // For example an endpoint which has multiple paths like - // 'a/b/c', which is not a valid authority by default. + } else { cc.authority = encodeAuthority(endpoint) } - channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) - return nil -} - -// initResolverWrapper creates a ccResolverWrapper, which builds the name -// resolver. This method grabs the lock to assign the newly built resolver -// wrapper to the cc.resolverWrapper field. -func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error { - rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ - target: cc.parsedTarget, - builder: cc.resolverBuilder, - bOpts: resolver.BuildOptions{ - DisableServiceConfig: cc.dopts.disableServiceConfig, - DialCreds: creds, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - }, - channelzID: cc.channelzID, - }) - if err != nil { - return fmt.Errorf("failed to build resolver: %v", err) - } - // Resolver implementations may report state update or error inline when - // built (or right after), and this is handled in cc.updateResolverState. - // Also, an error from the resolver might lead to a re-resolution request - // from the balancer, which is handled in resolveNow() where - // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here. - cc.mu.Lock() - cc.resolverWrapper = rw - cc.mu.Unlock() return nil } diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh deleted file mode 100644 index 4cdc6ba7c..000000000 --- a/vendor/google.golang.org/grpc/codegen.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -# This script serves as an example to demonstrate how to generate the gRPC-Go -# interface and the related messages from .proto file. -# -# It assumes the installation of i) Google proto buffer compiler at -# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen -# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have -# not, please install them first. -# -# We recommend running this script at $GOPATH/src. -# -# If this is not what you need, feel free to make your own scripts. Again, this -# script is for demonstration purpose. 
-# -proto=$1 -protoc --go_out=plugins=grpc:. $proto diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 11b106182..0b42c302b 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -25,7 +25,13 @@ import ( "strconv" ) -// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +// A Code is a status code defined according to the [gRPC documentation]. +// +// Only the codes defined as consts in this package are valid codes. Do not use +// other code values. Behavior of other codes is implementation-specific and +// interoperability between implementations is not guaranteed. +// +// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md type Code uint32 const ( @@ -229,7 +235,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { if ci >= _maxCode { - return fmt.Errorf("invalid code: %q", ci) + return fmt.Errorf("invalid code: %d", ci) } *c = Code(ci) diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 5feac3aa0..665e790bb 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -28,9 +28,9 @@ import ( "fmt" "net" - "github.com/golang/protobuf/proto" "google.golang.org/grpc/attributes" icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/protobuf/proto" ) // PerRPCCredentials defines the common interface for the credentials which need to @@ -237,7 +237,7 @@ func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { } // CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one. -// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method +// It returns success if 1) the condition is satisfied or 2) AuthInfo struct does not implement GetCommonAuthInfo() method // or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. // // This API is experimental. diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 877b7cd21..411435854 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -27,9 +27,13 @@ import ( "net/url" "os" + "google.golang.org/grpc/grpclog" credinternal "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/envconfig" ) +var logger = grpclog.Component("credentials") + // TLSInfo contains the auth information for a TLS authenticated connection. // It implements the AuthInfo interface. type TLSInfo struct { @@ -44,10 +48,25 @@ func (t TLSInfo) AuthType() string { return "tls" } +// cipherSuiteLookup returns the string version of a TLS cipher suite ID. +func cipherSuiteLookup(cipherSuiteID uint16) string { + for _, s := range tls.CipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + for _, s := range tls.InsecureCipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + return fmt.Sprintf("unknown ID: %v", cipherSuiteID) +} + // GetSecurityValue returns security info requested by channelz. 
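
Note: the initAuthority rewrite above replaces the hard-coded unix-scheme special case with the optional resolver.AuthorityOverrider interface. A hypothetical builder that pins its channel authority, matching the call shape used in initAuthority:

    package example

    import "google.golang.org/grpc/resolver"

    // fixedAuthorityBuilder is a hypothetical wrapper around a real
    // resolver.Builder; implementing resolver.AuthorityOverrider lets it
    // supply the channel authority for targets using its scheme.
    type fixedAuthorityBuilder struct {
    	resolver.Builder // the builder being wrapped
    }

    func (fixedAuthorityBuilder) OverrideAuthority(resolver.Target) string {
    	return "localhost"
    }
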
func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { v := &TLSChannelzSecurityValue{ - StandardName: cipherSuiteLookup[t.State.CipherSuite], + StandardName: cipherSuiteLookup(t.State.CipherSuite), } // Currently there's no way to get LocalCertificate info from tls package. if len(t.State.PeerCertificates) > 0 { @@ -97,6 +116,22 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon conn.Close() return nil, nil, ctx.Err() } + + // The negotiated protocol can be either of the following: + // 1. h2: When the server supports ALPN. Only HTTP/2 can be negotiated since + // it is the only protocol advertised by the client during the handshake. + // The tls library ensures that the server chooses a protocol advertised + // by the client. + // 2. "" (empty string): If the server doesn't support ALPN. ALPN is a requirement + // for using HTTP/2 over TLS. We can terminate the connection immediately. + np := conn.ConnectionState().NegotiatedProtocol + if np == "" { + if envconfig.EnforceALPNEnabled { + conn.Close() + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + } + logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName) + } tlsInfo := TLSInfo{ State: conn.ConnectionState(), CommonAuthInfo: CommonAuthInfo{ @@ -116,8 +151,20 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) conn.Close() return nil, nil, err } + cs := conn.ConnectionState() + // The negotiated application protocol can be empty only if the client doesn't + // support ALPN. In such cases, we can close the connection since ALPN is required + // for using HTTP/2 over TLS. + if cs.NegotiatedProtocol == "" { + if envconfig.EnforceALPNEnabled { + conn.Close() + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + } else if logger.V(2) { + logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases") + } + } tlsInfo := TLSInfo{ - State: conn.ConnectionState(), + State: cs, CommonAuthInfo: CommonAuthInfo{ SecurityLevel: PrivacyAndIntegrity, }, @@ -138,10 +185,39 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { return nil } +// The following cipher suites are forbidden for use with HTTP/2 by +// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A +var tls12ForbiddenCipherSuites = map[uint16]struct{}{ + tls.TLS_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: {}, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: {}, +} + // NewTLS uses c to construct a TransportCredentials based on TLS. 
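
Note: the ALPN enforcement added above (gated on envconfig.EnforceALPNEnabled) means peers must negotiate "h2". On the server side that amounts to advertising HTTP/2 in the TLS config; a sketch:

    package example

    import "crypto/tls"

    // serverTLSConfig advertises HTTP/2 via ALPN so the handshake checks
    // above succeed once enforcement becomes the default.
    func serverTLSConfig(cert tls.Certificate) *tls.Config {
    	return &tls.Config{
    		Certificates: []tls.Certificate{cert},
    		NextProtos:   []string{"h2"}, // ALPN is required for HTTP/2 over TLS
    	}
    }

Configs passed through credentials.NewTLS get "h2" appended automatically (AppendH2ToNextProtos, as the hunk above shows); the sketch matters for hand-rolled TLS listeners placed in front of gRPC.
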
func NewTLS(c *tls.Config) TransportCredentials { tc := &tlsCreds{credinternal.CloneTLSConfig(c)} tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + // If the user did not configure a MinVersion and did not configure a + // MaxVersion < 1.2, use MinVersion=1.2, which is required by + // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 + if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { + tc.config.MinVersion = tls.VersionTLS12 + } + // If the user did not configure CipherSuites, use all "secure" cipher + // suites reported by the TLS package, but remove some explicitly forbidden + // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A + if tc.config.CipherSuites == nil { + for _, cs := range tls.CipherSuites() { + if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { + tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + } + } + } return tc } @@ -205,32 +281,3 @@ type TLSChannelzSecurityValue struct { LocalCertificate []byte RemoteCertificate []byte } - -var cipherSuiteLookup = map[uint16]string{ - tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", - tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", - tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", - tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", - tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", -} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 1fd0d5c12..f5453d48a 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -21,6 +21,7 @@ package grpc import ( "context" "net" + "net/url" "time" "google.golang.org/grpc/backoff" @@ -36,6 +37,11 @@ import ( "google.golang.org/grpc/stats" ) +const ( + // 
https://github.com/grpc/proposal/blob/master/A6-client-retries.md#limits-on-retries-and-hedges + defaultMaxCallAttempts = 5 +) + func init() { internal.AddGlobalDialOptions = func(opt ...DialOption) { globalDialOptions = append(globalDialOptions, opt...) @@ -43,9 +49,18 @@ func init() { internal.ClearGlobalDialOptions = func() { globalDialOptions = nil } + internal.AddGlobalPerTargetDialOptions = func(opt any) { + if ptdo, ok := opt.(perTargetDialOption); ok { + globalPerTargetDialOptions = append(globalPerTargetDialOptions, ptdo) + } + } + internal.ClearGlobalPerTargetDialOptions = func() { + globalPerTargetDialOptions = nil + } internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions + internal.WithRecvBufferPool = withRecvBufferPool } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -63,12 +78,11 @@ type dialOptions struct { block bool returnLastError bool timeout time.Duration - scChan <-chan ServiceConfig authority string binaryLogger binarylog.Logger copts transport.ConnectOptions callOptions []CallOption - channelzParentID *channelz.Identifier + channelzParent channelz.Identifier disableServiceConfig bool disableRetry bool disableHealthCheck bool @@ -79,6 +93,8 @@ type dialOptions struct { resolvers []resolver.Builder idleTimeout time.Duration recvBufferPool SharedBufferPool + defaultScheme string + maxCallAttempts int } // DialOption configures how we set up the connection. @@ -88,6 +104,19 @@ type DialOption interface { var globalDialOptions []DialOption +// perTargetDialOption takes a parsed target and returns a dial option to apply. +// +// This gets called after NewClient() parses the target, and allows per target +// configuration set through a returned DialOption. The DialOption will not take +// effect if specifies a resolver builder, as that Dial Option is factored in +// while parsing target. +type perTargetDialOption interface { + // DialOption returns a Dial Option to apply. + DialOptionForTarget(parsedTarget url.URL) DialOption +} + +var globalPerTargetDialOptions []perTargetDialOption + // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. // @@ -154,9 +183,7 @@ func WithSharedWriteBuffer(val bool) DialOption { } // WithWriteBufferSize determines how much data can be batched before doing a -// write on the wire. The corresponding memory allocation for this buffer will -// be twice the size to keep syscalls low. The default value for this buffer is -// 32KB. +// write on the wire. The default value for this buffer is 32KB. // // Zero or negative values will disable the write buffer such that each write // will be on underlying connection. Note: A Send call may not directly @@ -250,19 +277,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithServiceConfig returns a DialOption which has a channel to read the -// service configuration. -// -// Deprecated: service config should be received through name resolver or via -// WithDefaultServiceConfig, as specified at -// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be -// removed in a future 1.x release. -func WithServiceConfig(c <-chan ServiceConfig) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.scChan = c - }) -} - // WithConnectParams configures the ClientConn to use the provided ConnectParams // for creating and maintaining connections to servers. 
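
Note: with the WithServiceConfig channel removed above, a client-supplied default now goes through WithDefaultServiceConfig as a JSON string, and config received from the name resolver still takes precedence. A minimal sketch (placeholder target):

    package example

    import (
    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials/insecure"
    )

    // newConn applies a client-side default service config selecting the
    // round_robin LB policy.
    func newConn() (*grpc.ClientConn, error) {
    	return grpc.Dial("localhost:50051", // placeholder target
    		grpc.WithTransportCredentials(insecure.NewCredentials()),
    		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"round_robin":{}}]}`),
    	)
    }
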
// @@ -314,6 +328,9 @@ func withBackoff(bs internalbackoff.Strategy) DialOption { // // Use of this feature is not recommended. For more information, please see: // https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// Deprecated: this DialOption is not supported by NewClient. +// Will be supported throughout 1.x. func WithBlock() DialOption { return newFuncDialOption(func(o *dialOptions) { o.block = true @@ -328,10 +345,8 @@ func WithBlock() DialOption { // Use of this feature is not recommended. For more information, please see: // https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Deprecated: this DialOption is not supported by NewClient. +// Will be supported throughout 1.x. func WithReturnConnectionError() DialOption { return newFuncDialOption(func(o *dialOptions) { o.block = true @@ -401,8 +416,8 @@ func WithCredentialsBundle(b credentials.Bundle) DialOption { // WithTimeout returns a DialOption that configures a timeout for dialing a // ClientConn initially. This is valid if and only if WithBlock() is present. // -// Deprecated: use DialContext instead of Dial and context.WithTimeout -// instead. Will be supported throughout 1.x. +// Deprecated: this DialOption is not supported by NewClient. +// Will be supported throughout 1.x. func WithTimeout(d time.Duration) DialOption { return newFuncDialOption(func(o *dialOptions) { o.timeout = d @@ -413,6 +428,17 @@ func WithTimeout(d time.Duration) DialOption { // connections. If FailOnNonTempDialError() is set to true, and an error is // returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, use a net.Dialer that sets +// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket +// option to true from the Control field. For a concrete example of how to do +// this, see internal.NetDialerWithTCPKeepalive(). +// +// For more information, please see [issue 23459] in the Go github repo. +// +// [issue 23459]: https://github.com/golang/go/issues/23459 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.Dialer = f @@ -473,9 +499,8 @@ func withBinaryLogger(bl binarylog.Logger) DialOption { // Use of this feature is not recommended. For more information, please see: // https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// Deprecated: this DialOption is not supported by NewClient. +// This API may be changed or removed in a // later release. func FailOnNonTempDialError(f bool) DialOption { return newFuncDialOption(func(o *dialOptions) { @@ -487,7 +512,7 @@ func FailOnNonTempDialError(f bool) DialOption { // the RPCs. func WithUserAgent(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.UserAgent = s + o.copts.UserAgent = s + " " + grpcUA }) } @@ -557,9 +582,9 @@ func WithAuthority(a string) DialOption { // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. 
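
Note: the expanded WithContextDialer documentation above concerns Go's TCP keepalive overrides; the option itself simply replaces the transport's dialing function. A minimal sketch:

    package example

    import (
    	"context"
    	"net"

    	"google.golang.org/grpc"
    )

    // customDialer routes every connection attempt through our own
    // net.Dialer; addr is the resolved address chosen by the LB policy.
    func customDialer() grpc.DialOption {
    	d := &net.Dialer{}
    	return grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
    		return d.DialContext(ctx, "tcp", addr)
    	})
    }
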
-func WithChannelzParentID(id *channelz.Identifier) DialOption { +func WithChannelzParentID(c channelz.Identifier) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.channelzParentID = id + o.channelzParent = c }) } @@ -604,12 +629,22 @@ func WithDisableRetry() DialOption { }) } +// MaxHeaderListSizeDialOption is a DialOption that specifies the maximum +// (uncompressed) size of header list that the client is prepared to accept. +type MaxHeaderListSizeDialOption struct { + MaxHeaderListSize uint32 +} + +func (o MaxHeaderListSizeDialOption) apply(do *dialOptions) { + do.copts.MaxHeaderListSize = &o.MaxHeaderListSize +} + // WithMaxHeaderListSize returns a DialOption that specifies the maximum // (uncompressed) size of header list that the client is prepared to accept. func WithMaxHeaderListSize(s uint32) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.MaxHeaderListSize = &s - }) + return MaxHeaderListSizeDialOption{ + MaxHeaderListSize: s, + } } // WithDisableHealthCheck disables the LB channel health checking for all @@ -637,17 +672,22 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption { func defaultDialOptions() dialOptions { return dialOptions{ - healthCheckFunc: internal.HealthCheckFunc, copts: transport.ConnectOptions{ - WriteBufferSize: defaultWriteBufSize, ReadBufferSize: defaultReadBufSize, + WriteBufferSize: defaultWriteBufSize, UseProxy: true, + UserAgent: grpcUA, }, - recvBufferPool: nopBufferPool{}, + bs: internalbackoff.DefaultExponential, + healthCheckFunc: internal.HealthCheckFunc, + idleTimeout: 30 * time.Minute, + recvBufferPool: nopBufferPool{}, + defaultScheme: "dns", + maxCallAttempts: defaultMaxCallAttempts, } } -// withGetMinConnectDeadline specifies the function that clientconn uses to +// withMinConnectDeadline specifies the function that clientconn uses to // get minConnectDeadline. This can be used to make connection attempts happen // faster/slower. // @@ -658,6 +698,14 @@ func withMinConnectDeadline(f func() time.Duration) DialOption { }) } +// withDefaultScheme is used to allow Dial to use "passthrough" as the default +// name resolver, while NewClient uses "dns" otherwise. +func withDefaultScheme(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.defaultScheme = s + }) +} + // WithResolvers allows a list of resolver implementations to be registered // locally with the ClientConn without needing to be globally registered via // resolver.Register. They will be matched against the scheme used for the @@ -680,8 +728,8 @@ func WithResolvers(rs ...resolver.Builder) DialOption { // channel will exit idle mode when the Connect() method is called or when an // RPC is initiated. // -// By default this feature is disabled, which can also be explicitly configured -// by passing zero to this function. +// A default timeout of 30 minutes will be used if this dial option is not set +// at dial time and idleness can be disabled by passing a timeout of zero. // // # Experimental // @@ -693,6 +741,23 @@ func WithIdleTimeout(d time.Duration) DialOption { }) } +// WithMaxCallAttempts returns a DialOption that configures the maximum number +// of attempts per call (including retries and hedging) using the channel. +// Service owners may specify a higher value for these parameters, but higher +// values will be treated as equal to the maximum value by the client +// implementation. This mitigates security concerns related to the service +// config being transferred to the client via DNS. 
+// +// A value of 5 will be used if this dial option is not set or n < 2. +func WithMaxCallAttempts(n int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + if n < 2 { + n = defaultMaxCallAttempts + } + o.maxCallAttempts = n + }) +} + // WithRecvBufferPool returns a DialOption that configures the ClientConn // to use the provided shared buffer pool for parsing incoming messages. Depending // on the application's workload, this could result in reduced memory allocation. @@ -704,11 +769,13 @@ func WithIdleTimeout(d time.Duration) DialOption { // options are used: WithStatsHandler, EnableTracing, or binary logging. In such // cases, the shared buffer pool will be ignored. // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return withRecvBufferPool(bufferPool) +} + +func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { return newFuncDialOption(func(o *dialOptions) { o.recvBufferPool = bufferPool }) diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 69d5580b6..5ebf88d71 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -38,6 +38,10 @@ const Identity = "identity" // Compressor is used for compressing and decompressing when sending or // receiving messages. +// +// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`, +// gRPC will invoke it to determine the size of the buffer allocated for the +// result of decompression. A return value of -1 indicates unknown size. type Compressor interface { // Compress writes the data written to wc to w after compressing it. If an // error occurs while initializing the compressor, that error is returned @@ -51,15 +55,6 @@ type Compressor interface { // coding header. The result must be static; the result cannot change // between calls. Name() string - // If a Compressor implements - // DecompressedSize(compressedBytes []byte) int, gRPC will call it - // to determine the size of the buffer allocated for the result of decompression. - // Return -1 to indicate unknown size. - // - // Experimental - // - // Notice: This API is EXPERIMENTAL and may be changed or removed in a - // later release. } var registeredCompressor = make(map[string]Compressor) diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 0ee3d3bae..66d5cdf03 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -23,8 +23,9 @@ package proto import ( "fmt" - "github.com/golang/protobuf/proto" "google.golang.org/grpc/encoding" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/protoadapt" ) // Name is the name registered for the proto compressor. 
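
Note: the new WithMaxCallAttempts option above caps attempts per call (the original plus retries and hedges) regardless of what the service config requests. A usage sketch with NewClient, whose default scheme is dns per the defaultDialOptions change above (hypothetical target):

    package example

    import (
    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials/insecure"
    )

    func newCappedClient() (*grpc.ClientConn, error) {
    	return grpc.NewClient("dns:///localhost:50051", // hypothetical target
    		grpc.WithTransportCredentials(insecure.NewCredentials()),
    		// Allow at most 3 attempts per call; values below 2 fall back
    		// to the default of 5.
    		grpc.WithMaxCallAttempts(3),
    	)
    }
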
@@ -38,21 +39,34 @@ func init() { type codec struct{} func (codec) Marshal(v any) ([]byte, error) { - vv, ok := v.(proto.Message) - if !ok { + vv := messageV2Of(v) + if vv == nil { return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) } + return proto.Marshal(vv) } func (codec) Unmarshal(data []byte, v any) error { - vv, ok := v.(proto.Message) - if !ok { + vv := messageV2Of(v) + if vv == nil { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) } + return proto.Unmarshal(data, vv) } +func messageV2Of(v any) proto.Message { + switch v := v.(type) { + case protoadapt.MessageV1: + return protoadapt.MessageV2Of(v) + case protoadapt.MessageV2: + return v + } + + return nil +} + func (codec) Name() string { return Name } diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index 5fc0ee3da..b15cf482d 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -23,10 +23,12 @@ package backoff import ( + "context" + "errors" + "math/rand" "time" grpcbackoff "google.golang.org/grpc/backoff" - "google.golang.org/grpc/internal/grpcrand" ) // Strategy defines the methodology for backing off after a grpc connection @@ -65,9 +67,43 @@ func (bc Exponential) Backoff(retries int) time.Duration { } // Randomize backoff delays so that if a cluster of requests start at // the same time, they won't operate in lockstep. - backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1) + backoff *= 1 + bc.Config.Jitter*(rand.Float64()*2-1) if backoff < 0 { return 0 } return time.Duration(backoff) } + +// ErrResetBackoff is the error to be returned by the function executed by RunF, +// to instruct the latter to reset its backoff state. +var ErrResetBackoff = errors.New("reset backoff state") + +// RunF provides a convenient way to run a function f repeatedly until the +// context expires or f returns a non-nil error that is not ErrResetBackoff. +// When f returns ErrResetBackoff, RunF continues to run f, but resets its +// backoff state before doing so. backoff accepts an integer representing the +// number of retries, and returns the amount of time to backoff. +func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) { + attempt := 0 + timer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-timer.C: + case <-ctx.Done(): + timer.Stop() + return + } + + err := f() + if errors.Is(err, ErrResetBackoff) { + timer.Reset(0) + attempt = 0 + continue + } + if err != nil { + return + } + timer.Reset(backoff(attempt)) + attempt++ + } +} diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go new file mode 100644 index 000000000..13821a926 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package gracefulswitch + +import ( + "encoding/json" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/serviceconfig" +) + +type lbConfig struct { + serviceconfig.LoadBalancingConfig + + childBuilder balancer.Builder + childConfig serviceconfig.LoadBalancingConfig +} + +func ChildName(l serviceconfig.LoadBalancingConfig) string { + return l.(*lbConfig).childBuilder.Name() +} + +// ParseConfig parses a child config list and returns a LB config for the +// gracefulswitch Balancer. +// +// cfg is expected to be a json.RawMessage containing a JSON array of LB policy +// names + configs as the format of the "loadBalancingConfig" field in +// ServiceConfig. It returns a type that should be passed to +// UpdateClientConnState in the BalancerConfig field. +func ParseConfig(cfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var lbCfg []map[string]json.RawMessage + if err := json.Unmarshal(cfg, &lbCfg); err != nil { + return nil, err + } + for i, e := range lbCfg { + if len(e) != 1 { + return nil, fmt.Errorf("expected a JSON struct with one entry; received entry %v at index %d", e, i) + } + + var name string + var jsonCfg json.RawMessage + for name, jsonCfg = range e { + } + + builder := balancer.Get(name) + if builder == nil { + // Skip unregistered balancer names. + continue + } + + parser, ok := builder.(balancer.ConfigParser) + if !ok { + // This is a valid child with no config. + return &lbConfig{childBuilder: builder}, nil + } + + cfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return nil, fmt.Errorf("error parsing config for policy %q: %v", name, err) + } + return &lbConfig{childBuilder: builder, childConfig: cfg}, nil + } + + return nil, fmt.Errorf("no supported policies found in config: %v", string(cfg)) +} diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index 3c594e6e4..73bb4c4ee 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -94,14 +94,23 @@ func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { // process is not complete when this method returns. This method must be called // synchronously alongside the rest of the balancer.Balancer methods this // Graceful Switch Balancer implements. +// +// Deprecated: use ParseConfig and pass a parsed config to UpdateClientConnState +// to cause the Balancer to automatically change to the new child when necessary. 
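
Per the deprecation note that follows, callers are expected to move from SwitchTo to ParseConfig plus UpdateClientConnState. A sketch of that flow, assuming "round_robin" is a registered policy; applyPolicy is a hypothetical helper, not code from the diff:

import (
	"encoding/json"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/internal/balancer/gracefulswitch"
)

// applyPolicy parses a "loadBalancingConfig"-style JSON list, e.g.
// []byte(`[{"round_robin": {}}]`), and hands the result to the graceful switch
// balancer, which then switches children automatically when the policy changes.
func applyPolicy(gsb *gracefulswitch.Balancer, rawCfg json.RawMessage) error {
	cfg, err := gracefulswitch.ParseConfig(rawCfg)
	if err != nil {
		return err // malformed JSON, or no listed policy is registered
	}
	return gsb.UpdateClientConnState(balancer.ClientConnState{BalancerConfig: cfg})
}
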
func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + _, err := gsb.switchTo(builder) + return err +} + +func (gsb *Balancer) switchTo(builder balancer.Builder) (*balancerWrapper, error) { gsb.mu.Lock() if gsb.closed { gsb.mu.Unlock() - return errBalancerClosed + return nil, errBalancerClosed } bw := &balancerWrapper{ - gsb: gsb, + builder: builder, + gsb: gsb, lastState: balancer.State{ ConnectivityState: connectivity.Connecting, Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), @@ -129,7 +138,7 @@ func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { gsb.balancerCurrent = nil } gsb.mu.Unlock() - return balancer.ErrBadResolverState + return nil, balancer.ErrBadResolverState } // This write doesn't need to take gsb.mu because this field never gets read @@ -138,7 +147,7 @@ func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { // bw.Balancer field will never be forwarded to until this SwitchTo() // function returns. bw.Balancer = newBalancer - return nil + return bw, nil } // Returns nil if the graceful switch balancer is closed. @@ -152,12 +161,32 @@ func (gsb *Balancer) latestBalancer() *balancerWrapper { } // UpdateClientConnState forwards the update to the latest balancer created. +// +// If the state's BalancerConfig is the config returned by a call to +// gracefulswitch.ParseConfig, then this function will automatically SwitchTo +// the balancer indicated by the config before forwarding its config to it, if +// necessary. func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { // The resolver data is only relevant to the most recent LB Policy. balToUpdate := gsb.latestBalancer() + gsbCfg, ok := state.BalancerConfig.(*lbConfig) + if ok { + // Switch to the child in the config unless it is already active. + if balToUpdate == nil || gsbCfg.childBuilder.Name() != balToUpdate.builder.Name() { + var err error + balToUpdate, err = gsb.switchTo(gsbCfg.childBuilder) + if err != nil { + return fmt.Errorf("could not switch to new child balancer: %w", err) + } + } + // Unwrap the child balancer's config. + state.BalancerConfig = gsbCfg.childConfig + } + if balToUpdate == nil { return errBalancerClosed } + // Perform this call without gsb.mu to prevent deadlocks if the child calls // back into the channel. The latest balancer can never be closed during a // call from the channel, even without gsb.mu held. @@ -169,6 +198,10 @@ func (gsb *Balancer) ResolverError(err error) { // The resolver data is only relevant to the most recent LB Policy. balToUpdate := gsb.latestBalancer() if balToUpdate == nil { + gsb.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + }) return } // Perform this call without gsb.mu to prevent deadlocks if the child calls @@ -261,7 +294,8 @@ func (gsb *Balancer) Close() { // graceful switch logic. 
type balancerWrapper struct { balancer.Balancer - gsb *Balancer + gsb *Balancer + builder balancer.Builder lastState balancer.State subconns map[balancer.SubConn]bool // subconns created by this balancer diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 0f31274a3..aa4505a87 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -25,11 +25,12 @@ import ( "sync/atomic" "time" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" ) type callIDGenerator struct { @@ -64,7 +65,7 @@ type TruncatingMethodLogger struct { callID uint64 idWithinCallGen *callIDGenerator - sink Sink // TODO(blog): make this plugable. + sink Sink // TODO(blog): make this pluggable. } // NewTruncatingMethodLogger returns a new truncating method logger. @@ -79,7 +80,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { callID: idGen.next(), idWithinCallGen: &callIDGenerator{}, - sink: DefaultSink, // TODO(blog): make it plugable. + sink: DefaultSink, // TODO(blog): make it pluggable. } } @@ -88,7 +89,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { // in TruncatingMethodLogger as possible. func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry { m := c.toProto() - timestamp, _ := ptypes.TimestampProto(time.Now()) + timestamp := timestamppb.Now() m.Timestamp = timestamp m.CallId = ml.callID m.SequenceIdWithinCall = ml.idWithinCallGen.next() @@ -178,7 +179,7 @@ func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry { Authority: c.Authority, } if c.Timeout > 0 { - clientHeader.Timeout = ptypes.DurationProto(c.Timeout) + clientHeader.Timeout = durationpb.New(c.Timeout) } ret := &binlogpb.GrpcLogEntry{ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, @@ -396,7 +397,7 @@ func metadataKeyOmit(key string) bool { switch key { case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te": return true - case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users. + case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users. return false } return strings.HasPrefix(key, "grpc-") diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go index 264de387c..9ea598b14 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -25,8 +25,8 @@ import ( "sync" "time" - "github.com/golang/protobuf/proto" binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + "google.golang.org/protobuf/proto" ) var ( diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 4399c3df4..11f91668a 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -18,7 +18,10 @@ // Package buffer provides an implementation of an unbounded buffer. 
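
The binarylog hunks above swap the legacy github.com/golang/protobuf ptypes helpers for the protobuf-go well-known types, which also drop the error returns. For reference, a one-line equivalence sketch (not part of the diff):

import (
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

// Old: ts, err := ptypes.TimestampProto(time.Now()); d := ptypes.DurationProto(v)
// New: no error to handle; validity can be checked later with ts.CheckValid().
func wellKnownNow(v time.Duration) (*timestamppb.Timestamp, *durationpb.Duration) {
	return timestamppb.Now(), durationpb.New(v)
}
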
package buffer -import "sync" +import ( + "errors" + "sync" +) // Unbounded is an implementation of an unbounded buffer which does not use // extra goroutines. This is typically used for passing updates from one entity @@ -36,6 +39,7 @@ import "sync" type Unbounded struct { c chan any closed bool + closing bool mu sync.Mutex backlog []any } @@ -45,32 +49,32 @@ func NewUnbounded() *Unbounded { return &Unbounded{c: make(chan any, 1)} } +var errBufferClosed = errors.New("Put called on closed buffer.Unbounded") + // Put adds t to the unbounded buffer. -func (b *Unbounded) Put(t any) { +func (b *Unbounded) Put(t any) error { b.mu.Lock() defer b.mu.Unlock() - if b.closed { - return + if b.closing { + return errBufferClosed } if len(b.backlog) == 0 { select { case b.c <- t: - return + return nil default: } } b.backlog = append(b.backlog, t) + return nil } -// Load sends the earliest buffered data, if any, onto the read channel -// returned by Get(). Users are expected to call this every time they read a +// Load sends the earliest buffered data, if any, onto the read channel returned +// by Get(). Users are expected to call this every time they successfully read a // value from the read channel. func (b *Unbounded) Load() { b.mu.Lock() defer b.mu.Unlock() - if b.closed { - return - } if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -78,6 +82,8 @@ func (b *Unbounded) Load() { b.backlog = b.backlog[1:] default: } + } else if b.closing && !b.closed { + close(b.c) } } @@ -88,18 +94,23 @@ func (b *Unbounded) Load() { // send the next buffered value onto the channel if there is any. // // If the unbounded buffer is closed, the read channel returned by this method -// is closed. +// is closed after all data is drained. func (b *Unbounded) Get() <-chan any { return b.c } -// Close closes the unbounded buffer. +// Close closes the unbounded buffer. No subsequent data may be Put(), and the +// channel returned from Get() will be closed after all the data is read and +// Load() is called for the final time. func (b *Unbounded) Close() { b.mu.Lock() defer b.mu.Unlock() - if b.closed { + if b.closing { return } - b.closed = true - close(b.c) + b.closing = true + if len(b.backlog) == 0 { + b.closed = true + close(b.c) + } } diff --git a/vendor/google.golang.org/grpc/internal/channelz/channel.go b/vendor/google.golang.org/grpc/internal/channelz/channel.go new file mode 100644 index 000000000..d7e9e1d54 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/channel.go @@ -0,0 +1,255 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "sync/atomic" + + "google.golang.org/grpc/connectivity" +) + +// Channel represents a channel within channelz, which includes metrics and +// internal channelz data, such as channelz id, child list, etc. +type Channel struct { + Entity + // ID is the channelz id of this channel. + ID int64 + // RefName is the human readable reference string of this channel. 
+	RefName string
+
+	closeCalled bool
+	nestedChans map[int64]string
+	subChans    map[int64]string
+	Parent      *Channel
+	trace       *ChannelTrace
+	// traceRefCount is the number of trace events that reference this channel.
+	// Non-zero traceRefCount means the trace of this channel cannot be deleted.
+	traceRefCount int32
+
+	ChannelMetrics ChannelMetrics
+}
+
+// Implemented to make Channel implement the Identifier interface used for
+// nesting.
+func (c *Channel) channelzIdentifier() {}
+
+func (c *Channel) String() string {
+	if c.Parent == nil {
+		return fmt.Sprintf("Channel #%d", c.ID)
+	}
+	return fmt.Sprintf("%s Channel #%d", c.Parent, c.ID)
+}
+
+func (c *Channel) id() int64 {
+	return c.ID
+}
+
+func (c *Channel) SubChans() map[int64]string {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return copyMap(c.subChans)
+}
+
+func (c *Channel) NestedChans() map[int64]string {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return copyMap(c.nestedChans)
+}
+
+func (c *Channel) Trace() *ChannelTrace {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return c.trace.copy()
+}
+
+type ChannelMetrics struct {
+	// The current connectivity state of the channel.
+	State atomic.Pointer[connectivity.State]
+	// The target this channel originally tried to connect to. May be absent.
+	Target atomic.Pointer[string]
+	// The number of calls started on the channel.
+	CallsStarted atomic.Int64
+	// The number of calls that have completed with an OK status.
+	CallsSucceeded atomic.Int64
+	// The number of calls that have completed with a non-OK status.
+	CallsFailed atomic.Int64
+	// The last time a call was started on the channel.
+	LastCallStartedTimestamp atomic.Int64
+}
+
+// CopyFrom copies the metrics in o to c. For testing only.
+func (c *ChannelMetrics) CopyFrom(o *ChannelMetrics) {
+	c.State.Store(o.State.Load())
+	c.Target.Store(o.Target.Load())
+	c.CallsStarted.Store(o.CallsStarted.Load())
+	c.CallsSucceeded.Store(o.CallsSucceeded.Load())
+	c.CallsFailed.Store(o.CallsFailed.Load())
+	c.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load())
+}
+
+// Equal returns true iff the metrics of c are the same as the metrics of o.
+// For testing only.
+func (c *ChannelMetrics) Equal(o any) bool { + oc, ok := o.(*ChannelMetrics) + if !ok { + return false + } + if (c.State.Load() == nil) != (oc.State.Load() == nil) { + return false + } + if c.State.Load() != nil && *c.State.Load() != *oc.State.Load() { + return false + } + if (c.Target.Load() == nil) != (oc.Target.Load() == nil) { + return false + } + if c.Target.Load() != nil && *c.Target.Load() != *oc.Target.Load() { + return false + } + return c.CallsStarted.Load() == oc.CallsStarted.Load() && + c.CallsFailed.Load() == oc.CallsFailed.Load() && + c.CallsSucceeded.Load() == oc.CallsSucceeded.Load() && + c.LastCallStartedTimestamp.Load() == oc.LastCallStartedTimestamp.Load() +} + +func strFromPointer(s *string) string { + if s == nil { + return "" + } + return *s +} + +func (c *ChannelMetrics) String() string { + return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v", + c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(), + ) +} + +func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics { + c := &ChannelMetrics{} + c.State.Store(&state) + c.Target.Store(&target) + c.CallsStarted.Store(started) + c.CallsSucceeded.Store(succeeded) + c.CallsFailed.Store(failed) + c.LastCallStartedTimestamp.Store(timestamp) + return c +} + +func (c *Channel) addChild(id int64, e entry) { + switch v := e.(type) { + case *SubChannel: + c.subChans[id] = v.RefName + case *Channel: + c.nestedChans[id] = v.RefName + default: + logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) + } +} + +func (c *Channel) deleteChild(id int64) { + delete(c.subChans, id) + delete(c.nestedChans, id) + c.deleteSelfIfReady() +} + +func (c *Channel) triggerDelete() { + c.closeCalled = true + c.deleteSelfIfReady() +} + +func (c *Channel) getParentID() int64 { + if c.Parent == nil { + return -1 + } + return c.Parent.ID +} + +// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means +// deleting the channel reference from its parent's child list. +// +// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the +// corresponding grpc object has been invoked, and the channel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. +func (c *Channel) deleteSelfFromTree() (deleted bool) { + if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { + return false + } + // not top channel + if c.Parent != nil { + c.Parent.deleteChild(c.ID) + } + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means +// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the +// channel, and its memory will be garbage collected. +// +// The trace reference count of the channel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. 
+//
+// It returns a bool to indicate whether the channel can be safely deleted from the map.
+func (c *Channel) deleteSelfFromMap() (delete bool) {
+	return c.getTraceRefCount() == 0
+}
+
+// deleteSelfIfReady tries to delete the channel itself from the channelz database.
+// The delete process includes two steps:
+//  1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
+//     parent's child list.
+//  2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
+//     will return an entry-not-found error.
+func (c *Channel) deleteSelfIfReady() {
+	if !c.deleteSelfFromTree() {
+		return
+	}
+	if !c.deleteSelfFromMap() {
+		return
+	}
+	db.deleteEntry(c.ID)
+	c.trace.clear()
+}
+
+func (c *Channel) getChannelTrace() *ChannelTrace {
+	return c.trace
+}
+
+func (c *Channel) incrTraceRefCount() {
+	atomic.AddInt32(&c.traceRefCount, 1)
+}
+
+func (c *Channel) decrTraceRefCount() {
+	atomic.AddInt32(&c.traceRefCount, -1)
+}
+
+func (c *Channel) getTraceRefCount() int {
+	i := atomic.LoadInt32(&c.traceRefCount)
+	return int(i)
+}
+
+func (c *Channel) getRefName() string {
+	return c.RefName
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
new file mode 100644
index 000000000..dfe18b089
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
@@ -0,0 +1,402 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+	"time"
+)
+
+// entry represents a node in the channelz database.
+type entry interface {
+	// addChild adds child e, whose channelz id is id, to the child list.
+	addChild(id int64, e entry)
+	// deleteChild deletes the child with channelz id equal to id from the child list.
+	deleteChild(id int64)
+	// triggerDelete tries to delete self from the channelz database. However, if
+	// the child list is not empty, deletion from the database is on hold until
+	// the last child has been deleted from the database.
+	triggerDelete()
+	// deleteSelfIfReady checks whether triggerDelete() has been called before
+	// and whether the child list is now empty. If both conditions are met, it
+	// deletes self from the database.
+	deleteSelfIfReady()
+	// getParentID returns the parent ID of the entry. A parent ID of 0 means no parent.
+	getParentID() int64
+	Entity
+}
+
+// channelMap is the storage data structure for channelz.
+//
+// Methods of channelMap can be divided into two categories with respect to
+// locking.
+//
+// 1. Methods that acquire the global lock.
+// 2. Methods that can only be called when the global lock is held.
+//
+// Methods of the second type must always be called from within a method of the
+// first type.
+type channelMap struct { + mu sync.RWMutex + topLevelChannels map[int64]struct{} + channels map[int64]*Channel + subChannels map[int64]*SubChannel + sockets map[int64]*Socket + servers map[int64]*Server +} + +func newChannelMap() *channelMap { + return &channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*Channel), + subChannels: make(map[int64]*SubChannel), + sockets: make(map[int64]*Socket), + servers: make(map[int64]*Server), + } +} + +func (c *channelMap) addServer(id int64, s *Server) { + c.mu.Lock() + defer c.mu.Unlock() + s.cm = c + c.servers[id] = s +} + +func (c *channelMap) addChannel(id int64, cn *Channel, isTopChannel bool, pid int64) { + c.mu.Lock() + defer c.mu.Unlock() + cn.trace.cm = c + c.channels[id] = cn + if isTopChannel { + c.topLevelChannels[id] = struct{}{} + } else if p := c.channels[pid]; p != nil { + p.addChild(id, cn) + } else { + logger.Infof("channel %d references invalid parent ID %d", id, pid) + } +} + +func (c *channelMap) addSubChannel(id int64, sc *SubChannel, pid int64) { + c.mu.Lock() + defer c.mu.Unlock() + sc.trace.cm = c + c.subChannels[id] = sc + if p := c.channels[pid]; p != nil { + p.addChild(id, sc) + } else { + logger.Infof("subchannel %d references invalid parent ID %d", id, pid) + } +} + +func (c *channelMap) addSocket(s *Socket) { + c.mu.Lock() + defer c.mu.Unlock() + s.cm = c + c.sockets[s.ID] = s + if s.Parent == nil { + logger.Infof("normal socket %d has no parent", s.ID) + } + s.Parent.(entry).addChild(s.ID, s) +} + +// removeEntry triggers the removal of an entry, which may not indeed delete the +// entry, if it has to wait on the deletion of its children and until no other +// entity's channel trace references it. It may lead to a chain of entry +// deletion. For example, deleting the last socket of a gracefully shutting down +// server will lead to the server being also deleted. +func (c *channelMap) removeEntry(id int64) { + c.mu.Lock() + defer c.mu.Unlock() + c.findEntry(id).triggerDelete() +} + +// tracedChannel represents tracing operations which are present on both +// channels and subChannels. +type tracedChannel interface { + getChannelTrace() *ChannelTrace + incrTraceRefCount() + decrTraceRefCount() + getRefName() string +} + +// c.mu must be held by the caller +func (c *channelMap) decrTraceRefCount(id int64) { + e := c.findEntry(id) + if v, ok := e.(tracedChannel); ok { + v.decrTraceRefCount() + e.deleteSelfIfReady() + } +} + +// c.mu must be held by the caller. +func (c *channelMap) findEntry(id int64) entry { + if v, ok := c.channels[id]; ok { + return v + } + if v, ok := c.subChannels[id]; ok { + return v + } + if v, ok := c.servers[id]; ok { + return v + } + if v, ok := c.sockets[id]; ok { + return v + } + return &dummyEntry{idNotFound: id} +} + +// c.mu must be held by the caller +// +// deleteEntry deletes an entry from the channelMap. Before calling this method, +// caller must check this entry is ready to be deleted, i.e removeEntry() has +// been called on it, and no children still exist. 
+func (c *channelMap) deleteEntry(id int64) entry { + if v, ok := c.sockets[id]; ok { + delete(c.sockets, id) + return v + } + if v, ok := c.subChannels[id]; ok { + delete(c.subChannels, id) + return v + } + if v, ok := c.channels[id]; ok { + delete(c.channels, id) + delete(c.topLevelChannels, id) + return v + } + if v, ok := c.servers[id]; ok { + delete(c.servers, id) + return v + } + return &dummyEntry{idNotFound: id} +} + +func (c *channelMap) traceEvent(id int64, desc *TraceEvent) { + c.mu.Lock() + defer c.mu.Unlock() + child := c.findEntry(id) + childTC, ok := child.(tracedChannel) + if !ok { + return + } + childTC.getChannelTrace().append(&traceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) + if desc.Parent != nil { + parent := c.findEntry(child.getParentID()) + var chanType RefChannelType + switch child.(type) { + case *Channel: + chanType = RefChannel + case *SubChannel: + chanType = RefSubChannel + } + if parentTC, ok := parent.(tracedChannel); ok { + parentTC.getChannelTrace().append(&traceEvent{ + Desc: desc.Parent.Desc, + Severity: desc.Parent.Severity, + Timestamp: time.Now(), + RefID: id, + RefName: childTC.getRefName(), + RefType: chanType, + }) + childTC.incrTraceRefCount() + } + } +} + +type int64Slice []int64 + +func (s int64Slice) Len() int { return len(s) } +func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } + +func copyMap(m map[int64]string) map[int64]string { + n := make(map[int64]string) + for k, v := range m { + n[k] = v + } + return n +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) { + if maxResults <= 0 { + maxResults = EntriesPerPage + } + c.mu.RLock() + defer c.mu.RUnlock() + l := int64(len(c.topLevelChannels)) + ids := make([]int64, 0, l) + + for k := range c.topLevelChannels { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + end := true + var t []*Channel + for _, v := range ids[idx:] { + if len(t) == maxResults { + end = false + break + } + if cn, ok := c.channels[v]; ok { + t = append(t, cn) + } + } + return t, end +} + +func (c *channelMap) getServers(id int64, maxResults int) ([]*Server, bool) { + if maxResults <= 0 { + maxResults = EntriesPerPage + } + c.mu.RLock() + defer c.mu.RUnlock() + ids := make([]int64, 0, len(c.servers)) + for k := range c.servers { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + end := true + var s []*Server + for _, v := range ids[idx:] { + if len(s) == maxResults { + end = false + break + } + if svr, ok := c.servers[v]; ok { + s = append(s, svr) + } + } + return s, end +} + +func (c *channelMap) getServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) { + if maxResults <= 0 { + maxResults = EntriesPerPage + } + c.mu.RLock() + defer c.mu.RUnlock() + svr, ok := c.servers[id] + if !ok { + // server with id doesn't exist. 
+		return nil, true
+	}
+	svrskts := svr.sockets
+	ids := make([]int64, 0, len(svrskts))
+	sks := make([]*Socket, 0, min(len(svrskts), maxResults))
+	for k := range svrskts {
+		ids = append(ids, k)
+	}
+	sort.Sort(int64Slice(ids))
+	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
+	end := true
+	for _, v := range ids[idx:] {
+		if len(sks) == maxResults {
+			end = false
+			break
+		}
+		if ns, ok := c.sockets[v]; ok {
+			sks = append(sks, ns)
+		}
+	}
+	return sks, end
+}
+
+func (c *channelMap) getChannel(id int64) *Channel {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.channels[id]
+}
+
+func (c *channelMap) getSubChannel(id int64) *SubChannel {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.subChannels[id]
+}
+
+func (c *channelMap) getSocket(id int64) *Socket {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.sockets[id]
+}
+
+func (c *channelMap) getServer(id int64) *Server {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.servers[id]
+}
+
+type dummyEntry struct {
+	// dummyEntry is a fake entry to handle the entry-not-found case.
+	idNotFound int64
+	Entity
+}
+
+func (d *dummyEntry) String() string {
+	return fmt.Sprintf("non-existent entity #%d", d.idNotFound)
+}
+
+func (d *dummyEntry) ID() int64 { return d.idNotFound }
+
+func (d *dummyEntry) addChild(id int64, e entry) {
+	// Note: It is possible for a normal program to reach here under a race
+	// condition. For example, there could be a race between ClientConn.Close()
+	// info being propagated to addrConn and http2Client. ClientConn.Close()
+	// cancels the context, causing http2Client to error out. The error info is
+	// then caught by the transport monitor before addrConn.tearDown() is called
+	// inside ClientConn.Close(). Therefore, the addrConn will create a new
+	// transport. And when registering the new transport in channelz, its parent
+	// addrConn could have already been torn down and deleted from channelz
+	// tracking, and thus this code is reached.
+	logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
+}
+
+func (d *dummyEntry) deleteChild(id int64) {
+	// It is possible for a normal program to reach here under a race condition.
+	// Refer to the example described in addChild().
+	logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
+}
+
+func (d *dummyEntry) triggerDelete() {
+	logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
+}
+
+func (*dummyEntry) deleteSelfIfReady() {
+	// Code should not reach here: deleteSelfIfReady is always called on an existing entry.
+}
+
+func (*dummyEntry) getParentID() int64 {
+	return 0
+}
+
+// Entity is implemented by all channelz types.
+type Entity interface {
+	isEntity()
+	fmt.Stringer
+	id() int64
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
index 5395e7752..03e24e150 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -16,25 +16,16 @@
  *
  */
 
-// Package channelz defines APIs for enabling channelz service, entry
+// Package channelz defines internal APIs for enabling channelz service, entry
 // registration/deletion, and accessing channelz data. It also defines channelz
 // metric struct formats.
-//
-// All APIs in this package are experimental.
package channelz import ( - "errors" - "sort" - "sync" "sync/atomic" "time" - "google.golang.org/grpc/grpclog" -) - -const ( - defaultMaxTraceEntry int32 = 30 + "google.golang.org/grpc/internal" ) var ( @@ -42,19 +33,20 @@ var ( // outside this package except by tests. IDGen IDGenerator - db dbWrapper - // EntryPerPage defines the number of channelz entries to be shown on a web page. - EntryPerPage = int64(50) - curState int32 - maxTraceEntry = defaultMaxTraceEntry + db *channelMap = newChannelMap() + // EntriesPerPage defines the number of channelz entries to be shown on a web page. + EntriesPerPage = 50 + curState int32 ) // TurnOn turns on channelz data collection. func TurnOn() { - if !IsOn() { - db.set(newChannelMap()) - IDGen.Reset() - atomic.StoreInt32(&curState, 1) + atomic.StoreInt32(&curState, 1) +} + +func init() { + internal.ChannelzTurnOffForTesting = func() { + atomic.StoreInt32(&curState, 0) } } @@ -63,49 +55,15 @@ func IsOn() bool { return atomic.LoadInt32(&curState) == 1 } -// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). -// Setting it to 0 will disable channel tracing. -func SetMaxTraceEntry(i int32) { - atomic.StoreInt32(&maxTraceEntry, i) -} - -// ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default. -func ResetMaxTraceEntryToDefault() { - atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) -} - -func getMaxTraceEntry() int { - i := atomic.LoadInt32(&maxTraceEntry) - return int(i) -} - -// dbWarpper wraps around a reference to internal channelz data storage, and -// provide synchronized functionality to set and get the reference. -type dbWrapper struct { - mu sync.RWMutex - DB *channelMap -} - -func (d *dbWrapper) set(db *channelMap) { - d.mu.Lock() - d.DB = db - d.mu.Unlock() -} - -func (d *dbWrapper) get() *channelMap { - d.mu.RLock() - defer d.mu.RUnlock() - return d.DB -} - // GetTopChannels returns a slice of top channel's ChannelMetric, along with a // boolean indicating whether there's more top channels to be queried for. // -// The arg id specifies that only top channel with id at or above it will be included -// in the result. The returned slice is up to a length of the arg maxResults or -// EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { - return db.get().GetTopChannels(id, maxResults) +// The arg id specifies that only top channel with id at or above it will be +// included in the result. The returned slice is up to a length of the arg +// maxResults or EntriesPerPage if maxResults is zero, and is sorted in ascending +// id order. +func GetTopChannels(id int64, maxResults int) ([]*Channel, bool) { + return db.getTopChannels(id, maxResults) } // GetServers returns a slice of server's ServerMetric, along with a @@ -113,73 +71,69 @@ func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { // // The arg id specifies that only server with id at or above it will be included // in the result. The returned slice is up to a length of the arg maxResults or -// EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) { - return db.get().GetServers(id, maxResults) +// EntriesPerPage if maxResults is zero, and is sorted in ascending id order. 
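
GetTopChannels above now takes a plain int for maxResults and returns live *Channel values. Its id argument acts as a cursor; a hedged pagination sketch (dumpTopChannels is a hypothetical helper, not code from the diff):

import "google.golang.org/grpc/internal/channelz"

// dumpTopChannels collects every top-level channel page by page, resuming each
// call at one past the highest id seen. maxResults=0 falls back to
// EntriesPerPage.
func dumpTopChannels() []*channelz.Channel {
	var all []*channelz.Channel
	var id int64
	for {
		page, end := channelz.GetTopChannels(id, 0)
		all = append(all, page...)
		if end || len(page) == 0 {
			return all
		}
		id = page[len(page)-1].ID + 1
	}
}
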
+func GetServers(id int64, maxResults int) ([]*Server, bool) { + return db.getServers(id, maxResults) } // GetServerSockets returns a slice of server's (identified by id) normal socket's -// SocketMetric, along with a boolean indicating whether there's more sockets to +// SocketMetrics, along with a boolean indicating whether there's more sockets to // be queried for. // // The arg startID specifies that only sockets with id at or above it will be // included in the result. The returned slice is up to a length of the arg maxResults -// or EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { - return db.get().GetServerSockets(id, startID, maxResults) +// or EntriesPerPage if maxResults is zero, and is sorted in ascending id order. +func GetServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) { + return db.getServerSockets(id, startID, maxResults) } -// GetChannel returns the ChannelMetric for the channel (identified by id). -func GetChannel(id int64) *ChannelMetric { - return db.get().GetChannel(id) +// GetChannel returns the Channel for the channel (identified by id). +func GetChannel(id int64) *Channel { + return db.getChannel(id) } -// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id). -func GetSubChannel(id int64) *SubChannelMetric { - return db.get().GetSubChannel(id) +// GetSubChannel returns the SubChannel for the subchannel (identified by id). +func GetSubChannel(id int64) *SubChannel { + return db.getSubChannel(id) } -// GetSocket returns the SocketInternalMetric for the socket (identified by id). -func GetSocket(id int64) *SocketMetric { - return db.get().GetSocket(id) +// GetSocket returns the Socket for the socket (identified by id). +func GetSocket(id int64) *Socket { + return db.getSocket(id) } // GetServer returns the ServerMetric for the server (identified by id). -func GetServer(id int64) *ServerMetric { - return db.get().GetServer(id) +func GetServer(id int64) *Server { + return db.getServer(id) } // RegisterChannel registers the given channel c in the channelz database with -// ref as its reference name, and adds it to the child list of its parent -// (identified by pid). pid == nil means no parent. +// target as its target and reference name, and adds it to the child list of its +// parent. parent == nil means no parent. // // Returns a unique channelz identifier assigned to this channel. // // If channelz is not turned ON, the channelz database is not mutated. 
-func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { +func RegisterChannel(parent *Channel, target string) *Channel { id := IDGen.genID() - var parent int64 - isTopChannel := true - if pid != nil { - isTopChannel = false - parent = pid.Int() - } if !IsOn() { - return newIdentifer(RefChannel, id, pid) + return &Channel{ID: id} } - cn := &channel{ - refName: ref, - c: c, - subChans: make(map[int64]string), + isTopChannel := parent == nil + + cn := &Channel{ + ID: id, + RefName: target, nestedChans: make(map[int64]string), - id: id, - pid: parent, - trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, + subChans: make(map[int64]string), + Parent: parent, + trace: &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())}, } - db.get().addChannel(id, cn, isTopChannel, parent) - return newIdentifer(RefChannel, id, pid) + cn.ChannelMetrics.Target.Store(&target) + db.addChannel(id, cn, isTopChannel, cn.getParentID()) + return cn } // RegisterSubChannel registers the given subChannel c in the channelz database @@ -189,555 +143,67 @@ func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { // Returns a unique channelz identifier assigned to this subChannel. // // If channelz is not turned ON, the channelz database is not mutated. -func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) { - if pid == nil { - return nil, errors.New("a SubChannel's parent id cannot be nil") - } +func RegisterSubChannel(parent *Channel, ref string) *SubChannel { id := IDGen.genID() - if !IsOn() { - return newIdentifer(RefSubChannel, id, pid), nil + sc := &SubChannel{ + ID: id, + RefName: ref, + parent: parent, } - sc := &subChannel{ - refName: ref, - c: c, - sockets: make(map[int64]string), - id: id, - pid: pid.Int(), - trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, + if !IsOn() { + return sc } - db.get().addSubChannel(id, sc, pid.Int()) - return newIdentifer(RefSubChannel, id, pid), nil + + sc.sockets = make(map[int64]string) + sc.trace = &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())} + db.addSubChannel(id, sc, parent.ID) + return sc } // RegisterServer registers the given server s in channelz database. It returns // the unique channelz tracking id assigned to this server. // // If channelz is not turned ON, the channelz database is not mutated. -func RegisterServer(s Server, ref string) *Identifier { +func RegisterServer(ref string) *Server { id := IDGen.genID() if !IsOn() { - return newIdentifer(RefServer, id, nil) + return &Server{ID: id} } - svr := &server{ - refName: ref, - s: s, + svr := &Server{ + RefName: ref, sockets: make(map[int64]string), listenSockets: make(map[int64]string), - id: id, - } - db.get().addServer(id, svr) - return newIdentifer(RefServer, id, nil) -} - -// RegisterListenSocket registers the given listen socket s in channelz database -// with ref as its reference name, and add it to the child list of its parent -// (identified by pid). It returns the unique channelz tracking id assigned to -// this listen socket. -// -// If channelz is not turned ON, the channelz database is not mutated. 
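
The registration API above now returns entities directly instead of *Identifier handles, and parentage is expressed with a *Channel rather than a pid. A sketch of the new wiring; the target string, the ref name, and registerSketch itself are illustrative assumptions:

import "google.golang.org/grpc/internal/channelz"

func registerSketch() {
	// A nil parent makes this a top-level channel; the target doubles as its
	// reference name.
	ch := channelz.RegisterChannel(nil, "dns:///example.com:443")

	// Subchannels now hang off their parent *Channel directly; no error return.
	sc := channelz.RegisterSubChannel(ch, "subchannel-0")

	// Removal takes the plain int64 id instead of an *Identifier. Deferred LIFO
	// order removes the subchannel before its parent.
	defer channelz.RemoveEntry(ch.ID)
	defer channelz.RemoveEntry(sc.ID)
}
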
-func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { - if pid == nil { - return nil, errors.New("a ListenSocket's parent id cannot be 0") + ID: id, } - id := IDGen.genID() - if !IsOn() { - return newIdentifer(RefListenSocket, id, pid), nil - } - - ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()} - db.get().addListenSocket(id, ls, pid.Int()) - return newIdentifer(RefListenSocket, id, pid), nil + db.addServer(id, svr) + return svr } -// RegisterNormalSocket registers the given normal socket s in channelz database +// RegisterSocket registers the given normal socket s in channelz database // with ref as its reference name, and adds it to the child list of its parent -// (identified by pid). It returns the unique channelz tracking id assigned to -// this normal socket. +// (identified by skt.Parent, which must be set). It returns the unique channelz +// tracking id assigned to this normal socket. // // If channelz is not turned ON, the channelz database is not mutated. -func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { - if pid == nil { - return nil, errors.New("a NormalSocket's parent id cannot be 0") - } - id := IDGen.genID() - if !IsOn() { - return newIdentifer(RefNormalSocket, id, pid), nil +func RegisterSocket(skt *Socket) *Socket { + skt.ID = IDGen.genID() + if IsOn() { + db.addSocket(skt) } - - ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()} - db.get().addNormalSocket(id, ns, pid.Int()) - return newIdentifer(RefNormalSocket, id, pid), nil + return skt } // RemoveEntry removes an entry with unique channelz tracking id to be id from // channelz database. // // If channelz is not turned ON, this function is a no-op. -func RemoveEntry(id *Identifier) { +func RemoveEntry(id int64) { if !IsOn() { return } - db.get().removeEntry(id.Int()) -} - -// TraceEventDesc is what the caller of AddTraceEvent should provide to describe -// the event to be added to the channel trace. -// -// The Parent field is optional. It is used for an event that will be recorded -// in the entity's parent trace. -type TraceEventDesc struct { - Desc string - Severity Severity - Parent *TraceEventDesc -} - -// AddTraceEvent adds trace related to the entity with specified id, using the -// provided TraceEventDesc. -// -// If channelz is not turned ON, this will simply log the event descriptions. -func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) { - // Log only the trace description associated with the bottom most entity. - switch desc.Severity { - case CtUnknown, CtInfo: - l.InfoDepth(depth+1, withParens(id)+desc.Desc) - case CtWarning: - l.WarningDepth(depth+1, withParens(id)+desc.Desc) - case CtError: - l.ErrorDepth(depth+1, withParens(id)+desc.Desc) - } - - if getMaxTraceEntry() == 0 { - return - } - if IsOn() { - db.get().traceEvent(id.Int(), desc) - } -} - -// channelMap is the storage data structure for channelz. -// Methods of channelMap can be divided in two two categories with respect to locking. -// 1. Methods acquire the global lock. -// 2. Methods that can only be called when global lock is held. -// A second type of method need always to be called inside a first type of method. 
-type channelMap struct { - mu sync.RWMutex - topLevelChannels map[int64]struct{} - servers map[int64]*server - channels map[int64]*channel - subChannels map[int64]*subChannel - listenSockets map[int64]*listenSocket - normalSockets map[int64]*normalSocket -} - -func newChannelMap() *channelMap { - return &channelMap{ - topLevelChannels: make(map[int64]struct{}), - channels: make(map[int64]*channel), - listenSockets: make(map[int64]*listenSocket), - normalSockets: make(map[int64]*normalSocket), - servers: make(map[int64]*server), - subChannels: make(map[int64]*subChannel), - } -} - -func (c *channelMap) addServer(id int64, s *server) { - c.mu.Lock() - s.cm = c - c.servers[id] = s - c.mu.Unlock() -} - -func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) { - c.mu.Lock() - cn.cm = c - cn.trace.cm = c - c.channels[id] = cn - if isTopChannel { - c.topLevelChannels[id] = struct{}{} - } else { - c.findEntry(pid).addChild(id, cn) - } - c.mu.Unlock() -} - -func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) { - c.mu.Lock() - sc.cm = c - sc.trace.cm = c - c.subChannels[id] = sc - c.findEntry(pid).addChild(id, sc) - c.mu.Unlock() -} - -func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) { - c.mu.Lock() - ls.cm = c - c.listenSockets[id] = ls - c.findEntry(pid).addChild(id, ls) - c.mu.Unlock() -} - -func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) { - c.mu.Lock() - ns.cm = c - c.normalSockets[id] = ns - c.findEntry(pid).addChild(id, ns) - c.mu.Unlock() -} - -// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to -// wait on the deletion of its children and until no other entity's channel trace references it. -// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully -// shutting down server will lead to the server being also deleted. -func (c *channelMap) removeEntry(id int64) { - c.mu.Lock() - c.findEntry(id).triggerDelete() - c.mu.Unlock() -} - -// c.mu must be held by the caller -func (c *channelMap) decrTraceRefCount(id int64) { - e := c.findEntry(id) - if v, ok := e.(tracedChannel); ok { - v.decrTraceRefCount() - e.deleteSelfIfReady() - } -} - -// c.mu must be held by the caller. -func (c *channelMap) findEntry(id int64) entry { - var v entry - var ok bool - if v, ok = c.channels[id]; ok { - return v - } - if v, ok = c.subChannels[id]; ok { - return v - } - if v, ok = c.servers[id]; ok { - return v - } - if v, ok = c.listenSockets[id]; ok { - return v - } - if v, ok = c.normalSockets[id]; ok { - return v - } - return &dummyEntry{idNotFound: id} -} - -// c.mu must be held by the caller -// deleteEntry simply deletes an entry from the channelMap. Before calling this -// method, caller must check this entry is ready to be deleted, i.e removeEntry() -// has been called on it, and no children still exist. -// Conditionals are ordered by the expected frequency of deletion of each entity -// type, in order to optimize performance. 
-func (c *channelMap) deleteEntry(id int64) { - var ok bool - if _, ok = c.normalSockets[id]; ok { - delete(c.normalSockets, id) - return - } - if _, ok = c.subChannels[id]; ok { - delete(c.subChannels, id) - return - } - if _, ok = c.channels[id]; ok { - delete(c.channels, id) - delete(c.topLevelChannels, id) - return - } - if _, ok = c.listenSockets[id]; ok { - delete(c.listenSockets, id) - return - } - if _, ok = c.servers[id]; ok { - delete(c.servers, id) - return - } -} - -func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) { - c.mu.Lock() - child := c.findEntry(id) - childTC, ok := child.(tracedChannel) - if !ok { - c.mu.Unlock() - return - } - childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) - if desc.Parent != nil { - parent := c.findEntry(child.getParentID()) - var chanType RefChannelType - switch child.(type) { - case *channel: - chanType = RefChannel - case *subChannel: - chanType = RefSubChannel - } - if parentTC, ok := parent.(tracedChannel); ok { - parentTC.getChannelTrace().append(&TraceEvent{ - Desc: desc.Parent.Desc, - Severity: desc.Parent.Severity, - Timestamp: time.Now(), - RefID: id, - RefName: childTC.getRefName(), - RefType: chanType, - }) - childTC.incrTraceRefCount() - } - } - c.mu.Unlock() -} - -type int64Slice []int64 - -func (s int64Slice) Len() int { return len(s) } -func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } - -func copyMap(m map[int64]string) map[int64]string { - n := make(map[int64]string) - for k, v := range m { - n[k] = v - } - return n -} - -func min(a, b int64) int64 { - if a < b { - return a - } - return b -} - -func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } - c.mu.RLock() - l := int64(len(c.topLevelChannels)) - ids := make([]int64, 0, l) - cns := make([]*channel, 0, min(l, maxResults)) - - for k := range c.topLevelChannels { - ids = append(ids, k) - } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) - count := int64(0) - var end bool - var t []*ChannelMetric - for i, v := range ids[idx:] { - if count == maxResults { - break - } - if cn, ok := c.channels[v]; ok { - cns = append(cns, cn) - t = append(t, &ChannelMetric{ - NestedChans: copyMap(cn.nestedChans), - SubChans: copyMap(cn.subChans), - }) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() - if count == 0 { - end = true - } - - for i, cn := range cns { - t[i].ChannelData = cn.c.ChannelzMetric() - t[i].ID = cn.id - t[i].RefName = cn.refName - t[i].Trace = cn.trace.dumpData() - } - return t, end -} - -func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } - c.mu.RLock() - l := int64(len(c.servers)) - ids := make([]int64, 0, l) - ss := make([]*server, 0, min(l, maxResults)) - for k := range c.servers { - ids = append(ids, k) - } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) - count := int64(0) - var end bool - var s []*ServerMetric - for i, v := range ids[idx:] { - if count == maxResults { - break - } - if svr, ok := c.servers[v]; ok { - ss = append(ss, svr) - s = append(s, &ServerMetric{ - ListenSockets: copyMap(svr.listenSockets), - }) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() 
- if count == 0 { - end = true - } - - for i, svr := range ss { - s[i].ServerData = svr.s.ChannelzMetric() - s[i].ID = svr.id - s[i].RefName = svr.refName - } - return s, end -} - -func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } - var svr *server - var ok bool - c.mu.RLock() - if svr, ok = c.servers[id]; !ok { - // server with id doesn't exist. - c.mu.RUnlock() - return nil, true - } - svrskts := svr.sockets - l := int64(len(svrskts)) - ids := make([]int64, 0, l) - sks := make([]*normalSocket, 0, min(l, maxResults)) - for k := range svrskts { - ids = append(ids, k) - } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) - count := int64(0) - var end bool - for i, v := range ids[idx:] { - if count == maxResults { - break - } - if ns, ok := c.normalSockets[v]; ok { - sks = append(sks, ns) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() - if count == 0 { - end = true - } - s := make([]*SocketMetric, 0, len(sks)) - for _, ns := range sks { - sm := &SocketMetric{} - sm.SocketData = ns.s.ChannelzMetric() - sm.ID = ns.id - sm.RefName = ns.refName - s = append(s, sm) - } - return s, end -} - -func (c *channelMap) GetChannel(id int64) *ChannelMetric { - cm := &ChannelMetric{} - var cn *channel - var ok bool - c.mu.RLock() - if cn, ok = c.channels[id]; !ok { - // channel with id doesn't exist. - c.mu.RUnlock() - return nil - } - cm.NestedChans = copyMap(cn.nestedChans) - cm.SubChans = copyMap(cn.subChans) - // cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when - // holding the lock to prevent potential data race. - chanCopy := cn.c - c.mu.RUnlock() - cm.ChannelData = chanCopy.ChannelzMetric() - cm.ID = cn.id - cm.RefName = cn.refName - cm.Trace = cn.trace.dumpData() - return cm -} - -func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { - cm := &SubChannelMetric{} - var sc *subChannel - var ok bool - c.mu.RLock() - if sc, ok = c.subChannels[id]; !ok { - // subchannel with id doesn't exist. - c.mu.RUnlock() - return nil - } - cm.Sockets = copyMap(sc.sockets) - // sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when - // holding the lock to prevent potential data race. - chanCopy := sc.c - c.mu.RUnlock() - cm.ChannelData = chanCopy.ChannelzMetric() - cm.ID = sc.id - cm.RefName = sc.refName - cm.Trace = sc.trace.dumpData() - return cm -} - -func (c *channelMap) GetSocket(id int64) *SocketMetric { - sm := &SocketMetric{} - c.mu.RLock() - if ls, ok := c.listenSockets[id]; ok { - c.mu.RUnlock() - sm.SocketData = ls.s.ChannelzMetric() - sm.ID = ls.id - sm.RefName = ls.refName - return sm - } - if ns, ok := c.normalSockets[id]; ok { - c.mu.RUnlock() - sm.SocketData = ns.s.ChannelzMetric() - sm.ID = ns.id - sm.RefName = ns.refName - return sm - } - c.mu.RUnlock() - return nil -} - -func (c *channelMap) GetServer(id int64) *ServerMetric { - sm := &ServerMetric{} - var svr *server - var ok bool - c.mu.RLock() - if svr, ok = c.servers[id]; !ok { - c.mu.RUnlock() - return nil - } - sm.ListenSockets = copyMap(svr.listenSockets) - c.mu.RUnlock() - sm.ID = svr.id - sm.RefName = svr.refName - sm.ServerData = svr.s.ChannelzMetric() - return sm + db.removeEntry(id) } // IDGenerator is an incrementing atomic that tracks IDs for channelz entities. 
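
The snapshot getters removed above (returning ChannelMetric, ServerMetric, and SocketMetric copies) give way to live entities whose counters are atomics. A short sketch of reading the new form (printChannel is hypothetical):

import (
	"fmt"

	"google.golang.org/grpc/internal/channelz"
)

func printChannel(id int64) {
	ch := channelz.GetChannel(id)
	if ch == nil {
		return // no longer returns a partially filled metric struct
	}
	m := &ch.ChannelMetrics // take a pointer: the struct holds atomics
	fmt.Printf("%s: started=%d ok=%d failed=%d\n",
		ch, m.CallsStarted.Load(), m.CallsSucceeded.Load(), m.CallsFailed.Load())
}
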
@@ -754,3 +220,11 @@ func (i *IDGenerator) Reset() { func (i *IDGenerator) genID() int64 { return atomic.AddInt64(&i.id, 1) } + +// Identifier is an opaque channelz identifier used to expose channelz symbols +// outside of grpc. Currently only implemented by Channel since no other +// types require exposure outside grpc. +type Identifier interface { + Entity + channelzIdentifier() +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go deleted file mode 100644 index c9a27acd3..000000000 --- a/vendor/google.golang.org/grpc/internal/channelz/id.go +++ /dev/null @@ -1,75 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -import "fmt" - -// Identifier is an opaque identifier which uniquely identifies an entity in the -// channelz database. -type Identifier struct { - typ RefChannelType - id int64 - str string - pid *Identifier -} - -// Type returns the entity type corresponding to id. -func (id *Identifier) Type() RefChannelType { - return id.typ -} - -// Int returns the integer identifier corresponding to id. -func (id *Identifier) Int() int64 { - return id.id -} - -// String returns a string representation of the entity corresponding to id. -// -// This includes some information about the parent as well. Examples: -// Top-level channel: [Channel #channel-number] -// Nested channel: [Channel #parent-channel-number Channel #channel-number] -// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] -func (id *Identifier) String() string { - return id.str -} - -// Equal returns true if other is the same as id. -func (id *Identifier) Equal(other *Identifier) bool { - if (id != nil) != (other != nil) { - return false - } - if id == nil && other == nil { - return true - } - return id.typ == other.typ && id.id == other.id && id.pid == other.pid -} - -// NewIdentifierForTesting returns a new opaque identifier to be used only for -// testing purposes. -func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { - return newIdentifer(typ, id, pid) -} - -func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { - str := fmt.Sprintf("%s #%d", typ, id) - if pid != nil { - str = fmt.Sprintf("%s %s", pid, str) - } - return &Identifier{typ: typ, id: id, str: str, pid: pid} -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index f89e6f77b..ee4d72125 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -26,53 +26,49 @@ import ( var logger = grpclog.Component("channelz") -func withParens(id *Identifier) string { - return "[" + id.String() + "] " -} - // Info logs and adds a trace event if channelz is on. 
-func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ +func Info(l grpclog.DepthLoggerV2, e Entity, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ Desc: fmt.Sprint(args...), Severity: CtInfo, }) } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ +func Infof(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ Desc: fmt.Sprintf(format, args...), Severity: CtInfo, }) } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ +func Warning(l grpclog.DepthLoggerV2, e Entity, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ Desc: fmt.Sprint(args...), Severity: CtWarning, }) } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ +func Warningf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ Desc: fmt.Sprintf(format, args...), Severity: CtWarning, }) } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ +func Error(l grpclog.DepthLoggerV2, e Entity, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ Desc: fmt.Sprint(args...), Severity: CtError, }) } // Errorf logs and adds a trace event if channelz is on. -func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ +func Errorf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ Desc: fmt.Sprintf(format, args...), Severity: CtError, }) diff --git a/vendor/google.golang.org/grpc/internal/channelz/server.go b/vendor/google.golang.org/grpc/internal/channelz/server.go new file mode 100644 index 000000000..cdfc49d6e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/server.go @@ -0,0 +1,119 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "sync/atomic" +) + +// Server is the channelz representation of a server. +type Server struct { + Entity + ID int64 + RefName string + + ServerMetrics ServerMetrics + + closeCalled bool + sockets map[int64]string + listenSockets map[int64]string + cm *channelMap +} + +// ServerMetrics defines a struct containing metrics for servers. +type ServerMetrics struct { + // The number of incoming calls started on the server. + CallsStarted atomic.Int64 + // The number of incoming calls that have completed with an OK status. + CallsSucceeded atomic.Int64 + // The number of incoming calls that have completed with a non-OK status.
+ CallsFailed atomic.Int64 + // The last time a call was started on the server. + LastCallStartedTimestamp atomic.Int64 +} + +// NewServerMetricsForTesting returns an initialized ServerMetrics. +func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *ServerMetrics { + sm := &ServerMetrics{} + sm.CallsStarted.Store(started) + sm.CallsSucceeded.Store(succeeded) + sm.CallsFailed.Store(failed) + sm.LastCallStartedTimestamp.Store(timestamp) + return sm +} + +func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) { + sm.CallsStarted.Store(o.CallsStarted.Load()) + sm.CallsSucceeded.Store(o.CallsSucceeded.Load()) + sm.CallsFailed.Store(o.CallsFailed.Load()) + sm.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load()) +} + +// ListenSockets returns the listening sockets for s. +func (s *Server) ListenSockets() map[int64]string { + db.mu.RLock() + defer db.mu.RUnlock() + return copyMap(s.listenSockets) +} + +// String returns a printable description of s. +func (s *Server) String() string { + return fmt.Sprintf("Server #%d", s.ID) +} + +func (s *Server) id() int64 { + return s.ID +} + +func (s *Server) addChild(id int64, e entry) { + switch v := e.(type) { + case *Socket: + switch v.SocketType { + case SocketTypeNormal: + s.sockets[id] = v.RefName + case SocketTypeListen: + s.listenSockets[id] = v.RefName + } + default: + logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) + } +} + +func (s *Server) deleteChild(id int64) { + delete(s.sockets, id) + delete(s.listenSockets, id) + s.deleteSelfIfReady() +} + +func (s *Server) triggerDelete() { + s.closeCalled = true + s.deleteSelfIfReady() +} + +func (s *Server) deleteSelfIfReady() { + if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { + return + } + s.cm.deleteEntry(s.ID) +} + +func (s *Server) getParentID() int64 { + return 0 +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/socket.go b/vendor/google.golang.org/grpc/internal/channelz/socket.go new file mode 100644 index 000000000..fa64834b2 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/socket.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "net" + "sync/atomic" + + "google.golang.org/grpc/credentials" +) + +// SocketMetrics defines the struct that the implementor of Socket interface +// should return from ChannelzMetric(). +type SocketMetrics struct { + // The number of streams that have been started. + StreamsStarted atomic.Int64 + // The number of streams that have ended successfully: + // On client side, receiving frame with eos bit set. + // On server side, sending frame with eos bit set. + StreamsSucceeded atomic.Int64 + // The number of streams that have ended unsuccessfully: + // On client side, termination without receiving frame with eos bit set. + // On server side, termination without sending frame with eos bit set. 
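The ServerMetrics fields above (and the SocketMetrics fields that continue below) replace the old snapshot-style ChannelzMetric() reporting with atomic.Int64 counters that are mutated in place. A rough sketch of how such counters are typically driven; the helper names here are hypothetical, and the real call sites live elsewhere in grpc:

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// callCounters follows the same shape as ServerMetrics: lock-free atomic
// fields that RPC handlers bump directly, with timestamps stored as Unix nanos.
type callCounters struct {
	CallsStarted             atomic.Int64
	CallsSucceeded           atomic.Int64
	CallsFailed              atomic.Int64
	LastCallStartedTimestamp atomic.Int64
}

// onCallBegin and onCallEnd are hypothetical call sites; in grpc the
// equivalent updates happen in the server's stream-handling code.
func (c *callCounters) onCallBegin() {
	c.CallsStarted.Add(1)
	c.LastCallStartedTimestamp.Store(time.Now().UnixNano())
}

func (c *callCounters) onCallEnd(ok bool) {
	if ok {
		c.CallsSucceeded.Add(1)
	} else {
		c.CallsFailed.Add(1)
	}
}

func main() {
	var c callCounters
	c.onCallBegin()
	c.onCallEnd(true)
	fmt.Println(c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load()) // 1 1 0
}
```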
+ StreamsFailed atomic.Int64 + // The number of messages successfully sent on this socket. + MessagesSent atomic.Int64 + MessagesReceived atomic.Int64 + // The number of keep alives sent. This is typically implemented with HTTP/2 + // ping messages. + KeepAlivesSent atomic.Int64 + // The last time a stream was created by this endpoint. Usually unset for + // servers. + LastLocalStreamCreatedTimestamp atomic.Int64 + // The last time a stream was created by the remote endpoint. Usually unset + // for clients. + LastRemoteStreamCreatedTimestamp atomic.Int64 + // The last time a message was sent by this endpoint. + LastMessageSentTimestamp atomic.Int64 + // The last time a message was received by this endpoint. + LastMessageReceivedTimestamp atomic.Int64 +} + +// EphemeralSocketMetrics are metrics that change rapidly and are tracked +// outside of channelz. +type EphemeralSocketMetrics struct { + // The amount of window, granted to the local endpoint by the remote endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + LocalFlowControlWindow int64 + // The amount of window, granted to the remote endpoint by the local endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + RemoteFlowControlWindow int64 +} + +type SocketType string + +const ( + SocketTypeNormal = "NormalSocket" + SocketTypeListen = "ListenSocket" +) + +type Socket struct { + Entity + SocketType SocketType + ID int64 + Parent Entity + cm *channelMap + SocketMetrics SocketMetrics + EphemeralMetrics func() *EphemeralSocketMetrics + + RefName string + // The locally bound address. Immutable. + LocalAddr net.Addr + // The remote bound address. May be absent. Immutable. + RemoteAddr net.Addr + // Optional, represents the name of the remote endpoint, if different than + // the original target name. Immutable. + RemoteName string + // Immutable. + SocketOptions *SocketOptionData + // Immutable. + Security credentials.ChannelzSecurityValue +} + +func (ls *Socket) String() string { + return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID) +} + +func (ls *Socket) id() int64 { + return ls.ID +} + +func (ls *Socket) addChild(id int64, e entry) { + logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) +} + +func (ls *Socket) deleteChild(id int64) { + logger.Errorf("cannot delete a child (id = %d) from a listen socket", id) +} + +func (ls *Socket) triggerDelete() { + ls.cm.deleteEntry(ls.ID) + ls.Parent.(entry).deleteChild(ls.ID) +} + +func (ls *Socket) deleteSelfIfReady() { + logger.Errorf("cannot call deleteSelfIfReady on a listen socket") +} + +func (ls *Socket) getParentID() int64 { + return ls.Parent.id() +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go new file mode 100644 index 000000000..3b88e4cba --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go @@ -0,0 +1,151 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "sync/atomic" +) + +// SubChannel is the channelz representation of a subchannel. +type SubChannel struct { + Entity + // ID is the channelz id of this subchannel. + ID int64 + // RefName is the human readable reference string of this subchannel. + RefName string + closeCalled bool + sockets map[int64]string + parent *Channel + trace *ChannelTrace + traceRefCount int32 + + ChannelMetrics ChannelMetrics +} + +func (sc *SubChannel) String() string { + return fmt.Sprintf("%s SubChannel #%d", sc.parent, sc.ID) +} + +func (sc *SubChannel) id() int64 { + return sc.ID +} + +func (sc *SubChannel) Sockets() map[int64]string { + db.mu.RLock() + defer db.mu.RUnlock() + return copyMap(sc.sockets) +} + +func (sc *SubChannel) Trace() *ChannelTrace { + db.mu.RLock() + defer db.mu.RUnlock() + return sc.trace.copy() +} + +func (sc *SubChannel) addChild(id int64, e entry) { + if v, ok := e.(*Socket); ok && v.SocketType == SocketTypeNormal { + sc.sockets[id] = v.RefName + } else { + logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + } +} + +func (sc *SubChannel) deleteChild(id int64) { + delete(sc.sockets, id) + sc.deleteSelfIfReady() +} + +func (sc *SubChannel) triggerDelete() { + sc.closeCalled = true + sc.deleteSelfIfReady() +} + +func (sc *SubChannel) getParentID() int64 { + return sc.parent.ID +} + +// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which +// means deleting the subchannel reference from its parent's child list. +// +// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of +// the corresponding grpc object has been invoked, and the subchannel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. +func (sc *SubChannel) deleteSelfFromTree() (deleted bool) { + if !sc.closeCalled || len(sc.sockets) != 0 { + return false + } + sc.parent.deleteChild(sc.ID) + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means +// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query +// the subchannel, and its memory will be garbage collected. +// +// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (sc *SubChannel) deleteSelfFromMap() (delete bool) { + return sc.getTraceRefCount() == 0 +} + +// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. +// The delete process includes two steps: +// 1. 
delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. +func (sc *SubChannel) deleteSelfIfReady() { + if !sc.deleteSelfFromTree() { + return + } + if !sc.deleteSelfFromMap() { + return + } + db.deleteEntry(sc.ID) + sc.trace.clear() +} + +func (sc *SubChannel) getChannelTrace() *ChannelTrace { + return sc.trace +} + +func (sc *SubChannel) incrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, 1) +} + +func (sc *SubChannel) decrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, -1) +} + +func (sc *SubChannel) getTraceRefCount() int { + i := atomic.LoadInt32(&sc.traceRefCount) + return int(i) +} + +func (sc *SubChannel) getRefName() string { + return sc.RefName +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go similarity index 83% rename from vendor/google.golang.org/grpc/internal/channelz/types_linux.go rename to vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go index 1b1c4cce3..5ac73ff83 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go @@ -49,3 +49,17 @@ func (s *SocketOptionData) Getsockopt(fd uintptr) { s.TCPInfo = v } } + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(socket any) *SocketOptionData { + c, ok := socket.(syscall.Conn) + if !ok { + return nil + } + data := &SocketOptionData{} + if rawConn, err := c.SyscallConn(); err == nil { + rawConn.Control(data.Getsockopt) + return data + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go similarity index 90% rename from vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go rename to vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go index 8b06eed1a..d1ed8df6a 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux /* * @@ -41,3 +40,8 @@ func (s *SocketOptionData) Getsockopt(fd uintptr) { logger.Warning("Channelz: socket options are not supported on non-linux environments") }) } + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(c any) *SocketOptionData { + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go new file mode 100644 index 000000000..36b867403 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go @@ -0,0 +1,204 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package channelz + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/grpclog" +) + +const ( + defaultMaxTraceEntry int32 = 30 +) + +var maxTraceEntry = defaultMaxTraceEntry + +// SetMaxTraceEntry sets maximum number of trace entries per entity (i.e. +// channel/subchannel). Setting it to 0 will disable channel tracing. +func SetMaxTraceEntry(i int32) { + atomic.StoreInt32(&maxTraceEntry, i) +} + +// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per +// entity to default. +func ResetMaxTraceEntryToDefault() { + atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) +} + +func getMaxTraceEntry() int { + i := atomic.LoadInt32(&maxTraceEntry) + return int(i) +} + +// traceEvent is an internal representation of a single trace event +type traceEvent struct { + // Desc is a simple description of the trace event. + Desc string + // Severity states the severity of this trace event. + Severity Severity + // Timestamp is the event time. + Timestamp time.Time + // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is + // involved in this event. + // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) + RefID int64 + // RefName is the reference name for the entity that gets referenced in the event. + RefName string + // RefType indicates the referenced entity type, i.e Channel or SubChannel. + RefType RefChannelType +} + +// TraceEvent is what the caller of AddTraceEvent should provide to describe the +// event to be added to the channel trace. +// +// The Parent field is optional. It is used for an event that will be recorded +// in the entity's parent trace. +type TraceEvent struct { + Desc string + Severity Severity + Parent *TraceEvent +} + +type ChannelTrace struct { + cm *channelMap + clearCalled bool + CreationTime time.Time + EventNum int64 + mu sync.Mutex + Events []*traceEvent +} + +func (c *ChannelTrace) copy() *ChannelTrace { + return &ChannelTrace{ + CreationTime: c.CreationTime, + EventNum: c.EventNum, + Events: append(([]*traceEvent)(nil), c.Events...), + } +} + +func (c *ChannelTrace) append(e *traceEvent) { + c.mu.Lock() + if len(c.Events) == getMaxTraceEntry() { + del := c.Events[0] + c.Events = c.Events[1:] + if del.RefID != 0 { + // start recursive cleanup in a goroutine to not block the call originated from grpc. + go func() { + // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. + c.cm.mu.Lock() + c.cm.decrTraceRefCount(del.RefID) + c.cm.mu.Unlock() + }() + } + } + e.Timestamp = time.Now() + c.Events = append(c.Events, e) + c.EventNum++ + c.mu.Unlock() +} + +func (c *ChannelTrace) clear() { + if c.clearCalled { + return + } + c.clearCalled = true + c.mu.Lock() + for _, e := range c.Events { + if e.RefID != 0 { + // caller should have already held the c.cm.mu lock. + c.cm.decrTraceRefCount(e.RefID) + } + } + c.mu.Unlock() +} + +// Severity is the severity level of a trace event. +// The canonical enumeration of all valid values is here: +// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. +type Severity int + +const ( + // CtUnknown indicates unknown severity of a trace event. + CtUnknown Severity = iota + // CtInfo indicates info level severity of a trace event. + CtInfo + // CtWarning indicates warning level severity of a trace event. + CtWarning + // CtError indicates error level severity of a trace event. 
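ChannelTrace.append above implements a bounded trace buffer: once the event list reaches getMaxTraceEntry(), the oldest event is evicted before the new one is appended, with ref-count cleanup deferred to a goroutine. The list mechanics in isolation, as a simplified sketch without locking or ref counting:

```go
package main

import "fmt"

// boundedTrace keeps at most max events; appending beyond the bound evicts
// the oldest entry first, while eventNum keeps counting evicted events too,
// matching ChannelTrace.append (minus locking and ref-count cleanup).
type boundedTrace struct {
	max      int
	eventNum int64
	events   []string
}

func (t *boundedTrace) append(desc string) {
	if len(t.events) == t.max {
		t.events = t.events[1:] // evict the oldest event
	}
	t.events = append(t.events, desc)
	t.eventNum++
}

func main() {
	t := &boundedTrace{max: 2}
	t.append("created")
	t.append("connecting")
	t.append("ready")
	fmt.Println(t.events, t.eventNum) // [connecting ready] 3
}
```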
+ CtError +) + +// RefChannelType is the type of the entity being referenced in a trace event. +type RefChannelType int + +const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota + // RefChannel indicates the referenced entity is a Channel. + RefChannel + // RefSubChannel indicates the referenced entity is a SubChannel. + RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. + RefNormalSocket +) + +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + +// AddTraceEvent adds trace related to the given entity, using the provided +// TraceEvent. +// +// If channelz is not turned ON, this will simply log the event descriptions. +func AddTraceEvent(l grpclog.DepthLoggerV2, e Entity, depth int, desc *TraceEvent) { + // Log only the trace description associated with the bottom most entity. + d := fmt.Sprintf("[%s]%s", e, desc.Desc) + switch desc.Severity { + case CtUnknown, CtInfo: + l.InfoDepth(depth+1, d) + case CtWarning: + l.WarningDepth(depth+1, d) + case CtError: + l.ErrorDepth(depth+1, d) + } + + if getMaxTraceEntry() == 0 { + return + } + if IsOn() { + db.traceEvent(e.id(), desc) + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go deleted file mode 100644 index 1d4020f53..000000000 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ /dev/null @@ -1,727 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -import ( - "net" - "sync" - "sync/atomic" - "time" - - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" -) - -// entry represents a node in the channelz database. -type entry interface { - // addChild adds a child e, whose channelz id is id to child list - addChild(id int64, e entry) - // deleteChild deletes a child with channelz id to be id from child list - deleteChild(id int64) - // triggerDelete tries to delete self from channelz database. However, if child - // list is not empty, then deletion from the database is on hold until the last - // child is deleted from database. - triggerDelete() - // deleteSelfIfReady check whether triggerDelete() has been called before, and whether child - // list is now empty. If both conditions are met, then delete self from database. - deleteSelfIfReady() - // getParentID returns parent ID of the entry. 0 value parent ID means no parent.
- getParentID() int64 -} - -// dummyEntry is a fake entry to handle entry not found case. -type dummyEntry struct { - idNotFound int64 -} - -func (d *dummyEntry) addChild(id int64, e entry) { - // Note: It is possible for a normal program to reach here under race condition. - // For example, there could be a race between ClientConn.Close() info being propagated - // to addrConn and http2Client. ClientConn.Close() cancel the context and result - // in http2Client to error. The error info is then caught by transport monitor - // and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore, - // the addrConn will create a new transport. And when registering the new transport in - // channelz, its parent addrConn could have already been torn down and deleted - // from channelz tracking, and thus reach the code here. - logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) -} - -func (d *dummyEntry) deleteChild(id int64) { - // It is possible for a normal program to reach here under race condition. - // Refer to the example described in addChild(). - logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) -} - -func (d *dummyEntry) triggerDelete() { - logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) -} - -func (*dummyEntry) deleteSelfIfReady() { - // code should not reach here. deleteSelfIfReady is always called on an existing entry. -} - -func (*dummyEntry) getParentID() int64 { - return 0 -} - -// ChannelMetric defines the info channelz provides for a specific Channel, which -// includes ChannelInternalMetric and channelz-specific data, such as channelz id, -// child list, etc. -type ChannelMetric struct { - // ID is the channelz id of this channel. - ID int64 - // RefName is the human readable reference string of this channel. - RefName string - // ChannelData contains channel internal metric reported by the channel through - // ChannelzMetric(). - ChannelData *ChannelInternalMetric - // NestedChans tracks the nested channel type children of this channel in the format of - // a map from nested channel channelz id to corresponding reference string. - NestedChans map[int64]string - // SubChans tracks the subchannel type children of this channel in the format of a - // map from subchannel channelz id to corresponding reference string. - SubChans map[int64]string - // Sockets tracks the socket type children of this channel in the format of a map - // from socket channelz id to corresponding reference string. - // Note current grpc implementation doesn't allow channel having sockets directly, - // therefore, this is field is unused. - Sockets map[int64]string - // Trace contains the most recent traced events. - Trace *ChannelTrace -} - -// SubChannelMetric defines the info channelz provides for a specific SubChannel, -// which includes ChannelInternalMetric and channelz-specific data, such as -// channelz id, child list, etc. -type SubChannelMetric struct { - // ID is the channelz id of this subchannel. - ID int64 - // RefName is the human readable reference string of this subchannel. - RefName string - // ChannelData contains subchannel internal metric reported by the subchannel - // through ChannelzMetric(). 
- ChannelData *ChannelInternalMetric - // NestedChans tracks the nested channel type children of this subchannel in the format of - // a map from nested channel channelz id to corresponding reference string. - // Note current grpc implementation doesn't allow subchannel to have nested channels - // as children, therefore, this field is unused. - NestedChans map[int64]string - // SubChans tracks the subchannel type children of this subchannel in the format of a - // map from subchannel channelz id to corresponding reference string. - // Note current grpc implementation doesn't allow subchannel to have subchannels - // as children, therefore, this field is unused. - SubChans map[int64]string - // Sockets tracks the socket type children of this subchannel in the format of a map - // from socket channelz id to corresponding reference string. - Sockets map[int64]string - // Trace contains the most recent traced events. - Trace *ChannelTrace -} - -// ChannelInternalMetric defines the struct that the implementor of Channel interface -// should return from ChannelzMetric(). -type ChannelInternalMetric struct { - // current connectivity state of the channel. - State connectivity.State - // The target this channel originally tried to connect to. May be absent - Target string - // The number of calls started on the channel. - CallsStarted int64 - // The number of calls that have completed with an OK status. - CallsSucceeded int64 - // The number of calls that have a completed with a non-OK status. - CallsFailed int64 - // The last time a call was started on the channel. - LastCallStartedTimestamp time.Time -} - -// ChannelTrace stores traced events on a channel/subchannel and related info. -type ChannelTrace struct { - // EventNum is the number of events that ever got traced (i.e. including those that have been deleted) - EventNum int64 - // CreationTime is the creation time of the trace. - CreationTime time.Time - // Events stores the most recent trace events (up to $maxTraceEntry, newer event will overwrite the - // oldest one) - Events []*TraceEvent -} - -// TraceEvent represent a single trace event -type TraceEvent struct { - // Desc is a simple description of the trace event. - Desc string - // Severity states the severity of this trace event. - Severity Severity - // Timestamp is the event time. - Timestamp time.Time - // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is - // involved in this event. - // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) - RefID int64 - // RefName is the reference name for the entity that gets referenced in the event. - RefName string - // RefType indicates the referenced entity type, i.e Channel or SubChannel. - RefType RefChannelType -} - -// Channel is the interface that should be satisfied in order to be tracked by -// channelz as Channel or SubChannel. -type Channel interface { - ChannelzMetric() *ChannelInternalMetric -} - -type dummyChannel struct{} - -func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric { - return &ChannelInternalMetric{} -} - -type channel struct { - refName string - c Channel - closeCalled bool - nestedChans map[int64]string - subChans map[int64]string - id int64 - pid int64 - cm *channelMap - trace *channelTrace - // traceRefCount is the number of trace events that reference this channel. - // Non-zero traceRefCount means the trace of this channel cannot be deleted. 
- traceRefCount int32 -} - -func (c *channel) addChild(id int64, e entry) { - switch v := e.(type) { - case *subChannel: - c.subChans[id] = v.refName - case *channel: - c.nestedChans[id] = v.refName - default: - logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) - } -} - -func (c *channel) deleteChild(id int64) { - delete(c.subChans, id) - delete(c.nestedChans, id) - c.deleteSelfIfReady() -} - -func (c *channel) triggerDelete() { - c.closeCalled = true - c.deleteSelfIfReady() -} - -func (c *channel) getParentID() int64 { - return c.pid -} - -// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means -// deleting the channel reference from its parent's child list. -// -// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the -// corresponding grpc object has been invoked, and the channel does not have any children left. -// -// The returned boolean value indicates whether the channel has been successfully deleted from tree. -func (c *channel) deleteSelfFromTree() (deleted bool) { - if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { - return false - } - // not top channel - if c.pid != 0 { - c.cm.findEntry(c.pid).deleteChild(c.id) - } - return true -} - -// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means -// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the -// channel, and its memory will be garbage collected. -// -// The trace reference count of the channel must be 0 in order to be deleted from the map. This is -// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, -// the trace of the referenced entity must not be deleted. In order to release the resource allocated -// by grpc, the reference to the grpc object is reset to a dummy object. -// -// deleteSelfFromMap must be called after deleteSelfFromTree returns true. -// -// It returns a bool to indicate whether the channel can be safely deleted from map. -func (c *channel) deleteSelfFromMap() (delete bool) { - if c.getTraceRefCount() != 0 { - c.c = &dummyChannel{} - return false - } - return true -} - -// deleteSelfIfReady tries to delete the channel itself from the channelz database. -// The delete process includes two steps: -// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its -// parent's child list. -// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id -// will return entry not found error. 
-func (c *channel) deleteSelfIfReady() { - if !c.deleteSelfFromTree() { - return - } - if !c.deleteSelfFromMap() { - return - } - c.cm.deleteEntry(c.id) - c.trace.clear() -} - -func (c *channel) getChannelTrace() *channelTrace { - return c.trace -} - -func (c *channel) incrTraceRefCount() { - atomic.AddInt32(&c.traceRefCount, 1) -} - -func (c *channel) decrTraceRefCount() { - atomic.AddInt32(&c.traceRefCount, -1) -} - -func (c *channel) getTraceRefCount() int { - i := atomic.LoadInt32(&c.traceRefCount) - return int(i) -} - -func (c *channel) getRefName() string { - return c.refName -} - -type subChannel struct { - refName string - c Channel - closeCalled bool - sockets map[int64]string - id int64 - pid int64 - cm *channelMap - trace *channelTrace - traceRefCount int32 -} - -func (sc *subChannel) addChild(id int64, e entry) { - if v, ok := e.(*normalSocket); ok { - sc.sockets[id] = v.refName - } else { - logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) - } -} - -func (sc *subChannel) deleteChild(id int64) { - delete(sc.sockets, id) - sc.deleteSelfIfReady() -} - -func (sc *subChannel) triggerDelete() { - sc.closeCalled = true - sc.deleteSelfIfReady() -} - -func (sc *subChannel) getParentID() int64 { - return sc.pid -} - -// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which -// means deleting the subchannel reference from its parent's child list. -// -// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of -// the corresponding grpc object has been invoked, and the subchannel does not have any children left. -// -// The returned boolean value indicates whether the channel has been successfully deleted from tree. -func (sc *subChannel) deleteSelfFromTree() (deleted bool) { - if !sc.closeCalled || len(sc.sockets) != 0 { - return false - } - sc.cm.findEntry(sc.pid).deleteChild(sc.id) - return true -} - -// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means -// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query -// the subchannel, and its memory will be garbage collected. -// -// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is -// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, -// the trace of the referenced entity must not be deleted. In order to release the resource allocated -// by grpc, the reference to the grpc object is reset to a dummy object. -// -// deleteSelfFromMap must be called after deleteSelfFromTree returns true. -// -// It returns a bool to indicate whether the channel can be safely deleted from map. -func (sc *subChannel) deleteSelfFromMap() (delete bool) { - if sc.getTraceRefCount() != 0 { - // free the grpc struct (i.e. addrConn) - sc.c = &dummyChannel{} - return false - } - return true -} - -// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. -// The delete process includes two steps: -// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from -// its parent's child list. -// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup -// by id will return entry not found error. 
-func (sc *subChannel) deleteSelfIfReady() { - if !sc.deleteSelfFromTree() { - return - } - if !sc.deleteSelfFromMap() { - return - } - sc.cm.deleteEntry(sc.id) - sc.trace.clear() -} - -func (sc *subChannel) getChannelTrace() *channelTrace { - return sc.trace -} - -func (sc *subChannel) incrTraceRefCount() { - atomic.AddInt32(&sc.traceRefCount, 1) -} - -func (sc *subChannel) decrTraceRefCount() { - atomic.AddInt32(&sc.traceRefCount, -1) -} - -func (sc *subChannel) getTraceRefCount() int { - i := atomic.LoadInt32(&sc.traceRefCount) - return int(i) -} - -func (sc *subChannel) getRefName() string { - return sc.refName -} - -// SocketMetric defines the info channelz provides for a specific Socket, which -// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. -type SocketMetric struct { - // ID is the channelz id of this socket. - ID int64 - // RefName is the human readable reference string of this socket. - RefName string - // SocketData contains socket internal metric reported by the socket through - // ChannelzMetric(). - SocketData *SocketInternalMetric -} - -// SocketInternalMetric defines the struct that the implementor of Socket interface -// should return from ChannelzMetric(). -type SocketInternalMetric struct { - // The number of streams that have been started. - StreamsStarted int64 - // The number of streams that have ended successfully: - // On client side, receiving frame with eos bit set. - // On server side, sending frame with eos bit set. - StreamsSucceeded int64 - // The number of streams that have ended unsuccessfully: - // On client side, termination without receiving frame with eos bit set. - // On server side, termination without sending frame with eos bit set. - StreamsFailed int64 - // The number of messages successfully sent on this socket. - MessagesSent int64 - MessagesReceived int64 - // The number of keep alives sent. This is typically implemented with HTTP/2 - // ping messages. - KeepAlivesSent int64 - // The last time a stream was created by this endpoint. Usually unset for - // servers. - LastLocalStreamCreatedTimestamp time.Time - // The last time a stream was created by the remote endpoint. Usually unset - // for clients. - LastRemoteStreamCreatedTimestamp time.Time - // The last time a message was sent by this endpoint. - LastMessageSentTimestamp time.Time - // The last time a message was received by this endpoint. - LastMessageReceivedTimestamp time.Time - // The amount of window, granted to the local endpoint by the remote endpoint. - // This may be slightly out of date due to network latency. This does NOT - // include stream level or TCP level flow control info. - LocalFlowControlWindow int64 - // The amount of window, granted to the remote endpoint by the local endpoint. - // This may be slightly out of date due to network latency. This does NOT - // include stream level or TCP level flow control info. - RemoteFlowControlWindow int64 - // The locally bound address. - LocalAddr net.Addr - // The remote bound address. May be absent. - RemoteAddr net.Addr - // Optional, represents the name of the remote endpoint, if different than - // the original target name. - RemoteName string - SocketOptions *SocketOptionData - Security credentials.ChannelzSecurityValue -} - -// Socket is the interface that should be satisfied in order to be tracked by -// channelz as Socket. 
-type Socket interface { - ChannelzMetric() *SocketInternalMetric -} - -type listenSocket struct { - refName string - s Socket - id int64 - pid int64 - cm *channelMap -} - -func (ls *listenSocket) addChild(id int64, e entry) { - logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) -} - -func (ls *listenSocket) deleteChild(id int64) { - logger.Errorf("cannot delete a child (id = %d) from a listen socket", id) -} - -func (ls *listenSocket) triggerDelete() { - ls.cm.deleteEntry(ls.id) - ls.cm.findEntry(ls.pid).deleteChild(ls.id) -} - -func (ls *listenSocket) deleteSelfIfReady() { - logger.Errorf("cannot call deleteSelfIfReady on a listen socket") -} - -func (ls *listenSocket) getParentID() int64 { - return ls.pid -} - -type normalSocket struct { - refName string - s Socket - id int64 - pid int64 - cm *channelMap -} - -func (ns *normalSocket) addChild(id int64, e entry) { - logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) -} - -func (ns *normalSocket) deleteChild(id int64) { - logger.Errorf("cannot delete a child (id = %d) from a normal socket", id) -} - -func (ns *normalSocket) triggerDelete() { - ns.cm.deleteEntry(ns.id) - ns.cm.findEntry(ns.pid).deleteChild(ns.id) -} - -func (ns *normalSocket) deleteSelfIfReady() { - logger.Errorf("cannot call deleteSelfIfReady on a normal socket") -} - -func (ns *normalSocket) getParentID() int64 { - return ns.pid -} - -// ServerMetric defines the info channelz provides for a specific Server, which -// includes ServerInternalMetric and channelz-specific data, such as channelz id, -// child list, etc. -type ServerMetric struct { - // ID is the channelz id of this server. - ID int64 - // RefName is the human readable reference string of this server. - RefName string - // ServerData contains server internal metric reported by the server through - // ChannelzMetric(). - ServerData *ServerInternalMetric - // ListenSockets tracks the listener socket type children of this server in the - // format of a map from socket channelz id to corresponding reference string. - ListenSockets map[int64]string -} - -// ServerInternalMetric defines the struct that the implementor of Server interface -// should return from ChannelzMetric(). -type ServerInternalMetric struct { - // The number of incoming calls started on the server. - CallsStarted int64 - // The number of incoming calls that have completed with an OK status. - CallsSucceeded int64 - // The number of incoming calls that have a completed with a non-OK status. - CallsFailed int64 - // The last time a call was started on the server. - LastCallStartedTimestamp time.Time -} - -// Server is the interface to be satisfied in order to be tracked by channelz as -// Server. 
-type Server interface { - ChannelzMetric() *ServerInternalMetric -} - -type server struct { - refName string - s Server - closeCalled bool - sockets map[int64]string - listenSockets map[int64]string - id int64 - cm *channelMap -} - -func (s *server) addChild(id int64, e entry) { - switch v := e.(type) { - case *normalSocket: - s.sockets[id] = v.refName - case *listenSocket: - s.listenSockets[id] = v.refName - default: - logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) - } -} - -func (s *server) deleteChild(id int64) { - delete(s.sockets, id) - delete(s.listenSockets, id) - s.deleteSelfIfReady() -} - -func (s *server) triggerDelete() { - s.closeCalled = true - s.deleteSelfIfReady() -} - -func (s *server) deleteSelfIfReady() { - if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { - return - } - s.cm.deleteEntry(s.id) -} - -func (s *server) getParentID() int64 { - return 0 -} - -type tracedChannel interface { - getChannelTrace() *channelTrace - incrTraceRefCount() - decrTraceRefCount() - getRefName() string -} - -type channelTrace struct { - cm *channelMap - clearCalled bool - createdTime time.Time - eventCount int64 - mu sync.Mutex - events []*TraceEvent -} - -func (c *channelTrace) append(e *TraceEvent) { - c.mu.Lock() - if len(c.events) == getMaxTraceEntry() { - del := c.events[0] - c.events = c.events[1:] - if del.RefID != 0 { - // start recursive cleanup in a goroutine to not block the call originated from grpc. - go func() { - // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. - c.cm.mu.Lock() - c.cm.decrTraceRefCount(del.RefID) - c.cm.mu.Unlock() - }() - } - } - e.Timestamp = time.Now() - c.events = append(c.events, e) - c.eventCount++ - c.mu.Unlock() -} - -func (c *channelTrace) clear() { - if c.clearCalled { - return - } - c.clearCalled = true - c.mu.Lock() - for _, e := range c.events { - if e.RefID != 0 { - // caller should have already held the c.cm.mu lock. - c.cm.decrTraceRefCount(e.RefID) - } - } - c.mu.Unlock() -} - -// Severity is the severity level of a trace event. -// The canonical enumeration of all valid values is here: -// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. -type Severity int - -const ( - // CtUnknown indicates unknown severity of a trace event. - CtUnknown Severity = iota - // CtInfo indicates info level severity of a trace event. - CtInfo - // CtWarning indicates warning level severity of a trace event. - CtWarning - // CtError indicates error level severity of a trace event. - CtError -) - -// RefChannelType is the type of the entity being referenced in a trace event. -type RefChannelType int - -const ( - // RefUnknown indicates an unknown entity type, the zero value for this type. - RefUnknown RefChannelType = iota - // RefChannel indicates the referenced entity is a Channel. - RefChannel - // RefSubChannel indicates the referenced entity is a SubChannel. - RefSubChannel - // RefServer indicates the referenced entity is a Server. - RefServer - // RefListenSocket indicates the referenced entity is a ListenSocket. - RefListenSocket - // RefNormalSocket indicates the referenced entity is a NormalSocket. 
- RefNormalSocket -) - -var refChannelTypeToString = map[RefChannelType]string{ - RefUnknown: "Unknown", - RefChannel: "Channel", - RefSubChannel: "SubChannel", - RefServer: "Server", - RefListenSocket: "ListenSocket", - RefNormalSocket: "NormalSocket", -} - -func (r RefChannelType) String() string { - return refChannelTypeToString[r] -} - -func (c *channelTrace) dumpData() *ChannelTrace { - c.mu.Lock() - ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} - ct.Events = c.events[:len(c.events)] - c.mu.Unlock() - return ct -} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 3cf10ddfb..d90648713 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -28,17 +28,11 @@ import ( var ( // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) - // AdvertiseCompressors is set if registered compressor should be advertised - // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). - AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true) // RingHashCap indicates the maximum ring size which defaults to 4096 // entries but may be overridden by setting the environment variable // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) - // PickFirstLBConfig is set if we should support configuration of the - // pick_first LB policy. - PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true) // LeastRequestLB is set if we should support the least_request_experimental // LB policy, which can be enabled by setting the environment variable // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". @@ -46,6 +40,12 @@ var ( // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS // handshakes that can be performed. ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) + // EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled + // should be rejected. The HTTP/2 protocol requires ALPN to be enabled, this + // option is present for backward compatibility. This option may be overridden + // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" + // or "false". + EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 02b4b6a1c..29f234acb 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -50,46 +50,7 @@ var ( // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) - // XDSRingHash indicates whether ring hash support is enabled, which can be - // disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". - XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true) - // XDSClientSideSecurity is used to control processing of security - // configuration on the client-side. 
- // - // Note that there is no env var protection for the server-side because we - // have a brand new API on the server-side and users explicitly need to use - // the new API to get security integration on the server. - XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) - // XDSAggregateAndDNS indicates whether processing of aggregated cluster and - // DNS cluster is enabled, which can be disabled by setting the environment - // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" - // to "false". - XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) - - // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, - // which can be disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". - XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true) - // XDSOutlierDetection indicates whether outlier detection support is - // enabled, which can be disabled by setting the environment variable - // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". - XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true) - // XDSFederation indicates whether federation support is enabled, which can - // be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true". - XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true) - - // XDSRLS indicates whether processing of Cluster Specifier plugins and - // support for the RLS CLuster Specifier is enabled, which can be disabled by - // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to - // "false". - XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true) // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") - // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which - // can be disabled by setting the environment variable - // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false". - XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true) ) diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go new file mode 100644 index 000000000..7f7044e17 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/experimental.go @@ -0,0 +1,28 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +var ( + // WithRecvBufferPool is implemented by the grpc package and returns a dial + // option to configure a shared buffer pool for a grpc.ClientConn. + WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + + // RecvBufferPool is implemented by the grpc package and returns a server + // option to configure a shared buffer pool for a grpc.Server. 
+ RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption +) diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go deleted file mode 100644 index aa97273e7..000000000 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ /dev/null @@ -1,95 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcrand implements math/rand functions in a concurrent-safe way -// with a global random source, independent of math/rand's global source. -package grpcrand - -import ( - "math/rand" - "sync" - "time" -) - -var ( - r = rand.New(rand.NewSource(time.Now().UnixNano())) - mu sync.Mutex -) - -// Int implements rand.Int on the grpcrand global source. -func Int() int { - mu.Lock() - defer mu.Unlock() - return r.Int() -} - -// Int63n implements rand.Int63n on the grpcrand global source. -func Int63n(n int64) int64 { - mu.Lock() - defer mu.Unlock() - return r.Int63n(n) -} - -// Intn implements rand.Intn on the grpcrand global source. -func Intn(n int) int { - mu.Lock() - defer mu.Unlock() - return r.Intn(n) -} - -// Int31n implements rand.Int31n on the grpcrand global source. -func Int31n(n int32) int32 { - mu.Lock() - defer mu.Unlock() - return r.Int31n(n) -} - -// Float64 implements rand.Float64 on the grpcrand global source. -func Float64() float64 { - mu.Lock() - defer mu.Unlock() - return r.Float64() -} - -// Uint64 implements rand.Uint64 on the grpcrand global source. -func Uint64() uint64 { - mu.Lock() - defer mu.Unlock() - return r.Uint64() -} - -// Uint32 implements rand.Uint32 on the grpcrand global source. -func Uint32() uint32 { - mu.Lock() - defer mu.Unlock() - return r.Uint32() -} - -// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. -func ExpFloat64() float64 { - mu.Lock() - defer mu.Unlock() - return r.ExpFloat64() -} - -// Shuffle implements rand.Shuffle on the grpcrand global source. -var Shuffle = func(n int, f func(int, int)) { - mu.Lock() - defer mu.Unlock() - r.Shuffle(n, f) -} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 900917dbe..f7f40a16a 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -20,7 +20,6 @@ package grpcsync import ( "context" - "sync" "google.golang.org/grpc/internal/buffer" ) @@ -38,8 +37,6 @@ type CallbackSerializer struct { done chan struct{} callbacks *buffer.Unbounded - closedMu sync.Mutex - closed bool } // NewCallbackSerializer returns a new CallbackSerializer instance. The provided @@ -65,56 +62,34 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { // callbacks to be executed by the serializer. It is not possible to add // callbacks once the context passed to NewCallbackSerializer is cancelled. 
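Schedule and the reworked run loop below form a single-consumer queue: callbacks are buffered, then executed one at a time and in order on a single goroutine until the context is cancelled. A self-contained approximation of that contract; a sized channel stands in for grpc's unbounded buffer, so treat this as a sketch rather than the real semantics:

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

// miniSerializer approximates CallbackSerializer: every scheduled callback
// runs on one goroutine, one at a time, in scheduling order.
type miniSerializer struct {
	callbacks chan func(context.Context)
	done      chan struct{}
}

func newMiniSerializer(ctx context.Context) *miniSerializer {
	s := &miniSerializer{
		callbacks: make(chan func(context.Context), 16), // sized; the real buffer is unbounded
		done:      make(chan struct{}),
	}
	go func() {
		defer close(s.done)
		for {
			select {
			case <-ctx.Done():
				return
			case cb := <-s.callbacks:
				cb(ctx)
			}
		}
	}()
	return s
}

func (s *miniSerializer) schedule(f func(context.Context)) { s.callbacks <- f }

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	s := newMiniSerializer(ctx)
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		i := i
		wg.Add(1)
		s.schedule(func(context.Context) { fmt.Println("callback", i); wg.Done() })
	}
	wg.Wait() // all three callbacks ran, in order
	cancel()
	<-s.done
}
```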
 func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool {
-	cs.closedMu.Lock()
-	defer cs.closedMu.Unlock()
-
-	if cs.closed {
-		return false
-	}
-	cs.callbacks.Put(f)
-	return true
+	return cs.callbacks.Put(f) == nil
 }

 func (cs *CallbackSerializer) run(ctx context.Context) {
-	var backlog []func(context.Context)
 	defer close(cs.done)
+
+	// TODO: when Go 1.21 is the oldest supported version, this loop and Close
+	// can be replaced with:
+	//
+	//   context.AfterFunc(ctx, cs.callbacks.Close)
 	for ctx.Err() == nil {
 		select {
 		case <-ctx.Done():
 			// Do nothing here. Next iteration of the for loop will not happen,
 			// since ctx.Err() would be non-nil.
-		case callback, ok := <-cs.callbacks.Get():
-			if !ok {
-				return
-			}
+		case cb := <-cs.callbacks.Get():
 			cs.callbacks.Load()
-			callback.(func(ctx context.Context))(ctx)
+			cb.(func(context.Context))(ctx)
 		}
 	}

-	// Fetch pending callbacks if any, and execute them before returning from
-	// this method and closing cs.done.
-	cs.closedMu.Lock()
-	cs.closed = true
-	backlog = cs.fetchPendingCallbacks()
+	// Close the buffer to prevent new callbacks from being added.
 	cs.callbacks.Close()
-	cs.closedMu.Unlock()
-	for _, b := range backlog {
-		b(ctx)
-	}
-}

-func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) {
-	var backlog []func(context.Context)
-	for {
-		select {
-		case b := <-cs.callbacks.Get():
-			backlog = append(backlog, b.(func(context.Context)))
-			cs.callbacks.Load()
-		default:
-			return backlog
-		}
+	// Run all pending callbacks.
+	for cb := range cs.callbacks.Get() {
+		cs.callbacks.Load()
+		cb.(func(context.Context))(ctx)
 	}
 }
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
index 9f4090967..e8d866984 100644
--- a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
@@ -20,8 +20,6 @@ package grpcutil

 import (
 	"strings"
-
-	"google.golang.org/grpc/internal/envconfig"
 )

 // RegisteredCompressorNames holds names of the registered compressors.
@@ -40,8 +38,5 @@ func IsCompressorNameRegistered(name string) bool {
 // RegisteredCompressors returns a string of registered compressor names
 // separated by comma.
 func RegisteredCompressors() string {
-	if !envconfig.AdvertiseCompressors {
-		return ""
-	}
 	return strings.Join(RegisteredCompressorNames, ",")
 }
diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go
index 6c272476e..fe49cb74c 100644
--- a/vendor/google.golang.org/grpc/internal/idle/idle.go
+++ b/vendor/google.golang.org/grpc/internal/idle/idle.go
@@ -26,8 +26,6 @@ import (
 	"sync"
 	"sync/atomic"
 	"time"
-
-	"google.golang.org/grpc/grpclog"
 )

 // For overriding in unit tests.
@@ -39,27 +37,12 @@ var timeAfterFunc = func(d time.Duration, f func()) *time.Timer {
 // and exit from idle mode.
 type Enforcer interface {
 	ExitIdleMode() error
-	EnterIdleMode() error
-}
-
-// Manager defines the functionality required to track RPC activity on a
-// channel.
-type Manager interface {
-	OnCallBegin() error
-	OnCallEnd()
-	Close()
+	EnterIdleMode()
 }
-
-type noopManager struct{}
-
-func (noopManager) OnCallBegin() error { return nil }
-func (noopManager) OnCallEnd() {}
-func (noopManager) Close() {}
-
-// manager implements the Manager interface. It uses atomic operations to
-// synchronize access to shared state and a mutex to guarantee mutual exclusion
-// in a critical section.
-type manager struct { +// Manager implements idleness detection and calls the configured Enforcer to +// enter/exit idle mode when appropriate. Must be created by NewManager. +type Manager struct { // State accessed atomically. lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. @@ -69,8 +52,7 @@ type manager struct { // Can be accessed without atomics or mutex since these are set at creation // time and read-only after that. enforcer Enforcer // Functionality provided by grpc.ClientConn. - timeout int64 // Idle timeout duration nanos stored as an int64. - logger grpclog.LoggerV2 + timeout time.Duration // idleMu is used to guarantee mutual exclusion in two scenarios: // - Opposing intentions: @@ -88,57 +70,48 @@ type manager struct { timer *time.Timer } -// ManagerOptions is a collection of options used by -// NewManager. -type ManagerOptions struct { - Enforcer Enforcer - Timeout time.Duration - Logger grpclog.LoggerV2 +// NewManager creates a new idleness manager implementation for the +// given idle timeout. It begins in idle mode. +func NewManager(enforcer Enforcer, timeout time.Duration) *Manager { + return &Manager{ + enforcer: enforcer, + timeout: timeout, + actuallyIdle: true, + activeCallsCount: -math.MaxInt32, + } } -// NewManager creates a new idleness manager implementation for the -// given idle timeout. -func NewManager(opts ManagerOptions) Manager { - if opts.Timeout == 0 { - return noopManager{} +// resetIdleTimerLocked resets the idle timer to the given duration. Called +// when exiting idle mode or when the timer fires and we need to reset it. +func (m *Manager) resetIdleTimerLocked(d time.Duration) { + if m.isClosed() || m.timeout == 0 || m.actuallyIdle { + return } - m := &manager{ - enforcer: opts.Enforcer, - timeout: int64(opts.Timeout), - logger: opts.Logger, + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback or when exiting idle mode. + if m.timer != nil { + m.timer.Stop() } - m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout) - return m + m.timer = timeAfterFunc(d, m.handleIdleTimeout) } -// resetIdleTimer resets the idle timer to the given duration. This method -// should only be called from the timer callback. -func (m *manager) resetIdleTimer(d time.Duration) { +func (m *Manager) resetIdleTimer(d time.Duration) { m.idleMu.Lock() defer m.idleMu.Unlock() - - if m.timer == nil { - // Only close sets timer to nil. We are done. - return - } - - // It is safe to ignore the return value from Reset() because this method is - // only ever called from the timer callback, which means the timer has - // already fired. - m.timer.Reset(d) + m.resetIdleTimerLocked(d) } // handleIdleTimeout is the timer callback that is invoked upon expiry of the // configured idle timeout. The channel is considered inactive if there are no // ongoing calls and no RPC activity since the last time the timer fired. -func (m *manager) handleIdleTimeout() { +func (m *Manager) handleIdleTimeout() { if m.isClosed() { return } if atomic.LoadInt32(&m.activeCallsCount) > 0 { - m.resetIdleTimer(time.Duration(m.timeout)) + m.resetIdleTimer(m.timeout) return } @@ -148,24 +121,12 @@ func (m *manager) handleIdleTimeout() { // Set the timer to fire after a duration of idle timeout, calculated // from the time the most recent RPC completed. 
atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) - m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano())) + m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout) return } - // This CAS operation is extremely likely to succeed given that there has - // been no activity since the last time we were here. Setting the - // activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the - // channel is either in idle mode or is trying to get there. - if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { - // This CAS operation can fail if an RPC started after we checked for - // activity at the top of this method, or one was ongoing from before - // the last time we were here. In both case, reset the timer and return. - m.resetIdleTimer(time.Duration(m.timeout)) - return - } - - // Now that we've set the active calls count to -math.MaxInt32, it's time to - // actually move to idle mode. + // Now that we've checked that there has been no activity, attempt to enter + // idle mode, which is very likely to succeed. if m.tryEnterIdleMode() { // Successfully entered idle mode. No timer needed until we exit idle. return @@ -174,8 +135,7 @@ func (m *manager) handleIdleTimeout() { // Failed to enter idle mode due to a concurrent RPC that kept the channel // active, or because of an error from the channel. Undo the attempt to // enter idle, and reset the timer to try again later. - atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) - m.resetIdleTimer(time.Duration(m.timeout)) + m.resetIdleTimer(m.timeout) } // tryEnterIdleMode instructs the channel to enter idle mode. But before @@ -185,36 +145,49 @@ func (m *manager) handleIdleTimeout() { // Return value indicates whether or not the channel moved to idle mode. // // Holds idleMu which ensures mutual exclusion with exitIdleMode. -func (m *manager) tryEnterIdleMode() bool { +func (m *Manager) tryEnterIdleMode() bool { + // Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() + // that the channel is either in idle mode or is trying to get there. + if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity in the timer handler, or one was ongoing from before the + // last time the timer fired, or if a test is attempting to enter idle + // mode without checking. In all cases, abort going into idle mode. + return false + } + // N.B. if we fail to enter idle mode after this, we must re-add + // math.MaxInt32 to m.activeCallsCount. + m.idleMu.Lock() defer m.idleMu.Unlock() if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { // We raced and lost to a new RPC. Very rare, but stop entering idle. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) return false } if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { - // An very short RPC could have come in (and also finished) after we + // A very short RPC could have come in (and also finished) after we // checked for calls count and activity in handleIdleTimeout(), but // before the CAS operation. So, we need to check for activity again. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) return false } - // No new RPCs have come in since we last set the active calls count value - // -math.MaxInt32 in the timer callback. And since we have the lock, it is - // safe to enter idle mode now. 
-	if err := m.enforcer.EnterIdleMode(); err != nil {
-		m.logger.Errorf("Failed to enter idle mode: %v", err)
-		return false
-	}
-
-	// Successfully entered idle mode.
+	// No new RPCs have come in since we set the active calls count value to
+	// -math.MaxInt32. And since we have the lock, it is safe to enter idle mode
+	// unconditionally now.
+	m.enforcer.EnterIdleMode()
 	m.actuallyIdle = true
 	return true
 }

+// EnterIdleModeForTesting instructs the channel to enter idle mode.
+func (m *Manager) EnterIdleModeForTesting() {
+	m.tryEnterIdleMode()
+}
+
 // OnCallBegin is invoked at the start of every RPC.
-func (m *manager) OnCallBegin() error {
+func (m *Manager) OnCallBegin() error {
 	if m.isClosed() {
 		return nil
 	}
@@ -227,7 +200,7 @@
 	// Channel is either in idle mode or is in the process of moving to idle
 	// mode. Attempt to exit idle mode to allow this RPC.
-	if err := m.exitIdleMode(); err != nil {
+	if err := m.ExitIdleMode(); err != nil {
 		// Undo the increment to calls count, and return an error causing the
 		// RPC to fail.
 		atomic.AddInt32(&m.activeCallsCount, -1)
@@ -238,28 +211,30 @@
 	return nil
 }

-// exitIdleMode instructs the channel to exit idle mode.
-//
-// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
-func (m *manager) exitIdleMode() error {
+// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's
+// internal state.
+func (m *Manager) ExitIdleMode() error {
+	// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
 	m.idleMu.Lock()
 	defer m.idleMu.Unlock()

-	if !m.actuallyIdle {
-		// This can happen in two scenarios:
+	if m.isClosed() || !m.actuallyIdle {
+		// This can happen in three scenarios:
 		// - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
 		//   tryEnterIdleMode(). But before the latter could grab the lock, an RPC
 		//   came in and OnCallBegin() noticed that the calls count is negative.
 		// - Channel is in idle mode, and multiple new RPCs come in at the same
 		//   time, all of them notice a negative calls count in OnCallBegin and get
 		//   here. The first one to get the lock would get the channel to exit idle.
+		// - Channel is not in idle mode, and the user calls Connect which calls
+		//   m.ExitIdleMode.
 		//
-		// Either way, nothing to do here.
+		// In any case, there is nothing to do here.
 		return nil
 	}

 	if err := m.enforcer.ExitIdleMode(); err != nil {
-		return fmt.Errorf("channel failed to exit idle mode: %v", err)
+		return fmt.Errorf("failed to exit idle mode: %w", err)
 	}

 	// Undo the idle entry process. This also respects any new RPC attempts.
@@ -267,12 +242,12 @@
 	m.actuallyIdle = false

 	// Start a new timer to fire after the configured idle timeout.
-	m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout)
+	m.resetIdleTimerLocked(m.timeout)

 	return nil
 }

 // OnCallEnd is invoked at the end of every RPC.
-func (m *manager) OnCallEnd() { +func (m *Manager) OnCallEnd() { if m.isClosed() { return } @@ -287,15 +262,17 @@ func (m *manager) OnCallEnd() { atomic.AddInt32(&m.activeCallsCount, -1) } -func (m *manager) isClosed() bool { +func (m *Manager) isClosed() bool { return atomic.LoadInt32(&m.closed) == 1 } -func (m *manager) Close() { +func (m *Manager) Close() { atomic.StoreInt32(&m.closed, 1) m.idleMu.Lock() - m.timer.Stop() - m.timer = nil + if m.timer != nil { + m.timer.Stop() + m.timer = nil + } m.idleMu.Unlock() } diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index c8a8c76d6..5d6653986 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -57,7 +57,7 @@ var ( // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo // stored in the passed in attributes. This is set by // credentials/xds/xds.go. - GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo + GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer // GetServerCredentials returns the transport credentials configured on a // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. @@ -68,11 +68,11 @@ var ( // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. CanonicalString any // func (codes.Code) string - // DrainServerTransports initiates a graceful close of existing connections - // on a gRPC server accepted on the provided listener address. An - // xDS-enabled server invokes this method on a grpc.Server when a particular - // listener moves to "not-serving" mode. - DrainServerTransports any // func(*grpc.Server, string) + // IsRegisteredMethod returns whether the passed in method is registered as + // a method on the server. + IsRegisteredMethod any // func(*grpc.Server, string) bool + // ServerFromContext returns the server from the context. + ServerFromContext any // func(context.Context) *grpc.Server // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. @@ -106,6 +106,14 @@ var ( // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. ClearGlobalDialOptions func() + + // AddGlobalPerTargetDialOptions adds a PerTargetDialOption that will be + // configured for newly created ClientConns. + AddGlobalPerTargetDialOptions any // func (opt any) + // ClearGlobalPerTargetDialOptions clears the slice of global late apply + // dial options. + ClearGlobalPerTargetDialOptions func() + // JoinDialOptions combines the dial options passed as arguments into a // single dial option. JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption @@ -126,7 +134,8 @@ var ( // deleted or changed. BinaryLogger any // func(binarylog.Logger) grpc.ServerOption - // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a + // provided grpc.ClientConn. 
SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using @@ -175,9 +184,34 @@ var ( // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra // metadata to RPCs. GRPCResolverSchemeExtraMetadata string = "xds" + + // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. + EnterIdleModeForTesting any // func(*grpc.ClientConn) + + // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. + ExitIdleModeForTesting any // func(*grpc.ClientConn) error + + ChannelzTurnOffForTesting func() + + // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to + // invoke resource-not-found error for the given resource type and name. + TriggerXDSResourceNotFoundForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error + + // FromOutgoingContextRaw returns the un-merged, intermediary contents of + // metadata.rawMD. + FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool) + + // UserSetDefaultScheme is set to true if the user has overridden the + // default resolver scheme. + UserSetDefaultScheme bool = false + + // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n + // is the number of elements. swap swaps the elements with indexes i and j. + ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) ) -// HealthChecker defines the signature of the client-side LB channel health checking function. +// HealthChecker defines the signature of the client-side LB channel health +// checking function. // // The implementation is expected to create a health checking RPC stream by // calling newStream(), watch for the health status of serviceName, and report diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go index 703319137..dbee7a60d 100644 --- a/vendor/google.golang.org/grpc/internal/pretty/pretty.go +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -24,10 +24,8 @@ import ( "encoding/json" "fmt" - "github.com/golang/protobuf/jsonpb" - protov1 "github.com/golang/protobuf/proto" "google.golang.org/protobuf/encoding/protojson" - protov2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/protoadapt" ) const jsonIndent = " " @@ -36,21 +34,14 @@ const jsonIndent = " " // // If marshal fails, it falls back to fmt.Sprintf("%+v"). func ToJSON(e any) string { - switch ee := e.(type) { - case protov1.Message: - mm := jsonpb.Marshaler{Indent: jsonIndent} - ret, err := mm.MarshalToString(ee) - if err != nil { - // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 - // messages are not imported, and this will fail because the message - // is not found. 
- return fmt.Sprintf("%+v", ee) - } - return ret - case protov2.Message: + if ee, ok := e.(protoadapt.MessageV1); ok { + e = protoadapt.MessageV2Of(ee) + } + + if ee, ok := e.(protoadapt.MessageV2); ok { mm := protojson.MarshalOptions{ - Multiline: true, Indent: jsonIndent, + Multiline: true, } ret, err := mm.Marshal(ee) if err != nil { @@ -60,13 +51,13 @@ func ToJSON(e any) string { return fmt.Sprintf("%+v", ee) } return string(ret) - default: - ret, err := json.MarshalIndent(ee, "", jsonIndent) - if err != nil { - return fmt.Sprintf("%+v", ee) - } - return string(ret) } + + ret, err := json.MarshalIndent(e, "", jsonIndent) + if err != nil { + return fmt.Sprintf("%+v", e) + } + return string(ret) } // FormatJSON formats the input json bytes with indentation. diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 99e1e5b36..4552db16b 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -23,8 +23,8 @@ package dns import ( "context" "encoding/json" - "errors" "fmt" + "math/rand" "net" "os" "strconv" @@ -36,26 +36,37 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/resolver/dns/internal" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) -// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB -// addresses from SRV records. Must not be changed after init time. -var EnableSRVLookups = false - -var logger = grpclog.Component("dns") - -// Globals to stub out in tests. TODO: Perhaps these two can be combined into a -// single variable for testing the resolver? var ( - newTimer = time.NewTimer - newTimerDNSResRate = time.NewTimer + // EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB + // addresses from SRV records. Must not be changed after init time. + EnableSRVLookups = false + + // MinResolutionInterval is the minimum interval at which re-resolutions are + // allowed. This helps to prevent excessive re-resolution. + MinResolutionInterval = 30 * time.Second + + // ResolvingTimeout specifies the maximum duration for a DNS resolution request. + // If the timeout expires before a response is received, the request will be canceled. + // + // It is recommended to set this value at application startup. Avoid modifying this variable + // after initialization as it's not thread-safe for concurrent modification. + ResolvingTimeout = 30 * time.Second + + logger = grpclog.Component("dns") ) func init() { resolver.Register(NewBuilder()) + internal.TimeAfterFunc = time.After + internal.TimeNowFunc = time.Now + internal.TimeUntilFunc = time.Until + internal.NewNetResolver = newNetResolver + internal.AddressDialer = addressDialer } const ( @@ -70,23 +81,6 @@ const ( txtAttribute = "grpc_config=" ) -var ( - errMissingAddr = errors.New("dns resolver: missing address") - - // Addresses ending with a colon that is supposed to be the separator - // between host and port is not allowed. E.g. 
"::" is a valid address as - // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with - // a colon as the host and port separator - errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") -) - -var ( - defaultResolver netResolver = net.DefaultResolver - // To prevent excessive re-resolution, we enforce a rate limit on DNS - // resolution requests. - minDNSResRate = 30 * time.Second -) - var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { return func(ctx context.Context, network, _ string) (net.Conn, error) { var dialer net.Dialer @@ -94,7 +88,11 @@ var addressDialer = func(address string) func(context.Context, string, string) ( } } -var newNetResolver = func(authority string) (netResolver, error) { +var newNetResolver = func(authority string) (internal.NetResolver, error) { + if authority == "" { + return net.DefaultResolver, nil + } + host, port, err := parseTarget(authority, defaultDNSSvrPort) if err != nil { return nil, err @@ -104,7 +102,7 @@ var newNetResolver = func(authority string) (netResolver, error) { return &net.Resolver{ PreferGo: true, - Dial: addressDialer(authorityWithPort), + Dial: internal.AddressDialer(authorityWithPort), }, nil } @@ -142,13 +140,9 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.URL.Host == "" { - d.resolver = defaultResolver - } else { - d.resolver, err = newNetResolver(target.URL.Host) - if err != nil { - return nil, err - } + d.resolver, err = internal.NewNetResolver(target.URL.Host) + if err != nil { + return nil, err } d.wg.Add(1) @@ -161,12 +155,6 @@ func (b *dnsBuilder) Scheme() string { return "dns" } -type netResolver interface { - LookupHost(ctx context.Context, host string) (addrs []string, err error) - LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) - LookupTXT(ctx context.Context, name string) (txts []string, err error) -} - // deadResolver is a resolver that does nothing. type deadResolver struct{} @@ -178,7 +166,7 @@ func (deadResolver) Close() {} type dnsResolver struct { host string port string - resolver netResolver + resolver internal.NetResolver ctx context.Context cancel context.CancelFunc cc resolver.ClientConn @@ -223,45 +211,43 @@ func (d *dnsResolver) watcher() { err = d.cc.UpdateState(*state) } - var timer *time.Timer + var nextResolutionTime time.Time if err == nil { // Success resolving, wait for the next ResolveNow. However, also wait 30 // seconds at the very least to prevent constantly re-resolving. backoffIndex = 1 - timer = newTimerDNSResRate(minDNSResRate) + nextResolutionTime = internal.TimeNowFunc().Add(MinResolutionInterval) select { case <-d.ctx.Done(): - timer.Stop() return case <-d.rn: } } else { // Poll on an error found in DNS Resolver or an error received from // ClientConn. 
- timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) + nextResolutionTime = internal.TimeNowFunc().Add(backoff.DefaultExponential.Backoff(backoffIndex)) backoffIndex++ } select { case <-d.ctx.Done(): - timer.Stop() return - case <-timer.C: + case <-internal.TimeAfterFunc(internal.TimeUntilFunc(nextResolutionTime)): } } } -func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { +func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) { if !EnableSRVLookups { return nil, nil } var newAddrs []resolver.Address - _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) + _, srvs, err := d.resolver.LookupSRV(ctx, "grpclb", "tcp", d.host) if err != nil { err = handleDNSError(err, "SRV") // may become nil return nil, err } for _, s := range srvs { - lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) + lbAddrs, err := d.resolver.LookupHost(ctx, s.Target) if err != nil { err = handleDNSError(err, "A") // may become nil if err == nil { @@ -298,8 +284,8 @@ func handleDNSError(err error, lookupType string) error { return err } -func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { - ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) +func (d *dnsResolver) lookupTXT(ctx context.Context) *serviceconfig.ParseResult { + ss, err := d.resolver.LookupTXT(ctx, txtPrefix+d.host) if err != nil { if envconfig.TXTErrIgnore { return nil @@ -326,8 +312,8 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { return d.cc.ParseServiceConfig(sc) } -func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { - addrs, err := d.resolver.LookupHost(d.ctx, d.host) +func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error) { + addrs, err := d.resolver.LookupHost(ctx, d.host) if err != nil { err = handleDNSError(err, "A") return nil, err @@ -345,8 +331,10 @@ func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { } func (d *dnsResolver) lookup() (*resolver.State, error) { - srv, srvErr := d.lookupSRV() - addrs, hostErr := d.lookupHost() + ctx, cancel := context.WithTimeout(d.ctx, ResolvingTimeout) + defer cancel() + srv, srvErr := d.lookupSRV(ctx) + addrs, hostErr := d.lookupHost(ctx) if hostErr != nil && (srvErr != nil || len(srv) == 0) { return nil, hostErr } @@ -356,7 +344,7 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) } if !d.disableServiceConfig { - state.ServiceConfig = d.lookupTXT() + state.ServiceConfig = d.lookupTXT(ctx) } return &state, nil } @@ -387,7 +375,7 @@ func formatIP(addr string) (addrIP string, ok bool) { // target: ":80" defaultPort: "443" returns host: "localhost", port: "80" func parseTarget(target, defaultPort string) (host, port string, err error) { if target == "" { - return "", "", errMissingAddr + return "", "", internal.ErrMissingAddr } if ip := net.ParseIP(target); ip != nil { // target is an IPv4 or IPv6(without brackets) address @@ -397,7 +385,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { if port == "" { // If the port field is empty (target ends with colon), e.g. "[::1]:", // this is an error. 
- return "", "", errEndsWithColon + return "", "", internal.ErrEndsWithColon } // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { @@ -437,7 +425,7 @@ func chosenByPercentage(a *int) bool { if a == nil { return true } - return grpcrand.Intn(100)+1 <= *a + return rand.Intn(100)+1 <= *a } func canaryingSC(js string) string { diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go new file mode 100644 index 000000000..c0eae4f5f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go @@ -0,0 +1,77 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the dns resolver package. +package internal + +import ( + "context" + "errors" + "net" + "time" +) + +// NetResolver groups the methods on net.Resolver that are used by the DNS +// resolver implementation. This allows the default net.Resolver instance to be +// overridden from tests. +type NetResolver interface { + LookupHost(ctx context.Context, host string) (addrs []string, err error) + LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) + LookupTXT(ctx context.Context, name string) (txts []string, err error) +} + +var ( + // ErrMissingAddr is the error returned when building a DNS resolver when + // the provided target name is empty. + ErrMissingAddr = errors.New("dns resolver: missing address") + + // ErrEndsWithColon is the error returned when building a DNS resolver when + // the provided target name ends with a colon that is supposed to be the + // separator between host and port. E.g. "::" is a valid address as it is + // an IPv6 address (host only) and "[::]:" is invalid as it ends with a + // colon as the host and port separator + ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") +) + +// The following vars are overridden from tests. +var ( + // TimeAfterFunc is used by the DNS resolver to wait for the given duration + // to elapse. In non-test code, this is implemented by time.After. In test + // code, this can be used to control the amount of time the resolver is + // blocked waiting for the duration to elapse. + TimeAfterFunc func(time.Duration) <-chan time.Time + + // TimeNowFunc is used by the DNS resolver to get the current time. + // In non-test code, this is implemented by time.Now. In test code, + // this can be used to control the current time for the resolver. + TimeNowFunc func() time.Time + + // TimeUntilFunc is used by the DNS resolver to calculate the remaining + // wait time for re-resolution. In non-test code, this is implemented by + // time.Until. In test code, this can be used to control the remaining + // time for resolver to wait for re-resolution. 
+ TimeUntilFunc func(time.Time) time.Duration + + // NewNetResolver returns the net.Resolver instance for the given target. + NewNetResolver func(string) (NetResolver, error) + + // AddressDialer is the dialer used to dial the DNS server. It accepts the + // Host portion of the URL corresponding to the user's dial target and + // returns a dial function. + AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error) +) diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 160911687..27cd81af9 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -61,6 +61,10 @@ func (b *builder) Scheme() string { return b.scheme } +func (b *builder) OverrideAuthority(resolver.Target) string { + return "localhost" +} + type nopResolver struct { } diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index 4cf85cad9..c7dbc8205 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -31,10 +31,11 @@ import ( "errors" "fmt" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/protoadapt" + "google.golang.org/protobuf/types/known/anypb" ) // Status represents an RPC status code, message, and details. It is immutable @@ -43,6 +44,34 @@ type Status struct { s *spb.Status } +// NewWithProto returns a new status including details from statusProto. This +// is meant to be used by the gRPC library only. +func NewWithProto(code codes.Code, message string, statusProto []string) *Status { + if len(statusProto) != 1 { + // No grpc-status-details bin header, or multiple; just ignore. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + st := &spb.Status{} + if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil { + // Probably not a google.rpc.Status proto; do not provide details. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + if st.Code == int32(code) { + // The codes match between the grpc-status header and the + // grpc-status-details-bin header; use the full details proto. + return &Status{s: st} + } + return &Status{ + s: &spb.Status{ + Code: int32(codes.Internal), + Message: fmt.Sprintf( + "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v", + code, message, st, + ), + }, + } +} + // New returns a Status representing c and msg. func New(c codes.Code, msg string) *Status { return &Status{s: &spb.Status{Code: int32(c), Message: msg}} @@ -102,14 +131,14 @@ func (s *Status) Err() error { // WithDetails returns a new status with the provided details messages appended to the status. // If any errors are encountered, it returns nil and the first error encountered. -func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { +func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) { if s.Code() == codes.OK { return nil, errors.New("no error details for status with code OK") } // s.Code() != OK implies that s.Proto() != nil. 
p := s.Proto() for _, detail := range details { - any, err := ptypes.MarshalAny(detail) + any, err := anypb.New(protoadapt.MessageV2Of(detail)) if err != nil { return nil, err } @@ -126,12 +155,12 @@ func (s *Status) Details() []any { } details := make([]any, 0, len(s.s.Details)) for _, any := range s.s.Details { - detail := &ptypes.DynamicAny{} - if err := ptypes.UnmarshalAny(any, detail); err != nil { + detail, err := any.UnmarshalNew() + if err != nil { details = append(details, err) continue } - details = append(details, detail.Message) + details = append(details, detail) } return details } diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go similarity index 69% rename from vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go rename to vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go index b5568b22e..4f347edd4 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go @@ -1,9 +1,7 @@ -//go:build !linux -// +build !linux +//go:build !unix && !windows /* - * - * Copyright 2018 gRPC authors. + * Copyright 2023 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,9 +17,13 @@ * */ -package channelz +package internal + +import ( + "net" +) -// GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c any) *SocketOptionData { - return nil +// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on non-unix platforms. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{} } diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go new file mode 100644 index 000000000..078137b7f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -0,0 +1,54 @@ +//go:build unix + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on +// the underlying connection with OS default values for keepalive parameters. +// +// TODO: Once https://github.com/golang/go/issues/62254 lands, and the +// appropriate Go version becomes less than our least supported Go version, we +// should look into using the new API to make things more straightforward. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{ + // Setting a negative value here prevents the Go stdlib from overriding + // the values of TCP keepalive time and interval. It also prevents the + // Go stdlib from enabling TCP keepalives by default. 
+		KeepAlive: time.Duration(-1),
+		// This method is called after the underlying network socket is created,
+		// but before dialing the socket (or calling its connect() method). The
+		// combination of unconditionally enabling TCP keepalives here, and
+		// disabling the overriding of TCP keepalive parameters by setting the
+		// KeepAlive field to a negative value above, results in OS defaults for
+		// the TCP keepalive interval and time parameters.
+		Control: func(_, _ string, c syscall.RawConn) error {
+			return c.Control(func(fd uintptr) {
+				unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
+			})
+		},
+	}
+}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
new file mode 100644
index 000000000..fd7d43a89
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
@@ -0,0 +1,54 @@
+//go:build windows
+
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+	"net"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/windows"
+)
+
+// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
+// the underlying connection with OS default values for keepalive parameters.
+//
+// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
+// appropriate Go version becomes less than our least supported Go version, we
+// should look into using the new API to make things more straightforward.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+	return &net.Dialer{
+		// Setting a negative value here prevents the Go stdlib from overriding
+		// the values of TCP keepalive time and interval. It also prevents the
+		// Go stdlib from enabling TCP keepalives by default.
+		KeepAlive: time.Duration(-1),
+		// This method is called after the underlying network socket is created,
+		// but before dialing the socket (or calling its connect() method). The
+		// combination of unconditionally enabling TCP keepalives here, and
+		// disabling the overriding of TCP keepalive parameters by setting the
+		// KeepAlive field to a negative value above, results in OS defaults for
+		// the TCP keepalive interval and time parameters.
+ Control: func(_, _ string, c syscall.RawConn) error { + return c.Control(func(fd uintptr) { + windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) + }) + }, + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index b330ccedc..3deadfb4a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -193,7 +193,7 @@ type goAway struct { code http2.ErrCode debugData []byte headsUp bool - closeConn error // if set, loopyWriter will exit, resulting in conn closure + closeConn error // if set, loopyWriter will exit with this error } func (*goAway) isTransportResponseFrame() bool { return false } @@ -336,7 +336,7 @@ func (c *controlBuffer) put(it cbItem) error { return err } -func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) { +func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { var wakeUp bool c.mu.Lock() if c.err != nil { @@ -344,7 +344,7 @@ func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, err return false, c.err } if f != nil { - if !f(it) { // f wasn't successful + if !f() { // f wasn't successful c.mu.Unlock() return false, nil } @@ -495,21 +495,22 @@ type loopyWriter struct { ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ - side: s, - cbuf: cbuf, - sendQuota: defaultWindowSize, - oiws: defaultWindowSize, - estdStreams: make(map[uint32]*outStream), - activeStreams: newOutStreamList(), - framer: fr, - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - bdpEst: bdpEst, - conn: conn, - logger: logger, + side: s, + cbuf: cbuf, + sendQuota: defaultWindowSize, + oiws: defaultWindowSize, + estdStreams: make(map[uint32]*outStream), + activeStreams: newOutStreamList(), + framer: fr, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + bdpEst: bdpEst, + conn: conn, + logger: logger, + ssGoAwayHandler: goAwayHandler, } return l } @@ -535,8 +536,8 @@ const minBatchSize = 1000 // size is too low to give stream goroutines a chance to fill it up. // // Upon exiting, if the error causing the exit is not an I/O error, run() -// flushes and closes the underlying connection. Otherwise, the connection is -// left open to allow the I/O error to be encountered by the reader instead. +// flushes the underlying connection. The connection is always left open to +// allow different closing behavior on the client and server. 
func (l *loopyWriter) run() (err error) { defer func() { if l.logger.V(logLevel) { @@ -544,7 +545,6 @@ func (l *loopyWriter) run() (err error) { } if !isIOError(err) { l.framer.writer.Flush() - l.conn.Close() } l.cbuf.finish() }() diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 98f80e3fa..4a3ddce29 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -35,7 +35,6 @@ import ( "sync" "time" - "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -45,20 +44,17 @@ import ( "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" ) // NewServerHandlerTransport returns a ServerTransport handling gRPC from // inside an http.Handler, or writes an HTTP error to w and returns an error. // It requires that the http Server supports HTTP/2. func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { - if r.ProtoMajor != 2 { - msg := "gRPC requires HTTP/2" - http.Error(w, msg, http.StatusBadRequest) - return nil, errors.New(msg) - } - if r.Method != "POST" { + if r.Method != http.MethodPost { + w.Header().Set("Allow", http.MethodPost) msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) - http.Error(w, msg, http.StatusBadRequest) + http.Error(w, msg, http.StatusMethodNotAllowed) return nil, errors.New(msg) } contentType := r.Header.Get("Content-Type") @@ -69,17 +65,36 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s http.Error(w, msg, http.StatusUnsupportedMediaType) return nil, errors.New(msg) } + if r.ProtoMajor != 2 { + msg := "gRPC requires HTTP/2" + http.Error(w, msg, http.StatusHTTPVersionNotSupported) + return nil, errors.New(msg) + } if _, ok := w.(http.Flusher); !ok { msg := "gRPC requires a ResponseWriter supporting http.Flusher" http.Error(w, msg, http.StatusInternalServerError) return nil, errors.New(msg) } + var localAddr net.Addr + if la := r.Context().Value(http.LocalAddrContextKey); la != nil { + localAddr, _ = la.(net.Addr) + } + var authInfo credentials.AuthInfo + if r.TLS != nil { + authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} + } + p := peer.Peer{ + Addr: strAddr(r.RemoteAddr), + LocalAddr: localAddr, + AuthInfo: authInfo, + } st := &serverHandlerTransport{ rw: w, req: r, closedCh: make(chan struct{}), writes: make(chan func()), + peer: p, contentType: contentType, contentSubtype: contentSubtype, stats: stats, @@ -134,6 +149,8 @@ type serverHandlerTransport struct { headerMD metadata.MD + peer peer.Peer + closeOnce sync.Once closedCh chan struct{} // closed on Close @@ -165,7 +182,13 @@ func (ht *serverHandlerTransport) Close(err error) { }) } -func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } +func (ht *serverHandlerTransport) Peer() *peer.Peer { + return &peer.Peer{ + Addr: ht.peer.Addr, + LocalAddr: ht.peer.LocalAddr, + AuthInfo: ht.peer.AuthInfo, + } +} // strAddr is a net.Addr backed by either a TCP "ip:port" string, or // the empty string if unknown. 
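The stricter request validation above mostly matters when gRPC is served through the standard library's HTTP stack rather than its own listener, since that is the only path that reaches NewServerHandlerTransport. A minimal sketch of that wiring, assuming the h2c helper from golang.org/x/net (the server setup here is illustrative, not part of this patch):

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/h2c"
	"google.golang.org/grpc"
)

func main() {
	gs := grpc.NewServer() // register services on gs before serving

	// *grpc.Server implements http.Handler; requests entering through
	// ServeHTTP are backed by serverHandlerTransport. With this patch a
	// non-POST request is rejected with 405 plus an Allow header, and a
	// plain HTTP/1.1 request gets 505 instead of a generic 400.
	h := h2c.NewHandler(gs, &http2.Server{})
	log.Fatal(http.ListenAndServe("localhost:8080", h))
}
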
@@ -220,18 +243,20 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
 			h.Set("Grpc-Message", encodeGrpcMessage(m))
 		}

+		s.hdrMu.Lock()
 		if p := st.Proto(); p != nil && len(p.Details) > 0 {
+			delete(s.trailer, grpcStatusDetailsBinHeader)
 			stBytes, err := proto.Marshal(p)
 			if err != nil {
 				// TODO: return error instead, when callers are able to handle it.
 				panic(err)
 			}
-			h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes))
+			h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes))
 		}

-		if md := s.Trailer(); len(md) > 0 {
-			for k, vv := range md {
+		if len(s.trailer) > 0 {
+			for k, vv := range s.trailer {
 				// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
 				if isReservedHeader(k) {
 					continue
@@ -243,6 +268,7 @@
 				}
 			}
 		}
+		s.hdrMu.Unlock()
 	})

 	if err == nil { // transport has not been closed
@@ -287,7 +313,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
 }

 // writeCustomHeaders sets custom headers set on the stream via SetHeader
-// on the first write call (Write, WriteHeader, or WriteStatus).
+// on the first write call (Write, WriteHeader, or WriteStatus)
 func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
 	h := ht.rw.Header()

@@ -344,10 +370,8 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
 	return err
 }

-func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
+func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) {
 	// With this transport type there will be exactly 1 stream: this HTTP request.
-
-	ctx := ht.req.Context()
 	var cancel context.CancelFunc
 	if ht.timeoutSet {
 		ctx, cancel = context.WithTimeout(ctx, ht.timeout)
@@ -367,34 +391,19 @@
 		ht.Close(errors.New("request is done processing"))
 	}()

+	ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
 	req := ht.req
 	s := &Stream{
-		id:             0, // irrelevant
-		requestRead:    func(int) {},
-		cancel:         cancel,
-		buf:            newRecvBuffer(),
-		st:             ht,
-		method:         req.URL.Path,
-		recvCompress:   req.Header.Get("grpc-encoding"),
-		contentSubtype: ht.contentSubtype,
-	}
-	pr := &peer.Peer{
-		Addr: ht.RemoteAddr(),
-	}
-	if req.TLS != nil {
-		pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}}
-	}
-	ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
-	s.ctx = peer.NewContext(ctx, pr)
-	for _, sh := range ht.stats {
-		s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
-		inHeader := &stats.InHeader{
-			FullMethod:  s.method,
-			RemoteAddr:  ht.RemoteAddr(),
-			Compression: s.recvCompress,
-		}
-		sh.HandleRPC(s.ctx, inHeader)
+		id:               0, // irrelevant
+		ctx:              ctx,
+		requestRead:      func(int) {},
+		cancel:           cancel,
+		buf:              newRecvBuffer(),
+		st:               ht,
+		method:           req.URL.Path,
+		recvCompress:     req.Header.Get("grpc-encoding"),
+		contentSubtype:   ht.contentSubtype,
+		headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
} s.trReader = &transportReader{ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index badab8acf..3c63c7069 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -36,6 +36,7 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" icredentials "google.golang.org/grpc/internal/credentials" "google.golang.org/grpc/internal/grpclog" @@ -43,7 +44,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" istatus "google.golang.org/grpc/internal/status" - "google.golang.org/grpc/internal/syscall" + isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -58,6 +59,8 @@ import ( // atomically. var clientConnectionCounter uint64 +var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) + // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. @@ -111,11 +114,11 @@ type http2Client struct { streamQuota int64 streamsQuotaAvailable chan struct{} waitingStreams uint32 - nextID uint32 registeredCompressors string // Do not access controlBuf with mu held. mu sync.Mutex // guard the following variables + nextID uint32 state transportState activeStreams map[uint32]*Stream // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. @@ -137,9 +140,7 @@ type http2Client struct { // variable. kpDormant bool - // Fields below are for channelz metric collection. - channelzID *channelz.Identifier - czData *channelzData + channelz *channelz.Socket onClose func(GoAwayReason) @@ -176,7 +177,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error if networkType == "tcp" && useProxy { return proxyDial(ctx, address, grpcUA) } - return (&net.Dialer{}).DialContext(ctx, networkType, address) + return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address) } func isTemporary(err error) bool { @@ -262,7 +263,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } keepaliveEnabled := false if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } keepaliveEnabled = true @@ -316,6 +317,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts if opts.MaxHeaderListSize != nil { maxHeaderListSize = *opts.MaxHeaderListSize } + t := &http2Client{ ctx: ctx, ctxDone: ctx.Done(), // Cache Done chan. 
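The metadataFromOutgoingContextRaw assertion above illustrates the linkage idiom this patch leans on throughout: one package publishes an unexported function through an exported any-typed variable in the internal package, and importers type-assert it back to the concrete function type, avoiding an import cycle. A self-contained sketch of the idiom (names here are invented for illustration, not gRPC's):

package main

import "fmt"

// FromOutgoingContextRaw plays the role of the any-typed slot in the
// internal package; the owning package fills it in at init time.
var FromOutgoingContextRaw any // func(string) (string, bool)

func init() {
	// In gRPC this assignment lives in the owning package's init().
	FromOutgoingContextRaw = func(key string) (string, bool) {
		return "md-" + key, true
	}
}

func main() {
	// The consumer type-asserts the slot back to its concrete function
	// type; in gRPC this happens once, in a package-level variable of the
	// importing package, which Go initializes after the imported package
	// that owns the slot.
	f := FromOutgoingContextRaw.(func(string) (string, bool))
	v, ok := f("authorization")
	fmt.Println(v, ok) // md-authorization true
}
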
@@ -343,11 +345,25 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts maxConcurrentStreams: defaultMaxStreamsClient, streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), - czData: new(channelzData), keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), onClose: onClose, } + var czSecurity credentials.ChannelzSecurityValue + if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { + czSecurity = au.GetSecurityValue() + } + t.channelz = channelz.RegisterSocket( + &channelz.Socket{ + SocketType: channelz.SocketTypeNormal, + Parent: opts.ChannelzParent, + SocketMetrics: channelz.SocketMetrics{}, + EphemeralMetrics: t.socketMetrics, + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + SocketOptions: channelz.GetSocketOption(t.conn), + Security: czSecurity, + }) t.logger = prefixLoggerForClientTransport(t) // Add peer information to the http2client context. t.ctx = peer.NewContext(t.ctx, t.getPeer()) @@ -378,10 +394,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } sh.HandleConn(t.ctx, connBegin) } - t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) - if err != nil { - return nil, err - } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) go t.keepalive() @@ -396,10 +408,10 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerErrCh := make(chan error, 1) go t.reader(readerErrCh) defer func() { - if err == nil { - err = <-readerErrCh - } if err != nil { + // writerDone should be closed since the loopy goroutine + // wouldn't have started in the case this function returns an error. + close(t.writerDone) t.Close(err) } }() @@ -446,9 +458,19 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts if err := t.framer.writer.Flush(); err != nil { return nil, err } + // Block until the server preface is received successfully or an error occurs. + if err = <-readerErrCh; err != nil { + return nil, err + } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) - t.loopy.run() + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + if err := t.loopy.run(); !isIOError(err) { + // Immediately close the connection, as the loopy writer returns + // when there are no more active streams and we were draining (the + // server sent a GOAWAY). For I/O errors, the reader will hit it + // after draining any remaining incoming data. + t.conn.Close() + } close(t.writerDone) }() return t, nil @@ -493,9 +515,21 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { func (t *http2Client) getPeer() *peer.Peer { return &peer.Peer{ - Addr: t.remoteAddr, - AuthInfo: t.authInfo, // Can be nil + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + LocalAddr: t.localAddr, + } +} + +// OutgoingGoAwayHandler writes a GOAWAY to the connection. Always returns (false, err) as we want the GoAway +// to be the last frame loopy writes to the transport. 
+func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.mu.Lock() + defer t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil { + return false, err } + return false, g.closeConn } func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { @@ -566,7 +600,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) } - if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { var k string for k, vv := range md { // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. @@ -746,8 +780,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return ErrConnClosing } if channelz.IsOn() { - atomic.AddInt64(&t.czData.streamsStarted, 1) - atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.StreamsStarted.Add(1) + t.channelz.SocketMetrics.LastLocalStreamCreatedTimestamp.Store(time.Now().UnixNano()) } // If the keepalive goroutine has gone dormant, wake it up. if t.kpDormant { @@ -762,7 +796,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, firstTry := true var ch chan struct{} transportDrainRequired := false - checkForStreamQuota := func(it any) bool { + checkForStreamQuota := func() bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { t.waitingStreams++ @@ -774,23 +808,24 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, t.waitingStreams-- } t.streamQuota-- - h := it.(*headerFrame) - h.streamID = t.nextID - t.nextID += 2 - - // Drain client transport if nextID > MaxStreamID which signals gRPC that - // the connection is closed and a new one must be created for subsequent RPCs. - transportDrainRequired = t.nextID > MaxStreamID - s.id = h.streamID - s.fc = &inFlow{limit: uint32(t.initialWindowSize)} t.mu.Lock() if t.state == draining || t.activeStreams == nil { // Can be niled from Close(). t.mu.Unlock() return false // Don't create a stream if the transport is already closed. } + + hdr.streamID = t.nextID + t.nextID += 2 + // Drain client transport if nextID > MaxStreamID which signals gRPC that + // the connection is closed and a new one must be created for subsequent RPCs. 
+ transportDrainRequired = t.nextID > MaxStreamID + + s.id = hdr.streamID + s.fc = &inFlow{limit: uint32(t.initialWindowSize)} t.activeStreams[s.id] = s t.mu.Unlock() + if t.streamQuota > 0 && t.waitingStreams > 0 { select { case t.streamsQuotaAvailable <- struct{}{}: @@ -800,13 +835,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } var hdrListSizeErr error - checkForHeaderListSize := func(it any) bool { + checkForHeaderListSize := func() bool { if t.maxSendHeaderListSize == nil { return true } - hdrFrame := it.(*headerFrame) var sz int64 - for _, f := range hdrFrame.hf { + for _, f := range hdr.hf { if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) return false @@ -815,8 +849,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } for { - success, err := t.controlBuf.executeAndPut(func(it any) bool { - return checkForHeaderListSize(it) && checkForStreamQuota(it) + success, err := t.controlBuf.executeAndPut(func() bool { + return checkForHeaderListSize() && checkForStreamQuota() }, hdr) if err != nil { // Connection closed. @@ -918,16 +952,16 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. t.mu.Unlock() if channelz.IsOn() { if eosReceived { - atomic.AddInt64(&t.czData.streamsSucceeded, 1) + t.channelz.SocketMetrics.StreamsSucceeded.Add(1) } else { - atomic.AddInt64(&t.czData.streamsFailed, 1) + t.channelz.SocketMetrics.StreamsFailed.Add(1) } } }, rst: rst, rstCode: rstCode, } - addBackStreamQuota := func(any) bool { + addBackStreamQuota := func() bool { t.streamQuota++ if t.streamQuota > 0 && t.waitingStreams > 0 { select { @@ -947,7 +981,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // Close kicks off the shutdown process of the transport. This should be called // only once on a transport. Once it is called, the transport should not be -// accessed any more. +// accessed anymore. func (t *http2Client) Close(err error) { t.mu.Lock() // Make sure we only close once. @@ -972,10 +1006,13 @@ func (t *http2Client) Close(err error) { t.kpDormancyCond.Signal() } t.mu.Unlock() - t.controlBuf.finish() + // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the + // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err}) + <-t.writerDone t.cancel() t.conn.Close() - channelz.RemoveEntry(t.channelzID) + channelz.RemoveEntry(t.channelz.ID) // Append info about previous goaways if there were any, since this may be important // for understanding the root cause for this connection to be closed. _, goAwayDebugMessage := t.GetGoAwayReason() @@ -1080,7 +1117,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. 
func (t *http2Client) updateFlowControl(n uint32) { - updateIWS := func(any) bool { + updateIWS := func() bool { t.initialWindowSize = int32(n) t.mu.Lock() for _, s := range t.activeStreams { @@ -1233,7 +1270,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { } updateFuncs = append(updateFuncs, updateStreamQuota) } - t.controlBuf.executeAndPut(func(any) bool { + t.controlBuf.executeAndPut(func() bool { for _, f := range updateFuncs { f() } @@ -1321,10 +1358,8 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. - if streamID > id && streamID <= upperLimit { - atomic.StoreUint32(&stream.unprocessed, 1) - streamsToClose = append(streamsToClose, stream) - } + atomic.StoreUint32(&stream.unprocessed, 1) + streamsToClose = append(streamsToClose, stream) } } t.mu.Unlock() @@ -1399,7 +1434,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { mdata = make(map[string][]string) contentTypeErr = "malformed header: missing HTTP content-type" grpcMessage string - statusGen *status.Status recvCompress string httpStatusCode *int httpStatusErr string @@ -1434,12 +1468,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { rawStatusCode = codes.Code(uint32(code)) case "grpc-message": grpcMessage = decodeGrpcMessage(hf.Value) - case "grpc-status-details-bin": - var err error - statusGen, err = decodeGRPCStatusDetails(hf.Value) - if err != nil { - headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) - } case ":status": if hf.Value == "200" { httpStatusErr = "" @@ -1548,14 +1576,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - if statusGen == nil { - statusGen = status.New(rawStatusCode, grpcMessage) - } + status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) // If client received END_STREAM from server while stream was still active, // send RST_STREAM. rstStream := s.getState() == streamActive - t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, statusGen, mdata, true) + t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true) } // readServerPreface reads and handles the initial settings frame from the @@ -1709,7 +1735,7 @@ func (t *http2Client) keepalive() { // keepalive timer expired. In both cases, we need to send a ping. 
if !outstandingPing { if channelz.IsOn() { - atomic.AddInt64(&t.czData.kpCount, 1) + t.channelz.SocketMetrics.KeepAlivesSent.Add(1) } t.controlBuf.put(p) timeoutLeft = t.kp.Timeout @@ -1739,40 +1765,23 @@ func (t *http2Client) GoAway() <-chan struct{} { return t.goAway } -func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { - s := channelz.SocketInternalMetric{ - StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), - StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), - StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), - MessagesSent: atomic.LoadInt64(&t.czData.msgSent), - MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), - KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), - LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), - LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), - LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), - LocalFlowControlWindow: int64(t.fc.getSize()), - SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, - // RemoteName : - } - if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { - s.Security = au.GetSecurityValue() - } - s.RemoteFlowControlWindow = t.getOutFlowWindow() - return &s +func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics { + return &channelz.EphemeralSocketMetrics{ + LocalFlowControlWindow: int64(t.fc.getSize()), + RemoteFlowControlWindow: t.getOutFlowWindow(), + } } func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } func (t *http2Client) IncrMsgSent() { - atomic.AddInt64(&t.czData.msgSent, 1) - atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) } func (t *http2Client) IncrMsgRecv() { - atomic.AddInt64(&t.czData.msgRecv, 1) - atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) } func (t *http2Client) getOutFlowWindow() int64 { diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index c06db679d..b7091165b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -25,6 +25,7 @@ import ( "fmt" "io" "math" + "math/rand" "net" "net/http" "strconv" @@ -32,18 +33,17 @@ import ( "sync/atomic" "time" - "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/syscall" + "google.golang.org/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -68,18 +68,15 @@ var serverConnectionCounter uint64 // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. 
- ctx context.Context - done chan struct{} - conn net.Conn - loopy *loopyWriter - readerDone chan struct{} // sync point to enable testing. - writerDone chan struct{} // sync point to enable testing. - remoteAddr net.Addr - localAddr net.Addr - authInfo credentials.AuthInfo // auth info about the connection - inTapHandle tap.ServerInHandle - framer *framer + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + loopyWriterDone chan struct{} + peer peer.Peer + inTapHandle tap.ServerInHandle + framer *framer // The max number of concurrent streams. maxStreams uint32 // controlBuf delivers all the control related tasks (e.g., window @@ -121,8 +118,7 @@ type http2Server struct { idle time.Time // Fields below are for channelz metric collection. - channelzID *channelz.Identifier - czData *channelzData + channelz *channelz.Socket bufferPool *bufferPool connectionID uint64 @@ -243,16 +239,18 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, } done := make(chan struct{}) + peer := peer.Peer{ + Addr: conn.RemoteAddr(), + LocalAddr: conn.LocalAddr(), + AuthInfo: authInfo, + } t := &http2Server{ - ctx: setConnection(context.Background(), rawConn), done: done, conn: conn, - remoteAddr: conn.RemoteAddr(), - localAddr: conn.LocalAddr(), - authInfo: authInfo, + peer: peer, framer: framer, readerDone: make(chan struct{}), - writerDone: make(chan struct{}), + loopyWriterDone: make(chan struct{}), maxStreams: config.MaxStreams, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, @@ -263,12 +261,25 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, idle: time.Now(), kep: kep, initialWindowSize: iwz, - czData: new(channelzData), bufferPool: newBufferPool(), } + var czSecurity credentials.ChannelzSecurityValue + if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { + czSecurity = au.GetSecurityValue() + } + t.channelz = channelz.RegisterSocket( + &channelz.Socket{ + SocketType: channelz.SocketTypeNormal, + Parent: config.ChannelzParent, + SocketMetrics: channelz.SocketMetrics{}, + EphemeralMetrics: t.socketMetrics, + LocalAddr: t.peer.LocalAddr, + RemoteAddr: t.peer.Addr, + SocketOptions: channelz.GetSocketOption(t.conn), + Security: czSecurity, + }, + ) t.logger = prefixLoggerForServerTransport(t) - // Add peer information to the http2server context. 
- t.ctx = peer.NewContext(t.ctx, t.getPeer()) t.controlBuf = newControlBuffer(t.done) if dynamicWindow { @@ -277,18 +288,6 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, updateFlowControl: t.updateFlowControl, } } - for _, sh := range t.stats { - t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - }) - connBegin := &stats.ConnBegin{} - sh.HandleConn(t.ctx, connBegin) - } - t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) - if err != nil { - return nil, err - } t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) t.framer.writer.Flush() @@ -331,10 +330,27 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) - t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - t.loopy.run() - close(t.writerDone) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + err := t.loopy.run() + close(t.loopyWriterDone) + if !isIOError(err) { + // Close the connection if a non-I/O error occurs (for I/O errors + // the reader will also encounter the error and close). Wait 1 + // second before closing the connection, or when the reader is done + // (i.e. the client already closed the connection or a connection + // error occurred). This avoids the potential problem where there + // is unread data on the receive side of the connection, which, if + // closed, would lead to a TCP RST instead of FIN, and the client + // encountering errors. For more info: + // https://github.com/grpc/grpc-go/issues/5358 + timer := time.NewTimer(time.Second) + defer timer.Stop() + select { + case <-t.readerDone: + case <-timer.C: + } + t.conn.Close() + } }() go t.keepalive() return t, nil @@ -342,7 +358,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -369,10 +385,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( buf := newRecvBuffer() s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + headerWireLength: int(frame.Header().Length), } var ( // if false, content-type was missing or invalid @@ -511,9 +528,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.state = streamReadDone } if timeoutSet { - s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) + s.ctx, s.cancel = context.WithTimeout(ctx, timeout) } else { - s.ctx, s.cancel = context.WithCancel(t.ctx) + s.ctx, s.cancel = context.WithCancel(ctx) } // Attach the received metadata to the context. 
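
The shutdown sequence added to the server's loopy-writer goroutine above encodes a classic TCP subtlety: closing a socket while unread data is still queued on its receive side makes the kernel emit RST instead of FIN, and the peer then surfaces a hard connection error (see grpc/grpc-go#5358, cited in the comment). A condensed sketch of the same pattern follows; the helper name and the net.Pipe harness are invented for illustration.

package main

import (
	"net"
	"time"
)

// closeGracefully waits until the read side is finished, or a bounded
// one-second timeout elapses, before closing conn, so pending inbound bytes
// are drained and the close goes out as FIN rather than RST.
func closeGracefully(conn net.Conn, readerDone <-chan struct{}) {
	timer := time.NewTimer(time.Second)
	defer timer.Stop()
	select {
	case <-readerDone: // the reader saw EOF or an error; safe to close now
	case <-timer.C: // bounded wait so shutdown can never hang indefinitely
	}
	conn.Close()
}

func main() {
	local, remote := net.Pipe()
	readerDone := make(chan struct{})
	go func() { // stand-in for the transport's reader goroutine
		buf := make([]byte, 1)
		local.Read(buf) // unblocks once the peer side closes
		close(readerDone)
	}()
	remote.Close()
	closeGracefully(local, readerDone)
}
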
@@ -561,7 +578,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if t.inTapHandle != nil { var err error - if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil { t.mu.Unlock() if t.logger.V(logLevel) { t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) @@ -586,25 +603,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } t.mu.Unlock() if channelz.IsOn() { - atomic.AddInt64(&t.czData.streamsStarted, 1) - atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.StreamsStarted.Add(1) + t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano()) } s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) } - s.ctx = traceCtx(s.ctx, s.method) - for _, sh := range t.stats { - s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) - inHeader := &stats.InHeader{ - FullMethod: s.method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: s.recvCompress, - WireLength: int(frame.Header().Length), - Header: mdata.Copy(), - } - sh.HandleRPC(s.ctx, inHeader) - } s.ctxDone = s.ctx.Done() s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ @@ -630,8 +634,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. -func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { - defer close(t.readerDone) +func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { + defer func() { + close(t.readerDone) + <-t.loopyWriterDone + }() for { t.controlBuf.throttle() frame, err := t.framer.fr.ReadFrame() @@ -656,18 +663,20 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. } continue } - if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close(err) - return - } t.Close(err) return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if err := t.operateHeaders(frame, handle, traceCtx); err != nil { - t.Close(err) - break + if err := t.operateHeaders(ctx, frame, handle); err != nil { + // Any error processing client headers, e.g. invalid stream ID, + // is considered a protocol violation. 
+ t.controlBuf.put(&goAway{ + code: http2.ErrCodeProtocol, + debugData: []byte(err.Error()), + closeConn: err, + }) + continue } case *http2.DataFrame: t.handleData(frame) @@ -850,7 +859,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { } return nil }) - t.controlBuf.executeAndPut(func(any) bool { + t.controlBuf.executeAndPut(func() bool { for _, f := range updateFuncs { f() } @@ -980,7 +989,12 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } } if err := t.writeHeaderLocked(s); err != nil { - return status.Convert(err).Err() + switch e := err.(type) { + case ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + return status.Convert(err).Err() + } } return nil } @@ -999,12 +1013,13 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) } headerFields = appendHeaderFieldsFromMD(headerFields, s.header) - success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{ + hf := &headerFrame{ streamID: s.id, hf: headerFields, endStream: false, onWrite: t.setResetPingStrikes, - }) + } + success, err := t.controlBuf.executeAndPut(func() bool { return t.checkForHeaderListSize(hf) }, hf) if !success { if err != nil { return err @@ -1053,12 +1068,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) if p := st.Proto(); p != nil && len(p.Details) > 0 { + // Do not use the user's grpc-status-details-bin (if present) if we are + // even attempting to set our own. + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) } else { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)}) } } @@ -1190,12 +1208,12 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Timeout)) return } if !outstandingPing { if channelz.IsOn() { - atomic.AddInt64(&t.czData.kpCount, 1) + t.channelz.SocketMetrics.KeepAlivesSent.Add(1) } t.controlBuf.put(p) kpTimeoutLeft = t.kp.Timeout @@ -1235,15 +1253,11 @@ func (t *http2Server) Close(err error) { if err := t.conn.Close(); err != nil && t.logger.V(logLevel) { t.logger.Infof("Error closing underlying net.Conn during Close: %v", err) } - channelz.RemoveEntry(t.channelzID) + channelz.RemoveEntry(t.channelz.ID) // Cancel all active streams. for _, s := range streams { s.cancel() } - for _, sh := range t.stats { - connEnd := &stats.ConnEnd{} - sh.HandleConn(t.ctx, connEnd) - } } // deleteStream deletes the stream s from transport's active streams. 
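
One detail of the WriteStatus change above is easy to miss: when a status carries details, the server marshals the whole google.rpc.Status proto and ships it base64-encoded in the grpc-status-details-bin trailer, after deleting any user-supplied trailer of the same name so the two can never conflict. A standalone sketch of that encoding step, with a made-up status for the example; gRPC's encodeBinHeader emits unpadded base64, which RawStdEncoding matches here.

package main

import (
	"encoding/base64"
	"fmt"

	spb "google.golang.org/genproto/googleapis/rpc/status"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Hypothetical rich status; code 3 corresponds to codes.InvalidArgument.
	st := &spb.Status{Code: 3, Message: "name must not be empty"}
	raw, err := proto.Marshal(st)
	if err != nil {
		panic(err)
	}
	// "-bin" metadata values travel as unpadded base64 on the wire.
	fmt.Println("grpc-status-details-bin:", base64.RawStdEncoding.EncodeToString(raw))
}
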
@@ -1260,9 +1274,9 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { if channelz.IsOn() { if eosReceived { - atomic.AddInt64(&t.czData.streamsSucceeded, 1) + t.channelz.SocketMetrics.StreamsSucceeded.Add(1) } else { - atomic.AddInt64(&t.czData.streamsFailed, 1) + t.channelz.SocketMetrics.StreamsFailed.Add(1) } } } @@ -1309,10 +1323,6 @@ func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eo }) } -func (t *http2Server) RemoteAddr() net.Addr { - return t.remoteAddr -} - func (t *http2Server) Drain(debugData string) { t.mu.Lock() defer t.mu.Unlock() @@ -1349,6 +1359,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } + t.framer.writer.Flush() if retErr != nil { return false, retErr } @@ -1369,7 +1380,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { return false, err } go func() { - timer := time.NewTimer(time.Minute) + timer := time.NewTimer(5 * time.Second) defer timer.Stop() select { case <-t.drainEvent.Done(): @@ -1382,38 +1393,21 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { return false, nil } -func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { - s := channelz.SocketInternalMetric{ - StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), - StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), - StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), - MessagesSent: atomic.LoadInt64(&t.czData.msgSent), - MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), - KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), - LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), - LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), - LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), - LocalFlowControlWindow: int64(t.fc.getSize()), - SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, - // RemoteName : - } - if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { - s.Security = au.GetSecurityValue() - } - s.RemoteFlowControlWindow = t.getOutFlowWindow() - return &s +func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics { + return &channelz.EphemeralSocketMetrics{ + LocalFlowControlWindow: int64(t.fc.getSize()), + RemoteFlowControlWindow: t.getOutFlowWindow(), + } } func (t *http2Server) IncrMsgSent() { - atomic.AddInt64(&t.czData.msgSent, 1) - atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) } func (t *http2Server) IncrMsgRecv() { - atomic.AddInt64(&t.czData.msgRecv, 1) - atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) } func (t *http2Server) getOutFlowWindow() int64 { @@ -1431,10 +1425,12 @@ func (t *http2Server) getOutFlowWindow() int64 { } } -func (t *http2Server) getPeer() *peer.Peer { +// Peer returns the peer of the transport.
+func (t *http2Server) Peer() *peer.Peer { return &peer.Peer{ - Addr: t.remoteAddr, - AuthInfo: t.authInfo, // Can be nil + Addr: t.peer.Addr, + LocalAddr: t.peer.LocalAddr, + AuthInfo: t.peer.AuthInfo, // Can be nil } } @@ -1444,7 +1440,7 @@ func getJitter(v time.Duration) time.Duration { } // Generate a jitter between +/- 10% of the value. r := int64(v / 10) - j := grpcrand.Int63n(2*r) - r + j := rand.Int63n(2*r) - r return time.Duration(j) } @@ -1459,6 +1455,6 @@ func GetConnection(ctx context.Context) net.Conn { // SetConnection adds the connection to the context to be able to get // information about the destination ip and port for an incoming RPC. This also // allows any unary or streaming interceptors to see the connection. -func setConnection(ctx context.Context, conn net.Conn) context.Context { +func SetConnection(ctx context.Context, conn net.Conn) context.Context { return context.WithValue(ctx, connectionKey{}, conn) } diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 195814008..39cef3bd4 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -34,12 +34,9 @@ import ( "time" "unicode/utf8" - "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) const ( @@ -88,6 +85,8 @@ var ( } ) +var grpcStatusDetailsBinHeader = "grpc-status-details-bin" + // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. @@ -103,7 +102,6 @@ func isReservedHeader(hdr string) bool { "grpc-message", "grpc-status", "grpc-timeout", - "grpc-status-details-bin", // Intentionally exclude grpc-previous-rpc-attempts and // grpc-retry-pushback-ms, which are "reserved", but their API // intentionally works via metadata. @@ -154,18 +152,6 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { - v, err := decodeBinHeader(rawDetails) - if err != nil { - return nil, err - } - st := &spb.Status{} - if err = proto.Unmarshal(v, st); err != nil { - return nil, err - } - return status.FromProto(st), nil -} - type timeoutUnit uint8 const ( @@ -432,10 +418,9 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu return f } -func getWriteBufferPool(writeBufferSize int) *sync.Pool { +func getWriteBufferPool(size int) *sync.Pool { writeBufferMutex.Lock() defer writeBufferMutex.Unlock() - size := writeBufferSize * 2 pool, ok := writeBufferPoolMap[size] if ok { return pool diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index 415961987..24fa10325 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -28,6 +28,8 @@ import ( "net/http" "net/http/httputil" "net/url" + + "google.golang.org/grpc/internal" ) const proxyAuthHeaderKey = "Proxy-Authorization" @@ -112,7 +114,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri // proxyDial dials, connecting to a proxy first if necessary. 
Checks if a proxy // is necessary, dials, does the HTTP CONNECT handshake, and returns the // connection. -func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { +func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) { newAddr := addr proxyURL, err := mapAddress(addr) if err != nil { @@ -122,15 +124,15 @@ func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, newAddr = proxyURL.Host } - conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr) + conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr) if err != nil { - return + return nil, err } - if proxyURL != nil { + if proxyURL == nil { // proxy is disabled if proxyURL is nil. - conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + return conn, err } - return + return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) } func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 74a811fc0..4b39c0ade 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -28,6 +28,7 @@ import ( "fmt" "io" "net" + "strings" "sync" "sync/atomic" "time" @@ -37,6 +38,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" @@ -265,7 +267,8 @@ type Stream struct { // headerValid indicates whether a valid header was received. Only // meaningful after headerChan is closed (always call waitOnHeader() before // reading its value). Not valid on server side. - headerValid bool + headerValid bool + headerWireLength int // Only set on server side. // hdrMu protects header and trailer metadata on the server-side. hdrMu sync.Mutex @@ -301,7 +304,7 @@ func (s *Stream) isHeaderSent() bool { } // updateHeaderSent updates headerSent and returns true -// if it was alreay set. It is valid only on server-side. +// if it was already set. It is valid only on server-side. func (s *Stream) updateHeaderSent() bool { return atomic.SwapUint32(&s.headerSent, 1) == 1 } @@ -360,8 +363,12 @@ func (s *Stream) SendCompress() string { // ClientAdvertisedCompressors returns the compressor names advertised by the // client via grpc-accept-encoding header. -func (s *Stream) ClientAdvertisedCompressors() string { - return s.clientAdvertisedCompressors +func (s *Stream) ClientAdvertisedCompressors() []string { + values := strings.Split(s.clientAdvertisedCompressors, ",") + for i, v := range values { + values[i] = strings.TrimSpace(v) + } + return values } // Done returns a channel which is closed when it receives the final status @@ -425,6 +432,12 @@ func (s *Stream) Context() context.Context { return s.ctx } +// SetContext sets the context of the stream. This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *Stream) SetContext(ctx context.Context) { + s.ctx = ctx +} + // Method returns the method for the stream. func (s *Stream) Method() string { return s.method @@ -437,6 +450,12 @@ func (s *Stream) Status() *status.Status { return s.status } +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. Valid only on the server. 
+func (s *Stream) HeaderWireLength() int { + return s.headerWireLength +} + // SetHeader sets the header metadata. This can be called multiple times. // Server side only. // This should not be called in parallel to other data writes. @@ -552,7 +571,7 @@ type ServerConfig struct { WriteBufferSize int ReadBufferSize int SharedWriteBuffer bool - ChannelzParentID *channelz.Identifier + ChannelzParent *channelz.Server MaxHeaderListSize *uint32 HeaderTableSize *uint32 } @@ -587,8 +606,8 @@ type ConnectOptions struct { ReadBufferSize int // SharedWriteBuffer indicates whether connections should reuse write buffer SharedWriteBuffer bool - // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. - ChannelzParentID *channelz.Identifier + // ChannelzParent sets the addrConn id which initiated the creation of this client transport. + ChannelzParent *channelz.SubChannel // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. @@ -698,7 +717,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(func(*Stream), func(context.Context, string) context.Context) + HandleStreams(context.Context, func(*Stream)) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. @@ -717,8 +736,8 @@ type ServerTransport interface { // handlers will be terminated asynchronously. Close(err error) - // RemoteAddr returns the remote network address. - RemoteAddr() net.Addr + // Peer returns the peer of the server transport. + Peer() *peer.Peer // Drain notifies the client this ServerTransport stops accepting new RPCs. Drain(debugData string) @@ -801,30 +820,6 @@ const ( GoAwayTooManyPings GoAwayReason = 2 ) -// channelzData is used to store channelz related data for http2Client and http2Server. -// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic -// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. -// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. -type channelzData struct { - kpCount int64 - // The number of streams that have started, including already finished ones. - streamsStarted int64 - // Client side: The number of streams that have ended successfully by receiving - // EoS bit set frame from server. - // Server side: The number of streams that have ended successfully by sending - // frame with EoS bit set. - streamsSucceeded int64 - streamsFailed int64 - // lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type - // instead of time.Time since it's more costly to atomically update time.Time variable than int64 - // variable. The same goes for lastMsgSentTime and lastMsgRecvTime. - lastStreamCreatedTime int64 - msgSent int64 - msgRecv int64 - lastMsgSentTime int64 - lastMsgRecvTime int64 -} - // ContextErr converts the error from context package into a status error. 
func ContextErr(err error) error { switch err { diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go deleted file mode 100644 index e8b492774..000000000 --- a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package internal - -import ( - "google.golang.org/grpc/attributes" - "google.golang.org/grpc/resolver" -) - -// handshakeClusterNameKey is the type used as the key to store cluster name in -// the Attributes field of resolver.Address. -type handshakeClusterNameKey struct{} - -// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field -// is updated with the cluster name. -func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address { - addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName) - return addr -} - -// GetXDSHandshakeClusterName returns cluster name stored in attr. -func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) { - v := attr.Value(handshakeClusterNameKey{}) - name, ok := v.(string) - return name, ok -} diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/benchmark_service.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/benchmark_service.pb.go index 031e12ee2..6c5afce0b 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/benchmark_service.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/benchmark_service.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.22.0 +// protoc-gen-go v1.34.1 +// protoc v4.25.2 // source: grpc/testing/benchmark_service.proto package grpc_testing diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/benchmark_service_grpc.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/benchmark_service_grpc.pb.go index 84cd44e4d..3a54d00c1 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/benchmark_service_grpc.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/benchmark_service_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v4.22.0 +// - protoc-gen-go-grpc v1.4.0 +// - protoc v4.25.2 // source: grpc/testing/benchmark_service.proto package grpc_testing @@ -32,8 +32,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.64.0 or later. 
+const _ = grpc.SupportPackageIsVersion9 const ( BenchmarkService_UnaryCall_FullMethodName = "/grpc.testing.BenchmarkService/UnaryCall" @@ -53,16 +53,16 @@ type BenchmarkServiceClient interface { // Repeated sequence of one request followed by one response. // Should be called streaming ping-pong // The server returns the client payload as-is on each response - StreamingCall(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingCallClient, error) + StreamingCall(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[SimpleRequest, SimpleResponse], error) // Single-sided unbounded streaming from client to server // The server returns the client payload as-is once the client does WritesDone - StreamingFromClient(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingFromClientClient, error) + StreamingFromClient(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[SimpleRequest, SimpleResponse], error) // Single-sided unbounded streaming from server to client // The server repeatedly returns the client payload as-is - StreamingFromServer(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (BenchmarkService_StreamingFromServerClient, error) + StreamingFromServer(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SimpleResponse], error) // Two-sided unbounded streaming between server to client // Both sides send the content of their own choice to the other - StreamingBothWays(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingBothWaysClient, error) + StreamingBothWays(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[SimpleRequest, SimpleResponse], error) } type benchmarkServiceClient struct { @@ -74,85 +74,48 @@ func NewBenchmarkServiceClient(cc grpc.ClientConnInterface) BenchmarkServiceClie } func (c *benchmarkServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SimpleResponse) - err := c.cc.Invoke(ctx, BenchmarkService_UnaryCall_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, BenchmarkService_UnaryCall_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *benchmarkServiceClient) StreamingCall(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingCallClient, error) { - stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[0], BenchmarkService_StreamingCall_FullMethodName, opts...) +func (c *benchmarkServiceClient) StreamingCall(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[SimpleRequest, SimpleResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[0], BenchmarkService_StreamingCall_FullMethodName, cOpts...) 
if err != nil { return nil, err } - x := &benchmarkServiceStreamingCallClient{stream} + x := &grpc.GenericClientStream[SimpleRequest, SimpleResponse]{ClientStream: stream} return x, nil } -type BenchmarkService_StreamingCallClient interface { - Send(*SimpleRequest) error - Recv() (*SimpleResponse, error) - grpc.ClientStream -} - -type benchmarkServiceStreamingCallClient struct { - grpc.ClientStream -} - -func (x *benchmarkServiceStreamingCallClient) Send(m *SimpleRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *benchmarkServiceStreamingCallClient) Recv() (*SimpleResponse, error) { - m := new(SimpleResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type BenchmarkService_StreamingCallClient = grpc.BidiStreamingClient[SimpleRequest, SimpleResponse] -func (c *benchmarkServiceClient) StreamingFromClient(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingFromClientClient, error) { - stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[1], BenchmarkService_StreamingFromClient_FullMethodName, opts...) +func (c *benchmarkServiceClient) StreamingFromClient(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[SimpleRequest, SimpleResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[1], BenchmarkService_StreamingFromClient_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &benchmarkServiceStreamingFromClientClient{stream} + x := &grpc.GenericClientStream[SimpleRequest, SimpleResponse]{ClientStream: stream} return x, nil } -type BenchmarkService_StreamingFromClientClient interface { - Send(*SimpleRequest) error - CloseAndRecv() (*SimpleResponse, error) - grpc.ClientStream -} - -type benchmarkServiceStreamingFromClientClient struct { - grpc.ClientStream -} - -func (x *benchmarkServiceStreamingFromClientClient) Send(m *SimpleRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *benchmarkServiceStreamingFromClientClient) CloseAndRecv() (*SimpleResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(SimpleResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type BenchmarkService_StreamingFromClientClient = grpc.ClientStreamingClient[SimpleRequest, SimpleResponse] -func (c *benchmarkServiceClient) StreamingFromServer(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (BenchmarkService_StreamingFromServerClient, error) { - stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[2], BenchmarkService_StreamingFromServer_FullMethodName, opts...) +func (c *benchmarkServiceClient) StreamingFromServer(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SimpleResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[2], BenchmarkService_StreamingFromServer_FullMethodName, cOpts...) 
if err != nil { return nil, err } - x := &benchmarkServiceStreamingFromServerClient{stream} + x := &grpc.GenericClientStream[SimpleRequest, SimpleResponse]{ClientStream: stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -162,53 +125,21 @@ func (c *benchmarkServiceClient) StreamingFromServer(ctx context.Context, in *Si return x, nil } -type BenchmarkService_StreamingFromServerClient interface { - Recv() (*SimpleResponse, error) - grpc.ClientStream -} - -type benchmarkServiceStreamingFromServerClient struct { - grpc.ClientStream -} - -func (x *benchmarkServiceStreamingFromServerClient) Recv() (*SimpleResponse, error) { - m := new(SimpleResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type BenchmarkService_StreamingFromServerClient = grpc.ServerStreamingClient[SimpleResponse] -func (c *benchmarkServiceClient) StreamingBothWays(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingBothWaysClient, error) { - stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[3], BenchmarkService_StreamingBothWays_FullMethodName, opts...) +func (c *benchmarkServiceClient) StreamingBothWays(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[SimpleRequest, SimpleResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[3], BenchmarkService_StreamingBothWays_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &benchmarkServiceStreamingBothWaysClient{stream} + x := &grpc.GenericClientStream[SimpleRequest, SimpleResponse]{ClientStream: stream} return x, nil } -type BenchmarkService_StreamingBothWaysClient interface { - Send(*SimpleRequest) error - Recv() (*SimpleResponse, error) - grpc.ClientStream -} - -type benchmarkServiceStreamingBothWaysClient struct { - grpc.ClientStream -} - -func (x *benchmarkServiceStreamingBothWaysClient) Send(m *SimpleRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *benchmarkServiceStreamingBothWaysClient) Recv() (*SimpleResponse, error) { - m := new(SimpleResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type BenchmarkService_StreamingBothWaysClient = grpc.BidiStreamingClient[SimpleRequest, SimpleResponse] // BenchmarkServiceServer is the server API for BenchmarkService service. // All implementations must embed UnimplementedBenchmarkServiceServer @@ -220,16 +151,16 @@ type BenchmarkServiceServer interface { // Repeated sequence of one request followed by one response. 
// Should be called streaming ping-pong // The server returns the client payload as-is on each response - StreamingCall(BenchmarkService_StreamingCallServer) error + StreamingCall(grpc.BidiStreamingServer[SimpleRequest, SimpleResponse]) error // Single-sided unbounded streaming from client to server // The server returns the client payload as-is once the client does WritesDone - StreamingFromClient(BenchmarkService_StreamingFromClientServer) error + StreamingFromClient(grpc.ClientStreamingServer[SimpleRequest, SimpleResponse]) error // Single-sided unbounded streaming from server to client // The server repeatedly returns the client payload as-is - StreamingFromServer(*SimpleRequest, BenchmarkService_StreamingFromServerServer) error + StreamingFromServer(*SimpleRequest, grpc.ServerStreamingServer[SimpleResponse]) error // Two-sided unbounded streaming between server to client // Both sides send the content of their own choice to the other - StreamingBothWays(BenchmarkService_StreamingBothWaysServer) error + StreamingBothWays(grpc.BidiStreamingServer[SimpleRequest, SimpleResponse]) error mustEmbedUnimplementedBenchmarkServiceServer() } @@ -240,16 +171,16 @@ type UnimplementedBenchmarkServiceServer struct { func (UnimplementedBenchmarkServiceServer) UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method UnaryCall not implemented") } -func (UnimplementedBenchmarkServiceServer) StreamingCall(BenchmarkService_StreamingCallServer) error { +func (UnimplementedBenchmarkServiceServer) StreamingCall(grpc.BidiStreamingServer[SimpleRequest, SimpleResponse]) error { return status.Errorf(codes.Unimplemented, "method StreamingCall not implemented") } -func (UnimplementedBenchmarkServiceServer) StreamingFromClient(BenchmarkService_StreamingFromClientServer) error { +func (UnimplementedBenchmarkServiceServer) StreamingFromClient(grpc.ClientStreamingServer[SimpleRequest, SimpleResponse]) error { return status.Errorf(codes.Unimplemented, "method StreamingFromClient not implemented") } -func (UnimplementedBenchmarkServiceServer) StreamingFromServer(*SimpleRequest, BenchmarkService_StreamingFromServerServer) error { +func (UnimplementedBenchmarkServiceServer) StreamingFromServer(*SimpleRequest, grpc.ServerStreamingServer[SimpleResponse]) error { return status.Errorf(codes.Unimplemented, "method StreamingFromServer not implemented") } -func (UnimplementedBenchmarkServiceServer) StreamingBothWays(BenchmarkService_StreamingBothWaysServer) error { +func (UnimplementedBenchmarkServiceServer) StreamingBothWays(grpc.BidiStreamingServer[SimpleRequest, SimpleResponse]) error { return status.Errorf(codes.Unimplemented, "method StreamingBothWays not implemented") } func (UnimplementedBenchmarkServiceServer) mustEmbedUnimplementedBenchmarkServiceServer() {} @@ -284,103 +215,36 @@ func _BenchmarkService_UnaryCall_Handler(srv interface{}, ctx context.Context, d } func _BenchmarkService_StreamingCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(BenchmarkServiceServer).StreamingCall(&benchmarkServiceStreamingCallServer{stream}) -} - -type BenchmarkService_StreamingCallServer interface { - Send(*SimpleResponse) error - Recv() (*SimpleRequest, error) - grpc.ServerStream -} - -type benchmarkServiceStreamingCallServer struct { - grpc.ServerStream + return srv.(BenchmarkServiceServer).StreamingCall(&grpc.GenericServerStream[SimpleRequest, SimpleResponse]{ServerStream: stream}) } -func (x *benchmarkServiceStreamingCallServer) Send(m 
*SimpleResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *benchmarkServiceStreamingCallServer) Recv() (*SimpleRequest, error) { - m := new(SimpleRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type BenchmarkService_StreamingCallServer = grpc.BidiStreamingServer[SimpleRequest, SimpleResponse] func _BenchmarkService_StreamingFromClient_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(BenchmarkServiceServer).StreamingFromClient(&benchmarkServiceStreamingFromClientServer{stream}) + return srv.(BenchmarkServiceServer).StreamingFromClient(&grpc.GenericServerStream[SimpleRequest, SimpleResponse]{ServerStream: stream}) } -type BenchmarkService_StreamingFromClientServer interface { - SendAndClose(*SimpleResponse) error - Recv() (*SimpleRequest, error) - grpc.ServerStream -} - -type benchmarkServiceStreamingFromClientServer struct { - grpc.ServerStream -} - -func (x *benchmarkServiceStreamingFromClientServer) SendAndClose(m *SimpleResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *benchmarkServiceStreamingFromClientServer) Recv() (*SimpleRequest, error) { - m := new(SimpleRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type BenchmarkService_StreamingFromClientServer = grpc.ClientStreamingServer[SimpleRequest, SimpleResponse] func _BenchmarkService_StreamingFromServer_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(SimpleRequest) if err := stream.RecvMsg(m); err != nil { return err } - return srv.(BenchmarkServiceServer).StreamingFromServer(m, &benchmarkServiceStreamingFromServerServer{stream}) + return srv.(BenchmarkServiceServer).StreamingFromServer(m, &grpc.GenericServerStream[SimpleRequest, SimpleResponse]{ServerStream: stream}) } -type BenchmarkService_StreamingFromServerServer interface { - Send(*SimpleResponse) error - grpc.ServerStream -} - -type benchmarkServiceStreamingFromServerServer struct { - grpc.ServerStream -} - -func (x *benchmarkServiceStreamingFromServerServer) Send(m *SimpleResponse) error { - return x.ServerStream.SendMsg(m) -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
+type BenchmarkService_StreamingFromServerServer = grpc.ServerStreamingServer[SimpleResponse] func _BenchmarkService_StreamingBothWays_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(BenchmarkServiceServer).StreamingBothWays(&benchmarkServiceStreamingBothWaysServer{stream}) -} - -type BenchmarkService_StreamingBothWaysServer interface { - Send(*SimpleResponse) error - Recv() (*SimpleRequest, error) - grpc.ServerStream -} - -type benchmarkServiceStreamingBothWaysServer struct { - grpc.ServerStream -} - -func (x *benchmarkServiceStreamingBothWaysServer) Send(m *SimpleResponse) error { - return x.ServerStream.SendMsg(m) + return srv.(BenchmarkServiceServer).StreamingBothWays(&grpc.GenericServerStream[SimpleRequest, SimpleResponse]{ServerStream: stream}) } -func (x *benchmarkServiceStreamingBothWaysServer) Recv() (*SimpleRequest, error) { - m := new(SimpleRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type BenchmarkService_StreamingBothWaysServer = grpc.BidiStreamingServer[SimpleRequest, SimpleResponse] // BenchmarkService_ServiceDesc is the grpc.ServiceDesc for BenchmarkService service. // It's only intended for direct use with grpc.RegisterService, diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/control.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/control.pb.go index e09b9bacc..c2176a93a 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/control.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/control.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.22.0 +// protoc-gen-go v1.34.1 +// protoc v4.25.2 // source: grpc/testing/control.proto package grpc_testing diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/core/stats.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/core/stats.pb.go index e61bf8f5c..99369152c 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/core/stats.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/core/stats.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.22.0 +// protoc-gen-go v1.34.1 +// protoc v4.25.2 // source: grpc/core/stats.proto package core diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/empty.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/empty.pb.go index 8030ccd05..c91a2c0f5 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/empty.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/empty.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.22.0 +// protoc-gen-go v1.34.1 +// protoc v4.25.2 // source: grpc/testing/empty.proto package grpc_testing diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/messages.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/messages.pb.go index de3cb7aff..c9d37b297 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/messages.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/messages.pb.go @@ -16,8 +16,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.31.0 -// protoc v4.22.0 +// protoc-gen-go v1.34.1 +// protoc v4.25.2 // source: grpc/testing/messages.proto package grpc_testing @@ -139,74 +139,25 @@ func (GrpclbRouteType) EnumDescriptor() ([]byte, []int) { return file_grpc_testing_messages_proto_rawDescGZIP(), []int{1} } -type HookRequestCommand int32 - -const ( - // Start the HTTP endpoint - HookRequestCommand_START HookRequestCommand = 0 - // Stop - HookRequestCommand_STOP HookRequestCommand = 1 - // Return from HTTP GET/POST - HookRequestCommand_RETURN HookRequestCommand = 2 -) - -// Enum value maps for HookRequestCommand. -var ( - HookRequestCommand_name = map[int32]string{ - 0: "START", - 1: "STOP", - 2: "RETURN", - } - HookRequestCommand_value = map[string]int32{ - "START": 0, - "STOP": 1, - "RETURN": 2, - } -) - -func (x HookRequestCommand) Enum() *HookRequestCommand { - p := new(HookRequestCommand) - *p = x - return p -} - -func (x HookRequestCommand) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (HookRequestCommand) Descriptor() protoreflect.EnumDescriptor { - return file_grpc_testing_messages_proto_enumTypes[2].Descriptor() -} - -func (HookRequestCommand) Type() protoreflect.EnumType { - return &file_grpc_testing_messages_proto_enumTypes[2] -} - -func (x HookRequestCommand) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use HookRequestCommand.Descriptor instead. -func (HookRequestCommand) EnumDescriptor() ([]byte, []int) { - return file_grpc_testing_messages_proto_rawDescGZIP(), []int{2} -} - type LoadBalancerStatsResponse_MetadataType int32 const ( - LoadBalancerStatsResponse_Initial LoadBalancerStatsResponse_MetadataType = 0 - LoadBalancerStatsResponse_Trailing LoadBalancerStatsResponse_MetadataType = 1 + LoadBalancerStatsResponse_UNKNOWN LoadBalancerStatsResponse_MetadataType = 0 + LoadBalancerStatsResponse_INITIAL LoadBalancerStatsResponse_MetadataType = 1 + LoadBalancerStatsResponse_TRAILING LoadBalancerStatsResponse_MetadataType = 2 ) // Enum value maps for LoadBalancerStatsResponse_MetadataType. 
var ( LoadBalancerStatsResponse_MetadataType_name = map[int32]string{ - 0: "Initial", - 1: "Trailing", + 0: "UNKNOWN", + 1: "INITIAL", + 2: "TRAILING", } LoadBalancerStatsResponse_MetadataType_value = map[string]int32{ - "Initial": 0, - "Trailing": 1, + "UNKNOWN": 0, + "INITIAL": 1, + "TRAILING": 2, } ) @@ -221,11 +172,11 @@ func (x LoadBalancerStatsResponse_MetadataType) String() string { } func (LoadBalancerStatsResponse_MetadataType) Descriptor() protoreflect.EnumDescriptor { - return file_grpc_testing_messages_proto_enumTypes[3].Descriptor() + return file_grpc_testing_messages_proto_enumTypes[2].Descriptor() } func (LoadBalancerStatsResponse_MetadataType) Type() protoreflect.EnumType { - return &file_grpc_testing_messages_proto_enumTypes[3] + return &file_grpc_testing_messages_proto_enumTypes[2] } func (x LoadBalancerStatsResponse_MetadataType) Number() protoreflect.EnumNumber { @@ -268,11 +219,11 @@ func (x ClientConfigureRequest_RpcType) String() string { } func (ClientConfigureRequest_RpcType) Descriptor() protoreflect.EnumDescriptor { - return file_grpc_testing_messages_proto_enumTypes[4].Descriptor() + return file_grpc_testing_messages_proto_enumTypes[3].Descriptor() } func (ClientConfigureRequest_RpcType) Type() protoreflect.EnumType { - return &file_grpc_testing_messages_proto_enumTypes[4] + return &file_grpc_testing_messages_proto_enumTypes[3] } func (x ClientConfigureRequest_RpcType) Number() protoreflect.EnumNumber { @@ -284,6 +235,62 @@ func (ClientConfigureRequest_RpcType) EnumDescriptor() ([]byte, []int) { return file_grpc_testing_messages_proto_rawDescGZIP(), []int{16, 0} } +type HookRequest_HookRequestCommand int32 + +const ( + // Default value + HookRequest_UNSPECIFIED HookRequest_HookRequestCommand = 0 + // Start the HTTP endpoint + HookRequest_START HookRequest_HookRequestCommand = 1 + // Stop + HookRequest_STOP HookRequest_HookRequestCommand = 2 + // Return from HTTP GET/POST + HookRequest_RETURN HookRequest_HookRequestCommand = 3 +) + +// Enum value maps for HookRequest_HookRequestCommand. +var ( + HookRequest_HookRequestCommand_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "START", + 2: "STOP", + 3: "RETURN", + } + HookRequest_HookRequestCommand_value = map[string]int32{ + "UNSPECIFIED": 0, + "START": 1, + "STOP": 2, + "RETURN": 3, + } +) + +func (x HookRequest_HookRequestCommand) Enum() *HookRequest_HookRequestCommand { + p := new(HookRequest_HookRequestCommand) + *p = x + return p +} + +func (x HookRequest_HookRequestCommand) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HookRequest_HookRequestCommand) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_testing_messages_proto_enumTypes[4].Descriptor() +} + +func (HookRequest_HookRequestCommand) Type() protoreflect.EnumType { + return &file_grpc_testing_messages_proto_enumTypes[4] +} + +func (x HookRequest_HookRequestCommand) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HookRequest_HookRequestCommand.Descriptor instead. +func (HookRequest_HookRequestCommand) EnumDescriptor() ([]byte, []int) { + return file_grpc_testing_messages_proto_rawDescGZIP(), []int{21, 0} +} + // TODO(dgq): Go back to using well-known types once // https://github.com/grpc/grpc/issues/6980 has been fixed. 
// import "google/protobuf/wrappers.proto"; @@ -1612,14 +1619,70 @@ func (x *TestOrcaReport) GetUtilization() map[string]float64 { return nil } +// Status that will be return to callers of the Hook method +type SetReturnStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + GrpcCodeToReturn int32 `protobuf:"varint,1,opt,name=grpc_code_to_return,json=grpcCodeToReturn,proto3" json:"grpc_code_to_return,omitempty"` + GrpcStatusDescription string `protobuf:"bytes,2,opt,name=grpc_status_description,json=grpcStatusDescription,proto3" json:"grpc_status_description,omitempty"` +} + +func (x *SetReturnStatusRequest) Reset() { + *x = SetReturnStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_testing_messages_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetReturnStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetReturnStatusRequest) ProtoMessage() {} + +func (x *SetReturnStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_testing_messages_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetReturnStatusRequest.ProtoReflect.Descriptor instead. +func (*SetReturnStatusRequest) Descriptor() ([]byte, []int) { + return file_grpc_testing_messages_proto_rawDescGZIP(), []int{20} +} + +func (x *SetReturnStatusRequest) GetGrpcCodeToReturn() int32 { + if x != nil { + return x.GrpcCodeToReturn + } + return 0 +} + +func (x *SetReturnStatusRequest) GetGrpcStatusDescription() string { + if x != nil { + return x.GrpcStatusDescription + } + return "" +} + type HookRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Command HookRequestCommand `protobuf:"varint,1,opt,name=command,proto3,enum=grpc.testing.HookRequestCommand" json:"command,omitempty"` - GrpcCodeToReturn int32 `protobuf:"varint,2,opt,name=grpc_code_to_return,json=grpcCodeToReturn,proto3" json:"grpc_code_to_return,omitempty"` - GrpcStatusDescription string `protobuf:"bytes,3,opt,name=grpc_status_description,json=grpcStatusDescription,proto3" json:"grpc_status_description,omitempty"` + Command HookRequest_HookRequestCommand `protobuf:"varint,1,opt,name=command,proto3,enum=grpc.testing.HookRequest_HookRequestCommand" json:"command,omitempty"` + GrpcCodeToReturn int32 `protobuf:"varint,2,opt,name=grpc_code_to_return,json=grpcCodeToReturn,proto3" json:"grpc_code_to_return,omitempty"` + GrpcStatusDescription string `protobuf:"bytes,3,opt,name=grpc_status_description,json=grpcStatusDescription,proto3" json:"grpc_status_description,omitempty"` // Server port to listen to ServerPort int32 `protobuf:"varint,4,opt,name=server_port,json=serverPort,proto3" json:"server_port,omitempty"` } @@ -1627,7 +1690,7 @@ type HookRequest struct { func (x *HookRequest) Reset() { *x = HookRequest{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[20] + mi := &file_grpc_testing_messages_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1640,7 +1703,7 @@ func (x *HookRequest) String() string { func (*HookRequest) ProtoMessage() {} func (x *HookRequest) ProtoReflect() protoreflect.Message { - mi := 
&file_grpc_testing_messages_proto_msgTypes[20] + mi := &file_grpc_testing_messages_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1653,14 +1716,14 @@ func (x *HookRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use HookRequest.ProtoReflect.Descriptor instead. func (*HookRequest) Descriptor() ([]byte, []int) { - return file_grpc_testing_messages_proto_rawDescGZIP(), []int{20} + return file_grpc_testing_messages_proto_rawDescGZIP(), []int{21} } -func (x *HookRequest) GetCommand() HookRequestCommand { +func (x *HookRequest) GetCommand() HookRequest_HookRequestCommand { if x != nil { return x.Command } - return HookRequestCommand_START + return HookRequest_UNSPECIFIED } func (x *HookRequest) GetGrpcCodeToReturn() int32 { @@ -1693,7 +1756,7 @@ type HookResponse struct { func (x *HookResponse) Reset() { *x = HookResponse{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[21] + mi := &file_grpc_testing_messages_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1706,7 +1769,7 @@ func (x *HookResponse) String() string { func (*HookResponse) ProtoMessage() {} func (x *HookResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_testing_messages_proto_msgTypes[21] + mi := &file_grpc_testing_messages_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1719,7 +1782,7 @@ func (x *HookResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use HookResponse.ProtoReflect.Descriptor instead. func (*HookResponse) Descriptor() ([]byte, []int) { - return file_grpc_testing_messages_proto_rawDescGZIP(), []int{21} + return file_grpc_testing_messages_proto_rawDescGZIP(), []int{22} } type LoadBalancerStatsResponse_MetadataEntry struct { @@ -1732,14 +1795,14 @@ type LoadBalancerStatsResponse_MetadataEntry struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // Value, exactly as received from the server. 
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - // Metadata type is either Initial or Trailing + // Metadata type Type LoadBalancerStatsResponse_MetadataType `protobuf:"varint,3,opt,name=type,proto3,enum=grpc.testing.LoadBalancerStatsResponse_MetadataType" json:"type,omitempty"` } func (x *LoadBalancerStatsResponse_MetadataEntry) Reset() { *x = LoadBalancerStatsResponse_MetadataEntry{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[22] + mi := &file_grpc_testing_messages_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1752,7 +1815,7 @@ func (x *LoadBalancerStatsResponse_MetadataEntry) String() string { func (*LoadBalancerStatsResponse_MetadataEntry) ProtoMessage() {} func (x *LoadBalancerStatsResponse_MetadataEntry) ProtoReflect() protoreflect.Message { - mi := &file_grpc_testing_messages_proto_msgTypes[22] + mi := &file_grpc_testing_messages_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1786,7 +1849,7 @@ func (x *LoadBalancerStatsResponse_MetadataEntry) GetType() LoadBalancerStatsRes if x != nil { return x.Type } - return LoadBalancerStatsResponse_Initial + return LoadBalancerStatsResponse_UNKNOWN } type LoadBalancerStatsResponse_RpcMetadata struct { @@ -1802,7 +1865,7 @@ type LoadBalancerStatsResponse_RpcMetadata struct { func (x *LoadBalancerStatsResponse_RpcMetadata) Reset() { *x = LoadBalancerStatsResponse_RpcMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[23] + mi := &file_grpc_testing_messages_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1815,7 +1878,7 @@ func (x *LoadBalancerStatsResponse_RpcMetadata) String() string { func (*LoadBalancerStatsResponse_RpcMetadata) ProtoMessage() {} func (x *LoadBalancerStatsResponse_RpcMetadata) ProtoReflect() protoreflect.Message { - mi := &file_grpc_testing_messages_proto_msgTypes[23] + mi := &file_grpc_testing_messages_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1850,7 +1913,7 @@ type LoadBalancerStatsResponse_MetadataByPeer struct { func (x *LoadBalancerStatsResponse_MetadataByPeer) Reset() { *x = LoadBalancerStatsResponse_MetadataByPeer{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[24] + mi := &file_grpc_testing_messages_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1863,7 +1926,7 @@ func (x *LoadBalancerStatsResponse_MetadataByPeer) String() string { func (*LoadBalancerStatsResponse_MetadataByPeer) ProtoMessage() {} func (x *LoadBalancerStatsResponse_MetadataByPeer) ProtoReflect() protoreflect.Message { - mi := &file_grpc_testing_messages_proto_msgTypes[24] + mi := &file_grpc_testing_messages_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1898,7 +1961,7 @@ type LoadBalancerStatsResponse_RpcsByPeer struct { func (x *LoadBalancerStatsResponse_RpcsByPeer) Reset() { *x = LoadBalancerStatsResponse_RpcsByPeer{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[25] + mi := &file_grpc_testing_messages_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 
ms.StoreMessageInfo(mi) } @@ -1911,7 +1974,7 @@ func (x *LoadBalancerStatsResponse_RpcsByPeer) String() string { func (*LoadBalancerStatsResponse_RpcsByPeer) ProtoMessage() {} func (x *LoadBalancerStatsResponse_RpcsByPeer) ProtoReflect() protoreflect.Message { - mi := &file_grpc_testing_messages_proto_msgTypes[25] + mi := &file_grpc_testing_messages_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1949,7 +2012,7 @@ type LoadBalancerAccumulatedStatsResponse_MethodStats struct { func (x *LoadBalancerAccumulatedStatsResponse_MethodStats) Reset() { *x = LoadBalancerAccumulatedStatsResponse_MethodStats{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[33] + mi := &file_grpc_testing_messages_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1962,7 +2025,7 @@ func (x *LoadBalancerAccumulatedStatsResponse_MethodStats) String() string { func (*LoadBalancerAccumulatedStatsResponse_MethodStats) ProtoMessage() {} func (x *LoadBalancerAccumulatedStatsResponse_MethodStats) ProtoReflect() protoreflect.Message { - mi := &file_grpc_testing_messages_proto_msgTypes[33] + mi := &file_grpc_testing_messages_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2006,7 +2069,7 @@ type ClientConfigureRequest_Metadata struct { func (x *ClientConfigureRequest_Metadata) Reset() { *x = ClientConfigureRequest_Metadata{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[36] + mi := &file_grpc_testing_messages_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2019,7 +2082,7 @@ func (x *ClientConfigureRequest_Metadata) String() string { func (*ClientConfigureRequest_Metadata) ProtoMessage() {} func (x *ClientConfigureRequest_Metadata) ProtoReflect() protoreflect.Message { - mi := &file_grpc_testing_messages_proto_msgTypes[36] + mi := &file_grpc_testing_messages_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2196,7 +2259,7 @@ var file_grpc_testing_messages_proto_rawDesc = []byte{ 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x73, - 0x22, 0xc3, 0x09, 0x0a, 0x19, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x22, 0xd0, 0x09, 0x0a, 0x19, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, @@ -2269,165 +2332,176 @@ var file_grpc_testing_messages_proto_rawDesc = []byte{ 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 
0x38, 0x01, 0x22, 0x29, 0x0a, 0x0c, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x49, - 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x54, 0x72, 0x61, 0x69, - 0x6c, 0x69, 0x6e, 0x67, 0x10, 0x01, 0x22, 0x25, 0x0a, 0x23, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, - 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x86, 0x09, - 0x0a, 0x24, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, - 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8e, 0x01, 0x0a, 0x1a, 0x6e, 0x75, 0x6d, 0x5f, 0x72, - 0x70, 0x63, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x6d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4e, 0x2e, 0x67, 0x72, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x36, 0x0a, 0x0c, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x49, 0x54, + 0x49, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x49, 0x4e, + 0x47, 0x10, 0x02, 0x22, 0x25, 0x0a, 0x23, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x86, 0x09, 0x0a, 0x24, 0x4c, + 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, + 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x8e, 0x01, 0x0a, 0x1a, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, 0x73, + 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x75, 0x6d, + 0x52, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x16, 0x6e, 0x75, + 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x12, 0x94, 0x01, 0x0a, 0x1c, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, + 0x73, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x6d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x50, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, - 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x42, 0x79, - 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, 
0x01, 0x52, - 0x16, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x42, - 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x94, 0x01, 0x0a, 0x1c, 0x6e, 0x75, 0x6d, 0x5f, - 0x72, 0x70, 0x63, 0x73, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x5f, 0x62, - 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x50, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, - 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x2e, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, 0x63, 0x63, 0x65, 0x65, - 0x64, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x42, 0x02, 0x18, 0x01, 0x52, 0x18, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, 0x63, - 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x8b, - 0x01, 0x0a, 0x19, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x66, 0x61, 0x69, 0x6c, - 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x4d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, - 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x46, 0x61, - 0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x15, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x46, 0x61, - 0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x70, 0x0a, 0x10, - 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, - 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, - 0x50, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, - 0x73, 0x74, 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a, 0x49, - 0x0a, 0x1b, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, - 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4b, 0x0a, 0x1d, 0x4e, 0x75, 0x6d, - 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x42, 0x79, 0x4d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 
0x3a, 0x02, 0x38, 0x01, 0x1a, 0x48, 0x0a, 0x1a, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, - 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x1a, 0xcf, 0x01, 0x0a, 0x0b, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, - 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x65, 0x64, 0x12, 0x62, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, + 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, + 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x18, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, 0x63, 0x63, 0x65, 0x65, + 0x64, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x8b, 0x01, 0x0a, 0x19, + 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, + 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x4d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, + 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, + 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, + 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, + 0x18, 0x01, 0x52, 0x15, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, + 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x70, 0x0a, 0x10, 0x73, 0x74, 0x61, + 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x74, - 0x61, 0x74, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x1a, 0x81, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x73, 0x74, 0x61, + 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, 0x65, 
0x74, 0x68, 0x6f, 0x64, 0x1a, 0x49, 0x0a, 0x1b, 0x4e, + 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x54, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, - 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, - 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe9, 0x02, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x42, 0x0a, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, - 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, - 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x12, 0x49, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, - 0x63, 0x1a, 0x74, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x40, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x2e, 0x52, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x29, 0x0a, 0x07, 0x52, 0x70, 0x63, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x43, 0x41, 0x4c, 0x4c, - 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x41, 0x52, 0x59, 0x5f, 0x43, 0x41, 0x4c, 0x4c, - 0x10, 0x01, 0x22, 0x19, 0x0a, 0x17, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e, 0x0a, - 0x0a, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x72, - 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x72, 0x73, 0x73, 0x22, 0x8b, 0x03, - 0x0a, 0x0e, 0x54, 0x65, 0x73, 0x74, 0x4f, 0x72, 0x63, 0x61, 0x52, 
0x65, 0x70, 0x6f, 0x72, 0x74, - 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0e, 0x63, 0x70, 0x75, 0x55, 0x74, - 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x6d, 0x65, 0x6d, - 0x6f, 0x72, 0x79, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x11, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, 0x74, 0x69, - 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x73, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x65, - 0x73, 0x74, 0x4f, 0x72, 0x63, 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x12, 0x4f, 0x0a, 0x0b, 0x75, 0x74, - 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x2d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x54, - 0x65, 0x73, 0x74, 0x4f, 0x72, 0x63, 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x55, 0x74, - 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, - 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x3e, 0x0a, 0x10, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, 0x55, - 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd1, 0x01, 0x0a, 0x0b, - 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x07, 0x63, - 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x48, 0x6f, 0x6f, 0x6b, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, - 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x2d, 0x0a, 0x13, 0x67, 0x72, 0x70, 0x63, 0x5f, - 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, 0x67, 0x72, 0x70, 0x63, 0x43, 0x6f, 0x64, 0x65, 0x54, 0x6f, - 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x12, 0x36, 0x0a, 0x17, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x67, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, - 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 
0x20, - 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x22, - 0x0e, 0x0a, 0x0c, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, - 0x1f, 0x0a, 0x0b, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, - 0x0a, 0x0c, 0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x00, - 0x2a, 0x6f, 0x0a, 0x0f, 0x47, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x52, 0x50, 0x43, 0x4c, 0x42, 0x5f, 0x52, 0x4f, - 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x47, 0x52, 0x50, 0x43, 0x4c, 0x42, 0x5f, 0x52, 0x4f, 0x55, - 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, - 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x52, 0x50, 0x43, 0x4c, 0x42, 0x5f, 0x52, 0x4f, 0x55, - 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x10, - 0x02, 0x2a, 0x35, 0x0a, 0x12, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x54, 0x41, 0x52, 0x54, - 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x54, 0x4f, 0x50, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, - 0x52, 0x45, 0x54, 0x55, 0x52, 0x4e, 0x10, 0x02, 0x42, 0x1d, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x69, 0x6e, 0x74, 0x65, - 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4b, 0x0a, 0x1d, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, + 0x73, 0x53, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x1a, 0x48, 0x0a, 0x1a, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x46, 0x61, + 0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xcf, 0x01, + 0x0a, 0x0b, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x21, 0x0a, + 0x0c, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, + 0x12, 0x62, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x4a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, + 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, + 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 
0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, + 0x81, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x54, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0xe9, 0x02, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, + 0x0a, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2c, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x2e, 0x52, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x12, 0x49, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, + 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x1a, 0x74, + 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x40, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, + 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x22, 0x29, 0x0a, 0x07, 0x52, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x0e, 0x0a, 0x0a, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x10, 0x00, 0x12, + 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x41, 0x52, 0x59, 
0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x22, + 0x19, 0x0a, 0x17, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e, 0x0a, 0x0a, 0x4d, 0x65, + 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x73, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x72, 0x73, 0x73, 0x22, 0x8b, 0x03, 0x0a, 0x0e, 0x54, + 0x65, 0x73, 0x74, 0x4f, 0x72, 0x63, 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x27, 0x0a, + 0x0f, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0e, 0x63, 0x70, 0x75, 0x55, 0x74, 0x69, 0x6c, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, + 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x11, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x5f, 0x63, 0x6f, 0x73, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x4f, + 0x72, 0x63, 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x12, 0x4f, 0x0a, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x65, 0x73, 0x74, + 0x4f, 0x72, 0x63, 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x55, 0x74, 0x69, 0x6c, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x75, 0x74, 0x69, + 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x3e, 0x0a, 0x10, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, 0x55, 0x74, 0x69, 0x6c, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x7f, 0x0a, 0x16, 0x53, 0x65, 0x74, 0x52, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, + 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x10, 0x67, 0x72, 0x70, 0x63, 0x43, 0x6f, 0x64, 0x65, 0x54, 0x6f, 0x52, 0x65, 0x74, 0x75, 0x72, + 0x6e, 0x12, 0x36, 0x0a, 0x17, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x15, 0x67, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 
0x75, 0x73, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa5, 0x02, 0x0a, 0x0b, 0x48, 0x6f, + 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x07, 0x63, 0x6f, 0x6d, + 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, + 0x64, 0x12, 0x2d, 0x0a, 0x13, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x74, + 0x6f, 0x5f, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, + 0x67, 0x72, 0x70, 0x63, 0x43, 0x6f, 0x64, 0x65, 0x54, 0x6f, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x12, 0x36, 0x0a, 0x17, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x15, 0x67, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x46, 0x0a, 0x12, 0x48, 0x6f, 0x6f, + 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, + 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x53, 0x54, 0x41, 0x52, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x53, + 0x54, 0x4f, 0x50, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x54, 0x55, 0x52, 0x4e, 0x10, + 0x03, 0x22, 0x0e, 0x0a, 0x0c, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2a, 0x1f, 0x0a, 0x0b, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x10, 0x0a, 0x0c, 0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, 0x41, 0x42, 0x4c, 0x45, + 0x10, 0x00, 0x2a, 0x6f, 0x0a, 0x0f, 0x47, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x52, 0x50, 0x43, 0x4c, 0x42, 0x5f, + 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, + 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x47, 0x52, 0x50, 0x43, 0x4c, 0x42, 0x5f, 0x52, + 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x4c, 0x4c, 0x42, 0x41, + 0x43, 0x4b, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x52, 0x50, 0x43, 0x4c, 0x42, 0x5f, 0x52, + 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, + 0x44, 0x10, 0x02, 0x42, 0x1d, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2443,13 +2517,13 @@ func file_grpc_testing_messages_proto_rawDescGZIP() []byte { } var file_grpc_testing_messages_proto_enumTypes = make([]protoimpl.EnumInfo, 5) -var file_grpc_testing_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 39) +var file_grpc_testing_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 40) var file_grpc_testing_messages_proto_goTypes = []interface{}{ (PayloadType)(0), // 0: 
grpc.testing.PayloadType (GrpclbRouteType)(0), // 1: grpc.testing.GrpclbRouteType - (HookRequestCommand)(0), // 2: grpc.testing.HookRequestCommand - (LoadBalancerStatsResponse_MetadataType)(0), // 3: grpc.testing.LoadBalancerStatsResponse.MetadataType - (ClientConfigureRequest_RpcType)(0), // 4: grpc.testing.ClientConfigureRequest.RpcType + (LoadBalancerStatsResponse_MetadataType)(0), // 2: grpc.testing.LoadBalancerStatsResponse.MetadataType + (ClientConfigureRequest_RpcType)(0), // 3: grpc.testing.ClientConfigureRequest.RpcType + (HookRequest_HookRequestCommand)(0), // 4: grpc.testing.HookRequest.HookRequestCommand (*BoolValue)(nil), // 5: grpc.testing.BoolValue (*Payload)(nil), // 6: grpc.testing.Payload (*EchoStatus)(nil), // 7: grpc.testing.EchoStatus @@ -2470,25 +2544,26 @@ var file_grpc_testing_messages_proto_goTypes = []interface{}{ (*ClientConfigureResponse)(nil), // 22: grpc.testing.ClientConfigureResponse (*MemorySize)(nil), // 23: grpc.testing.MemorySize (*TestOrcaReport)(nil), // 24: grpc.testing.TestOrcaReport - (*HookRequest)(nil), // 25: grpc.testing.HookRequest - (*HookResponse)(nil), // 26: grpc.testing.HookResponse - (*LoadBalancerStatsResponse_MetadataEntry)(nil), // 27: grpc.testing.LoadBalancerStatsResponse.MetadataEntry - (*LoadBalancerStatsResponse_RpcMetadata)(nil), // 28: grpc.testing.LoadBalancerStatsResponse.RpcMetadata - (*LoadBalancerStatsResponse_MetadataByPeer)(nil), // 29: grpc.testing.LoadBalancerStatsResponse.MetadataByPeer - (*LoadBalancerStatsResponse_RpcsByPeer)(nil), // 30: grpc.testing.LoadBalancerStatsResponse.RpcsByPeer - nil, // 31: grpc.testing.LoadBalancerStatsResponse.RpcsByPeerEntry - nil, // 32: grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry - nil, // 33: grpc.testing.LoadBalancerStatsResponse.MetadatasByPeerEntry - nil, // 34: grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.RpcsByPeerEntry - nil, // 35: grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsStartedByMethodEntry - nil, // 36: grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsSucceededByMethodEntry - nil, // 37: grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsFailedByMethodEntry - (*LoadBalancerAccumulatedStatsResponse_MethodStats)(nil), // 38: grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats - nil, // 39: grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry - nil, // 40: grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.ResultEntry - (*ClientConfigureRequest_Metadata)(nil), // 41: grpc.testing.ClientConfigureRequest.Metadata - nil, // 42: grpc.testing.TestOrcaReport.RequestCostEntry - nil, // 43: grpc.testing.TestOrcaReport.UtilizationEntry + (*SetReturnStatusRequest)(nil), // 25: grpc.testing.SetReturnStatusRequest + (*HookRequest)(nil), // 26: grpc.testing.HookRequest + (*HookResponse)(nil), // 27: grpc.testing.HookResponse + (*LoadBalancerStatsResponse_MetadataEntry)(nil), // 28: grpc.testing.LoadBalancerStatsResponse.MetadataEntry + (*LoadBalancerStatsResponse_RpcMetadata)(nil), // 29: grpc.testing.LoadBalancerStatsResponse.RpcMetadata + (*LoadBalancerStatsResponse_MetadataByPeer)(nil), // 30: grpc.testing.LoadBalancerStatsResponse.MetadataByPeer + (*LoadBalancerStatsResponse_RpcsByPeer)(nil), // 31: grpc.testing.LoadBalancerStatsResponse.RpcsByPeer + nil, // 32: grpc.testing.LoadBalancerStatsResponse.RpcsByPeerEntry + nil, // 33: grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry + nil, // 34: grpc.testing.LoadBalancerStatsResponse.MetadatasByPeerEntry + nil, // 35: 
grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.RpcsByPeerEntry + nil, // 36: grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsStartedByMethodEntry + nil, // 37: grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsSucceededByMethodEntry + nil, // 38: grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsFailedByMethodEntry + (*LoadBalancerAccumulatedStatsResponse_MethodStats)(nil), // 39: grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats + nil, // 40: grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry + nil, // 41: grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.ResultEntry + (*ClientConfigureRequest_Metadata)(nil), // 42: grpc.testing.ClientConfigureRequest.Metadata + nil, // 43: grpc.testing.TestOrcaReport.RequestCostEntry + nil, // 44: grpc.testing.TestOrcaReport.UtilizationEntry } var file_grpc_testing_messages_proto_depIdxs = []int32{ 0, // 0: grpc.testing.Payload.type:type_name -> grpc.testing.PayloadType @@ -2509,27 +2584,27 @@ var file_grpc_testing_messages_proto_depIdxs = []int32{ 7, // 15: grpc.testing.StreamingOutputCallRequest.response_status:type_name -> grpc.testing.EchoStatus 24, // 16: grpc.testing.StreamingOutputCallRequest.orca_oob_report:type_name -> grpc.testing.TestOrcaReport 6, // 17: grpc.testing.StreamingOutputCallResponse.payload:type_name -> grpc.testing.Payload - 31, // 18: grpc.testing.LoadBalancerStatsResponse.rpcs_by_peer:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeerEntry - 32, // 19: grpc.testing.LoadBalancerStatsResponse.rpcs_by_method:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry - 33, // 20: grpc.testing.LoadBalancerStatsResponse.metadatas_by_peer:type_name -> grpc.testing.LoadBalancerStatsResponse.MetadatasByPeerEntry - 35, // 21: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_started_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsStartedByMethodEntry - 36, // 22: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_succeeded_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsSucceededByMethodEntry - 37, // 23: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_failed_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsFailedByMethodEntry - 39, // 24: grpc.testing.LoadBalancerAccumulatedStatsResponse.stats_per_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry - 4, // 25: grpc.testing.ClientConfigureRequest.types:type_name -> grpc.testing.ClientConfigureRequest.RpcType - 41, // 26: grpc.testing.ClientConfigureRequest.metadata:type_name -> grpc.testing.ClientConfigureRequest.Metadata - 42, // 27: grpc.testing.TestOrcaReport.request_cost:type_name -> grpc.testing.TestOrcaReport.RequestCostEntry - 43, // 28: grpc.testing.TestOrcaReport.utilization:type_name -> grpc.testing.TestOrcaReport.UtilizationEntry - 2, // 29: grpc.testing.HookRequest.command:type_name -> grpc.testing.HookRequestCommand - 3, // 30: grpc.testing.LoadBalancerStatsResponse.MetadataEntry.type:type_name -> grpc.testing.LoadBalancerStatsResponse.MetadataType - 27, // 31: grpc.testing.LoadBalancerStatsResponse.RpcMetadata.metadata:type_name -> grpc.testing.LoadBalancerStatsResponse.MetadataEntry - 28, // 32: grpc.testing.LoadBalancerStatsResponse.MetadataByPeer.rpc_metadata:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcMetadata - 34, // 33: grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.rpcs_by_peer:type_name -> 
grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.RpcsByPeerEntry - 30, // 34: grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry.value:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeer - 29, // 35: grpc.testing.LoadBalancerStatsResponse.MetadatasByPeerEntry.value:type_name -> grpc.testing.LoadBalancerStatsResponse.MetadataByPeer - 40, // 36: grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.result:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.ResultEntry - 38, // 37: grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry.value:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats - 4, // 38: grpc.testing.ClientConfigureRequest.Metadata.type:type_name -> grpc.testing.ClientConfigureRequest.RpcType + 32, // 18: grpc.testing.LoadBalancerStatsResponse.rpcs_by_peer:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeerEntry + 33, // 19: grpc.testing.LoadBalancerStatsResponse.rpcs_by_method:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry + 34, // 20: grpc.testing.LoadBalancerStatsResponse.metadatas_by_peer:type_name -> grpc.testing.LoadBalancerStatsResponse.MetadatasByPeerEntry + 36, // 21: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_started_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsStartedByMethodEntry + 37, // 22: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_succeeded_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsSucceededByMethodEntry + 38, // 23: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_failed_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsFailedByMethodEntry + 40, // 24: grpc.testing.LoadBalancerAccumulatedStatsResponse.stats_per_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry + 3, // 25: grpc.testing.ClientConfigureRequest.types:type_name -> grpc.testing.ClientConfigureRequest.RpcType + 42, // 26: grpc.testing.ClientConfigureRequest.metadata:type_name -> grpc.testing.ClientConfigureRequest.Metadata + 43, // 27: grpc.testing.TestOrcaReport.request_cost:type_name -> grpc.testing.TestOrcaReport.RequestCostEntry + 44, // 28: grpc.testing.TestOrcaReport.utilization:type_name -> grpc.testing.TestOrcaReport.UtilizationEntry + 4, // 29: grpc.testing.HookRequest.command:type_name -> grpc.testing.HookRequest.HookRequestCommand + 2, // 30: grpc.testing.LoadBalancerStatsResponse.MetadataEntry.type:type_name -> grpc.testing.LoadBalancerStatsResponse.MetadataType + 28, // 31: grpc.testing.LoadBalancerStatsResponse.RpcMetadata.metadata:type_name -> grpc.testing.LoadBalancerStatsResponse.MetadataEntry + 29, // 32: grpc.testing.LoadBalancerStatsResponse.MetadataByPeer.rpc_metadata:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcMetadata + 35, // 33: grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.rpcs_by_peer:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.RpcsByPeerEntry + 31, // 34: grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry.value:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeer + 30, // 35: grpc.testing.LoadBalancerStatsResponse.MetadatasByPeerEntry.value:type_name -> grpc.testing.LoadBalancerStatsResponse.MetadataByPeer + 41, // 36: grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.result:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.ResultEntry + 39, // 37: 
grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry.value:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats + 3, // 38: grpc.testing.ClientConfigureRequest.Metadata.type:type_name -> grpc.testing.ClientConfigureRequest.RpcType 39, // [39:39] is the sub-list for method output_type 39, // [39:39] is the sub-list for method input_type 39, // [39:39] is the sub-list for extension type_name @@ -2784,7 +2859,7 @@ func file_grpc_testing_messages_proto_init() { } } file_grpc_testing_messages_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HookRequest); i { + switch v := v.(*SetReturnStatusRequest); i { case 0: return &v.state case 1: @@ -2796,7 +2871,7 @@ func file_grpc_testing_messages_proto_init() { } } file_grpc_testing_messages_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HookResponse); i { + switch v := v.(*HookRequest); i { case 0: return &v.state case 1: @@ -2808,7 +2883,7 @@ func file_grpc_testing_messages_proto_init() { } } file_grpc_testing_messages_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoadBalancerStatsResponse_MetadataEntry); i { + switch v := v.(*HookResponse); i { case 0: return &v.state case 1: @@ -2820,7 +2895,7 @@ func file_grpc_testing_messages_proto_init() { } } file_grpc_testing_messages_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoadBalancerStatsResponse_RpcMetadata); i { + switch v := v.(*LoadBalancerStatsResponse_MetadataEntry); i { case 0: return &v.state case 1: @@ -2832,7 +2907,7 @@ func file_grpc_testing_messages_proto_init() { } } file_grpc_testing_messages_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoadBalancerStatsResponse_MetadataByPeer); i { + switch v := v.(*LoadBalancerStatsResponse_RpcMetadata); i { case 0: return &v.state case 1: @@ -2844,6 +2919,18 @@ func file_grpc_testing_messages_proto_init() { } } file_grpc_testing_messages_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoadBalancerStatsResponse_MetadataByPeer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_testing_messages_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*LoadBalancerStatsResponse_RpcsByPeer); i { case 0: return &v.state @@ -2855,7 +2942,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*LoadBalancerAccumulatedStatsResponse_MethodStats); i { case 0: return &v.state @@ -2867,7 +2954,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ClientConfigureRequest_Metadata); i { case 0: return &v.state @@ -2886,7 +2973,7 @@ func file_grpc_testing_messages_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_grpc_testing_messages_proto_rawDesc, NumEnums: 5, - NumMessages: 39, + NumMessages: 40, NumExtensions: 0, NumServices: 0, }, diff --git 
a/vendor/google.golang.org/grpc/interop/grpc_testing/payloads.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/payloads.pb.go index 684fd1027..712c92241 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/payloads.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/payloads.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.22.0 +// protoc-gen-go v1.34.1 +// protoc v4.25.2 // source: grpc/testing/payloads.proto package grpc_testing diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/report_qps_scenario_service.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/report_qps_scenario_service.pb.go index 366067714..4bc43c642 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/report_qps_scenario_service.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/report_qps_scenario_service.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.22.0 +// protoc-gen-go v1.34.1 +// protoc v4.25.2 // source: grpc/testing/report_qps_scenario_service.proto package grpc_testing diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go index 33392bc6a..cf4aff887 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v4.22.0 +// - protoc-gen-go-grpc v1.4.0 +// - protoc v4.25.2 // source: grpc/testing/report_qps_scenario_service.proto package grpc_testing @@ -32,8 +32,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( ReportQpsScenarioService_ReportScenario_FullMethodName = "/grpc.testing.ReportQpsScenarioService/ReportScenario" @@ -56,8 +56,9 @@ func NewReportQpsScenarioServiceClient(cc grpc.ClientConnInterface) ReportQpsSce } func (c *reportQpsScenarioServiceClient) ReportScenario(ctx context.Context, in *ScenarioResult, opts ...grpc.CallOption) (*Void, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Void) - err := c.cc.Invoke(ctx, ReportQpsScenarioService_ReportScenario_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, ReportQpsScenarioService_ReportScenario_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/stats.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/stats.pb.go index aa6e9b3bc..351a964a6 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/stats.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/stats.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.31.0 -// protoc v4.22.0 +// protoc-gen-go v1.34.1 +// protoc v4.25.2 // source: grpc/testing/stats.proto package grpc_testing diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go index 58305cd3f..f0ffbd91e 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.22.0 +// protoc-gen-go v1.34.1 +// protoc v4.25.2 // source: grpc/testing/test.proto package grpc_testing @@ -120,35 +120,44 @@ var file_grpc_testing_test_proto_rawDesc = []byte{ 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x32, 0x3f, 0x0a, 0x0b, 0x48, 0x6f, 0x6f, 0x6b, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x30, 0x0a, 0x04, 0x48, 0x6f, 0x6f, 0x6b, 0x12, - 0x13, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x32, 0xd5, 0x01, 0x0a, 0x16, 0x58, 0x64, - 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x36, 0x0a, 0x0a, 0x53, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x6e, 0x67, 0x12, 0x13, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x67, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0d, - 0x53, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12, 0x13, 0x2e, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x32, 0xcc, 0x01, 0x0a, 0x0b, 0x48, 0x6f, 0x6f, + 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x30, 0x0a, 0x04, 0x48, 0x6f, 0x6f, 0x6b, + 0x12, 0x13, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4c, 0x0a, 0x0f, 0x53, 0x65, + 0x74, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x24, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x65, 0x74, + 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x67, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3d, 0x0a, 0x11, 0x43, 0x6c, 0x65, 0x61, + 0x72, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x13, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x67, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x48, 0x0a, 0x0f, 0x53, 0x65, 0x6e, 0x64, 0x48, - 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 
0x73, 0x74, 0x12, 0x19, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x32, 0x7b, 0x0a, 0x1f, 0x58, 0x64, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, - 0x65, 0x12, 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, - 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x1d, - 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x67, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x67, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x32, 0xd5, 0x01, 0x0a, 0x16, 0x58, 0x64, 0x73, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x12, 0x36, 0x0a, 0x0a, 0x53, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, + 0x12, 0x13, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0d, 0x53, 0x65, + 0x74, 0x4e, 0x6f, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12, 0x13, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x1a, 0x13, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x48, 0x0a, 0x0f, 0x53, 0x65, 0x6e, 0x64, 0x48, 0x6f, 0x6f, + 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x67, 0x2e, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, + 0x7b, 0x0a, 0x1f, 0x58, 0x64, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, + 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x1d, 0x0a, 0x1b, + 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 
0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, + 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var file_grpc_testing_test_proto_goTypes = []interface{}{ @@ -159,16 +168,17 @@ var file_grpc_testing_test_proto_goTypes = []interface{}{ (*ReconnectParams)(nil), // 4: grpc.testing.ReconnectParams (*LoadBalancerStatsRequest)(nil), // 5: grpc.testing.LoadBalancerStatsRequest (*LoadBalancerAccumulatedStatsRequest)(nil), // 6: grpc.testing.LoadBalancerAccumulatedStatsRequest - (*HookRequest)(nil), // 7: grpc.testing.HookRequest - (*ClientConfigureRequest)(nil), // 8: grpc.testing.ClientConfigureRequest - (*SimpleResponse)(nil), // 9: grpc.testing.SimpleResponse - (*StreamingOutputCallResponse)(nil), // 10: grpc.testing.StreamingOutputCallResponse - (*StreamingInputCallResponse)(nil), // 11: grpc.testing.StreamingInputCallResponse - (*ReconnectInfo)(nil), // 12: grpc.testing.ReconnectInfo - (*LoadBalancerStatsResponse)(nil), // 13: grpc.testing.LoadBalancerStatsResponse - (*LoadBalancerAccumulatedStatsResponse)(nil), // 14: grpc.testing.LoadBalancerAccumulatedStatsResponse - (*HookResponse)(nil), // 15: grpc.testing.HookResponse - (*ClientConfigureResponse)(nil), // 16: grpc.testing.ClientConfigureResponse + (*SetReturnStatusRequest)(nil), // 7: grpc.testing.SetReturnStatusRequest + (*HookRequest)(nil), // 8: grpc.testing.HookRequest + (*ClientConfigureRequest)(nil), // 9: grpc.testing.ClientConfigureRequest + (*SimpleResponse)(nil), // 10: grpc.testing.SimpleResponse + (*StreamingOutputCallResponse)(nil), // 11: grpc.testing.StreamingOutputCallResponse + (*StreamingInputCallResponse)(nil), // 12: grpc.testing.StreamingInputCallResponse + (*ReconnectInfo)(nil), // 13: grpc.testing.ReconnectInfo + (*LoadBalancerStatsResponse)(nil), // 14: grpc.testing.LoadBalancerStatsResponse + (*LoadBalancerAccumulatedStatsResponse)(nil), // 15: grpc.testing.LoadBalancerAccumulatedStatsResponse + (*HookResponse)(nil), // 16: grpc.testing.HookResponse + (*ClientConfigureResponse)(nil), // 17: grpc.testing.ClientConfigureResponse } var file_grpc_testing_test_proto_depIdxs = []int32{ 0, // 0: grpc.testing.TestService.EmptyCall:input_type -> grpc.testing.Empty @@ -185,30 +195,34 @@ var file_grpc_testing_test_proto_depIdxs = []int32{ 5, // 11: grpc.testing.LoadBalancerStatsService.GetClientStats:input_type -> grpc.testing.LoadBalancerStatsRequest 6, // 12: grpc.testing.LoadBalancerStatsService.GetClientAccumulatedStats:input_type -> grpc.testing.LoadBalancerAccumulatedStatsRequest 0, // 13: grpc.testing.HookService.Hook:input_type -> grpc.testing.Empty - 0, // 14: grpc.testing.XdsUpdateHealthService.SetServing:input_type -> grpc.testing.Empty - 0, // 15: grpc.testing.XdsUpdateHealthService.SetNotServing:input_type -> grpc.testing.Empty - 7, // 16: grpc.testing.XdsUpdateHealthService.SendHookRequest:input_type -> grpc.testing.HookRequest - 8, // 17: grpc.testing.XdsUpdateClientConfigureService.Configure:input_type -> grpc.testing.ClientConfigureRequest - 0, // 18: grpc.testing.TestService.EmptyCall:output_type -> grpc.testing.Empty - 9, // 19: grpc.testing.TestService.UnaryCall:output_type -> grpc.testing.SimpleResponse - 9, // 20: grpc.testing.TestService.CacheableUnaryCall:output_type -> grpc.testing.SimpleResponse - 10, // 21: grpc.testing.TestService.StreamingOutputCall:output_type -> grpc.testing.StreamingOutputCallResponse - 11, // 22: grpc.testing.TestService.StreamingInputCall:output_type -> grpc.testing.StreamingInputCallResponse - 
10, // 23: grpc.testing.TestService.FullDuplexCall:output_type -> grpc.testing.StreamingOutputCallResponse - 10, // 24: grpc.testing.TestService.HalfDuplexCall:output_type -> grpc.testing.StreamingOutputCallResponse - 0, // 25: grpc.testing.TestService.UnimplementedCall:output_type -> grpc.testing.Empty - 0, // 26: grpc.testing.UnimplementedService.UnimplementedCall:output_type -> grpc.testing.Empty - 0, // 27: grpc.testing.ReconnectService.Start:output_type -> grpc.testing.Empty - 12, // 28: grpc.testing.ReconnectService.Stop:output_type -> grpc.testing.ReconnectInfo - 13, // 29: grpc.testing.LoadBalancerStatsService.GetClientStats:output_type -> grpc.testing.LoadBalancerStatsResponse - 14, // 30: grpc.testing.LoadBalancerStatsService.GetClientAccumulatedStats:output_type -> grpc.testing.LoadBalancerAccumulatedStatsResponse - 0, // 31: grpc.testing.HookService.Hook:output_type -> grpc.testing.Empty - 0, // 32: grpc.testing.XdsUpdateHealthService.SetServing:output_type -> grpc.testing.Empty - 0, // 33: grpc.testing.XdsUpdateHealthService.SetNotServing:output_type -> grpc.testing.Empty - 15, // 34: grpc.testing.XdsUpdateHealthService.SendHookRequest:output_type -> grpc.testing.HookResponse - 16, // 35: grpc.testing.XdsUpdateClientConfigureService.Configure:output_type -> grpc.testing.ClientConfigureResponse - 18, // [18:36] is the sub-list for method output_type - 0, // [0:18] is the sub-list for method input_type + 7, // 14: grpc.testing.HookService.SetReturnStatus:input_type -> grpc.testing.SetReturnStatusRequest + 0, // 15: grpc.testing.HookService.ClearReturnStatus:input_type -> grpc.testing.Empty + 0, // 16: grpc.testing.XdsUpdateHealthService.SetServing:input_type -> grpc.testing.Empty + 0, // 17: grpc.testing.XdsUpdateHealthService.SetNotServing:input_type -> grpc.testing.Empty + 8, // 18: grpc.testing.XdsUpdateHealthService.SendHookRequest:input_type -> grpc.testing.HookRequest + 9, // 19: grpc.testing.XdsUpdateClientConfigureService.Configure:input_type -> grpc.testing.ClientConfigureRequest + 0, // 20: grpc.testing.TestService.EmptyCall:output_type -> grpc.testing.Empty + 10, // 21: grpc.testing.TestService.UnaryCall:output_type -> grpc.testing.SimpleResponse + 10, // 22: grpc.testing.TestService.CacheableUnaryCall:output_type -> grpc.testing.SimpleResponse + 11, // 23: grpc.testing.TestService.StreamingOutputCall:output_type -> grpc.testing.StreamingOutputCallResponse + 12, // 24: grpc.testing.TestService.StreamingInputCall:output_type -> grpc.testing.StreamingInputCallResponse + 11, // 25: grpc.testing.TestService.FullDuplexCall:output_type -> grpc.testing.StreamingOutputCallResponse + 11, // 26: grpc.testing.TestService.HalfDuplexCall:output_type -> grpc.testing.StreamingOutputCallResponse + 0, // 27: grpc.testing.TestService.UnimplementedCall:output_type -> grpc.testing.Empty + 0, // 28: grpc.testing.UnimplementedService.UnimplementedCall:output_type -> grpc.testing.Empty + 0, // 29: grpc.testing.ReconnectService.Start:output_type -> grpc.testing.Empty + 13, // 30: grpc.testing.ReconnectService.Stop:output_type -> grpc.testing.ReconnectInfo + 14, // 31: grpc.testing.LoadBalancerStatsService.GetClientStats:output_type -> grpc.testing.LoadBalancerStatsResponse + 15, // 32: grpc.testing.LoadBalancerStatsService.GetClientAccumulatedStats:output_type -> grpc.testing.LoadBalancerAccumulatedStatsResponse + 0, // 33: grpc.testing.HookService.Hook:output_type -> grpc.testing.Empty + 0, // 34: grpc.testing.HookService.SetReturnStatus:output_type -> grpc.testing.Empty + 0, // 35: 
grpc.testing.HookService.ClearReturnStatus:output_type -> grpc.testing.Empty + 0, // 36: grpc.testing.XdsUpdateHealthService.SetServing:output_type -> grpc.testing.Empty + 0, // 37: grpc.testing.XdsUpdateHealthService.SetNotServing:output_type -> grpc.testing.Empty + 16, // 38: grpc.testing.XdsUpdateHealthService.SendHookRequest:output_type -> grpc.testing.HookResponse + 17, // 39: grpc.testing.XdsUpdateClientConfigureService.Configure:output_type -> grpc.testing.ClientConfigureResponse + 20, // [20:40] is the sub-list for method output_type + 0, // [0:20] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/test_grpc.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/test_grpc.pb.go index 2d0181a7f..fd79b2f02 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/test_grpc.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/test_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v4.22.0 +// - protoc-gen-go-grpc v1.4.0 +// - protoc v4.25.2 // source: grpc/testing/test.proto package grpc_testing @@ -32,8 +32,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( TestService_EmptyCall_FullMethodName = "/grpc.testing.TestService/EmptyCall" @@ -49,6 +49,9 @@ const ( // TestServiceClient is the client API for TestService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// A simple service to test the various types of RPCs and experiment with +// performance with various types of payload. type TestServiceClient interface { // One empty request followed by one empty response. EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) @@ -60,19 +63,19 @@ type TestServiceClient interface { CacheableUnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) // One request followed by a sequence of responses (streamed download). // The server returns the payload with client desired type and sizes. - StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) + StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StreamingOutputCallResponse], error) // A sequence of requests followed by one response (streamed upload). // The server returns the aggregated size of client payload as the result. - StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) + StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[StreamingInputCallRequest, StreamingInputCallResponse], error) // A sequence of requests with each request served by the server immediately. // As one request could lead to multiple responses, this interface // demonstrates the idea of full duplexing. 
- FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) + FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[StreamingOutputCallRequest, StreamingOutputCallResponse], error) // A sequence of requests followed by a sequence of responses. // The server buffers all the client requests and then serves them in order. A // stream of responses are returned to the client when the server starts with // first request. - HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) + HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[StreamingOutputCallRequest, StreamingOutputCallResponse], error) // The test server will not implement this method. It will be used // to test the behavior when clients call unimplemented methods. UnimplementedCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) @@ -87,8 +90,9 @@ func NewTestServiceClient(cc grpc.ClientConnInterface) TestServiceClient { } func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Empty) - err := c.cc.Invoke(ctx, TestService_EmptyCall_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, TestService_EmptyCall_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -96,8 +100,9 @@ func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...gr } func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SimpleResponse) - err := c.cc.Invoke(ctx, TestService_UnaryCall_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, TestService_UnaryCall_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -105,20 +110,22 @@ func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, op } func (c *testServiceClient) CacheableUnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SimpleResponse) - err := c.cc.Invoke(ctx, TestService_CacheableUnaryCall_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, TestService_CacheableUnaryCall_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[0], TestService_StreamingOutputCall_FullMethodName, opts...) +func (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StreamingOutputCallResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[0], TestService_StreamingOutputCall_FullMethodName, cOpts...) 
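	// grpc.StaticMethod is prepended ahead of the caller's options to mark the
	// invoked method name as a compile-time constant, so stats handlers and
	// interceptors in gRPC-Go v1.64+ can use it as a low-cardinality metrics
	// label; caller-supplied options come after it and still take precedence.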
if err != nil { return nil, err } - x := &testServiceStreamingOutputCallClient{stream} + x := &grpc.GenericClientStream[StreamingOutputCallRequest, StreamingOutputCallResponse]{ClientStream: stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -128,122 +135,52 @@ func (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *Streami return x, nil } -type TestService_StreamingOutputCallClient interface { - Recv() (*StreamingOutputCallResponse, error) - grpc.ClientStream -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type TestService_StreamingOutputCallClient = grpc.ServerStreamingClient[StreamingOutputCallResponse] -type testServiceStreamingOutputCallClient struct { - grpc.ClientStream -} - -func (x *testServiceStreamingOutputCallClient) Recv() (*StreamingOutputCallResponse, error) { - m := new(StreamingOutputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *testServiceClient) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[1], TestService_StreamingInputCall_FullMethodName, opts...) +func (c *testServiceClient) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[StreamingInputCallRequest, StreamingInputCallResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[1], TestService_StreamingInputCall_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &testServiceStreamingInputCallClient{stream} + x := &grpc.GenericClientStream[StreamingInputCallRequest, StreamingInputCallResponse]{ClientStream: stream} return x, nil } -type TestService_StreamingInputCallClient interface { - Send(*StreamingInputCallRequest) error - CloseAndRecv() (*StreamingInputCallResponse, error) - grpc.ClientStream -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type TestService_StreamingInputCallClient = grpc.ClientStreamingClient[StreamingInputCallRequest, StreamingInputCallResponse] -type testServiceStreamingInputCallClient struct { - grpc.ClientStream -} - -func (x *testServiceStreamingInputCallClient) Send(m *StreamingInputCallRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testServiceStreamingInputCallClient) CloseAndRecv() (*StreamingInputCallResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(StreamingInputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[2], TestService_FullDuplexCall_FullMethodName, opts...) +func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[StreamingOutputCallRequest, StreamingOutputCallResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[2], TestService_FullDuplexCall_FullMethodName, cOpts...) 
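	// grpc.GenericClientStream (used just below) wraps the untyped
	// grpc.ClientStream and supplies the typed Send/Recv methods required by
	// the generic ServerStreamingClient, ClientStreamingClient, and
	// BidiStreamingClient interfaces, replacing the per-method wrapper
	// structs emitted by the old generator.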
if err != nil { return nil, err } - x := &testServiceFullDuplexCallClient{stream} + x := &grpc.GenericClientStream[StreamingOutputCallRequest, StreamingOutputCallResponse]{ClientStream: stream} return x, nil } -type TestService_FullDuplexCallClient interface { - Send(*StreamingOutputCallRequest) error - Recv() (*StreamingOutputCallResponse, error) - grpc.ClientStream -} - -type testServiceFullDuplexCallClient struct { - grpc.ClientStream -} - -func (x *testServiceFullDuplexCallClient) Send(m *StreamingOutputCallRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testServiceFullDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { - m := new(StreamingOutputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type TestService_FullDuplexCallClient = grpc.BidiStreamingClient[StreamingOutputCallRequest, StreamingOutputCallResponse] -func (c *testServiceClient) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[3], TestService_HalfDuplexCall_FullMethodName, opts...) +func (c *testServiceClient) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[StreamingOutputCallRequest, StreamingOutputCallResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[3], TestService_HalfDuplexCall_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &testServiceHalfDuplexCallClient{stream} + x := &grpc.GenericClientStream[StreamingOutputCallRequest, StreamingOutputCallResponse]{ClientStream: stream} return x, nil } -type TestService_HalfDuplexCallClient interface { - Send(*StreamingOutputCallRequest) error - Recv() (*StreamingOutputCallResponse, error) - grpc.ClientStream -} - -type testServiceHalfDuplexCallClient struct { - grpc.ClientStream -} - -func (x *testServiceHalfDuplexCallClient) Send(m *StreamingOutputCallRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testServiceHalfDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { - m := new(StreamingOutputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type TestService_HalfDuplexCallClient = grpc.BidiStreamingClient[StreamingOutputCallRequest, StreamingOutputCallResponse] func (c *testServiceClient) UnimplementedCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Empty) - err := c.cc.Invoke(ctx, TestService_UnimplementedCall_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, TestService_UnimplementedCall_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -253,6 +190,9 @@ func (c *testServiceClient) UnimplementedCall(ctx context.Context, in *Empty, op // TestServiceServer is the server API for TestService service. 
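// Streaming methods on this interface are expressed with the generic stream
// types introduced in gRPC-Go v1.64 (grpc.ServerStreamingServer,
// grpc.ClientStreamingServer, grpc.BidiStreamingServer); the former
// per-method stream interfaces are preserved below as type aliases, so
// existing implementations continue to compile.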
// All implementations must embed UnimplementedTestServiceServer // for forward compatibility +// +// A simple service to test the various types of RPCs and experiment with +// performance with various types of payload. type TestServiceServer interface { // One empty request followed by one empty response. EmptyCall(context.Context, *Empty) (*Empty, error) @@ -264,19 +204,19 @@ type TestServiceServer interface { CacheableUnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) // One request followed by a sequence of responses (streamed download). // The server returns the payload with client desired type and sizes. - StreamingOutputCall(*StreamingOutputCallRequest, TestService_StreamingOutputCallServer) error + StreamingOutputCall(*StreamingOutputCallRequest, grpc.ServerStreamingServer[StreamingOutputCallResponse]) error // A sequence of requests followed by one response (streamed upload). // The server returns the aggregated size of client payload as the result. - StreamingInputCall(TestService_StreamingInputCallServer) error + StreamingInputCall(grpc.ClientStreamingServer[StreamingInputCallRequest, StreamingInputCallResponse]) error // A sequence of requests with each request served by the server immediately. // As one request could lead to multiple responses, this interface // demonstrates the idea of full duplexing. - FullDuplexCall(TestService_FullDuplexCallServer) error + FullDuplexCall(grpc.BidiStreamingServer[StreamingOutputCallRequest, StreamingOutputCallResponse]) error // A sequence of requests followed by a sequence of responses. // The server buffers all the client requests and then serves them in order. A // stream of responses are returned to the client when the server starts with // first request. - HalfDuplexCall(TestService_HalfDuplexCallServer) error + HalfDuplexCall(grpc.BidiStreamingServer[StreamingOutputCallRequest, StreamingOutputCallResponse]) error // The test server will not implement this method. It will be used // to test the behavior when clients call unimplemented methods. 
UnimplementedCall(context.Context, *Empty) (*Empty, error) @@ -296,16 +236,16 @@ func (UnimplementedTestServiceServer) UnaryCall(context.Context, *SimpleRequest) func (UnimplementedTestServiceServer) CacheableUnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CacheableUnaryCall not implemented") } -func (UnimplementedTestServiceServer) StreamingOutputCall(*StreamingOutputCallRequest, TestService_StreamingOutputCallServer) error { +func (UnimplementedTestServiceServer) StreamingOutputCall(*StreamingOutputCallRequest, grpc.ServerStreamingServer[StreamingOutputCallResponse]) error { return status.Errorf(codes.Unimplemented, "method StreamingOutputCall not implemented") } -func (UnimplementedTestServiceServer) StreamingInputCall(TestService_StreamingInputCallServer) error { +func (UnimplementedTestServiceServer) StreamingInputCall(grpc.ClientStreamingServer[StreamingInputCallRequest, StreamingInputCallResponse]) error { return status.Errorf(codes.Unimplemented, "method StreamingInputCall not implemented") } -func (UnimplementedTestServiceServer) FullDuplexCall(TestService_FullDuplexCallServer) error { +func (UnimplementedTestServiceServer) FullDuplexCall(grpc.BidiStreamingServer[StreamingOutputCallRequest, StreamingOutputCallResponse]) error { return status.Errorf(codes.Unimplemented, "method FullDuplexCall not implemented") } -func (UnimplementedTestServiceServer) HalfDuplexCall(TestService_HalfDuplexCallServer) error { +func (UnimplementedTestServiceServer) HalfDuplexCall(grpc.BidiStreamingServer[StreamingOutputCallRequest, StreamingOutputCallResponse]) error { return status.Errorf(codes.Unimplemented, "method HalfDuplexCall not implemented") } func (UnimplementedTestServiceServer) UnimplementedCall(context.Context, *Empty) (*Empty, error) { @@ -383,99 +323,32 @@ func _TestService_StreamingOutputCall_Handler(srv interface{}, stream grpc.Serve if err := stream.RecvMsg(m); err != nil { return err } - return srv.(TestServiceServer).StreamingOutputCall(m, &testServiceStreamingOutputCallServer{stream}) -} - -type TestService_StreamingOutputCallServer interface { - Send(*StreamingOutputCallResponse) error - grpc.ServerStream + return srv.(TestServiceServer).StreamingOutputCall(m, &grpc.GenericServerStream[StreamingOutputCallRequest, StreamingOutputCallResponse]{ServerStream: stream}) } -type testServiceStreamingOutputCallServer struct { - grpc.ServerStream -} - -func (x *testServiceStreamingOutputCallServer) Send(m *StreamingOutputCallResponse) error { - return x.ServerStream.SendMsg(m) -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
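// As a minimal sketch of what the alias preserves (the server type here is
// hypothetical, not part of this package), an implementation written against
// the old name is unaffected by the switch to generics:
//
//	func (s *testServer) StreamingOutputCall(req *StreamingOutputCallRequest, stream TestService_StreamingOutputCallServer) error {
//		// The alias and grpc.ServerStreamingServer[StreamingOutputCallResponse] are interchangeable.
//		return stream.Send(&StreamingOutputCallResponse{Payload: req.GetPayload()})
//	}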
+type TestService_StreamingOutputCallServer = grpc.ServerStreamingServer[StreamingOutputCallResponse] func _TestService_StreamingInputCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TestServiceServer).StreamingInputCall(&testServiceStreamingInputCallServer{stream}) -} - -type TestService_StreamingInputCallServer interface { - SendAndClose(*StreamingInputCallResponse) error - Recv() (*StreamingInputCallRequest, error) - grpc.ServerStream + return srv.(TestServiceServer).StreamingInputCall(&grpc.GenericServerStream[StreamingInputCallRequest, StreamingInputCallResponse]{ServerStream: stream}) } -type testServiceStreamingInputCallServer struct { - grpc.ServerStream -} - -func (x *testServiceStreamingInputCallServer) SendAndClose(m *StreamingInputCallResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testServiceStreamingInputCallServer) Recv() (*StreamingInputCallRequest, error) { - m := new(StreamingInputCallRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type TestService_StreamingInputCallServer = grpc.ClientStreamingServer[StreamingInputCallRequest, StreamingInputCallResponse] func _TestService_FullDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TestServiceServer).FullDuplexCall(&testServiceFullDuplexCallServer{stream}) -} - -type TestService_FullDuplexCallServer interface { - Send(*StreamingOutputCallResponse) error - Recv() (*StreamingOutputCallRequest, error) - grpc.ServerStream -} - -type testServiceFullDuplexCallServer struct { - grpc.ServerStream -} - -func (x *testServiceFullDuplexCallServer) Send(m *StreamingOutputCallResponse) error { - return x.ServerStream.SendMsg(m) + return srv.(TestServiceServer).FullDuplexCall(&grpc.GenericServerStream[StreamingOutputCallRequest, StreamingOutputCallResponse]{ServerStream: stream}) } -func (x *testServiceFullDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { - m := new(StreamingOutputCallRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
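// A bidi handler declared against its alias likewise keeps compiling; a
// minimal sketch (hypothetical server type, assumes the standard io import,
// echoes each request payload back):
//
//	func (s *testServer) FullDuplexCall(stream TestService_FullDuplexCallServer) error {
//		for {
//			req, err := stream.Recv()
//			if err == io.EOF {
//				return nil // client finished sending
//			}
//			if err != nil {
//				return err
//			}
//			if err := stream.Send(&StreamingOutputCallResponse{Payload: req.GetPayload()}); err != nil {
//				return err
//			}
//		}
//	}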
+type TestService_FullDuplexCallServer = grpc.BidiStreamingServer[StreamingOutputCallRequest, StreamingOutputCallResponse] func _TestService_HalfDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TestServiceServer).HalfDuplexCall(&testServiceHalfDuplexCallServer{stream}) -} - -type TestService_HalfDuplexCallServer interface { - Send(*StreamingOutputCallResponse) error - Recv() (*StreamingOutputCallRequest, error) - grpc.ServerStream -} - -type testServiceHalfDuplexCallServer struct { - grpc.ServerStream + return srv.(TestServiceServer).HalfDuplexCall(&grpc.GenericServerStream[StreamingOutputCallRequest, StreamingOutputCallResponse]{ServerStream: stream}) } -func (x *testServiceHalfDuplexCallServer) Send(m *StreamingOutputCallResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testServiceHalfDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { - m := new(StreamingOutputCallRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type TestService_HalfDuplexCallServer = grpc.BidiStreamingServer[StreamingOutputCallRequest, StreamingOutputCallResponse] func _TestService_UnimplementedCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Empty) @@ -553,6 +426,9 @@ const ( // UnimplementedServiceClient is the client API for UnimplementedService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// A simple service NOT implemented at servers so clients can test for +// that case. type UnimplementedServiceClient interface { // A call that no server should implement UnimplementedCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) @@ -567,8 +443,9 @@ func NewUnimplementedServiceClient(cc grpc.ClientConnInterface) UnimplementedSer } func (c *unimplementedServiceClient) UnimplementedCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Empty) - err := c.cc.Invoke(ctx, UnimplementedService_UnimplementedCall_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, UnimplementedService_UnimplementedCall_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -578,6 +455,9 @@ func (c *unimplementedServiceClient) UnimplementedCall(ctx context.Context, in * // UnimplementedServiceServer is the server API for UnimplementedService service. // All implementations must embed UnimplementedUnimplementedServiceServer // for forward compatibility +// +// A simple service NOT implemented at servers so clients can test for +// that case. type UnimplementedServiceServer interface { // A call that no server should implement UnimplementedCall(context.Context, *Empty) (*Empty, error) @@ -646,6 +526,8 @@ const ( // ReconnectServiceClient is the client API for ReconnectService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// A service used to control reconnect server. 
type ReconnectServiceClient interface { Start(ctx context.Context, in *ReconnectParams, opts ...grpc.CallOption) (*Empty, error) Stop(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ReconnectInfo, error) @@ -660,8 +542,9 @@ func NewReconnectServiceClient(cc grpc.ClientConnInterface) ReconnectServiceClie } func (c *reconnectServiceClient) Start(ctx context.Context, in *ReconnectParams, opts ...grpc.CallOption) (*Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Empty) - err := c.cc.Invoke(ctx, ReconnectService_Start_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, ReconnectService_Start_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -669,8 +552,9 @@ func (c *reconnectServiceClient) Start(ctx context.Context, in *ReconnectParams, } func (c *reconnectServiceClient) Stop(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ReconnectInfo, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ReconnectInfo) - err := c.cc.Invoke(ctx, ReconnectService_Stop_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, ReconnectService_Stop_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -680,6 +564,8 @@ func (c *reconnectServiceClient) Stop(ctx context.Context, in *Empty, opts ...gr // ReconnectServiceServer is the server API for ReconnectService service. // All implementations must embed UnimplementedReconnectServiceServer // for forward compatibility +// +// A service used to control reconnect server. type ReconnectServiceServer interface { Start(context.Context, *ReconnectParams) (*Empty, error) Stop(context.Context, *Empty) (*ReconnectInfo, error) @@ -773,6 +659,8 @@ const ( // LoadBalancerStatsServiceClient is the client API for LoadBalancerStatsService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// A service used to obtain stats for verifying LB behavior. type LoadBalancerStatsServiceClient interface { // Gets the backend distribution for RPCs sent by a test client. GetClientStats(ctx context.Context, in *LoadBalancerStatsRequest, opts ...grpc.CallOption) (*LoadBalancerStatsResponse, error) @@ -789,8 +677,9 @@ func NewLoadBalancerStatsServiceClient(cc grpc.ClientConnInterface) LoadBalancer } func (c *loadBalancerStatsServiceClient) GetClientStats(ctx context.Context, in *LoadBalancerStatsRequest, opts ...grpc.CallOption) (*LoadBalancerStatsResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(LoadBalancerStatsResponse) - err := c.cc.Invoke(ctx, LoadBalancerStatsService_GetClientStats_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, LoadBalancerStatsService_GetClientStats_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -798,8 +687,9 @@ func (c *loadBalancerStatsServiceClient) GetClientStats(ctx context.Context, in } func (c *loadBalancerStatsServiceClient) GetClientAccumulatedStats(ctx context.Context, in *LoadBalancerAccumulatedStatsRequest, opts ...grpc.CallOption) (*LoadBalancerAccumulatedStatsResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(LoadBalancerAccumulatedStatsResponse) - err := c.cc.Invoke(ctx, LoadBalancerStatsService_GetClientAccumulatedStats_FullMethodName, in, out, opts...) 
+ err := c.cc.Invoke(ctx, LoadBalancerStatsService_GetClientAccumulatedStats_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -809,6 +699,8 @@ func (c *loadBalancerStatsServiceClient) GetClientAccumulatedStats(ctx context.C // LoadBalancerStatsServiceServer is the server API for LoadBalancerStatsService service. // All implementations must embed UnimplementedLoadBalancerStatsServiceServer // for forward compatibility +// +// A service used to obtain stats for verifying LB behavior. type LoadBalancerStatsServiceServer interface { // Gets the backend distribution for RPCs sent by a test client. GetClientStats(context.Context, *LoadBalancerStatsRequest) (*LoadBalancerStatsResponse, error) @@ -898,14 +790,24 @@ var LoadBalancerStatsService_ServiceDesc = grpc.ServiceDesc{ } const ( - HookService_Hook_FullMethodName = "/grpc.testing.HookService/Hook" + HookService_Hook_FullMethodName = "/grpc.testing.HookService/Hook" + HookService_SetReturnStatus_FullMethodName = "/grpc.testing.HookService/SetReturnStatus" + HookService_ClearReturnStatus_FullMethodName = "/grpc.testing.HookService/ClearReturnStatus" ) // HookServiceClient is the client API for HookService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// Hook service. Used to keep Kubernetes from shutting the pod down. type HookServiceClient interface { + // Sends a request that will "hang" until the return status is set by a call + // to a SetReturnStatus Hook(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) + // Sets a return status for pending and upcoming calls to Hook + SetReturnStatus(ctx context.Context, in *SetReturnStatusRequest, opts ...grpc.CallOption) (*Empty, error) + // Clears the return status. Incoming calls to Hook will "hang" + ClearReturnStatus(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) } type hookServiceClient struct { @@ -917,8 +819,29 @@ func NewHookServiceClient(cc grpc.ClientConnInterface) HookServiceClient { } func (c *hookServiceClient) Hook(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Empty) - err := c.cc.Invoke(ctx, HookService_Hook_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, HookService_Hook_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *hookServiceClient) SetReturnStatus(ctx context.Context, in *SetReturnStatusRequest, opts ...grpc.CallOption) (*Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Empty) + err := c.cc.Invoke(ctx, HookService_SetReturnStatus_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *hookServiceClient) ClearReturnStatus(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Empty) + err := c.cc.Invoke(ctx, HookService_ClearReturnStatus_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -928,8 +851,16 @@ func (c *hookServiceClient) Hook(ctx context.Context, in *Empty, opts ...grpc.Ca // HookServiceServer is the server API for HookService service. // All implementations must embed UnimplementedHookServiceServer // for forward compatibility +// +// Hook service. 
Used to keep Kubernetes from shutting the pod down. type HookServiceServer interface { + // Sends a request that will "hang" until the return status is set by a call + // to a SetReturnStatus Hook(context.Context, *Empty) (*Empty, error) + // Sets a return status for pending and upcoming calls to Hook + SetReturnStatus(context.Context, *SetReturnStatusRequest) (*Empty, error) + // Clears the return status. Incoming calls to Hook will "hang" + ClearReturnStatus(context.Context, *Empty) (*Empty, error) mustEmbedUnimplementedHookServiceServer() } @@ -940,6 +871,12 @@ type UnimplementedHookServiceServer struct { func (UnimplementedHookServiceServer) Hook(context.Context, *Empty) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Hook not implemented") } +func (UnimplementedHookServiceServer) SetReturnStatus(context.Context, *SetReturnStatusRequest) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetReturnStatus not implemented") +} +func (UnimplementedHookServiceServer) ClearReturnStatus(context.Context, *Empty) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClearReturnStatus not implemented") +} func (UnimplementedHookServiceServer) mustEmbedUnimplementedHookServiceServer() {} // UnsafeHookServiceServer may be embedded to opt out of forward compatibility for this service. @@ -971,6 +908,42 @@ func _HookService_Hook_Handler(srv interface{}, ctx context.Context, dec func(in return interceptor(ctx, in, info, handler) } +func _HookService_SetReturnStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetReturnStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HookServiceServer).SetReturnStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: HookService_SetReturnStatus_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HookServiceServer).SetReturnStatus(ctx, req.(*SetReturnStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HookService_ClearReturnStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HookServiceServer).ClearReturnStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: HookService_ClearReturnStatus_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HookServiceServer).ClearReturnStatus(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + // HookService_ServiceDesc is the grpc.ServiceDesc for HookService service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -982,6 +955,14 @@ var HookService_ServiceDesc = grpc.ServiceDesc{ MethodName: "Hook", Handler: _HookService_Hook_Handler, }, + { + MethodName: "SetReturnStatus", + Handler: _HookService_SetReturnStatus_Handler, + }, + { + MethodName: "ClearReturnStatus", + Handler: _HookService_ClearReturnStatus_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "grpc/testing/test.proto", @@ -996,6 +977,8 @@ const ( // XdsUpdateHealthServiceClient is the client API for XdsUpdateHealthService service. 
// // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// A service to remotely control health status of an xDS test server. type XdsUpdateHealthServiceClient interface { SetServing(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) SetNotServing(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) @@ -1011,8 +994,9 @@ func NewXdsUpdateHealthServiceClient(cc grpc.ClientConnInterface) XdsUpdateHealt } func (c *xdsUpdateHealthServiceClient) SetServing(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Empty) - err := c.cc.Invoke(ctx, XdsUpdateHealthService_SetServing_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, XdsUpdateHealthService_SetServing_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -1020,8 +1004,9 @@ func (c *xdsUpdateHealthServiceClient) SetNotServing(ctx context.Context, in *Empty } func (c *xdsUpdateHealthServiceClient) SetNotServing(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Empty) - err := c.cc.Invoke(ctx, XdsUpdateHealthService_SetNotServing_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, XdsUpdateHealthService_SetNotServing_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -1029,8 +1014,9 @@ func (c *xdsUpdateHealthServiceClient) SendHookRequest(ctx context.Context, in *HookRequest, opts ...grpc.CallOption) (*HookResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(HookResponse) - err := c.cc.Invoke(ctx, XdsUpdateHealthService_SendHookRequest_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, XdsUpdateHealthService_SendHookRequest_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -1040,6 +1026,8 @@ func (c *xdsUpdateHealthServiceClient) SendHookRequest(ctx context.Context, in * // XdsUpdateHealthServiceServer is the server API for XdsUpdateHealthService service. // All implementations must embed UnimplementedXdsUpdateHealthServiceServer // for forward compatibility +// +// A service to remotely control health status of an xDS test server. type XdsUpdateHealthServiceServer interface { SetServing(context.Context, *Empty) (*Empty, error) SetNotServing(context.Context, *Empty) (*Empty, error)
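One pattern repeats in every regenerated client method above: a grpc.StaticMethod() call option is prepended before the caller's own options. The option tells instrumentation such as stats handlers that the method string is a compile-time constant, safe to use as a low-cardinality metrics label. A minimal sketch of the same idiom, with a placeholder method name:

```go
package example

import (
	"context"

	"google.golang.org/grpc"
)

// invokeStatic mirrors the generated pattern: grpc.StaticMethod() is placed
// first so that caller-supplied CallOptions still follow it. The method name
// "/pkg.Service/Method" is a placeholder, not a real service.
func invokeStatic(ctx context.Context, cc *grpc.ClientConn, in, out any, opts ...grpc.CallOption) error {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	return cc.Invoke(ctx, "/pkg.Service/Method", in, out, cOpts...)
}
```

Prepending rather than appending keeps the caller's options last, so they remain able to override the generated defaults.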
@@ -1159,6 +1147,8 @@ const ( // XdsUpdateClientConfigureServiceClient is the client API for XdsUpdateClientConfigureService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// A service to dynamically update the configuration of an xDS test client. type XdsUpdateClientConfigureServiceClient interface { // Update the test client's configuration. Configure(ctx context.Context, in *ClientConfigureRequest, opts ...grpc.CallOption) (*ClientConfigureResponse, error) @@ -1173,8 +1163,9 @@ func NewXdsUpdateClientConfigureServiceClient(cc grpc.ClientConnInterface) XdsUp } func (c *xdsUpdateClientConfigureServiceClient) Configure(ctx context.Context, in *ClientConfigureRequest, opts ...grpc.CallOption) (*ClientConfigureResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ClientConfigureResponse) - err := c.cc.Invoke(ctx, XdsUpdateClientConfigureService_Configure_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, XdsUpdateClientConfigureService_Configure_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -1184,6 +1175,8 @@ func (c *xdsUpdateClientConfigureServiceClient) Configure(ctx context.Context, i } // XdsUpdateClientConfigureServiceServer is the server API for XdsUpdateClientConfigureService service. // All implementations must embed UnimplementedXdsUpdateClientConfigureServiceServer // for forward compatibility +// +// A service to dynamically update the configuration of an xDS test client. type XdsUpdateClientConfigureServiceServer interface { // Update the test client's configuration. Configure(context.Context, *ClientConfigureRequest) (*ClientConfigureResponse, error) diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/worker_service.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/worker_service.pb.go index 1feccc9f7..849b01667 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/worker_service.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/worker_service.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.22.0 +// protoc-gen-go v1.34.1 +// protoc v4.25.2 // source: grpc/testing/worker_service.proto package grpc_testing diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/worker_service_grpc.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/worker_service_grpc.pb.go index 1de7f09f8..32ff7d9e2 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/worker_service_grpc.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/worker_service_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v4.22.0 +// - protoc-gen-go-grpc v1.4.0 +// - protoc v4.25.2 // source: grpc/testing/worker_service.proto package grpc_testing @@ -32,8 +32,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( WorkerService_RunServer_FullMethodName = "/grpc.testing.WorkerService/RunServer" @@ -52,14 +52,14 @@ type WorkerServiceClient interface { // stats. Closing the stream will initiate shutdown of the test server // and once the shutdown has finished, the OK status is sent to terminate // this RPC. - RunServer(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunServerClient, error) + RunServer(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ServerArgs, ServerStatus], error) // Start client with specified workload. // First request sent specifies the ClientConfig followed by ClientStatus // response.
After that, a "Mark" can be sent anytime to request the latest // stats. Closing the stream will initiate shutdown of the test client // and once the shutdown has finished, the OK status is sent to terminate // this RPC. - RunClient(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunClientClient, error) + RunClient(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ClientArgs, ClientStatus], error) // Just return the core count - unary call CoreCount(ctx context.Context, in *CoreRequest, opts ...grpc.CallOption) (*CoreResponse, error) // Quit this worker @@ -74,71 +74,36 @@ func NewWorkerServiceClient(cc grpc.ClientConnInterface) WorkerServiceClient { return &workerServiceClient{cc} } -func (c *workerServiceClient) RunServer(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunServerClient, error) { - stream, err := c.cc.NewStream(ctx, &WorkerService_ServiceDesc.Streams[0], WorkerService_RunServer_FullMethodName, opts...) +func (c *workerServiceClient) RunServer(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ServerArgs, ServerStatus], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &WorkerService_ServiceDesc.Streams[0], WorkerService_RunServer_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &workerServiceRunServerClient{stream} + x := &grpc.GenericClientStream[ServerArgs, ServerStatus]{ClientStream: stream} return x, nil } -type WorkerService_RunServerClient interface { - Send(*ServerArgs) error - Recv() (*ServerStatus, error) - grpc.ClientStream -} - -type workerServiceRunServerClient struct { - grpc.ClientStream -} - -func (x *workerServiceRunServerClient) Send(m *ServerArgs) error { - return x.ClientStream.SendMsg(m) -} - -func (x *workerServiceRunServerClient) Recv() (*ServerStatus, error) { - m := new(ServerStatus) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type WorkerService_RunServerClient = grpc.BidiStreamingClient[ServerArgs, ServerStatus] -func (c *workerServiceClient) RunClient(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunClientClient, error) { - stream, err := c.cc.NewStream(ctx, &WorkerService_ServiceDesc.Streams[1], WorkerService_RunClient_FullMethodName, opts...) +func (c *workerServiceClient) RunClient(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ClientArgs, ClientStatus], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &WorkerService_ServiceDesc.Streams[1], WorkerService_RunClient_FullMethodName, cOpts...) 
if err != nil { return nil, err } - x := &workerServiceRunClientClient{stream} + x := &grpc.GenericClientStream[ClientArgs, ClientStatus]{ClientStream: stream} return x, nil } -type WorkerService_RunClientClient interface { - Send(*ClientArgs) error - Recv() (*ClientStatus, error) - grpc.ClientStream -} - -type workerServiceRunClientClient struct { - grpc.ClientStream -} - -func (x *workerServiceRunClientClient) Send(m *ClientArgs) error { - return x.ClientStream.SendMsg(m) -} - -func (x *workerServiceRunClientClient) Recv() (*ClientStatus, error) { - m := new(ClientStatus) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type WorkerService_RunClientClient = grpc.BidiStreamingClient[ClientArgs, ClientStatus] func (c *workerServiceClient) CoreCount(ctx context.Context, in *CoreRequest, opts ...grpc.CallOption) (*CoreResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(CoreResponse) - err := c.cc.Invoke(ctx, WorkerService_CoreCount_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, WorkerService_CoreCount_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -146,8 +111,9 @@ func (c *workerServiceClient) CoreCount(ctx context.Context, in *CoreRequest, op } func (c *workerServiceClient) QuitWorker(ctx context.Context, in *Void, opts ...grpc.CallOption) (*Void, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Void) - err := c.cc.Invoke(ctx, WorkerService_QuitWorker_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, WorkerService_QuitWorker_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -164,14 +130,14 @@ type WorkerServiceServer interface { // stats. Closing the stream will initiate shutdown of the test server // and once the shutdown has finished, the OK status is sent to terminate // this RPC. - RunServer(WorkerService_RunServerServer) error + RunServer(grpc.BidiStreamingServer[ServerArgs, ServerStatus]) error // Start client with specified workload. // First request sent specifies the ClientConfig followed by ClientStatus // response. After that, a "Mark" can be sent anytime to request the latest // stats. Closing the stream will initiate shutdown of the test client // and once the shutdown has finished, the OK status is sent to terminate // this RPC. 
- RunClient(WorkerService_RunClientServer) error + RunClient(grpc.BidiStreamingServer[ClientArgs, ClientStatus]) error // Just return the core count - unary call CoreCount(context.Context, *CoreRequest) (*CoreResponse, error) // Quit this worker @@ -183,10 +149,10 @@ type WorkerServiceServer interface { type UnimplementedWorkerServiceServer struct { } -func (UnimplementedWorkerServiceServer) RunServer(WorkerService_RunServerServer) error { +func (UnimplementedWorkerServiceServer) RunServer(grpc.BidiStreamingServer[ServerArgs, ServerStatus]) error { return status.Errorf(codes.Unimplemented, "method RunServer not implemented") } -func (UnimplementedWorkerServiceServer) RunClient(WorkerService_RunClientServer) error { +func (UnimplementedWorkerServiceServer) RunClient(grpc.BidiStreamingServer[ClientArgs, ClientStatus]) error { return status.Errorf(codes.Unimplemented, "method RunClient not implemented") } func (UnimplementedWorkerServiceServer) CoreCount(context.Context, *CoreRequest) (*CoreResponse, error) { @@ -209,56 +175,18 @@ func RegisterWorkerServiceServer(s grpc.ServiceRegistrar, srv WorkerServiceServe } func _WorkerService_RunServer_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(WorkerServiceServer).RunServer(&workerServiceRunServerServer{stream}) + return srv.(WorkerServiceServer).RunServer(&grpc.GenericServerStream[ServerArgs, ServerStatus]{ServerStream: stream}) } -type WorkerService_RunServerServer interface { - Send(*ServerStatus) error - Recv() (*ServerArgs, error) - grpc.ServerStream -} - -type workerServiceRunServerServer struct { - grpc.ServerStream -} - -func (x *workerServiceRunServerServer) Send(m *ServerStatus) error { - return x.ServerStream.SendMsg(m) -} - -func (x *workerServiceRunServerServer) Recv() (*ServerArgs, error) { - m := new(ServerArgs) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type WorkerService_RunServerServer = grpc.BidiStreamingServer[ServerArgs, ServerStatus] func _WorkerService_RunClient_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(WorkerServiceServer).RunClient(&workerServiceRunClientServer{stream}) + return srv.(WorkerServiceServer).RunClient(&grpc.GenericServerStream[ClientArgs, ClientStatus]{ServerStream: stream}) } -type WorkerService_RunClientServer interface { - Send(*ClientStatus) error - Recv() (*ClientArgs, error) - grpc.ServerStream -} - -type workerServiceRunClientServer struct { - grpc.ServerStream -} - -func (x *workerServiceRunClientServer) Send(m *ClientStatus) error { - return x.ServerStream.SendMsg(m) -} - -func (x *workerServiceRunClientServer) Recv() (*ClientArgs, error) { - m := new(ClientArgs) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
+type WorkerService_RunClientServer = grpc.BidiStreamingServer[ClientArgs, ClientStatus] func _WorkerService_CoreCount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CoreRequest) diff --git a/vendor/google.golang.org/grpc/interop/test_utils.go b/vendor/google.golang.org/grpc/interop/test_utils.go index 7b597cae4..302071e34 100644 --- a/vendor/google.golang.org/grpc/interop/test_utils.go +++ b/vendor/google.golang.org/grpc/interop/test_utils.go @@ -33,7 +33,6 @@ import ( "sync" "time" - "github.com/golang/protobuf/proto" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "google.golang.org/grpc" @@ -44,6 +43,7 @@ import ( "google.golang.org/grpc/orca" "google.golang.org/grpc/peer" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" testgrpc "google.golang.org/grpc/interop/grpc_testing" @@ -79,8 +79,8 @@ func ClientNewPayload(t testpb.PayloadType, size int) *testpb.Payload { } // DoEmptyUnaryCall performs a unary RPC with empty request and response messages. -func DoEmptyUnaryCall(tc testgrpc.TestServiceClient, args ...grpc.CallOption) { - reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, args...) +func DoEmptyUnaryCall(ctx context.Context, tc testgrpc.TestServiceClient, args ...grpc.CallOption) { + reply, err := tc.EmptyCall(ctx, &testpb.Empty{}, args...) if err != nil { logger.Fatal("/TestService/EmptyCall RPC failed: ", err) } @@ -90,14 +90,14 @@ func DoEmptyUnaryCall(tc testgrpc.TestServiceClient, args ...grpc.CallOption) { } // DoLargeUnaryCall performs a unary RPC with large payload in the request and response. -func DoLargeUnaryCall(tc testgrpc.TestServiceClient, args ...grpc.CallOption) { +func DoLargeUnaryCall(ctx context.Context, tc testgrpc.TestServiceClient, args ...grpc.CallOption) { pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: int32(largeRespSize), Payload: pl, } - reply, err := tc.UnaryCall(context.Background(), req, args...) + reply, err := tc.UnaryCall(ctx, req, args...) if err != nil { logger.Fatal("/TestService/UnaryCall RPC failed: ", err) } @@ -109,8 +109,8 @@ func DoLargeUnaryCall(tc testgrpc.TestServiceClient, args ...grpc.CallOption) { } // DoClientStreaming performs a client streaming RPC. -func DoClientStreaming(tc testgrpc.TestServiceClient, args ...grpc.CallOption) { - stream, err := tc.StreamingInputCall(context.Background(), args...) +func DoClientStreaming(ctx context.Context, tc testgrpc.TestServiceClient, args ...grpc.CallOption) { + stream, err := tc.StreamingInputCall(ctx, args...) if err != nil { logger.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err) } @@ -135,7 +135,7 @@ func DoClientStreaming(tc testgrpc.TestServiceClient, args ...grpc.CallOption) { } // DoServerStreaming performs a server streaming RPC. 
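Pausing before the server-streaming helper below: the worker_service_grpc.pb.go hunks above drop the hand-written workerServiceRunServerClient/Server wrapper types in favor of the generic grpc.BidiStreamingClient and grpc.BidiStreamingServer, keeping the old names as type aliases so existing code still compiles. A hedged sketch of a server implementation under the new shape (the loop body is illustrative, not the real worker logic):

```go
package example

import (
	"io"

	"google.golang.org/grpc"
	testgrpc "google.golang.org/grpc/interop/grpc_testing"
)

type workerServer struct {
	testgrpc.UnimplementedWorkerServiceServer
}

// RunServer spells the parameter as the generic stream type; writing it as
// testgrpc.WorkerService_RunServerServer (now an alias for the same type)
// would compile identically.
func (s *workerServer) RunServer(stream grpc.BidiStreamingServer[testgrpc.ServerArgs, testgrpc.ServerStatus]) error {
	for {
		args, err := stream.Recv()
		if err == io.EOF {
			return nil // client closed its send side; shut down cleanly
		}
		if err != nil {
			return err
		}
		_ = args // a real worker would act on ServerArgs (setup or mark) here
		if err := stream.Send(&testgrpc.ServerStatus{}); err != nil {
			return err
		}
	}
}
```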
-func DoServerStreaming(tc testgrpc.TestServiceClient, args ...grpc.CallOption) { +func DoServerStreaming(ctx context.Context, tc testgrpc.TestServiceClient, args ...grpc.CallOption) { respParam := make([]*testpb.ResponseParameters, len(respSizes)) for i, s := range respSizes { respParam[i] = &testpb.ResponseParameters{ @@ -146,7 +146,7 @@ func DoServerStreaming(tc testgrpc.TestServiceClient, args ...grpc.CallOption) { ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParam, } - stream, err := tc.StreamingOutputCall(context.Background(), req, args...) + stream, err := tc.StreamingOutputCall(ctx, req, args...) if err != nil { logger.Fatalf("%v.StreamingOutputCall(_) = _, %v", tc, err) } @@ -179,8 +179,8 @@ func DoServerStreaming(tc testgrpc.TestServiceClient, args ...grpc.CallOption) { } // DoPingPong performs ping-pong style bi-directional streaming RPC. -func DoPingPong(tc testgrpc.TestServiceClient, args ...grpc.CallOption) { - stream, err := tc.FullDuplexCall(context.Background(), args...) +func DoPingPong(ctx context.Context, tc testgrpc.TestServiceClient, args ...grpc.CallOption) { + stream, err := tc.FullDuplexCall(ctx, args...) if err != nil { logger.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) } @@ -223,8 +223,8 @@ func DoPingPong(tc testgrpc.TestServiceClient, args ...grpc.CallOption) { } // DoEmptyStream sets up a bi-directional streaming with zero message. -func DoEmptyStream(tc testgrpc.TestServiceClient, args ...grpc.CallOption) { - stream, err := tc.FullDuplexCall(context.Background(), args...) +func DoEmptyStream(ctx context.Context, tc testgrpc.TestServiceClient, args ...grpc.CallOption) { + stream, err := tc.FullDuplexCall(ctx, args...) if err != nil { logger.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) } @@ -237,8 +237,8 @@ func DoEmptyStream(tc testgrpc.TestServiceClient, args ...grpc.CallOption) { } // DoTimeoutOnSleepingServer performs an RPC on a sleep server which causes RPC timeout. -func DoTimeoutOnSleepingServer(tc testgrpc.TestServiceClient, args ...grpc.CallOption) { - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond) +func DoTimeoutOnSleepingServer(ctx context.Context, tc testgrpc.TestServiceClient, args ...grpc.CallOption) { + ctx, cancel := context.WithTimeout(ctx, 1*time.Millisecond) defer cancel() stream, err := tc.FullDuplexCall(ctx, args...) if err != nil { @@ -261,7 +261,7 @@ func DoTimeoutOnSleepingServer(tc testgrpc.TestServiceClient, args ...grpc.CallO } // DoComputeEngineCreds performs a unary RPC with compute engine auth. -func DoComputeEngineCreds(tc testgrpc.TestServiceClient, serviceAccount, oauthScope string) { +func DoComputeEngineCreds(ctx context.Context, tc testgrpc.TestServiceClient, serviceAccount, oauthScope string) { pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, @@ -270,7 +270,7 @@ func DoComputeEngineCreds(tc testgrpc.TestServiceClient, serviceAccount, oauthSc FillUsername: true, FillOauthScope: true, } - reply, err := tc.UnaryCall(context.Background(), req) + reply, err := tc.UnaryCall(ctx, req) if err != nil { logger.Fatal("/TestService/UnaryCall RPC failed: ", err) } @@ -293,7 +293,7 @@ func getServiceAccountJSONKey(keyFile string) []byte { } // DoServiceAccountCreds performs a unary RPC with service account auth. 
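The test_utils.go hunks above, and those that continue below with DoServiceAccountCreds, all make the same mechanical change: each interop helper now takes a caller-owned context.Context instead of minting context.Background() internally. A sketch of the caller side, assuming an already-connected *grpc.ClientConn; the two-minute budget is illustrative:

```go
package example

import (
	"context"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/interop"
	testgrpc "google.golang.org/grpc/interop/grpc_testing"
)

// runBasicCases bounds every helper with one context owned by the driver,
// which is the point of the new signatures.
func runBasicCases(cc *grpc.ClientConn) {
	tc := testgrpc.NewTestServiceClient(cc)
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	interop.DoEmptyUnaryCall(ctx, tc)
	interop.DoLargeUnaryCall(ctx, tc)
	interop.DoClientStreaming(ctx, tc)
	interop.DoServerStreaming(ctx, tc)
	interop.DoPingPong(ctx, tc)
}
```

One deadline now covers the whole suite, and cancelling the context aborts every helper uniformly.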
-func DoServiceAccountCreds(tc testgrpc.TestServiceClient, serviceAccountKeyFile, oauthScope string) { +func DoServiceAccountCreds(ctx context.Context, tc testgrpc.TestServiceClient, serviceAccountKeyFile, oauthScope string) { pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, @@ -302,7 +302,7 @@ func DoServiceAccountCreds(tc testgrpc.TestServiceClient, serviceAccountKeyFile, FillUsername: true, FillOauthScope: true, } - reply, err := tc.UnaryCall(context.Background(), req) + reply, err := tc.UnaryCall(ctx, req) if err != nil { logger.Fatal("/TestService/UnaryCall RPC failed: ", err) } @@ -318,7 +318,7 @@ func DoServiceAccountCreds(tc testgrpc.TestServiceClient, serviceAccountKeyFile, } // DoJWTTokenCreds performs a unary RPC with JWT token auth. -func DoJWTTokenCreds(tc testgrpc.TestServiceClient, serviceAccountKeyFile string) { +func DoJWTTokenCreds(ctx context.Context, tc testgrpc.TestServiceClient, serviceAccountKeyFile string) { pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, @@ -326,7 +326,7 @@ func DoJWTTokenCreds(tc testgrpc.TestServiceClient, serviceAccountKeyFile string Payload: pl, FillUsername: true, } - reply, err := tc.UnaryCall(context.Background(), req) + reply, err := tc.UnaryCall(ctx, req) if err != nil { logger.Fatal("/TestService/UnaryCall RPC failed: ", err) } @@ -338,13 +338,13 @@ func DoJWTTokenCreds(tc testgrpc.TestServiceClient, serviceAccountKeyFile string } // GetToken obtains an OAUTH token from the input. -func GetToken(serviceAccountKeyFile string, oauthScope string) *oauth2.Token { +func GetToken(ctx context.Context, serviceAccountKeyFile string, oauthScope string) *oauth2.Token { jsonKey := getServiceAccountJSONKey(serviceAccountKeyFile) config, err := google.JWTConfigFromJSON(jsonKey, oauthScope) if err != nil { logger.Fatalf("Failed to get the config: %v", err) } - token, err := config.TokenSource(context.Background()).Token() + token, err := config.TokenSource(ctx).Token() if err != nil { logger.Fatalf("Failed to get the token: %v", err) } @@ -352,7 +352,7 @@ func GetToken(serviceAccountKeyFile string, oauthScope string) *oauth2.Token { } // DoOauth2TokenCreds performs a unary RPC with OAUTH2 token auth. -func DoOauth2TokenCreds(tc testgrpc.TestServiceClient, serviceAccountKeyFile, oauthScope string) { +func DoOauth2TokenCreds(ctx context.Context, tc testgrpc.TestServiceClient, serviceAccountKeyFile, oauthScope string) { pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, @@ -361,7 +361,7 @@ func DoOauth2TokenCreds(tc testgrpc.TestServiceClient, serviceAccountKeyFile, oa FillUsername: true, FillOauthScope: true, } - reply, err := tc.UnaryCall(context.Background(), req) + reply, err := tc.UnaryCall(ctx, req) if err != nil { logger.Fatal("/TestService/UnaryCall RPC failed: ", err) } @@ -377,7 +377,7 @@ func DoOauth2TokenCreds(tc testgrpc.TestServiceClient, serviceAccountKeyFile, oa } // DoPerRPCCreds performs a unary RPC with per RPC OAUTH2 token. 
-func DoPerRPCCreds(tc testgrpc.TestServiceClient, serviceAccountKeyFile, oauthScope string) { +func DoPerRPCCreds(ctx context.Context, tc testgrpc.TestServiceClient, serviceAccountKeyFile, oauthScope string) { jsonKey := getServiceAccountJSONKey(serviceAccountKeyFile) pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ @@ -387,9 +387,9 @@ func DoPerRPCCreds(tc testgrpc.TestServiceClient, serviceAccountKeyFile, oauthSc FillUsername: true, FillOauthScope: true, } - token := GetToken(serviceAccountKeyFile, oauthScope) + token := GetToken(ctx, serviceAccountKeyFile, oauthScope) kv := map[string]string{"authorization": token.Type() + " " + token.AccessToken} - ctx := metadata.NewOutgoingContext(context.Background(), metadata.MD{"authorization": []string{kv["authorization"]}}) + ctx = metadata.NewOutgoingContext(ctx, metadata.MD{"authorization": []string{kv["authorization"]}}) reply, err := tc.UnaryCall(ctx, req) if err != nil { logger.Fatal("/TestService/UnaryCall RPC failed: ", err) @@ -405,7 +405,7 @@ func DoPerRPCCreds(tc testgrpc.TestServiceClient, serviceAccountKeyFile, oauthSc } // DoGoogleDefaultCredentials performs a unary RPC with google default credentials -func DoGoogleDefaultCredentials(tc testgrpc.TestServiceClient, defaultServiceAccount string) { +func DoGoogleDefaultCredentials(ctx context.Context, tc testgrpc.TestServiceClient, defaultServiceAccount string) { pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, @@ -414,7 +414,7 @@ func DoGoogleDefaultCredentials(tc testgrpc.TestServiceClient, defaultServiceAcc FillUsername: true, FillOauthScope: true, } - reply, err := tc.UnaryCall(context.Background(), req) + reply, err := tc.UnaryCall(ctx, req) if err != nil { logger.Fatal("/TestService/UnaryCall RPC failed: ", err) } @@ -424,7 +424,7 @@ func DoGoogleDefaultCredentials(tc testgrpc.TestServiceClient, defaultServiceAcc } // DoComputeEngineChannelCredentials performs a unary RPC with compute engine channel credentials -func DoComputeEngineChannelCredentials(tc testgrpc.TestServiceClient, defaultServiceAccount string) { +func DoComputeEngineChannelCredentials(ctx context.Context, tc testgrpc.TestServiceClient, defaultServiceAccount string) { pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, @@ -433,7 +433,7 @@ func DoComputeEngineChannelCredentials(tc testgrpc.TestServiceClient, defaultSer FillUsername: true, FillOauthScope: true, } - reply, err := tc.UnaryCall(context.Background(), req) + reply, err := tc.UnaryCall(ctx, req) if err != nil { logger.Fatal("/TestService/UnaryCall RPC failed: ", err) } @@ -448,8 +448,8 @@ var testMetadata = metadata.MD{ } // DoCancelAfterBegin cancels the RPC after metadata has been sent but before payloads are sent. -func DoCancelAfterBegin(tc testgrpc.TestServiceClient, args ...grpc.CallOption) { - ctx, cancel := context.WithCancel(metadata.NewOutgoingContext(context.Background(), testMetadata)) +func DoCancelAfterBegin(ctx context.Context, tc testgrpc.TestServiceClient, args ...grpc.CallOption) { + ctx, cancel := context.WithCancel(metadata.NewOutgoingContext(ctx, testMetadata)) stream, err := tc.StreamingInputCall(ctx, args...)
if err != nil { logger.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err) @@ -462,8 +462,8 @@ func DoCancelAfterBegin(tc testgrpc.TestServiceClient, args ...grpc.CallOption) } // DoCancelAfterFirstResponse cancels the RPC after receiving the first message from the server. -func DoCancelAfterFirstResponse(tc testgrpc.TestServiceClient, args ...grpc.CallOption) { - ctx, cancel := context.WithCancel(context.Background()) +func DoCancelAfterFirstResponse(ctx context.Context, tc testgrpc.TestServiceClient, args ...grpc.CallOption) { + ctx, cancel := context.WithCancel(ctx) stream, err := tc.FullDuplexCall(ctx, args...) if err != nil { logger.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) @@ -516,7 +516,7 @@ func validateMetadata(header, trailer metadata.MD) { } // DoCustomMetadata checks that metadata is echoed back to the client. -func DoCustomMetadata(tc testgrpc.TestServiceClient, args ...grpc.CallOption) { +func DoCustomMetadata(ctx context.Context, tc testgrpc.TestServiceClient, args ...grpc.CallOption) { // Testing with UnaryCall. pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, 1) req := &testpb.SimpleRequest{ @@ -524,7 +524,7 @@ func DoCustomMetadata(tc testgrpc.TestServiceClient, args ...grpc.CallOption) { ResponseSize: int32(1), Payload: pl, } - ctx := metadata.NewOutgoingContext(context.Background(), customMetadata) + ctx = metadata.NewOutgoingContext(ctx, customMetadata) var header, trailer metadata.MD args = append(args, grpc.Header(&header), grpc.Trailer(&trailer)) reply, err := tc.UnaryCall( @@ -578,7 +578,7 @@ func DoCustomMetadata(tc testgrpc.TestServiceClient, args ...grpc.CallOption) { } // DoStatusCodeAndMessage checks that the status code is propagated back to the client. -func DoStatusCodeAndMessage(tc testgrpc.TestServiceClient, args ...grpc.CallOption) { +func DoStatusCodeAndMessage(ctx context.Context, tc testgrpc.TestServiceClient, args ...grpc.CallOption) { var code int32 = 2 msg := "test status message" expectedErr := status.Error(codes.Code(code), msg) @@ -590,11 +590,11 @@ func DoStatusCodeAndMessage(tc testgrpc.TestServiceClient, args ...grpc.CallOpti req := &testpb.SimpleRequest{ ResponseStatus: respStatus, } - if _, err := tc.UnaryCall(context.Background(), req, args...); err == nil || err.Error() != expectedErr.Error() { + if _, err := tc.UnaryCall(ctx, req, args...); err == nil || err.Error() != expectedErr.Error() { logger.Fatalf("%v.UnaryCall(_, %v) = _, %v, want _, %v", tc, req, err, expectedErr) } // Test FullDuplexCall. - stream, err := tc.FullDuplexCall(context.Background(), args...) + stream, err := tc.FullDuplexCall(ctx, args...) if err != nil { logger.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err) } @@ -614,7 +614,7 @@ func DoStatusCodeAndMessage(tc testgrpc.TestServiceClient, args ...grpc.CallOpti // DoSpecialStatusMessage verifies Unicode and whitespace are correctly processed // in status message.
-func DoSpecialStatusMessage(tc testgrpc.TestServiceClient, args ...grpc.CallOption) { +func DoSpecialStatusMessage(ctx context.Context, tc testgrpc.TestServiceClient, args ...grpc.CallOption) { const ( code int32 = 2 msg string = "\t\ntest with whitespace\r\nand Unicode BMP ☺ and non-BMP 😈\t\n" @@ -626,7 +626,7 @@ func DoSpecialStatusMessage(tc testgrpc.TestServiceClient, args ...grpc.CallOpti Message: msg, }, } - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() if _, err := tc.UnaryCall(ctx, req, args...); err == nil || err.Error() != expectedErr.Error() { logger.Fatalf("%v.UnaryCall(_, %v) = _, %v, want _, %v", tc, req, err, expectedErr) @@ -634,24 +634,24 @@ func DoSpecialStatusMessage(tc testgrpc.TestServiceClient, args ...grpc.CallOpti } // DoUnimplementedService attempts to call a method from an unimplemented service. -func DoUnimplementedService(tc testgrpc.UnimplementedServiceClient) { - _, err := tc.UnimplementedCall(context.Background(), &testpb.Empty{}) +func DoUnimplementedService(ctx context.Context, tc testgrpc.UnimplementedServiceClient) { + _, err := tc.UnimplementedCall(ctx, &testpb.Empty{}) if status.Code(err) != codes.Unimplemented { logger.Fatalf("%v.UnimplementedCall() = _, %v, want _, %v", tc, status.Code(err), codes.Unimplemented) } } // DoUnimplementedMethod attempts to call an unimplemented method. -func DoUnimplementedMethod(cc *grpc.ClientConn) { +func DoUnimplementedMethod(ctx context.Context, cc *grpc.ClientConn) { var req, reply proto.Message - if err := cc.Invoke(context.Background(), "/grpc.testing.TestService/UnimplementedCall", req, reply); err == nil || status.Code(err) != codes.Unimplemented { + if err := cc.Invoke(ctx, "/grpc.testing.TestService/UnimplementedCall", req, reply); err == nil || status.Code(err) != codes.Unimplemented { logger.Fatalf("ClientConn.Invoke(_, _, _, _, _) = %v, want error code %s", err, codes.Unimplemented) } } // DoPickFirstUnary runs multiple RPCs (rpcCount) and checks that all requests // are sent to the same backend. -func DoPickFirstUnary(tc testgrpc.TestServiceClient) { +func DoPickFirstUnary(ctx context.Context, tc testgrpc.TestServiceClient) { const rpcCount = 100 pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, 1) @@ -662,7 +662,7 @@ func DoPickFirstUnary(tc testgrpc.TestServiceClient) { FillServerId: true, } // TODO(mohanli): Revert the timeout back to 10s once TD migrates to xdstp. - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() var serverID string for i := 0; i < rpcCount; i++ { @@ -724,10 +724,9 @@ func doOneSoakIteration(ctx context.Context, tc testgrpc.TestServiceClient, rese // If resetChannel is false, then each RPC will be performed on tc. Otherwise, each RPC will be performed on a new // stub that is created with the provided server address and dial options. // TODO(mohanli-ml): Create SoakTestOptions as a parameter for this method. 
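The DoSoakTest hunks just below remove the explicit overallDeadline parameter; the overall bound now comes from the caller's context, and elapsed time is computed from a local start time when the context expires. A hedged migration sketch, with illustrative iteration counts, payload sizes, and latency budget (271828 and 314159 mirror the interop client's customary request/response sizes):

```go
package example

import (
	"context"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/interop"
	testgrpc "google.golang.org/grpc/interop/grpc_testing"
)

// runSoak shows the migration: the overall deadline that used to be passed as
// a time.Time argument is now expressed as a context deadline.
func runSoak(cc *grpc.ClientConn, serverAddr string, dopts []grpc.DialOption) {
	tc := testgrpc.NewTestServiceClient(cc)
	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Minute))
	defer cancel()

	interop.DoSoakTest(ctx, tc, serverAddr, dopts,
		false,                // resetChannel: reuse tc on every iteration
		10,                   // soakIterations
		0,                    // maxFailures
		271828,               // soakRequestSize
		314159,               // soakResponseSize
		time.Second,          // perIterationMaxAcceptableLatency
		100*time.Millisecond, // minTimeBetweenRPCs
	)
}
```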
-func DoSoakTest(tc testgrpc.TestServiceClient, serverAddr string, dopts []grpc.DialOption, resetChannel bool, soakIterations int, maxFailures int, soakRequestSize int, soakResponseSize int, perIterationMaxAcceptableLatency time.Duration, minTimeBetweenRPCs time.Duration, overallDeadline time.Time) { +func DoSoakTest(ctx context.Context, tc testgrpc.TestServiceClient, serverAddr string, dopts []grpc.DialOption, resetChannel bool, soakIterations int, maxFailures int, soakRequestSize int, soakResponseSize int, perIterationMaxAcceptableLatency time.Duration, minTimeBetweenRPCs time.Duration) { start := time.Now() - ctx, cancel := context.WithDeadline(context.Background(), overallDeadline) - defer cancel() + var elapsedTime float64 iterationsDone := 0 totalFailures := 0 hopts := stats.HistogramOptions{ @@ -738,7 +737,8 @@ func DoSoakTest(tc testgrpc.TestServiceClient, serverAddr string, dopts []grpc.D } h := stats.NewHistogram(hopts) for i := 0; i < soakIterations; i++ { - if time.Now().After(overallDeadline) { + if ctx.Err() != nil { + elapsedTime = time.Since(start).Seconds() break } earliestNextStart := time.After(minTimeBetweenRPCs) @@ -771,7 +771,7 @@ func DoSoakTest(tc testgrpc.TestServiceClient, serverAddr string, dopts []grpc.D fmt.Fprintf(os.Stderr, "(server_uri: %s) histogram of per-iteration latencies in milliseconds: %s\n", serverAddr, b.String()) fmt.Fprintf(os.Stderr, "(server_uri: %s) soak test ran: %d / %d iterations. total failures: %d. max failures threshold: %d. See breakdown above for which iterations succeeded, failed, and why for more info.\n", serverAddr, iterationsDone, soakIterations, totalFailures, maxFailures) if iterationsDone < soakIterations { - logger.Fatalf("(server_uri: %s) soak test consumed all %f seconds of time and quit early, only having ran %d out of desired %d iterations.", serverAddr, overallDeadline.Sub(start).Seconds(), iterationsDone, soakIterations) + logger.Fatalf("(server_uri: %s) soak test consumed all %f seconds of time and quit early, only having ran %d out of desired %d iterations.", serverAddr, elapsedTime, iterationsDone, soakIterations) } if totalFailures > maxFailures { logger.Fatalf("(server_uri: %s) soak test total failures: %d exceeds max failures threshold: %d.", serverAddr, totalFailures, maxFailures) @@ -989,8 +989,8 @@ func (s *testServer) HalfDuplexCall(stream testgrpc.TestService_HalfDuplexCallSe // DoORCAPerRPCTest performs a unary RPC that enables ORCA per-call reporting // and verifies the load report sent back to the LB policy's Done callback. -func DoORCAPerRPCTest(tc testgrpc.TestServiceClient) { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) +func DoORCAPerRPCTest(ctx context.Context, tc testgrpc.TestServiceClient) { + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() orcaRes := &v3orcapb.OrcaLoadReport{} _, err := tc.UnaryCall(contextWithORCAResult(ctx, &orcaRes), &testpb.SimpleRequest{ @@ -1017,8 +1017,8 @@ func DoORCAPerRPCTest(tc testgrpc.TestServiceClient) { // DoORCAOOBTest performs a streaming RPC that enables ORCA OOB reporting and // verifies the load report sent to the LB policy's OOB listener. 
-func DoORCAOOBTest(tc testgrpc.TestServiceClient) { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) +func DoORCAOOBTest(ctx context.Context, tc testgrpc.TestServiceClient) { + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() stream, err := tc.FullDuplexCall(ctx) if err != nil { diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index a2cdcaf12..1e9485fd6 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -25,8 +25,14 @@ import ( "context" "fmt" "strings" + + "google.golang.org/grpc/internal" ) +func init() { + internal.FromOutgoingContextRaw = fromOutgoingContextRaw +} + // DecodeKeyValue returns k, v, nil. // // Deprecated: use k and v directly instead. @@ -153,14 +159,16 @@ func Join(mds ...MD) MD { type mdIncomingKey struct{} type mdOutgoingKey struct{} -// NewIncomingContext creates a new context with incoming md attached. +// NewIncomingContext creates a new context with incoming md attached. md must +// not be modified after calling this function. func NewIncomingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdIncomingKey{}, md) } // NewOutgoingContext creates a new context with outgoing md attached. If used // in conjunction with AppendToOutgoingContext, NewOutgoingContext will -// overwrite any previously-appended metadata. +// overwrite any previously-appended metadata. md must not be modified after +// calling this function. func NewOutgoingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) } @@ -203,7 +211,8 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { } // ValueFromIncomingContext returns the metadata value corresponding to the metadata -// key from the incoming metadata if it exists. Key must be lower-case. +// key from the incoming metadata if it exists. Keys are matched in a case insensitive +// manner. // // # Experimental // @@ -219,33 +228,29 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string { return copyOf(v) } for k, v := range md { - // We need to manually convert all keys to lower case, because MD is a - // map, and there's no guarantee that the MD attached to the context is - // created using our helper functions. - if strings.ToLower(k) == key { + // Case insensitive comparison: MD is a map, and there's no guarantee + // that the MD attached to the context is created using our helper + // functions. + if strings.EqualFold(k, key) { return copyOf(v) } } return nil } -// the returned slice must not be modified in place func copyOf(v []string) []string { vals := make([]string, len(v)) copy(vals, v) return vals } -// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. +// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. // // Remember to perform strings.ToLower on the keys, for both the returned MD (MD // is a map, there's no guarantee it's created using our helper functions) and // the extra kv pairs (AppendToOutgoingContext doesn't turn them into // lowercase). -// -// This is intended for gRPC-internal use ONLY. Users should use -// FromOutgoingContext instead.
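Two user-visible points in the metadata.go hunks above: ValueFromIncomingContext now compares keys with strings.EqualFold instead of lowercasing each key on every iteration, and FromOutgoingContextRaw becomes the unexported fromOutgoingContextRaw, reachable by the rest of gRPC only through the internal.FromOutgoingContextRaw hook installed in init. A small sketch of the case-insensitive lookup:

```go
package example

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

// lookup demonstrates the new matching: the MD below was built directly with
// a mixed-case key (metadata.Pairs would have lowercased it), yet the
// lowercase query still finds it.
func lookup() {
	ctx := metadata.NewIncomingContext(context.Background(),
		metadata.MD{"X-Request-Id": []string{"abc123"}})
	fmt.Println(metadata.ValueFromIncomingContext(ctx, "x-request-id")) // [abc123]
}
```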
-func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { +func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { return nil, nil, false diff --git a/vendor/google.golang.org/grpc/orca/producer.go b/vendor/google.golang.org/grpc/orca/producer.go index 2d5872554..04edae6de 100644 --- a/vendor/google.golang.org/grpc/orca/producer.go +++ b/vendor/google.golang.org/grpc/orca/producer.go @@ -24,6 +24,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/orca/internal" "google.golang.org/grpc/status" @@ -169,48 +170,29 @@ func (p *producer) updateRunLocked() { func (p *producer) run(ctx context.Context, done chan struct{}, interval time.Duration) { defer close(done) - backoffAttempt := 0 - backoffTimer := time.NewTimer(0) - for ctx.Err() == nil { - select { - case <-backoffTimer.C: - case <-ctx.Done(): - return - } - + runStream := func() error { resetBackoff, err := p.runStream(ctx, interval) - - if resetBackoff { - backoffTimer.Reset(0) - backoffAttempt = 0 - } else { - backoffTimer.Reset(p.backoff(backoffAttempt)) - backoffAttempt++ - } - - switch { - case err == nil: - // No error was encountered; restart the stream. - case ctx.Err() != nil: - // Producer was stopped; exit immediately and without logging an - // error. - return - case status.Code(err) == codes.Unimplemented: + if status.Code(err) == codes.Unimplemented { // Unimplemented; do not retry. logger.Error("Server doesn't support ORCA OOB load reporting protocol; not listening for load reports.") - return - case status.Code(err) == codes.Unavailable, status.Code(err) == codes.Canceled: - // TODO: these codes should ideally log an error, too, but for now - // we receive them when shutting down the ClientConn (Unavailable - // if the stream hasn't started yet, and Canceled if it happens - // mid-stream). Once we can determine the state or ensure the - // producer is stopped before the stream ends, we can log an error - // when it's not a natural shutdown. - default: - // Log all other errors. + return err + } + // Retry for all other errors. + if code := status.Code(err); code != codes.Unavailable && code != codes.Canceled { + // TODO: Unavailable and Canceled should also ideally log an error, + // but for now we receive them when shutting down the ClientConn + // (Unavailable if the stream hasn't started yet, and Canceled if it + // happens mid-stream). Once we can determine the state or ensure + // the producer is stopped before the stream ends, we can log an + // error when it's not a natural shutdown. 
logger.Error("Received unexpected stream error:", err) } + if resetBackoff { + return backoff.ErrResetBackoff + } + return nil } + backoff.RunF(ctx, runStream, p.backoff) } // runStream runs a single stream on the subchannel and returns the resulting diff --git a/vendor/google.golang.org/grpc/orca/server_metrics.go b/vendor/google.golang.org/grpc/orca/server_metrics.go index f2cdb9b0b..67d1fa9d7 100644 --- a/vendor/google.golang.org/grpc/orca/server_metrics.go +++ b/vendor/google.golang.org/grpc/orca/server_metrics.go @@ -19,7 +19,7 @@ package orca import ( - "sync" + "sync/atomic" v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" ) @@ -142,8 +142,7 @@ type ServerMetricsRecorder interface { } type serverMetricsRecorder struct { - mu sync.Mutex // protects state - state *ServerMetrics // the current metrics + state atomic.Pointer[ServerMetrics] // the current metrics } // NewServerMetricsRecorder returns an in-memory store for ServerMetrics and @@ -154,34 +153,23 @@ func NewServerMetricsRecorder() ServerMetricsRecorder { } func newServerMetricsRecorder() *serverMetricsRecorder { - return &serverMetricsRecorder{ - state: &ServerMetrics{ - CPUUtilization: -1, - MemUtilization: -1, - AppUtilization: -1, - QPS: -1, - EPS: -1, - Utilization: make(map[string]float64), - RequestCost: make(map[string]float64), - NamedMetrics: make(map[string]float64), - }, - } + s := new(serverMetricsRecorder) + s.state.Store(&ServerMetrics{ + CPUUtilization: -1, + MemUtilization: -1, + AppUtilization: -1, + QPS: -1, + EPS: -1, + Utilization: make(map[string]float64), + RequestCost: make(map[string]float64), + NamedMetrics: make(map[string]float64), + }) + return s } // ServerMetrics returns a copy of the current ServerMetrics. func (s *serverMetricsRecorder) ServerMetrics() *ServerMetrics { - s.mu.Lock() - defer s.mu.Unlock() - return &ServerMetrics{ - CPUUtilization: s.state.CPUUtilization, - MemUtilization: s.state.MemUtilization, - AppUtilization: s.state.AppUtilization, - QPS: s.state.QPS, - EPS: s.state.EPS, - Utilization: copyMap(s.state.Utilization), - RequestCost: copyMap(s.state.RequestCost), - NamedMetrics: copyMap(s.state.NamedMetrics), - } + return copyServerMetrics(s.state.Load()) } func copyMap(m map[string]float64) map[string]float64 { @@ -192,6 +180,19 @@ func copyMap(m map[string]float64) map[string]float64 { return ret } +func copyServerMetrics(sm *ServerMetrics) *ServerMetrics { + return &ServerMetrics{ + CPUUtilization: sm.CPUUtilization, + MemUtilization: sm.MemUtilization, + AppUtilization: sm.AppUtilization, + QPS: sm.QPS, + EPS: sm.EPS, + Utilization: copyMap(sm.Utilization), + RequestCost: copyMap(sm.RequestCost), + NamedMetrics: copyMap(sm.NamedMetrics), + } +} + // SetCPUUtilization records a measurement for the CPU utilization metric. func (s *serverMetricsRecorder) SetCPUUtilization(val float64) { if val < 0 { @@ -200,17 +201,17 @@ func (s *serverMetricsRecorder) SetCPUUtilization(val float64) { } return } - s.mu.Lock() - defer s.mu.Unlock() - s.state.CPUUtilization = val + smCopy := copyServerMetrics(s.state.Load()) + smCopy.CPUUtilization = val + s.state.Store(smCopy) } // DeleteCPUUtilization deletes the relevant server metric to prevent it from // being sent. func (s *serverMetricsRecorder) DeleteCPUUtilization() { - s.mu.Lock() - defer s.mu.Unlock() - s.state.CPUUtilization = -1 + smCopy := copyServerMetrics(s.state.Load()) + smCopy.CPUUtilization = -1 + s.state.Store(smCopy) } // SetMemoryUtilization records a measurement for the memory utilization metric. 
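The server_metrics.go rewrite above and below replaces the recorder's mutex with an atomic.Pointer[ServerMetrics]. Readers Load() an immutable snapshot without locking, while every setter clones the current snapshot, mutates the clone, and Store()s it. A generic sketch of that copy-on-write discipline, not the gRPC code itself:

```go
package example

import "sync/atomic"

// snapshot stands in for ServerMetrics: a value that is published whole and,
// by convention, never mutated after Store.
type snapshot struct {
	cpu   float64
	named map[string]float64
}

type recorder struct {
	state atomic.Pointer[snapshot]
}

func newRecorder() *recorder {
	r := &recorder{}
	r.state.Store(&snapshot{cpu: -1, named: make(map[string]float64)})
	return r
}

// clone copies the current snapshot, including its map, so the published
// version is never written to in place.
func (r *recorder) clone() *snapshot {
	old := r.state.Load()
	c := &snapshot{cpu: old.cpu, named: make(map[string]float64, len(old.named))}
	for k, v := range old.named {
		c.named[k] = v
	}
	return c
}

func (r *recorder) SetCPU(v float64) {
	c := r.clone()
	c.cpu = v
	r.state.Store(c) // readers atomically observe the complete new snapshot
}

func (r *recorder) Snapshot() *snapshot {
	return r.state.Load() // lock-free read
}
```

The trade-off, present in the patched recorder as well, is that two concurrent writers can race and the last Store wins; in exchange, reads never block.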
@@ -221,17 +222,17 @@ func (s *serverMetricsRecorder) SetMemoryUtilization(val float64) { } return } - s.mu.Lock() - defer s.mu.Unlock() - s.state.MemUtilization = val + smCopy := copyServerMetrics(s.state.Load()) + smCopy.MemUtilization = val + s.state.Store(smCopy) } // DeleteMemoryUtilization deletes the relevant server metric to prevent it // from being sent. func (s *serverMetricsRecorder) DeleteMemoryUtilization() { - s.mu.Lock() - defer s.mu.Unlock() - s.state.MemUtilization = -1 + smCopy := copyServerMetrics(s.state.Load()) + smCopy.MemUtilization = -1 + s.state.Store(smCopy) } // SetApplicationUtilization records a measurement for a generic utilization @@ -243,17 +244,17 @@ func (s *serverMetricsRecorder) SetApplicationUtilization(val float64) { } return } - s.mu.Lock() - defer s.mu.Unlock() - s.state.AppUtilization = val + smCopy := copyServerMetrics(s.state.Load()) + smCopy.AppUtilization = val + s.state.Store(smCopy) } // DeleteApplicationUtilization deletes the relevant server metric to prevent // it from being sent. func (s *serverMetricsRecorder) DeleteApplicationUtilization() { - s.mu.Lock() - defer s.mu.Unlock() - s.state.AppUtilization = -1 + smCopy := copyServerMetrics(s.state.Load()) + smCopy.AppUtilization = -1 + s.state.Store(smCopy) } // SetQPS records a measurement for the QPS metric. @@ -264,16 +265,16 @@ func (s *serverMetricsRecorder) SetQPS(val float64) { } return } - s.mu.Lock() - defer s.mu.Unlock() - s.state.QPS = val + smCopy := copyServerMetrics(s.state.Load()) + smCopy.QPS = val + s.state.Store(smCopy) } // DeleteQPS deletes the relevant server metric to prevent it from being sent. func (s *serverMetricsRecorder) DeleteQPS() { - s.mu.Lock() - defer s.mu.Unlock() - s.state.QPS = -1 + smCopy := copyServerMetrics(s.state.Load()) + smCopy.QPS = -1 + s.state.Store(smCopy) } // SetEPS records a measurement for the EPS metric. @@ -284,16 +285,16 @@ func (s *serverMetricsRecorder) SetEPS(val float64) { } return } - s.mu.Lock() - defer s.mu.Unlock() - s.state.EPS = val + smCopy := copyServerMetrics(s.state.Load()) + smCopy.EPS = val + s.state.Store(smCopy) } // DeleteEPS deletes the relevant server metric to prevent it from being sent. func (s *serverMetricsRecorder) DeleteEPS() { - s.mu.Lock() - defer s.mu.Unlock() - s.state.EPS = -1 + smCopy := copyServerMetrics(s.state.Load()) + smCopy.EPS = -1 + s.state.Store(smCopy) } // SetNamedUtilization records a measurement for a utilization metric uniquely @@ -305,47 +306,47 @@ func (s *serverMetricsRecorder) SetNamedUtilization(name string, val float64) { } return } - s.mu.Lock() - defer s.mu.Unlock() - s.state.Utilization[name] = val + smCopy := copyServerMetrics(s.state.Load()) + smCopy.Utilization[name] = val + s.state.Store(smCopy) } // DeleteNamedUtilization deletes any previously recorded measurement for a // utilization metric uniquely identifiable by name. func (s *serverMetricsRecorder) DeleteNamedUtilization(name string) { - s.mu.Lock() - defer s.mu.Unlock() - delete(s.state.Utilization, name) + smCopy := copyServerMetrics(s.state.Load()) + delete(smCopy.Utilization, name) + s.state.Store(smCopy) } // SetRequestCost records a measurement for a utilization metric uniquely // identifiable by name. 
func (s *serverMetricsRecorder) SetRequestCost(name string, val float64) { - s.mu.Lock() - defer s.mu.Unlock() - s.state.RequestCost[name] = val + smCopy := copyServerMetrics(s.state.Load()) + smCopy.RequestCost[name] = val + s.state.Store(smCopy) } // DeleteRequestCost deletes any previously recorded measurement for a // utilization metric uniquely identifiable by name. func (s *serverMetricsRecorder) DeleteRequestCost(name string) { - s.mu.Lock() - defer s.mu.Unlock() - delete(s.state.RequestCost, name) + smCopy := copyServerMetrics(s.state.Load()) + delete(smCopy.RequestCost, name) + s.state.Store(smCopy) } // SetNamedMetric records a measurement for a utilization metric uniquely // identifiable by name. func (s *serverMetricsRecorder) SetNamedMetric(name string, val float64) { - s.mu.Lock() - defer s.mu.Unlock() - s.state.NamedMetrics[name] = val + smCopy := copyServerMetrics(s.state.Load()) + smCopy.NamedMetrics[name] = val + s.state.Store(smCopy) } // DeleteNamedMetric deletes any previously recorded measurement for a // utilization metric uniquely identifiable by name. func (s *serverMetricsRecorder) DeleteNamedMetric(name string) { - s.mu.Lock() - defer s.mu.Unlock() - delete(s.state.NamedMetrics, name) + smCopy := copyServerMetrics(s.state.Load()) + delete(smCopy.NamedMetrics, name) + s.state.Store(smCopy) } diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go index e01d219ff..499a49c8c 100644 --- a/vendor/google.golang.org/grpc/peer/peer.go +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -22,7 +22,9 @@ package peer import ( "context" + "fmt" "net" + "strings" "google.golang.org/grpc/credentials" ) @@ -32,11 +34,41 @@ import ( type Peer struct { // Addr is the peer address. Addr net.Addr + // LocalAddr is the local address. + LocalAddr net.Addr // AuthInfo is the authentication information of the transport. // It is nil if there is no transport security being used. AuthInfo credentials.AuthInfo } +// String ensures the Peer type implements the Stringer interface in order to +// allow printing a context with a peerKey value effectively. +func (p *Peer) String() string { + if p == nil { + return "Peer<nil>" + } + sb := &strings.Builder{} + sb.WriteString("Peer{") + if p.Addr != nil { + fmt.Fprintf(sb, "Addr: '%s', ", p.Addr.String()) + } else { + fmt.Fprintf(sb, "Addr: <nil>, ") + } + if p.LocalAddr != nil { + fmt.Fprintf(sb, "LocalAddr: '%s', ", p.LocalAddr.String()) + } else { + fmt.Fprintf(sb, "LocalAddr: <nil>, ") + } + if p.AuthInfo != nil { + fmt.Fprintf(sb, "AuthInfo: '%s'", p.AuthInfo.AuthType()) + } else { + fmt.Fprintf(sb, "AuthInfo: <nil>") + } + sb.WriteString("}") + + return sb.String() +} + type peerKey struct{} // NewContext creates a new context with peer information attached. diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 236837f41..bdaa2130e 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -20,8 +20,9 @@ package grpc import ( "context" + "fmt" "io" - "sync" + "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" @@ -32,40 +33,43 @@ import ( "google.golang.org/grpc/status" ) +// pickerGeneration stores a picker and a channel used to signal that a picker +// newer than this one is available. +type pickerGeneration struct { + // picker is the picker produced by the LB policy. May be nil if a picker + // has never been produced.
+ picker balancer.Picker + // blockingCh is closed when the picker has been invalidated because there + // is a new one available. + blockingCh chan struct{} +} + // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick // actions and unblock when there's a picker update. type pickerWrapper struct { - mu sync.Mutex - done bool - idle bool - blockingCh chan struct{} - picker balancer.Picker + // If pickerGen holds a nil pointer, the pickerWrapper is closed. + pickerGen atomic.Pointer[pickerGeneration] statsHandlers []stats.Handler // to record blocking picker calls } func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { - return &pickerWrapper{ - blockingCh: make(chan struct{}), + pw := &pickerWrapper{ statsHandlers: statsHandlers, } + pw.pickerGen.Store(&pickerGeneration{ + blockingCh: make(chan struct{}), + }) + return pw } -// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +// updatePicker is called by UpdateState calls from the LB policy. It +// unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { - pw.mu.Lock() - if pw.done || pw.idle { - // There is a small window where a picker update from the LB policy can - // race with the channel going to idle mode. If the picker is idle here, - // it is because the channel asked it to do so, and therefore it is sage - // to ignore the update from the LB policy. - pw.mu.Unlock() - return - } - pw.picker = p - // pw.blockingCh should never be nil. - close(pw.blockingCh) - pw.blockingCh = make(chan struct{}) - pw.mu.Unlock() + old := pw.pickerGen.Swap(&pickerGeneration{ + picker: p, + blockingCh: make(chan struct{}), + }) + close(old.blockingCh) } // doneChannelzWrapper performs the following: @@ -102,27 +106,24 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. var lastPickErr error for { - pw.mu.Lock() - if pw.done { - pw.mu.Unlock() + pg := pw.pickerGen.Load() + if pg == nil { return nil, balancer.PickResult{}, ErrClientConnClosing } - - if pw.picker == nil { - ch = pw.blockingCh + if pg.picker == nil { + ch = pg.blockingCh } - if ch == pw.blockingCh { + if ch == pg.blockingCh { // This could happen when either: // - pw.picker is nil (the previous if condition), or - // - has called pick on the current picker. - pw.mu.Unlock() + // - we have already called pick on the current picker. select { case <-ctx.Done(): var errStr string if lastPickErr != nil { errStr = "latest balancer error: " + lastPickErr.Error() } else { - errStr = ctx.Err().Error() + errStr = fmt.Sprintf("received context error while waiting for new LB policy update: %s", ctx.Err().Error()) } switch ctx.Err() { case context.DeadlineExceeded: @@ -149,9 +150,8 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } } - ch = pw.blockingCh - p := pw.picker - pw.mu.Unlock() + ch = pg.blockingCh + p := pg.picker pickResult, err := p.Pick(info) if err != nil { @@ -201,32 +201,15 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. 
} func (pw *pickerWrapper) close() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.done = true - close(pw.blockingCh) + old := pw.pickerGen.Swap(nil) + close(old.blockingCh) } -func (pw *pickerWrapper) enterIdleMode() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.idle = true -} - -func (pw *pickerWrapper) exitIdleMode() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.blockingCh = make(chan struct{}) - pw.idle = false +// reset clears the pickerWrapper and prepares it for being used again when idle +// mode is exited. +func (pw *pickerWrapper) reset() { + old := pw.pickerGen.Swap(&pickerGeneration{blockingCh: make(chan struct{})}) + close(old.blockingCh) } // dropError is a wrapper error that indicates the LB policy wishes to drop the diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index a6f26c8ab..3edca296c 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -63,7 +63,7 @@ LEGACY_SOURCES=( # Generates only the new gRPC Service symbols SOURCES=( - $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^\(profiling/proto/service.proto\|reflection/grpc_reflection_v1alpha/reflection.proto\)$') + $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$') ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto @@ -93,7 +93,7 @@ Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing for src in ${SOURCES[@]}; do echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS}:${WORKDIR}/out \ + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \ -I"." \ -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ @@ -118,6 +118,6 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/ # grpc_testing_not_regenerate/*.pb.go are not re-generated, # see grpc_testing_not_regenerate/README.md for details. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go new file mode 100644 index 000000000..ef3d6ed6c --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -0,0 +1,60 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. 
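Interrupting the new file briefly: the public resolver/dns package whose body follows is a thin shim over internal/resolver/dns, re-exporting two process-global knobs. Both must be set before any gRPC traffic is started; a hedged sketch with illustrative values:

```go
package example

import (
	"time"

	"google.golang.org/grpc/resolver/dns"
)

func init() {
	// Process-global knobs; per the package docs they must run before any
	// gRPC calls are made, and the values here are illustrative.
	dns.SetResolvingTimeout(10 * time.Second)     // default is 30 seconds
	dns.SetMinResolutionInterval(5 * time.Second) // throttles re-resolution
}
```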
+package dns + +import ( + "time" + + "google.golang.org/grpc/internal/resolver/dns" + "google.golang.org/grpc/resolver" +) + +// SetResolvingTimeout sets the maximum duration for DNS resolution requests. +// +// This function affects the global timeout used by all channels using the DNS +// name resolver scheme. +// +// It must be called only at application startup, before any gRPC calls are +// made. Modifying this value after initialization is not thread-safe. +// +// The default value is 30 seconds. Setting the timeout too low may result in +// premature timeouts during resolution, while setting it too high may lead to +// unnecessary delays in service discovery. Choose a value appropriate for your +// specific needs and network environment. +func SetResolvingTimeout(timeout time.Duration) { + dns.ResolvingTimeout = timeout +} + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +// +// Deprecated: import grpc and use resolver.Get("dns") instead. +func NewBuilder() resolver.Builder { + return dns.NewBuilder() +} + +// SetMinResolutionInterval sets the default minimum interval at which DNS +// re-resolutions are allowed. This helps to prevent excessive re-resolution. +// +// It must be called only at application startup, before any gRPC calls are +// made. Modifying this value after initialization is not thread-safe. +func SetMinResolutionInterval(d time.Duration) { + dns.MinResolutionInterval = d +} diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index 804be887d..ada5b9bb7 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -136,3 +136,116 @@ func (a *AddressMap) Values() []any { } return ret } + +type endpointNode struct { + addrs map[string]struct{} +} + +// Equal returns whether the unordered set of addrs are the same between the +// endpoint nodes. +func (en *endpointNode) Equal(en2 *endpointNode) bool { + if len(en.addrs) != len(en2.addrs) { + return false + } + for addr := range en.addrs { + if _, ok := en2.addrs[addr]; !ok { + return false + } + } + return true +} + +func toEndpointNode(endpoint Endpoint) endpointNode { + en := make(map[string]struct{}) + for _, addr := range endpoint.Addresses { + en[addr.Addr] = struct{}{} + } + return endpointNode{ + addrs: en, + } +} + +// EndpointMap is a map of endpoints to arbitrary values keyed on only the +// unordered set of address strings within an endpoint. This map is not thread +// safe, thus it is unsafe to access concurrently. Must be created via +// NewEndpointMap; do not construct directly. +type EndpointMap struct { + endpoints map[*endpointNode]any +} + +// NewEndpointMap creates a new EndpointMap. +func NewEndpointMap() *EndpointMap { + return &EndpointMap{ + endpoints: make(map[*endpointNode]any), + } +} + +// Get returns the value for the address in the map, if present. +func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + return em.endpoints[endpoint], true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. +func (em *EndpointMap) Set(e Endpoint, value any) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + em.endpoints[endpoint] = value + return + } + em.endpoints[&en] = value +} + +// Len returns the number of entries in the map. 
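// Editor's sketch (not part of the patch): possible startup configuration
// using the two knobs added in this new public package. Per their doc
// comments, both must be set before any gRPC calls are made; the durations
// chosen here are arbitrary examples.
package main

import (
	"time"

	"google.golang.org/grpc/resolver/dns"
)

func init() {
	// Fail DNS lookups that exceed 10s instead of the 30s default.
	dns.SetResolvingTimeout(10 * time.Second)
	// Rate-limit DNS re-resolution to at most once every 5s.
	dns.SetMinResolutionInterval(5 * time.Second)
}

func main() {} // dial gRPC channels as usual after this point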
+func (em *EndpointMap) Len() int { + return len(em.endpoints) +} + +// Keys returns a slice of all current map keys, as endpoints specifying the +// addresses present in the endpoint keys, in which uniqueness is determined by +// the unordered set of addresses. Thus, endpoint information returned is not +// the full endpoint data (drops duplicated addresses and attributes) but can be +// used for EndpointMap accesses. +func (em *EndpointMap) Keys() []Endpoint { + ret := make([]Endpoint, 0, len(em.endpoints)) + for en := range em.endpoints { + var endpoint Endpoint + for addr := range en.addrs { + endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) + } + ret = append(ret, endpoint) + } + return ret +} + +// Values returns a slice of all current map values. +func (em *EndpointMap) Values() []any { + ret := make([]any, 0, len(em.endpoints)) + for _, val := range em.endpoints { + ret = append(ret, val) + } + return ret +} + +// find returns a pointer to the endpoint node in em if the endpoint node is +// already present. If not found, nil is returned. The comparisons are done on +// the unordered set of addresses within an endpoint. +func (em EndpointMap) find(e endpointNode) *endpointNode { + for endpoint := range em.endpoints { + if e.Equal(endpoint) { + return endpoint + } + } + return nil +} + +// Delete removes the specified endpoint from the map. +func (em *EndpointMap) Delete(e Endpoint) { + en := toEndpointNode(e) + if entry := em.find(en); entry != nil { + delete(em.endpoints, entry) + } +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 11384e228..202854511 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -29,6 +29,7 @@ import ( "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/serviceconfig" ) @@ -63,16 +64,18 @@ func Get(scheme string) Builder { } // SetDefaultScheme sets the default scheme that will be used. The default -// default scheme is "passthrough". +// scheme is initially set to "passthrough". // // NOTE: this function must only be called during initialization time (i.e. in // an init() function), and is not thread-safe. The scheme set last overrides // previously set values. func SetDefaultScheme(scheme string) { defaultScheme = scheme + internal.UserSetDefaultScheme = true } -// GetDefaultScheme gets the default scheme that will be used. +// GetDefaultScheme gets the default scheme that will be used by grpc.Dial. If +// SetDefaultScheme is never called, the default scheme used by grpc.NewClient is "dns" instead. func GetDefaultScheme() string { return defaultScheme } @@ -168,6 +171,9 @@ type BuildOptions struct { // field. In most cases though, it is not appropriate, and this field may // be ignored. Dialer func(context.Context, string) (net.Conn, error) + // Authority is the effective authority of the clientconn for which the + // resolver is built. + Authority string } // An Endpoint is one network endpoint, or server, which may have multiple @@ -240,11 +246,6 @@ type ClientConn interface { // // Deprecated: Use UpdateState instead. NewAddress(addresses []Address) - // NewServiceConfig is called by resolver to notify ClientConn a new - // service config. The service config should be provided as a json string. - // - // Deprecated: Use UpdateState instead. 
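// Editor's sketch (not part of the patch): the EndpointMap added above keys
// entries on the unordered set of address strings, so the same addresses in
// a different order resolve to one entry. Addresses are placeholders.
package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	em := resolver.NewEndpointMap()

	a := resolver.Endpoint{Addresses: []resolver.Address{
		{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"},
	}}
	b := resolver.Endpoint{Addresses: []resolver.Address{
		{Addr: "10.0.0.2:443"}, {Addr: "10.0.0.1:443"}, // same set, new order
	}}

	em.Set(a, "sc-1")
	v, ok := em.Get(b)           // finds the entry stored under a
	fmt.Println(v, ok, em.Len()) // sc-1 true 1

	em.Delete(b) // deletes via the same unordered-set key
	fmt.Println(em.Len()) // 0
}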
- NewServiceConfig(serviceConfig string) // ParseServiceConfig parses the provided service config and returns an // object that provides the parsed config. ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult @@ -286,6 +287,11 @@ func (t Target) Endpoint() string { return strings.TrimPrefix(endpoint, "/") } +// String returns the canonical string representation of Target. +func (t Target) String() string { + return t.URL.Scheme + "://" + t.URL.Host + "/" + t.Endpoint() +} + // Builder creates a resolver that will be used to watch name resolution updates. type Builder interface { // Build creates a new resolver for the given target. @@ -314,3 +320,13 @@ type Resolver interface { // Close closes the resolver. Close() } + +// AuthorityOverrider is implemented by Builders that wish to override the +// default authority for the ClientConn. +// By default, the authority used is target.Endpoint(). +type AuthorityOverrider interface { + // OverrideAuthority returns the authority to use for a ClientConn with the + // given target. The implementation must generate it without blocking, + // typically in line, and must keep it unchanged. + OverrideAuthority(Target) string +} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go deleted file mode 100644 index d68330560..000000000 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ /dev/null @@ -1,247 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "strings" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -// resolverStateUpdater wraps the single method used by ccResolverWrapper to -// report a state update from the actual resolver implementation. -type resolverStateUpdater interface { - updateResolverState(s resolver.State, err error) error -} - -// ccResolverWrapper is a wrapper on top of cc for resolvers. -// It implements resolver.ClientConn interface. -type ccResolverWrapper struct { - // The following fields are initialized when the wrapper is created and are - // read-only afterwards, and therefore can be accessed without a mutex. - cc resolverStateUpdater - channelzID *channelz.Identifier - ignoreServiceConfig bool - opts ccResolverWrapperOpts - serializer *grpcsync.CallbackSerializer // To serialize all incoming calls. - serializerCancel context.CancelFunc // To close the serializer, accessed only from close(). - - // All incoming (resolver --> gRPC) calls are guaranteed to execute in a - // mutually exclusive manner as they are scheduled on the serializer. - // Fields accessed *only* in these serializer callbacks, can therefore be - // accessed without a mutex. 
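// Editor's sketch (not part of the patch): an illustrative resolver.Builder
// that also implements the AuthorityOverrider interface added above. The
// scheme name, backend address, and authority are made up for the example;
// per the interface contract, OverrideAuthority must return without blocking.
package staticresolver

import "google.golang.org/grpc/resolver"

type staticBuilder struct{}

func (staticBuilder) Scheme() string { return "static-example" }

func (staticBuilder) Build(t resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
	// Report one fixed backend address immediately.
	_ = cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:50051"}}})
	return nopResolver{}, nil
}

// OverrideAuthority pins the :authority used by channels built on this scheme.
func (staticBuilder) OverrideAuthority(resolver.Target) string {
	return "my-service.internal.example.com"
}

type nopResolver struct{}

func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (nopResolver) Close()                                {}

func init() { resolver.Register(staticBuilder{}) }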
- curState resolver.State - - // mu guards access to the below fields. - mu sync.Mutex - closed bool - resolver resolver.Resolver // Accessed only from outgoing calls. -} - -// ccResolverWrapperOpts wraps the arguments to be passed when creating a new -// ccResolverWrapper. -type ccResolverWrapperOpts struct { - target resolver.Target // User specified dial target to resolve. - builder resolver.Builder // Resolver builder to use. - bOpts resolver.BuildOptions // Resolver build options to use. - channelzID *channelz.Identifier // Channelz identifier for the channel. -} - -// newCCResolverWrapper uses the resolver.Builder to build a Resolver and -// returns a ccResolverWrapper object which wraps the newly built resolver. -func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { - ctx, cancel := context.WithCancel(context.Background()) - ccr := &ccResolverWrapper{ - cc: cc, - channelzID: opts.channelzID, - ignoreServiceConfig: opts.bOpts.DisableServiceConfig, - opts: opts, - serializer: grpcsync.NewCallbackSerializer(ctx), - serializerCancel: cancel, - } - - // Cannot hold the lock at build time because the resolver can send an - // update or error inline and these incoming calls grab the lock to schedule - // a callback in the serializer. - r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) - if err != nil { - cancel() - return nil, err - } - - // Any error reported by the resolver at build time that leads to a - // re-resolution request from the balancer is dropped by grpc until we - // return from this function. So, we don't have to handle pending resolveNow - // requests here. - ccr.mu.Lock() - ccr.resolver = r - ccr.mu.Unlock() - - return ccr, nil -} - -func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.mu.Lock() - defer ccr.mu.Unlock() - - // ccr.resolver field is set only after the call to Build() returns. But in - // the process of building, the resolver may send an error update which when - // propagated to the balancer may result in a re-resolution request. - if ccr.closed || ccr.resolver == nil { - return - } - ccr.resolver.ResolveNow(o) -} - -func (ccr *ccResolverWrapper) close() { - ccr.mu.Lock() - if ccr.closed { - ccr.mu.Unlock() - return - } - - channelz.Info(logger, ccr.channelzID, "Closing the name resolver") - - // Close the serializer to ensure that no more calls from the resolver are - // handled, before actually closing the resolver. - ccr.serializerCancel() - ccr.closed = true - r := ccr.resolver - ccr.mu.Unlock() - - // Give enqueued callbacks a chance to finish. - <-ccr.serializer.Done() - - // Spawn a goroutine to close the resolver (since it may block trying to - // cleanup all allocated resources) and return early. - go r.Close() -} - -// serializerScheduleLocked is a convenience method to schedule a function to be -// run on the serializer while holding ccr.mu. -func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) { - ccr.mu.Lock() - ccr.serializer.Schedule(f) - ccr.mu.Unlock() -} - -// UpdateState is called by resolver implementations to report new state to gRPC -// which includes addresses and service config. 
-func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { - errCh := make(chan error, 1) - if s.Endpoints == nil { - s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) - for _, a := range s.Addresses { - ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} - ep.Addresses[0].BalancerAttributes = nil - s.Endpoints = append(s.Endpoints, ep) - } - } - ok := ccr.serializer.Schedule(func(context.Context) { - ccr.addChannelzTraceEvent(s) - ccr.curState = s - if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { - errCh <- balancer.ErrBadResolverState - return - } - errCh <- nil - }) - if !ok { - // The only time when Schedule() fail to add the callback to the - // serializer is when the serializer is closed, and this happens only - // when the resolver wrapper is closed. - return nil - } - return <-errCh -} - -// ReportError is called by resolver implementations to report errors -// encountered during name resolution to gRPC. -func (ccr *ccResolverWrapper) ReportError(err error) { - ccr.serializerScheduleLocked(func(_ context.Context) { - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.cc.updateResolverState(resolver.State{}, err) - }) -} - -// NewAddress is called by the resolver implementation to send addresses to -// gRPC. -func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - ccr.serializerScheduleLocked(func(_ context.Context) { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState, nil) - }) -} - -// NewServiceConfig is called by the resolver implementation to send service -// configs to gRPC. -func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - ccr.serializerScheduleLocked(func(_ context.Context) { - channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) - if ccr.ignoreServiceConfig { - channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") - return - } - scpr := parseServiceConfig(sc) - if scpr.Err != nil { - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - return - } - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - ccr.curState.ServiceConfig = scpr - ccr.cc.updateResolverState(ccr.curState, nil) - }) -} - -// ParseServiceConfig is called by resolver implementations to parse a JSON -// representation of the service config. -func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { - return parseServiceConfig(scJSON) -} - -// addChannelzTraceEvent adds a channelz trace event containing the new -// state received from resolver implementations. 
-func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { - var updates []string - var oldSC, newSC *ServiceConfig - var oldOK, newOK bool - if ccr.curState.ServiceConfig != nil { - oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) - } - if s.ServiceConfig != nil { - newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) - } - if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { - updates = append(updates, "service config updated") - } - if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { - updates = append(updates, "resolver returned an empty address list") - } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { - updates = append(updates, "resolver returned new addresses") - } - channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) -} diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go new file mode 100644 index 000000000..c5fb45236 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -0,0 +1,198 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "strings" + "sync" + + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc *ClientConn + ignoreServiceConfig bool + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + resolver resolver.Resolver // only accessed within the serializer + + // The following fields are protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + curState resolver.State + closed bool +} + +// newCCResolverWrapper initializes the ccResolverWrapper. It can only be used +// after calling start, which builds the resolver. +func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { + ctx, cancel := context.WithCancel(cc.ctx) + return &ccResolverWrapper{ + cc: cc, + ignoreServiceConfig: cc.dopts.disableServiceConfig, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } +} + +// start builds the name resolver using the resolver.Builder in cc and returns +// any error encountered. It must always be the first operation performed on +// any newly created ccResolverWrapper, except that close may be called instead. 
+func (ccr *ccResolverWrapper) start() error { + errCh := make(chan error) + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil { + return + } + opts := resolver.BuildOptions{ + DisableServiceConfig: ccr.cc.dopts.disableServiceConfig, + DialCreds: ccr.cc.dopts.copts.TransportCredentials, + CredsBundle: ccr.cc.dopts.copts.CredsBundle, + Dialer: ccr.cc.dopts.copts.Dialer, + Authority: ccr.cc.authority, + } + var err error + ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) + errCh <- err + }) + return <-errCh +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccr.resolver == nil { + return + } + ccr.resolver.ResolveNow(o) + }) +} + +// close initiates async shutdown of the wrapper. To determine the wrapper has +// finished shutting down, the channel should block on ccr.serializer.Done() +// without cc.mu held. +func (ccr *ccResolverWrapper) close() { + channelz.Info(logger, ccr.cc.channelz, "Closing the name resolver") + ccr.mu.Lock() + ccr.closed = true + ccr.mu.Unlock() + + ccr.serializer.Schedule(func(context.Context) { + if ccr.resolver == nil { + return + } + ccr.resolver.Close() + ccr.resolver = nil + }) + ccr.serializerCancel() +} + +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return nil + } + if s.Endpoints == nil { + s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) + for _, a := range s.Addresses { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + s.Endpoints = append(s.Endpoints, ep) + } + } + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + return ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC. +func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + ccr.mu.Unlock() + channelz.Warningf(logger, ccr.cc.channelz, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err) +} + +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig} + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON, ccr.cc.dopts.maxCallAttempts) +} + +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. 
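// Editor's sketch (not part of the patch): the new wrapper above funnels all
// resolver-to-gRPC callbacks through grpcsync.CallbackSerializer, which is
// internal to grpc-go. This is a rough, self-contained rendering of that
// idea: callbacks run one at a time, in order, on a single goroutine, and a
// done channel reports when the serializer has stopped after cancellation.
package main

import (
	"context"
	"fmt"
)

type serializer struct {
	callbacks chan func(context.Context)
	done      chan struct{}
}

func newSerializer(ctx context.Context) *serializer {
	s := &serializer{
		callbacks: make(chan func(context.Context), 64),
		done:      make(chan struct{}),
	}
	go func() {
		defer close(s.done)
		for {
			select {
			case cb := <-s.callbacks:
				cb(ctx)
			case <-ctx.Done():
				return // drop anything still queued
			}
		}
	}()
	return s
}

func (s *serializer) schedule(cb func(context.Context)) { s.callbacks <- cb }

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	s := newSerializer(ctx)
	fin := make(chan struct{})
	for i := 0; i < 3; i++ {
		i := i
		s.schedule(func(context.Context) { fmt.Println("callback", i) })
	}
	s.schedule(func(context.Context) { close(fin) })
	<-fin    // all four callbacks ran, strictly in order
	cancel() // shut the serializer down
	<-s.done
}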
+func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index b7723aa09..fdd49e6e9 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -189,6 +189,20 @@ type EmptyCallOption struct{} func (EmptyCallOption) before(*callInfo) error { return nil } func (EmptyCallOption) after(*callInfo, *csAttempt) {} +// StaticMethod returns a CallOption which specifies that a call is being made +// to a method that is static, which means the method is known at compile time +// and doesn't change at runtime. This can be used as a signal to stats plugins +// that this method is safe to include as a key to a measurement. +func StaticMethod() CallOption { + return StaticMethodCallOption{} +} + +// StaticMethodCallOption is a CallOption that specifies that a call comes +// from a static method. +type StaticMethodCallOption struct { + EmptyCallOption +} + // Header returns a CallOptions that retrieves the header metadata // for a unary RPC. func Header(md *metadata.MD) CallOption { @@ -640,14 +654,18 @@ func encode(c baseCodec, msg any) ([]byte, error) { return b, nil } -// compress returns the input bytes compressed by compressor or cp. If both -// compressors are nil, returns nil. +// compress returns the input bytes compressed by compressor or cp. +// If both compressors are nil, or if the message has zero length, returns nil, +// indicating no compression was done. // // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { if compressor == nil && cp == nil { return nil, nil } + if len(in) == 0 { + return nil, nil + } wrapErr := func(err error) error { return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } @@ -726,17 +744,19 @@ type payloadInfo struct { uncompressedBytes []byte } -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { - pf, buf, err := p.recvMsg(maxReceiveMessageSize) +// recvAndDecompress reads a message from the stream, decompressing it if necessary. +// +// Cancelling the returned cancel function releases the buffer back to the pool. So the caller should cancel as soon as +// the buffer is no longer needed. 
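// Editor's sketch (not part of the patch): passing the new StaticMethod
// CallOption shown above on a hand-written invocation, signalling to stats
// plugins that the method name is a compile-time constant. The service path
// and message types here are placeholders, not a real API.
package staticcall

import (
	"context"

	"google.golang.org/grpc"
)

func callUnaryEcho(ctx context.Context, cc *grpc.ClientConn, req, resp any) error {
	return cc.Invoke(ctx, "/example.Echo/UnaryEcho", req, resp, grpc.StaticMethod())
}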
+func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, +) (uncompressedBuf []byte, cancel func(), err error) { + pf, compressedBuf, err := p.recvMsg(maxReceiveMessageSize) if err != nil { - return nil, err - } - if payInfo != nil { - payInfo.compressedLength = len(buf) + return nil, nil, err } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { - return nil, st.Err() + return nil, nil, st.Err() } var size int @@ -744,21 +764,35 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - buf, err = dc.Do(bytes.NewReader(buf)) - size = len(buf) + uncompressedBuf, err = dc.Do(bytes.NewReader(compressedBuf)) + size = len(uncompressedBuf) } else { - buf, size, err = decompress(compressor, buf, maxReceiveMessageSize) + uncompressedBuf, size, err = decompress(compressor, compressedBuf, maxReceiveMessageSize) } if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + return nil, nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if size > maxReceiveMessageSize { // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } + } else { + uncompressedBuf = compressedBuf } - return buf, nil + + if payInfo != nil { + payInfo.compressedLength = len(compressedBuf) + payInfo.uncompressedBytes = uncompressedBuf + + cancel = func() {} + } else { + cancel = func() { + p.recvBufferPool.Put(&compressedBuf) + } + } + + return uncompressedBuf, cancel, nil } // Using compressor, decompress d, returning data and size. @@ -778,6 +812,9 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize // size is used as an estimate to size the buffer, but we // will read more data if available. // +MinRead so ReadFrom will not reallocate if size is correct. + // + // TODO: If we ensure that the buffer size is the same as the DecompressedSize, + // we can also utilize the recv buffer pool here. buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) return buf.Bytes(), int(bytesRead), err @@ -793,18 +830,15 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? 
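// Editor's sketch (not part of the patch): a generic rendering of the
// cancel-returns-buffer idiom recvAndDecompress adopts above. The callee
// hands back the data plus a release function; the caller defers the release
// until the data has been fully consumed, at which point the backing buffer
// can safely go back to the pool.
package main

import (
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() any { b := make([]byte, 0, 1024); return &b },
}

// read copies payload into a pooled buffer and returns it with its releaser.
func read(payload string) (data []byte, release func()) {
	bp := bufPool.Get().(*[]byte)
	buf := append((*bp)[:0], payload...)
	return buf, func() {
		*bp = buf[:0]
		bufPool.Put(bp) // recycle only once the caller is done
	}
}

func main() {
	data, release := read("hello")
	defer release() // safe: runs after the final use of data below
	fmt.Println(string(data))
}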
func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) + buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) if err != nil { return err } + defer cancel() + if err := c.Unmarshal(buf, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } - if payInfo != nil { - payInfo.uncompressedBytes = buf - } else { - p.recvBufferPool.Put(&buf) - } return nil } @@ -928,22 +962,9 @@ func setCallInfoCodec(c *callInfo) error { return nil } -// channelzData is used to store channelz related data for ClientConn, addrConn and Server. -// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic -// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. -// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. -type channelzData struct { - callsStarted int64 - callsFailed int64 - callsSucceeded int64 - // lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of - // time.Time since it's more costly to atomically update time.Time variable than int64 variable. - lastCallStartedTime int64 -} - // The SupportPackageIsVersion variables are referenced from generated protocol // buffer files to ensure compatibility with the gRPC version used. The latest -// support package version is 7. +// support package version is 9. // // Older versions are kept for compatibility. // @@ -954,6 +975,8 @@ const ( SupportPackageIsVersion5 = true SupportPackageIsVersion6 = true SupportPackageIsVersion7 = true + SupportPackageIsVersion8 = true + SupportPackageIsVersion9 = true ) const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index eeae92fbe..89f8e4792 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -33,8 +33,6 @@ import ( "sync/atomic" "time" - "golang.org/x/net/trace" - "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/encoding" @@ -70,9 +68,10 @@ func init() { internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { return srv.opts.creds } - internal.DrainServerTransports = func(srv *Server, addr string) { - srv.drainServerTransports(addr) + internal.IsRegisteredMethod = func(srv *Server, method string) bool { + return srv.isRegisteredMethod(method) } + internal.ServerFromContext = serverFromContext internal.AddGlobalServerOptions = func(opt ...ServerOption) { globalServerOptions = append(globalServerOptions, opt...) 
} @@ -81,6 +80,7 @@ func init() { } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption + internal.RecvBufferPool = recvBufferPool } var statusOK = status.New(codes.OK, "") @@ -129,17 +129,18 @@ type Server struct { drain bool cv *sync.Cond // signaled when connections close for GracefulStop services map[string]*serviceInfo // service name -> service info - events trace.EventLog + events traceEventLog quit *grpcsync.Event done *grpcsync.Event channelzRemoveOnce sync.Once - serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop + serveWG sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop + handlersWG sync.WaitGroup // counts active method handler goroutines - channelzID *channelz.Identifier - czData *channelzData + channelz *channelz.Server - serverWorkerChannel chan func() + serverWorkerChannel chan func() + serverWorkerChannelClose func() } type serverOptions struct { @@ -170,6 +171,7 @@ type serverOptions struct { headerTableSize *uint32 numServerWorkers uint32 recvBufferPool SharedBufferPool + waitForHandlers bool } var defaultServerOptions = serverOptions{ @@ -246,11 +248,9 @@ func SharedWriteBuffer(val bool) ServerOption { } // WriteBufferSize determines how much data can be batched before doing a write -// on the wire. The corresponding memory allocation for this buffer will be -// twice the size to keep syscalls low. The default value for this buffer is -// 32KB. Zero or negative values will disable the write buffer such that each -// write will be on underlying connection. -// Note: A Send call may not directly translate to a write. +// on the wire. The default value for this buffer is 32KB. Zero or negative +// values will disable the write buffer such that each write will be on underlying +// connection. Note: A Send call may not directly translate to a write. func WriteBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.writeBufferSize = s @@ -527,12 +527,22 @@ func ConnectionTimeout(d time.Duration) ServerOption { }) } +// MaxHeaderListSizeServerOption is a ServerOption that sets the max +// (uncompressed) size of header list that the server is prepared to accept. +type MaxHeaderListSizeServerOption struct { + MaxHeaderListSize uint32 +} + +func (o MaxHeaderListSizeServerOption) apply(so *serverOptions) { + so.maxHeaderListSize = &o.MaxHeaderListSize +} + // MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size // of header list that the server is prepared to accept. func MaxHeaderListSize(s uint32) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.maxHeaderListSize = &s - }) + return MaxHeaderListSizeServerOption{ + MaxHeaderListSize: s, + } } // HeaderTableSize returns a ServerOption that sets the size of dynamic @@ -567,6 +577,21 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { }) } +// WaitForHandlers cause Stop to wait until all outstanding method handlers have +// exited before returning. If false, Stop will return as soon as all +// connections have closed, but method handlers may still be running. By +// default, Stop does not wait for method handlers to return. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func WaitForHandlers(w bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.waitForHandlers = w + }) +} + // RecvBufferPool returns a ServerOption that configures the server // to use the provided shared buffer pool for parsing incoming messages. Depending // on the application's workload, this could result in reduced memory allocation. @@ -578,11 +603,13 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { // options are used: StatsHandler, EnableTracing, or binary logging. In such // cases, the shared buffer pool will be ignored. // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { + return recvBufferPool(bufferPool) +} + +func recvBufferPool(bufferPool SharedBufferPool) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.recvBufferPool = bufferPool }) @@ -616,15 +643,14 @@ func (s *Server) serverWorker() { // connections to reduce the time spent overall on runtime.morestack. func (s *Server) initServerWorkers() { s.serverWorkerChannel = make(chan func()) + s.serverWorkerChannelClose = grpcsync.OnceFunc(func() { + close(s.serverWorkerChannel) + }) for i := uint32(0); i < s.opts.numServerWorkers; i++ { go s.serverWorker() } } -func (s *Server) stopServerWorkers() { - close(s.serverWorkerChannel) -} - // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { @@ -642,22 +668,21 @@ func NewServer(opt ...ServerOption) *Server { services: make(map[string]*serviceInfo), quit: grpcsync.NewEvent(), done: grpcsync.NewEvent(), - czData: new(channelzData), + channelz: channelz.RegisterServer(""), } chainUnaryServerInterceptors(s) chainStreamServerInterceptors(s) s.cv = sync.NewCond(&s.mu) if EnableTracing { _, file, line, _ := runtime.Caller(1) - s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + s.events = newTraceEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) } if s.opts.numServerWorkers > 0 { s.initServerWorkers() } - s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") - channelz.Info(logger, s.channelzID, "Server created") + channelz.Info(logger, s.channelz, "Server created") return s } @@ -783,20 +808,13 @@ var ErrServerStopped = errors.New("grpc: the server has been stopped") type listenSocket struct { net.Listener - channelzID *channelz.Identifier -} - -func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { - return &channelz.SocketInternalMetric{ - SocketOptions: channelz.GetSocketOption(l.Listener), - LocalAddr: l.Listener.Addr(), - } + channelz *channelz.Socket } func (l *listenSocket) Close() error { err := l.Listener.Close() - channelz.RemoveEntry(l.channelzID) - channelz.Info(logger, l.channelzID, "ListenSocket deleted") + channelz.RemoveEntry(l.channelz.ID) + channelz.Info(logger, l.channelz, "ListenSocket deleted") return err } @@ -806,6 +824,18 @@ func (l *listenSocket) Close() error { // Serve returns when lis.Accept fails with fatal errors. lis will be closed when // this method returns. // Serve will return a non-nil error unless Stop or GracefulStop is called. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. 
To enable TCP keepalive +// with OS defaults for keepalive time and interval, callers need to do the +// following two things: +// - pass a net.Listener created by calling the Listen method on a +// net.ListenConfig with the `KeepAlive` field set to a negative value. This +// will result in the Go standard library not overriding OS defaults for TCP +// keepalive interval and time. But this will also result in the Go standard +// library not enabling TCP keepalives by default. +// - override the Accept method on the passed in net.Listener and set the +// SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults. func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("serving") @@ -826,7 +856,16 @@ func (s *Server) Serve(lis net.Listener) error { } }() - ls := &listenSocket{Listener: lis} + ls := &listenSocket{ + Listener: lis, + channelz: channelz.RegisterSocket(&channelz.Socket{ + SocketType: channelz.SocketTypeListen, + Parent: s.channelz, + RefName: lis.Addr().String(), + LocalAddr: lis.Addr(), + SocketOptions: channelz.GetSocketOption(lis)}, + ), + } s.lis[ls] = true defer func() { @@ -838,14 +877,8 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Unlock() }() - var err error - ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) - if err != nil { - s.mu.Unlock() - return err - } s.mu.Unlock() - channelz.Info(logger, ls.channelzID, "ListenSocket created") + channelz.Info(logger, ls.channelz, "ListenSocket created") var tempDelay time.Duration // how long to sleep on accept failure for { @@ -913,24 +946,21 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { return } + if cc, ok := rawConn.(interface { + PassServerTransport(transport.ServerTransport) + }); ok { + cc.PassServerTransport(st) + } + if !s.addConn(lisAddr, st) { return } go func() { - s.serveStreams(st) + s.serveStreams(context.Background(), st, rawConn) s.removeConn(lisAddr, st) }() } -func (s *Server) drainServerTransports(addr string) { - s.mu.Lock() - conns := s.conns[addr] - for st := range conns { - st.Drain("") - } - s.mu.Unlock() -} - // newHTTP2Transport sets up a http/2 transport (using the // gRPC http2 server transport in transport/http2_server.go). func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { @@ -947,7 +977,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { WriteBufferSize: s.opts.writeBufferSize, ReadBufferSize: s.opts.readBufferSize, SharedWriteBuffer: s.opts.sharedWriteBuffer, - ChannelzParentID: s.channelzID, + ChannelzParent: s.channelz, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, } @@ -961,7 +991,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { if err != credentials.ErrConnDispatched { // Don't log on ErrConnDispatched and io.EOF to prevent log spam. 
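// Editor's sketch (not part of the patch): the listener setup described in
// the Serve doc comment above, end to end. A negative KeepAlive stops the Go
// standard library from overriding OS keepalive defaults; the Accept wrapper
// then re-enables keepalives explicitly so OS-default timing applies. The
// listen address is a placeholder.
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
)

type keepaliveListener struct{ net.Listener }

func (l keepaliveListener) Accept() (net.Conn, error) {
	c, err := l.Listener.Accept()
	if err != nil {
		return nil, err
	}
	if tc, ok := c.(*net.TCPConn); ok {
		// SO_KEEPALIVE on, with OS-default time and interval.
		if err := tc.SetKeepAlive(true); err != nil {
			c.Close()
			return nil, err
		}
	}
	return c, nil
}

func main() {
	lc := net.ListenConfig{KeepAlive: -1} // don't override OS defaults
	lis, err := lc.Listen(context.Background(), "tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	log.Fatal(s.Serve(keepaliveListener{lis}))
}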
if err != io.EOF { - channelz.Info(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + channelz.Info(logger, s.channelz, "grpc: Server.Serve failed to create ServerTransport: ", err) } c.Close() } @@ -971,19 +1001,32 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { return st } -func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close(errors.New("finished serving streams for the server transport")) - var wg sync.WaitGroup +func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) { + ctx = transport.SetConnection(ctx, rawConn) + ctx = peer.NewContext(ctx, st.Peer()) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagConn(ctx, &stats.ConnTagInfo{ + RemoteAddr: st.Peer().Addr, + LocalAddr: st.Peer().LocalAddr, + }) + sh.HandleConn(ctx, &stats.ConnBegin{}) + } - streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) - st.HandleStreams(func(stream *transport.Stream) { - wg.Add(1) + defer func() { + st.Close(errors.New("finished serving streams for the server transport")) + for _, sh := range s.opts.statsHandlers { + sh.HandleConn(ctx, &stats.ConnEnd{}) + } + }() + streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) + st.HandleStreams(ctx, func(stream *transport.Stream) { + s.handlersWG.Add(1) streamQuota.acquire() f := func() { defer streamQuota.release() - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) + defer s.handlersWG.Done() + s.handleStream(st, stream) } if s.opts.numServerWorkers > 0 { @@ -995,14 +1038,7 @@ func (s *Server) serveStreams(st transport.ServerTransport) { } } go f() - }, func(ctx context.Context, method string) context.Context { - if !EnableTracing { - return ctx - } - tr := trace.New("grpc.Recv."+methodFamily(method), method) - return trace.NewContext(ctx, tr) }) - wg.Wait() } var _ http.Handler = (*Server)(nil) @@ -1046,31 +1082,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } defer s.removeConn(listenerAddressForServeHTTP, st) - s.serveStreams(st) -} - -// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. -// If tracing is not enabled, it returns nil. 
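// Editor's sketch (not part of the patch): a minimal stats.Handler, showing
// the connection-level callbacks (TagConn/HandleConn) that the rewritten
// serveStreams above now invokes alongside the per-RPC ones. Logging only;
// no real metrics. Install it with grpc.NewServer(grpc.StatsHandler(logHandler{})).
package connstats

import (
	"context"
	"log"

	"google.golang.org/grpc/stats"
)

type logHandler struct{}

func (logHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
	log.Printf("conn from %v", info.RemoteAddr)
	return ctx
}

func (logHandler) HandleConn(_ context.Context, s stats.ConnStats) {
	switch s.(type) {
	case *stats.ConnBegin:
		log.Print("conn begin")
	case *stats.ConnEnd:
		log.Print("conn end")
	}
}

func (logHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }
func (logHandler) HandleRPC(_ context.Context, _ stats.RPCStats)                    {}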
-func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { - if !EnableTracing { - return nil - } - tr, ok := trace.FromContext(stream.Context()) - if !ok { - return nil - } - - trInfo = &traceInfo{ - tr: tr, - firstLine: firstLine{ - client: false, - remoteAddr: st.RemoteAddr(), - }, - } - if dl, ok := stream.Context().Deadline(); ok { - trInfo.firstLine.deadline = time.Until(dl) - } - return trInfo + s.serveStreams(r.Context(), st, nil) } func (s *Server) addConn(addr string, st transport.ServerTransport) bool { @@ -1111,37 +1123,28 @@ func (s *Server) removeConn(addr string, st transport.ServerTransport) { } } -func (s *Server) channelzMetric() *channelz.ServerInternalMetric { - return &channelz.ServerInternalMetric{ - CallsStarted: atomic.LoadInt64(&s.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&s.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)), - } -} - func (s *Server) incrCallsStarted() { - atomic.AddInt64(&s.czData.callsStarted, 1) - atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano()) + s.channelz.ServerMetrics.CallsStarted.Add(1) + s.channelz.ServerMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano()) } func (s *Server) incrCallsSucceeded() { - atomic.AddInt64(&s.czData.callsSucceeded, 1) + s.channelz.ServerMetrics.CallsSucceeded.Add(1) } func (s *Server) incrCallsFailed() { - atomic.AddInt64(&s.czData.callsFailed, 1) + s.channelz.ServerMetrics.CallsFailed.Add(1) } -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { - channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) + channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) return err } compData, err := compress(data, cp, comp) if err != nil { - channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err) + channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err) return err } hdr, payload := msgHeader(data, compData) @@ -1152,7 +1155,7 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str err = t.Write(stream, hdr, payload, opts) if err == nil { for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) } } return err @@ -1194,7 +1197,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info } } -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1208,7 +1211,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
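// Editor's sketch (not part of the patch): the counter shape the channelz
// migration above moves to. atomic.Int64 fields replace the old hand-aligned
// channelzData struct, whose bare int64 fields needed careful placement for
// 64-bit atomics on 32-bit platforms. Names here are illustrative.
package callstats

import (
	"sync/atomic"
	"time"
)

type callMetrics struct {
	started   atomic.Int64
	succeeded atomic.Int64
	failed    atomic.Int64
	// Unix nanos; cheaper to update atomically than a time.Time.
	lastCallStarted atomic.Int64
}

func (m *callMetrics) begin() {
	m.started.Add(1)
	m.lastCallStarted.Store(time.Now().UnixNano())
}

func (m *callMetrics) end(err error) {
	if err != nil {
		m.failed.Add(1)
		return
	}
	m.succeeded.Add(1)
}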
IsClientStream: false, IsServerStream: false, } - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) @@ -1240,7 +1243,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } if channelz.IsOn() { @@ -1262,7 +1265,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } } if len(binlogs) != 0 { - ctx := stream.Context() md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1333,10 +1335,11 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } - d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + + d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } @@ -1344,11 +1347,13 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. t.IncrMsgRecv() } df := func(v any) error { + defer cancel() + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } for _, sh := range shs { - sh.HandleRPC(stream.Context(), &stats.InPayload{ + sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, Length: len(d), @@ -1362,7 +1367,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: d, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), cm) + binlog.Log(ctx, cm) } } if trInfo != nil { @@ -1370,7 +1375,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } return nil } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) if appErr != nil { appStatus, ok := status.FromError(appErr) @@ -1385,7 +1390,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. trInfo.tr.SetError() } if e := t.WriteStatus(stream, appStatus); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } if len(binlogs) != 0 { if h, _ := stream.Header(); h.Len() > 0 { @@ -1395,7 +1400,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Header: h, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) + binlog.Log(ctx, sh) } } st := &binarylog.ServerTrailer{ @@ -1403,7 +1408,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return appErr @@ -1418,14 +1423,14 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if stream.SendCompress() != sendCompressorName { comp = encoding.GetCompressor(stream.SendCompress()) } - if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err } if sts, ok := status.FromError(err); ok { if e := t.WriteStatus(stream, sts); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } } else { switch st := err.(type) { @@ -1445,8 +1450,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), st) + binlog.Log(ctx, sh) + binlog.Log(ctx, st) } } return err @@ -1460,8 +1465,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: reply, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), sm) + binlog.Log(ctx, sh) + binlog.Log(ctx, sm) } } if channelz.IsOn() { @@ -1479,7 +1484,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return t.WriteStatus(stream, statusOK) @@ -1521,7 +1526,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf } } -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1535,10 +1540,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp IsServerStream: sd.ServerStreams, } for _, sh := range shs { - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, t: t, @@ -1574,7 +1579,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp end.Error = toRPCErr(err) } for _, sh := range shs { - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } } @@ -1616,7 +1621,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp logEntry.PeerAddr = peer.Addr } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), logEntry) + binlog.Log(ctx, logEntry) } } @@ -1694,7 +1699,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } t.WriteStatus(ss.s, appStatus) @@ -1712,53 +1717,87 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, 
st) } } return t.WriteStatus(ss.s, statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { + ctx := stream.Context() + ctx = contextWithServer(ctx, s) + var ti *traceInfo + if EnableTracing { + tr := newTrace("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) + ctx = newTraceContext(ctx, tr) + ti = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: t.Peer().Addr, + }, + } + if dl, ok := ctx.Deadline(); ok { + ti.firstLine.deadline = time.Until(dl) + } + } + sm := stream.Method() if sm != "" && sm[0] == '/' { sm = sm[1:] } pos := strings.LastIndex(sm, "/") if pos == -1 { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) + ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } - channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } return } service := sm[:pos] method := sm[pos+1:] + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) + } + // To have calls in stream callouts work. Will delete once all stats handler + // calls come from the gRPC layer. + stream.SetContext(ctx) + srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(t, stream, srv, md, trInfo) + s.processUnaryRPC(ctx, t, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(t, stream, srv, sd, trInfo) + s.processStreamingRPC(ctx, t, stream, srv, sd, ti) return } } // Unknown service, or known server unknown method. 
if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) return } var errDesc string @@ -1767,19 +1806,19 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } else { errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) } - if trInfo != nil { - trInfo.tr.LazyPrintf("%s", errDesc) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyPrintf("%s", errDesc) + ti.tr.SetError() } if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } - channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } } @@ -1834,62 +1873,71 @@ func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream // pending RPCs on the client side will get notified by connection // errors. func (s *Server) Stop() { - s.quit.Fire() + s.stop(false) +} - defer func() { - s.serveWG.Wait() - s.done.Fire() - }() +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. +func (s *Server) GracefulStop() { + s.stop(true) +} - s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) +func (s *Server) stop(graceful bool) { + s.quit.Fire() + defer s.done.Fire() + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelz.ID) }) s.mu.Lock() - listeners := s.lis - s.lis = nil - conns := s.conns - s.conns = nil - // interrupt GracefulStop if Stop and GracefulStop are called concurrently. - s.cv.Broadcast() + s.closeListenersLocked() + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. s.mu.Unlock() + s.serveWG.Wait() - for lis := range listeners { - lis.Close() + s.mu.Lock() + defer s.mu.Unlock() + + if graceful { + s.drainAllServerTransportsLocked() + } else { + s.closeServerTransportsLocked() } - for _, cs := range conns { - for st := range cs { - st.Close(errors.New("Server.Stop called")) - } + + for len(s.conns) != 0 { + s.cv.Wait() } + s.conns = nil + if s.opts.numServerWorkers > 0 { - s.stopServerWorkers() + // Closing the channel (only once, via grpcsync.OnceFunc) after all the + // connections have been closed above ensures that there are no + // goroutines executing the callback passed to st.HandleStreams (where + // the channel is written to). + s.serverWorkerChannelClose() + } + + if graceful || s.opts.waitForHandlers { + s.handlersWG.Wait() } - s.mu.Lock() if s.events != nil { s.events.Finish() s.events = nil } - s.mu.Unlock() } -// GracefulStop stops the gRPC server gracefully. It stops the server from -// accepting new connections and RPCs and blocks until all the pending RPCs are -// finished. -func (s *Server) GracefulStop() { - s.quit.Fire() - defer s.done.Fire() - - s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) - s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - return +// s.mu must be held by the caller. 
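The Stop/GracefulStop unification above does not change the public contract; a typical caller-side shutdown pattern remains the same (a sketch, assuming srv is a running *grpc.Server):

package stopexample

import (
	"time"

	"google.golang.org/grpc"
)

func shutdown(srv *grpc.Server, timeout time.Duration) {
	done := make(chan struct{})
	go func() {
		srv.GracefulStop() // drains: refuses new RPCs, waits for pending ones
		close(done)
	}()
	select {
	case <-done:
	case <-time.After(timeout):
		srv.Stop() // hard stop: closes all listeners and transports immediately
	}
}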
+func (s *Server) closeServerTransportsLocked() { + for _, conns := range s.conns { + for st := range conns { + st.Close(errors.New("Server.Stop called")) + } } +} - for lis := range s.lis { - lis.Close() - } - s.lis = nil +// s.mu must be held by the caller. +func (s *Server) drainAllServerTransportsLocked() { if !s.drain { for _, conns := range s.conns { for st := range conns { @@ -1898,22 +1946,14 @@ func (s *Server) GracefulStop() { } s.drain = true } +} - // Wait for serving threads to be ready to exit. Only then can we be sure no - // new conns will be created. - s.mu.Unlock() - s.serveWG.Wait() - s.mu.Lock() - - for len(s.conns) != 0 { - s.cv.Wait() - } - s.conns = nil - if s.events != nil { - s.events.Finish() - s.events = nil +// s.mu must be held by the caller. +func (s *Server) closeListenersLocked() { + for lis := range s.lis { + lis.Close() } - s.mu.Unlock() + s.lis = nil } // contentSubtype must be lowercase @@ -1927,11 +1967,50 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { } codec := encoding.GetCodec(contentSubtype) if codec == nil { + logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) return encoding.GetCodec(proto.Name) } return codec } +type serverKey struct{} + +// serverFromContext gets the Server from the context. +func serverFromContext(ctx context.Context) *Server { + s, _ := ctx.Value(serverKey{}).(*Server) + return s +} + +// contextWithServer sets the Server in the context. +func contextWithServer(ctx context.Context, server *Server) context.Context { + return context.WithValue(ctx, serverKey{}, server) +} + +// isRegisteredMethod returns whether the passed in method is registered as a +// method on the server. /service/method and service/method will match if the +// service and method are registered on the server. +func (s *Server) isRegisteredMethod(serviceMethod string) bool { + if serviceMethod != "" && serviceMethod[0] == '/' { + serviceMethod = serviceMethod[1:] + } + pos := strings.LastIndex(serviceMethod, "/") + if pos == -1 { // Invalid method name syntax. + return false + } + service := serviceMethod[:pos] + method := serviceMethod[pos+1:] + srv, knownService := s.services[service] + if knownService { + if _, ok := srv.methods[method]; ok { + return true + } + if _, ok := srv.streams[method]; ok { + return true + } + } + return false +} + // SetHeader sets the header metadata to be sent from the server to the client. // The context provided must be the context passed to the server's handler. // @@ -2033,7 +2112,7 @@ func ClientSupportedCompressors(ctx context.Context) ([]string, error) { return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) } - return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil + return stream.ClientAdvertisedCompressors(), nil } // SetTrailer sets the trailer metadata that will be sent when an RPC returns. @@ -2063,17 +2142,9 @@ func Method(ctx context.Context) (string, bool) { return s.Method(), true } -type channelzServer struct { - s *Server -} - -func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { - return c.s.channelzMetric() -} - // validateSendCompressor returns an error when given compressor name cannot be // handled by the server or the client based on the advertised compressors. 
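The unknownStreamDesc branch above is the hook that grpc.UnknownServiceHandler installs; a hedged sketch of a catch-all handler (the rejection logic is illustrative only):

package unknownexample

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func newServer() *grpc.Server {
	return grpc.NewServer(grpc.UnknownServiceHandler(func(srv any, stream grpc.ServerStream) error {
		// Invoked for every method not found in s.services; processStreamingRPC
		// runs it with a nil serviceInfo, exactly as in the branch above.
		method, _ := grpc.MethodFromServerStream(stream)
		return status.Errorf(codes.Unimplemented, "no route for %q", method)
	}))
}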
-func validateSendCompressor(name, clientCompressors string) error { +func validateSendCompressor(name string, clientCompressors []string) error { if name == encoding.Identity { return nil } @@ -2082,7 +2153,7 @@ func validateSendCompressor(name, clientCompressors string) error { return fmt.Errorf("compressor not registered %q", name) } - for _, c := range strings.Split(clientCompressors, ",") { + for _, c := range clientCompressors { if c == name { return nil // found match } diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 0df11fc09..2671c5ef6 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -25,8 +25,11 @@ import ( "reflect" "time" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancer/gracefulswitch" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" ) @@ -41,11 +44,6 @@ const maxInt = int(^uint(0) >> 1) // https://github.com/grpc/grpc/blob/master/doc/service_config.md type MethodConfig = internalserviceconfig.MethodConfig -type lbConfig struct { - name string - cfg serviceconfig.LoadBalancingConfig -} - // ServiceConfig is provided by the service provider and contains parameters for how // clients that connect to the service should behave. // @@ -55,14 +53,9 @@ type lbConfig struct { type ServiceConfig struct { serviceconfig.Config - // LB is the load balancer the service providers recommends. This is - // deprecated; lbConfigs is preferred. If lbConfig and LB are both present, - // lbConfig will be used. - LB *string - // lbConfig is the service config's load balancing configuration. If // lbConfig and LB are both present, lbConfig will be used. - lbConfig *lbConfig + lbConfig serviceconfig.LoadBalancingConfig // Methods contains a map for the methods in this service. If there is an // exact match for a method (i.e. /service/method) in the map, use the @@ -164,38 +157,55 @@ type jsonMC struct { // TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
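The caller-facing effect of the []string change above, as a sketch: ClientSupportedCompressors now yields the advertised names as a slice, so a handler can negotiate the response compressor without splitting strings itself. Both helpers are experimental APIs in this release, and the negotiation policy below is hypothetical:

package compressexample

import (
	"context"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the gzip compressor
)

func preferGzip(ctx context.Context) error {
	names, err := grpc.ClientSupportedCompressors(ctx)
	if err != nil {
		return err
	}
	for _, name := range names {
		if name == "gzip" {
			return grpc.SetSendCompressor(ctx, "gzip")
		}
	}
	return nil // identity: the client did not advertise gzip
}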
type jsonSC struct { LoadBalancingPolicy *string - LoadBalancingConfig *internalserviceconfig.BalancerConfig + LoadBalancingConfig *json.RawMessage MethodConfig *[]jsonMC RetryThrottling *retryThrottlingPolicy HealthCheckConfig *healthCheckConfig } func init() { - internal.ParseServiceConfig = parseServiceConfig + internal.ParseServiceConfig = func(js string) *serviceconfig.ParseResult { + return parseServiceConfig(js, defaultMaxCallAttempts) + } } -func parseServiceConfig(js string) *serviceconfig.ParseResult { +func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { if len(js) == 0 { return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} } var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { - logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) + logger.Warningf("grpc: unmarshalling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } sc := ServiceConfig{ - LB: rsc.LoadBalancingPolicy, Methods: make(map[string]MethodConfig), retryThrottling: rsc.RetryThrottling, healthCheckConfig: rsc.HealthCheckConfig, rawJSONString: js, } - if c := rsc.LoadBalancingConfig; c != nil { - sc.lbConfig = &lbConfig{ - name: c.Name, - cfg: c.Config, + c := rsc.LoadBalancingConfig + if c == nil { + name := pickfirst.Name + if rsc.LoadBalancingPolicy != nil { + name = *rsc.LoadBalancingPolicy + } + if balancer.Get(name) == nil { + name = pickfirst.Name + } + cfg := []map[string]any{{name: struct{}{}}} + strCfg, err := json.Marshal(cfg) + if err != nil { + return &serviceconfig.ParseResult{Err: fmt.Errorf("unexpected error marshaling simple LB config: %w", err)} } + r := json.RawMessage(strCfg) + c = &r } + cfg, err := gracefulswitch.ParseConfig(*c) + if err != nil { + return &serviceconfig.ParseResult{Err: err} + } + sc.lbConfig = cfg if rsc.MethodConfig == nil { return &serviceconfig.ParseResult{Config: &sc} @@ -211,8 +221,8 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { WaitForReady: m.WaitForReady, Timeout: (*time.Duration)(m.Timeout), } - if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { - logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) + if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy, maxAttempts); err != nil { + logger.Warningf("grpc: unmarshalling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } if m.MaxRequestMessageBytes != nil { @@ -232,13 +242,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { for i, n := range *m.Name { path, err := n.generatePath() if err != nil { - logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } if _, ok := paths[path]; ok { err = errDuplicatedName - logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } paths[path] = struct{}{} @@ -257,7 +267,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { return &serviceconfig.ParseResult{Config: &sc} } -func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) { +func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p 
*internalserviceconfig.RetryPolicy, err error) { if jrp == nil { return nil, nil } @@ -271,17 +281,16 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol return nil, nil } + if jrp.MaxAttempts < maxAttempts { + maxAttempts = jrp.MaxAttempts + } rp := &internalserviceconfig.RetryPolicy{ - MaxAttempts: jrp.MaxAttempts, + MaxAttempts: maxAttempts, InitialBackoff: time.Duration(jrp.InitialBackoff), MaxBackoff: time.Duration(jrp.MaxBackoff), BackoffMultiplier: jrp.BackoffMultiplier, RetryableStatusCodes: make(map[codes.Code]bool), } - if rp.MaxAttempts > 5 { - // TODO(retry): Make the max maxAttempts configurable. - rp.MaxAttempts = 5 - } for _, code := range jrp.RetryableStatusCodes { rp.RetryableStatusCodes[code] = true } diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 4ab70e2d4..fdb0bd651 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -73,9 +73,12 @@ func (*PickerUpdated) isRPCStats() {} type InPayload struct { // Client is true if this InPayload is from client side. Client bool - // Payload is the payload with original type. + // Payload is the payload with original type. This may be modified after + // the call to HandleRPC which provides the InPayload returns and must be + // copied if needed later. Payload any // Data is the serialized message payload. + // Deprecated: Data will be removed in the next release. Data []byte // Length is the size of the uncompressed payload data. Does not include any @@ -143,9 +146,12 @@ func (s *InTrailer) isRPCStats() {} type OutPayload struct { // Client is true if this OutPayload is from client side. Client bool - // Payload is the payload with original type. + // Payload is the payload with original type. This may be modified after + // the call to HandleRPC which provides the OutPayload returns and must be + // copied if needed later. Payload any // Data is the serialized message payload. + // Deprecated: Data will be removed in the next release. Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index b14b2fbea..8051ef5b5 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -23,11 +23,11 @@ import ( "errors" "io" "math" + "math/rand" "strconv" "sync" "time" - "golang.org/x/net/trace" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" @@ -35,7 +35,6 @@ import ( "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" @@ -48,6 +47,8 @@ import ( "google.golang.org/grpc/status" ) +var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) + // StreamHandler defines the handler called by gRPC server to complete the // execution of a streaming RPC. // @@ -184,7 +185,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth // when the RPC completes. opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) 
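A sketch of the two parse paths changed above: loadBalancingConfig now round-trips through gracefulswitch.ParseConfig (defaulting to pick_first when absent or unknown), and retryPolicy.maxAttempts is clamped against a configurable limit instead of a hard-coded 5. The target and service names here are hypothetical:

package scexample

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

const sc = `{
  "loadBalancingConfig": [{"round_robin": {}}],
  "methodConfig": [{
    "name": [{"service": "echo.Echo"}],
    "retryPolicy": {
      "maxAttempts": 4,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

func dial() (*grpc.ClientConn, error) {
	// maxAttempts values above the configured limit (default 5) are clamped
	// silently, per convertRetryPolicy above.
	return grpc.NewClient("dns:///localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(sc),
	)
}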
- if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { // validate md if err := imetadata.Validate(md); err != nil { return nil, status.Error(codes.Internal, err.Error()) @@ -429,7 +430,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) var trInfo *traceInfo if EnableTracing { trInfo = &traceInfo{ - tr: trace.New("grpc.Sent."+methodFamily(method), method), + tr: newTrace("grpc.Sent."+methodFamily(method), method), firstLine: firstLine{ client: true, }, @@ -438,7 +439,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) trInfo.firstLine.deadline = time.Until(deadline) } trInfo.tr.LazyLog(&trInfo.firstLine, false) - ctx = trace.NewContext(ctx, trInfo.tr) + ctx = newTraceContext(ctx, trInfo.tr) } if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata { @@ -515,6 +516,7 @@ func (a *csAttempt) newStream() error { return toRPCErr(nse.Err) } a.s = s + a.ctx = s.Context() a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} return nil } @@ -654,13 +656,13 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { - channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) + channelz.Infof(logger, cs.cc.channelz, "Server retry pushback specified to abort (%q).", sps[0]) cs.retryThrottler.throttle() // This counts as a failure for throttling. return false, err } hasPushback = true } else if len(sps) > 1 { - channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) + channelz.Warningf(logger, cs.cc.channelz, "Server retry pushback specified multiple values (%q); not retrying.", sps) cs.retryThrottler.throttle() // This counts as a failure for throttling. return false, err } @@ -697,7 +699,7 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { if max := float64(rp.MaxBackoff); cur > max { cur = max } - dur = time.Duration(grpcrand.Int63n(int64(cur))) + dur = time.Duration(rand.Int63n(int64(cur))) cs.numRetriesSincePushback++ } diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go new file mode 100644 index 000000000..8b813529c --- /dev/null +++ b/vendor/google.golang.org/grpc/stream_interfaces.go @@ -0,0 +1,152 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// ServerStreamingClient represents the client side of a server-streaming (one +// request, many responses) RPC. It is generic over the type of the response +// message. It is used in generated code. +type ServerStreamingClient[Res any] interface { + Recv() (*Res, error) + ClientStream +} + +// ServerStreamingServer represents the server side of a server-streaming (one +// request, many responses) RPC. 
It is generic over the type of the response +// message. It is used in generated code. +type ServerStreamingServer[Res any] interface { + Send(*Res) error + ServerStream +} + +// ClientStreamingClient represents the client side of a client-streaming (many +// requests, one response) RPC. It is generic over both the type of the request +// message stream and the type of the unary response message. It is used in +// generated code. +type ClientStreamingClient[Req any, Res any] interface { + Send(*Req) error + CloseAndRecv() (*Res, error) + ClientStream +} + +// ClientStreamingServer represents the server side of a client-streaming (many +// requests, one response) RPC. It is generic over both the type of the request +// message stream and the type of the unary response message. It is used in +// generated code. +type ClientStreamingServer[Req any, Res any] interface { + Recv() (*Req, error) + SendAndClose(*Res) error + ServerStream +} + +// BidiStreamingClient represents the client side of a bidirectional-streaming +// (many requests, many responses) RPC. It is generic over both the type of the +// request message stream and the type of the response message stream. It is +// used in generated code. +type BidiStreamingClient[Req any, Res any] interface { + Send(*Req) error + Recv() (*Res, error) + ClientStream +} + +// BidiStreamingServer represents the server side of a bidirectional-streaming +// (many requests, many responses) RPC. It is generic over both the type of the +// request message stream and the type of the response message stream. It is +// used in generated code. +type BidiStreamingServer[Req any, Res any] interface { + Recv() (*Req, error) + Send(*Res) error + ServerStream +} + +// GenericClientStream implements the ServerStreamingClient, ClientStreamingClient, +// and BidiStreamingClient interfaces. It is used in generated code. +type GenericClientStream[Req any, Res any] struct { + ClientStream +} + +var _ ServerStreamingClient[string] = (*GenericClientStream[int, string])(nil) +var _ ClientStreamingClient[int, string] = (*GenericClientStream[int, string])(nil) +var _ BidiStreamingClient[int, string] = (*GenericClientStream[int, string])(nil) + +// Send pushes one message into the stream of requests to be consumed by the +// server. The type of message which can be sent is determined by the Req type +// parameter of the GenericClientStream receiver. +func (x *GenericClientStream[Req, Res]) Send(m *Req) error { + return x.ClientStream.SendMsg(m) +} + +// Recv reads one message from the stream of responses generated by the server. +// The type of the message returned is determined by the Res type parameter +// of the GenericClientStream receiver. +func (x *GenericClientStream[Req, Res]) Recv() (*Res, error) { + m := new(Res) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// CloseAndRecv closes the sending side of the stream, then receives the unary +// response from the server. The type of message which it returns is determined +// by the Res type parameter of the GenericClientStream receiver. +func (x *GenericClientStream[Req, Res]) CloseAndRecv() (*Res, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(Res) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GenericServerStream implements the ServerStreamingServer, ClientStreamingServer, +// and BidiStreamingServer interfaces. It is used in generated code. 
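How generated code is expected to consume these interfaces, sketched with hypothetical Echo types. Real generated stubs use protobuf messages; the shape below mirrors protoc-gen-go-grpc output rather than quoting it:

package genericexample

import (
	"context"

	"google.golang.org/grpc"
)

// Hypothetical stand-ins for generated protobuf message types.
type EchoRequest struct{}
type EchoReply struct{}

var echoDesc = &grpc.StreamDesc{StreamName: "ServerStreamingEcho", ServerStreams: true}

// serverStreamingEcho mirrors a generated client stub: open the stream, send
// the single request, half-close, and return the generic receive-only view.
func serverStreamingEcho(ctx context.Context, cc grpc.ClientConnInterface, in *EchoRequest) (grpc.ServerStreamingClient[EchoReply], error) {
	s, err := cc.NewStream(ctx, echoDesc, "/echo.Echo/ServerStreamingEcho")
	if err != nil {
		return nil, err
	}
	x := &grpc.GenericClientStream[EchoRequest, EchoReply]{ClientStream: s}
	if err := x.ClientStream.SendMsg(in); err != nil {
		return nil, err
	}
	if err := x.ClientStream.CloseSend(); err != nil {
		return nil, err
	}
	return x, nil
}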
+type GenericServerStream[Req any, Res any] struct { + ServerStream +} + +var _ ServerStreamingServer[string] = (*GenericServerStream[int, string])(nil) +var _ ClientStreamingServer[int, string] = (*GenericServerStream[int, string])(nil) +var _ BidiStreamingServer[int, string] = (*GenericServerStream[int, string])(nil) + +// Send pushes one message into the stream of responses to be consumed by the +// client. The type of message which can be sent is determined by the Res +// type parameter of the serverStreamServer receiver. +func (x *GenericServerStream[Req, Res]) Send(m *Res) error { + return x.ServerStream.SendMsg(m) +} + +// SendAndClose pushes the unary response to the client. The type of message +// which can be sent is determined by the Res type parameter of the +// clientStreamServer receiver. +func (x *GenericServerStream[Req, Res]) SendAndClose(m *Res) error { + return x.ServerStream.SendMsg(m) +} + +// Recv reads one message from the stream of requests generated by the client. +// The type of the message returned is determined by the Req type parameter +// of the clientStreamServer receiver. +func (x *GenericServerStream[Req, Res]) Recv() (*Req, error) { + m := new(Req) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index bfa5dfa40..07f012576 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -27,6 +27,8 @@ package tap import ( "context" + + "google.golang.org/grpc/metadata" ) // Info defines the relevant information needed by the handles. @@ -34,6 +36,10 @@ type Info struct { // FullMethodName is the string of grpc method (in the format of // /package.service/method). FullMethodName string + + // Header contains the header metadata received. + Header metadata.MD + // TODO: More to be added. } diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index 9ded79321..10f4f798f 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -26,8 +26,6 @@ import ( "strings" "sync" "time" - - "golang.org/x/net/trace" ) // EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. @@ -44,9 +42,31 @@ func methodFamily(m string) string { return m } +// traceEventLog mirrors golang.org/x/net/trace.EventLog. +// +// It exists in order to avoid importing x/net/trace on grpcnotrace builds. +type traceEventLog interface { + Printf(format string, a ...any) + Errorf(format string, a ...any) + Finish() +} + +// traceLog mirrors golang.org/x/net/trace.Trace. +// +// It exists in order to avoid importing x/net/trace on grpcnotrace builds. +type traceLog interface { + LazyLog(x fmt.Stringer, sensitive bool) + LazyPrintf(format string, a ...any) + SetError() + SetRecycler(f func(any)) + SetTraceInfo(traceID, spanID uint64) + SetMaxEvents(m int) + Finish() +} + // traceInfo contains tracing information for an RPC. type traceInfo struct { - tr trace.Trace + tr traceLog firstLine firstLine } diff --git a/vendor/google.golang.org/grpc/trace_notrace.go b/vendor/google.golang.org/grpc/trace_notrace.go new file mode 100644 index 000000000..1da3a2308 --- /dev/null +++ b/vendor/google.golang.org/grpc/trace_notrace.go @@ -0,0 +1,52 @@ +//go:build grpcnotrace + +/* + * + * Copyright 2024 gRPC authors. 
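The new tap.Info.Header field means an InTapHandle can reject RPCs from request metadata before any handler goroutine or stats machinery runs; a sketch with a hypothetical API-key check:

package tapexample

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/tap"
)

func quotaTap(ctx context.Context, info *tap.Info) (context.Context, error) {
	// Header now carries the metadata received with the RPC.
	if len(info.Header.Get("x-api-key")) == 0 {
		return nil, status.Error(codes.Unauthenticated, "missing x-api-key")
	}
	return ctx, nil
}

func newServer() *grpc.Server {
	return grpc.NewServer(grpc.InTapHandle(quotaTap))
}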
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// grpcnotrace can be used to avoid importing golang.org/x/net/trace, which in +// turn enables binaries using gRPC-Go for dead code elimination, which can +// yield 10-15% improvements in binary size when tracing is not needed. + +import ( + "context" + "fmt" +) + +type notrace struct{} + +func (notrace) LazyLog(x fmt.Stringer, sensitive bool) {} +func (notrace) LazyPrintf(format string, a ...any) {} +func (notrace) SetError() {} +func (notrace) SetRecycler(f func(any)) {} +func (notrace) SetTraceInfo(traceID, spanID uint64) {} +func (notrace) SetMaxEvents(m int) {} +func (notrace) Finish() {} + +func newTrace(family, title string) traceLog { + return notrace{} +} + +func newTraceContext(ctx context.Context, tr traceLog) context.Context { + return ctx +} + +func newTraceEventLog(family, title string) traceEventLog { + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/trace_withtrace.go similarity index 59% rename from vendor/google.golang.org/grpc/internal/channelz/util_linux.go rename to vendor/google.golang.org/grpc/trace_withtrace.go index 98288c3f8..88d6e8571 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ b/vendor/google.golang.org/grpc/trace_withtrace.go @@ -1,6 +1,8 @@ +//go:build !grpcnotrace + /* * - * Copyright 2018 gRPC authors. + * Copyright 2024 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,22 +18,22 @@ * */ -package channelz +package grpc import ( - "syscall" + "context" + + t "golang.org/x/net/trace" ) -// GetSocketOption gets the socket option info of the conn. -func GetSocketOption(socket any) *SocketOptionData { - c, ok := socket.(syscall.Conn) - if !ok { - return nil - } - data := &SocketOptionData{} - if rawConn, err := c.SyscallConn(); err == nil { - rawConn.Control(data.Getsockopt) - return data - } - return nil +func newTrace(family, title string) traceLog { + return t.New(family, title) +} + +func newTraceContext(ctx context.Context, tr traceLog) context.Context { + return t.NewContext(ctx, tr) +} + +func newTraceEventLog(family, title string) traceEventLog { + return t.NewEventLog(family, title) } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 724ad2102..bafaef99b 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.58.3" +const Version = "1.65.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh deleted file mode 100644 index bbc9e2e3c..000000000 --- a/vendor/google.golang.org/grpc/vet.sh +++ /dev/null @@ -1,209 +0,0 @@ -#!/bin/bash - -set -ex # Exit on error; debugging enabled. -set -o pipefail # Fail a pipe if any sub-command fails. 
- -# not makes sure the command passed to it does not exit with a return code of 0. -not() { - # This is required instead of the earlier (! $COMMAND) because subshells and - # pipefail don't work the same on Darwin as in Linux. - ! "$@" -} - -die() { - echo "$@" >&2 - exit 1 -} - -fail_on_output() { - tee /dev/stderr | not read -} - -# Check to make sure it's safe to modify the user's git repo. -git status --porcelain | fail_on_output - -# Undo any edits made by this script. -cleanup() { - git reset --hard HEAD -} -trap cleanup EXIT - -PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}" -go version - -if [[ "$1" = "-install" ]]; then - # Install the pinned versions as defined in module tools. - pushd ./test/tools - go install \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell - popd - if [[ -z "${VET_SKIP_PROTO}" ]]; then - if [[ "${GITHUB_ACTIONS}" = "true" ]]; then - PROTOBUF_VERSION=22.0 # a.k.a v4.22.0 in pb.go files. - PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip - pushd /home/runner/go - wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} - unzip ${PROTOC_FILENAME} - bin/protoc --version - popd - elif not which protoc > /dev/null; then - die "Please install protoc into your path" - fi - fi - exit 0 -elif [[ "$#" -ne 0 ]]; then - die "Unknown argument(s): $*" -fi - -# - Check that generated proto files are up to date. -if [[ -z "${VET_SKIP_PROTO}" ]]; then - make proto && git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) -fi - -if [[ -n "${VET_ONLY_PROTO}" ]]; then - exit 0 -fi - -# - Ensure all source files contain a copyright message. -# (Done in two parts because Darwin "git grep" has broken support for compound -# exclusion matches.) -(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output - -# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. -not grep 'func Test[^(]' *_test.go -not grep 'func Test[^(]' test/*.go - -# - Do not import x/net/context. -not git grep -l 'x/net/context' -- "*.go" - -# - Do not import math/rand for real library code. Use internal/grpcrand for -# thread safety. -git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' - -# - Do not use "interface{}"; use "any" instead. -git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc' - -# - Do not call grpclog directly. Use grpclog.Component instead. -git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' - -# - Ensure all ptypes proto packages are renamed when importing. -not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" - -# - Ensure all xds proto imports are renamed to *pb or *grpc. -git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' - -misspell -error . - -# - gofmt, goimports, golint (with exceptions for generated code), go vet, -# go mod tidy. -# Perform these checks on each module inside gRPC. -for MOD_FILE in $(find . -name 'go.mod'); do - MOD_DIR=$(dirname ${MOD_FILE}) - pushd ${MOD_DIR} - go vet -all ./... | fail_on_output - gofmt -s -d -l . 2>&1 | fail_on_output - goimports -l . 2>&1 | not grep -vE "\.pb\.go" - golint ./... 
2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" - - go mod tidy -compat=1.19 - git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) - popd -done - -# - Collection of static analysis checks -# -# TODO(dfawley): don't use deprecated functions in examples or first-party -# plugins. -# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. -SC_OUT="$(mktemp)" -staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true -# Error if anything other than deprecation warnings are printed. -not grep -v "is deprecated:.*SA1019" "${SC_OUT}" -# Only ignore the following deprecated types/fields/functions. -not grep -Fv '.CredsBundle -.HeaderMap -.Metadata is deprecated: use Attributes -.NewAddress -.NewServiceConfig -.Type is deprecated: use Attributes -BuildVersion is deprecated -balancer.ErrTransientFailure -balancer.Picker -extDesc.Filename is deprecated -github.com/golang/protobuf/jsonpb is deprecated -grpc.CallCustomCodec -grpc.Code -grpc.Compressor -grpc.CustomCodec -grpc.Decompressor -grpc.MaxMsgSize -grpc.MethodConfig -grpc.NewGZIPCompressor -grpc.NewGZIPDecompressor -grpc.RPCCompressor -grpc.RPCDecompressor -grpc.ServiceConfig -grpc.WithCompressor -grpc.WithDecompressor -grpc.WithDialer -grpc.WithMaxMsgSize -grpc.WithServiceConfig -grpc.WithTimeout -http.CloseNotifier -info.SecurityVersion -proto is deprecated -proto.InternalMessageInfo is deprecated -proto.EnumName is deprecated -proto.ErrInternalBadWireType is deprecated -proto.FileDescriptor is deprecated -proto.Marshaler is deprecated -proto.MessageType is deprecated -proto.RegisterEnum is deprecated -proto.RegisterFile is deprecated -proto.RegisterType is deprecated -proto.RegisterExtension is deprecated -proto.RegisteredExtension is deprecated -proto.RegisteredExtensions is deprecated -proto.RegisterMapType is deprecated -proto.Unmarshaler is deprecated -Target is deprecated: Use the Target field in the BuildOptions instead. -xxx_messageInfo_ -' "${SC_OUT}" - -# - special golint on package comments. -lint_package_comment_per_package() { - # Number of files in this go package. - fileCount=$(go list -f '{{len .GoFiles}}' $1) - if [ ${fileCount} -eq 0 ]; then - return 0 - fi - # Number of package errors generated by golint. - lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment") - # golint complains about every file that's missing the package comment. If the - # number of files for this package is greater than the number of errors, there's - # at least one file with package comment, good. Otherwise, fail. - if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then - echo "Package $1 (with ${fileCount} files) is missing package comment" - return 1 - fi -} -lint_package_comment() { - set +ex - - count=0 - for i in $(go list ./...); do - lint_package_comment_per_package "$i" - ((count += $?)) - done - - set -ex - return $count -} -lint_package_comment - -echo SUCCESS diff --git a/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go new file mode 100644 index 000000000..2ef36bbcf --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go @@ -0,0 +1,160 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package protodelim marshals and unmarshals varint size-delimited messages. +package protodelim + +import ( + "bufio" + "encoding/binary" + "fmt" + "io" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/proto" +) + +// MarshalOptions is a configurable varint size-delimited marshaler. +type MarshalOptions struct{ proto.MarshalOptions } + +// MarshalTo writes a varint size-delimited wire-format message to w. +// If w returns an error, MarshalTo returns it unchanged. +func (o MarshalOptions) MarshalTo(w io.Writer, m proto.Message) (int, error) { + msgBytes, err := o.MarshalOptions.Marshal(m) + if err != nil { + return 0, err + } + + sizeBytes := protowire.AppendVarint(nil, uint64(len(msgBytes))) + sizeWritten, err := w.Write(sizeBytes) + if err != nil { + return sizeWritten, err + } + msgWritten, err := w.Write(msgBytes) + if err != nil { + return sizeWritten + msgWritten, err + } + return sizeWritten + msgWritten, nil +} + +// MarshalTo writes a varint size-delimited wire-format message to w +// with the default options. +// +// See the documentation for [MarshalOptions.MarshalTo]. +func MarshalTo(w io.Writer, m proto.Message) (int, error) { + return MarshalOptions{}.MarshalTo(w, m) +} + +// UnmarshalOptions is a configurable varint size-delimited unmarshaler. +type UnmarshalOptions struct { + proto.UnmarshalOptions + + // MaxSize is the maximum size in wire-format bytes of a single message. + // Unmarshaling a message larger than MaxSize will return an error. + // A zero MaxSize will default to 4 MiB. + // Setting MaxSize to -1 disables the limit. + MaxSize int64 +} + +const defaultMaxSize = 4 << 20 // 4 MiB, corresponds to the default gRPC max request/response size + +// SizeTooLargeError is an error that is returned when the unmarshaler encounters a message size +// that is larger than its configured [UnmarshalOptions.MaxSize]. +type SizeTooLargeError struct { + // Size is the varint size of the message encountered + // that was larger than the provided MaxSize. + Size uint64 + + // MaxSize is the MaxSize limit configured in UnmarshalOptions, which Size exceeded. + MaxSize uint64 +} + +func (e *SizeTooLargeError) Error() string { + return fmt.Sprintf("message size %d exceeded unmarshaler's maximum configured size %d", e.Size, e.MaxSize) +} + +// Reader is the interface expected by [UnmarshalFrom]. +// It is implemented by *[bufio.Reader]. +type Reader interface { + io.Reader + io.ByteReader +} + +// UnmarshalFrom parses and consumes a varint size-delimited wire-format message +// from r. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +// +// The error is [io.EOF] error only if no bytes are read. +// If an EOF happens after reading some but not all the bytes, +// UnmarshalFrom returns a non-io.EOF error. +// In particular if r returns a non-io.EOF error, UnmarshalFrom returns it unchanged, +// and if only a size is read with no subsequent message, [io.ErrUnexpectedEOF] is returned. +func (o UnmarshalOptions) UnmarshalFrom(r Reader, m proto.Message) error { + var sizeArr [binary.MaxVarintLen64]byte + sizeBuf := sizeArr[:0] + for i := range sizeArr { + b, err := r.ReadByte() + if err != nil { + // Immediate EOF is unexpected. 
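A round-trip sketch for the newly vendored protodelim package; durationpb is used only as a convenient concrete message type, and any generated message works the same way:

package delimexample

import (
	"bufio"
	"bytes"
	"errors"
	"io"
	"time"

	"google.golang.org/protobuf/encoding/protodelim"
	"google.golang.org/protobuf/types/known/durationpb"
)

func roundTrip() error {
	var buf bytes.Buffer
	// Write two varint size-delimited messages back to back.
	for _, d := range []time.Duration{time.Second, 2 * time.Second} {
		if _, err := protodelim.MarshalTo(&buf, durationpb.New(d)); err != nil {
			return err
		}
	}
	// bufio.Reader satisfies protodelim.Reader (io.Reader + io.ByteReader) and
	// lets UnmarshalFrom borrow its buffer instead of allocating.
	r := bufio.NewReader(&buf)
	for {
		got := &durationpb.Duration{}
		err := protodelim.UnmarshalFrom(r, got)
		if errors.Is(err, io.EOF) {
			return nil // io.EOF only when no bytes were read: clean end of stream
		}
		if err != nil {
			return err
		}
	}
}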
+ if err == io.EOF && i != 0 { + break + } + return err + } + sizeBuf = append(sizeBuf, b) + if b < 0x80 { + break + } + } + size, n := protowire.ConsumeVarint(sizeBuf) + if n < 0 { + return protowire.ParseError(n) + } + + maxSize := o.MaxSize + if maxSize == 0 { + maxSize = defaultMaxSize + } + if maxSize != -1 && size > uint64(maxSize) { + return errors.Wrap(&SizeTooLargeError{Size: size, MaxSize: uint64(maxSize)}, "") + } + + var b []byte + var err error + if br, ok := r.(*bufio.Reader); ok { + // Use the []byte from the bufio.Reader instead of having to allocate one. + // This reduces CPU usage and allocated bytes. + b, err = br.Peek(int(size)) + if err == nil { + defer br.Discard(int(size)) + } else { + b = nil + } + } + if b == nil { + b = make([]byte, size) + _, err = io.ReadFull(r, b) + } + + if err == io.EOF { + return io.ErrUnexpectedEOF + } + if err != nil { + return err + } + if err := o.Unmarshal(b, m); err != nil { + return err + } + return nil +} + +// UnmarshalFrom parses and consumes a varint size-delimited wire-format message +// from r with the default options. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +// +// See the documentation for [UnmarshalOptions.UnmarshalFrom]. +func UnmarshalFrom(r Reader, m proto.Message) error { + return UnmarshalOptions{}.UnmarshalFrom(r, m) +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index f47902371..bb2966e3b 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -102,7 +102,7 @@ type decoder struct { } // newError returns an error object with position info. -func (d decoder) newError(pos int, f string, x ...interface{}) error { +func (d decoder) newError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("(line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -114,7 +114,7 @@ func (d decoder) unexpectedTokenError(tok json.Token) error { } // syntaxError returns a syntax error for given position. -func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { +func (d decoder) syntaxError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) return errors.New(head+f, x...) diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index 3f75098b6..29846df22 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -25,15 +25,17 @@ const defaultIndent = " " // Format formats the message as a multiline string. // This function is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } // Marshal writes the given [proto.Message] in JSON format using default options. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. 
Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func Marshal(m proto.Message) ([]byte, error) { return MarshalOptions{}.Marshal(m) } @@ -110,8 +112,9 @@ type MarshalOptions struct { // Format formats the message as a string. // This method is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func (o MarshalOptions) Format(m proto.Message) string { if m == nil || !m.ProtoReflect().IsValid() { return "" // invalid syntax, but okay since this is for debugging @@ -122,8 +125,9 @@ func (o MarshalOptions) Format(m proto.Message) string { } // Marshal marshals the given [proto.Message] in the JSON format using options in -// MarshalOptions. Do not depend on the output being stable. It may change over -// time across different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { return o.marshal(nil, m) } diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index a45f112bc..24bc98ac4 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -84,7 +84,7 @@ type decoder struct { } // newError returns an error object with position info. -func (d decoder) newError(pos int, f string, x ...interface{}) error { +func (d decoder) newError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("(line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -96,7 +96,7 @@ func (d decoder) unexpectedTokenError(tok text.Token) error { } // syntaxError returns a syntax error for given position. -func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { +func (d decoder) syntaxError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) return errors.New(head+f, x...) diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 95967e811..1f57e6610 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -27,15 +27,17 @@ const defaultIndent = " " // Format formats the message as a multiline string. // This function is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } // Marshal writes the given [proto.Message] in textproto format using default -// options. Do not depend on the output being stable. It may change over time -// across different versions of the program. 
+// options. Do not depend on the output being stable. Its output will change +// across different builds of your program, even when using the same version of +// the protobuf module. func Marshal(m proto.Message) ([]byte, error) { return MarshalOptions{}.Marshal(m) } @@ -84,8 +86,9 @@ type MarshalOptions struct { // Format formats the message as a string. // This method is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func (o MarshalOptions) Format(m proto.Message) string { if m == nil || !m.ProtoReflect().IsValid() { return "" // invalid syntax, but okay since this is for debugging @@ -98,8 +101,9 @@ func (o MarshalOptions) Format(m proto.Message) string { } // Marshal writes the given [proto.Message] in textproto format using options in -// MarshalOptions object. Do not depend on the output being stable. It may -// change over time across different versions of the program. +// MarshalOptions object. Do not depend on the output being stable. Its output +// will change across different builds of your program, even when using the +// same version of the protobuf module. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { return o.marshal(nil, m) } diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index a45625c8d..87e46bd4d 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -252,6 +252,7 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record fu {rv.MethodByName("Values"), "Values"}, {rv.MethodByName("ReservedNames"), "ReservedNames"}, {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + {rv.MethodByName("IsClosed"), "IsClosed"}, }...) case protoreflect.EnumValueDescriptor: diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index 18f075687..ff6a38360 100644 Binary files a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go new file mode 100644 index 000000000..029a6a12d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package editionssupport defines constants for editions that are supported. 
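Related to the descfmt and filedesc changes above: at this protobuf-go version, enum descriptors report whether they are closed (proto2 semantics) or open (proto3/editions) through the public protoreflect API, a small sketch:

package enumexample

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func describeEnum(ed protoreflect.EnumDescriptor) {
	// IsClosed backs the new "IsClosed" column printed by descfmt above.
	fmt.Printf("%s: closed=%v values=%d\n", ed.FullName(), ed.IsClosed(), ed.Values().Len())
}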
+package editionssupport + +import descriptorpb "google.golang.org/protobuf/types/descriptorpb" + +const ( + Minimum = descriptorpb.Edition_EDITION_PROTO2 + Maximum = descriptorpb.Edition_EDITION_2023 +) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go index d2b3ac031..ea1d3e65a 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -214,7 +214,7 @@ func (d *Decoder) parseNext() (Token, error) { // newSyntaxError returns an error with line and column information useful for // syntax errors. -func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error { +func (d *Decoder) newSyntaxError(pos int, f string, x ...any) error { e := errors.New(f, x...) line, column := d.Position(pos) return errors.New("syntax error (line %d:%d): %v", line, column, e) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 373d20837..7e87c7604 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -32,6 +32,7 @@ var byteType = reflect.TypeOf(byte(0)) func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor { f := new(filedesc.Field) f.L0.ParentFile = filedesc.SurrogateProto2 + f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures for len(tag) > 0 { i := strings.IndexByte(tag, ',') if i < 0 { @@ -107,8 +108,7 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri f.L1.StringName.InitJSON(jsonName) } case s == "packed": - f.L1.HasPacked = true - f.L1.IsPacked = true + f.L1.EditionFeatures.IsPacked = true case strings.HasPrefix(s, "weak="): f.L1.IsWeak = true f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):])) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index 87853e786..099b2bf45 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -601,7 +601,7 @@ func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token { // newSyntaxError returns a syntax error with line and column information for // current position. -func (d *Decoder) newSyntaxError(f string, x ...interface{}) error { +func (d *Decoder) newSyntaxError(f string, x ...any) error { e := errors.New(f, x...) line, column := d.Position(len(d.orig) - len(d.in)) return errors.New("syntax error (line %d:%d): %v", line, column, e) diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go index 20c17b35e..c2d6bd526 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/errors.go +++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go @@ -17,7 +17,7 @@ var Error = errors.New("protobuf error") // New formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. 
-func New(f string, x ...interface{}) error {
+func New(f string, x ...any) error {
 	return &prefixError{s: format(f, x...)}
 }
 
@@ -43,7 +43,7 @@ func (e *prefixError) Unwrap() error {
 
 // Wrap returns an error that has a "proto" prefix, the formatted string described
 // by the format specifier and arguments, and a suffix of err. The error wraps err.
-func Wrap(err error, f string, x ...interface{}) error {
+func Wrap(err error, f string, x ...any) error {
 	return &wrapError{
 		s:   format(f, x...),
 		err: err,
@@ -67,7 +67,7 @@ func (e *wrapError) Is(target error) bool {
 	return target == Error
 }
 
-func format(f string, x ...interface{}) string {
+func format(f string, x ...any) string {
 	// avoid "proto: " prefix when chaining
 	for i := 0; i < len(x); i++ {
 		switch e := x[i].(type) {
@@ -87,3 +87,18 @@ func InvalidUTF8(name string) error {
 func RequiredNotSet(name string) error {
 	return New("required field %v not set", name)
 }
+
+type SizeMismatchError struct {
+	Calculated, Measured int
+}
+
+func (e *SizeMismatchError) Error() string {
+	return fmt.Sprintf("size mismatch (see https://github.com/golang/protobuf/issues/1609): calculated=%d, measured=%d", e.Calculated, e.Measured)
+}
+
+func MismatchedSizeCalculation(calculated, measured int) error {
+	return &SizeMismatchError{
+		Calculated: calculated,
+		Measured:   measured,
+	}
+}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index 8826bcf40..df53ff40b 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -7,6 +7,7 @@ package filedesc
 import (
 	"bytes"
 	"fmt"
+	"strings"
 	"sync"
 	"sync/atomic"
 
@@ -108,9 +109,12 @@ func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd }
 func (fd *File) Parent() protoreflect.Descriptor { return nil }
 func (fd *File) Index() int { return 0 }
 func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax }
-func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() }
-func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package }
-func (fd *File) IsPlaceholder() bool { return false }
+
+// Not exported and just used to reconstruct the original FileDescriptor proto
+func (fd *File) Edition() int32 { return int32(fd.L1.Edition) }
+func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() }
+func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package }
+func (fd *File) IsPlaceholder() bool { return false }
 func (fd *File) Options() protoreflect.ProtoMessage {
 	if f := fd.lazyInit().Options; f != nil {
 		return f()
@@ -202,6 +206,9 @@ func (ed *Enum) lazyInit() *EnumL2 {
 	ed.L0.ParentFile.lazyInit() // implicitly initializes L2
 	return ed.L2
 }
+func (ed *Enum) IsClosed() bool {
+	return !ed.L1.EditionFeatures.IsOpenEnum
+}
 
 func (ed *EnumValue) Options() protoreflect.ProtoMessage {
 	if f := ed.L1.Options; f != nil {
@@ -251,10 +258,6 @@ type (
 		StringName       stringName
 		IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
 		IsWeak           bool // promoted from google.protobuf.FieldOptions
-		HasPacked        bool // promoted from google.protobuf.FieldOptions
-		IsPacked         bool // promoted from google.protobuf.FieldOptions
-		HasEnforceUTF8   bool // promoted from google.protobuf.FieldOptions
-		EnforceUTF8      bool // promoted from google.protobuf.FieldOptions
 		Default          defaultValue
 		ContainingOneof  protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
 		Enum             protoreflect.EnumDescriptor
@@ -331,8 +334,7 @@ func (fd *Field) HasPresence() bool {
 	if fd.L1.Cardinality == protoreflect.Repeated {
 		return false
 	}
-	explicitFieldPresence := fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsFieldPresence
-	return fd.Syntax() == protoreflect.Proto2 || explicitFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil
+	return fd.IsExtension() || fd.L1.EditionFeatures.IsFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil
 }
 func (fd *Field) HasOptionalKeyword() bool {
 	return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional
@@ -345,14 +347,7 @@ func (fd *Field) IsPacked() bool {
 	case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind:
 		return false
 	}
-	if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions {
-		return fd.L1.EditionFeatures.IsPacked
-	}
-	if fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 {
-		// proto3 repeated fields are packed by default.
-		return !fd.L1.HasPacked || fd.L1.IsPacked
-	}
-	return fd.L1.IsPacked
+	return fd.L1.EditionFeatures.IsPacked
 }
 func (fd *Field) IsExtension() bool { return false }
 func (fd *Field) IsWeak() bool { return fd.L1.IsWeak }
@@ -388,6 +383,10 @@ func (fd *Field) Message() protoreflect.MessageDescriptor {
 	}
 	return fd.L1.Message
 }
+func (fd *Field) IsMapEntry() bool {
+	parent, ok := fd.L0.Parent.(protoreflect.MessageDescriptor)
+	return ok && parent.IsMapEntry()
+}
 func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) }
 func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {}
 
@@ -399,13 +398,7 @@ func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {}
 // WARNING: This method is exempt from the compatibility promise and may be
 // removed in the future without warning.
 func (fd *Field) EnforceUTF8() bool {
-	if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions {
-		return fd.L1.EditionFeatures.IsUTF8Validated
-	}
-	if fd.L1.HasEnforceUTF8 {
-		return fd.L1.EnforceUTF8
-	}
-	return fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3
+	return fd.L1.EditionFeatures.IsUTF8Validated
 }
 
 func (od *Oneof) IsSynthetic() bool {
@@ -438,7 +431,6 @@ type (
 		Options          func() protoreflect.ProtoMessage
 		StringName       stringName
 		IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
-		IsPacked         bool // promoted from google.protobuf.FieldOptions
 		Default          defaultValue
 		Enum             protoreflect.EnumDescriptor
 		Message          protoreflect.MessageDescriptor
@@ -461,7 +453,16 @@ func (xd *Extension) HasPresence() bool { return xd.L1.Cardi
 func (xd *Extension) HasOptionalKeyword() bool {
 	return (xd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && xd.L1.Cardinality == protoreflect.Optional) || xd.lazyInit().IsProto3Optional
 }
-func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked }
+func (xd *Extension) IsPacked() bool {
+	if xd.L1.Cardinality != protoreflect.Repeated {
+		return false
+	}
+	switch xd.L1.Kind {
+	case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind:
+		return false
+	}
+	return xd.L1.EditionFeatures.IsPacked
+}
 func (xd *Extension) IsExtension() bool { return true }
 func (xd *Extension) IsWeak() bool { return false }
 func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated }
@@ -542,8 +543,9 @@ func (md *Method) ProtoInternal(pragma.DoNotImplement) {}
 
 // Surrogate files can be used to create standalone descriptors
 // where the syntax is only information derived from the parent file.
 var (
-	SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}}
-	SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}}
+	SurrogateProto2      = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}}
+	SurrogateProto3      = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}}
+	SurrogateEdition2023 = &File{L1: FileL1{Syntax: protoreflect.Editions, Edition: Edition2023}, L2: &FileL2{}}
 )
 
 type (
@@ -585,6 +587,34 @@ func (s *stringName) InitJSON(name string) {
 	s.nameJSON = name
 }
 
+// Returns true if this field is structured like the synthetic field of a proto2
+// group. This allows us to expand our treatment of delimited fields without
+// breaking proto2 files that have been upgraded to editions.
+func isGroupLike(fd protoreflect.FieldDescriptor) bool {
+	// Groups are always group types.
+	if fd.Kind() != protoreflect.GroupKind {
+		return false
+	}
+
+	// Group fields are always the lowercase type name.
+	if strings.ToLower(string(fd.Message().Name())) != string(fd.Name()) {
+		return false
+	}
+
+	// Groups can only be defined in the same file they're used in.
+	if fd.Message().ParentFile() != fd.ParentFile() {
+		return false
+	}
+
+	// Group messages are always defined in the same scope as the field. File
+	// level extensions will compare NULL == NULL here, which is why the file
+	// comparison above is necessary to ensure both come from the same file.
+	if fd.IsExtension() {
+		return fd.Parent() == fd.Message().Parent()
+	}
+	return fd.ContainingMessage() == fd.Message().Parent()
+}
+
 func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName {
 	s.once.Do(func() {
 		if fd.IsExtension() {
@@ -605,7 +635,7 @@ func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName {
 
 			// Format the text name.
 			s.nameText = string(fd.Name())
-			if fd.Kind() == protoreflect.GroupKind {
+			if isGroupLike(fd) {
 				s.nameText = string(fd.Message().Name())
 			}
 		}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
index 237e64fd2..8a57d60b0 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
@@ -113,8 +113,10 @@ func (fd *File) unmarshalSeed(b []byte) {
 			switch string(v) {
 			case "proto2":
 				fd.L1.Syntax = protoreflect.Proto2
+				fd.L1.Edition = EditionProto2
 			case "proto3":
 				fd.L1.Syntax = protoreflect.Proto3
+				fd.L1.Edition = EditionProto3
 			case "editions":
 				fd.L1.Syntax = protoreflect.Editions
 			default:
@@ -177,11 +179,10 @@ func (fd *File) unmarshalSeed(b []byte) {
 	// If syntax is missing, it is assumed to be proto2.
 	if fd.L1.Syntax == 0 {
 		fd.L1.Syntax = protoreflect.Proto2
+		fd.L1.Edition = EditionProto2
 	}
 
-	if fd.L1.Syntax == protoreflect.Editions {
-		fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition)
-	}
+	fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition)
 
 	// Parse editions features from options if any
 	if options != nil {
@@ -267,6 +268,7 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protorefl
 	ed.L0.ParentFile = pf
 	ed.L0.Parent = pd
 	ed.L0.Index = i
+	ed.L1.EditionFeatures = featuresFromParentDesc(ed.Parent())
 
 	var numValues int
 	for b := b; len(b) > 0; {
@@ -443,6 +445,7 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot
 	xd.L0.ParentFile = pf
 	xd.L0.Parent = pd
 	xd.L0.Index = i
+	xd.L1.EditionFeatures = featuresFromParentDesc(pd)
 
 	for len(b) > 0 {
 		num, typ, n := protowire.ConsumeTag(b)
@@ -467,6 +470,38 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot
 				xd.L0.FullName = appendFullName(sb, pd.FullName(), v)
 			case genid.FieldDescriptorProto_Extendee_field_number:
 				xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v))
+			case genid.FieldDescriptorProto_Options_field_number:
+				xd.unmarshalOptions(v)
 			}
 		default:
 			m := protowire.ConsumeFieldValue(num, typ, b)
 			b = b[m:]
 		}
 	}
+
+	if xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded {
+		xd.L1.Kind = protoreflect.GroupKind
+	}
+}
+
+func (xd *Extension) unmarshalOptions(b []byte) {
+	for len(b) > 0 {
+		num, typ, n := protowire.ConsumeTag(b)
+		b = b[n:]
+		switch typ {
+		case protowire.VarintType:
+			v, m := protowire.ConsumeVarint(b)
+			b = b[m:]
+			switch num {
+			case genid.FieldOptions_Packed_field_number:
+				xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
+			}
+		case protowire.BytesType:
+			v, m := protowire.ConsumeBytes(b)
+			b = b[m:]
+			switch num {
+			case genid.FieldOptions_Features_field_number:
+				xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures)
+			}
 		default:
 			m := protowire.ConsumeFieldValue(num, typ, b)
 			b = b[m:]
@@ -499,7 +534,7 @@ func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor
 }
 
 var nameBuilderPool = sync.Pool{
-	New: func() interface{} { return new(strs.Builder) },
+	New: func() any { return new(strs.Builder) },
 }
 
 func getBuilder() *strs.Builder {
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
index 482a61cc1..e56c91a8d 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -45,6 +45,11 @@ func (file *File) resolveMessages() {
 			case protoreflect.MessageKind, protoreflect.GroupKind:
 				fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx)
 				depIdx++
+				if fd.L1.Kind == protoreflect.GroupKind && (fd.IsMap() || fd.IsMapEntry()) {
+					// A map field might inherit delimited encoding from a file-wide default feature.
+					// But maps never actually use delimited encoding. (At least for now...)
+					fd.L1.Kind = protoreflect.MessageKind
+				}
 			}
 
 			// Default is resolved here since it depends on Enum being resolved.
@@ -466,10 +471,10 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref
 			b = b[m:]
 		}
 	}
-	if fd.Syntax() == protoreflect.Editions && fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded {
+	if fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded {
 		fd.L1.Kind = protoreflect.GroupKind
 	}
-	if fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsLegacyRequired {
+	if fd.L1.EditionFeatures.IsLegacyRequired {
 		fd.L1.Cardinality = protoreflect.Required
 	}
 	if rawTypeName != nil {
@@ -496,13 +501,11 @@ func (fd *Field) unmarshalOptions(b []byte) {
 			b = b[m:]
 			switch num {
 			case genid.FieldOptions_Packed_field_number:
-				fd.L1.HasPacked = true
-				fd.L1.IsPacked = protowire.DecodeBool(v)
+				fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
 			case genid.FieldOptions_Weak_field_number:
 				fd.L1.IsWeak = protowire.DecodeBool(v)
 			case FieldOptions_EnforceUTF8:
-				fd.L1.HasEnforceUTF8 = true
-				fd.L1.EnforceUTF8 = protowire.DecodeBool(v)
+				fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v)
 			}
 		case protowire.BytesType:
 			v, m := protowire.ConsumeBytes(b)
@@ -548,7 +551,6 @@ func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref
 func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
 	var rawTypeName []byte
 	var rawOptions []byte
-	xd.L1.EditionFeatures = featuresFromParentDesc(xd.L1.Extendee)
 	xd.L2 = new(ExtensionL2)
 	for len(b) > 0 {
 		num, typ, n := protowire.ConsumeTag(b)
@@ -572,7 +574,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
 			case genid.FieldDescriptorProto_TypeName_field_number:
 				rawTypeName = v
 			case genid.FieldDescriptorProto_Options_field_number:
-				xd.unmarshalOptions(v)
 				rawOptions = appendOptions(rawOptions, v)
 			}
 		default:
@@ -580,12 +581,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
 			b = b[m:]
 		}
 	}
-	if xd.Syntax() == protoreflect.Editions && xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded {
-		xd.L1.Kind = protoreflect.GroupKind
-	}
-	if xd.Syntax() == protoreflect.Editions && xd.L1.EditionFeatures.IsLegacyRequired {
-		xd.L1.Cardinality = protoreflect.Required
-	}
 	if rawTypeName != nil {
 		name := makeFullName(sb, rawTypeName)
 		switch xd.L1.Kind {
@@ -598,32 +593,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
 	xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions)
 }
 
-func (xd *Extension) unmarshalOptions(b []byte) {
-	for len(b) > 0 {
-		num, typ, n := protowire.ConsumeTag(b)
-		b = b[n:]
-		switch typ {
-		case protowire.VarintType:
-			v, m := protowire.ConsumeVarint(b)
-			b = b[m:]
-			switch num {
-			case genid.FieldOptions_Packed_field_number:
-				xd.L2.IsPacked = protowire.DecodeBool(v)
-			}
-		case protowire.BytesType:
-			v, m := protowire.ConsumeBytes(b)
-			b = b[m:]
-			switch num {
-			case genid.FieldOptions_Features_field_number:
-				xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures)
-			}
-		default:
-			m := protowire.ConsumeFieldValue(num, typ, b)
-			b = b[m:]
-		}
-	}
-}
-
 func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) {
 	var rawMethods [][]byte
 	var rawOptions []byte
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
index 30db19fdc..f4107c05f 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
@@ -8,6 +8,7 @@ package filedesc
 
 import (
 	"fmt"
+	"strings"
 	"sync"
 
 	"google.golang.org/protobuf/internal/descfmt"
@@ -198,6 +199,16 @@ func (p *Fields) lazyInit() *Fields {
 				if _, ok := p.byText[d.TextName()]; !ok {
 					p.byText[d.TextName()] = d
 				}
+				if isGroupLike(d) {
+					lowerJSONName := strings.ToLower(d.JSONName())
+					if _, ok := p.byJSON[lowerJSONName]; !ok {
+						p.byJSON[lowerJSONName] = d
+					}
+					lowerTextName := strings.ToLower(d.TextName())
+					if _, ok := p.byText[lowerTextName]; !ok {
+						p.byText[lowerTextName] = d
+					}
+				}
 				if _, ok := p.byNum[d.Number()]; !ok {
 					p.byNum[d.Number()] = d
 				}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
index 0375a49d4..11f5f356b 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
@@ -14,9 +14,13 @@ import (
 )
 
 var defaultsCache = make(map[Edition]EditionFeatures)
+var defaultsKeys = []Edition{}
 
 func init() {
 	unmarshalEditionDefaults(editiondefaults.Defaults)
+	SurrogateProto2.L1.EditionFeatures = getFeaturesFor(EditionProto2)
+	SurrogateProto3.L1.EditionFeatures = getFeaturesFor(EditionProto3)
+	SurrogateEdition2023.L1.EditionFeatures = getFeaturesFor(Edition2023)
 }
 
 func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures {
@@ -104,12 +108,15 @@ func unmarshalEditionDefault(b []byte) {
 			v, m := protowire.ConsumeBytes(b)
 			b = b[m:]
 			switch num {
-			case genid.FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number:
+			case genid.FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number:
+				fs = unmarshalFeatureSet(v, fs)
+			case genid.FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number:
 				fs = unmarshalFeatureSet(v, fs)
 			}
 		}
 	}
 	defaultsCache[ed] = fs
+	defaultsKeys = append(defaultsKeys, ed)
 }
 
 func unmarshalEditionDefaults(b []byte) {
@@ -135,8 +142,15 @@ func unmarshalEditionDefaults(b []byte) {
 }
 
 func getFeaturesFor(ed Edition) EditionFeatures {
-	if def, ok := defaultsCache[ed]; ok {
-		return def
+	match := EditionUnknown
+	for _, key := range defaultsKeys {
+		if key > ed {
+			break
+		}
+		match = key
+	}
+	if match == EditionUnknown {
+		panic(fmt.Sprintf("unsupported edition: %v", ed))
 	}
-	panic(fmt.Sprintf("unsupported edition: %v", ed))
+	return defaultsCache[match]
 }
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
index 28240ebc5..bfb3b8417 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
@@ -63,6 +63,7 @@ func (e PlaceholderEnum) Options() protoreflect.ProtoMessage { return des
 func (e PlaceholderEnum) Values() protoreflect.EnumValueDescriptors { return emptyEnumValues }
 func (e PlaceholderEnum) ReservedNames() protoreflect.Names { return emptyNames }
 func (e PlaceholderEnum) ReservedRanges() protoreflect.EnumRanges { return emptyEnumRanges }
+func (e PlaceholderEnum) IsClosed() bool { return false }
 func (e PlaceholderEnum) ProtoType(protoreflect.EnumDescriptor) { return }
 func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return }
diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go
index f0e38c4ef..ba83fea44 100644
--- a/vendor/google.golang.org/protobuf/internal/filetype/build.go
+++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go
@@ -68,7 +68,7 @@ type Builder struct {
 	// and for input and output messages referenced by service methods.
 	// Dependencies must come after declarations, but the ordering of
 	// dependencies themselves is unspecified.
-	GoTypes []interface{}
+	GoTypes []any
 
 	// DependencyIndexes is an ordered list of indexes into GoTypes for the
 	// dependencies of messages, extensions, or services.
@@ -268,7 +268,7 @@ func (x depIdxs) Get(i, j int32) int32 {
 
 type (
 	resolverByIndex struct {
-		goTypes []interface{}
+		goTypes []any
 		depIdxs depIdxs
 		fileRegistry
 	}
diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
index 40272c893..f30ab6b58 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
@@ -21,6 +21,7 @@ const (
 // Enum values for google.protobuf.Edition.
 const (
 	Edition_EDITION_UNKNOWN_enum_value = 0
+	Edition_EDITION_LEGACY_enum_value = 900
 	Edition_EDITION_PROTO2_enum_value = 998
 	Edition_EDITION_PROTO3_enum_value = 999
 	Edition_EDITION_2023_enum_value = 1000
@@ -653,6 +654,7 @@ const (
 	FieldOptions_Targets_field_name protoreflect.Name = "targets"
 	FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults"
 	FieldOptions_Features_field_name protoreflect.Name = "features"
+	FieldOptions_FeatureSupport_field_name protoreflect.Name = "feature_support"
 	FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
 
 	FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype"
@@ -667,6 +669,7 @@ const (
 	FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets"
 	FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults"
 	FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features"
+	FieldOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.feature_support"
 	FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option"
 )
 
@@ -684,6 +687,7 @@ const (
 	FieldOptions_Targets_field_number protoreflect.FieldNumber = 19
 	FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20
 	FieldOptions_Features_field_number protoreflect.FieldNumber = 21
+	FieldOptions_FeatureSupport_field_number protoreflect.FieldNumber = 22
 	FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
 )
 
@@ -767,6 +771,33 @@ const (
 	FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2
 )
 
+// Names for google.protobuf.FieldOptions.FeatureSupport.
+const (
+	FieldOptions_FeatureSupport_message_name protoreflect.Name = "FeatureSupport"
+	FieldOptions_FeatureSupport_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport"
+)
+
+// Field names for google.protobuf.FieldOptions.FeatureSupport.
+const (
+	FieldOptions_FeatureSupport_EditionIntroduced_field_name protoreflect.Name = "edition_introduced"
+	FieldOptions_FeatureSupport_EditionDeprecated_field_name protoreflect.Name = "edition_deprecated"
+	FieldOptions_FeatureSupport_DeprecationWarning_field_name protoreflect.Name = "deprecation_warning"
+	FieldOptions_FeatureSupport_EditionRemoved_field_name protoreflect.Name = "edition_removed"
+
+	FieldOptions_FeatureSupport_EditionIntroduced_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_introduced"
+	FieldOptions_FeatureSupport_EditionDeprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_deprecated"
+	FieldOptions_FeatureSupport_DeprecationWarning_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.deprecation_warning"
+	FieldOptions_FeatureSupport_EditionRemoved_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_removed"
+)
+
+// Field numbers for google.protobuf.FieldOptions.FeatureSupport.
+const (
+	FieldOptions_FeatureSupport_EditionIntroduced_field_number protoreflect.FieldNumber = 1
+	FieldOptions_FeatureSupport_EditionDeprecated_field_number protoreflect.FieldNumber = 2
+	FieldOptions_FeatureSupport_DeprecationWarning_field_number protoreflect.FieldNumber = 3
+	FieldOptions_FeatureSupport_EditionRemoved_field_number protoreflect.FieldNumber = 4
+)
+
 // Names for google.protobuf.OneofOptions.
 const (
 	OneofOptions_message_name protoreflect.Name = "OneofOptions"
@@ -829,11 +860,13 @@ const (
 	EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated"
 	EnumValueOptions_Features_field_name protoreflect.Name = "features"
 	EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact"
+	EnumValueOptions_FeatureSupport_field_name protoreflect.Name = "feature_support"
 	EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
 
 	EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated"
 	EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features"
 	EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact"
+	EnumValueOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.feature_support"
 	EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option"
 )
 
@@ -842,6 +875,7 @@ const (
 	EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1
 	EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2
 	EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3
+	EnumValueOptions_FeatureSupport_field_number protoreflect.FieldNumber = 4
 	EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
 )
 
@@ -1110,17 +1144,20 @@ const (
 
 // Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
 const (
-	FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition"
-	FeatureSetDefaults_FeatureSetEditionDefault_Features_field_name protoreflect.Name = "features"
+	FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition"
+	FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_name protoreflect.Name = "overridable_features"
+	FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_name protoreflect.Name = "fixed_features"
 
-	FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition"
-	FeatureSetDefaults_FeatureSetEditionDefault_Features_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features"
+	FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition"
+	FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features"
+	FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features"
 )
 
 // Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
 const (
-	FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3
-	FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number protoreflect.FieldNumber = 2
+	FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3
+	FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number protoreflect.FieldNumber = 4
+	FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number protoreflect.FieldNumber = 5
 )
 
 // Names for google.protobuf.SourceCodeInfo.
diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
index fd9015e8e..9a652a2b4 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
@@ -10,7 +10,7 @@ import (
 	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
 )
 
-const File_reflect_protodesc_proto_go_features_proto = "reflect/protodesc/proto/go_features.proto"
+const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto"
 
 // Names for google.protobuf.GoFeatures.
 const (
diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go
index a371f98de..5d5771c2e 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go
@@ -22,13 +22,13 @@ type Export struct{}
 
 // NewError formats a string according to the format specifier and arguments and
 // returns an error that has a "proto" prefix.
-func (Export) NewError(f string, x ...interface{}) error {
+func (Export) NewError(f string, x ...any) error {
 	return errors.New(f, x...)
 }
 
 // enum is any enum type generated by protoc-gen-go
 // and must be a named int32 type.
-type enum = interface{}
+type enum = any
 
 // EnumOf returns the protoreflect.Enum interface over e.
 // It returns nil if e is nil.
@@ -81,7 +81,7 @@ func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNu
 
 // message is any message type generated by protoc-gen-go
 // and must be a pointer to a named struct type.
-type message = interface{}
+type message = any
 
 // legacyMessageWrapper wraps a v2 message as a v1 message.
 type legacyMessageWrapper struct{ m protoreflect.ProtoMessage }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
index bff041edc..f29e6a8fa 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
@@ -68,7 +68,7 @@ func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error {
 	}
 	for _, x := range *ext {
 		ei := getExtensionFieldInfo(x.Type())
-		if ei.funcs.isInit == nil {
+		if ei.funcs.isInit == nil || x.isUnexpandedLazy() {
 			continue
 		}
 		v := x.Value()
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
index 2b8f122c2..4bb0a7a20 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
@@ -99,6 +99,28 @@ func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool {
 	return false
 }
 
+// isUnexpandedLazy returns true if the ExtensionField is lazy and not
+// yet expanded, which means it's present and already checked for
+// initialized required fields.
+func (f *ExtensionField) isUnexpandedLazy() bool {
+	return f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0
+}
+
+// lazyBuffer retrieves the buffer for a lazy extension if it's not yet expanded.
+//
+// The returned buffer has to be kept over whatever operation we're planning,
+// as re-retrieving it will fail after the message is lazily decoded.
+func (f *ExtensionField) lazyBuffer() []byte {
+	// This function might be in the critical path, so check the atomic without
+	// taking a lock first, then only take the lock if needed.
+	if !f.isUnexpandedLazy() {
+		return nil
+	}
+	f.lazy.mu.Lock()
+	defer f.lazy.mu.Unlock()
+	return f.lazy.b
+}
+
 func (f *ExtensionField) lazyInit() {
 	f.lazy.mu.Lock()
 	defer f.lazy.mu.Unlock()
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
index 3fadd241e..78ee47e44 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
@@ -233,9 +233,15 @@ func sizeMessageInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int {
 }
 
 func appendMessageInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+	calculatedSize := f.mi.sizePointer(p.Elem(), opts)
 	b = protowire.AppendVarint(b, f.wiretag)
-	b = protowire.AppendVarint(b, uint64(f.mi.sizePointer(p.Elem(), opts)))
-	return f.mi.marshalAppendPointer(b, p.Elem(), opts)
+	b = protowire.AppendVarint(b, uint64(calculatedSize))
+	before := len(b)
+	b, err := f.mi.marshalAppendPointer(b, p.Elem(), opts)
+	if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil {
+		return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize)
+	}
+	return b, err
 }
 
 func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
@@ -262,14 +268,21 @@ func isInitMessageInfo(p pointer, f *coderFieldInfo) error {
 	return f.mi.checkInitializedPointer(p.Elem())
 }
 
-func sizeMessage(m proto.Message, tagsize int, _ marshalOptions) int {
-	return protowire.SizeBytes(proto.Size(m)) + tagsize
+func sizeMessage(m proto.Message, tagsize int, opts marshalOptions) int {
+	return protowire.SizeBytes(opts.Options().Size(m)) + tagsize
 }
 
 func appendMessage(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) {
+	mopts := opts.Options()
+	calculatedSize := mopts.Size(m)
 	b = protowire.AppendVarint(b, wiretag)
-	b = protowire.AppendVarint(b, uint64(proto.Size(m)))
-	return opts.Options().MarshalAppend(b, m)
+	b = protowire.AppendVarint(b, uint64(calculatedSize))
+	before := len(b)
+	b, err := mopts.MarshalAppend(b, m)
+	if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil {
+		return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize)
+	}
+	return b, err
 }
 
 func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) {
@@ -405,8 +418,8 @@ func consumeGroupType(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf
 	return f.mi.unmarshalPointer(b, p.Elem(), f.num, opts)
 }
 
-func sizeGroup(m proto.Message, tagsize int, _ marshalOptions) int {
-	return 2*tagsize + proto.Size(m)
+func sizeGroup(m proto.Message, tagsize int, opts marshalOptions) int {
+	return 2*tagsize + opts.Options().Size(m)
 }
 
 func appendGroup(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) {
@@ -482,10 +495,14 @@ func appendMessageSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshal
 		b = protowire.AppendVarint(b, f.wiretag)
 		siz := f.mi.sizePointer(v, opts)
 		b = protowire.AppendVarint(b, uint64(siz))
+		before := len(b)
 		b, err = f.mi.marshalAppendPointer(b, v, opts)
 		if err != nil {
 			return b, err
 		}
+		if measuredSize := len(b) - before; siz != measuredSize {
+			return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
+		}
 	}
 	return b, nil
 }
@@ -520,28 +537,34 @@ func isInitMessageSliceInfo(p pointer, f *coderFieldInfo) error {
 	return nil
 }
 
-func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, _ marshalOptions) int {
+func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, opts marshalOptions) int {
+	mopts := opts.Options()
 	s := p.PointerSlice()
 	n := 0
 	for _, v := range s {
 		m := asMessage(v.AsValueOf(goType.Elem()))
-		n += protowire.SizeBytes(proto.Size(m)) + tagsize
+		n += protowire.SizeBytes(mopts.Size(m)) + tagsize
 	}
 	return n
 }
 
 func appendMessageSlice(b []byte, p pointer, wiretag uint64, goType reflect.Type, opts marshalOptions) ([]byte, error) {
+	mopts := opts.Options()
 	s := p.PointerSlice()
 	var err error
 	for _, v := range s {
 		m := asMessage(v.AsValueOf(goType.Elem()))
 		b = protowire.AppendVarint(b, wiretag)
-		siz := proto.Size(m)
+		siz := mopts.Size(m)
 		b = protowire.AppendVarint(b, uint64(siz))
-		b, err = opts.Options().MarshalAppend(b, m)
+		before := len(b)
+		b, err = mopts.MarshalAppend(b, m)
 		if err != nil {
 			return b, err
 		}
+		if measuredSize := len(b) - before; siz != measuredSize {
+			return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
+		}
 	}
 	return b, nil
 }
@@ -582,11 +605,12 @@ func isInitMessageSlice(p pointer, goType reflect.Type) error {
 
 // Slices of messages
 func sizeMessageSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int {
+	mopts := opts.Options()
 	list := listv.List()
 	n := 0
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		m := list.Get(i).Message().Interface()
-		n += protowire.SizeBytes(proto.Size(m)) + tagsize
+		n += protowire.SizeBytes(mopts.Size(m)) + tagsize
 	}
 	return n
 }
@@ -597,13 +621,17 @@ func appendMessageSliceValue(b []byte, listv protoreflect.Value, wiretag uint64,
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		m := list.Get(i).Message().Interface()
 		b = protowire.AppendVarint(b, wiretag)
-		siz := proto.Size(m)
+		siz := mopts.Size(m)
 		b = protowire.AppendVarint(b, uint64(siz))
+		before := len(b)
 		var err error
 		b, err = mopts.MarshalAppend(b, m)
 		if err != nil {
 			return b, err
 		}
+		if measuredSize := len(b) - before; siz != measuredSize {
+			return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
+		}
 	}
 	return b, nil
 }
@@ -651,11 +679,12 @@ var coderMessageSliceValue = valueCoderFuncs{
 }
 
 func sizeGroupSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int {
+	mopts := opts.Options()
 	list := listv.List()
 	n := 0
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		m := list.Get(i).Message().Interface()
-		n += 2*tagsize + proto.Size(m)
+		n += 2*tagsize + mopts.Size(m)
 	}
 	return n
 }
@@ -738,12 +767,13 @@ func makeGroupSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type)
 	}
 }
 
-func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, _ marshalOptions) int {
+func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, opts marshalOptions) int {
+	mopts := opts.Options()
 	s := p.PointerSlice()
 	n := 0
 	for _, v := range s {
 		m := asMessage(v.AsValueOf(messageType.Elem()))
-		n += 2*tagsize + proto.Size(m)
+		n += 2*tagsize + mopts.Size(m)
 	}
 	return n
 }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
index 111b9d16f..fb35f0bae 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
@@ -9,6 +9,7 @@ import (
 	"sort"
 
 	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/internal/errors"
 	"google.golang.org/protobuf/internal/genid"
 	"google.golang.org/protobuf/reflect/protoreflect"
 )
@@ -240,11 +241,16 @@ func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder
 		size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts)
 		size += mapi.valFuncs.size(val, mapValTagSize, opts)
 		b = protowire.AppendVarint(b, uint64(size))
+		before := len(b)
 		b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts)
 		if err != nil {
 			return nil, err
 		}
-		return mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts)
+		b, err = mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts)
+		if measuredSize := len(b) - before; size != measuredSize && err == nil {
+			return nil, errors.MismatchedSizeCalculation(size, measuredSize)
+		}
+		return b, err
 	} else {
 		key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey()
 		val := pointerOfValue(valrv)
@@ -259,7 +265,12 @@ func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder
 		}
 		b = protowire.AppendVarint(b, mapi.valWiretag)
 		b = protowire.AppendVarint(b, uint64(valSize))
-		return f.mi.marshalAppendPointer(b, val, opts)
+		before := len(b)
+		b, err = f.mi.marshalAppendPointer(b, val, opts)
+		if measuredSize := len(b) - before; valSize != measuredSize && err == nil {
+			return nil, errors.MismatchedSizeCalculation(valSize, measuredSize)
+		}
+		return b, err
 	}
 }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
index b7a23faf1..7a16ec13d 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
@@ -26,6 +26,15 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int)
 		}
 		num, _ := protowire.DecodeTag(xi.wiretag)
 		size += messageset.SizeField(num)
+		if fullyLazyExtensions(opts) {
+			// Don't expand the extension; instead, use the buffer to calculate size.
+			if lb := x.lazyBuffer(); lb != nil {
+				// We got hold of the buffer, so it's still lazy.
+				// Don't count the tag size in the extension buffer; it's already added.
+				size += protowire.SizeTag(messageset.FieldMessage) + len(lb) - xi.tagsize
+				continue
+			}
+		}
 		size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts)
 	}
 
@@ -85,6 +94,19 @@ func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts ma
 	xi := getExtensionFieldInfo(x.Type())
 	num, _ := protowire.DecodeTag(xi.wiretag)
 	b = messageset.AppendFieldStart(b, num)
+
+	if fullyLazyExtensions(opts) {
+		// Don't expand the extension if it's still in wire format; instead, use the buffer content.
+		if lb := x.lazyBuffer(); lb != nil {
+			// The tag inside the lazy buffer is a different tag (the extension
+			// number), but what we need here is the tag for FieldMessage:
+			b = protowire.AppendVarint(b, protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType))
+			b = append(b, lb[xi.tagsize:]...)
+			b = messageset.AppendFieldEnd(b)
+			return b, nil
+		}
+	}
+
 	b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts)
 	if err != nil {
 		return b, err
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go
index 185ef2efa..e06ece55a 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go
@@ -14,7 +14,7 @@ import (
 
 // unwrapper unwraps the value to the underlying value.
 // This is implemented by List and Map.
 type unwrapper interface {
-	protoUnwrap() interface{}
+	protoUnwrap() any
 }
 
 // A Converter converts to/from Go reflect.Value types and protobuf protoreflect.Value types.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
index f89136516..18cb96fd7 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
@@ -136,6 +136,6 @@ func (ls *listReflect) NewElement() protoreflect.Value {
 func (ls *listReflect) IsValid() bool {
 	return !ls.v.IsNil()
 }
-func (ls *listReflect) protoUnwrap() interface{} {
+func (ls *listReflect) protoUnwrap() any {
 	return ls.v.Interface()
 }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
index f30b0a057..304244a65 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
@@ -116,6 +116,6 @@ func (ms *mapReflect) NewValue() protoreflect.Value {
 func (ms *mapReflect) IsValid() bool {
 	return !ms.v.IsNil()
 }
-func (ms *mapReflect) protoUnwrap() interface{} {
+func (ms *mapReflect) protoUnwrap() any {
 	return ms.v.Interface()
 }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go
index 845c67d6e..febd21224 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/encode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go
@@ -49,8 +49,11 @@ func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) {
 		return 0
 	}
 	if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() {
-		if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 {
-			return int(size)
+		// The size cache contains the size + 1, to allow the
+		// zero value to be invalid, while also allowing for a
+		// 0 size to be cached.
+		if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size > 0 {
+			return int(size - 1)
 		}
 	}
 	return mi.sizePointerSlow(p, opts)
@@ -60,7 +63,7 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int
 	if flags.ProtoLegacy && mi.isMessageSet {
 		size = sizeMessageSet(mi, p, opts)
 		if mi.sizecacheOffset.IsValid() {
-			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size))
+			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1))
 		}
 		return size
 	}
@@ -84,13 +87,16 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int
 		}
 	}
 	if mi.sizecacheOffset.IsValid() {
-		if size > math.MaxInt32 {
+		if size > (math.MaxInt32 - 1) {
 			// The size is too large for the int32 sizecache field.
 			// We will need to recompute the size when encoding;
 			// unfortunately expensive, but better than invalid output.
-			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1)
+			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), 0)
 		} else {
-			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size))
+			// The size cache contains the size + 1, to allow the
+			// zero value to be invalid, while also allowing for a
+			// 0 size to be cached.
+			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1))
 		}
 	}
 	return size
@@ -149,6 +155,14 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt
 	return b, nil
 }
 
+// fullyLazyExtensions returns true if we should attempt to keep extensions lazy over size and marshal.
+func fullyLazyExtensions(opts marshalOptions) bool {
+	// When deterministic marshaling is requested, force an unmarshal for lazy
+	// extensions to produce a deterministic result, instead of passing through
+	// bytes lazily that may or may not match what Go Protobuf would produce.
+	return opts.flags&piface.MarshalDeterministic == 0
+}
+
 func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) {
 	if ext == nil {
 		return 0
@@ -158,6 +172,14 @@ func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marsha
 		if xi.funcs.size == nil {
 			continue
 		}
+		if fullyLazyExtensions(opts) {
+			// Don't expand the extension; instead, use the buffer to calculate size.
+			if lb := x.lazyBuffer(); lb != nil {
+				// We got hold of the buffer, so it's still lazy.
+				n += len(lb)
+				continue
+			}
+		}
 		n += xi.funcs.size(x.Value(), xi.tagsize, opts)
 	}
 	return n
@@ -176,6 +198,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField,
 		var err error
 		for _, x := range *ext {
 			xi := getExtensionFieldInfo(x.Type())
+			if fullyLazyExtensions(opts) {
+				// Don't expand the extension if it's still in wire format; instead, use the buffer content.
+				if lb := x.lazyBuffer(); lb != nil {
+					b = append(b, lb...)
+					continue
+				}
+			}
 			b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts)
 		}
 		return b, err
@@ -191,6 +220,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField,
 		for _, k := range keys {
 			x := (*ext)[int32(k)]
 			xi := getExtensionFieldInfo(x.Type())
+			if fullyLazyExtensions(opts) {
+				// Don't expand the extension if it's still in wire format; instead, use the buffer content.
+				if lb := x.lazyBuffer(); lb != nil {
+					b = append(b, lb...)
+					continue
+				}
+			}
 			b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts)
 			if err != nil {
 				return b, err
diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go
index cb25b0bae..e31249f64 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go
@@ -53,7 +53,7 @@ type ExtensionInfo struct {
 	// type returned by InterfaceOf may not be identical.
 	//
 	// Deprecated: Use InterfaceOf(xt.Zero()) instead.
-	ExtensionType interface{}
+	ExtensionType any
 
 	// Field is the field number of the extension.
 	//
@@ -95,16 +95,16 @@ func (xi *ExtensionInfo) New() protoreflect.Value {
 func (xi *ExtensionInfo) Zero() protoreflect.Value {
 	return xi.lazyInit().Zero()
 }
-func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value {
+func (xi *ExtensionInfo) ValueOf(v any) protoreflect.Value {
 	return xi.lazyInit().PBValueOf(reflect.ValueOf(v))
 }
-func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} {
+func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) any {
 	return xi.lazyInit().GoValueOf(v).Interface()
 }
 func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool {
 	return xi.lazyInit().IsValidPB(v)
 }
-func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool {
+func (xi *ExtensionInfo) IsValidInterface(v any) bool {
 	return xi.lazyInit().IsValidGo(reflect.ValueOf(v))
 }
 func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
index c2a803bb2..81b2b1a76 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
@@ -97,7 +97,7 @@ func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber {
 func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum {
 	return e
 }
-func (e *legacyEnumWrapper) protoUnwrap() interface{} {
+func (e *legacyEnumWrapper) protoUnwrap() any {
 	v := reflect.New(e.goTyp).Elem()
 	v.SetInt(int64(e.num))
 	return v.Interface()
@@ -167,6 +167,7 @@ func aberrantLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor {
 	ed := &filedesc.Enum{L2: new(filedesc.EnumL2)}
 	ed.L0.FullName = AberrantDeriveFullName(t) // e.g., github_com.user.repo.MyEnum
 	ed.L0.ParentFile = filedesc.SurrogateProto3
+	ed.L1.EditionFeatures = ed.L0.ParentFile.L1.EditionFeatures
 	ed.L2.Values.List = append(ed.L2.Values.List, filedesc.EnumValue{})
 
 	// TODO: Use the presence of a UnmarshalJSON method to determine proto2?
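Note on the lazy-extension hunks above (sizeExtensions/appendExtensions in encode.go and the messageset codec): with this bump, lazily-decoded extensions are passed through from their original wire-format buffers during Size and Marshal, and fullyLazyExtensions disables that passthrough whenever deterministic output is requested. A minimal sketch of the public knob involved, using the real proto.MarshalOptions API; durationpb is only a stand-in message here, since the passthrough engages solely for messages that actually carry lazily-decoded extensions:

	package main

	import (
		"fmt"
		"time"

		"google.golang.org/protobuf/proto"
		"google.golang.org/protobuf/types/known/durationpb"
	)

	func main() {
		m := durationpb.New(90 * time.Second) // stand-in for any generated message

		// Default marshal: lazy extensions (when present) may be copied through
		// verbatim from the buffers they were originally unmarshaled from.
		fast, err := proto.Marshal(m)
		if err != nil {
			panic(err)
		}

		// Deterministic marshal: forces lazy extensions to be expanded and
		// re-encoded, trading the passthrough for stable output ordering.
		det, err := proto.MarshalOptions{Deterministic: true}.Marshal(m)
		if err != nil {
			panic(err)
		}
		fmt.Println(len(fast), len(det))
	}

On either path, a disagreement between the precomputed size and the bytes actually written now surfaces as the explicit SizeMismatchError introduced in internal/errors above, rather than silently producing corrupt output.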
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
index 87b30d050..6e8677ee6 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
@@ -118,7 +118,7 @@ func (xi *ExtensionInfo) initFromLegacy() {
 	xd.L1.Number = protoreflect.FieldNumber(xi.Field)
 	xd.L1.Cardinality = fd.L1.Cardinality
 	xd.L1.Kind = fd.L1.Kind
-	xd.L2.IsPacked = fd.L1.IsPacked
+	xd.L1.EditionFeatures = fd.L1.EditionFeatures
 	xd.L2.Default = fd.L1.Default
 	xd.L1.Extendee = Export{}.MessageDescriptorOf(xi.ExtendedType)
 	xd.L2.Enum = ed
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go
index 9ab091086..b649f1124 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go
@@ -7,7 +7,7 @@ package impl
 import (
 	"bytes"
 	"compress/gzip"
-	"io/ioutil"
+	"io"
 	"sync"
 
 	"google.golang.org/protobuf/internal/filedesc"
@@ -51,7 +51,7 @@ func legacyLoadFileDesc(b []byte) protoreflect.FileDescriptor {
 	if err != nil {
 		panic(err)
 	}
-	b2, err := ioutil.ReadAll(zr)
+	b2, err := io.ReadAll(zr)
 	if err != nil {
 		panic(err)
 	}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
index 2ab2c6297..bf0b6049b 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
@@ -204,6 +204,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName
 		}
 	}
 
+	md.L1.EditionFeatures = md.L0.ParentFile.L1.EditionFeatures
 	// Obtain a list of oneof wrapper types.
 	var oneofWrappers []reflect.Type
 	methods := make([]reflect.Method, 0, 2)
@@ -215,7 +216,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName
 	}
 	for _, fn := range methods {
 		for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
-			if vs, ok := v.Interface().([]interface{}); ok {
+			if vs, ok := v.Interface().([]any); ok {
 				for _, v := range vs {
 					oneofWrappers = append(oneofWrappers, reflect.TypeOf(v))
 				}
@@ -250,6 +251,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName
 			od := &md.L2.Oneofs.List[n]
 			od.L0.FullName = md.FullName().Append(protoreflect.Name(tag))
 			od.L0.ParentFile = md.L0.ParentFile
+			od.L1.EditionFeatures = md.L1.EditionFeatures
 			od.L0.Parent = md
 			od.L0.Index = n
 
@@ -260,6 +262,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName
 					aberrantAppendField(md, f.Type, tag, "", "")
 					fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1]
 					fd.L1.ContainingOneof = od
+					fd.L1.EditionFeatures = od.L1.EditionFeatures
 					od.L1.Fields.List = append(od.L1.Fields.List, fd)
 				}
 			}
@@ -307,14 +310,14 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey,
 	fd.L0.Parent = md
 	fd.L0.Index = n
 
-	if fd.L1.IsWeak || fd.L1.HasPacked {
+	if fd.L1.IsWeak || fd.L1.EditionFeatures.IsPacked {
 		fd.L1.Options = func() protoreflect.ProtoMessage {
 			opts := descopts.Field.ProtoReflect().New()
 			if fd.L1.IsWeak {
 				opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true))
 			}
-			if fd.L1.HasPacked {
-				opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked))
+			if fd.L1.EditionFeatures.IsPacked {
+				opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked))
 			}
 			return opts.Interface()
 		}
@@ -344,6 +347,7 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey,
 		md2.L0.ParentFile = md.L0.ParentFile
 		md2.L0.Parent = md
 		md2.L0.Index = n
+		md2.L1.EditionFeatures = md.L1.EditionFeatures
 
 		md2.L1.IsMapEntry = true
 		md2.L2.Options = func() protoreflect.ProtoMessage {
@@ -563,6 +567,6 @@ func (m aberrantMessage) IsValid() bool {
 func (m aberrantMessage) ProtoMethods() *protoiface.Methods {
 	return aberrantProtoMethods
 }
-func (m aberrantMessage) protoUnwrap() interface{} {
+func (m aberrantMessage) protoUnwrap() any {
 	return m.v.Interface()
 }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go
index 629bacdce..019399d45 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message.go
@@ -35,7 +35,7 @@ type MessageInfo struct {
 	Exporter exporter
 
 	// OneofWrappers is list of pointers to oneof wrapper struct types.
-	OneofWrappers []interface{}
+	OneofWrappers []any
 
 	initMu   sync.Mutex // protects all unexported fields
 	initDone uint32
@@ -47,7 +47,7 @@ type MessageInfo struct {
 // exporter is a function that returns a reference to the ith field of v,
 // where v is a pointer to a struct. It returns nil if it does not support
 // exporting the requested field (e.g., already exported).
-type exporter func(v interface{}, i int) interface{}
+type exporter func(v any, i int) any
 
 // getMessageInfo returns the MessageInfo for any message type that
 // is generated by our implementation of protoc-gen-go (for v2 and on).
@@ -201,7 +201,7 @@ fieldLoop:
 	}
 	for _, fn := range methods {
 		for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
-			if vs, ok := v.Interface().([]interface{}); ok {
+			if vs, ok := v.Interface().([]any); ok {
 				oneofWrappers = vs
 			}
 		}
@@ -256,7 +256,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType {
 
 type mapEntryType struct {
 	desc    protoreflect.MessageDescriptor
-	valType interface{} // zero value of enum or message type
+	valType any // zero value of enum or message type
 }
 
 func (mt mapEntryType) New() protoreflect.Message {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
index d9ea010be..ecb4623d7 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
@@ -20,7 +20,7 @@ type reflectMessageInfo struct {
 	// fieldTypes contains the zero value of an enum or message field.
 	// For lists, it contains the element type.
 	// For maps, it contains the entry value type.
-	fieldTypes map[protoreflect.FieldNumber]interface{}
+	fieldTypes map[protoreflect.FieldNumber]any
 
 	// denseFields is a subset of fields where:
 	//	0 < fieldDesc.Number() < len(denseFields)
@@ -28,7 +28,7 @@ type reflectMessageInfo struct {
 	denseFields []*fieldInfo
 
 	// rangeInfos is a list of all fields (not belonging to a oneof) and oneofs.
-	rangeInfos []interface{} // either *fieldInfo or *oneofInfo
+	rangeInfos []any // either *fieldInfo or *oneofInfo
 
 	getUnknown func(pointer) protoreflect.RawFields
 	setUnknown func(pointer, protoreflect.RawFields)
@@ -224,7 +224,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) {
 		}
 		if ft != nil {
 			if mi.fieldTypes == nil {
-				mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{})
+				mi.fieldTypes = make(map[protoreflect.FieldNumber]any)
 			}
 			mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface()
 		}
@@ -247,39 +247,39 @@ func (m *extensionMap) Range(f func(protoreflect.FieldDescriptor, protoreflect.V
 		}
 	}
 }
-func (m *extensionMap) Has(xt protoreflect.ExtensionType) (ok bool) {
+func (m *extensionMap) Has(xd protoreflect.ExtensionTypeDescriptor) (ok bool) {
 	if m == nil {
 		return false
 	}
-	xd := xt.TypeDescriptor()
 	x, ok := (*m)[int32(xd.Number())]
 	if !ok {
 		return false
 	}
+	if x.isUnexpandedLazy() {
+		// Avoid calling x.Value(), which triggers a lazy unmarshal.
+		return true
+	}
 	switch {
 	case xd.IsList():
 		return x.Value().List().Len() > 0
 	case xd.IsMap():
 		return x.Value().Map().Len() > 0
-	case xd.Message() != nil:
-		return x.Value().Message().IsValid()
 	}
 	return true
 }
-func (m *extensionMap) Clear(xt protoreflect.ExtensionType) {
-	delete(*m, int32(xt.TypeDescriptor().Number()))
+func (m *extensionMap) Clear(xd protoreflect.ExtensionTypeDescriptor) {
+	delete(*m, int32(xd.Number()))
 }
-func (m *extensionMap) Get(xt protoreflect.ExtensionType) protoreflect.Value {
-	xd := xt.TypeDescriptor()
+func (m *extensionMap) Get(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value {
 	if m != nil {
 		if x, ok := (*m)[int32(xd.Number())]; ok {
 			return x.Value()
 		}
 	}
-	return xt.Zero()
+	return xd.Type().Zero()
 }
-func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) {
-	xd := xt.TypeDescriptor()
+func (m *extensionMap) Set(xd protoreflect.ExtensionTypeDescriptor, v protoreflect.Value) {
+	xt := xd.Type()
 	isValid := true
 	switch {
 	case !xt.IsValidValue(v):
@@ -292,7 +292,7 @@ func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value)
 		isValid = v.Message().IsValid()
 	}
 	if !isValid {
-		panic(fmt.Sprintf("%v: assigning invalid value", xt.TypeDescriptor().FullName()))
+		panic(fmt.Sprintf("%v: assigning invalid value", xd.FullName()))
 	}
 
 	if *m == nil {
@@ -302,16 +302,15 @@ func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value)
 	x.Set(xt, v)
 	(*m)[int32(xd.Number())] = x
 }
-func (m *extensionMap) Mutable(xt protoreflect.ExtensionType) protoreflect.Value {
-	xd := xt.TypeDescriptor()
+func (m *extensionMap) Mutable(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value {
 	if xd.Kind() != protoreflect.MessageKind && xd.Kind() != protoreflect.GroupKind && !xd.IsList() && !xd.IsMap() {
 		panic("invalid Mutable on field with non-composite type")
 	}
 	if x, ok := (*m)[int32(xd.Number())]; ok {
 		return x.Value()
 	}
-	v := xt.New()
-	m.Set(xt, v)
+	v := xd.Type().New()
+	m.Set(xd, v)
 	return v
 }
 
@@ -394,7 +393,7 @@ var (
 // MessageOf returns a reflective view over a message. The input must be a
 // pointer to a named Go struct. If the provided type has a ProtoReflect method,
 // it must be implemented by calling this method.
-func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message {
+func (mi *MessageInfo) MessageOf(m any) protoreflect.Message {
 	if reflect.TypeOf(m) != mi.GoReflectType {
 		panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType))
 	}
@@ -422,13 +421,13 @@ func (m *messageIfaceWrapper) Reset() {
 func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message {
 	return (*messageReflectWrapper)(m)
 }
-func (m *messageIfaceWrapper) protoUnwrap() interface{} {
+func (m *messageIfaceWrapper) protoUnwrap() any {
 	return m.p.AsIfaceOf(m.mi.GoReflectType.Elem())
 }
 
 // checkField verifies that the provided field descriptor is valid.
 // Exactly one of the returned values is populated.
-func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionType) { +func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionTypeDescriptor) { var fi *fieldInfo if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) { fi = mi.denseFields[n] @@ -457,7 +456,7 @@ func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, if !ok { panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName())) } - return nil, xtd.Type() + return nil, xtd } panic(fmt.Sprintf("field %v is invalid", fd.FullName())) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go index 741d6e5b6..99dc23c6f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go @@ -23,12 +23,13 @@ func (m *messageState) New() protoreflect.Message { func (m *messageState) Interface() protoreflect.ProtoMessage { return m.protoUnwrap().(protoreflect.ProtoMessage) } -func (m *messageState) protoUnwrap() interface{} { +func (m *messageState) protoUnwrap() any { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageState) ProtoMethods() *protoiface.Methods { - m.messageInfo().init() - return &m.messageInfo().methods + mi := m.messageInfo() + mi.init() + return &mi.methods } // ProtoMessageInfo is a pseudo-internal API for allowing the v1 code @@ -41,8 +42,9 @@ func (m *messageState) ProtoMessageInfo() *MessageInfo { } func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - m.messageInfo().init() - for _, ri := range m.messageInfo().rangeInfos { + mi := m.messageInfo() + mi.init() + for _, ri := range mi.rangeInfos { switch ri := ri.(type) { case *fieldInfo: if ri.has(m.pointer()) { @@ -52,77 +54,86 @@ func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.V } case *oneofInfo: if n := ri.which(m.pointer()); n > 0 { - fi := m.messageInfo().fields[n] + fi := mi.fields[n] if !f(fi.fieldDesc, fi.get(m.pointer())) { return } } } } - m.messageInfo().extensionMap(m.pointer()).Range(f) + mi.extensionMap(m.pointer()).Range(f) } func (m *messageState) Has(fd protoreflect.FieldDescriptor) bool { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.has(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Has(xt) + return mi.extensionMap(m.pointer()).Has(xd) } } func (m *messageState) Clear(fd protoreflect.FieldDescriptor) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.clear(m.pointer()) } else { - m.messageInfo().extensionMap(m.pointer()).Clear(xt) + mi.extensionMap(m.pointer()).Clear(xd) } } func (m *messageState) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.get(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Get(xt) + return mi.extensionMap(m.pointer()).Get(xd) } } func (m *messageState) Set(fd protoreflect.FieldDescriptor, v 
protoreflect.Value) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.set(m.pointer(), v) } else { - m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + mi.extensionMap(m.pointer()).Set(xd, v) } } func (m *messageState) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.mutable(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + return mi.extensionMap(m.pointer()).Mutable(xd) } } func (m *messageState) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.newField() } else { - return xt.New() + return xd.Type().New() } } func (m *messageState) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { - m.messageInfo().init() - if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + mi := m.messageInfo() + mi.init() + if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { return od.Fields().ByNumber(oi.which(m.pointer())) } panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) } func (m *messageState) GetUnknown() protoreflect.RawFields { - m.messageInfo().init() - return m.messageInfo().getUnknown(m.pointer()) + mi := m.messageInfo() + mi.init() + return mi.getUnknown(m.pointer()) } func (m *messageState) SetUnknown(b protoreflect.RawFields) { - m.messageInfo().init() - m.messageInfo().setUnknown(m.pointer(), b) + mi := m.messageInfo() + mi.init() + mi.setUnknown(m.pointer(), b) } func (m *messageState) IsValid() bool { return !m.pointer().IsNil() @@ -143,12 +154,13 @@ func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage { } return (*messageIfaceWrapper)(m) } -func (m *messageReflectWrapper) protoUnwrap() interface{} { +func (m *messageReflectWrapper) protoUnwrap() any { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods { - m.messageInfo().init() - return &m.messageInfo().methods + mi := m.messageInfo() + mi.init() + return &mi.methods } // ProtoMessageInfo is a pseudo-internal API for allowing the v1 code @@ -161,8 +173,9 @@ func (m *messageReflectWrapper) ProtoMessageInfo() *MessageInfo { } func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - m.messageInfo().init() - for _, ri := range m.messageInfo().rangeInfos { + mi := m.messageInfo() + mi.init() + for _, ri := range mi.rangeInfos { switch ri := ri.(type) { case *fieldInfo: if ri.has(m.pointer()) { @@ -172,77 +185,86 @@ func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, proto } case *oneofInfo: if n := ri.which(m.pointer()); n > 0 { - fi := m.messageInfo().fields[n] + fi := mi.fields[n] if !f(fi.fieldDesc, fi.get(m.pointer())) { return } } } } - m.messageInfo().extensionMap(m.pointer()).Range(f) + mi.extensionMap(m.pointer()).Range(f) } func (m *messageReflectWrapper) Has(fd protoreflect.FieldDescriptor) bool { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + 
if fi, xd := mi.checkField(fd); fi != nil { return fi.has(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Has(xt) + return mi.extensionMap(m.pointer()).Has(xd) } } func (m *messageReflectWrapper) Clear(fd protoreflect.FieldDescriptor) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.clear(m.pointer()) } else { - m.messageInfo().extensionMap(m.pointer()).Clear(xt) + mi.extensionMap(m.pointer()).Clear(xd) } } func (m *messageReflectWrapper) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.get(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Get(xt) + return mi.extensionMap(m.pointer()).Get(xd) } } func (m *messageReflectWrapper) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.set(m.pointer(), v) } else { - m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + mi.extensionMap(m.pointer()).Set(xd, v) } } func (m *messageReflectWrapper) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.mutable(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + return mi.extensionMap(m.pointer()).Mutable(xd) } } func (m *messageReflectWrapper) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.newField() } else { - return xt.New() + return xd.Type().New() } } func (m *messageReflectWrapper) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { - m.messageInfo().init() - if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + mi := m.messageInfo() + mi.init() + if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { return od.Fields().ByNumber(oi.which(m.pointer())) } panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) } func (m *messageReflectWrapper) GetUnknown() protoreflect.RawFields { - m.messageInfo().init() - return m.messageInfo().getUnknown(m.pointer()) + mi := m.messageInfo() + mi.init() + return mi.getUnknown(m.pointer()) } func (m *messageReflectWrapper) SetUnknown(b protoreflect.RawFields) { - m.messageInfo().init() - m.messageInfo().setUnknown(m.pointer(), b) + mi := m.messageInfo() + mi.init() + mi.setUnknown(m.pointer(), b) } func (m *messageReflectWrapper) IsValid() bool { return !m.pointer().IsNil() diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 517e94434..da685e8a2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -16,7 +16,7 @@ import ( const UnsafeEnabled = false // Pointer is an opaque pointer type. 
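Beyond the alias sweep, every method in message_reflect_gen.go above was rewritten to hoist the repeated m.messageInfo() accessor call into a local mi. A toy reconstruction of the pattern; the names here are illustrative, not the vendored ones:

package main

import (
	"fmt"
	"sync"
)

// toyInfo stands in for MessageInfo: lazily initialized state
// behind an init() guarded by sync.Once.
type toyInfo struct {
	once   sync.Once
	fields []string
}

func (mi *toyInfo) init() {
	mi.once.Do(func() { mi.fields = []string{"a", "b"} })
}

type toyMessage struct{ info *toyInfo }

func (m *toyMessage) messageInfo() *toyInfo { return m.info }

// Before: each use re-invokes the accessor.
func (m *toyMessage) fieldsBefore() []string {
	m.messageInfo().init()
	return m.messageInfo().fields
}

// After, mirroring these hunks: one accessor call, shared by
// init() and the subsequent reads.
func (m *toyMessage) fieldsAfter() []string {
	mi := m.messageInfo()
	mi.init()
	return mi.fields
}

func main() {
	m := &toyMessage{info: &toyInfo{}}
	fmt.Println(m.fieldsBefore(), m.fieldsAfter())
}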
-type Pointer interface{} +type Pointer any // offset represents the offset to a struct field, accessible from a pointer. // The offset is the field index into a struct. @@ -62,7 +62,7 @@ func pointerOfValue(v reflect.Value) pointer { } // pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v interface{}) pointer { +func pointerOfIface(v any) pointer { return pointer{v: reflect.ValueOf(v)} } @@ -93,7 +93,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value { // AsIfaceOf treats p as a pointer to an object of type t and returns the value. // It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { +func (p pointer) AsIfaceOf(t reflect.Type) any { return p.AsValueOf(t).Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 4b020e311..5f20ca5d8 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -50,7 +50,7 @@ func pointerOfValue(v reflect.Value) pointer { } // pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v interface{}) pointer { +func pointerOfIface(v any) pointer { type ifaceHeader struct { Type unsafe.Pointer Data unsafe.Pointer @@ -80,7 +80,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value { // AsIfaceOf treats p as a pointer to an object of type t and returns the value. // It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { +func (p pointer) AsIfaceOf(t reflect.Type) any { // TODO: Use tricky unsafe magic to directly create ifaceHeader. return p.AsValueOf(t).Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go index 1665a68e5..a1f09162d 100644 --- a/vendor/google.golang.org/protobuf/internal/order/range.go +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -18,7 +18,7 @@ type messageField struct { } var messageFieldPool = sync.Pool{ - New: func() interface{} { return new([]messageField) }, + New: func() any { return new([]messageField) }, } type ( @@ -69,7 +69,7 @@ type mapEntry struct { } var mapEntryPool = sync.Pool{ - New: func() interface{} { return new([]mapEntry) }, + New: func() any { return new([]mapEntry) }, } type ( diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index a50fcfb49..dbbf1f686 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 33 - Patch = 0 + Minor = 34 + Patch = 2 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index e5b03b567..d75a6534c 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -51,6 +51,8 @@ type UnmarshalOptions struct { // Unmarshal parses the wire-format message in b and places the result in m. // The provided message must be mutable (e.g., a non-nil pointer to a message). +// +// See the [UnmarshalOptions] type if you need more control. 
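The new doc comments on Unmarshal here (and on Marshal in the encode.go hunks below) cross-reference the options structs. A minimal round-trip sketch of that extra control, using the well-known Duration message so it stays self-contained:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Deterministic output is one of the two MarshalInputFlags that
	// the new MarshalOptions.flags() helper derives.
	b, err := proto.MarshalOptions{Deterministic: true}.Marshal(durationpb.New(3 * time.Second))
	if err != nil {
		panic(err)
	}

	// Plain Unmarshal applies the default recursion limit, as the
	// doc comment above describes.
	d := &durationpb.Duration{}
	if err := proto.Unmarshal(b, d); err != nil {
		panic(err)
	}

	// UnmarshalOptions is the "more control" escape hatch, e.g.
	// dropping unknown fields instead of preserving them.
	err = proto.UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, d)
	fmt.Println(d.AsDuration(), err) // 3s <nil>
}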
func Unmarshal(b []byte, m Message) error { _, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect()) return err diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index 4fed202f9..1f847bcc3 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -5,12 +5,17 @@ package proto import ( + "errors" + "fmt" + "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/runtime/protoiface" + + protoerrors "google.golang.org/protobuf/internal/errors" ) // MarshalOptions configures the marshaler. @@ -70,7 +75,32 @@ type MarshalOptions struct { UseCachedSize bool } +// flags turns the specified MarshalOptions (user-facing) into +// protoiface.MarshalInputFlags (used internally by the marshaler). +// +// See impl.marshalOptions.Options for the inverse operation. +func (o MarshalOptions) flags() protoiface.MarshalInputFlags { + var flags protoiface.MarshalInputFlags + + // Note: o.AllowPartial is always forced to true by MarshalOptions.marshal, + // which is why it is not a part of MarshalInputFlags. + + if o.Deterministic { + flags |= protoiface.MarshalDeterministic + } + + if o.UseCachedSize { + flags |= protoiface.MarshalUseCachedSize + } + + return flags +} + // Marshal returns the wire-format encoding of m. +// +// This is the most common entry point for encoding a Protobuf message. +// +// See the [MarshalOptions] type if you need more control. func Marshal(m Message) ([]byte, error) { // Treat nil message interface as an empty message; nothing to output. if m == nil { @@ -116,6 +146,9 @@ func emptyBytesForMessage(m Message) []byte { // MarshalAppend appends the wire-format encoding of m to b, // returning the result. +// +// This is a less common entry point than [Marshal], which is only needed if you +// need to supply your own buffers for performance reasons. func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { // Treat nil message interface as an empty message; nothing to append. if m == nil { @@ -145,12 +178,7 @@ func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoifac in := protoiface.MarshalInput{ Message: m, Buf: b, - } - if o.Deterministic { - in.Flags |= protoiface.MarshalDeterministic - } - if o.UseCachedSize { - in.Flags |= protoiface.MarshalUseCachedSize + Flags: o.flags(), } if methods.Size != nil { sout := methods.Size(protoiface.SizeInput{ @@ -168,6 +196,10 @@ func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoifac out.Buf, err = o.marshalMessageSlow(b, m) } if err != nil { + var mismatch *protoerrors.SizeMismatchError + if errors.As(err, &mismatch) { + return out, fmt.Errorf("marshaling %s: %v", string(m.Descriptor().FullName()), err) + } return out, err } if allowPartial { diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index 17899a3a7..d248f2928 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -11,18 +11,21 @@ import ( // HasExtension reports whether an extension field is populated. // It returns false if m is invalid or if xt does not extend m. 
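Before the reworked HasExtension body below, a usage sketch of these helpers with their new any-based signatures. It uses gofeaturespb.E_Go, a real generated extension of FeatureSet that ships with the module, so the snippet is runnable as-is:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/gofeaturespb"
)

func main() {
	fs := &descriptorpb.FeatureSet{}
	fmt.Println(proto.HasExtension(fs, gofeaturespb.E_Go)) // false: unpopulated
	fmt.Println(proto.HasExtension(nil, gofeaturespb.E_Go)) // false: nil-safe

	// GetExtension/SetExtension now traffic in any. While the
	// extension is unpopulated, Get returns the default value.
	_ = proto.GetExtension(fs, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures)
	proto.SetExtension(fs, gofeaturespb.E_Go, &gofeaturespb.GoFeatures{
		LegacyUnmarshalJsonEnum: proto.Bool(true),
	})
	fmt.Println(proto.HasExtension(fs, gofeaturespb.E_Go)) // true

	// RangeExtensions iterates the populated extensions.
	proto.RangeExtensions(fs, func(xt protoreflect.ExtensionType, v any) bool {
		fmt.Println(xt.TypeDescriptor().FullName())
		return true
	})
}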
func HasExtension(m Message, xt protoreflect.ExtensionType) bool { - // Treat nil message interface as an empty message; no populated fields. - if m == nil { + // Treat nil message interface or descriptor as an empty message; no populated + // fields. + if m == nil || xt == nil { return false } // As a special-case, we report invalid or mismatching descriptors // as always not being populated (since they aren't). - if xt == nil || m.ProtoReflect().Descriptor() != xt.TypeDescriptor().ContainingMessage() { + mr := m.ProtoReflect() + xd := xt.TypeDescriptor() + if mr.Descriptor() != xd.ContainingMessage() { return false } - return m.ProtoReflect().Has(xt.TypeDescriptor()) + return mr.Has(xd) } // ClearExtension clears an extension field such that subsequent @@ -36,7 +39,7 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. // It panics if xt does not extend m. -func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { +func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. if m == nil { return xt.InterfaceOf(xt.Zero()) @@ -48,7 +51,7 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. -func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { +func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) @@ -75,7 +78,7 @@ func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { // It returns immediately if f returns false. // While iterating, mutating operations may only be performed // on the current extension field. -func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) { +func RangeExtensions(m Message, f func(protoreflect.ExtensionType, any) bool) { // Treat nil message interface as an empty message; nothing to range over.
if m == nil { return diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go index 312d5d45c..575d14831 100644 --- a/vendor/google.golang.org/protobuf/proto/messageset.go +++ b/vendor/google.golang.org/protobuf/proto/messageset.go @@ -47,11 +47,16 @@ func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]b func (o MarshalOptions) marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { b = messageset.AppendFieldStart(b, fd.Number()) b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType) - b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface()))) + calculatedSize := o.Size(value.Message().Interface()) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) b, err := o.marshalMessage(b, value.Message()) if err != nil { return b, err } + if measuredSize := len(b) - before; calculatedSize != measuredSize { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } b = messageset.AppendFieldEnd(b) return b, nil } diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go index f1692b49b..052fb5ae3 100644 --- a/vendor/google.golang.org/protobuf/proto/size.go +++ b/vendor/google.golang.org/protobuf/proto/size.go @@ -34,6 +34,7 @@ func (o MarshalOptions) size(m protoreflect.Message) (size int) { if methods != nil && methods.Size != nil { out := methods.Size(protoiface.SizeInput{ Message: m, + Flags: o.flags(), }) return out.Size } @@ -42,6 +43,7 @@ func (o MarshalOptions) size(m protoreflect.Message) (size int) { // This case is mainly used for legacy types with a Marshal method. out, _ := methods.Marshal(protoiface.MarshalInput{ Message: m, + Flags: o.flags(), }) return len(out.Buf) } diff --git a/vendor/google.golang.org/protobuf/protoadapt/convert.go b/vendor/google.golang.org/protobuf/protoadapt/convert.go new file mode 100644 index 000000000..ea276d15a --- /dev/null +++ b/vendor/google.golang.org/protobuf/protoadapt/convert.go @@ -0,0 +1,31 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoadapt bridges the original and new proto APIs. +package protoadapt + +import ( + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// MessageV1 is the original [github.com/golang/protobuf/proto.Message] type. +type MessageV1 = protoiface.MessageV1 + +// MessageV2 is the [google.golang.org/protobuf/proto.Message] type used by the +// current [google.golang.org/protobuf] module, adding support for reflection. +type MessageV2 = proto.Message + +// MessageV1Of converts a v2 message to a v1 message. +// It returns nil if m is nil. +func MessageV1Of(m MessageV2) MessageV1 { + return protoimpl.X.ProtoMessageV1Of(m) +} + +// MessageV2Of converts a v1 message to a v2 message. +// It returns nil if m is nil. 
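The new protoadapt package gives the vendor tree a V1/V2 bridge without importing the legacy github.com/golang/protobuf module. A minimal sketch; any modern generated message (emptypb.Empty here) already satisfies both interfaces, so it can stand in for a legacy API boundary:

package main

import (
	"fmt"

	"google.golang.org/protobuf/protoadapt"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	var v2 protoadapt.MessageV2 = &emptypb.Empty{}

	// Hand the message to APIs that still want the old proto.Message,
	// then recover the reflection-capable view.
	v1 := protoadapt.MessageV1Of(v2)
	back := protoadapt.MessageV2Of(v1)
	fmt.Printf("%T %T\n", v1, back)
}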
+func MessageV2Of(m MessageV1) MessageV2 { + return protoimpl.X.ProtoMessageV2Of(m) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index baa0cc621..8fbecb4f5 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -13,6 +13,7 @@ package protodesc import ( + "google.golang.org/protobuf/internal/editionssupport" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/pragma" @@ -91,15 +92,17 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot switch fd.GetSyntax() { case "proto2", "": f.L1.Syntax = protoreflect.Proto2 + f.L1.Edition = filedesc.EditionProto2 case "proto3": f.L1.Syntax = protoreflect.Proto3 + f.L1.Edition = filedesc.EditionProto3 case "editions": f.L1.Syntax = protoreflect.Editions f.L1.Edition = fromEditionProto(fd.GetEdition()) default: return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) } - if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < SupportedEditionsMinimum || fd.GetEdition() > SupportedEditionsMaximum) { + if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) { return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) } f.L1.Path = fd.GetName() @@ -114,9 +117,7 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot opts = proto.Clone(opts).(*descriptorpb.FileOptions) f.L2.Options = func() protoreflect.ProtoMessage { return opts } } - if f.L1.Syntax == protoreflect.Editions { - initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures()) - } + initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures()) f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) for _, i := range fd.GetPublicDependency() { @@ -219,10 +220,10 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil { return nil, err } - if err := validateMessageDeclarations(f.L1.Messages.List, fd.GetMessageType()); err != nil { + if err := validateMessageDeclarations(f, f.L1.Messages.List, fd.GetMessageType()); err != nil { return nil, err } - if err := validateExtensionDeclarations(f.L1.Extensions.List, fd.GetExtension()); err != nil { + if err := validateExtensionDeclarations(f, f.L1.Extensions.List, fd.GetExtension()); err != nil { return nil, err } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index b3278163c..856175542 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -69,9 +69,7 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { return nil, err } - if m.Base.L0.ParentFile.Syntax() == protoreflect.Editions { - m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures()) - } + m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures()) if opts := md.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.MessageOptions) m.L2.Options = func() protoreflect.ProtoMessage { return 
opts } @@ -146,13 +144,15 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil { return nil, err } + f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures()) f.L1.IsProto3Optional = fd.GetProto3Optional() if opts := fd.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.FieldOptions) f.L1.Options = func() protoreflect.ProtoMessage { return opts } f.L1.IsWeak = opts.GetWeak() - f.L1.HasPacked = opts.Packed != nil - f.L1.IsPacked = opts.GetPacked() + if opts.Packed != nil { + f.L1.EditionFeatures.IsPacked = opts.GetPacked() + } } f.L1.Number = protoreflect.FieldNumber(fd.GetNumber()) f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel()) @@ -163,32 +163,12 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc f.L1.StringName.InitJSON(fd.GetJsonName()) } - if f.Base.L0.ParentFile.Syntax() == protoreflect.Editions { - f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures()) - - if f.L1.EditionFeatures.IsLegacyRequired { - f.L1.Cardinality = protoreflect.Required - } - // We reuse the existing field because the old option `[packed = - // true]` is mutually exclusive with the editions feature. - if canBePacked(fd) { - f.L1.HasPacked = true - f.L1.IsPacked = f.L1.EditionFeatures.IsPacked - } - - // We pretend this option is always explicitly set because the only - // use of HasEnforceUTF8 is to determine whether to use EnforceUTF8 - // or to return the appropriate default. - // When using editions we either parse the option or resolve the - // appropriate default here (instead of later when this option is - // requested from the descriptor). - // In proto2/proto3 syntax HasEnforceUTF8 might be false. 
- f.L1.HasEnforceUTF8 = true - f.L1.EnforceUTF8 = f.L1.EditionFeatures.IsUTF8Validated + if f.L1.EditionFeatures.IsLegacyRequired { + f.L1.Cardinality = protoreflect.Required + } - if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded { - f.L1.Kind = protoreflect.GroupKind - } + if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded { + f.L1.Kind = protoreflect.GroupKind } } return fs, nil @@ -201,12 +181,10 @@ func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDesc if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil { return nil, err } + o.L1.EditionFeatures = mergeEditionFeatures(parent, od.GetOptions().GetFeatures()) if opts := od.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.OneofOptions) o.L1.Options = func() protoreflect.ProtoMessage { return opts } - if parent.Syntax() == protoreflect.Editions { - o.L1.EditionFeatures = mergeEditionFeatures(parent, opts.GetFeatures()) - } } } return os, nil @@ -220,10 +198,13 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil { return nil, err } + x.L1.EditionFeatures = mergeEditionFeatures(parent, xd.GetOptions().GetFeatures()) if opts := xd.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.FieldOptions) x.L2.Options = func() protoreflect.ProtoMessage { return opts } - x.L2.IsPacked = opts.GetPacked() + if opts.Packed != nil { + x.L1.EditionFeatures.IsPacked = opts.GetPacked() + } } x.L1.Number = protoreflect.FieldNumber(xd.GetNumber()) x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel()) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index 254ca5854..f3cebab29 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -46,6 +46,11 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) } + if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. + // But maps never actually use delimited encoding. (At least for now...) 
+ f.L1.Kind = protoreflect.MessageKind + } if fd.DefaultValue != nil { v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable) if err != nil { diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index e4dcaf876..6de31c2eb 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -45,11 +45,11 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri if allowAlias && !foundAlias { return errors.New("enum %q allows aliases, but none were found", e.FullName()) } - if e.Syntax() == protoreflect.Proto3 { + if !e.IsClosed() { if v := e.Values().Get(0); v.Number() != 0 { - return errors.New("enum %q using proto3 semantics must have zero number for the first value", v.FullName()) + return errors.New("enum %q using open semantics must have zero number for the first value", v.FullName()) } - // Verify that value names in proto3 do not conflict if the + // Verify that value names in open enums do not conflict if the // case-insensitive prefix is removed. // See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055 names := map[string]protoreflect.EnumValueDescriptor{} @@ -58,7 +58,7 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri v1 := e.Values().Get(i) s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix)) if v2, ok := names[s]; ok && v1.Number() != v2.Number() { - return errors.New("enum %q using proto3 semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) + return errors.New("enum %q using open semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) } names[s] = v1 } @@ -80,7 +80,9 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri return nil } -func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { +func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { + // There are a few limited exceptions only for proto3 + isProto3 := file.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) for i, md := range mds { m := &ms[i] @@ -107,25 +109,13 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if isMessageSet && !flags.ProtoLegacy { return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) } - if isMessageSet && (m.Syntax() == protoreflect.Proto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { + if isMessageSet && (isProto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) } - if m.Syntax() == protoreflect.Proto3 { + if isProto3 { if m.ExtensionRanges().Len() > 0 { return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) } - // Verify that field names in proto3 do not conflict if lowercased - // with all underscores removed. 
- // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847 - names := map[string]protoreflect.FieldDescriptor{} - for i := 0; i < m.Fields().Len(); i++ { - f1 := m.Fields().Get(i) - s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1) - if f2, ok := names[s]; ok { - return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name()) - } - names[s] = f1 - } } for j, fd := range md.GetField() { @@ -149,7 +139,7 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee()) } if f.L1.IsProto3Optional { - if f.Syntax() != protoreflect.Proto3 { + if !isProto3 { return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName()) } if f.Cardinality() != protoreflect.Optional { @@ -162,26 +152,29 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if f.IsWeak() && !flags.ProtoLegacy { return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) } - if f.IsWeak() && (f.Syntax() != protoreflect.Proto2 || !isOptionalMessage(f) || f.ContainingOneof() != nil) { + if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) { return errors.New("message field %q may only be weak for an optional message", f.FullName()) } if f.IsPacked() && !isPackable(f) { return errors.New("message field %q is not packable", f.FullName()) } - if err := checkValidGroup(f); err != nil { + if err := checkValidGroup(file, f); err != nil { return errors.New("message field %q is an invalid group: %v", f.FullName(), err) } if err := checkValidMap(f); err != nil { return errors.New("message field %q is an invalid map: %v", f.FullName(), err) } - if f.Syntax() == protoreflect.Proto3 { + if isProto3 { if f.Cardinality() == protoreflect.Required { return errors.New("message field %q using proto3 semantics cannot be required", f.FullName()) } - if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().Syntax() != protoreflect.Proto3 { - return errors.New("message field %q using proto3 semantics may only depend on a proto3 enum", f.FullName()) + if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() { + return errors.New("message field %q using proto3 semantics may only depend on open enums", f.FullName()) } } + if f.Cardinality() == protoreflect.Optional && !f.HasPresence() && f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() { + return errors.New("message field %q with implicit presence may only use open enums", f.FullName()) + } } seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs for j := range md.GetOneofDecl() { @@ -215,17 +208,17 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil { return err } - if err := validateMessageDeclarations(m.L1.Messages.List, md.GetNestedType()); err != nil { + if err := validateMessageDeclarations(file, m.L1.Messages.List, md.GetNestedType()); err != nil { return err } - if err := validateExtensionDeclarations(m.L1.Extensions.List, md.GetExtension()); err != nil { + if err := validateExtensionDeclarations(file, m.L1.Extensions.List, md.GetExtension()); err != nil { return err } } return nil } -func validateExtensionDeclarations(xs 
[]filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { +func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { for i, xd := range xds { x := &xs[i] // NOTE: Avoid using the IsValid method since extensions to MessageSet @@ -267,13 +260,13 @@ func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb. if x.IsPacked() && !isPackable(x) { return errors.New("extension field %q is not packable", x.FullName()) } - if err := checkValidGroup(x); err != nil { + if err := checkValidGroup(f, x); err != nil { return errors.New("extension field %q is an invalid group: %v", x.FullName(), err) } if md := x.Message(); md != nil && md.IsMapEntry() { return errors.New("extension field %q cannot be a map entry", x.FullName()) } - if x.Syntax() == protoreflect.Proto3 { + if f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) { switch x.ContainingMessage().FullName() { case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName(): case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName(): @@ -309,21 +302,25 @@ func isPackable(fd protoreflect.FieldDescriptor) bool { // checkValidGroup reports whether fd is a valid group according to the same // rules that protoc imposes. -func checkValidGroup(fd protoreflect.FieldDescriptor) error { +func checkValidGroup(f *filedesc.File, fd protoreflect.FieldDescriptor) error { md := fd.Message() switch { case fd.Kind() != protoreflect.GroupKind: return nil - case fd.Syntax() == protoreflect.Proto3: + case f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3): return errors.New("invalid under proto3 semantics") case md == nil || md.IsPlaceholder(): return errors.New("message must be resolvable") - case fd.FullName().Parent() != md.FullName().Parent(): - return errors.New("message and field must be declared in the same scope") - case !unicode.IsUpper(rune(md.Name()[0])): - return errors.New("message name must start with an uppercase") - case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): - return errors.New("field name must be lowercased form of the message name") + } + if f.L1.Edition < fromEditionProto(descriptorpb.Edition_EDITION_2023) { + switch { + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case !unicode.IsUpper(rune(md.Name()[0])): + return errors.New("message name must start with an uppercase") + case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): + return errors.New("field name must be lowercased form of the message name") + } } return nil } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index 2a6b29d17..804830eda 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -17,11 +17,6 @@ import ( gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" ) -const ( - SupportedEditionsMinimum = descriptorpb.Edition_EDITION_PROTO2 - SupportedEditionsMaximum = descriptorpb.Edition_EDITION_2023 -) - var defaults = &descriptorpb.FeatureSetDefaults{} var defaultsCacheMu sync.Mutex var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet) @@ -67,18 +62,20 @@ func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet { fmt.Fprintf(os.Stderr, "internal error: 
unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb) os.Exit(1) } - fs := defaults.GetDefaults()[0].GetFeatures() + fsed := defaults.GetDefaults()[0] // Using a linear search for now. // Editions are guaranteed to be sorted and thus we could use a binary search. // Given that there are only a handful of editions (with one more per year) // there is not much reason to use a binary search. for _, def := range defaults.GetDefaults() { if def.GetEdition() <= edpb { - fs = def.GetFeatures() + fsed = def } else { break } } + fs := proto.Clone(fsed.GetFixedFeatures()).(*descriptorpb.FeatureSet) + proto.Merge(fs, fsed.GetOverridableFeatures()) defaultsCache[ed] = fs return fs } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go index 9d6e05420..a5de8d400 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -73,6 +73,16 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() { p.Syntax = proto.String(file.Syntax().String()) } + if file.Syntax() == protoreflect.Editions { + desc := file + if fileImportDesc, ok := file.(protoreflect.FileImport); ok { + desc = fileImportDesc.FileDescriptor + } + + if editionsInterface, ok := desc.(interface{ Edition() int32 }); ok { + p.Edition = descriptorpb.Edition(editionsInterface.Edition()).Enum() + } + } return p } @@ -153,6 +163,18 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() { p.Proto3Optional = proto.Bool(true) } + if field.Syntax() == protoreflect.Editions { + // Editions have no group keyword, this type is only set so that downstream users continue + // treating this as delimited encoding. + if p.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP { + p.Type = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE.Enum() + } + // Editions have no required keyword, this label is only set so that downstream users continue + // treating it as required. + if p.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED { + p.Label = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum() + } + } if field.HasDefault() { def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor) if err != nil && field.DefaultEnumValue() != nil { diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index 00b01fbd8..c85bfaa5b 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -161,7 +161,7 @@ const ( // IsValid reports whether the syntax is valid. 
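The getFeatureSetFor change above is subtle: defaults now arrive split into fixed and overridable halves, and the resolved set is a deep copy of the former overlaid with the latter. A standalone sketch of that Clone-then-Merge composition; the literal values here are stand-ins, since the real ones come from the module's embedded edition defaults:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fixed := &descriptorpb.FeatureSet{
		FieldPresence: descriptorpb.FeatureSet_EXPLICIT.Enum(),
	}
	overridable := &descriptorpb.FeatureSet{
		EnumType: descriptorpb.FeatureSet_OPEN.Enum(),
	}

	// Clone first so the cached result never aliases the defaults,
	// then overlay the overridable half.
	fs := proto.Clone(fixed).(*descriptorpb.FeatureSet)
	proto.Merge(fs, overridable)
	fmt.Println(fs.GetFieldPresence(), fs.GetEnumType()) // EXPLICIT OPEN
}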
func (s Syntax) IsValid() bool { switch s { - case Proto2, Proto3: + case Proto2, Proto3, Editions: return true default: return false diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 7dcc2ff09..ea154eec4 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -373,6 +373,8 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendRepeatedField(b, "edition_defaults", (*SourcePath).appendFieldOptions_EditionDefault) case 21: b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 22: + b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -483,6 +485,8 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 3: b = p.appendSingularField(b, "debug_redact", nil) + case 4: + b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -519,6 +523,23 @@ func (p *SourcePath) appendFieldOptions_EditionDefault(b []byte) []byte { return b } +func (p *SourcePath) appendFieldOptions_FeatureSupport(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "edition_introduced", nil) + case 2: + b = p.appendSingularField(b, "edition_deprecated", nil) + case 3: + b = p.appendSingularField(b, "deprecation_warning", nil) + case 4: + b = p.appendSingularField(b, "edition_removed", nil) + } + return b +} + func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { if len(*p) == 0 { return b diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 60ff62b4c..cd8fadbaf 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -510,7 +510,7 @@ type ExtensionType interface { // // ValueOf is more extensive than protoreflect.ValueOf for a given field's // value as it has more type information available. - ValueOf(interface{}) Value + ValueOf(any) Value // InterfaceOf completely unwraps the Value to the underlying Go type. // InterfaceOf panics if the input is nil or does not represent the @@ -519,13 +519,13 @@ type ExtensionType interface { // // InterfaceOf is able to unwrap the Value further than Value.Interface // as it has more type information available. - InterfaceOf(Value) interface{} + InterfaceOf(Value) any // IsValidValue reports whether the Value is valid to assign to the field. IsValidValue(Value) bool // IsValidInterface reports whether the input is valid to assign to the field. - IsValidInterface(interface{}) bool + IsValidInterface(any) bool } // EnumDescriptor describes an enum and @@ -544,6 +544,12 @@ type EnumDescriptor interface { // ReservedRanges is a list of reserved ranges of enum numbers. ReservedRanges() EnumRanges + // IsClosed reports whether this enum uses closed semantics. + // See https://protobuf.dev/programming-guides/enum/#definitions. 
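Syntax.IsValid accepting Editions is observable through protodesc. A sketch, assuming a dependency-free descriptor so the resolver argument does no real work:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fd := &descriptorpb.FileDescriptorProto{
		Name:    proto.String("example.proto"),
		Syntax:  proto.String("editions"),
		Edition: descriptorpb.Edition_EDITION_2023.Enum(),
	}
	f, err := protodesc.NewFile(fd, protoregistry.GlobalFiles)
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Syntax(), f.Syntax().IsValid()) // editions true
}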
+ // Note: the Go protobuf implementation is not spec compliant and treats + // all enums as open enums. + IsClosed() bool + isEnumDescriptor } type isEnumDescriptor interface{ ProtoType(EnumDescriptor) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go index 7ced876f4..75f83a2af 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -32,11 +32,11 @@ const ( type value struct { pragma.DoNotCompare // 0B - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface interface{} // 16B + typ valueType // 8B + num uint64 // 8B + str string // 16B + bin []byte // 24B + iface any // 16B } func valueOfString(v string) Value { @@ -45,7 +45,7 @@ func valueOfString(v string) Value { func valueOfBytes(v []byte) Value { return Value{typ: bytesType, bin: v} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { return Value{typ: ifaceType, iface: v} } @@ -55,6 +55,6 @@ func (v Value) getString() string { func (v Value) getBytes() []byte { return v.bin } -func (v Value) getIface() interface{} { +func (v Value) getIface() any { return v.iface } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 160309731..9fe83cef5 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -69,8 +69,8 @@ import ( // composite Value. Modifying an empty, read-only value panics. type Value value -// The protoreflect API uses a custom Value union type instead of interface{} -// to keep the future open for performance optimizations. Using an interface{} +// The protoreflect API uses a custom Value union type instead of any +// to keep the future open for performance optimizations. Using an any // always incurs an allocation for primitives (e.g., int64) since it needs to // be boxed on the heap (as interfaces can only contain pointers natively). // Instead, we represent the Value union as a flat struct that internally keeps @@ -85,7 +85,7 @@ type Value value // ValueOf returns a Value initialized with the concrete value stored in v. // This panics if the type does not match one of the allowed types in the // Value union. -func ValueOf(v interface{}) Value { +func ValueOf(v any) Value { switch v := v.(type) { case nil: return Value{} @@ -192,10 +192,10 @@ func (v Value) IsValid() bool { return v.typ != nilType } -// Interface returns v as an interface{}. +// Interface returns v as an any. // // Invariant: v == ValueOf(v).Interface() -func (v Value) Interface() interface{} { +func (v Value) Interface() any { switch v.typ { case nilType: return nil @@ -406,8 +406,8 @@ func (k MapKey) IsValid() bool { return Value(k).IsValid() } -// Interface returns k as an interface{}. -func (k MapKey) Interface() interface{} { +// Interface returns k as an any. 
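The new EnumDescriptor.IsClosed accessor is what replaces the syntax-based checks rewritten in desc_validate.go above. A quick probe against two enums that ship with the module:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// descriptor.proto is proto2, whose enums are closed;
	// struct.proto is proto3, whose enums are open.
	fmt.Println(descriptorpb.Edition(0).Descriptor().IsClosed()) // true
	fmt.Println(structpb.NullValue(0).Descriptor().IsClosed())   // false
}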
+func (k MapKey) Interface() any { return Value(k).Interface() } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index b1fdbe3e8..7f3583ead 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -45,7 +45,7 @@ var ( // typeOf returns a pointer to the Go type information. // The pointer is comparable and equal if and only if the types are identical. -func typeOf(t interface{}) unsafe.Pointer { +func typeOf(t any) unsafe.Pointer { return (*ifaceHeader)(unsafe.Pointer(&t)).Type } @@ -80,7 +80,7 @@ func valueOfBytes(v []byte) Value { p := (*sliceHeader)(unsafe.Pointer(&v)) return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { p := (*ifaceHeader)(unsafe.Pointer(&v)) return Value{typ: p.Type, ptr: p.Data} } @@ -93,7 +93,7 @@ func (v Value) getBytes() (x []byte) { *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} return x } -func (v Value) getIface() (x interface{}) { +func (v Value) getIface() (x any) { *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} return x } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go index 435470111..f7d386990 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -15,7 +15,7 @@ import ( type ( ifaceHeader struct { - _ [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. + _ [0]any // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. Type unsafe.Pointer Data unsafe.Pointer } @@ -37,7 +37,7 @@ var ( // typeOf returns a pointer to the Go type information. // The pointer is comparable and equal if and only if the types are identical. -func typeOf(t interface{}) unsafe.Pointer { +func typeOf(t any) unsafe.Pointer { return (*ifaceHeader)(unsafe.Pointer(&t)).Type } @@ -70,7 +70,7 @@ func valueOfString(v string) Value { func valueOfBytes(v []byte) Value { return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { p := (*ifaceHeader)(unsafe.Pointer(&v)) return Value{typ: p.Type, ptr: p.Data} } @@ -81,7 +81,7 @@ func (v Value) getString() string { func (v Value) getBytes() []byte { return unsafe.Slice((*byte)(v.ptr), v.num) } -func (v Value) getIface() (x interface{}) { +func (v Value) getIface() (x any) { *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} return x } diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 6267dc52a..de1777339 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -95,7 +95,7 @@ type Files struct { // multiple files. Only top-level declarations are registered. // Note that enum values are in the top-level since that are in the same // scope as the parent enum. 
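descsByName and typesByName hold several distinct descriptor kinds, hence the map[...]any value type; the lookup side stays fully typed. A small probe against the global registries (the blank import is what registers descriptor.proto):

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	_ "google.golang.org/protobuf/types/descriptorpb" // registers descriptor.proto
)

func main() {
	mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.FieldOptions")
	if err != nil {
		panic(err)
	}
	fmt.Println(mt.Descriptor().FullName()) // google.protobuf.FieldOptions

	fd, err := protoregistry.GlobalFiles.FindFileByPath("google/protobuf/descriptor.proto")
	if err != nil {
		panic(err)
	}
	fmt.Println(fd.Package()) // google.protobuf
}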
- descsByName map[protoreflect.FullName]interface{} + descsByName map[protoreflect.FullName]any filesByPath map[string][]protoreflect.FileDescriptor numFiles int } @@ -117,7 +117,7 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { defer globalMutex.Unlock() } if r.descsByName == nil { - r.descsByName = map[protoreflect.FullName]interface{}{ + r.descsByName = map[protoreflect.FullName]any{ "": &packageDescriptor{}, } r.filesByPath = make(map[string][]protoreflect.FileDescriptor) @@ -485,7 +485,7 @@ type Types struct { } type ( - typesByName map[protoreflect.FullName]interface{} + typesByName map[protoreflect.FullName]any extensionsByMessage map[protoreflect.FullName]extensionsByNumber extensionsByNumber map[protoreflect.FieldNumber]protoreflect.ExtensionType ) @@ -570,7 +570,7 @@ func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error { return nil } -func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error { +func (r *Types) register(kind string, desc protoreflect.Descriptor, typ any) error { name := desc.FullName() prev := r.typesByName[name] if prev != nil { @@ -841,7 +841,7 @@ func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(p } } -func typeName(t interface{}) string { +func typeName(t any) string { switch t.(type) { case protoreflect.EnumType: return "enum" @@ -854,7 +854,7 @@ func typeName(t interface{}) string { } } -func amendErrorWithCaller(err error, prev, curr interface{}) error { +func amendErrorWithCaller(err error, prev, curr any) error { prevPkg := goPackage(prev) currPkg := goPackage(curr) if prevPkg == "" || currPkg == "" || prevPkg == currPkg { @@ -863,7 +863,7 @@ func amendErrorWithCaller(err error, prev, curr interface{}) error { return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg) } -func goPackage(v interface{}) string { +func goPackage(v any) string { switch d := v.(type) { case protoreflect.EnumType: v = d.Descriptor() diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 78624cf60..9403eb075 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -54,6 +54,9 @@ type Edition int32 const ( // A placeholder for an unknown edition value. Edition_EDITION_UNKNOWN Edition = 0 + // A placeholder edition for specifying default behaviors *before* a feature + // was first introduced. This is effectively an "infinite past". + Edition_EDITION_LEGACY Edition = 900 // Legacy syntax "editions". These pre-date editions, but behave much like // distinct editions. These can't be used to specify the edition of proto // files, but feature definitions must supply proto2/proto3 defaults for @@ -82,6 +85,7 @@ const ( var ( Edition_name = map[int32]string{ 0: "EDITION_UNKNOWN", + 900: "EDITION_LEGACY", 998: "EDITION_PROTO2", 999: "EDITION_PROTO3", 1000: "EDITION_2023", @@ -95,6 +99,7 @@ var ( } Edition_value = map[string]int32{ "EDITION_UNKNOWN": 0, + "EDITION_LEGACY": 900, "EDITION_PROTO2": 998, "EDITION_PROTO3": 999, "EDITION_2023": 1000, @@ -2177,12 +2182,16 @@ type FileOptions struct { // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. 
JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. + // A proto2 file can set this to true to opt in to UTF-8 checking for Java, + // which will throw an exception if invalid UTF-8 is parsed from the wire or + // assigned to a string field. + // + // TODO: clarify exactly what kinds of field types this option + // applies to, and update these docs accordingly. + // + // Proto3 files already perform these checks. Setting the option explicitly to + // false has no effect: it cannot be used to opt proto3 files out of UTF-8 + // checks. JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` // Sets the Go package where structs generated from this .proto will be @@ -2679,7 +2688,8 @@ type FieldOptions struct { Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"` // Any features defined in the specific edition. - Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` + Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` + FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2811,6 +2821,13 @@ func (x *FieldOptions) GetFeatures() *FeatureSet { return nil } +func (x *FieldOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { + if x != nil { + return x.FeatureSupport + } + return nil +} + func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2995,6 +3012,8 @@ type EnumValueOptions struct { // out when using debug formats, e.g. when the field contains sensitive // credentials. DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + // Information about the support window of a feature value. + FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -3058,6 +3077,13 @@ func (x *EnumValueOptions) GetDebugRedact() bool { return Default_EnumValueOptions_DebugRedact } +func (x *EnumValueOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { + if x != nil { + return x.FeatureSupport + } + return nil +} + func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -3968,6 +3994,88 @@ func (x *FieldOptions_EditionDefault) GetValue() string { return "" } +// Information about the support window of a feature. +type FieldOptions_FeatureSupport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The edition that this feature was first available in. In editions + // earlier than this one, the default assigned to EDITION_LEGACY will be + // used, and proto files will not be able to override it. + EditionIntroduced *Edition `protobuf:"varint,1,opt,name=edition_introduced,json=editionIntroduced,enum=google.protobuf.Edition" json:"edition_introduced,omitempty"` + // The edition this feature becomes deprecated in. Using this after this + // edition may trigger warnings. + EditionDeprecated *Edition `protobuf:"varint,2,opt,name=edition_deprecated,json=editionDeprecated,enum=google.protobuf.Edition" json:"edition_deprecated,omitempty"` + // The deprecation warning text if this feature is used after the edition it + // was marked deprecated in. + DeprecationWarning *string `protobuf:"bytes,3,opt,name=deprecation_warning,json=deprecationWarning" json:"deprecation_warning,omitempty"` + // The edition this feature is no longer available in. In editions after + // this one, the last default assigned will be used, and proto files will + // not be able to override it. + EditionRemoved *Edition `protobuf:"varint,4,opt,name=edition_removed,json=editionRemoved,enum=google.protobuf.Edition" json:"edition_removed,omitempty"` +} + +func (x *FieldOptions_FeatureSupport) Reset() { + *x = FieldOptions_FeatureSupport{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions_FeatureSupport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions_FeatureSupport) ProtoMessage() {} + +func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions_FeatureSupport.ProtoReflect.Descriptor instead. 
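The FeatureSupport accessors that follow are nil-safe in the usual generated style, returning EDITION_UNKNOWN or "" when a field is unset. A hedged sketch of how a consumer might use the support window to surface a deprecation warning; the checkDeprecation helper and the sample edition values are illustrative assumptions, not part of the generated API:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

// checkDeprecation reports whether a feature value is used at or after
// the edition in which it was marked deprecated. Edition values compare
// numerically, so a simple ordered comparison suffices.
func checkDeprecation(fs *descriptorpb.FieldOptions_FeatureSupport, used descriptorpb.Edition) (string, bool) {
	dep := fs.GetEditionDeprecated()
	if dep == descriptorpb.Edition_EDITION_UNKNOWN || used < dep {
		return "", false // no deprecation window, or used before it starts
	}
	return fs.GetDeprecationWarning(), true
}

func main() {
	warn := "use the replacement feature instead"
	fs := &descriptorpb.FieldOptions_FeatureSupport{
		EditionIntroduced:  descriptorpb.Edition_EDITION_PROTO2.Enum(),
		EditionDeprecated:  descriptorpb.Edition_EDITION_2023.Enum(),
		DeprecationWarning: &warn,
	}
	if msg, ok := checkDeprecation(fs, descriptorpb.Edition_EDITION_2023); ok {
		fmt.Println("deprecated:", msg)
	}
}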
+func (*FieldOptions_FeatureSupport) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} +} + +func (x *FieldOptions_FeatureSupport) GetEditionIntroduced() Edition { + if x != nil && x.EditionIntroduced != nil { + return *x.EditionIntroduced + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_FeatureSupport) GetEditionDeprecated() Edition { + if x != nil && x.EditionDeprecated != nil { + return *x.EditionDeprecated + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_FeatureSupport) GetDeprecationWarning() string { + if x != nil && x.DeprecationWarning != nil { + return *x.DeprecationWarning + } + return "" +} + +func (x *FieldOptions_FeatureSupport) GetEditionRemoved() Edition { + if x != nil && x.EditionRemoved != nil { + return *x.EditionRemoved + } + return Edition_EDITION_UNKNOWN +} + // The name of the uninterpreted option. Each string represents a segment in // a dot-separated name. is_extension is true iff a segment represents an // extension (denoted with parentheses in options specs in .proto files). @@ -3985,7 +4093,7 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3998,7 +4106,7 @@ func (x *UninterpretedOption_NamePart) String() string { func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4037,14 +4145,17 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` - Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + // Defaults of features that can be overridden in this edition. + OverridableFeatures *FeatureSet `protobuf:"bytes,4,opt,name=overridable_features,json=overridableFeatures" json:"overridable_features,omitempty"` + // Defaults of features that can't be overridden in this edition. 
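The FeatureSetEditionDefault change here splits the old single Features field into overridable and fixed defaults (the fixed_features field follows below). A rough sketch of the precedence this implies, assuming proto.Merge semantics where later merges overwrite populated scalar fields; the resolveFeatures helper is an illustration, not the real toolchain's resolution logic:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

// resolveFeatures layers overridable defaults, then explicit file-level
// features, then fixed defaults last, so a file can never end up changing
// a feature that is fixed in its edition.
func resolveFeatures(def *descriptorpb.FeatureSetDefaults_FeatureSetEditionDefault, fileFeatures *descriptorpb.FeatureSet) *descriptorpb.FeatureSet {
	out := &descriptorpb.FeatureSet{}
	if f := def.GetOverridableFeatures(); f != nil {
		proto.Merge(out, f)
	}
	if fileFeatures != nil {
		proto.Merge(out, fileFeatures)
	}
	if f := def.GetFixedFeatures(); f != nil {
		proto.Merge(out, f) // fixed defaults win
	}
	return out
}

func main() {
	def := &descriptorpb.FeatureSetDefaults_FeatureSetEditionDefault{
		Edition:             descriptorpb.Edition_EDITION_2023.Enum(),
		OverridableFeatures: &descriptorpb.FeatureSet{FieldPresence: descriptorpb.FeatureSet_EXPLICIT.Enum()},
		FixedFeatures:       &descriptorpb.FeatureSet{},
	}
	file := &descriptorpb.FeatureSet{FieldPresence: descriptorpb.FeatureSet_IMPLICIT.Enum()}
	fmt.Println(resolveFeatures(def, file).GetFieldPresence()) // IMPLICIT
}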
+ FixedFeatures *FeatureSet `protobuf:"bytes,5,opt,name=fixed_features,json=fixedFeatures" json:"fixed_features,omitempty"` } func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { *x = FeatureSetDefaults_FeatureSetEditionDefault{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4057,7 +4168,7 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4080,9 +4191,16 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetEdition() Edition { return Edition_EDITION_UNKNOWN } -func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFeatures() *FeatureSet { +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetOverridableFeatures() *FeatureSet { if x != nil { - return x.Features + return x.OverridableFeatures + } + return nil +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFixedFeatures() *FeatureSet { + if x != nil { + return x.FixedFeatures } return nil } @@ -4188,7 +4306,7 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4201,7 +4319,7 @@ func (x *SourceCodeInfo_Location) String() string { func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4275,7 +4393,7 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4288,7 +4406,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4597,7 +4715,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 
0x67, 0x22, 0x97, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x69, 0x6e, 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, @@ -4670,405 +4788,445 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03, - 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, - 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, - 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, - 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, - 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, - 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, - 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 
0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, - 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, - 0x08, 0x09, 0x10, 0x0a, 0x22, 0xad, 0x0a, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, - 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, - 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, - 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, - 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, - 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, - 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, - 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, - 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, - 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, - 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, - 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, - 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 
0x52, 0x07, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, - 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, - 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, - 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, - 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, - 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, - 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, - 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, - 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, - 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, - 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, - 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, - 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, - 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, - 0x54, 0x41, 
0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, - 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, - 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, - 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, - 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, - 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, - 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, - 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, - 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, - 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, - 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, - 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, - 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 
0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, - 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, + 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, + 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, + 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, + 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, + 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, + 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, + 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 
0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, - 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, + 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, + 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, + 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 
0x0a, 0x0c, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, - 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, + 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, - 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1b, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, + 0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, + 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, + 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, + 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, + 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, + 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, + 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 
0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, + 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, + 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, + 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, + 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, + 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, + 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, + 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, + 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, + 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, + 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, + 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, - 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, - 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, - 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, - 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, - 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, - 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, - 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, - 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, - 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, - 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, - 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, - 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, - 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, - 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, - 
0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, - 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, - 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, - 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, - 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x0a, 0x0a, - 0x0a, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x8b, 0x01, 0x0a, 0x0e, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, - 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, - 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, - 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, - 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, - 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x66, 0x0a, 0x09, 0x65, 0x6e, 0x75, - 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, + 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, + 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x63, 0x6f, 
0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, + 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, + 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, + 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 
0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, + 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, + 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, + 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, + 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, + 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, + 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 
0x43, 0x54, 0x53, 0x10, + 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, + 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, + 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, + 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, + 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, + 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, + 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, + 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, + 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, + 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, + 
0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, + 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, + 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, + 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, + 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, + 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, - 0x70, 0x65, 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, - 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, - 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x92, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, - 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x27, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, - 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, - 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0x52, - 0x15, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, - 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x78, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, - 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x23, 0x88, 0x01, 0x01, - 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, - 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, - 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x78, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 
0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, - 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x20, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, - 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, - 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7c, 0x0a, 0x0b, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, - 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x33, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, - 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, - 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, - 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0x52, 0x0a, 0x6a, 0x73, - 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, - 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, - 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, - 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, - 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, - 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, - 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, - 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, - 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, - 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, - 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, - 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, - 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x53, 0x0a, 0x0f, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, - 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, - 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, - 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, - 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 
0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, - 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, - 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, - 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, - 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, - 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, - 0x10, 0xe9, 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, 0x2a, 0x06, 0x08, 0xea, 0x07, - 0x10, 0xeb, 0x07, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, - 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xfe, 0x02, 0x0a, 0x12, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, - 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, - 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x1a, 0x87, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, - 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, - 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, - 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, - 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 
0x64, 0x65, 0x49, 0x6e, - 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, - 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, - 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, - 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, - 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, - 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, - 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, - 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, - 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, - 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, - 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, - 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, - 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, - 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, - 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, + 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, + 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, + 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, + 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, + 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, + 
0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, + 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, + 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, + 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, + 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xb2, 0x01, + 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, + 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, + 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, + 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, + 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, + 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, + 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, + 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, + 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, + 0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, + 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, + 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, + 0x4f, 0x44, 0x49, 0x4e, 
0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, + 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, + 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, + 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, + 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, + 0x08, 0x01, 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, + 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, + 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, + 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, + 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, + 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, + 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, + 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, + 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, + 0x10, 0xe8, 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, - 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, - 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, - 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, - 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, - 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0x92, 0x02, 0x0a, 0x07, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 
0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, - 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, - 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, - 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, - 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, - 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, - 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, - 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, - 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, - 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, - 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, - 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, + 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, + 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, + 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, + 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, + 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, + 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, + 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, + 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, + 0x6f, 0x75, 
0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, + 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, + 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, + 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, + 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, + 0x10, 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, + 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, + 0x45, 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, + 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, + 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, + 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, + 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, + 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, + 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, + 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, + 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, + 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 
0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, + 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, } var ( @@ -5084,8 +5242,8 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { } var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 32) -var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) +var file_google_protobuf_descriptor_proto_goTypes = []any{ (Edition)(0), // 0: google.protobuf.Edition (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type @@ -5131,10 +5289,11 @@ var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault - (*UninterpretedOption_NamePart)(nil), // 45: google.protobuf.UninterpretedOption.NamePart - (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 46: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - (*SourceCodeInfo_Location)(nil), // 47: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 48: google.protobuf.GeneratedCodeInfo.Annotation + (*FieldOptions_FeatureSupport)(nil), // 45: google.protobuf.FieldOptions.FeatureSupport + (*UninterpretedOption_NamePart)(nil), // 46: google.protobuf.UninterpretedOption.NamePart + (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 48: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 49: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto @@ -5179,40 +5338,46 @@ var file_google_protobuf_descriptor_proto_depIdxs = []int32{ 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 42: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 43: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 44: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 45: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 46: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 47: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 48: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 49: google.protobuf.ServiceOptions.features:type_name -> 
google.protobuf.FeatureSet - 35, // 50: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 9, // 51: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 36, // 52: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 53: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 45, // 54: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 10, // 55: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence - 11, // 56: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType - 12, // 57: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding - 13, // 58: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation - 14, // 59: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding - 15, // 60: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat - 46, // 61: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - 0, // 62: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition - 0, // 63: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition - 47, // 64: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 48, // 65: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 20, // 66: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 0, // 67: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition - 0, // 68: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition - 36, // 69: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features:type_name -> google.protobuf.FeatureSet - 16, // 70: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 71, // [71:71] is the sub-list for method output_type - 71, // [71:71] is the sub-list for method input_type - 71, // [71:71] is the sub-list for extension type_name - 71, // [71:71] is the sub-list for extension extendee - 0, // [0:71] is the sub-list for field type_name + 45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet + 45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 35, // 50: 
google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation + 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition + 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition + 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition + 0, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet + 36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet + 16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 77, // [77:77] is the sub-list for method output_type + 77, // [77:77] is the sub-list for method input_type + 77, // [77:77] is the sub-list for extension type_name + 77, // [77:77] is the sub-list for extension extendee + 0, // [0:77] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -5221,7 +5386,7 @@ func file_google_protobuf_descriptor_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v 
interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorSet); i { case 0: return &v.state @@ -5233,7 +5398,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorProto); i { case 0: return &v.state @@ -5245,7 +5410,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto); i { case 0: return &v.state @@ -5257,7 +5422,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ExtensionRangeOptions); i { case 0: return &v.state @@ -5271,7 +5436,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*FieldDescriptorProto); i { case 0: return &v.state @@ -5283,7 +5448,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*OneofDescriptorProto); i { case 0: return &v.state @@ -5295,7 +5460,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*EnumDescriptorProto); i { case 0: return &v.state @@ -5307,7 +5472,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*EnumValueDescriptorProto); i { case 0: return &v.state @@ -5319,7 +5484,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*ServiceDescriptorProto); i { case 0: return &v.state @@ -5331,7 +5496,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*MethodDescriptorProto); i { case 0: return &v.state @@ -5343,7 +5508,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + 
file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*FileOptions); i { case 0: return &v.state @@ -5357,7 +5522,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*MessageOptions); i { case 0: return &v.state @@ -5371,7 +5536,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*FieldOptions); i { case 0: return &v.state @@ -5385,7 +5550,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*OneofOptions); i { case 0: return &v.state @@ -5399,7 +5564,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*EnumOptions); i { case 0: return &v.state @@ -5413,7 +5578,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*EnumValueOptions); i { case 0: return &v.state @@ -5427,7 +5592,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*ServiceOptions); i { case 0: return &v.state @@ -5441,7 +5606,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*MethodOptions); i { case 0: return &v.state @@ -5455,7 +5620,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any { switch v := v.(*UninterpretedOption); i { case 0: return &v.state @@ -5467,7 +5632,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any { switch v := v.(*FeatureSet); i { case 0: return &v.state @@ -5481,7 +5646,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any { switch v := 
v.(*FeatureSetDefaults); i { case 0: return &v.state @@ -5493,7 +5658,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any { switch v := v.(*SourceCodeInfo); i { case 0: return &v.state @@ -5505,7 +5670,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any { switch v := v.(*GeneratedCodeInfo); i { case 0: return &v.state @@ -5517,7 +5682,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto_ExtensionRange); i { case 0: return &v.state @@ -5529,7 +5694,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto_ReservedRange); i { case 0: return &v.state @@ -5541,7 +5706,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any { switch v := v.(*ExtensionRangeOptions_Declaration); i { case 0: return &v.state @@ -5553,7 +5718,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any { switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { case 0: return &v.state @@ -5565,7 +5730,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any { switch v := v.(*FieldOptions_EditionDefault); i { case 0: return &v.state @@ -5577,7 +5742,19 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any { + switch v := v.(*FieldOptions_FeatureSupport); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any { switch v := v.(*UninterpretedOption_NamePart); i { case 0: return &v.state @@ -5589,7 +5766,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any { switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { case 0: return &v.state @@ 
-5601,7 +5778,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any { switch v := v.(*SourceCodeInfo_Location); i { case 0: return &v.state @@ -5613,7 +5790,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any { switch v := v.(*GeneratedCodeInfo_Annotation); i { case 0: return &v.state @@ -5632,7 +5809,7 @@ func file_google_protobuf_descriptor_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, NumEnums: 17, - NumMessages: 32, + NumMessages: 33, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index 25de5ae00..a2ca940c5 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -6,9 +6,9 @@ // https://developers.google.com/open-source/licenses/bsd // Code generated by protoc-gen-go. DO NOT EDIT. -// source: reflect/protodesc/proto/go_features.proto +// source: google/protobuf/go_features.proto -package proto +package gofeaturespb import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -30,7 +30,7 @@ type GoFeatures struct { func (x *GoFeatures) Reset() { *x = GoFeatures{} if protoimpl.UnsafeEnabled { - mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0] + mi := &file_google_protobuf_go_features_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -43,7 +43,7 @@ func (x *GoFeatures) String() string { func (*GoFeatures) ProtoMessage() {} func (x *GoFeatures) ProtoReflect() protoreflect.Message { - mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0] + mi := &file_google_protobuf_go_features_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -56,7 +56,7 @@ func (x *GoFeatures) ProtoReflect() protoreflect.Message { // Deprecated: Use GoFeatures.ProtoReflect.Descriptor instead. func (*GoFeatures) Descriptor() ([]byte, []int) { - return file_reflect_protodesc_proto_go_features_proto_rawDescGZIP(), []int{0} + return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0} } func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { @@ -66,69 +66,73 @@ func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { return false } -var file_reflect_protodesc_proto_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ +var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.FeatureSet)(nil), ExtensionType: (*GoFeatures)(nil), Field: 1002, - Name: "google.protobuf.go", + Name: "pb.go", Tag: "bytes,1002,opt,name=go", - Filename: "reflect/protodesc/proto/go_features.proto", + Filename: "google/protobuf/go_features.proto", }, } // Extension fields to descriptorpb.FeatureSet. 
var ( - // optional google.protobuf.GoFeatures go = 1002; - E_Go = &file_reflect_protodesc_proto_go_features_proto_extTypes[0] + // optional pb.GoFeatures go = 1002; + E_Go = &file_google_protobuf_go_features_proto_extTypes[0] ) -var File_reflect_protodesc_proto_go_features_proto protoreflect.FileDescriptor - -var file_reflect_protodesc_proto_go_features_proto_rawDesc = []byte{ - 0x0a, 0x29, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x64, - 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x1a, 0x20, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, - 0x0a, 0x0a, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x1a, - 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, - 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x42, 0x1f, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, 0x75, - 0x65, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, 0xe7, - 0x07, 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, - 0x61, 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x49, 0x0a, 0x02, 0x67, 0x6f, - 0x12, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x64, 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, +var File_google_protobuf_go_features_proto protoreflect.FileDescriptor + +var file_google_protobuf_go_features_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x47, 0x6f, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, + 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, + 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, + 0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, + 0xe7, 0x07, 0xb2, 0x01, 
0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, + 0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, + 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, + 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, } var ( - file_reflect_protodesc_proto_go_features_proto_rawDescOnce sync.Once - file_reflect_protodesc_proto_go_features_proto_rawDescData = file_reflect_protodesc_proto_go_features_proto_rawDesc + file_google_protobuf_go_features_proto_rawDescOnce sync.Once + file_google_protobuf_go_features_proto_rawDescData = file_google_protobuf_go_features_proto_rawDesc ) -func file_reflect_protodesc_proto_go_features_proto_rawDescGZIP() []byte { - file_reflect_protodesc_proto_go_features_proto_rawDescOnce.Do(func() { - file_reflect_protodesc_proto_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_reflect_protodesc_proto_go_features_proto_rawDescData) +func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { + file_google_protobuf_go_features_proto_rawDescOnce.Do(func() { + file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_go_features_proto_rawDescData) }) - return file_reflect_protodesc_proto_go_features_proto_rawDescData + return file_google_protobuf_go_features_proto_rawDescData } -var file_reflect_protodesc_proto_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_reflect_protodesc_proto_go_features_proto_goTypes = []interface{}{ - (*GoFeatures)(nil), // 0: google.protobuf.GoFeatures +var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_go_features_proto_goTypes = []any{ + (*GoFeatures)(nil), // 0: pb.GoFeatures (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet } -var file_reflect_protodesc_proto_go_features_proto_depIdxs = []int32{ - 1, // 0: google.protobuf.go:extendee -> google.protobuf.FeatureSet - 0, // 1: google.protobuf.go:type_name -> google.protobuf.GoFeatures +var file_google_protobuf_go_features_proto_depIdxs = []int32{ + 1, // 0: pb.go:extendee -> google.protobuf.FeatureSet + 0, // 1: pb.go:type_name -> pb.GoFeatures 2, // [2:2] is the sub-list for method output_type 2, // [2:2] is the sub-list for method input_type 1, // [1:2] is the sub-list for extension type_name @@ -136,13 +140,13 @@ var file_reflect_protodesc_proto_go_features_proto_depIdxs = 
[]int32{ 0, // [0:0] is the sub-list for field type_name } -func init() { file_reflect_protodesc_proto_go_features_proto_init() } -func file_reflect_protodesc_proto_go_features_proto_init() { - if File_reflect_protodesc_proto_go_features_proto != nil { +func init() { file_google_protobuf_go_features_proto_init() } +func file_google_protobuf_go_features_proto_init() { + if File_google_protobuf_go_features_proto != nil { return } if !protoimpl.UnsafeEnabled { - file_reflect_protodesc_proto_go_features_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*GoFeatures); i { case 0: return &v.state @@ -159,19 +163,19 @@ func file_reflect_protodesc_proto_go_features_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_reflect_protodesc_proto_go_features_proto_rawDesc, + RawDescriptor: file_google_protobuf_go_features_proto_rawDesc, NumEnums: 0, NumMessages: 1, NumExtensions: 1, NumServices: 0, }, - GoTypes: file_reflect_protodesc_proto_go_features_proto_goTypes, - DependencyIndexes: file_reflect_protodesc_proto_go_features_proto_depIdxs, - MessageInfos: file_reflect_protodesc_proto_go_features_proto_msgTypes, - ExtensionInfos: file_reflect_protodesc_proto_go_features_proto_extTypes, + GoTypes: file_google_protobuf_go_features_proto_goTypes, + DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs, + MessageInfos: file_google_protobuf_go_features_proto_msgTypes, + ExtensionInfos: file_google_protobuf_go_features_proto_extTypes, }.Build() - File_reflect_protodesc_proto_go_features_proto = out.File - file_reflect_protodesc_proto_go_features_proto_rawDesc = nil - file_reflect_protodesc_proto_go_features_proto_goTypes = nil - file_reflect_protodesc_proto_go_features_proto_depIdxs = nil + File_google_protobuf_go_features_proto = out.File + file_google_protobuf_go_features_proto_rawDesc = nil + file_google_protobuf_go_features_proto_goTypes = nil + file_google_protobuf_go_features_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto deleted file mode 100644 index d24657129..000000000 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto +++ /dev/null @@ -1,28 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2023 Google Inc. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd - -syntax = "proto2"; - -package google.protobuf; - -import "google/protobuf/descriptor.proto"; - -option go_package = "google.golang.org/protobuf/types/gofeaturespb"; - -extend google.protobuf.FeatureSet { - optional GoFeatures go = 1002; -} - -message GoFeatures { - // Whether or not to generate the deprecated UnmarshalJSON method for enums. 
- optional bool legacy_unmarshal_json_enum = 1 [ - retention = RETENTION_RUNTIME, - targets = TARGET_TYPE_ENUM, - edition_defaults = { edition: EDITION_PROTO2, value: "true" }, - edition_defaults = { edition: EDITION_PROTO3, value: "false" } - ]; -} diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 9de51be54..7172b43d3 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -445,7 +445,7 @@ func file_google_protobuf_any_proto_rawDescGZIP() []byte { } var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_any_proto_goTypes = []interface{}{ +var file_google_protobuf_any_proto_goTypes = []any{ (*Any)(nil), // 0: google.protobuf.Any } var file_google_protobuf_any_proto_depIdxs = []int32{ @@ -462,7 +462,7 @@ func file_google_protobuf_any_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Any); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index df709a8dd..1b71bcd91 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -323,7 +323,7 @@ func file_google_protobuf_duration_proto_rawDescGZIP() []byte { } var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_duration_proto_goTypes = []interface{}{ +var file_google_protobuf_duration_proto_goTypes = []any{ (*Duration)(nil), // 0: google.protobuf.Duration } var file_google_protobuf_duration_proto_depIdxs = []int32{ @@ -340,7 +340,7 @@ func file_google_protobuf_duration_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Duration); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 81511a336..83a5a645b 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -332,7 +332,7 @@ func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { } var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_timestamp_proto_goTypes = []interface{}{ +var file_google_protobuf_timestamp_proto_goTypes = []any{ (*Timestamp)(nil), // 0: google.protobuf.Timestamp } var file_google_protobuf_timestamp_proto_depIdxs = []int32{ @@ -349,7 +349,7 @@ func file_google_protobuf_timestamp_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Timestamp); i { case 0: return &v.state diff --git a/vendor/k8s.io/klog/v2/klog.go 
b/vendor/k8s.io/klog/v2/klog.go index 026be9e3b..47ec9466a 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -404,13 +404,6 @@ func (t *traceLocation) Set(value string) error { return nil } -// flushSyncWriter is the interface satisfied by logging destinations. -type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer -} - var logging loggingT var commandLine flag.FlagSet @@ -486,7 +479,7 @@ type settings struct { // Access to all of the following fields must be protected via a mutex. // file holds writer for each of the log types. - file [severity.NumSeverity]flushSyncWriter + file [severity.NumSeverity]io.Writer // flushInterval is the interval for periodic flushing. If zero, // the global default will be used. flushInterval time.Duration @@ -831,32 +824,12 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, buffer.PutBuffer(b) } -// redirectBuffer is used to set an alternate destination for the logs -type redirectBuffer struct { - w io.Writer -} - -func (rb *redirectBuffer) Sync() error { - return nil -} - -func (rb *redirectBuffer) Flush() error { - return nil -} - -func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { - return rb.w.Write(bytes) -} - // SetOutput sets the output destination for all severities func SetOutput(w io.Writer) { logging.mu.Lock() defer logging.mu.Unlock() for s := severity.FatalLog; s >= severity.InfoLog; s-- { - rb := &redirectBuffer{ - w: w, - } - logging.file[s] = rb + logging.file[s] = w } } @@ -868,10 +841,7 @@ func SetOutputBySeverity(name string, w io.Writer) { if !ok { panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) } - rb := &redirectBuffer{ - w: w, - } - logging.file[sev] = rb + logging.file[sev] = w } // LogToStderr sets whether to log exclusively to stderr, bypassing outputs @@ -1011,7 +981,8 @@ func (l *loggingT) exit(err error) { logExitFunc(err) return } - l.flushAll() + needToSync := l.flushAll() + l.syncAll(needToSync) OsExit(2) } @@ -1028,10 +999,6 @@ type syncBuffer struct { maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up. } -func (sb *syncBuffer) Sync() error { - return sb.file.Sync() -} - // CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options. func CalculateMaxSize() uint64 { if logging.logFile != "" { @@ -1223,24 +1190,45 @@ func StartFlushDaemon(interval time.Duration) { // lockAndFlushAll is like flushAll but locks l.mu first. func (l *loggingT) lockAndFlushAll() { l.mu.Lock() - l.flushAll() + needToSync := l.flushAll() l.mu.Unlock() + // Some environments are slow when syncing and holding the lock might cause contention. + l.syncAll(needToSync) } -// flushAll flushes all the logs and attempts to "sync" their data to disk. +// flushAll flushes all the logs // l.mu is held. -func (l *loggingT) flushAll() { +// +// The result is the number of files which need to be synced and the pointers to them. +func (l *loggingT) flushAll() fileArray { + var needToSync fileArray + // Flush from fatal down, in case there's trouble flushing. 
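The klog hunks above split the old combined flush-and-sync loop: flushAll now only drains buffers while l.mu is held and returns the *os.File handles that still need an fsync, and the new syncAll performs the Sync() calls after lockAndFlushAll has released the mutex, since syncing can be slow in some environments and would otherwise hold the lock and block every logging goroutine. A simplified sketch of that pattern, using hypothetical types rather than klog's actual ones:

```go
package main

import (
	"os"
	"sync"
)

// logger stands in for klog's loggingT; files stands in for its
// per-severity destinations.
type logger struct {
	mu    sync.Mutex
	files []*os.File
}

// flushAll drains in-memory buffers and reports which files still need an
// fsync. The mutex must be held; the work here is cheap.
func (l *logger) flushAll() []*os.File {
	return append([]*os.File(nil), l.files...)
}

// lockAndFlushAll flushes under the lock but syncs outside it, so the
// slow syscalls never extend the critical section.
func (l *logger) lockAndFlushAll() {
	l.mu.Lock()
	needToSync := l.flushAll()
	l.mu.Unlock()
	for _, f := range needToSync {
		_ = f.Sync() // ignore error, as klog does
	}
}

func main() {
	l := &logger{files: []*os.File{os.Stderr}}
	l.lockAndFlushAll()
}
```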
for s := severity.FatalLog; s >= severity.InfoLog; s-- { file := l.file[s] - if file != nil { - _ = file.Flush() // ignore error - _ = file.Sync() // ignore error + if sb, ok := file.(*syncBuffer); ok && sb.file != nil { + _ = sb.Flush() // ignore error + needToSync.files[needToSync.num] = sb.file + needToSync.num++ } } if logging.loggerOptions.flush != nil { logging.loggerOptions.flush() } + return needToSync +} + +type fileArray struct { + num int + files [severity.NumSeverity]*os.File +} + +// syncAll attempts to sync the data of the given files to disk. +func (l *loggingT) syncAll(needToSync fileArray) { + // Sync from fatal down, in case there's trouble syncing. + for i := 0; i < needToSync.num; i++ { + _ = needToSync.files[i].Sync() // ignore error + } } // CopyStandardLogTo arranges for messages written to the Go "log" package's diff --git a/vendor/k8s.io/utils/net/multi_listen.go b/vendor/k8s.io/utils/net/multi_listen.go new file mode 100644 index 000000000..7cb7795be --- /dev/null +++ b/vendor/k8s.io/utils/net/multi_listen.go @@ -0,0 +1,195 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "context" + "fmt" + "net" + "sync" +) + +// connErrPair pairs conn and error which is returned by accept on sub-listeners. +type connErrPair struct { + conn net.Conn + err error +} + +// multiListener implements net.Listener +type multiListener struct { + listeners []net.Listener + wg sync.WaitGroup + + // connCh passes accepted connections, from child listeners to parent. + connCh chan connErrPair + // stopCh communicates from parent to child listeners. + stopCh chan struct{} +} + +// compile time check to ensure *multiListener implements net.Listener +var _ net.Listener = &multiListener{} + +// MultiListen returns a net.Listener which can listen on and accept connections for +// the given network on multiple addresses. Internally it uses stdlib to create +// sub-listeners and multiplexes connection requests using go-routines. +// The network must be "tcp", "tcp4" or "tcp6". +// It follows the semantics of net.Listen that primarily means: +// 1. If the host is an unspecified/zero IP address with "tcp" network, MultiListen +// listens on all available unicast and anycast IP addresses of the local system. +// 2. Use "tcp4" or "tcp6" to exclusively listen on IPv4 or IPv6 family, respectively. +// 3. The host can accept names (e.g., localhost) and it will create a listener for at +// most one of the host's IPs. +func MultiListen(ctx context.Context, network string, addrs ...string) (net.Listener, error) { + var lc net.ListenConfig + return multiListen( + ctx, + network, + addrs, + func(ctx context.Context, network, address string) (net.Listener, error) { + return lc.Listen(ctx, network, address) + }) } + +// multiListen implements MultiListen by consuming stdlib functions as a dependency, allowing +// mocking in unit tests.
+func multiListen( + ctx context.Context, + network string, + addrs []string, + listenFunc func(ctx context.Context, network, address string) (net.Listener, error), +) (net.Listener, error) { + if !(network == "tcp" || network == "tcp4" || network == "tcp6") { + return nil, fmt.Errorf("network %q not supported", network) + } + if len(addrs) == 0 { + return nil, fmt.Errorf("no address provided to listen on") + } + + ml := &multiListener{ + connCh: make(chan connErrPair), + stopCh: make(chan struct{}), + } + for _, addr := range addrs { + l, err := listenFunc(ctx, network, addr) + if err != nil { + // close all the sub-listeners and exit + _ = ml.Close() + return nil, err + } + ml.listeners = append(ml.listeners, l) + } + + for _, l := range ml.listeners { + ml.wg.Add(1) + go func(l net.Listener) { + defer ml.wg.Done() + for { + // Accept() is blocking, unless ml.Close() is called, in which + // case it will return immediately with an error. + conn, err := l.Accept() + // This assumes that ANY error from Accept() will terminate the + // sub-listener. We could maybe be more precise, but it + // doesn't seem necessary. + terminate := err != nil + + select { + case ml.connCh <- connErrPair{conn: conn, err: err}: + case <-ml.stopCh: + // In case we accepted a connection AND were stopped, and + // this select-case was chosen, just throw away the + // connection. This avoids potentially blocking on connCh + // or leaking a connection. + if conn != nil { + _ = conn.Close() + } + terminate = true + } + // Make sure we don't loop on Accept() returning an error and + // the select choosing the channel case. + if terminate { + return + } + } + }(l) + } + return ml, nil +} + +// Accept implements net.Listener. It waits for and returns a connection from +// any of the sub-listeners. +func (ml *multiListener) Accept() (net.Conn, error) { + // wait for any sub-listener to enqueue an accepted connection + connErr, ok := <-ml.connCh + if !ok { + // The channel will be closed only when Close() is called on the + // multiListener. Closing of this channel implies that all + // sub-listeners are also closed, which causes a "use of closed + // network connection" error on their Accept() calls. We return the + // same error for multiListener.Accept() if multiListener.Close() + // has already been called. + return nil, fmt.Errorf("use of closed network connection") + } + return connErr.conn, connErr.err +} + +// Close implements net.Listener. It will close all sub-listeners and wait for +// the go-routines to exit. +func (ml *multiListener) Close() error { + // Make sure this can be called repeatedly without explosions. + select { + case <-ml.stopCh: + return fmt.Errorf("use of closed network connection") + default: + } + + // Tell all sub-listeners to stop. + close(ml.stopCh) + + // Closing the listeners causes Accept() to immediately return an error in + // the sub-listener go-routines. + for _, l := range ml.listeners { + _ = l.Close() + } + + // Wait for all the sub-listener go-routines to exit. + ml.wg.Wait() + close(ml.connCh) + + // Drain any already-queued connections. + for connErr := range ml.connCh { + if connErr.conn != nil { + _ = connErr.conn.Close() + } + } + return nil +} + +// Addr is an implementation of the net.Listener interface. It always returns +// the address of the first listener. Callers should use conn.LocalAddr() to +// obtain the actual local address of the sub-listener.
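For orientation, a usage sketch for the MultiListen helper added above: one net.Listener backed by several addresses, with Accept returning whichever sub-listener fires first. The signature is taken from the new file; the addresses and port here are illustrative.

```go
package main

import (
	"context"
	"fmt"

	netutils "k8s.io/utils/net"
)

func main() {
	// One listener backed by an IPv4 and an IPv6 loopback address.
	ln, err := netutils.MultiListen(context.Background(), "tcp", "127.0.0.1:9090", "[::1]:9090")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	// Addr() reports only the first sub-listener's address.
	fmt.Println("listening on", ln.Addr())

	// Accept blocks until any sub-listener accepts, then hands the
	// connection through the shared channel.
	conn, err := ln.Accept()
	if err != nil {
		return
	}
	_ = conn.Close()
}
```

Note that Addr reports only the first sub-listener's address; Addrs, which goes beyond the net.Listener interface, returns all of them.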
+func (ml *multiListener) Addr() net.Addr { + return ml.listeners[0].Addr() +} + +// Addrs is like Addr, but returns the address for all registered listeners. +func (ml *multiListener) Addrs() []net.Addr { + var ret []net.Addr + for _, l := range ml.listeners { + ret = append(ret, l.Addr()) + } + return ret +} diff --git a/vendor/k8s.io/utils/trace/trace.go b/vendor/k8s.io/utils/trace/trace.go index 187eb5d8c..559aebb59 100644 --- a/vendor/k8s.io/utils/trace/trace.go +++ b/vendor/k8s.io/utils/trace/trace.go @@ -192,7 +192,7 @@ func (t *Trace) Log() { t.endTime = &endTime t.lock.Unlock() // an explicit logging request should dump all the steps out at the higher level - if t.parentTrace == nil { // We don't start logging until Log or LogIfLong is called on the root trace + if t.parentTrace == nil && klogV(2) { // We don't start logging until Log or LogIfLong is called on the root trace t.logTrace() } } diff --git a/vendor/modules.txt b/vendor/modules.txt index 731cb8a90..8666bd55a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,7 +1,4 @@ -# cloud.google.com/go/compute v1.23.0 -## explicit; go 1.19 -cloud.google.com/go/compute/internal -# cloud.google.com/go/compute/metadata v0.2.3 +# cloud.google.com/go/compute/metadata v0.3.0 ## explicit; go 1.19 cloud.google.com/go/compute/metadata # github.com/Azure/azure-sdk-for-go v46.0.0+incompatible @@ -168,31 +165,31 @@ github.com/beorn7/perks/quantile # github.com/blang/semver/v4 v4.0.0 ## explicit; go 1.14 github.com/blang/semver/v4 -# github.com/cespare/xxhash/v2 v2.2.0 +# github.com/cespare/xxhash/v2 v2.3.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 -# github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 -## explicit; go 1.11 +# github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b +## explicit; go 1.19 github.com/cncf/xds/go/xds/data/orca/v3 github.com/cncf/xds/go/xds/service/orca/v3 -# github.com/davecgh/go-spew v1.1.1 +# github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew -# github.com/emicklei/go-restful/v3 v3.11.0 +# github.com/emicklei/go-restful/v3 v3.12.0 ## explicit; go 1.13 github.com/emicklei/go-restful/v3 github.com/emicklei/go-restful/v3/log -# github.com/envoyproxy/protoc-gen-validate v1.0.2 +# github.com/envoyproxy/protoc-gen-validate v1.0.4 ## explicit; go 1.19 github.com/envoyproxy/protoc-gen-validate/validate -# github.com/evanphx/json-patch v5.6.0+incompatible +# github.com/evanphx/json-patch v5.9.0+incompatible ## explicit github.com/evanphx/json-patch # github.com/evanphx/json-patch/v5 v5.9.0 ## explicit; go 1.18 github.com/evanphx/json-patch/v5 github.com/evanphx/json-patch/v5/internal/json -# github.com/fatih/color v1.16.0 +# github.com/fatih/color v1.17.0 ## explicit; go 1.17 github.com/fatih/color # github.com/florianl/go-nfqueue v1.3.2 @@ -208,7 +205,7 @@ github.com/gabriel-vasile/mimetype github.com/gabriel-vasile/mimetype/internal/charset github.com/gabriel-vasile/mimetype/internal/json github.com/gabriel-vasile/mimetype/internal/magic -# github.com/go-logr/logr v1.4.1 +# github.com/go-logr/logr v1.4.2 ## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/slogr @@ -218,18 +215,18 @@ github.com/go-logr/zapr # github.com/go-openapi/errors v0.21.0 ## explicit; go 1.19 github.com/go-openapi/errors -# github.com/go-openapi/jsonpointer v0.19.6 -## explicit; go 1.13 +# github.com/go-openapi/jsonpointer v0.21.0 +## explicit; go 1.20 github.com/go-openapi/jsonpointer -# github.com/go-openapi/jsonreference v0.20.2 -## explicit; 
go 1.13 +# github.com/go-openapi/jsonreference v0.21.0 +## explicit; go 1.20 github.com/go-openapi/jsonreference github.com/go-openapi/jsonreference/internal # github.com/go-openapi/strfmt v0.22.1 ## explicit; go 1.19 github.com/go-openapi/strfmt -# github.com/go-openapi/swag v0.22.3 -## explicit; go 1.18 +# github.com/go-openapi/swag v0.23.0 +## explicit; go 1.20 github.com/go-openapi/swag # github.com/go-playground/locales v0.14.1 ## explicit; go 1.17 @@ -241,8 +238,8 @@ github.com/go-playground/universal-translator # github.com/go-playground/validator/v10 v10.19.0 ## explicit; go 1.18 github.com/go-playground/validator/v10 -# github.com/goccy/go-yaml v1.8.8 -## explicit; go 1.12 +# github.com/goccy/go-yaml v1.12.0 +## explicit; go 1.19 github.com/goccy/go-yaml github.com/goccy/go-yaml/ast github.com/goccy/go-yaml/internal/errors @@ -263,7 +260,6 @@ github.com/golang-jwt/jwt/v4 github.com/golang/groupcache/lru # github.com/golang/protobuf v1.5.4 ## explicit; go 1.17 -github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any @@ -327,8 +323,8 @@ github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror github.com/googleapis/gax-go/v2/apierror/internal/proto github.com/googleapis/gax-go/v2/internal -# github.com/gorilla/websocket v1.5.0 -## explicit; go 1.12 +# github.com/gorilla/websocket v1.5.1 +## explicit; go 1.20 github.com/gorilla/websocket # github.com/hashicorp/go-cleanhttp v0.5.2 ## explicit; go 1.13 @@ -336,12 +332,15 @@ github.com/hashicorp/go-cleanhttp # github.com/hashicorp/go-retryablehttp v0.7.7 ## explicit; go 1.19 github.com/hashicorp/go-retryablehttp -# github.com/imdario/mergo v0.3.12 -## explicit; go 1.13 +# github.com/imdario/mergo v1.0.0 => github.com/imdario/mergo v0.3.5 +## explicit github.com/imdario/mergo # github.com/inconshreveable/mousetrap v1.1.0 ## explicit; go 1.18 github.com/inconshreveable/mousetrap +# github.com/istio-ecosystem/sail-operator v0.0.0-20241001175228-c7164d4a99dd +## explicit; go 1.22.0 +github.com/istio-ecosystem/sail-operator/api/v1alpha1 # github.com/jmespath/go-jmespath v0.4.0 ## explicit; go 1.14 github.com/jmespath/go-jmespath @@ -382,9 +381,6 @@ github.com/mattn/go-colorable # github.com/mattn/go-isatty v0.0.20 ## explicit; go 1.15 github.com/mattn/go-isatty -# github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 -## explicit; go 1.19 -github.com/matttproud/golang_protobuf_extensions/v2/pbutil # github.com/mdlayher/netlink v1.6.0 ## explicit; go 1.13 github.com/mdlayher/netlink @@ -547,8 +543,8 @@ github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 ## explicit github.com/pmezard/go-difflib/difflib -# github.com/prometheus/client_golang v1.18.0 -## explicit; go 1.19 +# github.com/prometheus/client_golang v1.19.1 +## explicit; go 1.20 github.com/prometheus/client_golang/api github.com/prometheus/client_golang/api/prometheus/v1 github.com/prometheus/client_golang/prometheus @@ -558,33 +554,32 @@ github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus/testutil github.com/prometheus/client_golang/prometheus/testutil/promlint github.com/prometheus/client_golang/prometheus/testutil/promlint/validations -# github.com/prometheus/client_model v0.5.0 +# github.com/prometheus/client_model v0.6.1 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.45.0 +# github.com/prometheus/common v0.55.0 ## explicit; go 1.20 github.com/prometheus/common/expfmt 
-github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model -# github.com/prometheus/procfs v0.12.0 -## explicit; go 1.19 +# github.com/prometheus/procfs v0.15.1 +## explicit; go 1.20 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util # github.com/robfig/cron v1.2.0 ## explicit github.com/robfig/cron -# github.com/sirupsen/logrus v1.9.0 +# github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus -# github.com/spf13/cobra v1.7.0 +# github.com/spf13/cobra v1.8.1 ## explicit; go 1.15 github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/stretchr/testify v1.8.4 -## explicit; go 1.20 +# github.com/stretchr/testify v1.9.0 +## explicit; go 1.17 github.com/stretchr/testify/assert # github.com/summerwind/h2spec v0.0.0-20200804131034-70ac22940108 ## explicit; go 1.12 @@ -630,7 +625,7 @@ go.opencensus.io/trace/tracestate # go.uber.org/multierr v1.11.0 ## explicit; go 1.19 go.uber.org/multierr -# go.uber.org/zap v1.26.0 +# go.uber.org/zap v1.27.0 ## explicit; go 1.19 go.uber.org/zap go.uber.org/zap/buffer @@ -641,8 +636,8 @@ go.uber.org/zap/internal/exit go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore -# golang.org/x/crypto v0.21.0 -## explicit; go 1.18 +# golang.org/x/crypto v0.25.0 +## explicit; go 1.20 golang.org/x/crypto/chacha20 golang.org/x/crypto/chacha20poly1305 golang.org/x/crypto/cryptobyte @@ -653,12 +648,12 @@ golang.org/x/crypto/internal/poly1305 golang.org/x/crypto/pkcs12 golang.org/x/crypto/pkcs12/internal/rc2 golang.org/x/crypto/sha3 -# golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 -## explicit; go 1.18 +# golang.org/x/exp v0.0.0-20240716175740-e3f259677ff7 +## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/maps golang.org/x/exp/slices -# golang.org/x/net v0.23.0 +# golang.org/x/net v0.27.0 ## explicit; go 1.18 golang.org/x/net/bpf golang.org/x/net/context @@ -673,28 +668,31 @@ golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace golang.org/x/net/websocket -# golang.org/x/oauth2 v0.12.0 +# golang.org/x/oauth2 v0.21.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler golang.org/x/oauth2/google -golang.org/x/oauth2/google/internal/externalaccount +golang.org/x/oauth2/google/externalaccount +golang.org/x/oauth2/google/internal/externalaccountauthorizeduser +golang.org/x/oauth2/google/internal/impersonate +golang.org/x/oauth2/google/internal/stsexchange golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.6.0 +# golang.org/x/sync v0.7.0 ## explicit; go 1.18 golang.org/x/sync/errgroup -# golang.org/x/sys v0.20.0 +# golang.org/x/sys v0.22.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.18.0 +# golang.org/x/term v0.22.0 ## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.14.0 +# golang.org/x/text v0.16.0 ## explicit; go 1.18 golang.org/x/text/internal/language golang.org/x/text/internal/language/compact @@ -704,11 +702,11 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.3.0 -## explicit +# golang.org/x/time v0.5.0 +## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 -## explicit; go 1.17 +# 
golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 +## explicit; go 1.18 golang.org/x/xerrors golang.org/x/xerrors/internal # gomodules.xyz/jsonpatch/v2 v2.4.0 @@ -742,19 +740,20 @@ google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d -## explicit; go 1.19 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d +## explicit; go 1.20 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.58.3 -## explicit; go 1.19 +# google.golang.org/grpc v1.65.0 +## explicit; go 1.21 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base google.golang.org/grpc/balancer/grpclb/state +google.golang.org/grpc/balancer/pickfirst google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/benchmark/stats google.golang.org/grpc/binarylog/grpc_binarylog_v1 @@ -776,7 +775,6 @@ google.golang.org/grpc/internal/channelz google.golang.org/grpc/internal/credentials google.golang.org/grpc/internal/envconfig google.golang.org/grpc/internal/grpclog -google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil google.golang.org/grpc/internal/idle @@ -784,6 +782,7 @@ google.golang.org/grpc/internal/metadata google.golang.org/grpc/internal/pretty google.golang.org/grpc/internal/resolver google.golang.org/grpc/internal/resolver/dns +google.golang.org/grpc/internal/resolver/dns/internal google.golang.org/grpc/internal/resolver/passthrough google.golang.org/grpc/internal/resolver/unix google.golang.org/grpc/internal/serviceconfig @@ -800,12 +799,14 @@ google.golang.org/grpc/orca google.golang.org/grpc/orca/internal google.golang.org/grpc/peer google.golang.org/grpc/resolver +google.golang.org/grpc/resolver/dns google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.33.0 -## explicit; go 1.17 +# google.golang.org/protobuf v1.34.2 +## explicit; go 1.20 +google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire @@ -813,6 +814,7 @@ google.golang.org/protobuf/internal/descfmt google.golang.org/protobuf/internal/descopts google.golang.org/protobuf/internal/detrand google.golang.org/protobuf/internal/editiondefaults +google.golang.org/protobuf/internal/editionssupport google.golang.org/protobuf/internal/encoding/defval google.golang.org/protobuf/internal/encoding/json google.golang.org/protobuf/internal/encoding/messageset @@ -830,6 +832,7 @@ google.golang.org/protobuf/internal/set google.golang.org/protobuf/internal/strs google.golang.org/protobuf/internal/version google.golang.org/protobuf/proto +google.golang.org/protobuf/protoadapt google.golang.org/protobuf/reflect/protodesc google.golang.org/protobuf/reflect/protoreflect google.golang.org/protobuf/reflect/protoregistry @@ -855,7 +858,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.30.2 +# k8s.io/api v0.30.3 ## explicit; go 1.22.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -914,7 +917,7 @@ 
k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/storagemigration/v1alpha1 -# k8s.io/apiextensions-apiserver v0.30.2 +# k8s.io/apiextensions-apiserver v0.30.3 ## explicit; go 1.22.0 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 @@ -925,7 +928,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 -# k8s.io/apimachinery v0.30.2 +# k8s.io/apimachinery v0.30.3 ## explicit; go 1.22.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -985,7 +988,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.30.2 +# k8s.io/apiserver v0.30.3 ## explicit; go 1.22.0 k8s.io/apiserver/pkg/authentication/user k8s.io/apiserver/pkg/storage/names @@ -1263,13 +1266,13 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/component-base v0.30.2 +# k8s.io/component-base v0.30.3 ## explicit; go 1.22.0 k8s.io/component-base/metrics k8s.io/component-base/metrics/legacyregistry k8s.io/component-base/metrics/prometheusextension k8s.io/component-base/version -# k8s.io/klog/v2 v2.120.1 +# k8s.io/klog/v2 v2.130.1 ## explicit; go 1.18 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer @@ -1285,7 +1288,7 @@ k8s.io/kube-aggregator/pkg/apis/apiregistration/v1 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1 k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1 -# k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 +# k8s.io/kube-openapi v0.0.0-20240423202451-8948a665c108 ## explicit; go 1.20 k8s.io/kube-openapi/pkg/cached k8s.io/kube-openapi/pkg/common @@ -1296,7 +1299,7 @@ k8s.io/kube-openapi/pkg/schemaconv k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/validation/spec -# k8s.io/utils v0.0.0-20230726121419-3b25d923346b +# k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock @@ -1378,3 +1381,4 @@ sigs.k8s.io/yaml/goyaml.v2 # bitbucket.org/ww/goautoneg => github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d # github.com/openshift/api => github.com/openshift/api v0.0.0-20240806152114-6b4a57ec20b0 # k8s.io/client-go => k8s.io/client-go v0.30.2 +# github.com/imdario/mergo => github.com/imdario/mergo v0.3.5
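The final line records a new replace directive pinning github.com/imdario/mergo to v0.3.5 even though the module graph now requires v1.0.0 (see the go.mod hunk), presumably to preserve the 0.3.x merge semantics that Kubernetes client code was written against. For reference, this is how such a pin reads in go.mod; running `go mod vendor` then records it at the end of vendor/modules.txt exactly as above (paths from this diff, comments added for illustration):

```
// In the require block, the graph-selected version:
require github.com/imdario/mergo v1.0.0 // indirect

// The replace directive forces the whole build onto the pinned version:
replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.5
```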