From f24915e62353d1cdcdcbb20911d0082b7eb47bd4 Mon Sep 17 00:00:00 2001 From: "plural-copilot[bot]" Date: Tue, 24 Sep 2024 14:25:42 +0000 Subject: [PATCH] fix(deps): update module github.com/aws/aws-sdk-go to v1.42.53 --- go.mod | 2 +- go.sum | 3 + .../stscreds/web_identity_provider.go | 40 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 1780 +++++++++++-- .../aws/aws-sdk-go/aws/request/request.go | 9 + .../aws/aws-sdk-go/aws/session/credentials.go | 33 +- .../aws/aws-sdk-go/aws/session/session.go | 5 + .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../private/protocol/rest/unmarshal.go | 2 +- .../aws/aws-sdk-go/service/iam/service.go | 4 + .../aws/aws-sdk-go/service/sts/api.go | 168 +- .../aws/aws-sdk-go/service/sts/errors.go | 2 +- .../aws/aws-sdk-go/service/sts/service.go | 4 + .../crossplane/crossplane-runtime/LICENSE | 201 -- .../apis/common/v1/condition.go | 252 -- .../crossplane-runtime/apis/common/v1/doc.go | 19 - .../apis/common/v1/merge.go | 52 - .../apis/common/v1/policies.go | 47 - .../apis/common/v1/resource.go | 265 -- .../apis/common/v1/zz_generated.deepcopy.go | 371 --- .../crossplane-runtime/pkg/errors/errors.go | 128 - .../crossplane-runtime/pkg/meta/meta.go | 414 ---- .../pkg/reference/reference.go | 296 --- .../crossplane-runtime/pkg/resource/api.go | 250 -- .../crossplane-runtime/pkg/resource/doc.go | 19 - .../pkg/resource/enqueue_handlers.go | 67 - .../pkg/resource/interfaces.go | 239 -- .../pkg/resource/late_initializer.go | 86 - .../pkg/resource/predicates.go | 148 -- .../pkg/resource/providerconfig.go | 160 -- .../pkg/resource/reference.go | 74 - .../pkg/resource/resource.go | 412 --- .../crossplane/provider-aws/LICENSE | 201 -- .../apis/iam/v1beta1/accesskey_types.go | 82 - .../provider-aws/apis/iam/v1beta1/doc.go | 22 - .../apis/iam/v1beta1/group_types.go | 80 - .../v1beta1/grouppolicyattachment_types.go | 105 - .../iam/v1beta1/groupusermembership_types.go | 105 - .../v1beta1/openidconnectprovider_types.go | 117 - .../apis/iam/v1beta1/policy_types.go | 108 - .../apis/iam/v1beta1/referencers.go | 56 - .../provider-aws/apis/iam/v1beta1/register.go | 134 - .../apis/iam/v1beta1/role_types.go | 133 - .../iam/v1beta1/rolepolicyattachment_types.go | 106 - .../apis/iam/v1beta1/user_types.go | 87 - .../iam/v1beta1/userpolicyattachment_types.go | 105 - .../apis/iam/v1beta1/zz_generated.deepcopy.go | 1419 ----------- .../apis/iam/v1beta1/zz_generated.managed.go | 581 ----- .../iam/v1beta1/zz_generated.managedlist.go | 111 - .../iam/v1beta1/zz_generated.resolvers.go | 220 -- vendor/github.com/kr/text/License | 19 - vendor/github.com/kr/text/Readme | 3 - vendor/github.com/kr/text/doc.go | 3 - vendor/github.com/kr/text/indent.go | 74 - vendor/github.com/kr/text/wrap.go | 86 - vendor/github.com/motomux/pretty/.gitignore | 4 - vendor/github.com/motomux/pretty/License | 21 - vendor/github.com/motomux/pretty/Readme | 9 - vendor/github.com/motomux/pretty/diff.go | 273 -- vendor/github.com/motomux/pretty/formatter.go | 328 --- vendor/github.com/motomux/pretty/pretty.go | 108 - vendor/github.com/motomux/pretty/zero.go | 41 - .../reconcile-helper.go | 2206 ----------------- .../controller-reconcile-helper/renovate.json | 5 - .../apis/platform/v1alpha1/config_types.go | 129 - .../platform/v1alpha1/environment_types.go | 97 - .../platform/v1alpha1/groupversion_info.go | 36 - .../apis/platform/v1alpha1/project_types.go | 85 - .../platform/v1alpha1/resourcegroup_types.go | 74 - .../platform/v1alpha1/storagegroup_types.go | 79 - .../v1alpha1/zz_generated.deepcopy.go | 715 ------ 
vendor/github.com/spf13/afero/.gitignore | 2 - vendor/github.com/spf13/afero/.travis.yml | 26 - vendor/github.com/spf13/afero/LICENSE.txt | 174 -- vendor/github.com/spf13/afero/README.md | 430 ---- vendor/github.com/spf13/afero/afero.go | 111 - vendor/github.com/spf13/afero/appveyor.yml | 15 - vendor/github.com/spf13/afero/basepath.go | 211 -- .../github.com/spf13/afero/cacheOnReadFs.go | 311 --- vendor/github.com/spf13/afero/const_bsds.go | 22 - .../github.com/spf13/afero/const_win_unix.go | 26 - .../github.com/spf13/afero/copyOnWriteFs.go | 326 --- vendor/github.com/spf13/afero/httpFs.go | 114 - vendor/github.com/spf13/afero/iofs.go | 288 --- vendor/github.com/spf13/afero/ioutil.go | 240 -- vendor/github.com/spf13/afero/lstater.go | 27 - vendor/github.com/spf13/afero/match.go | 110 - vendor/github.com/spf13/afero/mem/dir.go | 37 - vendor/github.com/spf13/afero/mem/dirmap.go | 43 - vendor/github.com/spf13/afero/mem/file.go | 338 --- vendor/github.com/spf13/afero/memmap.go | 404 --- vendor/github.com/spf13/afero/os.go | 113 - vendor/github.com/spf13/afero/path.go | 106 - vendor/github.com/spf13/afero/readonlyfs.go | 96 - vendor/github.com/spf13/afero/regexpfs.go | 224 -- vendor/github.com/spf13/afero/symlink.go | 55 - vendor/github.com/spf13/afero/unionFile.go | 317 --- vendor/github.com/spf13/afero/util.go | 330 --- .../zalando/postgres-operator/LICENSE | 21 - .../pkg/apis/acid.zalan.do/register.go | 6 - .../pkg/apis/acid.zalan.do/v1/const.go | 19 - .../pkg/apis/acid.zalan.do/v1/crds.go | 1647 ------------ .../pkg/apis/acid.zalan.do/v1/doc.go | 6 - .../pkg/apis/acid.zalan.do/v1/marshal.go | 153 -- .../v1/operator_configuration_type.go | 249 -- .../acid.zalan.do/v1/postgres_team_type.go | 33 - .../apis/acid.zalan.do/v1/postgresql_type.go | 228 -- .../pkg/apis/acid.zalan.do/v1/register.go | 54 - .../pkg/apis/acid.zalan.do/v1/util.go | 106 - .../acid.zalan.do/v1/zz_generated.deepcopy.go | 1227 --------- .../postgres-operator/pkg/spec/types.go | 216 -- .../pkg/util/config/config.go | 288 --- .../postgres-operator/pkg/util/config/util.go | 246 -- .../pkg/util/constants/annotations.go | 11 - .../pkg/util/constants/aws.go | 17 - .../pkg/util/constants/kubernetes.go | 13 - .../pkg/util/constants/pooler.go | 18 - .../pkg/util/constants/postgresql.go | 18 - .../pkg/util/constants/roles.go | 22 - .../pkg/util/constants/units.go | 6 - .../postgres-operator/pkg/util/util.go | 354 --- vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go | 77 - vendor/modules.txt | 22 +- .../hierarchical-namespaces/LICENSE | 201 -- .../api/v1alpha2/groupversion_info.go | 35 - .../api/v1alpha2/hierarchy_types.go | 240 -- .../api/v1alpha2/hnc_config.go | 176 -- .../api/v1alpha2/subnamespace_anchor.go | 85 - .../api/v1alpha2/zz_generated.deepcopy.go | 363 --- 129 files changed, 1757 insertions(+), 22617 deletions(-) delete mode 100644 vendor/github.com/crossplane/crossplane-runtime/LICENSE delete mode 100644 vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/condition.go delete mode 100644 vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/doc.go delete mode 100644 vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/merge.go delete mode 100644 vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/policies.go delete mode 100644 vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/resource.go delete mode 100644 vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/zz_generated.deepcopy.go delete mode 100644 
vendor/github.com/crossplane/crossplane-runtime/pkg/errors/errors.go delete mode 100644 vendor/github.com/crossplane/crossplane-runtime/pkg/meta/meta.go delete mode 100644 vendor/github.com/crossplane/crossplane-runtime/pkg/reference/reference.go delete mode 100644 vendor/github.com/crossplane/crossplane-runtime/pkg/resource/api.go delete mode 100644 vendor/github.com/crossplane/crossplane-runtime/pkg/resource/doc.go delete mode 100644 vendor/github.com/crossplane/crossplane-runtime/pkg/resource/enqueue_handlers.go delete mode 100644 vendor/github.com/crossplane/crossplane-runtime/pkg/resource/interfaces.go delete mode 100644 vendor/github.com/crossplane/crossplane-runtime/pkg/resource/late_initializer.go delete mode 100644 vendor/github.com/crossplane/crossplane-runtime/pkg/resource/predicates.go delete mode 100644 vendor/github.com/crossplane/crossplane-runtime/pkg/resource/providerconfig.go delete mode 100644 vendor/github.com/crossplane/crossplane-runtime/pkg/resource/reference.go delete mode 100644 vendor/github.com/crossplane/crossplane-runtime/pkg/resource/resource.go delete mode 100644 vendor/github.com/crossplane/provider-aws/LICENSE delete mode 100644 vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/accesskey_types.go delete mode 100644 vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/doc.go delete mode 100644 vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/group_types.go delete mode 100644 vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/grouppolicyattachment_types.go delete mode 100644 vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/groupusermembership_types.go delete mode 100644 vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/openidconnectprovider_types.go delete mode 100644 vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/policy_types.go delete mode 100644 vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/referencers.go delete mode 100644 vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/register.go delete mode 100644 vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/role_types.go delete mode 100644 vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/rolepolicyattachment_types.go delete mode 100644 vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/user_types.go delete mode 100644 vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/userpolicyattachment_types.go delete mode 100644 vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/zz_generated.managed.go delete mode 100644 vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/zz_generated.managedlist.go delete mode 100644 vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/zz_generated.resolvers.go delete mode 100644 vendor/github.com/kr/text/License delete mode 100644 vendor/github.com/kr/text/Readme delete mode 100644 vendor/github.com/kr/text/doc.go delete mode 100644 vendor/github.com/kr/text/indent.go delete mode 100644 vendor/github.com/kr/text/wrap.go delete mode 100644 vendor/github.com/motomux/pretty/.gitignore delete mode 100644 vendor/github.com/motomux/pretty/License delete mode 100644 vendor/github.com/motomux/pretty/Readme delete mode 100644 vendor/github.com/motomux/pretty/diff.go delete mode 100644 vendor/github.com/motomux/pretty/formatter.go delete mode 100644 vendor/github.com/motomux/pretty/pretty.go delete mode 100644 vendor/github.com/motomux/pretty/zero.go delete 
mode 100644 vendor/github.com/pluralsh/controller-reconcile-helper/reconcile-helper.go delete mode 100644 vendor/github.com/pluralsh/controller-reconcile-helper/renovate.json delete mode 100644 vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/config_types.go delete mode 100644 vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/environment_types.go delete mode 100644 vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/groupversion_info.go delete mode 100644 vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/project_types.go delete mode 100644 vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/resourcegroup_types.go delete mode 100644 vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/storagegroup_types.go delete mode 100644 vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/github.com/spf13/afero/.gitignore delete mode 100644 vendor/github.com/spf13/afero/.travis.yml delete mode 100644 vendor/github.com/spf13/afero/LICENSE.txt delete mode 100644 vendor/github.com/spf13/afero/README.md delete mode 100644 vendor/github.com/spf13/afero/afero.go delete mode 100644 vendor/github.com/spf13/afero/appveyor.yml delete mode 100644 vendor/github.com/spf13/afero/basepath.go delete mode 100644 vendor/github.com/spf13/afero/cacheOnReadFs.go delete mode 100644 vendor/github.com/spf13/afero/const_bsds.go delete mode 100644 vendor/github.com/spf13/afero/const_win_unix.go delete mode 100644 vendor/github.com/spf13/afero/copyOnWriteFs.go delete mode 100644 vendor/github.com/spf13/afero/httpFs.go delete mode 100644 vendor/github.com/spf13/afero/iofs.go delete mode 100644 vendor/github.com/spf13/afero/ioutil.go delete mode 100644 vendor/github.com/spf13/afero/lstater.go delete mode 100644 vendor/github.com/spf13/afero/match.go delete mode 100644 vendor/github.com/spf13/afero/mem/dir.go delete mode 100644 vendor/github.com/spf13/afero/mem/dirmap.go delete mode 100644 vendor/github.com/spf13/afero/mem/file.go delete mode 100644 vendor/github.com/spf13/afero/memmap.go delete mode 100644 vendor/github.com/spf13/afero/os.go delete mode 100644 vendor/github.com/spf13/afero/path.go delete mode 100644 vendor/github.com/spf13/afero/readonlyfs.go delete mode 100644 vendor/github.com/spf13/afero/regexpfs.go delete mode 100644 vendor/github.com/spf13/afero/symlink.go delete mode 100644 vendor/github.com/spf13/afero/unionFile.go delete mode 100644 vendor/github.com/spf13/afero/util.go delete mode 100644 vendor/github.com/zalando/postgres-operator/LICENSE delete mode 100644 vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/register.go delete mode 100644 vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/const.go delete mode 100644 vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/crds.go delete mode 100644 vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/doc.go delete mode 100644 vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/marshal.go delete mode 100644 vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go delete mode 100644 vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/postgres_team_type.go delete mode 100644 vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/postgresql_type.go delete mode 100644 
vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/register.go delete mode 100644 vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/util.go delete mode 100644 vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go delete mode 100644 vendor/github.com/zalando/postgres-operator/pkg/spec/types.go delete mode 100644 vendor/github.com/zalando/postgres-operator/pkg/util/config/config.go delete mode 100644 vendor/github.com/zalando/postgres-operator/pkg/util/config/util.go delete mode 100644 vendor/github.com/zalando/postgres-operator/pkg/util/constants/annotations.go delete mode 100644 vendor/github.com/zalando/postgres-operator/pkg/util/constants/aws.go delete mode 100644 vendor/github.com/zalando/postgres-operator/pkg/util/constants/kubernetes.go delete mode 100644 vendor/github.com/zalando/postgres-operator/pkg/util/constants/pooler.go delete mode 100644 vendor/github.com/zalando/postgres-operator/pkg/util/constants/postgresql.go delete mode 100644 vendor/github.com/zalando/postgres-operator/pkg/util/constants/roles.go delete mode 100644 vendor/github.com/zalando/postgres-operator/pkg/util/constants/units.go delete mode 100644 vendor/github.com/zalando/postgres-operator/pkg/util/util.go delete mode 100644 vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go delete mode 100644 vendor/sigs.k8s.io/hierarchical-namespaces/LICENSE delete mode 100644 vendor/sigs.k8s.io/hierarchical-namespaces/api/v1alpha2/groupversion_info.go delete mode 100644 vendor/sigs.k8s.io/hierarchical-namespaces/api/v1alpha2/hierarchy_types.go delete mode 100644 vendor/sigs.k8s.io/hierarchical-namespaces/api/v1alpha2/hnc_config.go delete mode 100644 vendor/sigs.k8s.io/hierarchical-namespaces/api/v1alpha2/subnamespace_anchor.go delete mode 100644 vendor/sigs.k8s.io/hierarchical-namespaces/api/v1alpha2/zz_generated.deepcopy.go diff --git a/go.mod b/go.mod index bbe8c029..6b0f77eb 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.17 require ( github.com/aws-controllers-k8s/iam-controller v0.0.6 - github.com/aws/aws-sdk-go v1.42.0 + github.com/aws/aws-sdk-go v1.42.53 github.com/cenkalti/backoff v2.2.1+incompatible // github.com/ghodss/yaml v1.0.0 // indirect github.com/go-logr/logr v1.2.0 diff --git a/go.sum b/go.sum index 09201cb0..446869f6 100644 --- a/go.sum +++ b/go.sum @@ -105,6 +105,8 @@ github.com/aws/aws-sdk-go v1.37.10/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2z github.com/aws/aws-sdk-go v1.41.16/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/aws/aws-sdk-go v1.42.0 h1:BMZws0t8NAhHFsfnT3B40IwD13jVDG5KerlRksctVIw= github.com/aws/aws-sdk-go v1.42.0/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/aws/aws-sdk-go v1.42.53 h1:56T04NWcmc0ZVYFbUc6HdewDQ9iHQFlmS6hj96dRjJs= +github.com/aws/aws-sdk-go v1.42.53/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc= github.com/aws/aws-sdk-go-v2 v1.11.0/go.mod h1:SQfA+m2ltnu1cA0soUkj4dRSsmITiVQUJvBIZjzfPyQ= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0/go.mod h1:Xn6sxgRuIDflLRJFj5Ev7UxABIkNbccFPV/p8itDReM= github.com/aws/aws-sdk-go-v2/config v1.10.0/go.mod h1:xuqoV5etD3N3B8Ts9je4ijgAv6mb+6NiOPFMUhwRcjA= @@ -846,6 +848,7 @@ golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net 
v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go index cefe2a76..19ad619a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go @@ -28,7 +28,7 @@ const ( // compare test values. var now = time.Now -// TokenFetcher shuold return WebIdentity token bytes or an error +// TokenFetcher should return WebIdentity token bytes or an error type TokenFetcher interface { FetchToken(credentials.Context) ([]byte, error) } @@ -50,6 +50,8 @@ func (f FetchTokenPath) FetchToken(ctx credentials.Context) ([]byte, error) { // an OIDC token. type WebIdentityRoleProvider struct { credentials.Expiry + + // The policy ARNs to use with the web identity assumed role. PolicyArns []*sts.PolicyDescriptorType // Duration the STS credentials will be valid for. Truncated to seconds. @@ -74,6 +76,9 @@ type WebIdentityRoleProvider struct { // NewWebIdentityCredentials will return a new set of credentials with a given // configuration, role arn, and token file path. +// +// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible +// functional options, and wrap with credentials.NewCredentials helper. func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials { svc := sts.New(c) p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path) @@ -82,19 +87,42 @@ func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName // NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the // provided stsiface.STSAPI +// +// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible +// functional options. func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider { - return NewWebIdentityRoleProviderWithToken(svc, roleARN, roleSessionName, FetchTokenPath(path)) + return NewWebIdentityRoleProviderWithOptions(svc, roleARN, roleSessionName, FetchTokenPath(path)) } // NewWebIdentityRoleProviderWithToken will return a new WebIdentityRoleProvider with the // provided stsiface.STSAPI and a TokenFetcher +// +// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible +// functional options. func NewWebIdentityRoleProviderWithToken(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher) *WebIdentityRoleProvider { - return &WebIdentityRoleProvider{ + return NewWebIdentityRoleProviderWithOptions(svc, roleARN, roleSessionName, tokenFetcher) +} + +// NewWebIdentityRoleProviderWithOptions will return an initialized +// WebIdentityRoleProvider with the provided stsiface.STSAPI, role ARN, and a +// TokenFetcher. Additional options can be provided as functional options.
+// +// TokenFetcher is the implementation that will retrieve the JWT token used to +// assume the role. Use the provided FetchTokenPath implementation to +// retrieve the JWT token using a file system path. +func NewWebIdentityRoleProviderWithOptions(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher, optFns ...func(*WebIdentityRoleProvider)) *WebIdentityRoleProvider { + p := WebIdentityRoleProvider{ client: svc, tokenFetcher: tokenFetcher, roleARN: roleARN, roleSessionName: roleSessionName, } + + for _, fn := range optFns { + fn(&p) + } + + return &p } // Retrieve will attempt to assume a role from a token which is located at @@ -104,9 +132,9 @@ func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) { return p.RetrieveWithContext(aws.BackgroundContext()) } -// RetrieveWithContext will attempt to assume a role from a token which is located at -// 'WebIdentityTokenFilePath' specified destination and if that is empty an -// error will be returned. +// RetrieveWithContext will attempt to assume a role from a token which is +// located at 'WebIdentityTokenFilePath' specified destination and if that is +// empty an error will be returned. func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { b, err := p.tokenFetcher.FetchToken(ctx) if err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 614345da..a49650c6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -25,6 +25,7 @@ const ( ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). + ApSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta). CaCentral1RegionID = "ca-central-1" // Canada (Central). EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). EuNorth1RegionID = "eu-north-1" // Europe (Stockholm).
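Following up on the stscreds change above: both older constructors now delegate to NewWebIdentityRoleProviderWithOptions, so a caller migrating off the deprecated entry points only needs the new signature. Below is a minimal sketch of that migration, assuming a hypothetical role ARN and token file path; the constructor signature, FetchTokenPath, the Duration field, and the credentials.NewCredentials wrapper are all taken from the hunks and deprecation notes above.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())

	// The role ARN and token path are placeholders for illustration.
	p := stscreds.NewWebIdentityRoleProviderWithOptions(
		sts.New(sess),
		"arn:aws:iam::123456789012:role/example-role",
		"example-session",
		stscreds.FetchTokenPath("/var/run/secrets/eks.amazonaws.com/serviceaccount/token"),
		// Functional option: set the Duration field declared on the
		// WebIdentityRoleProvider struct in the hunk above.
		func(o *stscreds.WebIdentityRoleProvider) {
			o.Duration = 15 * time.Minute
		},
	)

	// Wrap the provider as the deprecation notes recommend.
	creds := credentials.NewCredentials(p)
	v, err := creds.Get()
	if err != nil {
		fmt.Println("retrieve failed:", err)
		return
	}
	fmt.Println("credentials from:", v.ProviderName)
}

Each functional option simply mutates the provider before it is returned, matching the optFns loop added in the hunk above.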
@@ -161,6 +162,9 @@ var awsPartition = partition{ "ap-southeast-2": region{ Description: "Asia Pacific (Sydney)", }, + "ap-southeast-3": region{ + Description: "Asia Pacific (Jakarta)", + }, "ca-central-1": region{ Description: "Canada (Central)", }, @@ -235,6 +239,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -391,6 +398,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -1007,6 +1017,14 @@ var awsPartition = partition{ Region: "ap-southeast-2", }, }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "api.ecr.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -1430,6 +1448,50 @@ var awsPartition = partition{ }, }, }, + "api.iotwireless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "api.iotwireless.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.iotwireless.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.iotwireless.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "api.mediatailor": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -1702,6 +1764,73 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "appflow": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -1785,6 +1914,9 @@ var awsPartition 
= partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -1840,6 +1972,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -2352,6 +2487,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -2424,6 +2562,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -2821,6 +2962,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -2994,6 +3138,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -3112,36 +3259,12 @@ var awsPartition = partition{ }, "cloudhsm": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, }, }, "cloudhsmv2": service{ @@ -3723,6 +3846,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -4498,88 +4624,223 @@ var awsPartition = partition{ }, }, }, - "config": service{ + "compute-optimizer": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, endpointKey{ Region: "ap-northeast-1", - }: endpoint{}, + }: endpoint{ + Hostname: "compute-optimizer.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, endpointKey{ Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, + }: endpoint{ + Hostname: "compute-optimizer.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, endpointKey{ Region: "ap-south-1", - }: endpoint{}, + }: endpoint{ + Hostname: "compute-optimizer.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, endpointKey{ Region: "ap-southeast-1", - }: endpoint{}, + }: endpoint{ + Hostname: "compute-optimizer.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, endpointKey{ Region: "ap-southeast-2", - }: endpoint{}, + }: endpoint{ + Hostname: "compute-optimizer.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, endpointKey{ Region: 
"ca-central-1", - }: endpoint{}, + }: endpoint{ + Hostname: "compute-optimizer.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: "eu-central-1", - }: endpoint{}, + }: endpoint{ + Hostname: "compute-optimizer.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, endpointKey{ Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, + }: endpoint{ + Hostname: "compute-optimizer.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, endpointKey{ Region: "eu-west-1", - }: endpoint{}, + }: endpoint{ + Hostname: "compute-optimizer.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, endpointKey{ Region: "eu-west-2", - }: endpoint{}, + }: endpoint{ + Hostname: "compute-optimizer.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, endpointKey{ Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", }: endpoint{ - Hostname: "config-fips.us-east-1.amazonaws.com", + Hostname: "compute-optimizer.eu-west-3.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "eu-west-3", }, - Deprecated: boxedTrue, }, endpointKey{ - Region: "fips-us-east-2", + Region: "sa-east-1", }: endpoint{ - Hostname: "config-fips.us-east-2.amazonaws.com", + Hostname: "compute-optimizer.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "sa-east-1", }, - Deprecated: boxedTrue, }, endpointKey{ - Region: "fips-us-west-1", + Region: "us-east-1", }: endpoint{ - Hostname: "config-fips.us-west-1.amazonaws.com", + Hostname: "compute-optimizer.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-east-1", }, - Deprecated: boxedTrue, }, endpointKey{ - Region: "fips-us-west-2", + Region: "us-east-2", }: endpoint{ - Hostname: "config-fips.us-west-2.amazonaws.com", + Hostname: "compute-optimizer.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "us-east-2", }, - Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "compute-optimizer.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "compute-optimizer.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "config": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", 
+ }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "config-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "config-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "config-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "config-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, }, endpointKey{ Region: "me-south-1", @@ -4627,6 +4888,9 @@ var awsPartition = partition{ }, "connect": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -5190,6 +5454,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -5342,6 +5609,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -5586,6 +5856,37 @@ var awsPartition = partition{ }, }, }, + "drs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "ds": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -5759,6 +6060,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -6063,6 +6367,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -6235,6 +6542,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -6500,6 +6810,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -6811,6 +7124,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -6946,6 +7268,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, 
}, + endpointKey{ + Region: "fips-ap-southeast-3", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -7150,6 +7481,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -7283,6 +7617,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -7621,6 +7958,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -7757,6 +8097,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -7858,6 +8201,55 @@ var awsPartition = partition{ }, }, }, + "evidently": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "evidently.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "evidently.ap-southeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "evidently.ap-southeast-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "evidently.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "evidently.eu-north-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "evidently.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "evidently.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "evidently.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "evidently.us-west-2.amazonaws.com", + }, + }, + }, "finspace": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -8935,6 +9327,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -9285,6 +9680,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -9355,6 +9753,9 @@ var awsPartition = partition{ endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -9813,14 +10214,7 @@ var awsPartition = partition{ }, }, }, - "iot": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - }, + "inspector2": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "ap-east-1", @@ -9843,18 +10237,15 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iot-fips.ca-central-1.amazonaws.com", - }, endpointKey{ 
Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -9865,13 +10256,84 @@ var awsPartition = partition{ Region: "eu-west-3", }: endpoint{}, endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "iot-fips.ca-central-1.amazonaws.com", + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ CredentialScope: credentialScope{ Service: "execute-api", }, - Deprecated: boxedTrue, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "execute-api", + }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-east-1", @@ -10370,6 +10832,9 @@ var awsPartition = partition{ }, "kafka": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -10379,6 +10844,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -10577,6 +11045,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -10680,6 +11151,9 @@ var awsPartition = partition{ }, "kinesisanalytics": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -10689,6 +11163,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -10934,6 +11411,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-southeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-3-fips", + }: endpoint{ + Hostname: "kms-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + 
Deprecated: boxedTrue, + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -11302,48 +11797,147 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.af-south-1.api.aws", + }, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-east-1.api.aws", + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-northeast-1.api.aws", + }, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-northeast-3.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-south-1.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-3.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ca-central-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-central-1.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-north-1.api.aws", + }, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-south-1.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-west-3.api.aws", + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -11383,12 +11977,30 @@ var awsPartition = partition{ endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.me-south-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.sa-east-1.api.aws", + }, endpointKey{ 
Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, @@ -11398,6 +12010,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, @@ -11407,6 +12025,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, @@ -11416,6 +12040,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, @@ -11623,6 +12253,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -11737,6 +12370,37 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "lookoutmetrics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "lookoutvision": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -12367,6 +13031,52 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "meetings-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, "messaging-chime": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -12422,6 +13132,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -12490,6 +13203,9 @@ var awsPartition = partition{ }, "mgn": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, 
endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -12520,12 +13236,21 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -12543,6 +13268,31 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "migrationhub-strategy": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "mobileanalytics": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -12691,6 +13441,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -13620,24 +14373,70 @@ var awsPartition = partition{ Hostname: "outposts-fips.us-west-1.amazonaws.com", }, endpointKey{ - Region: "us-west-2", + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.us-west-2.amazonaws.com", + }, + }, + }, + "personalize": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", }: endpoint{}, endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "outposts-fips.us-west-2.amazonaws.com", - }, + Region: "us-west-2", + }: endpoint{}, }, }, - "personalize": service{ + "pi": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -13653,15 +14452,36 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ 
-14069,6 +14889,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -14322,6 +15145,73 @@ var awsPartition = partition{ }, }, }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "rds": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -14348,6 +15238,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -14628,6 +15521,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -15023,6 +15919,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -15272,6 +16171,40 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "rum": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "runtime-v2-lex": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -15627,6 +16560,15 @@ var awsPartition = partition{ Hostname: "s3.dualstack.ap-southeast-2.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-southeast-3.amazonaws.com", + }, endpointKey{ Region: "aws-global", }: endpoint{ @@ -16346,52 +17288,169 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-2", }: endpoint{ - Hostname: 
"s3-control.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, + Hostname: "s3-control.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "s3-outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + + Deprecated: boxedTrue, }, endpointKey{ - Region: "us-west-2", - Variant: dualStackVariant, + Region: "fips-us-east-1", }: endpoint{ - Hostname: "s3-control.dualstack.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, + + Deprecated: boxedTrue, }, endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, + Region: "fips-us-east-2", }: endpoint{ - Hostname: "s3-control-fips.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, + + Deprecated: boxedTrue, }, endpointKey{ - Region: "us-west-2", - Variant: fipsVariant | dualStackVariant, + Region: "fips-us-west-1", }: endpoint{ - Hostname: "s3-control-fips.dualstack.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, + + Deprecated: boxedTrue, }, endpointKey{ - Region: "us-west-2-fips", + Region: "fips-us-west-2", }: endpoint{ - Hostname: "s3-control-fips.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, + 
Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{}, }, }, "savingsplans": service{ @@ -16525,9 +17584,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "secretsmanager-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "secretsmanager-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -17370,6 +18447,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -17932,6 +19012,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -18065,6 +19148,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -18195,6 +19281,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -18316,12 +19405,21 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -18331,12 +19429,24 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -18368,6 +19478,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -18495,6 +19608,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: 
"ca-central-1", }: endpoint{}, @@ -18630,12 +19746,21 @@ var awsPartition = partition{ }, }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -18646,22 +19771,20 @@ var awsPartition = partition{ Region: "ap-southeast-2", }: endpoint{}, endpointKey{ - Region: "ca-central-1", + Region: "ap-southeast-3", }: endpoint{}, endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -18689,47 +19812,15 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "dynamodb-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, endpointKey{ Region: "us-east-2", }: endpoint{}, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "dynamodb-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, endpointKey{ Region: "us-west-1", }: endpoint{}, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "dynamodb-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, endpointKey{ Region: "us-west-2", }: endpoint{}, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "dynamodb-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, }, }, "sts": service{ @@ -18759,6 +19850,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "aws-global", }: endpoint{ @@ -18907,6 +20001,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -18998,14 +20095,84 @@ var awsPartition = partition{ Hostname: "swf-fips.us-west-1.amazonaws.com", }, endpointKey{ - Region: "us-west-2", + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf-fips.us-west-2.amazonaws.com", + }, + }, + }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: 
"eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", }: endpoint{}, endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "swf-fips.us-west-2.amazonaws.com", - }, + Region: "us-west-2", + }: endpoint{}, }, }, "tagging": service{ @@ -19034,6 +20201,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -19230,6 +20400,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -19245,6 +20421,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "fips.transcribe.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -19484,6 +20669,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -20507,6 +21695,19 @@ var awsPartition = partition{ }, }, }, + "workspaces-web": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "xray": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -20766,6 +21967,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "application-autoscaling": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -20964,6 +22175,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "codepipeline": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "cognito-identity": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -20971,6 +22192,26 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "compute-optimizer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "compute-optimizer.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "compute-optimizer.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "config": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -21483,9 +22724,21 @@ var awscnPartition = partition{ endpointKey{ Region: 
"cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.cn-north-1.api.amazonwebservices.com.cn", + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, "license-manager": service{ @@ -21586,6 +22839,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "pi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "polly": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -21985,6 +23248,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "tagging": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -22492,6 +23765,16 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "application-autoscaling": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -24256,7 +25539,24 @@ var awsusgovPartition = partition{ }, }, "identitystore": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "identitystore.{region}.{dnsSuffix}", + }, + }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "identitystore.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -24266,6 +25566,15 @@ var awsusgovPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "identitystore.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -24901,6 +26210,14 @@ var awsusgovPartition = partition{ }, "oidc": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "oidc.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -25024,6 +26341,14 @@ var awsusgovPartition = partition{ }, "portal.sso": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "portal.sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -25065,6 +26390,14 @@ var awsusgovPartition = partition{ }, }, "rds": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds.{region}.{dnsSuffix}", + }, + }, Endpoints: serviceEndpoints{ endpointKey{ Region: "rds.us-gov-east-1", @@ -25319,6 +26652,14 @@ var awsusgovPartition = partition{ }, }, "runtime.sagemaker": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"runtime.sagemaker.{region}.{dnsSuffix}", + }, + }, Endpoints: serviceEndpoints{ endpointKey{ Region: "us-gov-west-1", @@ -25545,6 +26886,36 @@ var awsusgovPartition = partition{ }, }, }, + "s3-outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{}, + }, + }, "secretsmanager": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -26125,29 +27496,51 @@ var awsusgovPartition = partition{ Service: "dynamodb", }, }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "streams.dynamodb.{region}.{dnsSuffix}", + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, }, Endpoints: serviceEndpoints{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "streams.dynamodb.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-east-1-fips", }: endpoint{ - Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + Hostname: "streams.dynamodb.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "streams.dynamodb.us-gov-west-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1-fips", }: endpoint{ - Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + Hostname: "streams.dynamodb.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, @@ -26259,6 +27652,16 @@ var awsusgovPartition = partition{ }, }, }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "tagging": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -26772,6 +28175,24 @@ var awsisoPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1-fips", + }: endpoint{ + Hostname: "dms.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "ds": service{ @@ -27181,7 +28602,6 @@ var awsisoPartition = partition{ "streams.dynamodb": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, CredentialScope: credentialScope{ Service: "dynamodb", }, @@ -27190,9 +28610,7 @@ var awsisoPartition = partition{ Endpoints: serviceEndpoints{ endpointKey{ Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, + }: endpoint{}, }, }, "sts": service{ @@ -27228,6 +28646,13 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, "transcribe": service{ Defaults: endpointDefaults{ defaultKey{}: 
endpoint{ @@ -27738,6 +29163,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "tagging": service{ Endpoints: serviceEndpoints{ endpointKey{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index fb0a68fc..636d9ec9 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "io" + "io/ioutil" "net/http" "net/url" "reflect" @@ -525,6 +526,14 @@ func (r *Request) GetBody() io.ReadSeeker { // Send will not close the request.Request's body. func (r *Request) Send() error { defer func() { + // Ensure a non-nil HTTPResponse parameter is set to ensure handlers + // checking for HTTPResponse values, don't fail. + if r.HTTPResponse == nil { + r.HTTPResponse = &http.Response{ + Header: http.Header{}, + Body: ioutil.NopCloser(&bytes.Buffer{}), + } + } // Regardless of success or failure of the request trigger the Complete // request handlers. r.Handlers.Complete.Run(r) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go index 3efdac29..1d3f4c3a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -14,8 +14,17 @@ import ( "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/shareddefaults" + "github.com/aws/aws-sdk-go/service/sts" ) +// CredentialsProviderOptions specifies additional options for configuring +// credentials providers. +type CredentialsProviderOptions struct { + // WebIdentityRoleProviderOptions configures a WebIdentityRoleProvider, + // such as setting its ExpiryWindow. + WebIdentityRoleProviderOptions func(*stscreds.WebIdentityRoleProvider) +} + func resolveCredentials(cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, @@ -40,6 +49,7 @@ func resolveCredentials(cfg *aws.Config, envCfg.WebIdentityTokenFilePath, envCfg.RoleARN, envCfg.RoleSessionName, + sessOpts.CredentialsProviderOptions, ) default: @@ -59,6 +69,7 @@ var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, " func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers, filepath string, roleARN, sessionName string, + credOptions *CredentialsProviderOptions, ) (*credentials.Credentials, error) { if len(filepath) == 0 { @@ -69,17 +80,18 @@ func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers, return nil, WebIdentityEmptyRoleARNErr } - creds := stscreds.NewWebIdentityCredentials( - &Session{ - Config: cfg, - Handlers: handlers.Copy(), - }, - roleARN, - sessionName, - filepath, - ) + svc := sts.New(&Session{ + Config: cfg, + Handlers: handlers.Copy(), + }) - return creds, nil + var optFns []func(*stscreds.WebIdentityRoleProvider) + if credOptions != nil && credOptions.WebIdentityRoleProviderOptions != nil { + optFns = append(optFns, credOptions.WebIdentityRoleProviderOptions) + } + + p := stscreds.NewWebIdentityRoleProviderWithOptions(svc, roleARN, sessionName, stscreds.FetchTokenPath(filepath), optFns...) 
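+ // The provider wraps an explicit STS client and applies any
+ // caller-supplied option functions (for example one that tunes the
+ // provider's ExpiryWindow) before credentials are first fetched.
+ // credentials.NewCredentials keeps the lazy, cached refresh behavior
+ // of the NewWebIdentityCredentials path this replaces.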
+ return credentials.NewCredentials(p), nil } func resolveCredsFromProfile(cfg *aws.Config, @@ -114,6 +126,7 @@ func resolveCredsFromProfile(cfg *aws.Config, sharedCfg.WebIdentityTokenFile, sharedCfg.RoleARN, sharedCfg.RoleSessionName, + sessOpts.CredentialsProviderOptions, ) case sharedCfg.hasSSOConfiguration(): diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index ebace4bb..4293dbe1 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -304,6 +304,11 @@ type Options struct { // // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6 EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState + + // Specifies options for creating credential providers. + // These are only used if the aws.Config does not already + // include credentials. + CredentialsProviderOptions *CredentialsProviderOptions } // NewSessionWithOptions returns a new Session created from SDK defaults, config files, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index f885a90b..df7d622d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.42.0" +const SDKVersion = "1.42.53" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index 92f8b4d9..c26fbfa5 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -140,7 +140,7 @@ func unmarshalLocationElements(resp *http.Response, v reflect.Value, lowerCaseHe prefix := field.Tag.Get("locationName") err := unmarshalHeaderMap(m, resp.Header, prefix, lowerCaseHeaderMaps) if err != nil { - awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/service.go b/vendor/github.com/aws/aws-sdk-go/service/iam/service.go index 89fe8231..7361474e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iam/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/service.go @@ -48,6 +48,10 @@ const ( // svc := iam.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *IAM { c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = EndpointsID + // No Fallback + } return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index a1a8a095..1e7fa655 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -63,14 +63,15 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // or for cross-account access. 
For a comparison of AssumeRole with other API // operations that produce temporary credentials, see Requesting Temporary Security // Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // // Permissions // // The temporary security credentials created by AssumeRole can be used to make // API calls to any Amazon Web Services service with the following exception: -// You cannot call the STS GetFederationToken or GetSessionToken API operations. +// You cannot call the Amazon Web Services STS GetFederationToken or GetSessionToken +// API operations. // // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an @@ -86,26 +87,33 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // -// To assume a role from a different account, your account must be trusted by -// the role. The trust relationship is defined in the role's trust policy when -// the role is created. That trust policy states which accounts are allowed -// to delegate that access to users in the account. +// When you create a role, you create two policies: A role trust policy that +// specifies who can assume the role and a permissions policy that specifies +// what can be done with the role. You specify the trusted principal who is +// allowed to assume the role in the role trust policy. +// +// To assume a role from a different account, your Amazon Web Services account +// must be trusted by the role. The trust relationship is defined in the role's +// trust policy when the role is created. That trust policy states which accounts +// are allowed to delegate that access to users in the account. // // A user who wants to access a role in a different account must also have permissions // that are delegated from the user account administrator. The administrator // must attach a policy that allows the user to call AssumeRole for the ARN -// of the role in the other account. If the user is in the same account as the -// role, then you can do either of the following: +// of the role in the other account. // -// * Attach a policy to the user (identical to the previous user in a different -// account). +// To allow a user to assume a role in the same account, you can do either of +// the following: +// +// * Attach a policy to the user that allows the user to call AssumeRole +// (as long as the role's trust policy trusts the account). // // * Add the user as a principal directly in the role's trust policy. // -// In this case, the trust policy acts as an IAM resource-based policy. Users -// in the same account as the role do not need explicit permission to assume -// the role. 
For more information about trust policies and resource-based policies, -// see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// You can do either because the role’s trust policy acts as an IAM resource-based +// policy. When a resource-based policy grants access to a principal in the +// same account, no additional identity-based policy is required. For more information +// about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) // in the IAM User Guide. // // Tags @@ -170,7 +178,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // // You could receive this error even though you meet other defined session policy // and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) // in the IAM User Guide. // // * ErrCodeRegionDisabledException "RegionDisabledException" @@ -258,7 +266,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // Services access without user-specific credentials or configuration. For a // comparison of AssumeRoleWithSAML with the other API operations that produce // temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // // The temporary security credentials returned by this operation consist of @@ -403,7 +411,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // // You could receive this error even though you meet other defined session policy // and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) // in the IAM User Guide. // // * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" @@ -523,7 +531,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // by using a token from the web identity provider. For a comparison of AssumeRoleWithWebIdentity // with the other API operations that produce temporary credentials, see Requesting // Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. 
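A minimal usage sketch of the CredentialsProviderOptions hook this patch adds to aws/session, assuming the standard AWS_ROLE_ARN and AWS_WEB_IDENTITY_TOKEN_FILE environment variables are set; the five-minute ExpiryWindow is illustrative, not a recommendation:

package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Refresh the web-identity credentials five minutes before expiry.
	// The hook only takes effect when credentials resolve through the
	// web-identity path (AWS_ROLE_ARN + AWS_WEB_IDENTITY_TOKEN_FILE).
	sess, err := session.NewSessionWithOptions(session.Options{
		CredentialsProviderOptions: &session.CredentialsProviderOptions{
			WebIdentityRoleProviderOptions: func(p *stscreds.WebIdentityRoleProvider) {
				p.ExpiryWindow = 5 * time.Minute
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = sess
}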
// // The temporary security credentials returned by this API consist of an access @@ -661,7 +669,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // // You could receive this error even though you meet other defined session policy // and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) // in the IAM User Guide. // // * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" @@ -776,10 +784,11 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // code. // // The message is encoded because the details of the authorization status can -// constitute privileged information that the user who requested the operation +// contain privileged information that the user who requested the operation // should not see. To decode an authorization status message, a user must be -// granted permissions via an IAM policy to request the DecodeAuthorizationMessage -// (sts:DecodeAuthorizationMessage) action. +// granted permissions through an IAM policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// to request the DecodeAuthorizationMessage (sts:DecodeAuthorizationMessage) +// action. // // The decoded message includes the following type of information: // @@ -1065,7 +1074,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // For a comparison of GetFederationToken with the other API operations that // produce temporary credentials, see Requesting Temporary Security Credentials // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // // You can create a mobile-based or browser-based app that can authenticate @@ -1088,9 +1097,9 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // // The temporary credentials are valid for the specified duration, from 900 // seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default -// session duration is 43,200 seconds (12 hours). Temporary credentials that -// are obtained by using Amazon Web Services account root user credentials have -// a maximum duration of 3,600 seconds (1 hour). +// session duration is 43,200 seconds (12 hours). Temporary credentials obtained +// by using the Amazon Web Services account root user credentials have a maximum +// duration of 3,600 seconds (1 hour). // // Permissions // @@ -1141,63 +1150,6 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) // in the IAM User Guide. // -// You can also call GetFederationToken using the security credentials of an -// Amazon Web Services account root user, but we do not recommend it. Instead, -// we recommend that you create an IAM user for the purpose of the proxy application. 
-// Then attach a policy to the IAM user that limits federated users to only -// the actions and resources that they need to access. For more information, -// see IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) -// in the IAM User Guide. -// -// Session duration -// -// The temporary credentials are valid for the specified duration, from 900 -// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default -// session duration is 43,200 seconds (12 hours). Temporary credentials that -// are obtained by using Amazon Web Services account root user credentials have -// a maximum duration of 3,600 seconds (1 hour). -// -// Permissions -// -// You can use the temporary credentials created by GetFederationToken in any -// Amazon Web Services service except the following: -// -// * You cannot call any IAM operations using the CLI or the Amazon Web Services -// API. -// -// * You cannot call any STS operations except GetCallerIdentity. -// -// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plain text that you use for both inline -// and managed session policies can't exceed 2,048 characters. -// -// Though the session policy parameters are optional, if you do not pass a policy, -// then the resulting federated user session has no permissions. When you pass -// session policies, the session permissions are the intersection of the IAM -// user policies and the session policies that you pass. This gives you a way -// to further restrict the permissions for a federated user. You cannot use -// session policies to grant more permissions than those that are defined in -// the permissions policy of the IAM user. For more information, see Session -// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. For information about using GetFederationToken to -// create temporary security credentials, see GetFederationToken—Federation -// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). -// -// You can use the credentials to access a resource that has a resource-based -// policy. If that policy specifically references the federated user session -// in the Principal element of the policy, the session has the permissions allowed -// by the policy. These permissions are granted in addition to the permissions -// granted by the session policies. -// -// Tags -// -// (Optional) You can pass tag key-value pairs to your session. These are called -// session tags. For more information about session tags, see Passing Session -// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// // An administrator must grant you the permissions necessary to pass session // tags. The administrator can also create granular permissions to allow you // to pass only specific session tags. For more information, see Tutorial: Using @@ -1234,7 +1186,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // // You could receive this error even though you meet other defined session policy // and session tag limits. 
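A short sketch of the GetFederationToken call documented here; the user name, inline session policy, and one-hour duration are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)

	// Placeholder session policy: scope the federated user to read-only S3.
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:Get*","Resource":"*"}]}`

	out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
		Name:            aws.String("example-federated-user"), // placeholder
		Policy:          aws.String(policy),
		DurationSeconds: aws.Int64(3600),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Credentials.AccessKeyId))
}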
For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) // in the IAM User Guide. // // * ErrCodeRegionDisabledException "RegionDisabledException" @@ -1323,7 +1275,7 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // then the API returns an access denied error. For a comparison of GetSessionToken // with the other API operations that produce temporary credentials, see Requesting // Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // // Session Duration @@ -1404,15 +1356,23 @@ func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionToken type AssumeRoleInput struct { _ struct{} `type:"structure"` - // The duration, in seconds, of the role session. The value specified can can - // range from 900 seconds (15 minutes) up to the maximum session duration that - // is set for the role. The maximum session duration setting can have a value - // from 1 hour to 12 hours. If you specify a value higher than this setting - // or the administrator setting (whichever is lower), the operation fails. For - // example, if you specify a session duration of 12 hours, but your administrator - // set the maximum session duration to 6 hours, your operation fails. To learn - // how to view the maximum value for your role, see View the Maximum Session - // Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // The duration, in seconds, of the role session. The value specified can range + // from 900 seconds (15 minutes) up to the maximum session duration set for + // the role. The maximum session duration setting can have a value from 1 hour + // to 12 hours. If you specify a value higher than this setting or the administrator + // setting (whichever is lower), the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. + // + // Role chaining limits your Amazon Web Services CLI or Amazon Web Services + // API role session to a maximum of one hour. When you use the AssumeRole API + // operation to assume a role, you can specify the duration of your role session + // with the DurationSeconds parameter. You can specify a parameter value of + // up to 43200 seconds (12 hours), depending on the maximum session duration + // setting for your role. However, if you assume a role using role chaining + // and provide a DurationSeconds parameter value greater than one hour, the + // operation fails. To learn how to view the maximum value for your role, see + // View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. // // By default, the value is set to 3600 seconds. 
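A brief sketch of setting the role-session duration described above through the SDK's assume-role credentials provider; the role ARN is a placeholder, and the four-hour value assumes the role's maximum session duration allows it (role chaining would cap it at one hour, as noted):

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())

	// Placeholder role ARN; the provider's Duration maps to the
	// DurationSeconds request parameter documented above.
	creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/example",
		func(p *stscreds.AssumeRoleProvider) {
			p.Duration = 4 * time.Hour // must not exceed the role's max session duration
		})

	_ = s3.New(sess, &aws.Config{Credentials: creds})
}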
@@ -1422,7 +1382,7 @@ type AssumeRoleInput struct { // to the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` @@ -1548,7 +1508,7 @@ type AssumeRoleInput struct { // A list of session tags that you want to pass. Each session tag consists of // a key name and an associated value. For more information about session tags, - // see Tagging STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // see Tagging Amazon Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) // in the IAM User Guide. // // This parameter is optional. You can pass up to 50 session tags. The plaintext @@ -1858,7 +1818,7 @@ type AssumeRoleWithSAMLInput struct { // to the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` @@ -2205,7 +2165,7 @@ type AssumeRoleWithWebIdentityInput struct { // to the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` @@ -2705,7 +2665,7 @@ func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAut type DecodeAuthorizationMessageOutput struct { _ struct{} `type:"structure"` - // An XML document that contains the decoded message. + // The API returns a response with the decoded message. DecodedMessage *string `type:"string"` } @@ -3237,8 +3197,8 @@ type GetSessionTokenInput struct { // user has a policy that requires MFA authentication. The value is either the // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - // You can find the device for an IAM user by going to the Management Console - // and viewing the user's security credentials. + // You can find the device for an IAM user by going to the Amazon Web Services + // Management Console and viewing the user's security credentials. // // The regex used to validate this parameter is a string of characters consisting // of upper- and lower-case alphanumeric characters with no spaces. 
You can @@ -3400,9 +3360,9 @@ func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { // You can pass custom key-value pair attributes when you assume a role or federate // a user. These are called session tags. You can then use the session tags -// to control access to resources. For more information, see Tagging STS Sessions -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in -// the IAM User Guide. +// to control access to resources. For more information, see Tagging Amazon +// Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. type Tag struct { _ struct{} `type:"structure"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go index 7897d70c..b680bbd5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -67,7 +67,7 @@ const ( // // You could receive this error even though you meet other defined session policy // and session tag limits. For more information, see IAM and STS Entity Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) // in the IAM User Guide. ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go index 703defd9..f324ff10 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -48,6 +48,10 @@ const ( // svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = EndpointsID + // No Fallback + } return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) } diff --git a/vendor/github.com/crossplane/crossplane-runtime/LICENSE b/vendor/github.com/crossplane/crossplane-runtime/LICENSE deleted file mode 100644 index ef10385c..00000000 --- a/vendor/github.com/crossplane/crossplane-runtime/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016 The Crossplane Authors. All rights reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/condition.go b/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/condition.go deleted file mode 100644 index b49a74e0..00000000 --- a/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/condition.go +++ /dev/null @@ -1,252 +0,0 @@ -/* -Copyright 2019 The Crossplane Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "sort" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// A ConditionType represents a condition a resource could be in. -type ConditionType string - -// Condition types. -const ( - // TypeReady resources are believed to be ready to handle work. - TypeReady ConditionType = "Ready" - - // TypeSynced resources are believed to be in sync with the - // Kubernetes resources that manage their lifecycle. - TypeSynced ConditionType = "Synced" -) - -// A ConditionReason represents the reason a resource is in a condition. -type ConditionReason string - -// Reasons a resource is or is not ready. -const ( - ReasonAvailable ConditionReason = "Available" - ReasonUnavailable ConditionReason = "Unavailable" - ReasonCreating ConditionReason = "Creating" - ReasonDeleting ConditionReason = "Deleting" -) - -// Reasons a resource is or is not synced. -const ( - ReasonReconcileSuccess ConditionReason = "ReconcileSuccess" - ReasonReconcileError ConditionReason = "ReconcileError" -) - -// A Condition that may apply to a resource. -type Condition struct { - // Type of this condition. At most one of each condition type may apply to - // a resource at any point in time. - Type ConditionType `json:"type"` - - // Status of this condition; is it currently True, False, or Unknown? - Status corev1.ConditionStatus `json:"status"` - - // LastTransitionTime is the last time this condition transitioned from one - // status to another. - LastTransitionTime metav1.Time `json:"lastTransitionTime"` - - // A Reason for this condition's last transition from one status to another. - Reason ConditionReason `json:"reason"` - - // A Message containing details about this condition's last transition from - // one status to another, if any. - // +optional - Message string `json:"message,omitempty"` -} - -// Equal returns true if the condition is identical to the supplied condition, -// ignoring the LastTransitionTime. -func (c Condition) Equal(other Condition) bool { - return c.Type == other.Type && - c.Status == other.Status && - c.Reason == other.Reason && - c.Message == other.Message -} - -// WithMessage returns a condition by adding the provided message to existing -// condition. -func (c Condition) WithMessage(msg string) Condition { - c.Message = msg - return c -} - -// NOTE(negz): Conditions are implemented as a slice rather than a map to comply -// with Kubernetes API conventions. Ideally we'd comply by using a map that -// marshalled to a JSON array, but doing so confuses the CRD schema generator. -// https://github.com/kubernetes/community/blob/9bf8cd/contributors/devel/sig-architecture/api-conventions.md#lists-of-named-subobjects-preferred-over-maps - -// NOTE(negz): Do not manipulate Conditions directly. Use the Set method. - -// A ConditionedStatus reflects the observed status of a resource. Only -// one condition of each type may exist. -type ConditionedStatus struct { - // Conditions of the resource. 
-	// +optional
-	Conditions []Condition `json:"conditions,omitempty"`
-}
-
-// NewConditionedStatus returns a status with the supplied conditions set.
-func NewConditionedStatus(c ...Condition) *ConditionedStatus {
-	s := &ConditionedStatus{}
-	s.SetConditions(c...)
-	return s
-}
-
-// GetCondition returns the condition for the given ConditionType if it exists,
-// otherwise returns a condition with status Unknown.
-func (s *ConditionedStatus) GetCondition(ct ConditionType) Condition {
-	for _, c := range s.Conditions {
-		if c.Type == ct {
-			return c
-		}
-	}
-
-	return Condition{Type: ct, Status: corev1.ConditionUnknown}
-}
-
-// SetConditions sets the supplied conditions, replacing any existing conditions
-// of the same type. This is a no-op if all supplied conditions are identical,
-// ignoring the last transition time, to those already set.
-func (s *ConditionedStatus) SetConditions(c ...Condition) {
-	for _, new := range c {
-		exists := false
-		for i, existing := range s.Conditions {
-			if existing.Type != new.Type {
-				continue
-			}
-
-			if existing.Equal(new) {
-				exists = true
-				continue
-			}
-
-			s.Conditions[i] = new
-			exists = true
-		}
-		if !exists {
-			s.Conditions = append(s.Conditions, new)
-		}
-	}
-}
-
-// Equal returns true if the status is identical to the supplied status,
-// ignoring the LastTransitionTimes and the order of conditions.
-func (s *ConditionedStatus) Equal(other *ConditionedStatus) bool {
-	if s == nil || other == nil {
-		return s == nil && other == nil
-	}
-
-	if len(other.Conditions) != len(s.Conditions) {
-		return false
-	}
-
-	sc := make([]Condition, len(s.Conditions))
-	copy(sc, s.Conditions)
-
-	oc := make([]Condition, len(other.Conditions))
-	copy(oc, other.Conditions)
-
-	// We should not have more than one condition of each type.
-	sort.Slice(sc, func(i, j int) bool { return sc[i].Type < sc[j].Type })
-	sort.Slice(oc, func(i, j int) bool { return oc[i].Type < oc[j].Type })
-
-	for i := range sc {
-		if !sc[i].Equal(oc[i]) {
-			return false
-		}
-	}
-
-	return true
-}
-
-// Creating returns a condition that indicates the resource is currently
-// being created.
-func Creating() Condition {
-	return Condition{
-		Type:               TypeReady,
-		Status:             corev1.ConditionFalse,
-		LastTransitionTime: metav1.Now(),
-		Reason:             ReasonCreating,
-	}
-}
-
-// Deleting returns a condition that indicates the resource is currently
-// being deleted.
-func Deleting() Condition {
-	return Condition{
-		Type:               TypeReady,
-		Status:             corev1.ConditionFalse,
-		LastTransitionTime: metav1.Now(),
-		Reason:             ReasonDeleting,
-	}
-}
-
-// Available returns a condition that indicates the resource is
-// currently observed to be available for use.
-func Available() Condition {
-	return Condition{
-		Type:               TypeReady,
-		Status:             corev1.ConditionTrue,
-		LastTransitionTime: metav1.Now(),
-		Reason:             ReasonAvailable,
-	}
-}
-
-// Unavailable returns a condition that indicates the resource is not
-// currently available for use. Unavailable should be set only when Crossplane
-// expects the resource to be available but knows it is not, for example
-// because its API reports it is unhealthy.
-func Unavailable() Condition {
-	return Condition{
-		Type:               TypeReady,
-		Status:             corev1.ConditionFalse,
-		LastTransitionTime: metav1.Now(),
-		Reason:             ReasonUnavailable,
-	}
-}
-
-// ReconcileSuccess returns a condition indicating that Crossplane successfully
-// completed the most recent reconciliation of the resource.
-func ReconcileSuccess() Condition { - return Condition{ - Type: TypeSynced, - Status: corev1.ConditionTrue, - LastTransitionTime: metav1.Now(), - Reason: ReasonReconcileSuccess, - } -} - -// ReconcileError returns a condition indicating that Crossplane encountered an -// error while reconciling the resource. This could mean Crossplane was -// unable to update the resource to reflect its desired state, or that -// Crossplane was unable to determine the current actual state of the resource. -func ReconcileError(err error) Condition { - return Condition{ - Type: TypeSynced, - Status: corev1.ConditionFalse, - LastTransitionTime: metav1.Now(), - Reason: ReasonReconcileError, - Message: err.Error(), - } -} diff --git a/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/doc.go b/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/doc.go deleted file mode 100644 index 9b99de7b..00000000 --- a/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2019 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package v1 contains core API types used by most Crossplane resources. -// +kubebuilder:object:generate=true -package v1 diff --git a/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/merge.go b/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/merge.go deleted file mode 100644 index efed85db..00000000 --- a/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/merge.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2021 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
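For context on the condition helpers above, a minimal sketch of how they are typically consumed. Only the xpv1 types and functions come from the file shown; the variable names and output are illustrative.

package main

import (
	"fmt"

	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
)

func main() {
	// A managed resource's status embeds ConditionedStatus. SetConditions
	// replaces any existing condition of the same type.
	var status xpv1.ConditionedStatus
	status.SetConditions(xpv1.Available(), xpv1.ReconcileSuccess())

	// GetCondition never returns nil; a type that was never set comes back
	// with corev1.ConditionUnknown as its status.
	fmt.Println(status.GetCondition(xpv1.TypeReady).Reason) // Available
}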
-*/
-
-package v1
-
-import (
-	"github.com/imdario/mergo"
-)
-
-// MergeOptions specifies merge options on a field path.
-type MergeOptions struct { // TODO(aru): add more options that control merging behavior
-	// Specifies that already existing values in a merged map should be preserved
-	// +optional
-	KeepMapValues *bool `json:"keepMapValues,omitempty"`
-	// Specifies that already existing elements in a merged slice should be preserved
-	// +optional
-	AppendSlice *bool `json:"appendSlice,omitempty"`
-}
-
-// MergoConfiguration returns the mergo options to use; the default behavior
-// is to replace maps and slices.
-func (mo *MergeOptions) MergoConfiguration() []func(*mergo.Config) {
-	config := []func(*mergo.Config){mergo.WithOverride}
-	if mo == nil {
-		return config
-	}
-
-	if mo.KeepMapValues != nil && *mo.KeepMapValues {
-		config = config[:0]
-	}
-	if mo.AppendSlice != nil && *mo.AppendSlice {
-		config = append(config, mergo.WithAppendSlice)
-	}
-	return config
-}
-
-// IsAppendSlice returns true if mo.AppendSlice is set to true.
-func (mo *MergeOptions) IsAppendSlice() bool {
-	return mo != nil && mo.AppendSlice != nil && *mo.AppendSlice
-}
diff --git a/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/policies.go b/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/policies.go
deleted file mode 100644
index 81b74814..00000000
--- a/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/policies.go
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
-Copyright 2019 The Crossplane Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1
-
-// A DeletionPolicy determines what should happen to the underlying external
-// resource when a managed resource is deleted.
-// +kubebuilder:validation:Enum=Orphan;Delete
-type DeletionPolicy string
-
-const (
-	// DeletionOrphan means the external resource will be orphaned when its
-	// managed resource is deleted.
-	DeletionOrphan DeletionPolicy = "Orphan"
-
-	// DeletionDelete means the external resource will be deleted when its
-	// managed resource is deleted.
-	DeletionDelete DeletionPolicy = "Delete"
-)
-
-// An UpdatePolicy determines how something should be updated - either
-// automatically (without human intervention) or manually.
-// +kubebuilder:validation:Enum=Automatic;Manual
-type UpdatePolicy string
-
-const (
-	// UpdateAutomatic means the resource should be updated automatically,
-	// without any human intervention.
-	UpdateAutomatic UpdatePolicy = "Automatic"
-
-	// UpdateManual means the resource requires human intervention to
-	// update.
-	UpdateManual UpdatePolicy = "Manual"
-)
diff --git a/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/resource.go b/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/resource.go
deleted file mode 100644
index 0ee323d4..00000000
--- a/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/resource.go
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
-Copyright 2019 The Crossplane Authors.
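A small sketch of how MergeOptions.MergoConfiguration feeds mergo.Merge, assuming the github.com/imdario/mergo dependency; the maps and the keep flag are illustrative.

package main

import (
	"fmt"

	"github.com/imdario/mergo"
	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
)

func main() {
	keep := true
	mo := &xpv1.MergeOptions{KeepMapValues: &keep}

	dst := map[string]string{"a": "keep-me"}
	src := map[string]string{"a": "overwrite", "b": "new"}

	// MergoConfiguration translates the declarative options into mergo's
	// functional options. With KeepMapValues set, mergo.WithOverride is
	// dropped, so existing values in dst win over src.
	if err := mergo.Merge(&dst, src, mo.MergoConfiguration()...); err != nil {
		fmt.Println(err)
	}
	fmt.Println(dst) // map[a:keep-me b:new]
}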
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - corev1 "k8s.io/api/core/v1" - - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" -) - -const ( - // ResourceCredentialsSecretEndpointKey is the key inside a connection secret for the connection endpoint - ResourceCredentialsSecretEndpointKey = "endpoint" - // ResourceCredentialsSecretPortKey is the key inside a connection secret for the connection port - ResourceCredentialsSecretPortKey = "port" - // ResourceCredentialsSecretUserKey is the key inside a connection secret for the connection user - ResourceCredentialsSecretUserKey = "username" - // ResourceCredentialsSecretPasswordKey is the key inside a connection secret for the connection password - ResourceCredentialsSecretPasswordKey = "password" - // ResourceCredentialsSecretCAKey is the key inside a connection secret for the server CA certificate - ResourceCredentialsSecretCAKey = "clusterCA" - // ResourceCredentialsSecretClientCertKey is the key inside a connection secret for the client certificate - ResourceCredentialsSecretClientCertKey = "clientCert" - // ResourceCredentialsSecretClientKeyKey is the key inside a connection secret for the client key - ResourceCredentialsSecretClientKeyKey = "clientKey" - // ResourceCredentialsSecretTokenKey is the key inside a connection secret for the bearer token value - ResourceCredentialsSecretTokenKey = "token" - // ResourceCredentialsSecretKubeconfigKey is the key inside a connection secret for the raw kubeconfig yaml - ResourceCredentialsSecretKubeconfigKey = "kubeconfig" -) - -// LabelKeyProviderName is added to ProviderConfigUsages to relate them to their -// ProviderConfig. -const LabelKeyProviderName = "crossplane.io/provider-config" - -// NOTE(negz): The below secret references differ from ObjectReference and -// LocalObjectReference in that they include only the fields Crossplane needs to -// reference a secret, and make those fields required. This reduces ambiguity in -// the API for resource authors. - -// A LocalSecretReference is a reference to a secret in the same namespace as -// the referencer. -type LocalSecretReference struct { - // Name of the secret. - Name string `json:"name"` -} - -// A SecretReference is a reference to a secret in an arbitrary namespace. -type SecretReference struct { - // Name of the secret. - Name string `json:"name"` - - // Namespace of the secret. - Namespace string `json:"namespace"` -} - -// A SecretKeySelector is a reference to a secret key in an arbitrary namespace. -type SecretKeySelector struct { - SecretReference `json:",inline"` - - // The key to select. - Key string `json:"key"` -} - -// A Reference to a named object. -type Reference struct { - // Name of the referenced object. - Name string `json:"name"` -} - -// A TypedReference refers to an object by Name, Kind, and APIVersion. It is -// commonly used to reference cluster-scoped objects or objects where the -// namespace is already known. -type TypedReference struct { - // APIVersion of the referenced object. 
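The secret reference types above are the building blocks for provider credentials. A minimal construction sketch; the secret name and namespace are illustrative.

package main

import (
	"fmt"

	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
)

func main() {
	// SecretKeySelector embeds SecretReference and adds the key to select,
	// so Name and Namespace are promoted fields.
	creds := xpv1.SecretKeySelector{
		SecretReference: xpv1.SecretReference{
			Name:      "example-provider-creds",
			Namespace: "crossplane-system",
		},
		Key: "credentials",
	}
	fmt.Printf("%s/%s[%s]\n", creds.Namespace, creds.Name, creds.Key)
}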
- APIVersion string `json:"apiVersion"` - - // Kind of the referenced object. - Kind string `json:"kind"` - - // Name of the referenced object. - Name string `json:"name"` - - // UID of the referenced object. - // +optional - UID types.UID `json:"uid,omitempty"` -} - -// A Selector selects an object. -type Selector struct { - // MatchLabels ensures an object with matching labels is selected. - MatchLabels map[string]string `json:"matchLabels,omitempty"` - - // MatchControllerRef ensures an object with the same controller reference - // as the selecting object is selected. - MatchControllerRef *bool `json:"matchControllerRef,omitempty"` -} - -// SetGroupVersionKind sets the Kind and APIVersion of a TypedReference. -func (obj *TypedReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { - obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() -} - -// GroupVersionKind gets the GroupVersionKind of a TypedReference. -func (obj *TypedReference) GroupVersionKind() schema.GroupVersionKind { - return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) -} - -// GetObjectKind get the ObjectKind of a TypedReference. -func (obj *TypedReference) GetObjectKind() schema.ObjectKind { return obj } - -// TODO(negz): Rename Resource* to Managed* to clarify that they enable the -// resource.Managed interface. - -// A ResourceSpec defines the desired state of a managed resource. -type ResourceSpec struct { - // WriteConnectionSecretToReference specifies the namespace and name of a - // Secret to which any connection details for this managed resource should - // be written. Connection details frequently include the endpoint, username, - // and password required to connect to the managed resource. - // +optional - WriteConnectionSecretToReference *SecretReference `json:"writeConnectionSecretToRef,omitempty"` - - // ProviderConfigReference specifies how the provider that will be used to - // create, observe, update, and delete this managed resource should be - // configured. - // +kubebuilder:default={"name": "default"} - ProviderConfigReference *Reference `json:"providerConfigRef,omitempty"` - - // ProviderReference specifies the provider that will be used to create, - // observe, update, and delete this managed resource. - // Deprecated: Please use ProviderConfigReference, i.e. `providerConfigRef` - ProviderReference *Reference `json:"providerRef,omitempty"` - - // DeletionPolicy specifies what will happen to the underlying external - // when this managed resource is deleted - either "Delete" or "Orphan" the - // external resource. - // +optional - // +kubebuilder:default=Delete - DeletionPolicy DeletionPolicy `json:"deletionPolicy,omitempty"` -} - -// ResourceStatus represents the observed state of a managed resource. -type ResourceStatus struct { - ConditionedStatus `json:",inline"` -} - -// A CredentialsSource is a source from which provider credentials may be -// acquired. -type CredentialsSource string - -const ( - // CredentialsSourceNone indicates that a provider does not require - // credentials. - CredentialsSourceNone CredentialsSource = "None" - - // CredentialsSourceSecret indicates that a provider should acquire - // credentials from a secret. - CredentialsSourceSecret CredentialsSource = "Secret" - - // CredentialsSourceInjectedIdentity indicates that a provider should use - // credentials via its (pod's) identity; i.e. via IRSA for AWS, - // Workload Identity for GCP, Pod Identity for Azure, or in-cluster - // authentication for the Kubernetes API. 
- CredentialsSourceInjectedIdentity CredentialsSource = "InjectedIdentity" - - // CredentialsSourceEnvironment indicates that a provider should acquire - // credentials from an environment variable. - CredentialsSourceEnvironment CredentialsSource = "Environment" - - // CredentialsSourceFilesystem indicates that a provider should acquire - // credentials from the filesystem. - CredentialsSourceFilesystem CredentialsSource = "Filesystem" -) - -// CommonCredentialSelectors provides common selectors for extracting -// credentials. -type CommonCredentialSelectors struct { - // Fs is a reference to a filesystem location that contains credentials that - // must be used to connect to the provider. - // +optional - Fs *FsSelector `json:"fs,omitempty"` - - // Env is a reference to an environment variable that contains credentials - // that must be used to connect to the provider. - // +optional - Env *EnvSelector `json:"env,omitempty"` - - // A SecretRef is a reference to a secret key that contains the credentials - // that must be used to connect to the provider. - // +optional - SecretRef *SecretKeySelector `json:"secretRef,omitempty"` -} - -// EnvSelector selects an environment variable. -type EnvSelector struct { - // Name is the name of an environment variable. - Name string `json:"name"` -} - -// FsSelector selects a filesystem location. -type FsSelector struct { - // Path is a filesystem path. - Path string `json:"path"` -} - -// A ProviderConfigStatus defines the observed status of a ProviderConfig. -type ProviderConfigStatus struct { - ConditionedStatus `json:",inline"` - - // Users of this provider configuration. - Users int64 `json:"users,omitempty"` -} - -// A ProviderConfigUsage is a record that a particular managed resource is using -// a particular provider configuration. -type ProviderConfigUsage struct { - // ProviderConfigReference to the provider config being used. - ProviderConfigReference Reference `json:"providerConfigRef"` - - // ResourceReference to the managed resource using the provider config. - ResourceReference TypedReference `json:"resourceRef"` -} - -// A TargetSpec defines the common fields of objects used for exposing -// infrastructure to workloads that can be scheduled to. -// -// Deprecated. -type TargetSpec struct { - // WriteConnectionSecretToReference specifies the name of a Secret, in the - // same namespace as this target, to which any connection details for this - // target should be written or already exist. Connection secrets referenced - // by a target should contain information for connecting to a resource that - // allows for scheduling of workloads. - // +optional - WriteConnectionSecretToReference *LocalSecretReference `json:"connectionSecretRef,omitempty"` - - // A ResourceReference specifies an existing managed resource, in any - // namespace, which this target should attempt to propagate a connection - // secret from. - // +optional - ResourceReference *corev1.ObjectReference `json:"clusterRef,omitempty"` -} - -// A TargetStatus defines the observed status a target. -// -// Deprecated. 
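A hypothetical spec wiring the ResourceSpec fields above together; the secret name, namespace, and provider config name are placeholders, not values from this repository.

package main

import (
	"fmt"

	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
)

func main() {
	// A managed resource would embed ResourceSpec: connection details go
	// to a Secret, and deletion orphans the external resource.
	spec := xpv1.ResourceSpec{
		WriteConnectionSecretToReference: &xpv1.SecretReference{
			Name:      "db-conn",
			Namespace: "default",
		},
		ProviderConfigReference: &xpv1.Reference{Name: "default"},
		DeletionPolicy:          xpv1.DeletionOrphan,
	}
	fmt.Println(spec.DeletionPolicy) // Orphan
}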
-type TargetStatus struct { - ConditionedStatus `json:",inline"` -} diff --git a/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/zz_generated.deepcopy.go b/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/zz_generated.deepcopy.go deleted file mode 100644 index 3ae4bb9a..00000000 --- a/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,371 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2019 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. - -package v1 - -import ( - corev1 "k8s.io/api/core/v1" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CommonCredentialSelectors) DeepCopyInto(out *CommonCredentialSelectors) { - *out = *in - if in.Fs != nil { - in, out := &in.Fs, &out.Fs - *out = new(FsSelector) - **out = **in - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = new(EnvSelector) - **out = **in - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(SecretKeySelector) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonCredentialSelectors. -func (in *CommonCredentialSelectors) DeepCopy() *CommonCredentialSelectors { - if in == nil { - return nil - } - out := new(CommonCredentialSelectors) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Condition) DeepCopyInto(out *Condition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. -func (in *Condition) DeepCopy() *Condition { - if in == nil { - return nil - } - out := new(Condition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConditionedStatus) DeepCopyInto(out *ConditionedStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionedStatus. -func (in *ConditionedStatus) DeepCopy() *ConditionedStatus { - if in == nil { - return nil - } - out := new(ConditionedStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EnvSelector) DeepCopyInto(out *EnvSelector) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvSelector. 
-func (in *EnvSelector) DeepCopy() *EnvSelector { - if in == nil { - return nil - } - out := new(EnvSelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FsSelector) DeepCopyInto(out *FsSelector) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FsSelector. -func (in *FsSelector) DeepCopy() *FsSelector { - if in == nil { - return nil - } - out := new(FsSelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LocalSecretReference) DeepCopyInto(out *LocalSecretReference) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalSecretReference. -func (in *LocalSecretReference) DeepCopy() *LocalSecretReference { - if in == nil { - return nil - } - out := new(LocalSecretReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MergeOptions) DeepCopyInto(out *MergeOptions) { - *out = *in - if in.KeepMapValues != nil { - in, out := &in.KeepMapValues, &out.KeepMapValues - *out = new(bool) - **out = **in - } - if in.AppendSlice != nil { - in, out := &in.AppendSlice, &out.AppendSlice - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MergeOptions. -func (in *MergeOptions) DeepCopy() *MergeOptions { - if in == nil { - return nil - } - out := new(MergeOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProviderConfigStatus) DeepCopyInto(out *ProviderConfigStatus) { - *out = *in - in.ConditionedStatus.DeepCopyInto(&out.ConditionedStatus) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigStatus. -func (in *ProviderConfigStatus) DeepCopy() *ProviderConfigStatus { - if in == nil { - return nil - } - out := new(ProviderConfigStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProviderConfigUsage) DeepCopyInto(out *ProviderConfigUsage) { - *out = *in - out.ProviderConfigReference = in.ProviderConfigReference - out.ResourceReference = in.ResourceReference -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigUsage. -func (in *ProviderConfigUsage) DeepCopy() *ProviderConfigUsage { - if in == nil { - return nil - } - out := new(ProviderConfigUsage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Reference) DeepCopyInto(out *Reference) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Reference. -func (in *Reference) DeepCopy() *Reference { - if in == nil { - return nil - } - out := new(Reference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) { - *out = *in - if in.WriteConnectionSecretToReference != nil { - in, out := &in.WriteConnectionSecretToReference, &out.WriteConnectionSecretToReference - *out = new(SecretReference) - **out = **in - } - if in.ProviderConfigReference != nil { - in, out := &in.ProviderConfigReference, &out.ProviderConfigReference - *out = new(Reference) - **out = **in - } - if in.ProviderReference != nil { - in, out := &in.ProviderReference, &out.ProviderReference - *out = new(Reference) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec. -func (in *ResourceSpec) DeepCopy() *ResourceSpec { - if in == nil { - return nil - } - out := new(ResourceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceStatus) DeepCopyInto(out *ResourceStatus) { - *out = *in - in.ConditionedStatus.DeepCopyInto(&out.ConditionedStatus) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceStatus. -func (in *ResourceStatus) DeepCopy() *ResourceStatus { - if in == nil { - return nil - } - out := new(ResourceStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) { - *out = *in - out.SecretReference = in.SecretReference -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeySelector. -func (in *SecretKeySelector) DeepCopy() *SecretKeySelector { - if in == nil { - return nil - } - out := new(SecretKeySelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretReference) DeepCopyInto(out *SecretReference) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference. -func (in *SecretReference) DeepCopy() *SecretReference { - if in == nil { - return nil - } - out := new(SecretReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Selector) DeepCopyInto(out *Selector) { - *out = *in - if in.MatchLabels != nil { - in, out := &in.MatchLabels, &out.MatchLabels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.MatchControllerRef != nil { - in, out := &in.MatchControllerRef, &out.MatchControllerRef - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Selector. -func (in *Selector) DeepCopy() *Selector { - if in == nil { - return nil - } - out := new(Selector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *TargetSpec) DeepCopyInto(out *TargetSpec) { - *out = *in - if in.WriteConnectionSecretToReference != nil { - in, out := &in.WriteConnectionSecretToReference, &out.WriteConnectionSecretToReference - *out = new(LocalSecretReference) - **out = **in - } - if in.ResourceReference != nil { - in, out := &in.ResourceReference, &out.ResourceReference - *out = new(corev1.ObjectReference) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetSpec. -func (in *TargetSpec) DeepCopy() *TargetSpec { - if in == nil { - return nil - } - out := new(TargetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TargetStatus) DeepCopyInto(out *TargetStatus) { - *out = *in - in.ConditionedStatus.DeepCopyInto(&out.ConditionedStatus) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetStatus. -func (in *TargetStatus) DeepCopy() *TargetStatus { - if in == nil { - return nil - } - out := new(TargetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TypedReference) DeepCopyInto(out *TypedReference) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedReference. -func (in *TypedReference) DeepCopy() *TypedReference { - if in == nil { - return nil - } - out := new(TypedReference) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/crossplane/crossplane-runtime/pkg/errors/errors.go b/vendor/github.com/crossplane/crossplane-runtime/pkg/errors/errors.go deleted file mode 100644 index 24cc306f..00000000 --- a/vendor/github.com/crossplane/crossplane-runtime/pkg/errors/errors.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Copyright 2021 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package errors is a github.com/pkg/errors compatible API for native errors. -// It includes only the subset of the github.com/pkg/errors API that is used by -// the Crossplane project. -package errors - -import ( - "errors" - "fmt" -) - -// New returns an error that formats as the given text. Each call to New returns -// a distinct error value even if the text is identical. -func New(text string) error { return errors.New(text) } - -// Is reports whether any error in err's chain matches target. -// -// The chain consists of err itself followed by the sequence of errors obtained -// by repeatedly calling Unwrap. -// -// An error is considered to match a target if it is equal to that target or if -// it implements a method Is(error) bool such that Is(target) returns true. -// -// An error type might provide an Is method so it can be treated as equivalent -// to an existing error. 
For example, if MyError defines -// -// func (m MyError) Is(target error) bool { return target == fs.ErrExist } -// -// then Is(MyError{}, fs.ErrExist) returns true. See syscall.Errno.Is for -// an example in the standard library. -func Is(err, target error) bool { return errors.Is(err, target) } - -// As finds the first error in err's chain that matches target, and if so, sets -// target to that error value and returns true. Otherwise, it returns false. -// -// The chain consists of err itself followed by the sequence of errors obtained -// by repeatedly calling Unwrap. -// -// An error matches target if the error's concrete value is assignable to the -// value pointed to by target, or if the error has a method As(interface{}) bool -// such that As(target) returns true. In the latter case, the As method is -// responsible for setting target. -// -// An error type might provide an As method so it can be treated as if it were a -// different error type. -// -// As panics if target is not a non-nil pointer to either a type that implements -// error, or to any interface type. -func As(err error, target interface{}) bool { return errors.As(err, target) } - -// Unwrap returns the result of calling the Unwrap method on err, if err's type -// contains an Unwrap method returning error. Otherwise, Unwrap returns nil. -func Unwrap(err error) error { return errors.Unwrap(err) } - -// Errorf formats according to a format specifier and returns the string as a -// value that satisfies error. -// -// If the format specifier includes a %w verb with an error operand, the -// returned error will implement an Unwrap method returning the operand. It is -// invalid to include more than one %w verb or to supply it with an operand that -// does not implement the error interface. The %w verb is otherwise a synonym -// for %v. -func Errorf(format string, a ...interface{}) error { return fmt.Errorf(format, a...) } - -// WithMessage annotates err with a new message. If err is nil, WithMessage -// returns nil. -func WithMessage(err error, message string) error { - if err == nil { - return nil - } - return fmt.Errorf("%s: %w", message, err) -} - -// WithMessagef annotates err with the format specifier. If err is nil, -// WithMessagef returns nil. -func WithMessagef(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - return fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err) -} - -// Wrap is an alias for WithMessage. -func Wrap(err error, message string) error { - return WithMessage(err, message) -} - -// Wrapf is an alias for WithMessagef -func Wrapf(err error, format string, args ...interface{}) error { - return WithMessagef(err, format, args...) -} - -// Cause calls Unwrap on each error it finds. It returns the first error it -// finds that does not have an Unwrap method - i.e. the first error that was not -// the result of a Wrap call, a Wrapf call, or an Errorf call with %w wrapping. -func Cause(err error) error { - type wrapped interface { - Unwrap() error - } - - for err != nil { - // We're ignoring errorlint telling us to use errors.As because - // we actually do want to check the outermost error. 
-		//nolint:errorlint
-		w, ok := err.(wrapped)
-		if !ok {
-			return err
-		}
-		err = w.Unwrap()
-	}
-
-	return err
-}
diff --git a/vendor/github.com/crossplane/crossplane-runtime/pkg/meta/meta.go b/vendor/github.com/crossplane/crossplane-runtime/pkg/meta/meta.go
deleted file mode 100644
index d6e56f47..00000000
--- a/vendor/github.com/crossplane/crossplane-runtime/pkg/meta/meta.go
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
-Copyright 2019 The Crossplane Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package meta contains functions for dealing with Kubernetes object metadata.
-package meta
-
-import (
-	"fmt"
-	"hash/fnv"
-	"strings"
-	"time"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/types"
-
-	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
-	"github.com/crossplane/crossplane-runtime/pkg/errors"
-)
-
-const (
-	// AnnotationKeyExternalName is the key in the annotations map of a
-	// resource for the name of the resource as it appears on provider's
-	// systems.
-	AnnotationKeyExternalName = "crossplane.io/external-name"
-
-	// AnnotationKeyExternalCreatePending is the key in the annotations map
-	// of a resource that indicates the last time creation of the external
-	// resource was pending (i.e. about to happen). Its value must be an
-	// RFC3339 timestamp.
-	AnnotationKeyExternalCreatePending = "crossplane.io/external-create-pending"
-
-	// AnnotationKeyExternalCreateSucceeded is the key in the annotations
-	// map of a resource that represents the last time the external resource
-	// was created successfully. Its value must be an RFC3339 timestamp,
-	// which can be used to determine how long ago a resource was created.
-	// This is useful for eventually consistent APIs that may take some time
-	// before the API called by Observe will report that a recently created
-	// external resource exists.
-	AnnotationKeyExternalCreateSucceeded = "crossplane.io/external-create-succeeded"
-
-	// AnnotationKeyExternalCreateFailed is the key in the annotations map
-	// of a resource that indicates the last time creation of the external
-	// resource failed. Its value must be an RFC3339 timestamp.
-	AnnotationKeyExternalCreateFailed = "crossplane.io/external-create-failed"
-)
-
-// Supported resources with all of these annotations will be fully or partially
-// propagated to the named resource of the same kind, assuming it exists and
-// consents to propagation.
-const (
-	AnnotationKeyPropagateToPrefix = "to.propagate.crossplane.io/"
-
-	// Deprecated: This functionality will be removed soon.
-	AnnotationKeyPropagateFromNamespace = "from.propagate.crossplane.io/namespace"
-	AnnotationKeyPropagateFromName      = "from.propagate.crossplane.io/name"
-)
-
-// ReferenceTo returns an object reference to the supplied object, presumed to
-// be of the supplied group, version, and kind.
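A usage sketch for the errors package above, before the meta helpers begin. The sentinel os.ErrNotExist is illustrative; the point is that Wrap preserves the chain for Is while Cause recovers the innermost error.

package main

import (
	"fmt"
	"os"

	"github.com/crossplane/crossplane-runtime/pkg/errors"
)

func main() {
	// Wrap annotates with context but keeps the original error reachable.
	err := errors.Wrap(os.ErrNotExist, "cannot read credentials file")
	fmt.Println(err)                                 // cannot read credentials file: file does not exist
	fmt.Println(errors.Is(err, os.ErrNotExist))      // true
	fmt.Println(errors.Cause(err) == os.ErrNotExist) // true
}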
-// Deprecated: use a more specific reference type, such as TypedReference or -// Reference instead of the overly verbose ObjectReference. -// See https://github.com/crossplane/crossplane-runtime/issues/49 -func ReferenceTo(o metav1.Object, of schema.GroupVersionKind) *corev1.ObjectReference { - v, k := of.ToAPIVersionAndKind() - return &corev1.ObjectReference{ - APIVersion: v, - Kind: k, - Namespace: o.GetNamespace(), - Name: o.GetName(), - UID: o.GetUID(), - } -} - -// TypedReferenceTo returns a typed object reference to the supplied object, -// presumed to be of the supplied group, version, and kind. -func TypedReferenceTo(o metav1.Object, of schema.GroupVersionKind) *xpv1.TypedReference { - v, k := of.ToAPIVersionAndKind() - return &xpv1.TypedReference{ - APIVersion: v, - Kind: k, - Name: o.GetName(), - UID: o.GetUID(), - } -} - -// AsOwner converts the supplied object reference to an owner reference. -func AsOwner(r *xpv1.TypedReference) metav1.OwnerReference { - return metav1.OwnerReference{ - APIVersion: r.APIVersion, - Kind: r.Kind, - Name: r.Name, - UID: r.UID, - } -} - -// AsController converts the supplied object reference to a controller -// reference. You may also consider using metav1.NewControllerRef. -func AsController(r *xpv1.TypedReference) metav1.OwnerReference { - c := true - ref := AsOwner(r) - ref.Controller = &c - return ref -} - -// HaveSameController returns true if both supplied objects are controlled by -// the same object. -func HaveSameController(a, b metav1.Object) bool { - ac := metav1.GetControllerOf(a) - bc := metav1.GetControllerOf(b) - - // We do not consider two objects without any controller to have - // the same controller. - if ac == nil || bc == nil { - return false - } - - return ac.UID == bc.UID -} - -// NamespacedNameOf returns the referenced object's namespaced name. -func NamespacedNameOf(r *corev1.ObjectReference) types.NamespacedName { - return types.NamespacedName{Namespace: r.Namespace, Name: r.Name} -} - -// AddOwnerReference to the supplied object' metadata. Any existing owner with -// the same UID as the supplied reference will be replaced. -func AddOwnerReference(o metav1.Object, r metav1.OwnerReference) { - refs := o.GetOwnerReferences() - for i := range refs { - if refs[i].UID == r.UID { - refs[i] = r - o.SetOwnerReferences(refs) - return - } - } - o.SetOwnerReferences(append(refs, r)) -} - -// AddControllerReference to the supplied object's metadata. Any existing owner -// with the same UID as the supplied reference will be replaced. Returns an -// error if the supplied object is already controlled by a different owner. -func AddControllerReference(o metav1.Object, r metav1.OwnerReference) error { - if c := metav1.GetControllerOf(o); c != nil && c.UID != r.UID { - return errors.Errorf("%s is already controlled by %s %s (UID %s)", o.GetName(), c.Kind, c.Name, c.UID) - } - - AddOwnerReference(o, r) - return nil -} - -// AddFinalizer to the supplied Kubernetes object's metadata. -func AddFinalizer(o metav1.Object, finalizer string) { - f := o.GetFinalizers() - for _, e := range f { - if e == finalizer { - return - } - } - o.SetFinalizers(append(f, finalizer)) -} - -// RemoveFinalizer from the supplied Kubernetes object's metadata. -func RemoveFinalizer(o metav1.Object, finalizer string) { - f := o.GetFinalizers() - for i, e := range f { - if e == finalizer { - f = append(f[:i], f[i+1:]...) - } - } - o.SetFinalizers(f) -} - -// FinalizerExists checks whether given finalizer is already set. 
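A sketch of the ownership and finalizer helpers above, using a ConfigMap and a Secret as stand-in parent and child objects; any metav1.Object works, and the names and UID are illustrative.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"

	"github.com/crossplane/crossplane-runtime/pkg/meta"
)

func main() {
	parent := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "parent", UID: "uid-1"}}
	child := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "child"}}

	gvk := schema.GroupVersionKind{Version: "v1", Kind: "ConfigMap"}
	ref := meta.AsController(meta.TypedReferenceTo(parent, gvk))

	// AddControllerReference errors if a different controller is already set.
	if err := meta.AddControllerReference(child, ref); err != nil {
		fmt.Println(err)
	}
	meta.AddFinalizer(child, "example.crossplane.io/finalizer")
	fmt.Println(len(child.OwnerReferences), child.Finalizers) // 1 [example.crossplane.io/finalizer]
}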
-func FinalizerExists(o metav1.Object, finalizer string) bool { - f := o.GetFinalizers() - for _, e := range f { - if e == finalizer { - return true - } - } - return false -} - -// AddLabels to the supplied object. -func AddLabels(o metav1.Object, labels map[string]string) { - l := o.GetLabels() - if l == nil { - o.SetLabels(labels) - return - } - for k, v := range labels { - l[k] = v - } - o.SetLabels(l) -} - -// RemoveLabels with the supplied keys from the supplied object. -func RemoveLabels(o metav1.Object, labels ...string) { - l := o.GetLabels() - if l == nil { - return - } - for _, k := range labels { - delete(l, k) - } - o.SetLabels(l) -} - -// AddAnnotations to the supplied object. -func AddAnnotations(o metav1.Object, annotations map[string]string) { - a := o.GetAnnotations() - if a == nil { - o.SetAnnotations(annotations) - return - } - for k, v := range annotations { - a[k] = v - } - o.SetAnnotations(a) -} - -// RemoveAnnotations with the supplied keys from the supplied object. -func RemoveAnnotations(o metav1.Object, annotations ...string) { - a := o.GetAnnotations() - if a == nil { - return - } - for _, k := range annotations { - delete(a, k) - } - o.SetAnnotations(a) -} - -// WasDeleted returns true if the supplied object was deleted from the API server. -func WasDeleted(o metav1.Object) bool { - return !o.GetDeletionTimestamp().IsZero() -} - -// WasCreated returns true if the supplied object was created in the API server. -func WasCreated(o metav1.Object) bool { - // This looks a little different from WasDeleted because DeletionTimestamp - // returns a reference while CreationTimestamp returns a value. - t := o.GetCreationTimestamp() - return !t.IsZero() -} - -// GetExternalName returns the external name annotation value on the resource. -func GetExternalName(o metav1.Object) string { - return o.GetAnnotations()[AnnotationKeyExternalName] -} - -// SetExternalName sets the external name annotation of the resource. -func SetExternalName(o metav1.Object, name string) { - AddAnnotations(o, map[string]string{AnnotationKeyExternalName: name}) -} - -// GetExternalCreatePending returns the time at which the external resource -// was most recently pending creation. -func GetExternalCreatePending(o metav1.Object) time.Time { - a := o.GetAnnotations()[AnnotationKeyExternalCreatePending] - t, err := time.Parse(time.RFC3339, a) - if err != nil { - return time.Time{} - } - return t -} - -// SetExternalCreatePending sets the time at which the external resource was -// most recently pending creation to the supplied time. -func SetExternalCreatePending(o metav1.Object, t time.Time) { - AddAnnotations(o, map[string]string{AnnotationKeyExternalCreatePending: t.Format(time.RFC3339)}) -} - -// GetExternalCreateSucceeded returns the time at which the external resource -// was most recently created. -func GetExternalCreateSucceeded(o metav1.Object) time.Time { - a := o.GetAnnotations()[AnnotationKeyExternalCreateSucceeded] - t, err := time.Parse(time.RFC3339, a) - if err != nil { - return time.Time{} - } - return t -} - -// SetExternalCreateSucceeded sets the time at which the external resource was -// most recently created to the supplied time. -func SetExternalCreateSucceeded(o metav1.Object, t time.Time) { - AddAnnotations(o, map[string]string{AnnotationKeyExternalCreateSucceeded: t.Format(time.RFC3339)}) -} - -// GetExternalCreateFailed returns the time at which the external resource -// recently failed to create. 
-func GetExternalCreateFailed(o metav1.Object) time.Time { - a := o.GetAnnotations()[AnnotationKeyExternalCreateFailed] - t, err := time.Parse(time.RFC3339, a) - if err != nil { - return time.Time{} - } - return t -} - -// SetExternalCreateFailed sets the time at which the external resource most -// recently failed to create. -func SetExternalCreateFailed(o metav1.Object, t time.Time) { - AddAnnotations(o, map[string]string{AnnotationKeyExternalCreateFailed: t.Format(time.RFC3339)}) -} - -// ExternalCreateIncomplete returns true if creation of the external resource -// appears to be incomplete. We deem creation to be incomplete if the 'external -// create pending' annotation is the newest of all tracking annotations that are -// set (i.e. pending, succeeded, and failed). -func ExternalCreateIncomplete(o metav1.Object) bool { - pending := GetExternalCreatePending(o) - succeeded := GetExternalCreateSucceeded(o) - failed := GetExternalCreateFailed(o) - - // If creation never started it can't be incomplete. - if pending.IsZero() { - return false - } - - latest := succeeded - if failed.After(succeeded) { - latest = failed - } - - return pending.After(latest) -} - -// ExternalCreateSucceededDuring returns true if creation of the external -// resource that corresponds to the supplied managed resource succeeded within -// the supplied duration. -func ExternalCreateSucceededDuring(o metav1.Object, d time.Duration) bool { - t := GetExternalCreateSucceeded(o) - if t.IsZero() { - return false - } - return time.Since(t) < d -} - -// AllowPropagation from one object to another by adding consenting annotations -// to both. -// Deprecated: This functionality will be removed soon. -func AllowPropagation(from, to metav1.Object) { - AddAnnotations(to, map[string]string{ - AnnotationKeyPropagateFromNamespace: from.GetNamespace(), - AnnotationKeyPropagateFromName: from.GetName(), - }) - - AddAnnotations(from, map[string]string{ - AnnotationKeyPropagateTo(to): to.GetNamespace() + "/" + to.GetName(), - }) -} - -// AnnotationKeyPropagateTo returns an annotation key whose presence indicates -// that the annotated object consents to propagation from the supplied object. -// The annotation name (which follows the prefix) can be anything that doesn't -// collide with another annotation. to.propagation.crossplane.io/example would -// be valid. This function uses a hash of the supplied object's namespace and -// name in order to avoid collisions and keep the suffix relatively short. -func AnnotationKeyPropagateTo(o metav1.Object) string { - // Writing to a hash never returns an error. - h := fnv.New32a() - h.Write([]byte(o.GetNamespace())) // nolint:errcheck - h.Write([]byte(o.GetName())) // nolint:errcheck - return fmt.Sprintf("%s%x", AnnotationKeyPropagateToPrefix, h.Sum32()) -} - -// AllowsPropagationFrom returns the NamespacedName of the object the supplied -// object should be propagated from. -func AllowsPropagationFrom(to metav1.Object) types.NamespacedName { - return types.NamespacedName{ - Namespace: to.GetAnnotations()[AnnotationKeyPropagateFromNamespace], - Name: to.GetAnnotations()[AnnotationKeyPropagateFromName], - } -} - -// AllowsPropagationTo returns the set of NamespacedNames that the supplied -// object may be propagated to. 
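How the create-tracking annotations above fit together, in a minimal sketch; the ConfigMap is a stand-in for a managed resource and the one-second offset is illustrative.

package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/crossplane/crossplane-runtime/pkg/meta"
)

func main() {
	mg := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "example"}}

	// Record that creation is about to happen; pending is now the newest
	// tracking annotation, so creation looks incomplete.
	meta.SetExternalCreatePending(mg, time.Now())
	fmt.Println(meta.ExternalCreateIncomplete(mg)) // true

	// Once success is recorded with a later timestamp, it is complete.
	meta.SetExternalCreateSucceeded(mg, time.Now().Add(time.Second))
	fmt.Println(meta.ExternalCreateIncomplete(mg)) // false
}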
-func AllowsPropagationTo(from metav1.Object) map[types.NamespacedName]bool { - to := make(map[types.NamespacedName]bool) - - for k, v := range from.GetAnnotations() { - nn := strings.Split(v, "/") - switch { - case !strings.HasPrefix(k, AnnotationKeyPropagateToPrefix): - continue - case len(nn) != 2: - continue - case nn[0] == "": - continue - case nn[1] == "": - continue - } - to[types.NamespacedName{Namespace: nn[0], Name: nn[1]}] = true - } - - return to -} diff --git a/vendor/github.com/crossplane/crossplane-runtime/pkg/reference/reference.go b/vendor/github.com/crossplane/crossplane-runtime/pkg/reference/reference.go deleted file mode 100644 index 06396e82..00000000 --- a/vendor/github.com/crossplane/crossplane-runtime/pkg/reference/reference.go +++ /dev/null @@ -1,296 +0,0 @@ -/* -Copyright 2019 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package reference - -import ( - "context" - - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - - xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - "github.com/crossplane/crossplane-runtime/pkg/errors" - "github.com/crossplane/crossplane-runtime/pkg/meta" - "github.com/crossplane/crossplane-runtime/pkg/resource" -) - -// Error strings. -const ( - errGetManaged = "cannot get referenced resource" - errListManaged = "cannot list resources that match selector" - errNoMatches = "no resources matched selector" - errNoValue = "referenced field was empty (referenced resource may not yet be ready)" -) - -// NOTE(negz): There are many equivalents of FromPtrValue and ToPtrValue -// throughout the Crossplane codebase. We duplicate them here to reduce the -// number of packages our API types have to import to support references. - -// FromPtrValue adapts a string pointer field for use as a CurrentValue. -func FromPtrValue(v *string) string { - if v == nil { - return "" - } - return *v -} - -// ToPtrValue adapts a ResolvedValue for use as a string pointer field. -func ToPtrValue(v string) *string { - if v == "" { - return nil - } - return &v -} - -// FromPtrValues adapts a slice of string pointer fields for use as CurrentValues. -// NOTE: Do not use this utility function unless you have to. -// Using pointer slices does not adhere to our current API practices. -// The current use case is where generated code creates reference-able fields in a provider which are -// string pointers and need to be resolved as part of `ResolveMultiple` -func FromPtrValues(v []*string) []string { - var res = make([]string, len(v)) - for i := 0; i < len(v); i++ { - res[i] = FromPtrValue(v[i]) - } - return res -} - -// ToPtrValues adapts ResolvedValues for use as a slice of string pointer fields. -// NOTE: Do not use this utility function unless you have to. -// Using pointer slices does not adhere to our current API practices. 
-// The current use case is where generated code creates reference-able fields in a provider which are -// string pointers and need to be resolved as part of `ResolveMultiple` -func ToPtrValues(v []string) []*string { - var res = make([]*string, len(v)) - for i := 0; i < len(v); i++ { - res[i] = ToPtrValue(v[i]) - } - return res -} - -// To indicates the kind of managed resource a reference is to. -type To struct { - Managed resource.Managed - List resource.ManagedList -} - -// An ExtractValueFn specifies how to extract a value from the resolved managed -// resource. -type ExtractValueFn func(resource.Managed) string - -// ExternalName extracts the resolved managed resource's external name from its -// external name annotation. -func ExternalName() ExtractValueFn { - return func(mg resource.Managed) string { - return meta.GetExternalName(mg) - } -} - -// A ResolutionRequest requests that a reference to a particular kind of -// managed resource be resolved. -type ResolutionRequest struct { - CurrentValue string - Reference *xpv1.Reference - Selector *xpv1.Selector - To To - Extract ExtractValueFn -} - -// IsNoOp returns true if the supplied ResolutionRequest cannot or should not be -// processed. -func (rr ResolutionRequest) IsNoOp() bool { - // We don't resolve values that are already set; we effectively cache - // resolved values. The CR author can invalidate the cache and trigger a new - // resolution by explicitly clearing the resolved value. - if rr.CurrentValue != "" { - return true - } - - // We can't resolve anything if neither a reference nor a selector were - // provided. - return rr.Reference == nil && rr.Selector == nil -} - -// A ResolutionResponse returns the result of a reference resolution. The -// returned values are always safe to set if resolution was successful. -type ResolutionResponse struct { - ResolvedValue string - ResolvedReference *xpv1.Reference -} - -// Validate this ResolutionResponse. -func (rr ResolutionResponse) Validate() error { - if rr.ResolvedValue == "" { - return errors.New(errNoValue) - } - - return nil -} - -// A MultiResolutionRequest requests that several references to a particular -// kind of managed resource be resolved. -type MultiResolutionRequest struct { - CurrentValues []string - References []xpv1.Reference - Selector *xpv1.Selector - To To - Extract ExtractValueFn -} - -// IsNoOp returns true if the supplied MultiResolutionRequest cannot or should -// not be processed. -func (rr MultiResolutionRequest) IsNoOp() bool { - // We don't resolve values that are already set; we effectively cache - // resolved values. The CR author can invalidate the cache and trigger a new - // resolution by explicitly clearing the resolved values. This is a little - // unintuitive for the APIMultiResolver but mimics the UX of the APIResolver - // and simplifies the overall mental model. - if len(rr.CurrentValues) > 0 { - return true - } - - // We can't resolve anything if neither a reference nor a selector were - // provided. - return len(rr.References) == 0 && rr.Selector == nil -} - -// A MultiResolutionResponse returns the result of several reference -// resolutions. The returned values are always safe to set if resolution was -// successful. -type MultiResolutionResponse struct { - ResolvedValues []string - ResolvedReferences []xpv1.Reference -} - -// Validate this MultiResolutionResponse. 
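// Example (illustrative, not part of this diff): a typical ResolveReferences
// implementation built from the request/response types above and the
// APIResolver defined below. MyResource, Role, RoleList and the RoleARN*
// fields are hypothetical stand-ins for generated provider types; imports of
// context, the controller-runtime client package, and this reference package
// are assumed.
func (mg *MyResource) ResolveReferences(ctx context.Context, c client.Reader) error {
	r := reference.NewAPIResolver(c, mg)

	rsp, err := r.Resolve(ctx, reference.ResolutionRequest{
		CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RoleARN),
		Reference:    mg.Spec.ForProvider.RoleARNRef,
		Selector:     mg.Spec.ForProvider.RoleARNSelector,
		To:           reference.To{Managed: &Role{}, List: &RoleList{}},
		Extract:      reference.ExternalName(),
	})
	if err != nil {
		return err
	}
	// The resolved value and reference are safe to set because Resolve
	// returned no error.
	mg.Spec.ForProvider.RoleARN = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.RoleARNRef = rsp.ResolvedReference
	return nil
}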
-func (rr MultiResolutionResponse) Validate() error { - if len(rr.ResolvedValues) == 0 { - return errors.New(errNoMatches) - } - - for _, v := range rr.ResolvedValues { - if v == "" { - return errors.New(errNoValue) - } - } - - return nil -} - -// An APIResolver selects and resolves references to managed resources in the -// Kubernetes API server. -type APIResolver struct { - client client.Reader - from resource.Managed -} - -// NewAPIResolver returns a Resolver that selects and resolves references from -// the supplied managed resource to other managed resources in the Kubernetes -// API server. -func NewAPIResolver(c client.Reader, from resource.Managed) *APIResolver { - return &APIResolver{client: c, from: from} -} - -// Resolve the supplied ResolutionRequest. The returned ResolutionResponse -// always contains valid values unless an error was returned. -func (r *APIResolver) Resolve(ctx context.Context, req ResolutionRequest) (ResolutionResponse, error) { - // Return early if from is being deleted, or the request is a no-op. - if meta.WasDeleted(r.from) || req.IsNoOp() { - return ResolutionResponse{ResolvedValue: req.CurrentValue, ResolvedReference: req.Reference}, nil - } - - // The reference is already set - resolve it. - if req.Reference != nil { - if err := r.client.Get(ctx, types.NamespacedName{Name: req.Reference.Name}, req.To.Managed); err != nil { - return ResolutionResponse{}, errors.Wrap(err, errGetManaged) - } - - rsp := ResolutionResponse{ResolvedValue: req.Extract(req.To.Managed), ResolvedReference: req.Reference} - return rsp, rsp.Validate() - } - - // The reference was not set, but a selector was. Select a reference. - if err := r.client.List(ctx, req.To.List, client.MatchingLabels(req.Selector.MatchLabels)); err != nil { - return ResolutionResponse{}, errors.Wrap(err, errListManaged) - } - - for _, to := range req.To.List.GetItems() { - if ControllersMustMatch(req.Selector) && !meta.HaveSameController(r.from, to) { - continue - } - - rsp := ResolutionResponse{ResolvedValue: req.Extract(to), ResolvedReference: &xpv1.Reference{Name: to.GetName()}} - return rsp, rsp.Validate() - } - - // We couldn't resolve anything. - return ResolutionResponse{}, errors.New(errNoMatches) -} - -// ResolveMultiple resolves the supplied MultiResolutionRequest. The returned -// MultiResolutionResponse always contains valid values unless an error was -// returned. -func (r *APIResolver) ResolveMultiple(ctx context.Context, req MultiResolutionRequest) (MultiResolutionResponse, error) { - // Return early if from is being deleted, or the request is a no-op. - if meta.WasDeleted(r.from) || req.IsNoOp() { - return MultiResolutionResponse{ResolvedValues: req.CurrentValues, ResolvedReferences: req.References}, nil - } - - // The references are already set - resolve them. - if len(req.References) > 0 { - vals := make([]string, len(req.References)) - for i := range req.References { - if err := r.client.Get(ctx, types.NamespacedName{Name: req.References[i].Name}, req.To.Managed); err != nil { - return MultiResolutionResponse{}, errors.Wrap(err, errGetManaged) - } - vals[i] = req.Extract(req.To.Managed) - } - - rsp := MultiResolutionResponse{ResolvedValues: vals, ResolvedReferences: req.References} - return rsp, rsp.Validate() - } - - // No references were set, but a selector was. Select and resolve references. 
- if err := r.client.List(ctx, req.To.List, client.MatchingLabels(req.Selector.MatchLabels)); err != nil { - return MultiResolutionResponse{}, errors.Wrap(err, errListManaged) - } - - items := req.To.List.GetItems() - refs := make([]xpv1.Reference, 0, len(items)) - vals := make([]string, 0, len(items)) - for _, to := range req.To.List.GetItems() { - if ControllersMustMatch(req.Selector) && !meta.HaveSameController(r.from, to) { - continue - } - - vals = append(vals, req.Extract(to)) - refs = append(refs, xpv1.Reference{Name: to.GetName()}) - } - - rsp := MultiResolutionResponse{ResolvedValues: vals, ResolvedReferences: refs} - return rsp, rsp.Validate() -} - -// ControllersMustMatch returns true if the supplied Selector requires that a -// reference be to a managed resource whose controller reference matches the -// referencing resource. -func ControllersMustMatch(s *xpv1.Selector) bool { - if s == nil { - return false - } - return s.MatchControllerRef != nil && *s.MatchControllerRef -} diff --git a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/api.go b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/api.go deleted file mode 100644 index 7c77e2a0..00000000 --- a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/api.go +++ /dev/null @@ -1,250 +0,0 @@ -/* -Copyright 2019 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "context" - "encoding/json" - - corev1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/crossplane/crossplane-runtime/pkg/errors" - "github.com/crossplane/crossplane-runtime/pkg/meta" -) - -// Error strings. -const ( - errGetSecret = "cannot get managed resource's connection secret" - errSecretConflict = "cannot establish control of existing connection secret" - errUpdateSecret = "cannot update connection secret" - errCreateOrUpdateSecret = "cannot create or update connection secret" - - errUpdateObject = "cannot update object" -) - -// An APIManagedConnectionPropagator propagates connection details by reading -// them from and writing them to a Kubernetes API server. -// Deprecated: This functionality will be removed soon. -type APIManagedConnectionPropagator struct { - Propagator ConnectionPropagator -} - -// PropagateConnection details from the supplied resource. -func (a *APIManagedConnectionPropagator) PropagateConnection(ctx context.Context, to LocalConnectionSecretOwner, mg Managed) error { - return a.Propagator.PropagateConnection(ctx, to, mg) -} - -// An APIConnectionPropagator propagates connection details by reading -// them from and writing them to a Kubernetes API server. -// Deprecated: This functionality will be removed soon. -type APIConnectionPropagator struct { - client ClientApplicator - typer runtime.ObjectTyper -} - -// NewAPIConnectionPropagator returns a new APIConnectionPropagator. 
-// Deprecated: This functionality will be removed soon. -func NewAPIConnectionPropagator(c client.Client, t runtime.ObjectTyper) *APIConnectionPropagator { - return &APIConnectionPropagator{ - client: ClientApplicator{Client: c, Applicator: NewAPIUpdatingApplicator(c)}, - typer: t, - } -} - -// PropagateConnection details from the supplied resource. -func (a *APIConnectionPropagator) PropagateConnection(ctx context.Context, to LocalConnectionSecretOwner, from ConnectionSecretOwner) error { - // Either from does not expose a connection secret, or to does not want one. - if from.GetWriteConnectionSecretToReference() == nil || to.GetWriteConnectionSecretToReference() == nil { - return nil - } - - n := types.NamespacedName{ - Namespace: from.GetWriteConnectionSecretToReference().Namespace, - Name: from.GetWriteConnectionSecretToReference().Name, - } - fs := &corev1.Secret{} - if err := a.client.Get(ctx, n, fs); err != nil { - return errors.Wrap(err, errGetSecret) - } - - // Make sure the managed resource is the controller of the connection secret - // it references before we propagate it. This ensures a managed resource - // cannot use Crossplane to circumvent RBAC by propagating a secret it does - // not own. - if c := metav1.GetControllerOf(fs); c == nil || c.UID != from.GetUID() { - return errors.New(errSecretConflict) - } - - ts := LocalConnectionSecretFor(to, MustGetKind(to, a.typer)) - ts.Data = fs.Data - - meta.AllowPropagation(fs, ts) - - if err := a.client.Apply(ctx, ts, ConnectionSecretMustBeControllableBy(to.GetUID())); err != nil { - return errors.Wrap(err, errCreateOrUpdateSecret) - } - - return errors.Wrap(a.client.Update(ctx, fs), errUpdateSecret) -} - -// An APIPatchingApplicator applies changes to an object by either creating or -// patching it in a Kubernetes API server. -type APIPatchingApplicator struct { - client client.Client -} - -// NewAPIPatchingApplicator returns an Applicator that applies changes to an -// object by either creating or patching it in a Kubernetes API server. -func NewAPIPatchingApplicator(c client.Client) *APIPatchingApplicator { - return &APIPatchingApplicator{client: c} -} - -// Apply changes to the supplied object. The object will be created if it does -// not exist, or patched if it does. If the object does exist, it will only be -// patched if the passed object has the same or an empty resource version. -func (a *APIPatchingApplicator) Apply(ctx context.Context, o client.Object, ao ...ApplyOption) error { - m, ok := o.(metav1.Object) - if !ok { - return errors.New("cannot access object metadata") - } - - if m.GetName() == "" && m.GetGenerateName() != "" { - return errors.Wrap(a.client.Create(ctx, o), "cannot create object") - } - - desired := o.DeepCopyObject() - - err := a.client.Get(ctx, types.NamespacedName{Name: m.GetName(), Namespace: m.GetNamespace()}, o) - if kerrors.IsNotFound(err) { - // TODO(negz): Apply ApplyOptions here too? - return errors.Wrap(a.client.Create(ctx, o), "cannot create object") - } - if err != nil { - return errors.Wrap(err, "cannot get object") - } - - for _, fn := range ao { - if err := fn(ctx, o, desired); err != nil { - return err - } - } - - // TODO(negz): Allow callers to override the kind of patch used. 
- return errors.Wrap(a.client.Patch(ctx, o, &patch{desired}), "cannot patch object") -} - -type patch struct{ from runtime.Object } - -func (p *patch) Type() types.PatchType { return types.MergePatchType } -func (p *patch) Data(_ client.Object) ([]byte, error) { return json.Marshal(p.from) } - -// An APIUpdatingApplicator applies changes to an object by either creating or -// updating it in a Kubernetes API server. -type APIUpdatingApplicator struct { - client client.Client -} - -// NewAPIUpdatingApplicator returns an Applicator that applies changes to an -// object by either creating or updating it in a Kubernetes API server. -func NewAPIUpdatingApplicator(c client.Client) *APIUpdatingApplicator { - return &APIUpdatingApplicator{client: c} -} - -// Apply changes to the supplied object. The object will be created if it does -// not exist, or updated if it does. -func (a *APIUpdatingApplicator) Apply(ctx context.Context, o client.Object, ao ...ApplyOption) error { - m, ok := o.(Object) - if !ok { - return errors.New("cannot access object metadata") - } - - if m.GetName() == "" && m.GetGenerateName() != "" { - return errors.Wrap(a.client.Create(ctx, o), "cannot create object") - } - - current := o.DeepCopyObject().(client.Object) - - err := a.client.Get(ctx, types.NamespacedName{Name: m.GetName(), Namespace: m.GetNamespace()}, current) - if kerrors.IsNotFound(err) { - // TODO(negz): Apply ApplyOptions here too? - return errors.Wrap(a.client.Create(ctx, m), "cannot create object") - } - if err != nil { - return errors.Wrap(err, "cannot get object") - } - - for _, fn := range ao { - if err := fn(ctx, current, m); err != nil { - return err - } - } - - // NOTE(hasheddan): we must set the resource version of the desired object - // to that of the current or the update will always fail. - m.SetResourceVersion(current.(metav1.Object).GetResourceVersion()) - return errors.Wrap(a.client.Update(ctx, m), "cannot update object") -} - -// An APIFinalizer adds and removes finalizers to and from a resource. -type APIFinalizer struct { - client client.Client - finalizer string -} - -// NewAPIFinalizer returns a new APIFinalizer. -func NewAPIFinalizer(c client.Client, finalizer string) *APIFinalizer { - return &APIFinalizer{client: c, finalizer: finalizer} -} - -// AddFinalizer to the supplied Managed resource. -func (a *APIFinalizer) AddFinalizer(ctx context.Context, obj Object) error { - if meta.FinalizerExists(obj, a.finalizer) { - return nil - } - meta.AddFinalizer(obj, a.finalizer) - return errors.Wrap(a.client.Update(ctx, obj), errUpdateObject) -} - -// RemoveFinalizer from the supplied Managed resource. -func (a *APIFinalizer) RemoveFinalizer(ctx context.Context, obj Object) error { - if !meta.FinalizerExists(obj, a.finalizer) { - return nil - } - meta.RemoveFinalizer(obj, a.finalizer) - return errors.Wrap(IgnoreNotFound(a.client.Update(ctx, obj)), errUpdateObject) -} - -// A FinalizerFns satisfy the Finalizer interface. -type FinalizerFns struct { - AddFinalizerFn func(ctx context.Context, obj Object) error - RemoveFinalizerFn func(ctx context.Context, obj Object) error -} - -// AddFinalizer to the supplied resource. -func (f FinalizerFns) AddFinalizer(ctx context.Context, obj Object) error { - return f.AddFinalizerFn(ctx, obj) -} - -// RemoveFinalizer from the supplied resource. 
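// Example (illustrative, not part of this diff): typical use of the
// APIFinalizer above from a reconcile loop; the finalizer name and the
// surrounding reconciler are hypothetical.
package example

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/crossplane/crossplane-runtime/pkg/meta"
	"github.com/crossplane/crossplane-runtime/pkg/resource"
)

func reconcileFinalizer(ctx context.Context, kube client.Client, obj resource.Object) error {
	f := resource.NewAPIFinalizer(kube, "finalizer.example.org/cleanup")
	if meta.WasDeleted(obj) {
		// Release any external resources first, then unblock deletion.
		return f.RemoveFinalizer(ctx, obj)
	}
	return f.AddFinalizer(ctx, obj)
}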
-func (f FinalizerFns) RemoveFinalizer(ctx context.Context, obj Object) error { - return f.RemoveFinalizerFn(ctx, obj) -} diff --git a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/doc.go b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/doc.go deleted file mode 100644 index cd13e74c..00000000 --- a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2019 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package resource provides types and functions that can be used to build -// Kubernetes controllers that reconcile Crossplane resources. -package resource diff --git a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/enqueue_handlers.go b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/enqueue_handlers.go deleted file mode 100644 index 1bafa3ad..00000000 --- a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/enqueue_handlers.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright 2019 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/workqueue" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -type adder interface { - Add(item interface{}) -} - -// EnqueueRequestForProviderConfig enqueues a reconcile.Request for a referenced -// ProviderConfig. -type EnqueueRequestForProviderConfig struct{} - -// Create adds a NamespacedName for the supplied CreateEvent if its Object is a -// ProviderConfigReferencer. -func (e *EnqueueRequestForProviderConfig) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { - addProviderConfig(evt.Object, q) -} - -// Update adds a NamespacedName for the supplied UpdateEvent if its Objects are -// a ProviderConfigReferencer. -func (e *EnqueueRequestForProviderConfig) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { - addProviderConfig(evt.ObjectOld, q) - addProviderConfig(evt.ObjectNew, q) -} - -// Delete adds a NamespacedName for the supplied DeleteEvent if its Object is a -// ProviderConfigReferencer. -func (e *EnqueueRequestForProviderConfig) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { - addProviderConfig(evt.Object, q) -} - -// Generic adds a NamespacedName for the supplied GenericEvent if its Object is -// a ProviderConfigReferencer. 
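// Example (illustrative, not part of this diff): wiring the enqueue handler
// above into a ProviderConfig controller, assuming a controller-runtime
// version contemporary with this vendored code (one where source.Kind carries
// the watched Type). The v1beta1 types and the reconciler r are hypothetical.
func setupProviderConfig(mgr ctrl.Manager, r reconcile.Reconciler) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&v1beta1.ProviderConfig{}).
		Watches(&source.Kind{Type: &v1beta1.ProviderConfigUsage{}},
			&resource.EnqueueRequestForProviderConfig{}).
		Complete(r)
}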
-func (e *EnqueueRequestForProviderConfig) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { - addProviderConfig(evt.Object, q) -} - -func addProviderConfig(obj runtime.Object, queue adder) { - pcr, ok := obj.(RequiredProviderConfigReferencer) - if !ok { - return - } - - queue.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: pcr.GetProviderConfigReference().Name}}) -} diff --git a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/interfaces.go b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/interfaces.go deleted file mode 100644 index 049e0e22..00000000 --- a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/interfaces.go +++ /dev/null @@ -1,239 +0,0 @@ -/* -Copyright 2019 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "context" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - - xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" -) - -// A Conditioned may have conditions set or retrieved. Conditions are typically -// indicate the status of both a resource and its reconciliation process. -type Conditioned interface { - SetConditions(c ...xpv1.Condition) - GetCondition(xpv1.ConditionType) xpv1.Condition -} - -// A ClaimReferencer may reference a resource claim. -type ClaimReferencer interface { - SetClaimReference(r *corev1.ObjectReference) - GetClaimReference() *corev1.ObjectReference -} - -// A ManagedResourceReferencer may reference a concrete managed resource. -type ManagedResourceReferencer interface { - SetResourceReference(r *corev1.ObjectReference) - GetResourceReference() *corev1.ObjectReference -} - -// A LocalConnectionSecretWriterTo may write a connection secret to its own -// namespace. -type LocalConnectionSecretWriterTo interface { - SetWriteConnectionSecretToReference(r *xpv1.LocalSecretReference) - GetWriteConnectionSecretToReference() *xpv1.LocalSecretReference -} - -// A ConnectionSecretWriterTo may write a connection secret to an arbitrary -// namespace. -type ConnectionSecretWriterTo interface { - SetWriteConnectionSecretToReference(r *xpv1.SecretReference) - GetWriteConnectionSecretToReference() *xpv1.SecretReference -} - -// An Orphanable resource may specify a DeletionPolicy. -type Orphanable interface { - SetDeletionPolicy(p xpv1.DeletionPolicy) - GetDeletionPolicy() xpv1.DeletionPolicy -} - -// A ProviderReferencer may reference a provider resource. -type ProviderReferencer interface { - GetProviderReference() *xpv1.Reference - SetProviderReference(p *xpv1.Reference) -} - -// A ProviderConfigReferencer may reference a provider config resource. -type ProviderConfigReferencer interface { - GetProviderConfigReference() *xpv1.Reference - SetProviderConfigReference(p *xpv1.Reference) -} - -// A RequiredProviderConfigReferencer may reference a provider config resource. -// Unlike ProviderConfigReferencer, the reference is required (i.e. 
not nil). -type RequiredProviderConfigReferencer interface { - GetProviderConfigReference() xpv1.Reference - SetProviderConfigReference(p xpv1.Reference) -} - -// A RequiredTypedResourceReferencer can reference a resource. -type RequiredTypedResourceReferencer interface { - SetResourceReference(r xpv1.TypedReference) - GetResourceReference() xpv1.TypedReference -} - -// A Finalizer manages the finalizers on the resource. -type Finalizer interface { - AddFinalizer(ctx context.Context, obj Object) error - RemoveFinalizer(ctx context.Context, obj Object) error -} - -// A CompositionSelector may select a composition of resources. -type CompositionSelector interface { - SetCompositionSelector(*metav1.LabelSelector) - GetCompositionSelector() *metav1.LabelSelector -} - -// A CompositionReferencer may reference a composition of resources. -type CompositionReferencer interface { - SetCompositionReference(*corev1.ObjectReference) - GetCompositionReference() *corev1.ObjectReference -} - -// A CompositionRevisionReferencer may reference a specific revision of a -// composition of resources. -type CompositionRevisionReferencer interface { - SetCompositionRevisionReference(*corev1.ObjectReference) - GetCompositionRevisionReference() *corev1.ObjectReference -} - -// A CompositionUpdater uses a composition, and may update which revision of -// that composition it uses. -type CompositionUpdater interface { - SetCompositionUpdatePolicy(*xpv1.UpdatePolicy) - GetCompositionUpdatePolicy() *xpv1.UpdatePolicy -} - -// A ComposedResourcesReferencer may reference the resources it composes. -type ComposedResourcesReferencer interface { - SetResourceReferences([]corev1.ObjectReference) - GetResourceReferences() []corev1.ObjectReference -} - -// A CompositeResourceReferencer can reference a composite resource. -type CompositeResourceReferencer interface { - SetResourceReference(r *corev1.ObjectReference) - GetResourceReference() *corev1.ObjectReference -} - -// A UserCounter can count how many users it has. -type UserCounter interface { - SetUsers(i int64) - GetUsers() int64 -} - -// A ConnectionDetailsPublishedTimer can record the last time its connection -// details were published. -type ConnectionDetailsPublishedTimer interface { - SetConnectionDetailsLastPublishedTime(t *metav1.Time) - GetConnectionDetailsLastPublishedTime() *metav1.Time -} - -// An Object is a Kubernetes object. -type Object interface { - metav1.Object - runtime.Object -} - -// A Managed is a Kubernetes object representing a concrete managed -// resource (e.g. a CloudSQL instance). -type Managed interface { - Object - - ProviderReferencer - ProviderConfigReferencer - ConnectionSecretWriterTo - Orphanable - - Conditioned -} - -// A ManagedList is a list of managed resources. -type ManagedList interface { - client.ObjectList - - // GetItems returns the list of managed resources. - GetItems() []Managed -} - -// A ProviderConfig configures a Crossplane provider. -type ProviderConfig interface { - Object - - UserCounter - Conditioned -} - -// A ProviderConfigUsage indicates a usage of a Crossplane provider config. -type ProviderConfigUsage interface { - Object - - RequiredProviderConfigReferencer - RequiredTypedResourceReferencer -} - -// A ProviderConfigUsageList is a list of provider config usages. -type ProviderConfigUsageList interface { - client.ObjectList - - // GetItems returns the list of provider config usages. - GetItems() []ProviderConfigUsage -} - -// A Composite resource composes one or more Composed resources. 
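// Example (illustrative, not part of this diff): generated managed resource
// types commonly assert at compile time that they satisfy the interfaces
// above; MyResource and MyResourceList are hypothetical generated types.
var (
	_ resource.Managed     = &MyResource{}
	_ resource.ManagedList = &MyResourceList{}
)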
-type Composite interface { - Object - - CompositionSelector - CompositionReferencer - CompositionUpdater - CompositionRevisionReferencer - ComposedResourcesReferencer - ClaimReferencer - ConnectionSecretWriterTo - - Conditioned - ConnectionDetailsPublishedTimer -} - -// Composed resources can be a composed into a Composite resource. -type Composed interface { - Object - - Conditioned - ConnectionSecretWriterTo -} - -// A CompositeClaim for a Composite resource. -type CompositeClaim interface { - Object - - CompositionSelector - CompositionReferencer - CompositionUpdater - CompositionRevisionReferencer - CompositeResourceReferencer - LocalConnectionSecretWriterTo - - Conditioned - ConnectionDetailsPublishedTimer -} diff --git a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/late_initializer.go b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/late_initializer.go deleted file mode 100644 index 0bcbb7cf..00000000 --- a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/late_initializer.go +++ /dev/null @@ -1,86 +0,0 @@ -/* -Copyright 2021 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// NewLateInitializer returns a new instance of *LateInitializer. -func NewLateInitializer() *LateInitializer { - return &LateInitializer{} -} - -// LateInitializer contains functions to late initialize two fields with varying -// types. The main purpose of LateInitializer is to be able to report whether -// anything different from the original value has been returned after all late -// initialization calls. -type LateInitializer struct { - changed bool -} - -// IsChanged reports whether the second argument is ever used in late initialization -// function calls. -func (li *LateInitializer) IsChanged() bool { - return li.changed -} - -// SetChanged marks the LateInitializer such that users can tell whether any -// of the late initialization calls returned the non-original argument. -func (li *LateInitializer) SetChanged() { - li.changed = true -} - -// LateInitializeStringPtr implements late initialization for *string. -func (li *LateInitializer) LateInitializeStringPtr(org *string, from *string) *string { - if org != nil || from == nil { - return org - } - li.SetChanged() - return from -} - -// LateInitializeInt64Ptr implements late initialization for *int64. -func (li *LateInitializer) LateInitializeInt64Ptr(org *int64, from *int64) *int64 { - if org != nil || from == nil { - return org - } - li.SetChanged() - return from -} - -// LateInitializeBoolPtr implements late initialization for *bool. -func (li *LateInitializer) LateInitializeBoolPtr(org *bool, from *bool) *bool { - if org != nil || from == nil { - return org - } - li.SetChanged() - return from -} - -// LateInitializeTimePtr implements late initialization for *metav1.Time from -// *time.Time. 
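// Example (illustrative, not part of this diff): late-initializing optional
// spec fields from an observed external object, then using IsChanged to
// decide whether the spec must be written back. The parameters struct is
// hypothetical and stands in for both the spec and the observation.
package example

import "github.com/crossplane/crossplane-runtime/pkg/resource"

type parameters struct {
	Description *string
	SizeGB      *int64
}

func lateInitialize(spec *parameters, obs parameters) bool {
	li := resource.NewLateInitializer()
	spec.Description = li.LateInitializeStringPtr(spec.Description, obs.Description)
	spec.SizeGB = li.LateInitializeInt64Ptr(spec.SizeGB, obs.SizeGB)
	// True if any field above took its value from the observation.
	return li.IsChanged()
}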
-func (li *LateInitializer) LateInitializeTimePtr(org *metav1.Time, from *time.Time) *metav1.Time { - if org != nil || from == nil { - return org - } - li.SetChanged() - t := metav1.NewTime(*from) - return &t -} diff --git a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/predicates.go b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/predicates.go deleted file mode 100644 index ca441f21..00000000 --- a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/predicates.go +++ /dev/null @@ -1,148 +0,0 @@ -/* -Copyright 2019 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/predicate" - - "github.com/crossplane/crossplane-runtime/pkg/meta" -) - -// A PredicateFn returns true if the supplied object should be reconciled. -type PredicateFn func(obj runtime.Object) bool - -// NewPredicates returns a set of Funcs that are all satisfied by the supplied -// PredicateFn. The PredicateFn is run against the new object during updates. -func NewPredicates(fn PredicateFn) predicate.Funcs { - return predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { return fn(e.Object) }, - DeleteFunc: func(e event.DeleteEvent) bool { return fn(e.Object) }, - UpdateFunc: func(e event.UpdateEvent) bool { return fn(e.ObjectNew) }, - GenericFunc: func(e event.GenericEvent) bool { return fn(e.Object) }, - } -} - -// AnyOf accepts objects that pass any of the supplied predicate functions. -func AnyOf(fn ...PredicateFn) PredicateFn { - return func(obj runtime.Object) bool { - for _, f := range fn { - if f(obj) { - return true - } - } - return false - } -} - -// AllOf accepts objects that pass all of the supplied predicate functions. -func AllOf(fn ...PredicateFn) PredicateFn { - return func(obj runtime.Object) bool { - for _, f := range fn { - if !f(obj) { - return false - } - } - return true - } -} - -// HasManagedResourceReferenceKind accepts objects that reference the supplied -// managed resource kind. -func HasManagedResourceReferenceKind(k ManagedKind) PredicateFn { - return func(obj runtime.Object) bool { - r, ok := obj.(ManagedResourceReferencer) - if !ok { - return false - } - - if r.GetResourceReference() == nil { - return false - } - - return r.GetResourceReference().GroupVersionKind() == schema.GroupVersionKind(k) - } -} - -// IsManagedKind accepts objects that are of the supplied managed resource kind. -func IsManagedKind(k ManagedKind, ot runtime.ObjectTyper) PredicateFn { - return func(obj runtime.Object) bool { - gvk, err := GetKind(obj, ot) - if err != nil { - return false - } - return gvk == schema.GroupVersionKind(k) - } -} - -// IsControlledByKind accepts objects that are controlled by a resource of the -// supplied kind. 
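// Example (illustrative, not part of this diff): installing the predicate
// helpers above on a controller via WithEventFilter; AnyOf and AllOf compose
// in the same spot. The v1beta1 kind and the reconciler r are hypothetical.
func setupRole(mgr ctrl.Manager, r reconcile.Reconciler) error {
	p := resource.NewPredicates(
		resource.IsManagedKind(resource.ManagedKind(v1beta1.RoleGroupVersionKind), mgr.GetScheme()),
	)
	return ctrl.NewControllerManagedBy(mgr).
		For(&v1beta1.Role{}).
		WithEventFilter(p).
		Complete(r)
}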
-func IsControlledByKind(k schema.GroupVersionKind) PredicateFn { - return func(obj runtime.Object) bool { - mo, ok := obj.(metav1.Object) - if !ok { - return false - } - - ref := metav1.GetControllerOf(mo) - if ref == nil { - return false - } - - return ref.APIVersion == k.GroupVersion().String() && ref.Kind == k.Kind - } -} - -// IsPropagator accepts objects that request to be partially or fully propagated -// to another object of the same kind. -func IsPropagator() PredicateFn { - return func(obj runtime.Object) bool { - from, ok := obj.(metav1.Object) - if !ok { - return false - } - - return len(meta.AllowsPropagationTo(from)) > 0 - } -} - -// IsPropagated accepts objects that consent to be partially or fully propagated -// from another object of the same kind. -func IsPropagated() PredicateFn { - return func(obj runtime.Object) bool { - to, ok := obj.(metav1.Object) - if !ok { - return false - } - nn := meta.AllowsPropagationFrom(to) - return nn.Namespace != "" && nn.Name != "" - } -} - -// IsNamed accepts objects that is named as the given name. -func IsNamed(name string) PredicateFn { - return func(obj runtime.Object) bool { - mo, ok := obj.(metav1.Object) - if !ok { - return false - } - return mo.GetName() == name - } -} diff --git a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/providerconfig.go b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/providerconfig.go deleted file mode 100644 index 5f15978d..00000000 --- a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/providerconfig.go +++ /dev/null @@ -1,160 +0,0 @@ -/* -Copyright 2020 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "context" - "os" - - "github.com/spf13/afero" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - - xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - "github.com/crossplane/crossplane-runtime/pkg/errors" - "github.com/crossplane/crossplane-runtime/pkg/meta" -) - -const ( - errExtractEnv = "cannot extract from environment variable when none specified" - errExtractFs = "cannot extract from filesystem when no path specified" - errExtractSecretKey = "cannot extract from secret key when none specified" - errGetCredentialsSecret = "cannot get credentials secret" - errNoHandlerForSourceFmt = "no extraction handler registered for source: %s" - errMissingPCRef = "managed resource does not reference a ProviderConfig" - errApplyPCU = "cannot apply ProviderConfigUsage" -) - -type errMissingRef struct{ error } - -func (m errMissingRef) MissingReference() bool { return true } - -// IsMissingReference returns true if an error indicates that a managed -// resource is missing a required reference.. -func IsMissingReference(err error) bool { - _, ok := err.(interface { - MissingReference() bool - }) - return ok -} - -// EnvLookupFn looks up an environment variable. 
-type EnvLookupFn func(string) string - -// ExtractEnv extracts credentials from an environment variable. -func ExtractEnv(ctx context.Context, e EnvLookupFn, s xpv1.CommonCredentialSelectors) ([]byte, error) { - if s.Env == nil { - return nil, errors.New(errExtractEnv) - } - return []byte(e(s.Env.Name)), nil -} - -// ExtractFs extracts credentials from the filesystem. -func ExtractFs(ctx context.Context, fs afero.Fs, s xpv1.CommonCredentialSelectors) ([]byte, error) { - if s.Fs == nil { - return nil, errors.New(errExtractFs) - } - return afero.ReadFile(fs, s.Fs.Path) -} - -// ExtractSecret extracts credentials from a Kubernetes secret. -func ExtractSecret(ctx context.Context, client client.Client, s xpv1.CommonCredentialSelectors) ([]byte, error) { - if s.SecretRef == nil { - return nil, errors.New(errExtractSecretKey) - } - secret := &corev1.Secret{} - if err := client.Get(ctx, types.NamespacedName{Namespace: s.SecretRef.Namespace, Name: s.SecretRef.Name}, secret); err != nil { - return nil, errors.Wrap(err, errGetCredentialsSecret) - } - return secret.Data[s.SecretRef.Key], nil -} - -// CommonCredentialExtractor extracts credentials from common sources. -func CommonCredentialExtractor(ctx context.Context, source xpv1.CredentialsSource, client client.Client, selector xpv1.CommonCredentialSelectors) ([]byte, error) { - switch source { // nolint:exhaustive - case xpv1.CredentialsSourceEnvironment: - return ExtractEnv(ctx, os.Getenv, selector) - case xpv1.CredentialsSourceFilesystem: - return ExtractFs(ctx, afero.NewOsFs(), selector) - case xpv1.CredentialsSourceSecret: - return ExtractSecret(ctx, client, selector) - case xpv1.CredentialsSourceNone: - return nil, nil - } - return nil, errors.Errorf(errNoHandlerForSourceFmt, source) -} - -// A Tracker tracks managed resources. -type Tracker interface { - // Track the supplied managed resource. - Track(ctx context.Context, mg Managed) error -} - -// A TrackerFn is a function that tracks managed resources. -type TrackerFn func(ctx context.Context, mg Managed) error - -// Track the supplied managed resource. -func (fn TrackerFn) Track(ctx context.Context, mg Managed) error { - return fn(ctx, mg) -} - -// A ProviderConfigUsageTracker tracks usages of a ProviderConfig by creating or -// updating the appropriate ProviderConfigUsage. -type ProviderConfigUsageTracker struct { - c Applicator - of ProviderConfigUsage -} - -// NewProviderConfigUsageTracker creates a ProviderConfigUsageTracker. -func NewProviderConfigUsageTracker(c client.Client, of ProviderConfigUsage) *ProviderConfigUsageTracker { - return &ProviderConfigUsageTracker{c: NewAPIUpdatingApplicator(c), of: of} -} - -// Track that the supplied Managed resource is using the ProviderConfig it -// references by creating or updating a ProviderConfigUsage. Track should be -// called _before_ attempting to use the ProviderConfig. This ensures the -// managed resource's usage is updated if the managed resource is updated to -// reference a misconfigured ProviderConfig. 
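// Example (illustrative, not part of this diff): resolving provider
// credentials with CommonCredentialExtractor above. The ProviderConfig spec
// shape shown here is hypothetical, though it mirrors common Crossplane
// providers; kube is a controller-runtime client.
func getCredentials(ctx context.Context, kube client.Client, pc *v1beta1.ProviderConfig) ([]byte, error) {
	cd := pc.Spec.Credentials
	return resource.CommonCredentialExtractor(ctx, cd.Source, kube, cd.CommonCredentialSelectors)
}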
-func (u *ProviderConfigUsageTracker) Track(ctx context.Context, mg Managed) error { - pcu := u.of.DeepCopyObject().(ProviderConfigUsage) - gvk := mg.GetObjectKind().GroupVersionKind() - ref := mg.GetProviderConfigReference() - if ref == nil { - return errMissingRef{errors.New(errMissingPCRef)} - } - - pcu.SetName(string(mg.GetUID())) - pcu.SetLabels(map[string]string{xpv1.LabelKeyProviderName: ref.Name}) - pcu.SetOwnerReferences([]metav1.OwnerReference{meta.AsController(meta.TypedReferenceTo(mg, gvk))}) - pcu.SetProviderConfigReference(xpv1.Reference{Name: ref.Name}) - pcu.SetResourceReference(xpv1.TypedReference{ - APIVersion: gvk.GroupVersion().String(), - Kind: gvk.Kind, - Name: mg.GetName(), - }) - - err := u.c.Apply(ctx, pcu, - MustBeControllableBy(mg.GetUID()), - AllowUpdateIf(func(current, _ runtime.Object) bool { - return current.(ProviderConfigUsage).GetProviderConfigReference() != pcu.GetProviderConfigReference() - }), - ) - return errors.Wrap(Ignore(IsNotAllowed, err), errApplyPCU) -} diff --git a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/reference.go b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/reference.go deleted file mode 100644 index a596106f..00000000 --- a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/reference.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2019 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "context" - "fmt" - - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// ReferenceStatusType is an enum type for the possible values for a Reference Status -type ReferenceStatusType int - -// Reference statuses. -const ( - ReferenceStatusUnknown ReferenceStatusType = iota - ReferenceNotFound - ReferenceNotReady - ReferenceReady -) - -func (t ReferenceStatusType) String() string { - return []string{"Unknown", "NotFound", "NotReady", "Ready"}[t] -} - -// ReferenceStatus has the name and status of a reference -type ReferenceStatus struct { - Name string - Status ReferenceStatusType -} - -func (r ReferenceStatus) String() string { - return fmt.Sprintf("{reference:%s status:%s}", r.Name, r.Status) -} - -// A CanReference is a resource that can reference another resource in its -// spec in order to automatically resolve corresponding spec field values -// by inspecting the referenced resource. -type CanReference runtime.Object - -// An AttributeReferencer resolves cross-resource attribute references. See -// https://github.com/crossplane/crossplane/blob/master/design/one-pager-cross-resource-referencing.md -// for more information -type AttributeReferencer interface { - // GetStatus retries the referenced resource, as well as other non-managed - // resources (like a `Provider`) and reports their readiness for use as a - // referenced resource. 
- GetStatus(ctx context.Context, res CanReference, r client.Reader) ([]ReferenceStatus, error) - - // Build retrieves the referenced resource, as well as other non-managed - // resources (like a `Provider`), and builds the referenced attribute, - // returning it as a string value. - Build(ctx context.Context, res CanReference, r client.Reader) (value string, err error) - - // Assign accepts a managed resource object, and assigns the given value to - // its corresponding property. - Assign(res CanReference, value string) error -} diff --git a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/resource.go b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/resource.go deleted file mode 100644 index 0e706603..00000000 --- a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/resource.go +++ /dev/null @@ -1,412 +0,0 @@ -/* -Copyright 2019 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "context" - "strings" - - corev1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/util/retry" - "sigs.k8s.io/controller-runtime/pkg/client" - - xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - "github.com/crossplane/crossplane-runtime/pkg/errors" - "github.com/crossplane/crossplane-runtime/pkg/meta" -) - -// SecretTypeConnection is the type of Crossplane connection secrets. -const SecretTypeConnection corev1.SecretType = "connection.crossplane.io/v1alpha1" - -// External resources are tagged/labelled with the following keys in the cloud -// provider API if the type supports. -const ( - ExternalResourceTagKeyKind = "crossplane-kind" - ExternalResourceTagKeyName = "crossplane-name" - ExternalResourceTagKeyProvider = "crossplane-providerconfig" -) - -// A ManagedKind contains the type metadata for a kind of managed resource. -type ManagedKind schema.GroupVersionKind - -// A CompositeKind contains the type metadata for a kind of composite resource. -type CompositeKind schema.GroupVersionKind - -// A CompositeClaimKind contains the type metadata for a kind of composite -// resource claim. -type CompositeClaimKind schema.GroupVersionKind - -// ProviderConfigKinds contains the type metadata for a kind of provider config. -type ProviderConfigKinds struct { - Config schema.GroupVersionKind - Usage schema.GroupVersionKind - UsageList schema.GroupVersionKind -} - -// A LocalConnectionSecretOwner may create and manage a connection secret in its -// own namespace. -type LocalConnectionSecretOwner interface { - runtime.Object - metav1.Object - - LocalConnectionSecretWriterTo -} - -// A ConnectionPropagator is responsible for propagating information required to -// connect to a resource. -// Deprecated: This functionality will be removed soon. 
-type ConnectionPropagator interface { - PropagateConnection(ctx context.Context, to LocalConnectionSecretOwner, from ConnectionSecretOwner) error -} - -// A ConnectionPropagatorFn is a function that satisfies the -// ConnectionPropagator interface. -type ConnectionPropagatorFn func(ctx context.Context, to LocalConnectionSecretOwner, from ConnectionSecretOwner) error - -// A ManagedConnectionPropagator is responsible for propagating information -// required to connect to a managed resource (for example the connection secret) -// from the managed resource to a target. -// Deprecated: This functionality will be removed soon. -type ManagedConnectionPropagator interface { - PropagateConnection(ctx context.Context, o LocalConnectionSecretOwner, mg Managed) error -} - -// A ManagedConnectionPropagatorFn is a function that satisfies the -// ManagedConnectionPropagator interface. -type ManagedConnectionPropagatorFn func(ctx context.Context, o LocalConnectionSecretOwner, mg Managed) error - -// PropagateConnection information from the supplied managed resource to the -// supplied resource claim. -func (fn ManagedConnectionPropagatorFn) PropagateConnection(ctx context.Context, o LocalConnectionSecretOwner, mg Managed) error { - return fn(ctx, o, mg) -} - -// LocalConnectionSecretFor creates a connection secret in the namespace of the -// supplied LocalConnectionSecretOwner, assumed to be of the supplied kind. -func LocalConnectionSecretFor(o LocalConnectionSecretOwner, kind schema.GroupVersionKind) *corev1.Secret { - return &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: o.GetNamespace(), - Name: o.GetWriteConnectionSecretToReference().Name, - OwnerReferences: []metav1.OwnerReference{meta.AsController(meta.TypedReferenceTo(o, kind))}, - }, - Type: SecretTypeConnection, - Data: make(map[string][]byte), - } -} - -// A ConnectionSecretOwner may create and manage a connection secret in an -// arbitrary namespace. -type ConnectionSecretOwner interface { - runtime.Object - metav1.Object - - ConnectionSecretWriterTo -} - -// ConnectionSecretFor creates a connection for the supplied -// ConnectionSecretOwner, assumed to be of the supplied kind. The secret is -// written to 'default' namespace if the ConnectionSecretOwner does not specify -// a namespace. -func ConnectionSecretFor(o ConnectionSecretOwner, kind schema.GroupVersionKind) *corev1.Secret { - return &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: o.GetWriteConnectionSecretToReference().Namespace, - Name: o.GetWriteConnectionSecretToReference().Name, - OwnerReferences: []metav1.OwnerReference{meta.AsController(meta.TypedReferenceTo(o, kind))}, - }, - Type: SecretTypeConnection, - Data: make(map[string][]byte), - } -} - -// MustCreateObject returns a new Object of the supplied kind. It panics if the -// kind is unknown to the supplied ObjectCreator. -func MustCreateObject(kind schema.GroupVersionKind, oc runtime.ObjectCreater) runtime.Object { - obj, err := oc.New(kind) - if err != nil { - panic(err) - } - return obj -} - -// GetKind returns the GroupVersionKind of the supplied object. It return an -// error if the object is unknown to the supplied ObjectTyper, the object is -// unversioned, or the object does not have exactly one registered kind. 
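// Example (illustrative, not part of this diff): building and applying a
// connection secret with ConnectionSecretFor above. The gvk parameter is
// assumed to be the owner's known GroupVersionKind, and the endpoint datum is
// hypothetical; the Apply and ApplyOption come from the applicators shown
// elsewhere in this file's diff.
func publishConnection(ctx context.Context, kube client.Client, mg resource.ConnectionSecretOwner, gvk schema.GroupVersionKind) error {
	if mg.GetWriteConnectionSecretToReference() == nil {
		return nil // The owner does not want a connection secret.
	}
	s := resource.ConnectionSecretFor(mg, gvk)
	s.Data["endpoint"] = []byte("https://example.org")
	return resource.NewAPIPatchingApplicator(kube).Apply(ctx, s,
		resource.ConnectionSecretMustBeControllableBy(mg.GetUID()))
}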
-func GetKind(obj runtime.Object, ot runtime.ObjectTyper) (schema.GroupVersionKind, error) { - kinds, unversioned, err := ot.ObjectKinds(obj) - if err != nil { - return schema.GroupVersionKind{}, errors.Wrap(err, "cannot get kind of supplied object") - } - if unversioned { - return schema.GroupVersionKind{}, errors.New("supplied object is unversioned") - } - if len(kinds) != 1 { - return schema.GroupVersionKind{}, errors.New("supplied object does not have exactly one kind") - } - return kinds[0], nil -} - -// MustGetKind returns the GroupVersionKind of the supplied object. It panics if -// the object is unknown to the supplied ObjectTyper, the object is unversioned, -// or the object does not have exactly one registered kind. -func MustGetKind(obj runtime.Object, ot runtime.ObjectTyper) schema.GroupVersionKind { - gvk, err := GetKind(obj, ot) - if err != nil { - panic(err) - } - return gvk -} - -// An ErrorIs function returns true if an error satisfies a particular condition. -type ErrorIs func(err error) bool - -// Ignore any errors that satisfy the supplied ErrorIs function by returning -// nil. Errors that do not satisfy the supplied function are returned unmodified. -func Ignore(is ErrorIs, err error) error { - if is(err) { - return nil - } - return err -} - -// IgnoreAny ignores errors that satisfy any of the supplied ErrorIs functions -// by returning nil. Errors that do not satisfy any of the supplied functions -// are returned unmodified. -func IgnoreAny(err error, is ...ErrorIs) error { - for _, f := range is { - if f(err) { - return nil - } - } - return err -} - -// IgnoreNotFound returns the supplied error, or nil if the error indicates a -// Kubernetes resource was not found. -func IgnoreNotFound(err error) error { - return Ignore(kerrors.IsNotFound, err) -} - -// IsAPIError returns true if the given error's type is of Kubernetes API error. -func IsAPIError(err error) bool { - _, ok := err.(kerrors.APIStatus) - return ok -} - -// IsAPIErrorWrapped returns true if err is a K8s API error, or recursively wraps a K8s API error -func IsAPIErrorWrapped(err error) bool { - return IsAPIError(errors.Cause(err)) -} - -// IsConditionTrue returns if condition status is true -func IsConditionTrue(c xpv1.Condition) bool { - return c.Status == corev1.ConditionTrue -} - -// An Applicator applies changes to an object. -type Applicator interface { - Apply(context.Context, client.Object, ...ApplyOption) error -} - -type shouldRetryFunc func(error) bool - -// An ApplicatorWithRetry applies changes to an object, retrying on transient failures -type ApplicatorWithRetry struct { - Applicator - shouldRetry shouldRetryFunc - backoff wait.Backoff -} - -// Apply invokes nested Applicator's Apply retrying on designated errors -func (awr *ApplicatorWithRetry) Apply(ctx context.Context, c client.Object, opts ...ApplyOption) error { - return retry.OnError(awr.backoff, awr.shouldRetry, func() error { - return awr.Applicator.Apply(ctx, c, opts...) - }) -} - -// NewApplicatorWithRetry returns an ApplicatorWithRetry for the specified -// applicator and with the specified retry function. -// If backoff is nil, then retry.DefaultRetry is used as the default. 
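// Example (illustrative, not part of this diff): retrying Apply on
// optimistic-concurrency conflicts by wrapping an APIPatchingApplicator with
// NewApplicatorWithRetry; a nil backoff selects retry.DefaultRetry, as
// documented above. kerrors is k8s.io/apimachinery/pkg/api/errors.
func applyWithConflictRetry(ctx context.Context, kube client.Client, obj client.Object) error {
	a := resource.NewApplicatorWithRetry(
		resource.NewAPIPatchingApplicator(kube),
		kerrors.IsConflict, // Retry only HTTP 409 Conflict errors.
		nil,
	)
	return a.Apply(ctx, obj)
}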
-func NewApplicatorWithRetry(applicator Applicator, shouldRetry shouldRetryFunc, backoff *wait.Backoff) *ApplicatorWithRetry {
- result := &ApplicatorWithRetry{
- Applicator: applicator,
- shouldRetry: shouldRetry,
- backoff: retry.DefaultRetry,
- }
-
- if backoff != nil {
- result.backoff = *backoff
- }
-
- return result
-}
-
-// A ClientApplicator may be used to build a single 'client' that satisfies both
-// client.Client and Applicator.
-type ClientApplicator struct {
- client.Client
- Applicator
-}
-
-// An ApplyFn is a function that satisfies the Applicator interface.
-type ApplyFn func(context.Context, client.Object, ...ApplyOption) error
-
-// Apply changes to the supplied object.
-func (fn ApplyFn) Apply(ctx context.Context, o client.Object, ao ...ApplyOption) error {
- return fn(ctx, o, ao...)
-}
-
-// An ApplyOption is called before patching the current object to match the
-// desired object. ApplyOptions are not called if no current object exists.
-type ApplyOption func(ctx context.Context, current, desired runtime.Object) error
-
-// UpdateFn returns an ApplyOption that is used to modify the current object to
-// match fields of the desired.
-func UpdateFn(fn func(current, desired runtime.Object)) ApplyOption {
- return func(_ context.Context, c, d runtime.Object) error {
- fn(c, d)
- return nil
- }
-}
-
-type errNotControllable struct{ error }
-
-func (e errNotControllable) NotControllable() bool {
- return true
-}
-
-// IsNotControllable returns true if the supplied error indicates that a
-// resource is not controllable - i.e. that another resource is its controller
-// and may not be replaced as its controller reference.
-func IsNotControllable(err error) bool {
- _, ok := err.(interface {
- NotControllable() bool
- })
- return ok
-}
-
-// MustBeControllableBy requires that the current object is controllable by an
-// object with the supplied UID. An object is controllable if its controller
-// reference matches the supplied UID, or it has no controller reference. An
-// error that satisfies IsNotControllable will be returned if the current object
-// cannot be controlled by the supplied UID.
-func MustBeControllableBy(u types.UID) ApplyOption {
- return func(_ context.Context, current, _ runtime.Object) error {
- c := metav1.GetControllerOf(current.(metav1.Object))
- if c == nil {
- return nil
- }
-
- if c.UID != u {
- return errNotControllable{errors.Errorf("existing object is not controlled by UID %q", u)}
- }
- return nil
- }
-}
-
-// ConnectionSecretMustBeControllableBy requires that the current object is a
-// connection secret that is controllable by an object with the supplied UID.
-// Contemporary connection secrets are of SecretTypeConnection, while legacy
-// connection secrets are of corev1.SecretTypeOpaque. Contemporary connection
-// secrets are considered controllable if they are already controlled by the
-// supplied UID, or have no controller reference. Legacy connection secrets are
-// only considered controllable if they are already controlled by the supplied
-// UID. It is not safe to assume legacy connection secrets without a controller
-// reference are controllable because they are indistinguishable from Kubernetes
-// secrets that have nothing to do with Crossplane. An error that satisfies
-// IsNotControllable will be returned if the current secret is not a connection
-// secret or cannot be controlled by the supplied UID.
-func ConnectionSecretMustBeControllableBy(u types.UID) ApplyOption { - return func(_ context.Context, current, _ runtime.Object) error { - s := current.(*corev1.Secret) - c := metav1.GetControllerOf(s) - - switch { - case c == nil && s.Type != SecretTypeConnection: - return errNotControllable{errors.Errorf("refusing to modify uncontrolled secret of type %q", s.Type)} - case c == nil: - return nil - case c.UID != u: - return errNotControllable{errors.Errorf("existing secret is not controlled by UID %q", u)} - } - - return nil - } -} - -type errNotAllowed struct{ error } - -func (e errNotAllowed) NotAllowed() bool { - return true -} - -// IsNotAllowed returns true if the supplied error indicates that an operation -// was not allowed. -func IsNotAllowed(err error) bool { - _, ok := err.(interface { - NotAllowed() bool - }) - return ok -} - -// AllowUpdateIf will only update the current object if the supplied fn returns -// true. An error that satisfies IsNotAllowed will be returned if the supplied -// function returns false. Creation of a desired object that does not currently -// exist is always allowed. -func AllowUpdateIf(fn func(current, desired runtime.Object) bool) ApplyOption { - return func(_ context.Context, current, desired runtime.Object) error { - if fn(current, desired) { - return nil - } - return errNotAllowed{errors.New("update not allowed")} - } -} - -// Apply changes to the supplied object. The object will be created if it does -// not exist, or patched if it does. -// -// Deprecated: use APIPatchingApplicator instead. -func Apply(ctx context.Context, c client.Client, o client.Object, ao ...ApplyOption) error { - return NewAPIPatchingApplicator(c).Apply(ctx, o, ao...) -} - -// GetExternalTags returns the identifying tags to be used to tag the external -// resource in provider API. -func GetExternalTags(mg Managed) map[string]string { - tags := map[string]string{ - ExternalResourceTagKeyKind: strings.ToLower(mg.GetObjectKind().GroupVersionKind().GroupKind().String()), - ExternalResourceTagKeyName: mg.GetName(), - } - - switch { - case mg.GetProviderConfigReference() != nil && mg.GetProviderConfigReference().Name != "": - tags[ExternalResourceTagKeyProvider] = mg.GetProviderConfigReference().Name - // TODO(muvaf): Remove the branch once Provider type has been removed from - // everywhere. - case mg.GetProviderReference() != nil && mg.GetProviderReference().Name != "": - tags[ExternalResourceTagKeyProvider] = mg.GetProviderReference().Name - } - return tags -} diff --git a/vendor/github.com/crossplane/provider-aws/LICENSE b/vendor/github.com/crossplane/provider-aws/LICENSE deleted file mode 100644 index ef10385c..00000000 --- a/vendor/github.com/crossplane/provider-aws/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016 The Crossplane Authors. All rights reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
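[Reviewer note, not part of the patch: the resource.go helpers deleted above - Applicator, ApplyOption, NewAPIPatchingApplicator, and NewApplicatorWithRetry - composed as sketched below. This is a minimal, hypothetical example assuming the crossplane-runtime resource package as it stood before this removal; the client c, secret s, and the "example-uid" owner UID are placeholders, not names from this patch.]

package example

import (
    "context"

    corev1 "k8s.io/api/core/v1"
    kerrors "k8s.io/apimachinery/pkg/api/errors"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/crossplane/crossplane-runtime/pkg/resource"
)

// applyWithRetry wraps an APIPatchingApplicator so that Apply is retried on
// update conflicts. Passing a nil backoff falls back to retry.DefaultRetry,
// per the NewApplicatorWithRetry constructor deleted above.
func applyWithRetry(ctx context.Context, c client.Client, s *corev1.Secret) error {
    a := resource.NewApplicatorWithRetry(
        resource.NewAPIPatchingApplicator(c), // patches current to match desired
        kerrors.IsConflict,                   // retry only on optimistic-lock conflicts
        nil,                                  // nil backoff -> retry.DefaultRetry
    )
    // MustBeControllableBy makes Apply return an IsNotControllable error if a
    // different owner already controls the secret.
    return a.Apply(ctx, s, resource.MustBeControllableBy("example-uid"))
}
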
diff --git a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/accesskey_types.go b/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/accesskey_types.go
deleted file mode 100644
index d7701b05..00000000
--- a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/accesskey_types.go
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
-Copyright 2020 The Crossplane Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
-)
-
-// AccessKeyParameters define the desired state of an AWS IAM Access Key.
-type AccessKeyParameters struct {
- // Username contains the name of the User.
- // +optional
- // +immutable
- // +crossplane:generate:reference:type=User
- Username string `json:"userName,omitempty"`
-
- // UsernameRef references to an User to retrieve its userName
- // +optional
- UsernameRef *xpv1.Reference `json:"userNameRef,omitempty"`
-
- // UsernameSelector selects a reference to an User to retrieve its userName
- // +optional
- UsernameSelector *xpv1.Selector `json:"userNameSelector,omitempty"`
-
- // The current status of this AccessKey on AWS.
- // Must be either Active or Inactive.
- // +kubebuilder:validation:Enum=Active;Inactive
- Status string `json:"accessKeyStatus,omitempty"`
-}
-
-// An AccessKeySpec defines the desired state of an IAM Access Key.
-type AccessKeySpec struct {
- xpv1.ResourceSpec `json:",inline"`
- ForProvider AccessKeyParameters `json:"forProvider"`
-}
-
-// AccessKeyStatus represents the observed state of an IAM Access Key.
-type AccessKeyStatus struct {
- xpv1.ResourceStatus `json:",inline"`
-}
-
-// +kubebuilder:object:root=true
-
-// An AccessKey is a managed resource that represents an Access Key for an AWS IAM User.
-// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" -// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" -// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" -// +kubebuilder:printcolumn:name="STATUS",type="string",JSONPath=".spec.forProvider.accessKeyStatus" -// +kubebuilder:subresource:status -// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} -type AccessKey struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec AccessKeySpec `json:"spec"` - Status AccessKeyStatus `json:"status,omitempty"` -} - -// +kubebuilder:object:root=true - -// AccessKeyList contains a list of IAM Access Keys -type AccessKeyList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []AccessKey `json:"items"` -} diff --git a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/doc.go b/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/doc.go deleted file mode 100644 index 8a3b3943..00000000 --- a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2019 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package v1beta1 contains managed resources for AWS identity services such as -// IAM. -// +kubebuilder:object:generate=true -// +groupName=iam.aws.crossplane.io -// +versionName=v1beta1 -package v1beta1 diff --git a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/group_types.go b/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/group_types.go deleted file mode 100644 index 073782b9..00000000 --- a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/group_types.go +++ /dev/null @@ -1,80 +0,0 @@ -/* -Copyright 2019 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" -) - -// GroupParameters define the desired state of an AWS IAM Group. -type GroupParameters struct { - // The path for the group name. - // +optional - Path *string `json:"path,omitempty"` -} - -// An GroupSpec defines the desired state of an IAM Group. 
-type GroupSpec struct {
- xpv1.ResourceSpec `json:",inline"`
- ForProvider GroupParameters `json:"forProvider,omitempty"`
-}
-
-// GroupObservation keeps the state for the external resource
-type GroupObservation struct {
- // The Amazon Resource Name (ARN) that identifies the group.
- ARN string `json:"arn,omitempty"`
-
- // The stable and unique string identifying the group.
- GroupID string `json:"groupId,omitempty"`
-}
-
-// An GroupStatus represents the observed state of an IAM Group.
-type GroupStatus struct {
- xpv1.ResourceStatus `json:",inline"`
- AtProvider GroupObservation `json:"atProvider,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// An Group is a managed resource that represents an AWS IAM Group.
-// +kubebuilder:printcolumn:name="ARN",type="string",JSONPath=".status.atProvider.arn"
-// +kubebuilder:printcolumn:name="ID",type="string",JSONPath=".status.atProvider.groupId"
-// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
-// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
-// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
-// +kubebuilder:subresource:status
-// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws}
-type Group struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- Spec GroupSpec `json:"spec"`
-
- Status GroupStatus `json:"status,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// GroupList contains a list of IAM Groups
-type GroupList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []Group `json:"items"`
-}
diff --git a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/grouppolicyattachment_types.go b/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/grouppolicyattachment_types.go
deleted file mode 100644
index f237a34b..00000000
--- a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/grouppolicyattachment_types.go
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
-Copyright 2019 The Crossplane Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
-)
-
-// GroupPolicyAttachmentParameters define the desired state of an AWS GroupPolicyAttachment.
-type GroupPolicyAttachmentParameters struct {
-
- // PolicyARN is the Amazon Resource Name (ARN) of the IAM policy you want to
- // attach.
- // +immutable
- // +crossplane:generate:reference:type=Policy
- // +crossplane:generate:reference:extractor=PolicyARN()
- PolicyARN string `json:"policyArn,omitempty"`
-
- // PolicyARNRef references an Policy to retrieve its Policy ARN.
- // +optional - PolicyARNRef *xpv1.Reference `json:"policyArnRef,omitempty"` - - // PolicyARNSelector selects a reference to an Policy to retrieve its - // Policy ARN - // +optional - PolicyARNSelector *xpv1.Selector `json:"policyArnSelector,omitempty"` - - // GroupName presents the name of the Group. - // +immutable - // +crossplane:generate:reference:type=Group - GroupName string `json:"groupName,omitempty"` - - // GroupNameRef references to an Group to retrieve its groupName - // +optional - GroupNameRef *xpv1.Reference `json:"groupNameRef,omitempty"` - - // GroupNameSelector selects a reference to an Group to retrieve its groupName - // +optional - GroupNameSelector *xpv1.Selector `json:"groupNameSelector,omitempty"` -} - -// An GroupPolicyAttachmentSpec defines the desired state of an -// GroupPolicyAttachment. -type GroupPolicyAttachmentSpec struct { - xpv1.ResourceSpec `json:",inline"` - ForProvider GroupPolicyAttachmentParameters `json:"forProvider"` -} - -// GroupPolicyAttachmentObservation keeps the state for the external resource -type GroupPolicyAttachmentObservation struct { - // AttachedPolicyARN is the arn for the attached policy. If nil, the policy - // is not yet attached - AttachedPolicyARN string `json:"attachedPolicyArn"` -} - -// An GroupPolicyAttachmentStatus represents the observed state of an -// GroupPolicyAttachment. -type GroupPolicyAttachmentStatus struct { - xpv1.ResourceStatus `json:",inline"` - AtProvider GroupPolicyAttachmentObservation `json:"atProvider,omitempty"` -} - -// +kubebuilder:object:root=true - -// An GroupPolicyAttachment is a managed resource that represents an AWS IAM -// Group policy attachment. -// +kubebuilder:printcolumn:name="GROUPNAME",type="string",JSONPath=".spec.forProvider.groupName" -// +kubebuilder:printcolumn:name="POLICYARN",type="string",JSONPath=".spec.forProvider.policyArn" -// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" -// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" -// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" -// +kubebuilder:subresource:status -// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} -type GroupPolicyAttachment struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec GroupPolicyAttachmentSpec `json:"spec"` - Status GroupPolicyAttachmentStatus `json:"status,omitempty"` -} - -// +kubebuilder:object:root=true - -// GroupPolicyAttachmentList contains a list of GroupPolicyAttachments -type GroupPolicyAttachmentList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []GroupPolicyAttachment `json:"items"` -} diff --git a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/groupusermembership_types.go b/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/groupusermembership_types.go deleted file mode 100644 index a997f3af..00000000 --- a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/groupusermembership_types.go +++ /dev/null @@ -1,105 +0,0 @@ -/* -Copyright 2019 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" -) - -// GroupUserMembershipParameters define the desired state of an AWS GroupUserMembership. -type GroupUserMembershipParameters struct { - - // GroupName is the Amazon IAM Group Name (Group) of the IAM group you want to - // add User to. - // +immutable - // +crossplane:generate:reference:type=Group - GroupName string `json:"groupName,omitempty"` - - // GroupNameRef references to an Group to retrieve its groupName - // +optional - // +immutable - GroupNameRef *xpv1.Reference `json:"groupNameRef,omitempty"` - - // GroupNameSelector selects a reference to an Group to retrieve its groupName - // +optional - GroupNameSelector *xpv1.Selector `json:"groupNameSelector,omitempty"` - - // UserName presents the name of the User. - // +immutable - // +crossplane:generate:reference:type=User - UserName string `json:"userName,omitempty"` - - // UserNameRef references to an User to retrieve its userName - // +optional - // +immutable - UserNameRef *xpv1.Reference `json:"userNameRef,omitempty"` - - // UserNameSelector selects a reference to an User to retrieve its userName - // +optional - UserNameSelector *xpv1.Selector `json:"userNameSelector,omitempty"` -} - -// An GroupUserMembershipSpec defines the desired state of an -// GroupUserMembership. -type GroupUserMembershipSpec struct { - xpv1.ResourceSpec `json:",inline"` - ForProvider GroupUserMembershipParameters `json:"forProvider"` -} - -// GroupUserMembershipObservation keeps the state for the external resource -type GroupUserMembershipObservation struct { - // AttachedGroupARN is the arn for the attached group. If nil, the group - // is not yet attached - AttachedGroupARN string `json:"attachedGroupArn"` -} - -// An GroupUserMembershipStatus represents the observed state of an -// GroupUserMembership. -type GroupUserMembershipStatus struct { - xpv1.ResourceStatus `json:",inline"` - AtProvider GroupUserMembershipObservation `json:"atProvider,omitempty"` -} - -// +kubebuilder:object:root=true - -// An GroupUserMembership is a managed resource that represents an AWS IAM -// User group membership. 
-// +kubebuilder:printcolumn:name="USERNAME",type="string",JSONPath=".spec.forProvider.userName" -// +kubebuilder:printcolumn:name="GROUPNAME",type="string",JSONPath=".spec.forProvider.groupName" -// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" -// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" -// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" -// +kubebuilder:subresource:status -// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} -type GroupUserMembership struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec GroupUserMembershipSpec `json:"spec"` - Status GroupUserMembershipStatus `json:"status,omitempty"` -} - -// +kubebuilder:object:root=true - -// GroupUserMembershipList contains a list of GroupUserMemberships -type GroupUserMembershipList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []GroupUserMembership `json:"items"` -} diff --git a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/openidconnectprovider_types.go b/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/openidconnectprovider_types.go deleted file mode 100644 index d65cae3a..00000000 --- a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/openidconnectprovider_types.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -Copyright 2021 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// OpenIDConnectProviderParameters defines the desired state of OpenIDConnectProvider -type OpenIDConnectProviderParameters struct { - // A list of client IDs (also known as audiences). When a mobile or web app - // registers with an OpenID Connect provider, they establish a value that identifies - // the application. (This is the value that's sent as the client_id parameter - // on OAuth requests.) - // - // You can register multiple client IDs with the same provider. For example, - // you might have multiple applications that use the same OIDC provider. You - // cannot register more than 100 client IDs with a single IAM OIDC provider. - // - // There is no defined format for a client ID. The CreateOpenIDConnectProviderRequest - // operation accepts client IDs up to 255 characters long. - // +kubebuilder:validation:MaxItems:=100 - // +optional - ClientIDList []string `json:"clientIDList,omitempty"` - - // A list of server certificate thumbprints for the OpenID Connect (OIDC) identity - // provider's server certificates. Typically this list includes only one entry. - // However, IAM lets you have up to five thumbprints for an OIDC provider. This - // lets you maintain multiple thumbprints if the identity provider is rotating - // certificates. 
- // - // The server certificate thumbprint is the hex-encoded SHA-1 hash value of - // the X.509 certificate used by the domain where the OpenID Connect provider - // makes its keys available. It is always a 40-character string. - // - // You must provide at least one thumbprint when creating an IAM OIDC provider. - // For example, assume that the OIDC provider is server.example.com and the - // provider stores its keys at https://keys.server.example.com/openid-connect. - // In that case, the thumbprint string would be the hex-encoded SHA-1 hash value - // of the certificate used by https://keys.server.example.com. - // - // For more information about obtaining the OIDC provider's thumbprint, see - // Obtaining the Thumbprint for an OpenID Connect Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/identity-providers-oidc-obtain-thumbprint.html) - // in the IAM User Guide. - // +kubebuilder:validation:MinItems:=1 - // +kubebuilder:validation:MaxItems:=5 - ThumbprintList []string `json:"thumbprintList"` - - // The URL of the identity provider. The URL must begin with https:// and should - // correspond to the iss claim in the provider's OpenID Connect ID tokens. Per - // the OIDC standard, path components are allowed but query parameters are not. - // Typically the URL consists of only a hostname, like https://server.example.org - // or https://example.com. - // - // You cannot register the same provider multiple times in a single AWS account. - // If you try to submit a URL that has already been used for an OpenID Connect - // provider in the AWS account, you will get an error. - URL string `json:"url"` -} - -// OpenIDConnectProviderSpec defines the desired state of OpenIDConnectProvider -type OpenIDConnectProviderSpec struct { - xpv1.ResourceSpec `json:",inline"` - ForProvider OpenIDConnectProviderParameters `json:"forProvider"` -} - -// OpenIDConnectProviderObservation defines the observed state of OpenIDConnectProvider -type OpenIDConnectProviderObservation struct { - // The date and time when the IAM OIDC provider resource object was created - // in the AWS account. - CreateDate *metav1.Time `json:"createDate,omitempty"` -} - -// OpenIDConnectProviderStatus defines the observed state of OpenIDConnectProvider. 
-type OpenIDConnectProviderStatus struct {
- xpv1.ResourceStatus `json:",inline"`
- AtProvider OpenIDConnectProviderObservation `json:"atProvider,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// OpenIDConnectProvider is the Schema for the OpenIDConnectProviders API
-// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
-// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
-// +kubebuilder:printcolumn:name="URL",type="string",JSONPath=".spec.forProvider.url"
-// +kubebuilder:subresource:status
-// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws},shortName="oidcprovider"
-type OpenIDConnectProvider struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
- Spec OpenIDConnectProviderSpec `json:"spec"`
- Status OpenIDConnectProviderStatus `json:"status,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// OpenIDConnectProviderList contains a list of OpenIDConnectProviders
-type OpenIDConnectProviderList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []OpenIDConnectProvider `json:"items"`
-}
diff --git a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/policy_types.go b/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/policy_types.go
deleted file mode 100644
index ee1b12e0..00000000
--- a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/policy_types.go
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
-Copyright 2019 The Crossplane Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
-)
-
-// PolicyParameters define the desired state of an AWS IAM Policy.
-type PolicyParameters struct {
- // A description of the policy.
- // +optional
- Description *string `json:"description,omitempty"`
-
- // The path to the policy.
- // +optional
- Path *string `json:"path,omitempty"`
-
- // The JSON policy document that is the content for the policy.
- Document string `json:"document"`
-
- // The name of the policy.
- Name string `json:"name"`
-
- // Tags. For more information about
- // tagging, see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html)
- // in the IAM User Guide.
- // +immutable
- // +optional
- Tags []Tag `json:"tags,omitempty"`
-}
-
-// An PolicySpec defines the desired state of an Policy.
-type PolicySpec struct {
- xpv1.ResourceSpec `json:",inline"`
- ForProvider PolicyParameters `json:"forProvider"`
-}
-
-// PolicyObservation keeps the state for the external resource
-type PolicyObservation struct {
- // The Amazon Resource Name (ARN) of the policy
- ARN string `json:"arn,omitempty"`
-
- // The number of entities (users, groups, and roles) that the policy is attached
- // to.
- AttachmentCount int32 `json:"attachmentCount,omitempty"`
-
- // The identifier for the version of the policy that is set as the default version.
- DefaultVersionID string `json:"defaultVersionId,omitempty"`
-
- // Specifies whether the policy can be attached to an IAM user, group, or role.
- IsAttachable bool `json:"isAttachable,omitempty"`
-
- // The number of entities (users and roles) for which the policy is used to
- // set the permissions boundary.
- PermissionsBoundaryUsageCount int32 `json:"permissionsBoundaryUsageCount,omitempty"`
-
- // The stable and unique string identifying the policy.
- PolicyID string `json:"policyId,omitempty"`
-}
-
-// An PolicyStatus represents the observed state of an Policy.
-type PolicyStatus struct {
- xpv1.ResourceStatus `json:",inline"`
- AtProvider PolicyObservation `json:"atProvider,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// An Policy is a managed resource that represents an AWS IAM Policy.
-// +kubebuilder:printcolumn:name="ARN",type="string",JSONPath=".status.atProvider.arn"
-// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
-// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
-// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
-// +kubebuilder:subresource:status
-// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws}
-type Policy struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- Spec PolicySpec `json:"spec"`
- Status PolicyStatus `json:"status,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// PolicyList contains a list of Policies
-type PolicyList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []Policy `json:"items"`
-}
diff --git a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/referencers.go b/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/referencers.go
deleted file mode 100644
index e1994f04..00000000
--- a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/referencers.go
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
-Copyright 2019 The Crossplane Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
- "github.com/crossplane/crossplane-runtime/pkg/reference"
- "github.com/crossplane/crossplane-runtime/pkg/resource"
-)
-
-// RoleARN returns the status.atProvider.ARN of a Role.
-func RoleARN() reference.ExtractValueFn {
- return func(mg resource.Managed) string {
- r, ok := mg.(*Role)
- if !ok {
- return ""
- }
- return r.Status.AtProvider.ARN
- }
-}
-
-// PolicyARN returns a function that returns the ARN of the given policy.
-func PolicyARN() reference.ExtractValueFn {
- return func(mg resource.Managed) string {
- r, ok := mg.(*Policy)
- if !ok {
- return ""
- }
- return r.Status.AtProvider.ARN
- }
-}
-
-// UserARN returns a function that returns the ARN of the given user.
-func UserARN() reference.ExtractValueFn { - return func(mg resource.Managed) string { - r, ok := mg.(*User) - if !ok { - return "" - } - return r.Status.AtProvider.ARN - } -} diff --git a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/register.go b/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/register.go deleted file mode 100644 index 4d3c6c78..00000000 --- a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/register.go +++ /dev/null @@ -1,134 +0,0 @@ -/* -Copyright 2019 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// +kubebuilder:object:generate=true -// +groupName=iam.aws.crossplane.io -// +versionName=v1beta1 - -package v1beta1 - -import ( - "reflect" - - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" -) - -// Package type metadata. -const ( - CRDGroup = "iam.aws.crossplane.io" - CRDVersion = "v1beta1" -) - -var ( - // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} -) - -// Role type metadata. -var ( - RoleKind = reflect.TypeOf(Role{}).Name() - RoleGroupKind = schema.GroupKind{Group: CRDGroup, Kind: RoleKind}.String() - RoleKindAPIVersion = RoleKind + "." + SchemeGroupVersion.String() - RoleGroupVersionKind = SchemeGroupVersion.WithKind(RoleKind) -) - -// RolePolicyAttachment type metadata. -var ( - RolePolicyAttachmentKind = reflect.TypeOf(RolePolicyAttachment{}).Name() - RolePolicyAttachmentGroupKind = schema.GroupKind{Group: CRDGroup, Kind: RolePolicyAttachmentKind}.String() - RolePolicyAttachmentKindAPIVersion = RolePolicyAttachmentKind + "." + SchemeGroupVersion.String() - RolePolicyAttachmentGroupVersionKind = SchemeGroupVersion.WithKind(RolePolicyAttachmentKind) -) - -// User type metadata. -var ( - UserKind = reflect.TypeOf(User{}).Name() - UserGroupKind = schema.GroupKind{Group: CRDGroup, Kind: UserKind}.String() - UserKindAPIVersion = UserKind + "." + SchemeGroupVersion.String() - UserGroupVersionKind = SchemeGroupVersion.WithKind(UserKind) -) - -// UserPolicyAttachment type metadata. -var ( - UserPolicyAttachmentKind = reflect.TypeOf(UserPolicyAttachment{}).Name() - UserPolicyAttachmentGroupKind = schema.GroupKind{Group: CRDGroup, Kind: UserPolicyAttachmentKind}.String() - UserPolicyAttachmentKindAPIVersion = UserPolicyAttachmentKind + "." + SchemeGroupVersion.String() - UserPolicyAttachmentGroupVersionKind = SchemeGroupVersion.WithKind(UserPolicyAttachmentKind) -) - -// Policy type metadata. -var ( - PolicyKind = reflect.TypeOf(Policy{}).Name() - PolicyGroupKind = schema.GroupKind{Group: CRDGroup, Kind: PolicyKind}.String() - PolicyKindAPIVersion = PolicyKind + "." 
+ SchemeGroupVersion.String() - PolicyGroupVersionKind = SchemeGroupVersion.WithKind(PolicyKind) -) - -// Group type metadata -var ( - GroupKind = reflect.TypeOf(Group{}).Name() - GroupGroupKind = schema.GroupKind{Group: CRDGroup, Kind: GroupKind}.String() - GroupKindAPIVersion = GroupKind + "." + SchemeGroupVersion.String() - GroupGroupVersionKind = SchemeGroupVersion.WithKind(GroupKind) -) - -// GroupUserMembership type metadata. -var ( - GroupUserMembershipKind = reflect.TypeOf(GroupUserMembership{}).Name() - GroupUserMembershipGroupKind = schema.GroupKind{Group: CRDGroup, Kind: GroupUserMembershipKind}.String() - GroupUserMembershipKindAPIVersion = GroupUserMembershipKind + "." + SchemeGroupVersion.String() - GroupUserMembershipGroupVersionKind = SchemeGroupVersion.WithKind(GroupUserMembershipKind) -) - -// GroupPolicyAttachment type metadata. -var ( - GroupPolicyAttachmentKind = reflect.TypeOf(GroupPolicyAttachment{}).Name() - GroupPolicyAttachmentGroupKind = schema.GroupKind{Group: CRDGroup, Kind: GroupPolicyAttachmentKind}.String() - GroupPolicyAttachmentKindAPIVersion = GroupPolicyAttachmentKind + "." + SchemeGroupVersion.String() - GroupPolicyAttachmentGroupVersionKind = SchemeGroupVersion.WithKind(GroupPolicyAttachmentKind) -) - -// AccessKey type metadata. -var ( - AccessKeyKind = reflect.TypeOf(AccessKey{}).Name() - AccessKeyGroupKind = schema.GroupKind{Group: CRDGroup, Kind: AccessKeyKind}.String() - AccessKeyKindAPIVersion = AccessKeyKind + "." + SchemeGroupVersion.String() - AccessKeyGroupVersionKind = SchemeGroupVersion.WithKind(AccessKeyKind) -) - -// OpenIDConnectProvider type metadata. -var ( - OpenIDConnectProviderKind = "OpenIDConnectProvider" - OpenIDConnectProviderGroupKind = schema.GroupKind{Group: CRDGroup, Kind: OpenIDConnectProviderKind}.String() - OpenIDConnectProviderKindAPIVersion = OpenIDConnectProviderKind + "." + SchemeGroupVersion.String() - OpenIDConnectProviderGroupVersionKind = SchemeGroupVersion.WithKind(OpenIDConnectProviderKind) -) - -func init() { - SchemeBuilder.Register(&Role{}, &RoleList{}) - SchemeBuilder.Register(&RolePolicyAttachment{}, &RolePolicyAttachmentList{}) - SchemeBuilder.Register(&User{}, &UserList{}) - SchemeBuilder.Register(&Policy{}, &PolicyList{}) - SchemeBuilder.Register(&UserPolicyAttachment{}, &UserPolicyAttachmentList{}) - SchemeBuilder.Register(&Group{}, &GroupList{}) - SchemeBuilder.Register(&GroupUserMembership{}, &GroupUserMembershipList{}) - SchemeBuilder.Register(&GroupPolicyAttachment{}, &GroupPolicyAttachmentList{}) - SchemeBuilder.Register(&AccessKey{}, &AccessKeyList{}) - SchemeBuilder.Register(&OpenIDConnectProvider{}, &OpenIDConnectProviderList{}) -} diff --git a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/role_types.go b/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/role_types.go deleted file mode 100644 index 5ba41976..00000000 --- a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/role_types.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright 2019 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
-)
-
-// Tag represents user-provided metadata that can be associated
-// with an IAM role. For more information about tagging,
-// see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html)
-// in the IAM User Guide.
-type Tag struct {
-
- // The key name that can be used to look up or retrieve the associated value.
- // For example, Department or Cost Center are common choices.
- Key string `json:"key"`
-
- // The value associated with this tag. For example, tags with a key name of
- // Department could have values such as Human Resources, Accounting, and Support.
- // Tags with a key name of Cost Center might have values that consist of the
- // number associated with the different cost centers in your company. Typically,
- // many resources have tags with the same key name but with different values.
- //
- // AWS always interprets the tag Value as a single string. If you need to store
- // an array, you can store comma-separated values in the string. However, you
- // must interpret the value in your code.
- // +optional
- Value string `json:"value,omitempty"`
-}
-
-// RoleParameters define the desired state of an AWS IAM Role.
-type RoleParameters struct {
-
- // AssumeRolePolicyDocument is the trust relationship policy document
- // that grants an entity permission to assume the role.
- // +immutable
- AssumeRolePolicyDocument string `json:"assumeRolePolicyDocument"`
-
- // Description is a description of the role.
- // +optional
- Description *string `json:"description,omitempty"`
-
- // MaxSessionDuration is the duration (in seconds) that you want to set for the specified
- // role. If no value is specified, the default maximum of one hour is applied. This setting can have a value from 1 hour to 12 hours.
- // Default: 3600
- // +optional
- MaxSessionDuration *int32 `json:"maxSessionDuration,omitempty"`
-
- // Path is the path to the role.
- // Default: /
- // +immutable
- // +optional
- Path *string `json:"path,omitempty"`
-
- // PermissionsBoundary is the ARN of the policy that is used to set the permissions boundary for the role.
- // +immutable
- // +optional
- PermissionsBoundary *string `json:"permissionsBoundary,omitempty"`
-
- // Tags. For more information about
- // tagging, see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html)
- // in the IAM User Guide.
- // +immutable
- // +optional
- Tags []Tag `json:"tags,omitempty"`
-}
-
-// An RoleSpec defines the desired state of an Role.
-type RoleSpec struct {
- xpv1.ResourceSpec `json:",inline"`
- ForProvider RoleParameters `json:"forProvider"`
-}
-
-// RoleExternalStatus keeps the state for the external resource
-type RoleExternalStatus struct {
- // ARN is the Amazon Resource Name (ARN) specifying the role. For more information
- // about ARNs and how to use them in policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
- // in the IAM User Guide.
- ARN string `json:"arn"`
-
- // RoleID is the stable and unique string identifying the role. For more information about
- // IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
- // in the Using IAM guide.
- RoleID string `json:"roleID"` -} - -// An RoleStatus represents the observed state of an Role. -type RoleStatus struct { - xpv1.ResourceStatus `json:",inline"` - AtProvider RoleExternalStatus `json:"atProvider,omitempty"` -} - -// +kubebuilder:object:root=true - -// An Role is a managed resource that represents an AWS IAM Role. -// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" -// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" -// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" -// +kubebuilder:subresource:status -// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} -type Role struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec RoleSpec `json:"spec"` - Status RoleStatus `json:"status,omitempty"` -} - -// +kubebuilder:object:root=true - -// RoleList contains a list of Roles -type RoleList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []Role `json:"items"` -} diff --git a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/rolepolicyattachment_types.go b/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/rolepolicyattachment_types.go deleted file mode 100644 index a1942680..00000000 --- a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/rolepolicyattachment_types.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright 2019 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" -) - -// RolePolicyAttachmentParameters define the desired state of an AWS IAM -// Role policy attachment. -type RolePolicyAttachmentParameters struct { - - // PolicyARN is the Amazon Resource Name (ARN) of the IAM policy you want to - // attach. - // +immutable - // +crossplane:generate:reference:type=Policy - // +crossplane:generate:reference:extractor=PolicyARN() - PolicyARN string `json:"policyArn,omitempty"` - - // PolicyARNRef references an Policy to retrieve its Policy ARN. - // +optional - PolicyARNRef *xpv1.Reference `json:"policyArnRef,omitempty"` - - // PolicyARNSelector selects a reference to an Policy to retrieve its - // Policy ARN - // +optional - PolicyARNSelector *xpv1.Selector `json:"policyArnSelector,omitempty"` - - // RoleName presents the name of the IAM role. - // +immutable - // +crossplane:generate:reference:type=Role - RoleName string `json:"roleName,omitempty"` - - // RoleNameRef references an Role to retrieve its Name - // +optional - RoleNameRef *xpv1.Reference `json:"roleNameRef,omitempty"` - - // RoleNameSelector selects a reference to an Role to retrieve its Name - // +optional - RoleNameSelector *xpv1.Selector `json:"roleNameSelector,omitempty"` -} - -// An RolePolicyAttachmentSpec defines the desired state of an -// RolePolicyAttachment. 
diff --git a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/rolepolicyattachment_types.go b/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/rolepolicyattachment_types.go
deleted file mode 100644
index a1942680..00000000
--- a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/rolepolicyattachment_types.go
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
-Copyright 2019 The Crossplane Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
-)
-
-// RolePolicyAttachmentParameters define the desired state of an AWS IAM
-// Role policy attachment.
-type RolePolicyAttachmentParameters struct {
-
-	// PolicyARN is the Amazon Resource Name (ARN) of the IAM policy you want to
-	// attach.
-	// +immutable
-	// +crossplane:generate:reference:type=Policy
-	// +crossplane:generate:reference:extractor=PolicyARN()
-	PolicyARN string `json:"policyArn,omitempty"`
-
-	// PolicyARNRef references a Policy to retrieve its Policy ARN.
-	// +optional
-	PolicyARNRef *xpv1.Reference `json:"policyArnRef,omitempty"`
-
-	// PolicyARNSelector selects a reference to a Policy to retrieve its
-	// Policy ARN.
-	// +optional
-	PolicyARNSelector *xpv1.Selector `json:"policyArnSelector,omitempty"`
-
-	// RoleName is the name of the IAM role.
-	// +immutable
-	// +crossplane:generate:reference:type=Role
-	RoleName string `json:"roleName,omitempty"`
-
-	// RoleNameRef references a Role to retrieve its name.
-	// +optional
-	RoleNameRef *xpv1.Reference `json:"roleNameRef,omitempty"`
-
-	// RoleNameSelector selects a reference to a Role to retrieve its name.
-	// +optional
-	RoleNameSelector *xpv1.Selector `json:"roleNameSelector,omitempty"`
-}
-
-// A RolePolicyAttachmentSpec defines the desired state of a
-// RolePolicyAttachment.
-type RolePolicyAttachmentSpec struct {
-	xpv1.ResourceSpec `json:",inline"`
-	ForProvider RolePolicyAttachmentParameters `json:"forProvider"`
-}
-
-// RolePolicyAttachmentExternalStatus keeps the state for the external resource.
-type RolePolicyAttachmentExternalStatus struct {
-	// AttachedPolicyARN is the ARN of the attached policy. If empty, the policy
-	// is not yet attached.
-	AttachedPolicyARN string `json:"attachedPolicyArn"`
-}
-
-// A RolePolicyAttachmentStatus represents the observed state of a
-// RolePolicyAttachment.
-type RolePolicyAttachmentStatus struct {
-	xpv1.ResourceStatus `json:",inline"`
-	AtProvider RolePolicyAttachmentExternalStatus `json:"atProvider,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// A RolePolicyAttachment is a managed resource that represents an AWS IAM
-// Role policy attachment.
-// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
-// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
-// +kubebuilder:printcolumn:name="ROLENAME",type="string",JSONPath=".spec.forProvider.roleName"
-// +kubebuilder:printcolumn:name="POLICYARN",type="string",JSONPath=".spec.forProvider.policyArn"
-// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
-// +kubebuilder:subresource:status
-// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws}
-type RolePolicyAttachment struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ObjectMeta `json:"metadata,omitempty"`
-
-	Spec RolePolicyAttachmentSpec `json:"spec"`
-	Status RolePolicyAttachmentStatus `json:"status,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// RolePolicyAttachmentList contains a list of RolePolicyAttachments.
-type RolePolicyAttachmentList struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ListMeta `json:"metadata,omitempty"`
-	Items []RolePolicyAttachment `json:"items"`
-}
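The Ref and Selector pairs above are how one managed resource resolves another at reconcile time instead of hard-coding names. A sketch of that pattern, reusing the hypothetical sample-role from the previous sketch; the policy ARN here is an arbitrary AWS-managed policy chosen for illustration.

    package main

    import (
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
    	iamv1beta1 "github.com/crossplane/provider-aws/apis/iam/v1beta1"
    )

    // exampleAttachment attaches an AWS-managed policy to a Role. The role
    // is resolved through RoleNameRef rather than by setting RoleName.
    func exampleAttachment() *iamv1beta1.RolePolicyAttachment {
    	return &iamv1beta1.RolePolicyAttachment{
    		ObjectMeta: metav1.ObjectMeta{Name: "sample-role-readonly"},
    		Spec: iamv1beta1.RolePolicyAttachmentSpec{
    			ForProvider: iamv1beta1.RolePolicyAttachmentParameters{
    				// PolicyARN is set directly here; it could instead be
    				// resolved via PolicyARNRef or PolicyARNSelector.
    				PolicyARN:   "arn:aws:iam::aws:policy/ReadOnlyAccess",
    				RoleNameRef: &xpv1.Reference{Name: "sample-role"},
    			},
    		},
    	}
    }

    func main() {
    	fmt.Println(exampleAttachment().Spec.ForProvider.PolicyARN)
    }

Normally only one of RoleName, RoleNameRef, or RoleNameSelector would be set; the reference resolver then fills RoleName from the referenced object.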
diff --git a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/user_types.go b/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/user_types.go
deleted file mode 100644
index d3b505f5..00000000
--- a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/user_types.go
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
-Copyright 2019 The Crossplane Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
-)
-
-// UserParameters define the desired state of an AWS IAM User.
-type UserParameters struct {
-	// The path for the user.
-	// +optional
-	Path *string `json:"path,omitempty"`
-
-	// The ARN of the policy that is used to set the permissions boundary for the
-	// user.
-	// +optional
-	PermissionsBoundary *string `json:"permissionsBoundary,omitempty"`
-
-	// A list of tags that you want to attach to the newly created user.
-	// +optional
-	Tags []Tag `json:"tags,omitempty"`
-}
-
-// A UserSpec defines the desired state of an IAM User.
-type UserSpec struct {
-	xpv1.ResourceSpec `json:",inline"`
-	ForProvider UserParameters `json:"forProvider"`
-}
-
-// UserObservation keeps the state for the external resource.
-type UserObservation struct {
-	// The Amazon Resource Name (ARN) that identifies the user.
-	ARN string `json:"arn,omitempty"`
-
-	// The stable and unique string identifying the user.
-	UserID string `json:"userId,omitempty"`
-}
-
-// A UserStatus represents the observed state of an IAM User.
-type UserStatus struct {
-	xpv1.ResourceStatus `json:",inline"`
-	AtProvider UserObservation `json:"atProvider,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// A User is a managed resource that represents an AWS IAM User.
-// +kubebuilder:printcolumn:name="ARN",type="string",JSONPath=".status.atProvider.arn"
-// +kubebuilder:printcolumn:name="ID",type="string",JSONPath=".status.atProvider.userId"
-// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
-// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
-// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
-// +kubebuilder:subresource:status
-// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws}
-type User struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ObjectMeta `json:"metadata,omitempty"`
-
-	Spec UserSpec `json:"spec"`
-	Status UserStatus `json:"status,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// UserList contains a list of IAM Users.
-type UserList struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ListMeta `json:"metadata,omitempty"`
-	Items []User `json:"items"`
-}
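The User types mirror the Role types above; a brief sketch with invented path, boundary ARN, and tag values. The UserPolicyAttachment types deleted just below follow the same Ref/Selector pattern shown earlier.

    package main

    import (
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    	iamv1beta1 "github.com/crossplane/provider-aws/apis/iam/v1beta1"
    )

    // exampleUser builds a User managed resource with an optional path and
    // a permissions boundary. Every concrete value is a placeholder.
    func exampleUser() *iamv1beta1.User {
    	path := "/engineering/"
    	boundary := "arn:aws:iam::123456789012:policy/sample-boundary"
    	return &iamv1beta1.User{
    		ObjectMeta: metav1.ObjectMeta{Name: "sample-user"},
    		Spec: iamv1beta1.UserSpec{
    			ForProvider: iamv1beta1.UserParameters{
    				Path:                &path,
    				PermissionsBoundary: &boundary,
    				Tags:                []iamv1beta1.Tag{{Key: "Department", Value: "Engineering"}},
    			},
    		},
    	}
    }

    func main() {
    	fmt.Println(exampleUser().GetName())
    }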
diff --git a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/userpolicyattachment_types.go b/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/userpolicyattachment_types.go
deleted file mode 100644
index dbcbe06e..00000000
--- a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/userpolicyattachment_types.go
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
-Copyright 2019 The Crossplane Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
-)
-
-// UserPolicyAttachmentParameters define the desired state of an AWS UserPolicyAttachment.
-type UserPolicyAttachmentParameters struct {
-
-	// PolicyARN is the Amazon Resource Name (ARN) of the IAM policy you want to
-	// attach.
-	// +immutable
-	// +crossplane:generate:reference:type=Policy
-	// +crossplane:generate:reference:extractor=PolicyARN()
-	PolicyARN string `json:"policyArn,omitempty"`
-
-	// PolicyARNRef references a Policy to retrieve its Policy ARN.
-	// +optional
-	PolicyARNRef *xpv1.Reference `json:"policyArnRef,omitempty"`
-
-	// PolicyARNSelector selects a reference to a Policy to retrieve its
-	// Policy ARN.
-	// +optional
-	PolicyARNSelector *xpv1.Selector `json:"policyArnSelector,omitempty"`
-
-	// UserName is the name of the IAM user.
-	// +immutable
-	// +crossplane:generate:reference:type=User
-	UserName string `json:"userName,omitempty"`
-
-	// UserNameRef references a User to retrieve its userName.
-	// +optional
-	UserNameRef *xpv1.Reference `json:"userNameRef,omitempty"`
-
-	// UserNameSelector selects a reference to a User to retrieve its userName.
-	// +optional
-	UserNameSelector *xpv1.Selector `json:"userNameSelector,omitempty"`
-}
-
-// A UserPolicyAttachmentSpec defines the desired state of a
-// UserPolicyAttachment.
-type UserPolicyAttachmentSpec struct {
-	xpv1.ResourceSpec `json:",inline"`
-	ForProvider UserPolicyAttachmentParameters `json:"forProvider"`
-}
-
-// UserPolicyAttachmentObservation keeps the state for the external resource.
-type UserPolicyAttachmentObservation struct {
-	// AttachedPolicyARN is the ARN of the attached policy. If empty, the policy
-	// is not yet attached.
-	AttachedPolicyARN string `json:"attachedPolicyArn"`
-}
-
-// A UserPolicyAttachmentStatus represents the observed state of a
-// UserPolicyAttachment.
-type UserPolicyAttachmentStatus struct {
-	xpv1.ResourceStatus `json:",inline"`
-	AtProvider UserPolicyAttachmentObservation `json:"atProvider,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// A UserPolicyAttachment is a managed resource that represents an AWS IAM
-// User policy attachment.
-// +kubebuilder:printcolumn:name="USERNAME",type="string",JSONPath=".spec.forProvider.userName"
-// +kubebuilder:printcolumn:name="POLICYARN",type="string",JSONPath=".spec.forProvider.policyArn"
-// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
-// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
-// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
-// +kubebuilder:subresource:status
-// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws}
-type UserPolicyAttachment struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ObjectMeta `json:"metadata,omitempty"`
-
-	Spec UserPolicyAttachmentSpec `json:"spec"`
-	Status UserPolicyAttachmentStatus `json:"status,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// UserPolicyAttachmentList contains a list of UserPolicyAttachments.
-type UserPolicyAttachmentList struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ListMeta `json:"metadata,omitempty"`
-	Items []UserPolicyAttachment `json:"items"`
-}
diff --git a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/zz_generated.deepcopy.go
deleted file mode 100644
index b88d5e2b..00000000
--- a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,1419 +0,0 @@
-// +build !ignore_autogenerated
-
-/*
-Copyright 2021 The Crossplane Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "github.com/crossplane/crossplane-runtime/apis/common/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AccessKey) DeepCopyInto(out *AccessKey) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessKey. -func (in *AccessKey) DeepCopy() *AccessKey { - if in == nil { - return nil - } - out := new(AccessKey) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AccessKey) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AccessKeyList) DeepCopyInto(out *AccessKeyList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]AccessKey, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessKeyList. -func (in *AccessKeyList) DeepCopy() *AccessKeyList { - if in == nil { - return nil - } - out := new(AccessKeyList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AccessKeyList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AccessKeyParameters) DeepCopyInto(out *AccessKeyParameters) { - *out = *in - if in.UsernameRef != nil { - in, out := &in.UsernameRef, &out.UsernameRef - *out = new(v1.Reference) - **out = **in - } - if in.UsernameSelector != nil { - in, out := &in.UsernameSelector, &out.UsernameSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessKeyParameters. -func (in *AccessKeyParameters) DeepCopy() *AccessKeyParameters { - if in == nil { - return nil - } - out := new(AccessKeyParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AccessKeySpec) DeepCopyInto(out *AccessKeySpec) { - *out = *in - in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) - in.ForProvider.DeepCopyInto(&out.ForProvider) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessKeySpec. 
-func (in *AccessKeySpec) DeepCopy() *AccessKeySpec { - if in == nil { - return nil - } - out := new(AccessKeySpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AccessKeyStatus) DeepCopyInto(out *AccessKeyStatus) { - *out = *in - in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessKeyStatus. -func (in *AccessKeyStatus) DeepCopy() *AccessKeyStatus { - if in == nil { - return nil - } - out := new(AccessKeyStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Group) DeepCopyInto(out *Group) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Group. -func (in *Group) DeepCopy() *Group { - if in == nil { - return nil - } - out := new(Group) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Group) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GroupList) DeepCopyInto(out *GroupList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Group, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupList. -func (in *GroupList) DeepCopy() *GroupList { - if in == nil { - return nil - } - out := new(GroupList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *GroupList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GroupObservation) DeepCopyInto(out *GroupObservation) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupObservation. -func (in *GroupObservation) DeepCopy() *GroupObservation { - if in == nil { - return nil - } - out := new(GroupObservation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GroupParameters) DeepCopyInto(out *GroupParameters) { - *out = *in - if in.Path != nil { - in, out := &in.Path, &out.Path - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupParameters. -func (in *GroupParameters) DeepCopy() *GroupParameters { - if in == nil { - return nil - } - out := new(GroupParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *GroupPolicyAttachment) DeepCopyInto(out *GroupPolicyAttachment) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupPolicyAttachment. -func (in *GroupPolicyAttachment) DeepCopy() *GroupPolicyAttachment { - if in == nil { - return nil - } - out := new(GroupPolicyAttachment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *GroupPolicyAttachment) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GroupPolicyAttachmentList) DeepCopyInto(out *GroupPolicyAttachmentList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]GroupPolicyAttachment, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupPolicyAttachmentList. -func (in *GroupPolicyAttachmentList) DeepCopy() *GroupPolicyAttachmentList { - if in == nil { - return nil - } - out := new(GroupPolicyAttachmentList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *GroupPolicyAttachmentList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GroupPolicyAttachmentObservation) DeepCopyInto(out *GroupPolicyAttachmentObservation) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupPolicyAttachmentObservation. -func (in *GroupPolicyAttachmentObservation) DeepCopy() *GroupPolicyAttachmentObservation { - if in == nil { - return nil - } - out := new(GroupPolicyAttachmentObservation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GroupPolicyAttachmentParameters) DeepCopyInto(out *GroupPolicyAttachmentParameters) { - *out = *in - if in.PolicyARNRef != nil { - in, out := &in.PolicyARNRef, &out.PolicyARNRef - *out = new(v1.Reference) - **out = **in - } - if in.PolicyARNSelector != nil { - in, out := &in.PolicyARNSelector, &out.PolicyARNSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) - } - if in.GroupNameRef != nil { - in, out := &in.GroupNameRef, &out.GroupNameRef - *out = new(v1.Reference) - **out = **in - } - if in.GroupNameSelector != nil { - in, out := &in.GroupNameSelector, &out.GroupNameSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupPolicyAttachmentParameters. 
-func (in *GroupPolicyAttachmentParameters) DeepCopy() *GroupPolicyAttachmentParameters { - if in == nil { - return nil - } - out := new(GroupPolicyAttachmentParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GroupPolicyAttachmentSpec) DeepCopyInto(out *GroupPolicyAttachmentSpec) { - *out = *in - in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) - in.ForProvider.DeepCopyInto(&out.ForProvider) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupPolicyAttachmentSpec. -func (in *GroupPolicyAttachmentSpec) DeepCopy() *GroupPolicyAttachmentSpec { - if in == nil { - return nil - } - out := new(GroupPolicyAttachmentSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GroupPolicyAttachmentStatus) DeepCopyInto(out *GroupPolicyAttachmentStatus) { - *out = *in - in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) - out.AtProvider = in.AtProvider -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupPolicyAttachmentStatus. -func (in *GroupPolicyAttachmentStatus) DeepCopy() *GroupPolicyAttachmentStatus { - if in == nil { - return nil - } - out := new(GroupPolicyAttachmentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GroupSpec) DeepCopyInto(out *GroupSpec) { - *out = *in - in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) - in.ForProvider.DeepCopyInto(&out.ForProvider) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupSpec. -func (in *GroupSpec) DeepCopy() *GroupSpec { - if in == nil { - return nil - } - out := new(GroupSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GroupStatus) DeepCopyInto(out *GroupStatus) { - *out = *in - in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) - out.AtProvider = in.AtProvider -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupStatus. -func (in *GroupStatus) DeepCopy() *GroupStatus { - if in == nil { - return nil - } - out := new(GroupStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GroupUserMembership) DeepCopyInto(out *GroupUserMembership) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupUserMembership. -func (in *GroupUserMembership) DeepCopy() *GroupUserMembership { - if in == nil { - return nil - } - out := new(GroupUserMembership) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *GroupUserMembership) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *GroupUserMembershipList) DeepCopyInto(out *GroupUserMembershipList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]GroupUserMembership, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupUserMembershipList. -func (in *GroupUserMembershipList) DeepCopy() *GroupUserMembershipList { - if in == nil { - return nil - } - out := new(GroupUserMembershipList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *GroupUserMembershipList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GroupUserMembershipObservation) DeepCopyInto(out *GroupUserMembershipObservation) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupUserMembershipObservation. -func (in *GroupUserMembershipObservation) DeepCopy() *GroupUserMembershipObservation { - if in == nil { - return nil - } - out := new(GroupUserMembershipObservation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GroupUserMembershipParameters) DeepCopyInto(out *GroupUserMembershipParameters) { - *out = *in - if in.GroupNameRef != nil { - in, out := &in.GroupNameRef, &out.GroupNameRef - *out = new(v1.Reference) - **out = **in - } - if in.GroupNameSelector != nil { - in, out := &in.GroupNameSelector, &out.GroupNameSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) - } - if in.UserNameRef != nil { - in, out := &in.UserNameRef, &out.UserNameRef - *out = new(v1.Reference) - **out = **in - } - if in.UserNameSelector != nil { - in, out := &in.UserNameSelector, &out.UserNameSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupUserMembershipParameters. -func (in *GroupUserMembershipParameters) DeepCopy() *GroupUserMembershipParameters { - if in == nil { - return nil - } - out := new(GroupUserMembershipParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GroupUserMembershipSpec) DeepCopyInto(out *GroupUserMembershipSpec) { - *out = *in - in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) - in.ForProvider.DeepCopyInto(&out.ForProvider) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupUserMembershipSpec. -func (in *GroupUserMembershipSpec) DeepCopy() *GroupUserMembershipSpec { - if in == nil { - return nil - } - out := new(GroupUserMembershipSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *GroupUserMembershipStatus) DeepCopyInto(out *GroupUserMembershipStatus) { - *out = *in - in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) - out.AtProvider = in.AtProvider -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupUserMembershipStatus. -func (in *GroupUserMembershipStatus) DeepCopy() *GroupUserMembershipStatus { - if in == nil { - return nil - } - out := new(GroupUserMembershipStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenIDConnectProvider) DeepCopyInto(out *OpenIDConnectProvider) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectProvider. -func (in *OpenIDConnectProvider) DeepCopy() *OpenIDConnectProvider { - if in == nil { - return nil - } - out := new(OpenIDConnectProvider) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *OpenIDConnectProvider) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenIDConnectProviderList) DeepCopyInto(out *OpenIDConnectProviderList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]OpenIDConnectProvider, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectProviderList. -func (in *OpenIDConnectProviderList) DeepCopy() *OpenIDConnectProviderList { - if in == nil { - return nil - } - out := new(OpenIDConnectProviderList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *OpenIDConnectProviderList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenIDConnectProviderObservation) DeepCopyInto(out *OpenIDConnectProviderObservation) { - *out = *in - if in.CreateDate != nil { - in, out := &in.CreateDate, &out.CreateDate - *out = (*in).DeepCopy() - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectProviderObservation. -func (in *OpenIDConnectProviderObservation) DeepCopy() *OpenIDConnectProviderObservation { - if in == nil { - return nil - } - out := new(OpenIDConnectProviderObservation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *OpenIDConnectProviderParameters) DeepCopyInto(out *OpenIDConnectProviderParameters) { - *out = *in - if in.ClientIDList != nil { - in, out := &in.ClientIDList, &out.ClientIDList - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ThumbprintList != nil { - in, out := &in.ThumbprintList, &out.ThumbprintList - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectProviderParameters. -func (in *OpenIDConnectProviderParameters) DeepCopy() *OpenIDConnectProviderParameters { - if in == nil { - return nil - } - out := new(OpenIDConnectProviderParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenIDConnectProviderSpec) DeepCopyInto(out *OpenIDConnectProviderSpec) { - *out = *in - in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) - in.ForProvider.DeepCopyInto(&out.ForProvider) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectProviderSpec. -func (in *OpenIDConnectProviderSpec) DeepCopy() *OpenIDConnectProviderSpec { - if in == nil { - return nil - } - out := new(OpenIDConnectProviderSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenIDConnectProviderStatus) DeepCopyInto(out *OpenIDConnectProviderStatus) { - *out = *in - in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) - in.AtProvider.DeepCopyInto(&out.AtProvider) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectProviderStatus. -func (in *OpenIDConnectProviderStatus) DeepCopy() *OpenIDConnectProviderStatus { - if in == nil { - return nil - } - out := new(OpenIDConnectProviderStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Policy) DeepCopyInto(out *Policy) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. -func (in *Policy) DeepCopy() *Policy { - if in == nil { - return nil - } - out := new(Policy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Policy) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PolicyList) DeepCopyInto(out *PolicyList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Policy, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyList. 
-func (in *PolicyList) DeepCopy() *PolicyList { - if in == nil { - return nil - } - out := new(PolicyList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PolicyList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PolicyObservation) DeepCopyInto(out *PolicyObservation) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyObservation. -func (in *PolicyObservation) DeepCopy() *PolicyObservation { - if in == nil { - return nil - } - out := new(PolicyObservation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PolicyParameters) DeepCopyInto(out *PolicyParameters) { - *out = *in - if in.Description != nil { - in, out := &in.Description, &out.Description - *out = new(string) - **out = **in - } - if in.Path != nil { - in, out := &in.Path, &out.Path - *out = new(string) - **out = **in - } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]Tag, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyParameters. -func (in *PolicyParameters) DeepCopy() *PolicyParameters { - if in == nil { - return nil - } - out := new(PolicyParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PolicySpec) DeepCopyInto(out *PolicySpec) { - *out = *in - in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) - in.ForProvider.DeepCopyInto(&out.ForProvider) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicySpec. -func (in *PolicySpec) DeepCopy() *PolicySpec { - if in == nil { - return nil - } - out := new(PolicySpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PolicyStatus) DeepCopyInto(out *PolicyStatus) { - *out = *in - in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) - out.AtProvider = in.AtProvider -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyStatus. -func (in *PolicyStatus) DeepCopy() *PolicyStatus { - if in == nil { - return nil - } - out := new(PolicyStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Role) DeepCopyInto(out *Role) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role. -func (in *Role) DeepCopy() *Role { - if in == nil { - return nil - } - out := new(Role) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *Role) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RoleExternalStatus) DeepCopyInto(out *RoleExternalStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleExternalStatus. -func (in *RoleExternalStatus) DeepCopy() *RoleExternalStatus { - if in == nil { - return nil - } - out := new(RoleExternalStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RoleList) DeepCopyInto(out *RoleList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Role, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleList. -func (in *RoleList) DeepCopy() *RoleList { - if in == nil { - return nil - } - out := new(RoleList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RoleList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RoleParameters) DeepCopyInto(out *RoleParameters) { - *out = *in - if in.Description != nil { - in, out := &in.Description, &out.Description - *out = new(string) - **out = **in - } - if in.MaxSessionDuration != nil { - in, out := &in.MaxSessionDuration, &out.MaxSessionDuration - *out = new(int32) - **out = **in - } - if in.Path != nil { - in, out := &in.Path, &out.Path - *out = new(string) - **out = **in - } - if in.PermissionsBoundary != nil { - in, out := &in.PermissionsBoundary, &out.PermissionsBoundary - *out = new(string) - **out = **in - } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]Tag, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleParameters. -func (in *RoleParameters) DeepCopy() *RoleParameters { - if in == nil { - return nil - } - out := new(RoleParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RolePolicyAttachment) DeepCopyInto(out *RolePolicyAttachment) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolePolicyAttachment. -func (in *RolePolicyAttachment) DeepCopy() *RolePolicyAttachment { - if in == nil { - return nil - } - out := new(RolePolicyAttachment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RolePolicyAttachment) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *RolePolicyAttachmentExternalStatus) DeepCopyInto(out *RolePolicyAttachmentExternalStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolePolicyAttachmentExternalStatus. -func (in *RolePolicyAttachmentExternalStatus) DeepCopy() *RolePolicyAttachmentExternalStatus { - if in == nil { - return nil - } - out := new(RolePolicyAttachmentExternalStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RolePolicyAttachmentList) DeepCopyInto(out *RolePolicyAttachmentList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]RolePolicyAttachment, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolePolicyAttachmentList. -func (in *RolePolicyAttachmentList) DeepCopy() *RolePolicyAttachmentList { - if in == nil { - return nil - } - out := new(RolePolicyAttachmentList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RolePolicyAttachmentList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RolePolicyAttachmentParameters) DeepCopyInto(out *RolePolicyAttachmentParameters) { - *out = *in - if in.PolicyARNRef != nil { - in, out := &in.PolicyARNRef, &out.PolicyARNRef - *out = new(v1.Reference) - **out = **in - } - if in.PolicyARNSelector != nil { - in, out := &in.PolicyARNSelector, &out.PolicyARNSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) - } - if in.RoleNameRef != nil { - in, out := &in.RoleNameRef, &out.RoleNameRef - *out = new(v1.Reference) - **out = **in - } - if in.RoleNameSelector != nil { - in, out := &in.RoleNameSelector, &out.RoleNameSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolePolicyAttachmentParameters. -func (in *RolePolicyAttachmentParameters) DeepCopy() *RolePolicyAttachmentParameters { - if in == nil { - return nil - } - out := new(RolePolicyAttachmentParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RolePolicyAttachmentSpec) DeepCopyInto(out *RolePolicyAttachmentSpec) { - *out = *in - in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) - in.ForProvider.DeepCopyInto(&out.ForProvider) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolePolicyAttachmentSpec. -func (in *RolePolicyAttachmentSpec) DeepCopy() *RolePolicyAttachmentSpec { - if in == nil { - return nil - } - out := new(RolePolicyAttachmentSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RolePolicyAttachmentStatus) DeepCopyInto(out *RolePolicyAttachmentStatus) { - *out = *in - in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) - out.AtProvider = in.AtProvider -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolePolicyAttachmentStatus. -func (in *RolePolicyAttachmentStatus) DeepCopy() *RolePolicyAttachmentStatus { - if in == nil { - return nil - } - out := new(RolePolicyAttachmentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RoleSpec) DeepCopyInto(out *RoleSpec) { - *out = *in - in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) - in.ForProvider.DeepCopyInto(&out.ForProvider) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleSpec. -func (in *RoleSpec) DeepCopy() *RoleSpec { - if in == nil { - return nil - } - out := new(RoleSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RoleStatus) DeepCopyInto(out *RoleStatus) { - *out = *in - in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) - out.AtProvider = in.AtProvider -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleStatus. -func (in *RoleStatus) DeepCopy() *RoleStatus { - if in == nil { - return nil - } - out := new(RoleStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Tag) DeepCopyInto(out *Tag) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tag. -func (in *Tag) DeepCopy() *Tag { - if in == nil { - return nil - } - out := new(Tag) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *User) DeepCopyInto(out *User) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new User. -func (in *User) DeepCopy() *User { - if in == nil { - return nil - } - out := new(User) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *User) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UserList) DeepCopyInto(out *UserList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]User, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserList. -func (in *UserList) DeepCopy() *UserList { - if in == nil { - return nil - } - out := new(UserList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *UserList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UserObservation) DeepCopyInto(out *UserObservation) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserObservation. -func (in *UserObservation) DeepCopy() *UserObservation { - if in == nil { - return nil - } - out := new(UserObservation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UserParameters) DeepCopyInto(out *UserParameters) { - *out = *in - if in.Path != nil { - in, out := &in.Path, &out.Path - *out = new(string) - **out = **in - } - if in.PermissionsBoundary != nil { - in, out := &in.PermissionsBoundary, &out.PermissionsBoundary - *out = new(string) - **out = **in - } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]Tag, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserParameters. -func (in *UserParameters) DeepCopy() *UserParameters { - if in == nil { - return nil - } - out := new(UserParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UserPolicyAttachment) DeepCopyInto(out *UserPolicyAttachment) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPolicyAttachment. -func (in *UserPolicyAttachment) DeepCopy() *UserPolicyAttachment { - if in == nil { - return nil - } - out := new(UserPolicyAttachment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *UserPolicyAttachment) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UserPolicyAttachmentList) DeepCopyInto(out *UserPolicyAttachmentList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]UserPolicyAttachment, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPolicyAttachmentList. -func (in *UserPolicyAttachmentList) DeepCopy() *UserPolicyAttachmentList { - if in == nil { - return nil - } - out := new(UserPolicyAttachmentList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *UserPolicyAttachmentList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *UserPolicyAttachmentObservation) DeepCopyInto(out *UserPolicyAttachmentObservation) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPolicyAttachmentObservation. -func (in *UserPolicyAttachmentObservation) DeepCopy() *UserPolicyAttachmentObservation { - if in == nil { - return nil - } - out := new(UserPolicyAttachmentObservation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UserPolicyAttachmentParameters) DeepCopyInto(out *UserPolicyAttachmentParameters) { - *out = *in - if in.PolicyARNRef != nil { - in, out := &in.PolicyARNRef, &out.PolicyARNRef - *out = new(v1.Reference) - **out = **in - } - if in.PolicyARNSelector != nil { - in, out := &in.PolicyARNSelector, &out.PolicyARNSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) - } - if in.UserNameRef != nil { - in, out := &in.UserNameRef, &out.UserNameRef - *out = new(v1.Reference) - **out = **in - } - if in.UserNameSelector != nil { - in, out := &in.UserNameSelector, &out.UserNameSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPolicyAttachmentParameters. -func (in *UserPolicyAttachmentParameters) DeepCopy() *UserPolicyAttachmentParameters { - if in == nil { - return nil - } - out := new(UserPolicyAttachmentParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UserPolicyAttachmentSpec) DeepCopyInto(out *UserPolicyAttachmentSpec) { - *out = *in - in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) - in.ForProvider.DeepCopyInto(&out.ForProvider) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPolicyAttachmentSpec. -func (in *UserPolicyAttachmentSpec) DeepCopy() *UserPolicyAttachmentSpec { - if in == nil { - return nil - } - out := new(UserPolicyAttachmentSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UserPolicyAttachmentStatus) DeepCopyInto(out *UserPolicyAttachmentStatus) { - *out = *in - in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) - out.AtProvider = in.AtProvider -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPolicyAttachmentStatus. -func (in *UserPolicyAttachmentStatus) DeepCopy() *UserPolicyAttachmentStatus { - if in == nil { - return nil - } - out := new(UserPolicyAttachmentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UserSpec) DeepCopyInto(out *UserSpec) { - *out = *in - in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) - in.ForProvider.DeepCopyInto(&out.ForProvider) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSpec. -func (in *UserSpec) DeepCopy() *UserSpec { - if in == nil { - return nil - } - out := new(UserSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *UserStatus) DeepCopyInto(out *UserStatus) { - *out = *in - in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) - out.AtProvider = in.AtProvider -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserStatus. -func (in *UserStatus) DeepCopy() *UserStatus { - if in == nil { - return nil - } - out := new(UserStatus) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/zz_generated.managed.go b/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/zz_generated.managed.go deleted file mode 100644 index d533231e..00000000 --- a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/zz_generated.managed.go +++ /dev/null @@ -1,581 +0,0 @@ -/* -Copyright 2021 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by angryjet. DO NOT EDIT. - -package v1beta1 - -import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - -// GetCondition of this AccessKey. -func (mg *AccessKey) GetCondition(ct xpv1.ConditionType) xpv1.Condition { - return mg.Status.GetCondition(ct) -} - -// GetDeletionPolicy of this AccessKey. -func (mg *AccessKey) GetDeletionPolicy() xpv1.DeletionPolicy { - return mg.Spec.DeletionPolicy -} - -// GetProviderConfigReference of this AccessKey. -func (mg *AccessKey) GetProviderConfigReference() *xpv1.Reference { - return mg.Spec.ProviderConfigReference -} - -/* -GetProviderReference of this AccessKey. -Deprecated: Use GetProviderConfigReference. -*/ -func (mg *AccessKey) GetProviderReference() *xpv1.Reference { - return mg.Spec.ProviderReference -} - -// GetWriteConnectionSecretToReference of this AccessKey. -func (mg *AccessKey) GetWriteConnectionSecretToReference() *xpv1.SecretReference { - return mg.Spec.WriteConnectionSecretToReference -} - -// SetConditions of this AccessKey. -func (mg *AccessKey) SetConditions(c ...xpv1.Condition) { - mg.Status.SetConditions(c...) -} - -// SetDeletionPolicy of this AccessKey. -func (mg *AccessKey) SetDeletionPolicy(r xpv1.DeletionPolicy) { - mg.Spec.DeletionPolicy = r -} - -// SetProviderConfigReference of this AccessKey. -func (mg *AccessKey) SetProviderConfigReference(r *xpv1.Reference) { - mg.Spec.ProviderConfigReference = r -} - -/* -SetProviderReference of this AccessKey. -Deprecated: Use SetProviderConfigReference. -*/ -func (mg *AccessKey) SetProviderReference(r *xpv1.Reference) { - mg.Spec.ProviderReference = r -} - -// SetWriteConnectionSecretToReference of this AccessKey. -func (mg *AccessKey) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { - mg.Spec.WriteConnectionSecretToReference = r -} - -// GetCondition of this Group. -func (mg *Group) GetCondition(ct xpv1.ConditionType) xpv1.Condition { - return mg.Status.GetCondition(ct) -} - -// GetDeletionPolicy of this Group. -func (mg *Group) GetDeletionPolicy() xpv1.DeletionPolicy { - return mg.Spec.DeletionPolicy -} - -// GetProviderConfigReference of this Group. 
-func (mg *Group) GetProviderConfigReference() *xpv1.Reference { - return mg.Spec.ProviderConfigReference -} - -/* -GetProviderReference of this Group. -Deprecated: Use GetProviderConfigReference. -*/ -func (mg *Group) GetProviderReference() *xpv1.Reference { - return mg.Spec.ProviderReference -} - -// GetWriteConnectionSecretToReference of this Group. -func (mg *Group) GetWriteConnectionSecretToReference() *xpv1.SecretReference { - return mg.Spec.WriteConnectionSecretToReference -} - -// SetConditions of this Group. -func (mg *Group) SetConditions(c ...xpv1.Condition) { - mg.Status.SetConditions(c...) -} - -// SetDeletionPolicy of this Group. -func (mg *Group) SetDeletionPolicy(r xpv1.DeletionPolicy) { - mg.Spec.DeletionPolicy = r -} - -// SetProviderConfigReference of this Group. -func (mg *Group) SetProviderConfigReference(r *xpv1.Reference) { - mg.Spec.ProviderConfigReference = r -} - -/* -SetProviderReference of this Group. -Deprecated: Use SetProviderConfigReference. -*/ -func (mg *Group) SetProviderReference(r *xpv1.Reference) { - mg.Spec.ProviderReference = r -} - -// SetWriteConnectionSecretToReference of this Group. -func (mg *Group) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { - mg.Spec.WriteConnectionSecretToReference = r -} - -// GetCondition of this GroupPolicyAttachment. -func (mg *GroupPolicyAttachment) GetCondition(ct xpv1.ConditionType) xpv1.Condition { - return mg.Status.GetCondition(ct) -} - -// GetDeletionPolicy of this GroupPolicyAttachment. -func (mg *GroupPolicyAttachment) GetDeletionPolicy() xpv1.DeletionPolicy { - return mg.Spec.DeletionPolicy -} - -// GetProviderConfigReference of this GroupPolicyAttachment. -func (mg *GroupPolicyAttachment) GetProviderConfigReference() *xpv1.Reference { - return mg.Spec.ProviderConfigReference -} - -/* -GetProviderReference of this GroupPolicyAttachment. -Deprecated: Use GetProviderConfigReference. -*/ -func (mg *GroupPolicyAttachment) GetProviderReference() *xpv1.Reference { - return mg.Spec.ProviderReference -} - -// GetWriteConnectionSecretToReference of this GroupPolicyAttachment. -func (mg *GroupPolicyAttachment) GetWriteConnectionSecretToReference() *xpv1.SecretReference { - return mg.Spec.WriteConnectionSecretToReference -} - -// SetConditions of this GroupPolicyAttachment. -func (mg *GroupPolicyAttachment) SetConditions(c ...xpv1.Condition) { - mg.Status.SetConditions(c...) -} - -// SetDeletionPolicy of this GroupPolicyAttachment. -func (mg *GroupPolicyAttachment) SetDeletionPolicy(r xpv1.DeletionPolicy) { - mg.Spec.DeletionPolicy = r -} - -// SetProviderConfigReference of this GroupPolicyAttachment. -func (mg *GroupPolicyAttachment) SetProviderConfigReference(r *xpv1.Reference) { - mg.Spec.ProviderConfigReference = r -} - -/* -SetProviderReference of this GroupPolicyAttachment. -Deprecated: Use SetProviderConfigReference. -*/ -func (mg *GroupPolicyAttachment) SetProviderReference(r *xpv1.Reference) { - mg.Spec.ProviderReference = r -} - -// SetWriteConnectionSecretToReference of this GroupPolicyAttachment. -func (mg *GroupPolicyAttachment) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { - mg.Spec.WriteConnectionSecretToReference = r -} - -// GetCondition of this GroupUserMembership. -func (mg *GroupUserMembership) GetCondition(ct xpv1.ConditionType) xpv1.Condition { - return mg.Status.GetCondition(ct) -} - -// GetDeletionPolicy of this GroupUserMembership. 
-func (mg *GroupUserMembership) GetDeletionPolicy() xpv1.DeletionPolicy { - return mg.Spec.DeletionPolicy -} - -// GetProviderConfigReference of this GroupUserMembership. -func (mg *GroupUserMembership) GetProviderConfigReference() *xpv1.Reference { - return mg.Spec.ProviderConfigReference -} - -/* -GetProviderReference of this GroupUserMembership. -Deprecated: Use GetProviderConfigReference. -*/ -func (mg *GroupUserMembership) GetProviderReference() *xpv1.Reference { - return mg.Spec.ProviderReference -} - -// GetWriteConnectionSecretToReference of this GroupUserMembership. -func (mg *GroupUserMembership) GetWriteConnectionSecretToReference() *xpv1.SecretReference { - return mg.Spec.WriteConnectionSecretToReference -} - -// SetConditions of this GroupUserMembership. -func (mg *GroupUserMembership) SetConditions(c ...xpv1.Condition) { - mg.Status.SetConditions(c...) -} - -// SetDeletionPolicy of this GroupUserMembership. -func (mg *GroupUserMembership) SetDeletionPolicy(r xpv1.DeletionPolicy) { - mg.Spec.DeletionPolicy = r -} - -// SetProviderConfigReference of this GroupUserMembership. -func (mg *GroupUserMembership) SetProviderConfigReference(r *xpv1.Reference) { - mg.Spec.ProviderConfigReference = r -} - -/* -SetProviderReference of this GroupUserMembership. -Deprecated: Use SetProviderConfigReference. -*/ -func (mg *GroupUserMembership) SetProviderReference(r *xpv1.Reference) { - mg.Spec.ProviderReference = r -} - -// SetWriteConnectionSecretToReference of this GroupUserMembership. -func (mg *GroupUserMembership) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { - mg.Spec.WriteConnectionSecretToReference = r -} - -// GetCondition of this OpenIDConnectProvider. -func (mg *OpenIDConnectProvider) GetCondition(ct xpv1.ConditionType) xpv1.Condition { - return mg.Status.GetCondition(ct) -} - -// GetDeletionPolicy of this OpenIDConnectProvider. -func (mg *OpenIDConnectProvider) GetDeletionPolicy() xpv1.DeletionPolicy { - return mg.Spec.DeletionPolicy -} - -// GetProviderConfigReference of this OpenIDConnectProvider. -func (mg *OpenIDConnectProvider) GetProviderConfigReference() *xpv1.Reference { - return mg.Spec.ProviderConfigReference -} - -/* -GetProviderReference of this OpenIDConnectProvider. -Deprecated: Use GetProviderConfigReference. -*/ -func (mg *OpenIDConnectProvider) GetProviderReference() *xpv1.Reference { - return mg.Spec.ProviderReference -} - -// GetWriteConnectionSecretToReference of this OpenIDConnectProvider. -func (mg *OpenIDConnectProvider) GetWriteConnectionSecretToReference() *xpv1.SecretReference { - return mg.Spec.WriteConnectionSecretToReference -} - -// SetConditions of this OpenIDConnectProvider. -func (mg *OpenIDConnectProvider) SetConditions(c ...xpv1.Condition) { - mg.Status.SetConditions(c...) -} - -// SetDeletionPolicy of this OpenIDConnectProvider. -func (mg *OpenIDConnectProvider) SetDeletionPolicy(r xpv1.DeletionPolicy) { - mg.Spec.DeletionPolicy = r -} - -// SetProviderConfigReference of this OpenIDConnectProvider. -func (mg *OpenIDConnectProvider) SetProviderConfigReference(r *xpv1.Reference) { - mg.Spec.ProviderConfigReference = r -} - -/* -SetProviderReference of this OpenIDConnectProvider. -Deprecated: Use SetProviderConfigReference. -*/ -func (mg *OpenIDConnectProvider) SetProviderReference(r *xpv1.Reference) { - mg.Spec.ProviderReference = r -} - -// SetWriteConnectionSecretToReference of this OpenIDConnectProvider. 
-func (mg *OpenIDConnectProvider) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { - mg.Spec.WriteConnectionSecretToReference = r -} - -// GetCondition of this Policy. -func (mg *Policy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { - return mg.Status.GetCondition(ct) -} - -// GetDeletionPolicy of this Policy. -func (mg *Policy) GetDeletionPolicy() xpv1.DeletionPolicy { - return mg.Spec.DeletionPolicy -} - -// GetProviderConfigReference of this Policy. -func (mg *Policy) GetProviderConfigReference() *xpv1.Reference { - return mg.Spec.ProviderConfigReference -} - -/* -GetProviderReference of this Policy. -Deprecated: Use GetProviderConfigReference. -*/ -func (mg *Policy) GetProviderReference() *xpv1.Reference { - return mg.Spec.ProviderReference -} - -// GetWriteConnectionSecretToReference of this Policy. -func (mg *Policy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { - return mg.Spec.WriteConnectionSecretToReference -} - -// SetConditions of this Policy. -func (mg *Policy) SetConditions(c ...xpv1.Condition) { - mg.Status.SetConditions(c...) -} - -// SetDeletionPolicy of this Policy. -func (mg *Policy) SetDeletionPolicy(r xpv1.DeletionPolicy) { - mg.Spec.DeletionPolicy = r -} - -// SetProviderConfigReference of this Policy. -func (mg *Policy) SetProviderConfigReference(r *xpv1.Reference) { - mg.Spec.ProviderConfigReference = r -} - -/* -SetProviderReference of this Policy. -Deprecated: Use SetProviderConfigReference. -*/ -func (mg *Policy) SetProviderReference(r *xpv1.Reference) { - mg.Spec.ProviderReference = r -} - -// SetWriteConnectionSecretToReference of this Policy. -func (mg *Policy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { - mg.Spec.WriteConnectionSecretToReference = r -} - -// GetCondition of this Role. -func (mg *Role) GetCondition(ct xpv1.ConditionType) xpv1.Condition { - return mg.Status.GetCondition(ct) -} - -// GetDeletionPolicy of this Role. -func (mg *Role) GetDeletionPolicy() xpv1.DeletionPolicy { - return mg.Spec.DeletionPolicy -} - -// GetProviderConfigReference of this Role. -func (mg *Role) GetProviderConfigReference() *xpv1.Reference { - return mg.Spec.ProviderConfigReference -} - -/* -GetProviderReference of this Role. -Deprecated: Use GetProviderConfigReference. -*/ -func (mg *Role) GetProviderReference() *xpv1.Reference { - return mg.Spec.ProviderReference -} - -// GetWriteConnectionSecretToReference of this Role. -func (mg *Role) GetWriteConnectionSecretToReference() *xpv1.SecretReference { - return mg.Spec.WriteConnectionSecretToReference -} - -// SetConditions of this Role. -func (mg *Role) SetConditions(c ...xpv1.Condition) { - mg.Status.SetConditions(c...) -} - -// SetDeletionPolicy of this Role. -func (mg *Role) SetDeletionPolicy(r xpv1.DeletionPolicy) { - mg.Spec.DeletionPolicy = r -} - -// SetProviderConfigReference of this Role. -func (mg *Role) SetProviderConfigReference(r *xpv1.Reference) { - mg.Spec.ProviderConfigReference = r -} - -/* -SetProviderReference of this Role. -Deprecated: Use SetProviderConfigReference. -*/ -func (mg *Role) SetProviderReference(r *xpv1.Reference) { - mg.Spec.ProviderReference = r -} - -// SetWriteConnectionSecretToReference of this Role. -func (mg *Role) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { - mg.Spec.WriteConnectionSecretToReference = r -} - -// GetCondition of this RolePolicyAttachment. 
-func (mg *RolePolicyAttachment) GetCondition(ct xpv1.ConditionType) xpv1.Condition { - return mg.Status.GetCondition(ct) -} - -// GetDeletionPolicy of this RolePolicyAttachment. -func (mg *RolePolicyAttachment) GetDeletionPolicy() xpv1.DeletionPolicy { - return mg.Spec.DeletionPolicy -} - -// GetProviderConfigReference of this RolePolicyAttachment. -func (mg *RolePolicyAttachment) GetProviderConfigReference() *xpv1.Reference { - return mg.Spec.ProviderConfigReference -} - -/* -GetProviderReference of this RolePolicyAttachment. -Deprecated: Use GetProviderConfigReference. -*/ -func (mg *RolePolicyAttachment) GetProviderReference() *xpv1.Reference { - return mg.Spec.ProviderReference -} - -// GetWriteConnectionSecretToReference of this RolePolicyAttachment. -func (mg *RolePolicyAttachment) GetWriteConnectionSecretToReference() *xpv1.SecretReference { - return mg.Spec.WriteConnectionSecretToReference -} - -// SetConditions of this RolePolicyAttachment. -func (mg *RolePolicyAttachment) SetConditions(c ...xpv1.Condition) { - mg.Status.SetConditions(c...) -} - -// SetDeletionPolicy of this RolePolicyAttachment. -func (mg *RolePolicyAttachment) SetDeletionPolicy(r xpv1.DeletionPolicy) { - mg.Spec.DeletionPolicy = r -} - -// SetProviderConfigReference of this RolePolicyAttachment. -func (mg *RolePolicyAttachment) SetProviderConfigReference(r *xpv1.Reference) { - mg.Spec.ProviderConfigReference = r -} - -/* -SetProviderReference of this RolePolicyAttachment. -Deprecated: Use SetProviderConfigReference. -*/ -func (mg *RolePolicyAttachment) SetProviderReference(r *xpv1.Reference) { - mg.Spec.ProviderReference = r -} - -// SetWriteConnectionSecretToReference of this RolePolicyAttachment. -func (mg *RolePolicyAttachment) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { - mg.Spec.WriteConnectionSecretToReference = r -} - -// GetCondition of this User. -func (mg *User) GetCondition(ct xpv1.ConditionType) xpv1.Condition { - return mg.Status.GetCondition(ct) -} - -// GetDeletionPolicy of this User. -func (mg *User) GetDeletionPolicy() xpv1.DeletionPolicy { - return mg.Spec.DeletionPolicy -} - -// GetProviderConfigReference of this User. -func (mg *User) GetProviderConfigReference() *xpv1.Reference { - return mg.Spec.ProviderConfigReference -} - -/* -GetProviderReference of this User. -Deprecated: Use GetProviderConfigReference. -*/ -func (mg *User) GetProviderReference() *xpv1.Reference { - return mg.Spec.ProviderReference -} - -// GetWriteConnectionSecretToReference of this User. -func (mg *User) GetWriteConnectionSecretToReference() *xpv1.SecretReference { - return mg.Spec.WriteConnectionSecretToReference -} - -// SetConditions of this User. -func (mg *User) SetConditions(c ...xpv1.Condition) { - mg.Status.SetConditions(c...) -} - -// SetDeletionPolicy of this User. -func (mg *User) SetDeletionPolicy(r xpv1.DeletionPolicy) { - mg.Spec.DeletionPolicy = r -} - -// SetProviderConfigReference of this User. -func (mg *User) SetProviderConfigReference(r *xpv1.Reference) { - mg.Spec.ProviderConfigReference = r -} - -/* -SetProviderReference of this User. -Deprecated: Use SetProviderConfigReference. -*/ -func (mg *User) SetProviderReference(r *xpv1.Reference) { - mg.Spec.ProviderReference = r -} - -// SetWriteConnectionSecretToReference of this User. -func (mg *User) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { - mg.Spec.WriteConnectionSecretToReference = r -} - -// GetCondition of this UserPolicyAttachment. 
-func (mg *UserPolicyAttachment) GetCondition(ct xpv1.ConditionType) xpv1.Condition { - return mg.Status.GetCondition(ct) -} - -// GetDeletionPolicy of this UserPolicyAttachment. -func (mg *UserPolicyAttachment) GetDeletionPolicy() xpv1.DeletionPolicy { - return mg.Spec.DeletionPolicy -} - -// GetProviderConfigReference of this UserPolicyAttachment. -func (mg *UserPolicyAttachment) GetProviderConfigReference() *xpv1.Reference { - return mg.Spec.ProviderConfigReference -} - -/* -GetProviderReference of this UserPolicyAttachment. -Deprecated: Use GetProviderConfigReference. -*/ -func (mg *UserPolicyAttachment) GetProviderReference() *xpv1.Reference { - return mg.Spec.ProviderReference -} - -// GetWriteConnectionSecretToReference of this UserPolicyAttachment. -func (mg *UserPolicyAttachment) GetWriteConnectionSecretToReference() *xpv1.SecretReference { - return mg.Spec.WriteConnectionSecretToReference -} - -// SetConditions of this UserPolicyAttachment. -func (mg *UserPolicyAttachment) SetConditions(c ...xpv1.Condition) { - mg.Status.SetConditions(c...) -} - -// SetDeletionPolicy of this UserPolicyAttachment. -func (mg *UserPolicyAttachment) SetDeletionPolicy(r xpv1.DeletionPolicy) { - mg.Spec.DeletionPolicy = r -} - -// SetProviderConfigReference of this UserPolicyAttachment. -func (mg *UserPolicyAttachment) SetProviderConfigReference(r *xpv1.Reference) { - mg.Spec.ProviderConfigReference = r -} - -/* -SetProviderReference of this UserPolicyAttachment. -Deprecated: Use SetProviderConfigReference. -*/ -func (mg *UserPolicyAttachment) SetProviderReference(r *xpv1.Reference) { - mg.Spec.ProviderReference = r -} - -// SetWriteConnectionSecretToReference of this UserPolicyAttachment. -func (mg *UserPolicyAttachment) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { - mg.Spec.WriteConnectionSecretToReference = r -} diff --git a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/zz_generated.managedlist.go b/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/zz_generated.managedlist.go deleted file mode 100644 index 216283ff..00000000 --- a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/zz_generated.managedlist.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright 2021 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by angryjet. DO NOT EDIT. - -package v1beta1 - -import resource "github.com/crossplane/crossplane-runtime/pkg/resource" - -// GetItems of this AccessKeyList. -func (l *AccessKeyList) GetItems() []resource.Managed { - items := make([]resource.Managed, len(l.Items)) - for i := range l.Items { - items[i] = &l.Items[i] - } - return items -} - -// GetItems of this GroupList. -func (l *GroupList) GetItems() []resource.Managed { - items := make([]resource.Managed, len(l.Items)) - for i := range l.Items { - items[i] = &l.Items[i] - } - return items -} - -// GetItems of this GroupPolicyAttachmentList. 
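These GetItems implementations (they continue below for the remaining list types) are what satisfy crossplane-runtime's resource.ManagedList interface, so generic code can walk any list of managed resources without knowing the concrete type. A hedged sketch of the kind of caller this enables — readyCount is a hypothetical helper, not part of either package:

    // Assumed imports: resource "github.com/crossplane/crossplane-runtime/pkg/resource",
    // xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1", corev1 "k8s.io/api/core/v1".

    // readyCount reports how many items in any managed-resource list are Ready=True.
    func readyCount(l resource.ManagedList) int {
        n := 0
        for _, mg := range l.GetItems() {
            if mg.GetCondition(xpv1.TypeReady).Status == corev1.ConditionTrue {
                n++
            }
        }
        return n
    }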
-func (l *GroupPolicyAttachmentList) GetItems() []resource.Managed { - items := make([]resource.Managed, len(l.Items)) - for i := range l.Items { - items[i] = &l.Items[i] - } - return items -} - -// GetItems of this GroupUserMembershipList. -func (l *GroupUserMembershipList) GetItems() []resource.Managed { - items := make([]resource.Managed, len(l.Items)) - for i := range l.Items { - items[i] = &l.Items[i] - } - return items -} - -// GetItems of this OpenIDConnectProviderList. -func (l *OpenIDConnectProviderList) GetItems() []resource.Managed { - items := make([]resource.Managed, len(l.Items)) - for i := range l.Items { - items[i] = &l.Items[i] - } - return items -} - -// GetItems of this PolicyList. -func (l *PolicyList) GetItems() []resource.Managed { - items := make([]resource.Managed, len(l.Items)) - for i := range l.Items { - items[i] = &l.Items[i] - } - return items -} - -// GetItems of this RoleList. -func (l *RoleList) GetItems() []resource.Managed { - items := make([]resource.Managed, len(l.Items)) - for i := range l.Items { - items[i] = &l.Items[i] - } - return items -} - -// GetItems of this RolePolicyAttachmentList. -func (l *RolePolicyAttachmentList) GetItems() []resource.Managed { - items := make([]resource.Managed, len(l.Items)) - for i := range l.Items { - items[i] = &l.Items[i] - } - return items -} - -// GetItems of this UserList. -func (l *UserList) GetItems() []resource.Managed { - items := make([]resource.Managed, len(l.Items)) - for i := range l.Items { - items[i] = &l.Items[i] - } - return items -} - -// GetItems of this UserPolicyAttachmentList. -func (l *UserPolicyAttachmentList) GetItems() []resource.Managed { - items := make([]resource.Managed, len(l.Items)) - for i := range l.Items { - items[i] = &l.Items[i] - } - return items -} diff --git a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/zz_generated.resolvers.go b/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/zz_generated.resolvers.go deleted file mode 100644 index 82336da9..00000000 --- a/vendor/github.com/crossplane/provider-aws/apis/iam/v1beta1/zz_generated.resolvers.go +++ /dev/null @@ -1,220 +0,0 @@ -/* -Copyright 2021 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by angryjet. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - reference "github.com/crossplane/crossplane-runtime/pkg/reference" - errors "github.com/pkg/errors" - client "sigs.k8s.io/controller-runtime/pkg/client" -) - -// ResolveReferences of this AccessKey. 
-func (mg *AccessKey) ResolveReferences(ctx context.Context, c client.Reader) error { - r := reference.NewAPIResolver(c, mg) - - var rsp reference.ResolutionResponse - var err error - - rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: mg.Spec.ForProvider.Username, - Extract: reference.ExternalName(), - Reference: mg.Spec.ForProvider.UsernameRef, - Selector: mg.Spec.ForProvider.UsernameSelector, - To: reference.To{ - List: &UserList{}, - Managed: &User{}, - }, - }) - if err != nil { - return errors.Wrap(err, "mg.Spec.ForProvider.Username") - } - mg.Spec.ForProvider.Username = rsp.ResolvedValue - mg.Spec.ForProvider.UsernameRef = rsp.ResolvedReference - - return nil -} - -// ResolveReferences of this GroupPolicyAttachment. -func (mg *GroupPolicyAttachment) ResolveReferences(ctx context.Context, c client.Reader) error { - r := reference.NewAPIResolver(c, mg) - - var rsp reference.ResolutionResponse - var err error - - rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: mg.Spec.ForProvider.PolicyARN, - Extract: PolicyARN(), - Reference: mg.Spec.ForProvider.PolicyARNRef, - Selector: mg.Spec.ForProvider.PolicyARNSelector, - To: reference.To{ - List: &PolicyList{}, - Managed: &Policy{}, - }, - }) - if err != nil { - return errors.Wrap(err, "mg.Spec.ForProvider.PolicyARN") - } - mg.Spec.ForProvider.PolicyARN = rsp.ResolvedValue - mg.Spec.ForProvider.PolicyARNRef = rsp.ResolvedReference - - rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: mg.Spec.ForProvider.GroupName, - Extract: reference.ExternalName(), - Reference: mg.Spec.ForProvider.GroupNameRef, - Selector: mg.Spec.ForProvider.GroupNameSelector, - To: reference.To{ - List: &GroupList{}, - Managed: &Group{}, - }, - }) - if err != nil { - return errors.Wrap(err, "mg.Spec.ForProvider.GroupName") - } - mg.Spec.ForProvider.GroupName = rsp.ResolvedValue - mg.Spec.ForProvider.GroupNameRef = rsp.ResolvedReference - - return nil -} - -// ResolveReferences of this GroupUserMembership. -func (mg *GroupUserMembership) ResolveReferences(ctx context.Context, c client.Reader) error { - r := reference.NewAPIResolver(c, mg) - - var rsp reference.ResolutionResponse - var err error - - rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: mg.Spec.ForProvider.GroupName, - Extract: reference.ExternalName(), - Reference: mg.Spec.ForProvider.GroupNameRef, - Selector: mg.Spec.ForProvider.GroupNameSelector, - To: reference.To{ - List: &GroupList{}, - Managed: &Group{}, - }, - }) - if err != nil { - return errors.Wrap(err, "mg.Spec.ForProvider.GroupName") - } - mg.Spec.ForProvider.GroupName = rsp.ResolvedValue - mg.Spec.ForProvider.GroupNameRef = rsp.ResolvedReference - - rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: mg.Spec.ForProvider.UserName, - Extract: reference.ExternalName(), - Reference: mg.Spec.ForProvider.UserNameRef, - Selector: mg.Spec.ForProvider.UserNameSelector, - To: reference.To{ - List: &UserList{}, - Managed: &User{}, - }, - }) - if err != nil { - return errors.Wrap(err, "mg.Spec.ForProvider.UserName") - } - mg.Spec.ForProvider.UserName = rsp.ResolvedValue - mg.Spec.ForProvider.UserNameRef = rsp.ResolvedReference - - return nil -} - -// ResolveReferences of this RolePolicyAttachment. 
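Each resolver above follows the same two-step shape: resolve the policy ARN through the PolicyARN() extractor defined in referencers.go, then resolve the owning user/group/role name through reference.ExternalName(). In practice crossplane-runtime's managed reconciler typically invokes these for you, but a manual call looks roughly like this sketch (kc is an assumed client.Reader; nothing here is from this repo):

    upa := &UserPolicyAttachment{} // spec.forProvider policyArnRef/userNameRef populated elsewhere
    if err := upa.ResolveReferences(ctx, kc); err != nil {
        return err // referenced Policy or User not resolvable yet; retry later
    }
    // upa.Spec.ForProvider.PolicyARN and .UserName now carry the resolved values.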
-func (mg *RolePolicyAttachment) ResolveReferences(ctx context.Context, c client.Reader) error { - r := reference.NewAPIResolver(c, mg) - - var rsp reference.ResolutionResponse - var err error - - rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: mg.Spec.ForProvider.PolicyARN, - Extract: PolicyARN(), - Reference: mg.Spec.ForProvider.PolicyARNRef, - Selector: mg.Spec.ForProvider.PolicyARNSelector, - To: reference.To{ - List: &PolicyList{}, - Managed: &Policy{}, - }, - }) - if err != nil { - return errors.Wrap(err, "mg.Spec.ForProvider.PolicyARN") - } - mg.Spec.ForProvider.PolicyARN = rsp.ResolvedValue - mg.Spec.ForProvider.PolicyARNRef = rsp.ResolvedReference - - rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: mg.Spec.ForProvider.RoleName, - Extract: reference.ExternalName(), - Reference: mg.Spec.ForProvider.RoleNameRef, - Selector: mg.Spec.ForProvider.RoleNameSelector, - To: reference.To{ - List: &RoleList{}, - Managed: &Role{}, - }, - }) - if err != nil { - return errors.Wrap(err, "mg.Spec.ForProvider.RoleName") - } - mg.Spec.ForProvider.RoleName = rsp.ResolvedValue - mg.Spec.ForProvider.RoleNameRef = rsp.ResolvedReference - - return nil -} - -// ResolveReferences of this UserPolicyAttachment. -func (mg *UserPolicyAttachment) ResolveReferences(ctx context.Context, c client.Reader) error { - r := reference.NewAPIResolver(c, mg) - - var rsp reference.ResolutionResponse - var err error - - rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: mg.Spec.ForProvider.PolicyARN, - Extract: PolicyARN(), - Reference: mg.Spec.ForProvider.PolicyARNRef, - Selector: mg.Spec.ForProvider.PolicyARNSelector, - To: reference.To{ - List: &PolicyList{}, - Managed: &Policy{}, - }, - }) - if err != nil { - return errors.Wrap(err, "mg.Spec.ForProvider.PolicyARN") - } - mg.Spec.ForProvider.PolicyARN = rsp.ResolvedValue - mg.Spec.ForProvider.PolicyARNRef = rsp.ResolvedReference - - rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: mg.Spec.ForProvider.UserName, - Extract: reference.ExternalName(), - Reference: mg.Spec.ForProvider.UserNameRef, - Selector: mg.Spec.ForProvider.UserNameSelector, - To: reference.To{ - List: &UserList{}, - Managed: &User{}, - }, - }) - if err != nil { - return errors.Wrap(err, "mg.Spec.ForProvider.UserName") - } - mg.Spec.ForProvider.UserName = rsp.ResolvedValue - mg.Spec.ForProvider.UserNameRef = rsp.ResolvedReference - - return nil -} diff --git a/vendor/github.com/kr/text/License b/vendor/github.com/kr/text/License deleted file mode 100644 index 480a3280..00000000 --- a/vendor/github.com/kr/text/License +++ /dev/null @@ -1,19 +0,0 @@ -Copyright 2012 Keith Rarick - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/kr/text/Readme b/vendor/github.com/kr/text/Readme deleted file mode 100644 index 7e6e7c06..00000000 --- a/vendor/github.com/kr/text/Readme +++ /dev/null @@ -1,3 +0,0 @@ -This is a Go package for manipulating paragraphs of text. - -See http://go.pkgdoc.org/github.com/kr/text for full documentation. diff --git a/vendor/github.com/kr/text/doc.go b/vendor/github.com/kr/text/doc.go deleted file mode 100644 index cf4c198f..00000000 --- a/vendor/github.com/kr/text/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package text provides rudimentary functions for manipulating text in -// paragraphs. -package text diff --git a/vendor/github.com/kr/text/indent.go b/vendor/github.com/kr/text/indent.go deleted file mode 100644 index 4ebac45c..00000000 --- a/vendor/github.com/kr/text/indent.go +++ /dev/null @@ -1,74 +0,0 @@ -package text - -import ( - "io" -) - -// Indent inserts prefix at the beginning of each non-empty line of s. The -// end-of-line marker is NL. -func Indent(s, prefix string) string { - return string(IndentBytes([]byte(s), []byte(prefix))) -} - -// IndentBytes inserts prefix at the beginning of each non-empty line of b. -// The end-of-line marker is NL. -func IndentBytes(b, prefix []byte) []byte { - var res []byte - bol := true - for _, c := range b { - if bol && c != '\n' { - res = append(res, prefix...) - } - res = append(res, c) - bol = c == '\n' - } - return res -} - -// Writer indents each line of its input. -type indentWriter struct { - w io.Writer - bol bool - pre [][]byte - sel int - off int -} - -// NewIndentWriter makes a new write filter that indents the input -// lines. Each line is prefixed in order with the corresponding -// element of pre. If there are more lines than elements, the last -// element of pre is repeated for each subsequent line. -func NewIndentWriter(w io.Writer, pre ...[]byte) io.Writer { - return &indentWriter{ - w: w, - pre: pre, - bol: true, - } -} - -// The only errors returned are from the underlying indentWriter. -func (w *indentWriter) Write(p []byte) (n int, err error) { - for _, c := range p { - if w.bol { - var i int - i, err = w.w.Write(w.pre[w.sel][w.off:]) - w.off += i - if err != nil { - return n, err - } - } - _, err = w.w.Write([]byte{c}) - if err != nil { - return n, err - } - n++ - w.bol = c == '\n' - if w.bol { - w.off = 0 - if w.sel < len(w.pre)-1 { - w.sel++ - } - } - } - return n, nil -} diff --git a/vendor/github.com/kr/text/wrap.go b/vendor/github.com/kr/text/wrap.go deleted file mode 100644 index b09bb037..00000000 --- a/vendor/github.com/kr/text/wrap.go +++ /dev/null @@ -1,86 +0,0 @@ -package text - -import ( - "bytes" - "math" -) - -var ( - nl = []byte{'\n'} - sp = []byte{' '} -) - -const defaultPenalty = 1e5 - -// Wrap wraps s into a paragraph of lines of length lim, with minimal -// raggedness. -func Wrap(s string, lim int) string { - return string(WrapBytes([]byte(s), lim)) -} - -// WrapBytes wraps b into a paragraph of lines of length lim, with minimal -// raggedness. 
-func WrapBytes(b []byte, lim int) []byte { - words := bytes.Split(bytes.Replace(bytes.TrimSpace(b), nl, sp, -1), sp) - var lines [][]byte - for _, line := range WrapWords(words, 1, lim, defaultPenalty) { - lines = append(lines, bytes.Join(line, sp)) - } - return bytes.Join(lines, nl) -} - -// WrapWords is the low-level line-breaking algorithm, useful if you need more -// control over the details of the text wrapping process. For most uses, either -// Wrap or WrapBytes will be sufficient and more convenient. -// -// WrapWords splits a list of words into lines with minimal "raggedness", -// treating each byte as one unit, accounting for spc units between adjacent -// words on each line, and attempting to limit lines to lim units. Raggedness -// is the total error over all lines, where error is the square of the -// difference of the length of the line and lim. Too-long lines (which only -// happen when a single word is longer than lim units) have pen penalty units -// added to the error. -func WrapWords(words [][]byte, spc, lim, pen int) [][][]byte { - n := len(words) - - length := make([][]int, n) - for i := 0; i < n; i++ { - length[i] = make([]int, n) - length[i][i] = len(words[i]) - for j := i + 1; j < n; j++ { - length[i][j] = length[i][j-1] + spc + len(words[j]) - } - } - - nbrk := make([]int, n) - cost := make([]int, n) - for i := range cost { - cost[i] = math.MaxInt32 - } - for i := n - 1; i >= 0; i-- { - if length[i][n-1] <= lim || i == n-1 { - cost[i] = 0 - nbrk[i] = n - } else { - for j := i + 1; j < n; j++ { - d := lim - length[i][j-1] - c := d*d + cost[j] - if length[i][j-1] > lim { - c += pen // too-long lines get a worse penalty - } - if c < cost[i] { - cost[i] = c - nbrk[i] = j - } - } - } - } - - var lines [][][]byte - i := 0 - for i < n { - lines = append(lines, words[i:nbrk[i]]) - i = nbrk[i] - } - return lines -} diff --git a/vendor/github.com/motomux/pretty/.gitignore b/vendor/github.com/motomux/pretty/.gitignore deleted file mode 100644 index 1f0a99f2..00000000 --- a/vendor/github.com/motomux/pretty/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -[568].out -_go* -_test* -_obj diff --git a/vendor/github.com/motomux/pretty/License b/vendor/github.com/motomux/pretty/License deleted file mode 100644 index 05c783cc..00000000 --- a/vendor/github.com/motomux/pretty/License +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright 2012 Keith Rarick - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
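For reference, the kr/text API removed above is tiny: Wrap breaks a string into lines of at most lim bytes while minimizing raggedness (the total over all lines of the squared difference between line length and lim, with an extra penalty for unavoidably over-long words), and Indent prefixes every non-empty line. A short usage sketch, assuming the package were still vendored:

    import "github.com/kr/text"

    wrapped := text.Wrap("the quick brown fox jumps over the lazy dog", 16)
    quoted := text.Indent(wrapped, "> ") // "> " before each non-empty line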
diff --git a/vendor/github.com/motomux/pretty/Readme b/vendor/github.com/motomux/pretty/Readme deleted file mode 100644 index c589fc62..00000000 --- a/vendor/github.com/motomux/pretty/Readme +++ /dev/null @@ -1,9 +0,0 @@ -package pretty - - import "github.com/kr/pretty" - - Package pretty provides pretty-printing for Go values. - -Documentation - - http://godoc.org/github.com/kr/pretty diff --git a/vendor/github.com/motomux/pretty/diff.go b/vendor/github.com/motomux/pretty/diff.go deleted file mode 100644 index 125e7eb7..00000000 --- a/vendor/github.com/motomux/pretty/diff.go +++ /dev/null @@ -1,273 +0,0 @@ -package pretty - -import ( - "fmt" - "io" - "reflect" -) - -type sbuf []string - -func (p *sbuf) Printf(format string, a ...interface{}) { - s := fmt.Sprintf(format, a...) - *p = append(*p, s) -} - -// Diff returns a slice where each element describes -// a difference between a and b. -func Diff(a, b interface{}) (desc []string) { - Pdiff((*sbuf)(&desc), a, b) - return desc -} - -// wprintfer calls Fprintf on w for each Printf call -// with a trailing newline. -type wprintfer struct{ w io.Writer } - -func (p *wprintfer) Printf(format string, a ...interface{}) { - fmt.Fprintf(p.w, format+"\n", a...) -} - -// Fdiff writes to w a description of the differences between a and b. -func Fdiff(w io.Writer, a, b interface{}) { - Pdiff(&wprintfer{w}, a, b) -} - -type Printfer interface { - Printf(format string, a ...interface{}) -} - -// Pdiff prints to p a description of the differences between a and b. -// It calls Printf once for each difference, with no trailing newline. -// The standard library log.Logger is a Printfer. -func Pdiff(p Printfer, a, b interface{}) { - diffPrinter{w: p}.diff(reflect.ValueOf(a), reflect.ValueOf(b)) -} - -type Logfer interface { - Logf(format string, a ...interface{}) -} - -// logprintfer calls Fprintf on w for each Printf call -// with a trailing newline. -type logprintfer struct{ l Logfer } - -func (p *logprintfer) Printf(format string, a ...interface{}) { - p.l.Logf(format, a...) -} - -// Ldiff prints to l a description of the differences between a and b. -// It calls Logf once for each difference, with no trailing newline. -// The standard library testing.T and testing.B are Logfers. -func Ldiff(l Logfer, a, b interface{}) { - Pdiff(&logprintfer{l}, a, b) -} - -type diffPrinter struct { - w Printfer - l string // label -} - -func (w diffPrinter) printf(f string, a ...interface{}) { - var l string - if w.l != "" { - l = w.l + ": " - } - w.w.Printf(l+f, a...) 
-} - -func (w diffPrinter) diff(av, bv reflect.Value) { - if !av.IsValid() && bv.IsValid() { - w.printf("nil != %# v", formatter{v: bv, quote: true}) - return - } - if av.IsValid() && !bv.IsValid() { - w.printf("%# v != nil", formatter{v: av, quote: true}) - return - } - if !av.IsValid() && !bv.IsValid() { - return - } - - at := av.Type() - bt := bv.Type() - if at != bt { - w.printf("%v != %v", at, bt) - return - } - - switch kind := at.Kind(); kind { - case reflect.Bool: - if a, b := av.Bool(), bv.Bool(); a != b { - w.printf("%v != %v", a, b) - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if a, b := av.Int(), bv.Int(); a != b { - w.printf("%d != %d", a, b) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - if a, b := av.Uint(), bv.Uint(); a != b { - w.printf("%d != %d", a, b) - } - case reflect.Float32, reflect.Float64: - if a, b := av.Float(), bv.Float(); a != b { - w.printf("%v != %v", a, b) - } - case reflect.Complex64, reflect.Complex128: - if a, b := av.Complex(), bv.Complex(); a != b { - w.printf("%v != %v", a, b) - } - case reflect.Array: - n := av.Len() - for i := 0; i < n; i++ { - w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i)) - } - case reflect.Chan, reflect.Func, reflect.UnsafePointer: - if a, b := av.Pointer(), bv.Pointer(); a != b { - w.printf("%#x != %#x", a, b) - } - case reflect.Interface: - w.diff(av.Elem(), bv.Elem()) - case reflect.Map: - ak, both, bk := keyDiff(av.MapKeys(), bv.MapKeys()) - for _, k := range ak { - w := w.relabel(fmt.Sprintf("[%#v]", k)) - w.printf("%q != (missing)", av.MapIndex(k)) - } - for _, k := range both { - w := w.relabel(fmt.Sprintf("[%#v]", k)) - w.diff(av.MapIndex(k), bv.MapIndex(k)) - } - for _, k := range bk { - w := w.relabel(fmt.Sprintf("[%#v]", k)) - w.printf("(missing) != %q", bv.MapIndex(k)) - } - if av.IsNil() != bv.IsNil() { - w.printf("%#v != %#v", av, bv) - break - } - case reflect.Ptr: - switch { - case av.IsNil() && !bv.IsNil(): - w.printf("nil != %# v", formatter{v: bv, quote: true}) - case !av.IsNil() && bv.IsNil(): - w.printf("%# v != nil", formatter{v: av, quote: true}) - case !av.IsNil() && !bv.IsNil(): - w.diff(av.Elem(), bv.Elem()) - } - case reflect.Slice: - lenA := av.Len() - lenB := bv.Len() - if lenA != lenB { - w.printf("%s[%d] != %s[%d]", av.Type(), lenA, bv.Type(), lenB) - break - } - for i := 0; i < lenA; i++ { - w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i)) - } - if av.IsNil() != bv.IsNil() { - w.printf("%#v != %#v", av, bv) - break - } - case reflect.String: - if a, b := av.String(), bv.String(); a != b { - w.printf("%q != %q", a, b) - } - case reflect.Struct: - for i := 0; i < av.NumField(); i++ { - w.relabel(at.Field(i).Name).diff(av.Field(i), bv.Field(i)) - } - default: - panic("unknown reflect Kind: " + kind.String()) - } -} - -func (d diffPrinter) relabel(name string) (d1 diffPrinter) { - d1 = d - if d.l != "" && name[0] != '[' { - d1.l += "." - } - d1.l += name - return d1 -} - -// keyEqual compares a and b for equality. -// Both a and b must be valid map keys. 
-func keyEqual(av, bv reflect.Value) bool { - if !av.IsValid() && !bv.IsValid() { - return true - } - if !av.IsValid() || !bv.IsValid() || av.Type() != bv.Type() { - return false - } - switch kind := av.Kind(); kind { - case reflect.Bool: - a, b := av.Bool(), bv.Bool() - return a == b - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - a, b := av.Int(), bv.Int() - return a == b - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - a, b := av.Uint(), bv.Uint() - return a == b - case reflect.Float32, reflect.Float64: - a, b := av.Float(), bv.Float() - return a == b - case reflect.Complex64, reflect.Complex128: - a, b := av.Complex(), bv.Complex() - return a == b - case reflect.Array: - for i := 0; i < av.Len(); i++ { - if !keyEqual(av.Index(i), bv.Index(i)) { - return false - } - } - return true - case reflect.Chan, reflect.UnsafePointer, reflect.Ptr: - a, b := av.Pointer(), bv.Pointer() - return a == b - case reflect.Interface: - return keyEqual(av.Elem(), bv.Elem()) - case reflect.String: - a, b := av.String(), bv.String() - return a == b - case reflect.Struct: - for i := 0; i < av.NumField(); i++ { - if !keyEqual(av.Field(i), bv.Field(i)) { - return false - } - } - return true - default: - panic("invalid map key type " + av.Type().String()) - } -} - -func keyDiff(a, b []reflect.Value) (ak, both, bk []reflect.Value) { - for _, av := range a { - inBoth := false - for _, bv := range b { - if keyEqual(av, bv) { - inBoth = true - both = append(both, av) - break - } - } - if !inBoth { - ak = append(ak, av) - } - } - for _, bv := range b { - inBoth := false - for _, av := range a { - if keyEqual(av, bv) { - inBoth = true - break - } - } - if !inBoth { - bk = append(bk, bv) - } - } - return -} diff --git a/vendor/github.com/motomux/pretty/formatter.go b/vendor/github.com/motomux/pretty/formatter.go deleted file mode 100644 index a317d7b8..00000000 --- a/vendor/github.com/motomux/pretty/formatter.go +++ /dev/null @@ -1,328 +0,0 @@ -package pretty - -import ( - "fmt" - "io" - "reflect" - "strconv" - "text/tabwriter" - - "github.com/kr/text" -) - -type formatter struct { - v reflect.Value - force bool - quote bool -} - -// Formatter makes a wrapper, f, that will format x as go source with line -// breaks and tabs. Object f responds to the "%v" formatting verb when both the -// "#" and " " (space) flags are set, for example: -// -// fmt.Sprintf("%# v", Formatter(x)) -// -// If one of these two flags is not set, or any other verb is used, f will -// format x according to the usual rules of package fmt. -// In particular, if x satisfies fmt.Formatter, then x.Format will be called. 
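As the comment above describes, the wrapper only kicks in for the verb %v when both the "#" and " " flags are set; any other verb or flag combination passes through to package fmt untouched. A short sketch of both paths, plus the Diff entry point shown earlier (the point type is hypothetical):

    type point struct{ X, Y int }

    fmt.Printf("%# v\n", pretty.Formatter(point{1, 2})) // pretty, line-broken Go syntax
    fmt.Printf("%v\n", pretty.Formatter(point{1, 2}))   // plain fmt behavior: {1 2}

    for _, d := range pretty.Diff(point{1, 2}, point{1, 3}) {
        fmt.Println(d) // "Y: 2 != 3"
    }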
-func Formatter(x interface{}) (f fmt.Formatter) { - return formatter{v: reflect.ValueOf(x), quote: true} -} - -func (fo formatter) String() string { - return fmt.Sprint(fo.v.Interface()) // unwrap it -} - -func (fo formatter) passThrough(f fmt.State, c rune) { - s := "%" - for i := 0; i < 128; i++ { - if f.Flag(i) { - s += string(i) - } - } - if w, ok := f.Width(); ok { - s += fmt.Sprintf("%d", w) - } - if p, ok := f.Precision(); ok { - s += fmt.Sprintf(".%d", p) - } - s += string(c) - fmt.Fprintf(f, s, fo.v.Interface()) -} - -func (fo formatter) Format(f fmt.State, c rune) { - if fo.force || c == 'v' && f.Flag('#') && f.Flag(' ') { - w := tabwriter.NewWriter(f, 4, 4, 1, ' ', 0) - p := &printer{tw: w, Writer: w, visited: make(map[visit]int)} - p.printValue(fo.v, true, fo.quote) - w.Flush() - return - } - fo.passThrough(f, c) -} - -type printer struct { - io.Writer - tw *tabwriter.Writer - visited map[visit]int - depth int -} - -func (p *printer) indent() *printer { - q := *p - q.tw = tabwriter.NewWriter(p.Writer, 4, 4, 1, ' ', 0) - q.Writer = text.NewIndentWriter(q.tw, []byte{'\t'}) - return &q -} - -func (p *printer) printInline(v reflect.Value, x interface{}, showType bool) { - if showType { - io.WriteString(p, v.Type().String()) - fmt.Fprintf(p, "(%#v)", x) - } else { - fmt.Fprintf(p, "%#v", x) - } -} - -// printValue must keep track of already-printed pointer values to avoid -// infinite recursion. -type visit struct { - v uintptr - typ reflect.Type -} - -func (p *printer) printValue(v reflect.Value, showType, quote bool) { - if p.depth > 10 { - io.WriteString(p, "!%v(DEPTH EXCEEDED)") - return - } - - switch v.Kind() { - case reflect.Bool: - p.printInline(v, v.Bool(), showType) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - p.printInline(v, v.Int(), showType) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - p.printInline(v, v.Uint(), showType) - case reflect.Float32, reflect.Float64: - p.printInline(v, v.Float(), showType) - case reflect.Complex64, reflect.Complex128: - fmt.Fprintf(p, "%#v", v.Complex()) - case reflect.String: - p.fmtString(v.String(), quote) - case reflect.Map: - t := v.Type() - if showType { - io.WriteString(p, t.String()) - } - writeByte(p, '{') - if nonzero(v) { - expand := !canInline(v.Type()) - pp := p - if expand { - writeByte(p, '\n') - pp = p.indent() - } - keys := v.MapKeys() - for i := 0; i < v.Len(); i++ { - showTypeInStruct := true - k := keys[i] - mv := v.MapIndex(k) - pp.printValue(k, false, true) - writeByte(pp, ':') - if expand { - writeByte(pp, '\t') - } - showTypeInStruct = t.Elem().Kind() == reflect.Interface - pp.printValue(mv, showTypeInStruct, true) - if expand { - io.WriteString(pp, ",\n") - } else if i < v.Len()-1 { - io.WriteString(pp, ", ") - } - } - if expand { - pp.tw.Flush() - } - } - writeByte(p, '}') - case reflect.Struct: - t := v.Type() - if v.CanAddr() { - addr := v.UnsafeAddr() - vis := visit{addr, t} - if vd, ok := p.visited[vis]; ok && vd < p.depth { - p.fmtString(t.String()+"{(CYCLIC REFERENCE)}", false) - break // don't print v again - } - p.visited[vis] = p.depth - } - - if showType { - io.WriteString(p, t.String()) - } - writeByte(p, '{') - if nonzero(v) { - expand := !canInline(v.Type()) - pp := p - if expand { - writeByte(p, '\n') - pp = p.indent() - } - for i := 0; i < v.NumField(); i++ { - showTypeInStruct := true - if f := t.Field(i); f.Name != "" { - io.WriteString(pp, f.Name) - writeByte(pp, ':') - if expand { - writeByte(pp, '\t') 
- } - showTypeInStruct = labelType(f.Type) - } - pp.printValue(getField(v, i), showTypeInStruct, true) - if expand { - io.WriteString(pp, ",\n") - } else if i < v.NumField()-1 { - io.WriteString(pp, ", ") - } - } - if expand { - pp.tw.Flush() - } - } - writeByte(p, '}') - case reflect.Interface: - switch e := v.Elem(); { - case e.Kind() == reflect.Invalid: - io.WriteString(p, "nil") - case e.IsValid(): - pp := *p - pp.depth++ - pp.printValue(e, showType, true) - default: - io.WriteString(p, v.Type().String()) - io.WriteString(p, "(nil)") - } - case reflect.Array, reflect.Slice: - t := v.Type() - if showType { - io.WriteString(p, t.String()) - } - if v.Kind() == reflect.Slice && v.IsNil() && showType { - io.WriteString(p, "(nil)") - break - } - if v.Kind() == reflect.Slice && v.IsNil() { - io.WriteString(p, "nil") - break - } - writeByte(p, '{') - expand := !canInline(v.Type()) - pp := p - if expand { - writeByte(p, '\n') - pp = p.indent() - } - for i := 0; i < v.Len(); i++ { - showTypeInSlice := t.Elem().Kind() == reflect.Interface - pp.printValue(v.Index(i), showTypeInSlice, true) - if expand { - io.WriteString(pp, ",\n") - } else if i < v.Len()-1 { - io.WriteString(pp, ", ") - } - } - if expand { - pp.tw.Flush() - } - writeByte(p, '}') - case reflect.Ptr: - e := v.Elem() - if !e.IsValid() { - writeByte(p, '(') - io.WriteString(p, v.Type().String()) - io.WriteString(p, ")(nil)") - } else { - pp := *p - pp.depth++ - writeByte(pp, '&') - pp.printValue(e, true, true) - } - case reflect.Chan: - x := v.Pointer() - if showType { - writeByte(p, '(') - io.WriteString(p, v.Type().String()) - fmt.Fprintf(p, ")(%#v)", x) - } else { - fmt.Fprintf(p, "%#v", x) - } - case reflect.Func: - io.WriteString(p, v.Type().String()) - io.WriteString(p, " {...}") - case reflect.UnsafePointer: - p.printInline(v, v.Pointer(), showType) - case reflect.Invalid: - io.WriteString(p, "nil") - } -} - -func canInline(t reflect.Type) bool { - switch t.Kind() { - case reflect.Map: - return !canExpand(t.Elem()) - case reflect.Struct: - for i := 0; i < t.NumField(); i++ { - if canExpand(t.Field(i).Type) { - return false - } - } - return true - case reflect.Interface: - return false - case reflect.Array, reflect.Slice: - return !canExpand(t.Elem()) - case reflect.Ptr: - return false - case reflect.Chan, reflect.Func, reflect.UnsafePointer: - return false - } - return true -} - -func canExpand(t reflect.Type) bool { - switch t.Kind() { - case reflect.Map, reflect.Struct, - reflect.Interface, reflect.Array, reflect.Slice, - reflect.Ptr: - return true - } - return false -} - -func labelType(t reflect.Type) bool { - switch t.Kind() { - case reflect.Interface, reflect.Struct: - return true - } - return false -} - -func (p *printer) fmtString(s string, quote bool) { - if quote { - s = strconv.Quote(s) - } - io.WriteString(p, s) -} - -func writeByte(w io.Writer, b byte) { - w.Write([]byte{b}) -} - -func getField(v reflect.Value, i int) reflect.Value { - val := v.Field(i) - if val.Kind() == reflect.Interface && !val.IsNil() { - val = val.Elem() - } - return val -} diff --git a/vendor/github.com/motomux/pretty/pretty.go b/vendor/github.com/motomux/pretty/pretty.go deleted file mode 100644 index 49423ec7..00000000 --- a/vendor/github.com/motomux/pretty/pretty.go +++ /dev/null @@ -1,108 +0,0 @@ -// Package pretty provides pretty-printing for Go values. This is -// useful during debugging, to avoid wrapping long output lines in -// the terminal. 
-// -// It provides a function, Formatter, that can be used with any -// function that accepts a format string. It also provides -// convenience wrappers for functions in packages fmt and log. -package pretty - -import ( - "fmt" - "io" - "log" - "reflect" -) - -// Errorf is a convenience wrapper for fmt.Errorf. -// -// Calling Errorf(f, x, y) is equivalent to -// fmt.Errorf(f, Formatter(x), Formatter(y)). -func Errorf(format string, a ...interface{}) error { - return fmt.Errorf(format, wrap(a, false)...) -} - -// Fprintf is a convenience wrapper for fmt.Fprintf. -// -// Calling Fprintf(w, f, x, y) is equivalent to -// fmt.Fprintf(w, f, Formatter(x), Formatter(y)). -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, error error) { - return fmt.Fprintf(w, format, wrap(a, false)...) -} - -// Log is a convenience wrapper for log.Printf. -// -// Calling Log(x, y) is equivalent to -// log.Print(Formatter(x), Formatter(y)), but each operand is -// formatted with "%# v". -func Log(a ...interface{}) { - log.Print(wrap(a, true)...) -} - -// Logf is a convenience wrapper for log.Printf. -// -// Calling Logf(f, x, y) is equivalent to -// log.Printf(f, Formatter(x), Formatter(y)). -func Logf(format string, a ...interface{}) { - log.Printf(format, wrap(a, false)...) -} - -// Logln is a convenience wrapper for log.Printf. -// -// Calling Logln(x, y) is equivalent to -// log.Println(Formatter(x), Formatter(y)), but each operand is -// formatted with "%# v". -func Logln(a ...interface{}) { - log.Println(wrap(a, true)...) -} - -// Print pretty-prints its operands and writes to standard output. -// -// Calling Print(x, y) is equivalent to -// fmt.Print(Formatter(x), Formatter(y)), but each operand is -// formatted with "%# v". -func Print(a ...interface{}) (n int, errno error) { - return fmt.Print(wrap(a, true)...) -} - -// Printf is a convenience wrapper for fmt.Printf. -// -// Calling Printf(f, x, y) is equivalent to -// fmt.Printf(f, Formatter(x), Formatter(y)). -func Printf(format string, a ...interface{}) (n int, errno error) { - return fmt.Printf(format, wrap(a, false)...) -} - -// Println pretty-prints its operands and writes to standard output. -// -// Calling Print(x, y) is equivalent to -// fmt.Println(Formatter(x), Formatter(y)), but each operand is -// formatted with "%# v". -func Println(a ...interface{}) (n int, errno error) { - return fmt.Println(wrap(a, true)...) -} - -// Sprint is a convenience wrapper for fmt.Sprintf. -// -// Calling Sprint(x, y) is equivalent to -// fmt.Sprint(Formatter(x), Formatter(y)), but each operand is -// formatted with "%# v". -func Sprint(a ...interface{}) string { - return fmt.Sprint(wrap(a, true)...) -} - -// Sprintf is a convenience wrapper for fmt.Sprintf. -// -// Calling Sprintf(f, x, y) is equivalent to -// fmt.Sprintf(f, Formatter(x), Formatter(y)). -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, wrap(a, false)...) 
-} - -func wrap(a []interface{}, force bool) []interface{} { - w := make([]interface{}, len(a)) - for i, x := range a { - w[i] = formatter{v: reflect.ValueOf(x), force: force} - } - return w -} diff --git a/vendor/github.com/motomux/pretty/zero.go b/vendor/github.com/motomux/pretty/zero.go deleted file mode 100644 index abb5b6fc..00000000 --- a/vendor/github.com/motomux/pretty/zero.go +++ /dev/null @@ -1,41 +0,0 @@ -package pretty - -import ( - "reflect" -) - -func nonzero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() != 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() != 0 - case reflect.Float32, reflect.Float64: - return v.Float() != 0 - case reflect.Complex64, reflect.Complex128: - return v.Complex() != complex(0, 0) - case reflect.String: - return v.String() != "" - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if nonzero(getField(v, i)) { - return true - } - } - return false - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if nonzero(v.Index(i)) { - return true - } - } - return false - case reflect.Map, reflect.Interface, reflect.Slice, reflect.Ptr, reflect.Chan, reflect.Func: - return !v.IsNil() - case reflect.UnsafePointer: - return v.Pointer() != 0 - } - return true -} diff --git a/vendor/github.com/pluralsh/controller-reconcile-helper/reconcile-helper.go b/vendor/github.com/pluralsh/controller-reconcile-helper/reconcile-helper.go deleted file mode 100644 index 5b8fafda..00000000 --- a/vendor/github.com/pluralsh/controller-reconcile-helper/reconcile-helper.go +++ /dev/null @@ -1,2206 +0,0 @@ -package reconcile - -import ( - "context" - "reflect" - "time" - - "github.com/cenkalti/backoff" - "github.com/go-logr/logr" - - // istioNetworking "istio.io/api/networking/v1beta1" - istioNetworkingClientv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" - istioNetworkingClient "istio.io/client-go/pkg/apis/networking/v1beta1" - istioSecurityClient "istio.io/client-go/pkg/apis/security/v1beta1" - - // istioSecurity "istio.io/api/security/v1beta1" - // crossplaneAWSIdentityv1alpha1 "github.com/crossplane/provider-aws/apis/identity/v1alpha1" - ackIAM "github.com/aws-controllers-k8s/iam-controller/apis/v1alpha1" - crossplaneAWSIdentityv1beta1 "github.com/crossplane/provider-aws/apis/iam/v1beta1" - kfPodDefault "github.com/kubeflow/kubeflow/components/admission-webhook/pkg/apis/settings/v1alpha1" - platformv1alpha1 "github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1" - postgresv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - networkv1 "k8s.io/api/networking/v1" - rbacv1 "k8s.io/api/rbac/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - hncv1alpha2 "sigs.k8s.io/hierarchical-namespaces/api/v1alpha2" -) - -// Deployment reconciles a k8s deployment object. 
-func Deployment(ctx context.Context, r client.Client, deployment *appsv1.Deployment, log logr.Logger) error { - foundDeployment := &appsv1.Deployment{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: deployment.Name, Namespace: deployment.Namespace}, foundDeployment); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating Deployment", "namespace", deployment.Namespace, "name", deployment.Name) - if err := r.Create(ctx, deployment); err != nil { - log.Error(err, "Unable to create Deployment") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting Deployment") - return err - } - } - if !justCreated && CopyDeploymentFields(deployment, foundDeployment, log) { - log.Info("Updating Deployment", "namespace", deployment.Namespace, "name", deployment.Name) - if err := r.Update(ctx, foundDeployment); err != nil { - log.Error(err, "Unable to update Deployment") - return err - } - } - - return nil -} - -// Statefulset reconciles a k8s statefulset object. -func StatefulSet(ctx context.Context, r client.Client, statefulset *appsv1.StatefulSet, log logr.Logger) error { - foundStatefulset := &appsv1.StatefulSet{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: statefulset.Name, Namespace: statefulset.Namespace}, foundStatefulset); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating StatefulSet", "namespace", statefulset.Namespace, "name", statefulset.Name) - if err := r.Create(ctx, statefulset); err != nil { - log.Error(err, "Unable to create StatefulSet") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting StatefulSet") - return err - } - } - if !justCreated && CopyStatefulSetFields(statefulset, foundStatefulset, log) { - log.Info("Updating StatefulSet", "namespace", statefulset.Namespace, "name", statefulset.Name) - if err := r.Update(ctx, foundStatefulset); err != nil { - log.Error(err, "Unable to update StatefulSet") - return err - } - } - - return nil -} - -// PersistentVolumeClaim reconciles a k8s pvc object. -func PersistentVolumeClaim(ctx context.Context, r client.Client, pvc *corev1.PersistentVolumeClaim, log logr.Logger) error { - foundPVC := &corev1.PersistentVolumeClaim{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: pvc.Name, Namespace: pvc.Namespace}, foundPVC); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating PersistentVolumeClaim", "namespace", pvc.Namespace, "name", pvc.Name) - if err := r.Create(ctx, pvc); err != nil { - log.Error(err, "Unable to create PersistentVolumeClaim") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting PersistentVolumeClaim") - return err - } - } - if !justCreated && CopyPersistentVolumeClaim(pvc, foundPVC, log) { - log.Info("Updating PersistentVolumeClaim", "namespace", pvc.Namespace, "name", pvc.Name) - if err := r.Update(ctx, foundPVC); err != nil { - log.Error(err, "Unable to update PersistentVolumeClaim") - return err - } - } - - return nil -} - -// Service reconciles a k8s service object. 
-func Service(ctx context.Context, r client.Client, service *corev1.Service, log logr.Logger) error { - foundService := &corev1.Service{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: service.Name, Namespace: service.Namespace}, foundService); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating Service", "namespace", service.Namespace, "name", service.Name) - if err = r.Create(ctx, service); err != nil { - log.Error(err, "Unable to create Service") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting Service") - return err - } - } - if !justCreated && CopyServiceFields(service, foundService, log) { - log.Info("Updating Service", "namespace", service.Namespace, "name", service.Name) - if err := r.Update(ctx, foundService); err != nil { - log.Error(err, "Unable to update Service") - return err - } - } - - return nil -} - -// VirtualService reconciles an Istio virtual service object. -func VirtualService(ctx context.Context, r client.Client, virtualservice *istioNetworkingClient.VirtualService, log logr.Logger) error { - foundVirtualService := &istioNetworkingClient.VirtualService{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: virtualservice.Name, Namespace: virtualservice.Namespace}, foundVirtualService); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating VirtualService", "namespace", virtualservice.Namespace, "name", virtualservice.Name) - if err := r.Create(ctx, virtualservice); err != nil { - log.Error(err, "Unable to create VirtualService") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting VirtualService") - return err - } - } - if !justCreated && CopyVirtualService(virtualservice, foundVirtualService, log) { - log.Info("Updating VirtualService", "namespace", virtualservice.Namespace, "name", virtualservice.Name) - if err := r.Update(ctx, foundVirtualService); err != nil { - log.Error(err, "Unable to update VirtualService") - return err - } - } - return nil -} - -// Namespace reconciles a Namespace object. 
-func Namespace(ctx context.Context, r client.Client, namespace *corev1.Namespace, log logr.Logger) error { - foundNamespace := &corev1.Namespace{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: namespace.Name, Namespace: namespace.Namespace}, foundNamespace); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating Namespace", "namespace", namespace.Name) - if err = r.Create(ctx, namespace); err != nil { - // IncRequestErrorCounter("error creating namespace", SEVERITY_MAJOR) - log.Error(err, "Unable to create Namespace") - return err - } - err = backoff.Retry( - func() error { - return r.Get(ctx, types.NamespacedName{Name: namespace.Name}, foundNamespace) - }, - backoff.WithMaxRetries(backoff.NewConstantBackOff(3*time.Second), 5)) - if err != nil { - // IncRequestErrorCounter("error namespace create completion", SEVERITY_MAJOR) - log.Error(err, "Error Namespace create completion") - return err - // return r.appendErrorConditionAndReturn(ctx, namespace, - // "Owning namespace failed to create within 15 seconds") - } - log.Info("Created Namespace: "+foundNamespace.Name, "status", foundNamespace.Status.Phase) - justCreated = true - } else { - // IncRequestErrorCounter("error getting Namespace", SEVERITY_MAJOR) - log.Error(err, "Error getting Namespace") - return err - } - } - if !justCreated && CopyNamespace(namespace, foundNamespace, log) { - log.Info("Updating Namespace", "namespace", namespace.Name) - if err := r.Update(ctx, foundNamespace); err != nil { - log.Error(err, "Unable to update Namespace") - return err - } - } - - return nil -} - -// DestinationRule reconciles an Istio DestinationRule object. -func DestinationRule(ctx context.Context, r client.Client, destinationRule *istioNetworkingClient.DestinationRule, log logr.Logger) error { - foundDestinationRule := &istioNetworkingClient.DestinationRule{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: destinationRule.Name, Namespace: destinationRule.Namespace}, foundDestinationRule); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating Istio DestinationRule", "namespace", destinationRule.Namespace, "name", destinationRule.Name) - if err = r.Create(ctx, destinationRule); err != nil { - log.Error(err, "Unable to create Istio DestinationRule") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting Istio DestinationRule") - return err - } - } - if !justCreated && CopyDestinationRule(destinationRule, foundDestinationRule, log) { - log.Info("Updating Istio DestinationRule", "namespace", destinationRule.Namespace, "name", destinationRule.Name) - if err := r.Update(ctx, foundDestinationRule); err != nil { - log.Error(err, "Unable to update Istio DestinationRule") - return err - } - } - return nil -} - -// RequestAuthentication reconciles an Istio RequestAuthentication object. 
-func RequestAuthentication(ctx context.Context, r client.Client, requestAuth *istioSecurityClient.RequestAuthentication, log logr.Logger) error { - foundRequestAuth := &istioSecurityClient.RequestAuthentication{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: requestAuth.Name, Namespace: requestAuth.Namespace}, foundRequestAuth); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating Istio RequestAuthentication", "namespace", requestAuth.Namespace, "name", requestAuth.Name) - if err = r.Create(ctx, requestAuth); err != nil { - log.Error(err, "Unable to create Istio RequestAuthentication") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting Istio RequestAuthentication") - return err - } - } - if !justCreated && CopyRequestAuthentication(requestAuth, foundRequestAuth, log) { - log.Info("Updating Istio RequestAuthentication", "namespace", requestAuth.Namespace, "name", requestAuth.Name) - if err := r.Update(ctx, foundRequestAuth); err != nil { - log.Error(err, "Unable to update Istio RequestAuthentication") - return err - } - } - - return nil -} - -// AuthorizationPolicy reconciles an Istio AuthorizationPolicy object. -func AuthorizationPolicy(ctx context.Context, r client.Client, authPolicy *istioSecurityClient.AuthorizationPolicy, log logr.Logger) error { - foundAuthPolicy := &istioSecurityClient.AuthorizationPolicy{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: authPolicy.Name, Namespace: authPolicy.Namespace}, foundAuthPolicy); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating Istio AuthorizationPolicy", "namespace", authPolicy.Namespace, "name", authPolicy.Name) - if err = r.Create(ctx, authPolicy); err != nil { - log.Error(err, "Unable to create Istio AuthorizationPolicy") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting Istio AuthorizationPolicy") - return err - } - } - if !justCreated && CopyAuthorizationPolicy(authPolicy, foundAuthPolicy, log) { - log.Info("Updating Istio AuthorizationPolicy", "namespace", authPolicy.Namespace, "name", authPolicy.Name) - if err := r.Update(ctx, foundAuthPolicy); err != nil { - log.Error(err, "Unable to update Istio AuthorizationPolicy") - return err - } - } - - return nil -} - -// ServiceAccount reconciles a Service Account object. -func ServiceAccount(ctx context.Context, r client.Client, serviceAccount *corev1.ServiceAccount, log logr.Logger) error { - foundServiceAccount := &corev1.ServiceAccount{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: serviceAccount.Name, Namespace: serviceAccount.Namespace}, foundServiceAccount); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating ServiceAccount", "namespace", serviceAccount.Namespace, "name", serviceAccount.Name) - if err = r.Create(ctx, serviceAccount); err != nil { - log.Error(err, "Unable to create ServiceAccount") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting ServiceAccount") - return err - } - } - if !justCreated && CopyServiceAccount(serviceAccount, foundServiceAccount, log) { - log.Info("Updating ServiceAccount", "namespace", serviceAccount.Namespace, "name", serviceAccount.Name) - if err := r.Update(ctx, foundServiceAccount); err != nil { - log.Error(err, "Unable to update ServiceAccount") - return err - } - } - - return nil -} - -// ConfigMap reconciles a ConfigMap object. 
-func ConfigMap(ctx context.Context, r client.Client, configMap *corev1.ConfigMap, log logr.Logger) error { - foundConfigMap := &corev1.ConfigMap{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: configMap.Name, Namespace: configMap.Namespace}, foundConfigMap); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating ConfigMap", "namespace", configMap.Namespace, "name", configMap.Name) - if err = r.Create(ctx, configMap); err != nil { - log.Error(err, "Unable to create ConfigMap") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting ConfigMap") - return err - } - } - if !justCreated && CopyConfigMap(configMap, foundConfigMap, log) { - log.Info("Updating ConfigMap", "namespace", configMap.Namespace, "name", configMap.Name) - if err := r.Update(ctx, foundConfigMap); err != nil { - log.Error(err, "Unable to update ConfigMap") - return err - } - } - - return nil -} - -// RoleBinding reconciles a Role Binding object. -func RoleBinding(ctx context.Context, r client.Client, roleBinding *rbacv1.RoleBinding, log logr.Logger) error { - foundRoleBinding := &rbacv1.RoleBinding{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: roleBinding.Name, Namespace: roleBinding.Namespace}, foundRoleBinding); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating RoleBinding", "namespace", roleBinding.Namespace, "name", roleBinding.Name) - if err = r.Create(ctx, roleBinding); err != nil { - log.Error(err, "Unable to create RoleBinding") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting RoleBinding") - return err - } - } - if !justCreated && CopyRoleBinding(roleBinding, foundRoleBinding, log) { - log.Info("Updating RoleBinding", "namespace", roleBinding.Namespace, "name", roleBinding.Name) - if err := r.Update(ctx, foundRoleBinding); err != nil { - log.Error(err, "Unable to update RoleBinding") - return err - } - } - - return nil -} - -// NetworkPolicy reconciles a NetworkPolicy object. -func NetworkPolicy(ctx context.Context, r client.Client, networkPolicy *networkv1.NetworkPolicy, log logr.Logger) error { - foundNetworkPolicy := &networkv1.NetworkPolicy{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: networkPolicy.Name, Namespace: networkPolicy.Namespace}, foundNetworkPolicy); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating NetworkPolicy", "namespace", networkPolicy.Namespace, "name", networkPolicy.Name) - if err = r.Create(ctx, networkPolicy); err != nil { - log.Error(err, "Unable to create NetworkPolicy") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting NetworkPolicy") - return err - } - } - if !justCreated && CopyNetworkPolicy(networkPolicy, foundNetworkPolicy, log) { - log.Info("Updating NetworkPolicy", "namespace", networkPolicy.Namespace, "name", networkPolicy.Name) - if err := r.Update(ctx, foundNetworkPolicy); err != nil { - log.Error(err, "Unable to update NetworkPolicy") - return err - } - } - - return nil -} - -// kubeflowEnvironment reconciles a Kubeflow Environment object. 
-func KubeflowEnvironment(ctx context.Context, r client.Client, environment *platformv1alpha1.Environment, log logr.Logger) error { - foundEnvironment := &platformv1alpha1.Environment{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: environment.Name, Namespace: environment.Namespace}, foundEnvironment); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating KubeflowEnvironment", "namespace", environment.Namespace, "name", environment.Name) - if err = r.Create(ctx, environment); err != nil { - log.Error(err, "Unable to create KubeflowEnvironment") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting KubeflowEnvironment") - return err - } - } - if !justCreated && CopyKubeflowEnvironment(environment, foundEnvironment, log) { - log.Info("Updating KubeflowEnvironment", "namespace", environment.Namespace, "name", environment.Name) - if err := r.Update(ctx, foundEnvironment); err != nil { - log.Error(err, "Unable to update KubeflowEnvironment") - return err - } - } - - return nil -} - -// SubnamespaceAnchor reconciles an HNC Subnamespace object. -func SubnamespaceAnchor(ctx context.Context, r client.Client, userEnv *hncv1alpha2.SubnamespaceAnchor, log logr.Logger) error { - foundUserEnv := &hncv1alpha2.SubnamespaceAnchor{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: userEnv.Name, Namespace: userEnv.Namespace}, foundUserEnv); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating SubnamespaceAnchor", "namespace", userEnv.Namespace, "name", userEnv.Name) - if err = r.Create(ctx, userEnv); err != nil { - log.Error(err, "Unable to create SubnamespaceAnchor") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting SubnamespaceAnchor") - return err - } - } - if !justCreated && CopySubnamespaceAnchor(userEnv, foundUserEnv, log) { - log.Info("Updating SubnamespaceAnchor", "namespace", userEnv.Namespace, "name", userEnv.Name) - if err := r.Update(ctx, foundUserEnv); err != nil { - log.Error(err, "Unable to update SubnamespaceAnchor") - return err - } - } - - return nil -} - -// Postgresql reconciles a Postgresql Database object. -func Postgresql(ctx context.Context, r client.Client, postgres *postgresv1.Postgresql, log logr.Logger) error { - foundPostgresql := &postgresv1.Postgresql{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: postgres.Name, Namespace: postgres.Namespace}, foundPostgresql); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating PostgreSQL Database", "namespace", postgres.Namespace, "name", postgres.Name) - if err = r.Create(ctx, postgres); err != nil { - log.Error(err, "Unable to create PostgreSQL Database") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting PostgreSQL Database") - return err - } - } - if !justCreated && CopyPostgresql(postgres, foundPostgresql, log) { - log.Info("Updating PostgreSQL Database", "namespace", postgres.Namespace, "name", postgres.Name) - if err := r.Update(ctx, foundPostgresql); err != nil { - log.Error(err, "Unable to update PostgreSQL Database") - return err - } - } - - return nil -}
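Every helper in this file follows the same shape: fetch the object, create it if absent, otherwise diff the owned fields and update only on drift. A minimal usage sketch of calling into this package from a controller; MyAppReconciler is hypothetical and the desired Deployment Spec is elided, with reconcilehelper as an import alias for this module:

package controllers

import (
	"context"

	"github.com/go-logr/logr"
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	reconcilehelper "github.com/pluralsh/controller-reconcile-helper"
)

// MyAppReconciler is a hypothetical controller that owns a Deployment.
type MyAppReconciler struct {
	client.Client
	Log logr.Logger
}

func (r *MyAppReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	log := r.Log.WithValues("myapp", req.NamespacedName)

	// Desired state; the real Spec is elided for brevity.
	dep := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: req.Name, Namespace: req.Namespace},
	}

	// Create the Deployment if it is missing, otherwise copy the owned
	// fields and update only when they have drifted.
	if err := reconcilehelper.Deployment(ctx, r.Client, dep, log); err != nil {
		return ctrl.Result{}, err
	}
	return ctrl.Result{}, nil
}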
-// XPlaneIAMPolicy reconciles a CrossPlane IAM Policy object. -func XPlaneIAMPolicy(ctx context.Context, r client.Client, iamPolicy *crossplaneAWSIdentityv1beta1.Policy, log logr.Logger) error { - foundIAMPolicy := &crossplaneAWSIdentityv1beta1.Policy{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: iamPolicy.Name, Namespace: iamPolicy.Namespace}, foundIAMPolicy); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating CrossPlane IAM Policy", "namespace", iamPolicy.Namespace, "name", iamPolicy.Name) - if err = r.Create(ctx, iamPolicy); err != nil { - log.Error(err, "Unable to create CrossPlane IAM Policy") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting CrossPlane IAM Policy") - return err - } - } - if !justCreated && CopyXPlaneIAMPolicy(iamPolicy, foundIAMPolicy, log) { - log.Info("Updating CrossPlane IAM Policy", "namespace", iamPolicy.Namespace, "name", iamPolicy.Name) - if err := r.Update(ctx, foundIAMPolicy); err != nil { - log.Error(err, "Unable to update CrossPlane IAM Policy") - return err - } - } - - return nil -} - -// XPlaneIAMRole reconciles a CrossPlane IAM Role object. -func XPlaneIAMRole(ctx context.Context, r client.Client, iamRole *crossplaneAWSIdentityv1beta1.Role, log logr.Logger) error { - foundIAMRole := &crossplaneAWSIdentityv1beta1.Role{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: iamRole.Name, Namespace: iamRole.Namespace}, foundIAMRole); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating CrossPlane IAM Role", "namespace", iamRole.Namespace, "name", iamRole.Name) - if err = r.Create(ctx, iamRole); err != nil { - log.Error(err, "Unable to create CrossPlane IAM Role") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting CrossPlane IAM Role") - return err - } - } - if !justCreated && CopyXPlaneIAMRole(iamRole, foundIAMRole, log) { - log.Info("Updating CrossPlane IAM Role", "namespace", iamRole.Namespace, "name", iamRole.Name) - if err := r.Update(ctx, foundIAMRole); err != nil { - log.Error(err, "Unable to update CrossPlane IAM Role") - return err - } - } - - return nil -} - -// XPlaneIAMUser reconciles a CrossPlane IAM User object. -func XPlaneIAMUser(ctx context.Context, r client.Client, iamUser *crossplaneAWSIdentityv1beta1.User, log logr.Logger) error { - foundIAMUser := &crossplaneAWSIdentityv1beta1.User{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: iamUser.Name, Namespace: iamUser.Namespace}, foundIAMUser); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating CrossPlane IAM User", "namespace", iamUser.Namespace, "name", iamUser.Name) - if err = r.Create(ctx, iamUser); err != nil { - log.Error(err, "Unable to create CrossPlane IAM User") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting CrossPlane IAM User") - return err - } - } - if !justCreated && CopyXPlaneIAMUser(iamUser, foundIAMUser, log) { - log.Info("Updating CrossPlane IAM User", "namespace", iamUser.Namespace, "name", iamUser.Name) - if err := r.Update(ctx, foundIAMUser); err != nil { - log.Error(err, "Unable to update CrossPlane IAM User") - return err - } - } - - return nil -} - -// XPlaneIAMRolePolicyAttachement reconciles a CrossPlane IAM Role Policy Attachment object. 
-func XPlaneIAMRolePolicyAttachement(ctx context.Context, r client.Client, iamRolePolicyAttachement *crossplaneAWSIdentityv1beta1.RolePolicyAttachment, log logr.Logger) error { - foundRolePolicyAttachement := &crossplaneAWSIdentityv1beta1.RolePolicyAttachment{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: iamRolePolicyAttachement.Name, Namespace: iamRolePolicyAttachement.Namespace}, foundRolePolicyAttachement); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating CrossPlane IAM Role Policy Attachment", "namespace", iamRolePolicyAttachement.Namespace, "name", iamRolePolicyAttachement.Name) - if err = r.Create(ctx, iamRolePolicyAttachement); err != nil { - log.Error(err, "Unable to create CrossPlane IAM Role Policy Attachment") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting CrossPlane IAM Role Policy Attachment") - return err - } - } - if !justCreated && CopyXPlaneIAMRolePolicyAttachement(iamRolePolicyAttachement, foundRolePolicyAttachement, log) { - log.Info("Updating CrossPlane IAM Role Policy Attachment", "namespace", iamRolePolicyAttachement.Namespace, "name", iamRolePolicyAttachement.Name) - if err := r.Update(ctx, foundRolePolicyAttachement); err != nil { - log.Error(err, "Unable to update CrossPlane IAM Role Policy Attachment") - return err - } - } - - return nil -} - -// XPlaneIAMUserPolicyAttachement reconciles a CrossPlane IAM User Policy Attachment object. -func XPlaneIAMUserPolicyAttachement(ctx context.Context, r client.Client, iamUserPolicyAttachement *crossplaneAWSIdentityv1beta1.UserPolicyAttachment, log logr.Logger) error { - foundUserPolicyAttachement := &crossplaneAWSIdentityv1beta1.UserPolicyAttachment{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: iamUserPolicyAttachement.Name, Namespace: iamUserPolicyAttachement.Namespace}, foundUserPolicyAttachement); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating CrossPlane IAM User Policy Attachment", "namespace", iamUserPolicyAttachement.Namespace, "name", iamUserPolicyAttachement.Name) - if err = r.Create(ctx, iamUserPolicyAttachement); err != nil { - log.Error(err, "Unable to create CrossPlane IAM User Policy Attachment") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting CrossPlane IAM User Policy Attachment") - return err - } - } - if !justCreated && CopyXPlaneIAMUserPolicyAttachement(iamUserPolicyAttachement, foundUserPolicyAttachement, log) { - log.Info("Updating CrossPlane IAM User Policy Attachment", "namespace", iamUserPolicyAttachement.Namespace, "name", iamUserPolicyAttachement.Name) - if err := r.Update(ctx, foundUserPolicyAttachement); err != nil { - log.Error(err, "Unable to update CrossPlane IAM User Policy Attachment") - return err - } - } - - return nil -}
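Each of these functions is a hand-rolled version of the create-or-update flow that controller-runtime also ships generically. A sketch of the equivalent idiom using controllerutil.CreateOrUpdate (not what this file does, just the analogous pattern; signature per recent controller-runtime releases):

package controllers

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// createOrUpdateDeployment expresses the same fetch/create/update flow with
// controller-runtime's generic helper; the mutate callback overwrites only
// the owned fields, which is what the Copy*Fields helpers below do by hand.
func createOrUpdateDeployment(ctx context.Context, c client.Client, desired *appsv1.Deployment) error {
	existing := &appsv1.Deployment{}
	existing.Name = desired.Name
	existing.Namespace = desired.Namespace

	_, err := controllerutil.CreateOrUpdate(ctx, c, existing, func() error {
		existing.Labels = desired.Labels
		existing.Spec.Replicas = desired.Spec.Replicas
		existing.Spec.Template = desired.Spec.Template
		return nil
	})
	return err
}

Copying whole objects back would stomp server-populated fields (this file itself notes the Service clusterIP case), which is why both variants whitelist the fields they own.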
-// XPlaneIAMAccessKey reconciles a CrossPlane IAM Access Key. -func XPlaneIAMAccessKey(ctx context.Context, r client.Client, iamAccessKey *crossplaneAWSIdentityv1beta1.AccessKey, log logr.Logger) error { - foundIAMAccessKey := &crossplaneAWSIdentityv1beta1.AccessKey{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: iamAccessKey.Name, Namespace: iamAccessKey.Namespace}, foundIAMAccessKey); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating CrossPlane IAM Access Key", "namespace", iamAccessKey.Namespace, "name", iamAccessKey.Name) - if err = r.Create(ctx, iamAccessKey); err != nil { - log.Error(err, "Unable to create CrossPlane IAM Access Key") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting CrossPlane IAM Access Key") - return err - } - } - if !justCreated && CopyXPlaneIAMAccessKey(iamAccessKey, foundIAMAccessKey, log) { - log.Info("Updating CrossPlane IAM Access Key", "namespace", iamAccessKey.Namespace, "name", iamAccessKey.Name) - if err := r.Update(ctx, foundIAMAccessKey); err != nil { - log.Error(err, "Unable to update CrossPlane IAM Access Key") - return err - } - } - - return nil -} - -// ACKIAMPolicy reconciles an ACK IAM Policy object. -func ACKIAMPolicy(ctx context.Context, r client.Client, iamPolicy *ackIAM.Policy, log logr.Logger) error { - foundIAMPolicy := &ackIAM.Policy{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: iamPolicy.Name, Namespace: iamPolicy.Namespace}, foundIAMPolicy); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating ACK IAM Policy", "namespace", iamPolicy.Namespace, "name", iamPolicy.Name) - if err = r.Create(ctx, iamPolicy); err != nil { - log.Error(err, "Unable to create ACK IAM Policy") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting ACK IAM Policy") - return err - } - } - if !justCreated && CopyACKIAMPolicy(iamPolicy, foundIAMPolicy, log) { - log.Info("Updating ACK IAM Policy", "namespace", iamPolicy.Namespace, "name", iamPolicy.Name) - if err := r.Update(ctx, foundIAMPolicy); err != nil { - log.Error(err, "Unable to update ACK IAM Policy") - return err - } - } - - return nil -} - -// ACKIAMRole reconciles an ACK IAM Role object. -func ACKIAMRole(ctx context.Context, r client.Client, iamRole *ackIAM.Role, log logr.Logger) error { - foundIAMRole := &ackIAM.Role{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: iamRole.Name, Namespace: iamRole.Namespace}, foundIAMRole); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating ACK IAM Role", "namespace", iamRole.Namespace, "name", iamRole.Name) - if err = r.Create(ctx, iamRole); err != nil { - log.Error(err, "Unable to create ACK IAM Role") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting ACK IAM Role") - return err - } - } - if !justCreated && CopyACKIAMRole(iamRole, foundIAMRole, log) { - log.Info("Updating ACK IAM Role", "namespace", iamRole.Namespace, "name", iamRole.Name) - if err := r.Update(ctx, foundIAMRole); err != nil { - log.Error(err, "Unable to update ACK IAM Role") - return err - } - } - - return nil -}
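The Copy* helpers referenced above and defined further down in this file (CopyStatefulSetFields, CopyDeploymentFields, CopyServiceFields, and friends) all detect label and annotation drift with the same map-diff idiom. Distilled into one illustrative function, purely as a sketch of that idiom (copyMapField is our name, not part of this library):

package reconcile

import "github.com/go-logr/logr"

// copyMapField distills the label/annotation handling shared by the
// Copy*Fields helpers: flag a diff when an existing key carries the wrong
// value, or when the desired map is non-empty but the existing one is
// empty, then overwrite the existing map wholesale.
func copyMapField(kind, field string, from map[string]string, to *map[string]string, log logr.Logger) bool {
	requireUpdate := false
	for k, v := range *to {
		if from[k] != v {
			requireUpdate = true
		}
	}
	if len(*to) == 0 && len(from) != 0 {
		requireUpdate = true
	}
	if requireUpdate {
		log.V(1).Info("reconciling due to map change", "kind", kind, "field", field)
		log.V(2).Info("difference in map", "kind", kind, "field", field, "wanted", from, "existing", *to)
	}
	*to = from
	return requireUpdate
}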
-// PeerAuthentication reconciles an Istio PeerAuthentication object. -func PeerAuthentication(ctx context.Context, r client.Client, peerAuthentication *istioSecurityClient.PeerAuthentication, log logr.Logger) error { - foundPeerAuthentication := &istioSecurityClient.PeerAuthentication{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: peerAuthentication.Name, Namespace: peerAuthentication.Namespace}, foundPeerAuthentication); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating Istio PeerAuthentication", "namespace", peerAuthentication.Namespace, "name", peerAuthentication.Name) - if err = r.Create(ctx, peerAuthentication); err != nil { - log.Error(err, "Unable to create Istio PeerAuthentication") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting Istio PeerAuthentication") - return err - } - } - if !justCreated && CopyPeerAuthentication(peerAuthentication, foundPeerAuthentication, log) { - log.Info("Updating Istio PeerAuthentication", "namespace", peerAuthentication.Namespace, "name", peerAuthentication.Name) - if err := r.Update(ctx, foundPeerAuthentication); err != nil { - log.Error(err, "Unable to update Istio PeerAuthentication") - return err - } - } - - return nil -} - -// EnvoyFilter reconciles an Istio EnvoyFilter object. -func EnvoyFilter(ctx context.Context, r client.Client, envoyFilter *istioNetworkingClientv1alpha3.EnvoyFilter, log logr.Logger) error { - foundEnvoyFilter := &istioNetworkingClientv1alpha3.EnvoyFilter{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: envoyFilter.Name, Namespace: envoyFilter.Namespace}, foundEnvoyFilter); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating Istio EnvoyFilter", "namespace", envoyFilter.Namespace, "name", envoyFilter.Name) - if err = r.Create(ctx, envoyFilter); err != nil { - log.Error(err, "Unable to create Istio EnvoyFilter") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting Istio EnvoyFilter") - return err - } - } - if !justCreated && CopyEnvoyFilter(envoyFilter, foundEnvoyFilter, log) { - log.Info("Updating Istio EnvoyFilter", "namespace", envoyFilter.Namespace, "name", envoyFilter.Name) - if err := r.Update(ctx, foundEnvoyFilter); err != nil { - log.Error(err, "Unable to update Istio EnvoyFilter") - return err - } - } - - return nil -} - -// PodDefault reconciles a Kubeflow PodDefault object. 
-func PodDefault(ctx context.Context, r client.Client, podDefault *kfPodDefault.PodDefault, log logr.Logger) error { - foundPodDefault := &kfPodDefault.PodDefault{} - justCreated := false - if err := r.Get(ctx, types.NamespacedName{Name: podDefault.Name, Namespace: podDefault.Namespace}, foundPodDefault); err != nil { - if apierrs.IsNotFound(err) { - log.Info("Creating Kubeflow PodDefault", "namespace", podDefault.Namespace, "name", podDefault.Name) - if err = r.Create(ctx, podDefault); err != nil { - log.Error(err, "Unable to create Kubeflow PodDefault") - return err - } - justCreated = true - } else { - log.Error(err, "Error getting Kubeflow PodDefault") - return err - } - } - if !justCreated && CopyPodDefault(podDefault, foundPodDefault, log) { - log.Info("Updating Kubeflow PodDefault", "namespace", podDefault.Namespace, "name", podDefault.Name) - if err := r.Update(ctx, foundPodDefault); err != nil { - log.Error(err, "Unable to update Kubeflow PodDefault") - return err - } - } - - return nil -} - -// Reference: https://github.com/pwittrock/kubebuilder-workshop/blob/master/pkg/util/util.go - -// CopyStatefulSetFields copies the owned fields from one StatefulSet to another -// Returns true if the fields copied from don't match to. -func CopyStatefulSetFields(from, to *appsv1.StatefulSet, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling StatefulSet due to label change") - log.V(2).Info("difference in StatefulSet labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling StatefulSet due to label change") - log.V(2).Info("difference in StatefulSet labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling StatefulSet due to annotation change") - log.V(2).Info("difference in StatefulSet annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling StatefulSet due to annotation change") - log.V(2).Info("difference in StatefulSet annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = from.Annotations - - if !reflect.DeepEqual(to.Spec.Replicas, from.Spec.Replicas) { - log.V(1).Info("reconciling StatefulSet due to replica change") - log.V(2).Info("difference in StatefulSet replicas", "wanted", from.Spec.Replicas, "existing", to.Spec.Replicas) - requireUpdate = true - } - to.Spec.Replicas = from.Spec.Replicas - - for k, v := range to.Spec.Template.Labels { - if from.Spec.Template.Labels[k] != v { - log.V(1).Info("reconciling StatefulSet due to template label change") - log.V(2).Info("difference in StatefulSet template labels", "wanted", from.Spec.Template.Labels, "existing", to.Spec.Template.Labels) - requireUpdate = true - } - } - if len(to.Spec.Template.Labels) == 0 && len(from.Spec.Template.Labels) != 0 { - log.V(1).Info("reconciling StatefulSet due to template label change") - log.V(2).Info("difference in StatefulSet template labels", "wanted", from.Spec.Template.Labels, "existing", to.Spec.Template.Labels) - requireUpdate = true - } - to.Spec.Template.Labels = from.Spec.Template.Labels - - for k, v := range to.Spec.Template.Annotations { - if 
from.Spec.Template.Annotations[k] != v { - log.V(1).Info("reconciling StatefulSet due to template annotation change") - log.V(2).Info("difference in StatefulSet template annotations", "wanted", from.Spec.Template.Annotations, "existing", to.Spec.Template.Annotations) - requireUpdate = true - } - } - if len(to.Spec.Template.Annotations) == 0 && len(from.Spec.Template.Annotations) != 0 { - log.V(1).Info("reconciling StatefulSet due to template annotation change") - log.V(2).Info("difference in StatefulSet template annotations", "wanted", from.Spec.Template.Annotations, "existing", to.Spec.Template.Annotations) - requireUpdate = true - } - to.Spec.Template.Annotations = from.Spec.Template.Annotations - - if !reflect.DeepEqual(to.Spec.Template.Spec.Volumes, from.Spec.Template.Spec.Volumes) { - log.V(1).Info("reconciling StatefulSet due to volumes change") - log.V(2).Info("difference in StatefulSet volumes", "wanted", from.Spec.Template.Spec.Volumes, "existing", to.Spec.Template.Spec.Volumes) - requireUpdate = true - } - to.Spec.Template.Spec.Volumes = from.Spec.Template.Spec.Volumes - - if !reflect.DeepEqual(to.Spec.Template.Spec.ServiceAccountName, from.Spec.Template.Spec.ServiceAccountName) { - log.V(1).Info("reconciling StatefulSet due to service account name change") - log.V(2).Info("difference in StatefulSet service account name", "wanted", from.Spec.Template.Spec.ServiceAccountName, "existing", to.Spec.Template.Spec.ServiceAccountName) - requireUpdate = true - } - to.Spec.Template.Spec.ServiceAccountName = from.Spec.Template.Spec.ServiceAccountName - - if !reflect.DeepEqual(to.Spec.Template.Spec.SecurityContext, from.Spec.Template.Spec.SecurityContext) { - log.V(1).Info("reconciling StatefulSet due to security context change") - log.V(2).Info("difference in StatefulSet security context", "wanted", from.Spec.Template.Spec.SecurityContext, "existing", to.Spec.Template.Spec.SecurityContext) - requireUpdate = true - } - to.Spec.Template.Spec.SecurityContext = from.Spec.Template.Spec.SecurityContext - - if !reflect.DeepEqual(to.Spec.Template.Spec.Affinity, from.Spec.Template.Spec.Affinity) { - log.V(1).Info("reconciling StatefulSet due to affinity change") - log.V(2).Info("difference in StatefulSet affinity", "wanted", from.Spec.Template.Spec.Affinity, "existing", to.Spec.Template.Spec.Affinity) - requireUpdate = true - } - to.Spec.Template.Spec.Affinity = from.Spec.Template.Spec.Affinity - - if !reflect.DeepEqual(to.Spec.Template.Spec.Tolerations, from.Spec.Template.Spec.Tolerations) { - log.V(1).Info("reconciling StatefulSet due to toleration change") - log.V(2).Info("difference in StatefulSet tolerations", "wanted", from.Spec.Template.Spec.Tolerations, "existing", to.Spec.Template.Spec.Tolerations) - requireUpdate = true - } - to.Spec.Template.Spec.Tolerations = from.Spec.Template.Spec.Tolerations - - if !reflect.DeepEqual(to.Spec.Template.Spec.Containers[0].Name, from.Spec.Template.Spec.Containers[0].Name) { - log.V(1).Info("reconciling StatefulSet due to container[0] name change") - log.V(2).Info("difference in StatefulSet container[0] name", "wanted", from.Spec.Template.Spec.Containers[0].Name, "existing", to.Spec.Template.Spec.Containers[0].Name) - requireUpdate = true - } - to.Spec.Template.Spec.Containers[0].Name = from.Spec.Template.Spec.Containers[0].Name - - if !reflect.DeepEqual(to.Spec.Template.Spec.Containers[0].Image, from.Spec.Template.Spec.Containers[0].Image) { - log.V(1).Info("reconciling StatefulSet due to container[0] image change") - log.V(2).Info("difference in 
StatefulSet container[0] image", "wanted", from.Spec.Template.Spec.Containers[0].Image, "existing", to.Spec.Template.Spec.Containers[0].Image) - requireUpdate = true - } - to.Spec.Template.Spec.Containers[0].Image = from.Spec.Template.Spec.Containers[0].Image - - if !reflect.DeepEqual(to.Spec.Template.Spec.Containers[0].WorkingDir, from.Spec.Template.Spec.Containers[0].WorkingDir) { - log.V(1).Info("reconciling StatefulSet due to container[0] working dir change") - log.V(2).Info("difference in StatefulSet container[0] working dir", "wanted", from.Spec.Template.Spec.Containers[0].WorkingDir, "existing", to.Spec.Template.Spec.Containers[0].WorkingDir) - requireUpdate = true - } - to.Spec.Template.Spec.Containers[0].WorkingDir = from.Spec.Template.Spec.Containers[0].WorkingDir - - if !reflect.DeepEqual(to.Spec.Template.Spec.Containers[0].Ports, from.Spec.Template.Spec.Containers[0].Ports) { - log.V(1).Info("reconciling StatefulSet due to container[0] port change") - log.V(2).Info("difference in StatefulSet container[0] ports", "wanted", from.Spec.Template.Spec.Containers[0].Ports, "existing", to.Spec.Template.Spec.Containers[0].Ports) - - requireUpdate = true - } - to.Spec.Template.Spec.Containers[0].Ports = from.Spec.Template.Spec.Containers[0].Ports - - if !reflect.DeepEqual(to.Spec.Template.Spec.Containers[0].Env, from.Spec.Template.Spec.Containers[0].Env) { - log.V(1).Info("reconciling StatefulSet due to container[0] env change") - log.V(2).Info("difference in StatefulSet container[0] env", "wanted", from.Spec.Template.Spec.Containers[0].Env, "existing", to.Spec.Template.Spec.Containers[0].Env) - requireUpdate = true - } - to.Spec.Template.Spec.Containers[0].Env = from.Spec.Template.Spec.Containers[0].Env - - if !reflect.DeepEqual(to.Spec.Template.Spec.Containers[0].EnvFrom, from.Spec.Template.Spec.Containers[0].EnvFrom) { - log.V(1).Info("reconciling StatefulSet due to container[0] EnvFrom change") - log.V(2).Info("difference in StatefulSet container[0] EnvFrom", "wanted", from.Spec.Template.Spec.Containers[0].EnvFrom, "existing", to.Spec.Template.Spec.Containers[0].EnvFrom) - requireUpdate = true - } - to.Spec.Template.Spec.Containers[0].EnvFrom = from.Spec.Template.Spec.Containers[0].EnvFrom - - if !reflect.DeepEqual(to.Spec.Template.Spec.Containers[0].Resources, from.Spec.Template.Spec.Containers[0].Resources) { - log.V(1).Info("reconciling StatefulSet due to container[0] resource change") - log.V(2).Info("difference in StatefulSet container[0] resources", "wanted", from.Spec.Template.Spec.Containers[0].Resources, "existing", to.Spec.Template.Spec.Containers[0].Resources) - requireUpdate = true - } - to.Spec.Template.Spec.Containers[0].Resources = from.Spec.Template.Spec.Containers[0].Resources - - if !reflect.DeepEqual(to.Spec.Template.Spec.Containers[0].VolumeMounts, from.Spec.Template.Spec.Containers[0].VolumeMounts) { - log.V(1).Info("reconciling StatefulSet due to container[0] VolumeMounts change") - log.V(2).Info("difference in StatefulSet container[0] VolumeMounts", "wanted", from.Spec.Template.Spec.Containers[0].VolumeMounts, "existing", to.Spec.Template.Spec.Containers[0].VolumeMounts) - requireUpdate = true - } - to.Spec.Template.Spec.Containers[0].VolumeMounts = from.Spec.Template.Spec.Containers[0].VolumeMounts - - return requireUpdate -} - -func CopyDeploymentFields(from, to *appsv1.Deployment, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling Deployment due to label change") - 
log.V(2).Info("difference in Deployment labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling Deployment due to label change") - log.V(2).Info("difference in Deployment labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - // for k, v := range to.Annotations { - // if from.Annotations[k] != v { - // log.V(1).Info("reconciling Deployment due to annotation change") - // log.V(2).Info("difference in Deployment annotations", "wanted", from.Annotations, "existing", to.Annotations) - // requireUpdate = true - // } - // } - // if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - // log.V(1).Info("reconciling Deployment due to annotation change") - // log.V(2).Info("difference in Deployment annotations", "wanted", from.Annotations, "existing", to.Annotations) - // requireUpdate = true - // } - // to.Annotations = from.Annotations - - if !reflect.DeepEqual(to.Spec.Replicas, from.Spec.Replicas) { - log.V(1).Info("reconciling Deployment due to replica change") - log.V(2).Info("difference in Deployment replicas", "wanted", from.Spec.Replicas, "existing", to.Spec.Replicas) - requireUpdate = true - } - to.Spec.Replicas = from.Spec.Replicas - - for k, v := range to.Spec.Template.Labels { - if from.Spec.Template.Labels[k] != v { - log.V(1).Info("reconciling Deployment due to template label change") - log.V(2).Info("difference in Deployment template labels", "wanted", from.Spec.Template.Labels, "existing", to.Spec.Template.Labels) - requireUpdate = true - } - } - if len(to.Spec.Template.Labels) == 0 && len(from.Spec.Template.Labels) != 0 { - log.V(1).Info("reconciling Deployment due to template label change") - log.V(2).Info("difference in Deployment template labels", "wanted", from.Spec.Template.Labels, "existing", to.Spec.Template.Labels) - requireUpdate = true - } - to.Spec.Template.Labels = from.Spec.Template.Labels - - for k, v := range to.Spec.Template.Annotations { - if from.Spec.Template.Annotations[k] != v { - log.V(1).Info("reconciling Deployment due to template annotation change") - log.V(2).Info("difference in Deployment template annotations", "wanted", from.Spec.Template.Annotations, "existing", to.Spec.Template.Annotations) - requireUpdate = true - } - } - if len(to.Spec.Template.Annotations) == 0 && len(from.Spec.Template.Annotations) != 0 { - log.V(1).Info("reconciling Deployment due to template annotation change") - log.V(2).Info("difference in Deployment template annotations", "wanted", from.Spec.Template.Annotations, "existing", to.Spec.Template.Annotations) - requireUpdate = true - } - to.Spec.Template.Annotations = from.Spec.Template.Annotations - - if !reflect.DeepEqual(to.Spec.Template.Spec.Volumes, from.Spec.Template.Spec.Volumes) { - log.V(1).Info("reconciling Deployment due to volumes change") - log.V(2).Info("difference in Deployment volumes", "wanted", from.Spec.Template.Spec.Volumes, "existing", to.Spec.Template.Spec.Volumes) - requireUpdate = true - } - to.Spec.Template.Spec.Volumes = from.Spec.Template.Spec.Volumes - - if !reflect.DeepEqual(to.Spec.Template.Spec.ServiceAccountName, from.Spec.Template.Spec.ServiceAccountName) { - log.V(1).Info("reconciling Deployment due to service account name change") - log.V(2).Info("difference in Deployment service account name", "wanted", from.Spec.Template.Spec.ServiceAccountName, "existing", to.Spec.Template.Spec.ServiceAccountName) - requireUpdate = true - } 
- to.Spec.Template.Spec.ServiceAccountName = from.Spec.Template.Spec.ServiceAccountName - - if !reflect.DeepEqual(to.Spec.Template.Spec.SecurityContext, from.Spec.Template.Spec.SecurityContext) { - log.V(1).Info("reconciling Deployment due to security context change") - log.V(2).Info("difference in Deployment security context", "wanted", from.Spec.Template.Spec.SecurityContext, "existing", to.Spec.Template.Spec.SecurityContext) - requireUpdate = true - } - to.Spec.Template.Spec.SecurityContext = from.Spec.Template.Spec.SecurityContext - - if !reflect.DeepEqual(to.Spec.Template.Spec.Affinity, from.Spec.Template.Spec.Affinity) { - log.V(1).Info("reconciling Deployment due to affinity change") - log.V(2).Info("difference in Deployment affinity", "wanted", from.Spec.Template.Spec.Affinity, "existing", to.Spec.Template.Spec.Affinity) - requireUpdate = true - } - to.Spec.Template.Spec.Affinity = from.Spec.Template.Spec.Affinity - - if !reflect.DeepEqual(to.Spec.Template.Spec.Tolerations, from.Spec.Template.Spec.Tolerations) { - log.V(1).Info("reconciling Deployment due to toleration change") - log.V(2).Info("difference in Deployment tolerations", "wanted", from.Spec.Template.Spec.Tolerations, "existing", to.Spec.Template.Spec.Tolerations) - requireUpdate = true - } - to.Spec.Template.Spec.Tolerations = from.Spec.Template.Spec.Tolerations - - if !reflect.DeepEqual(to.Spec.Template.Spec.Containers[0].Name, from.Spec.Template.Spec.Containers[0].Name) { - log.V(1).Info("reconciling Deployment due to container[0] name change") - log.V(2).Info("difference in Deployment container[0] name", "wanted", from.Spec.Template.Spec.Containers[0].Name, "existing", to.Spec.Template.Spec.Containers[0].Name) - requireUpdate = true - } - to.Spec.Template.Spec.Containers[0].Name = from.Spec.Template.Spec.Containers[0].Name - - if !reflect.DeepEqual(to.Spec.Template.Spec.Containers[0].Image, from.Spec.Template.Spec.Containers[0].Image) { - log.V(1).Info("reconciling Deployment due to container[0] image change") - log.V(2).Info("difference in Deployment container[0] image", "wanted", from.Spec.Template.Spec.Containers[0].Image, "existing", to.Spec.Template.Spec.Containers[0].Image) - requireUpdate = true - } - to.Spec.Template.Spec.Containers[0].Image = from.Spec.Template.Spec.Containers[0].Image - - if !reflect.DeepEqual(to.Spec.Template.Spec.Containers[0].WorkingDir, from.Spec.Template.Spec.Containers[0].WorkingDir) { - log.V(1).Info("reconciling Deployment due to container[0] working dir change") - log.V(2).Info("difference in Deployment container[0] working dir", "wanted", from.Spec.Template.Spec.Containers[0].WorkingDir, "existing", to.Spec.Template.Spec.Containers[0].WorkingDir) - requireUpdate = true - } - to.Spec.Template.Spec.Containers[0].WorkingDir = from.Spec.Template.Spec.Containers[0].WorkingDir - - if !reflect.DeepEqual(to.Spec.Template.Spec.Containers[0].Ports, from.Spec.Template.Spec.Containers[0].Ports) { - log.V(1).Info("reconciling Deployment due to container[0] port change") - log.V(2).Info("difference in Deployment container[0] ports", "wanted", from.Spec.Template.Spec.Containers[0].Ports, "existing", to.Spec.Template.Spec.Containers[0].Ports) - - requireUpdate = true - } - to.Spec.Template.Spec.Containers[0].Ports = from.Spec.Template.Spec.Containers[0].Ports - - if !reflect.DeepEqual(to.Spec.Template.Spec.Containers[0].Env, from.Spec.Template.Spec.Containers[0].Env) { - log.V(1).Info("reconciling Deployment due to container[0] env change") - log.V(2).Info("difference in Deployment container[0] 
env", "wanted", from.Spec.Template.Spec.Containers[0].Env, "existing", to.Spec.Template.Spec.Containers[0].Env) - requireUpdate = true - } - to.Spec.Template.Spec.Containers[0].Env = from.Spec.Template.Spec.Containers[0].Env - - if !reflect.DeepEqual(to.Spec.Template.Spec.Containers[0].EnvFrom, from.Spec.Template.Spec.Containers[0].EnvFrom) { - log.V(1).Info("reconciling Deployment due to container[0] EnvFrom change") - log.V(2).Info("difference in Deployment container[0] EnvFrom", "wanted", from.Spec.Template.Spec.Containers[0].EnvFrom, "existing", to.Spec.Template.Spec.Containers[0].EnvFrom) - requireUpdate = true - } - to.Spec.Template.Spec.Containers[0].EnvFrom = from.Spec.Template.Spec.Containers[0].EnvFrom - - if !reflect.DeepEqual(to.Spec.Template.Spec.Containers[0].Resources, from.Spec.Template.Spec.Containers[0].Resources) { - log.V(1).Info("reconciling Deployment due to container[0] resource change") - log.V(2).Info("difference in Deployment container[0] resources", "wanted", from.Spec.Template.Spec.Containers[0].Resources, "existing", to.Spec.Template.Spec.Containers[0].Resources) - requireUpdate = true - } - to.Spec.Template.Spec.Containers[0].Resources = from.Spec.Template.Spec.Containers[0].Resources - - if !reflect.DeepEqual(to.Spec.Template.Spec.Containers[0].VolumeMounts, from.Spec.Template.Spec.Containers[0].VolumeMounts) { - log.V(1).Info("reconciling Deployment due to container[0] VolumeMounts change") - log.V(2).Info("difference in Deployment container[0] VolumeMounts", "wanted", from.Spec.Template.Spec.Containers[0].VolumeMounts, "existing", to.Spec.Template.Spec.Containers[0].VolumeMounts) - requireUpdate = true - } - to.Spec.Template.Spec.Containers[0].VolumeMounts = from.Spec.Template.Spec.Containers[0].VolumeMounts - - return requireUpdate -} - -// CopyServiceFields copies the owned fields from one Service to another -func CopyServiceFields(from, to *corev1.Service, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling service due to label change") - log.V(2).Info("difference in service labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling service due to label change") - log.V(2).Info("difference in service labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling service due to annotation change") - log.V(2).Info("difference in service annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling service due to annotation change") - log.V(2).Info("difference in service annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = from.Annotations - - // Don't copy the entire Spec, because we can't overwrite the clusterIp field - if !reflect.DeepEqual(to.Spec.Selector, from.Spec.Selector) { - log.V(1).Info("reconciling service due to selector change") - log.V(2).Info("difference in service selector", "wanted", from.Spec.Selector, "existing", to.Spec.Selector) - requireUpdate = true - } - to.Spec.Selector = from.Spec.Selector - - if !reflect.DeepEqual(to.Spec.Ports, from.Spec.Ports) { - log.V(1).Info("reconciling service due to ports change") - 
log.V(2).Info("difference in service ports", "wanted", from.Spec.Ports, "existing", to.Spec.Ports) - requireUpdate = true - } - to.Spec.Ports = from.Spec.Ports - - return requireUpdate -} - -// Copy configuration related fields to another instance and returns true if there -// is a diff and thus needs to update. -func CopyVirtualService(from, to *istioNetworkingClient.VirtualService, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling virtualservice due to label change") - log.V(2).Info("difference in virtualservice labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling virtualservice due to label change") - log.V(2).Info("difference in virtualservice labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling virtualservice due to annotations change") - log.V(2).Info("difference in virtualservice annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling virtualservice due to annotations change") - log.V(2).Info("difference in virtualservice annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = from.Annotations - - if !reflect.DeepEqual(to.Spec, from.Spec) { - log.V(1).Info("reconciling virtualservice due to spec change") - log.V(2).Info("difference in virtualservice spec", "wanted", from.Spec, "exising", to.Spec) - requireUpdate = true - } - to.Spec = from.Spec - - return requireUpdate -} - -// CopyAuthorizationPolicy copies the owned fields from one AuthorizationPolicy to another -func CopyAuthorizationPolicy(from, to *istioSecurityClient.AuthorizationPolicy, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling AuthorizationPolicy due to label change") - log.V(2).Info("difference in AuthorizationPolicy labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling AuthorizationPolicy due to label change") - log.V(2).Info("difference in AuthorizationPolicy labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling AuthorizationPolicy due to annotation change") - log.V(2).Info("difference in AuthorizationPolicy annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling AuthorizationPolicy due to annotation change") - log.V(2).Info("difference in AuthorizationPolicy annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = from.Annotations - - // Don't copy the entire Spec, because we this can lead to unnecessary reconciles - - if !reflect.DeepEqual(to.Spec.Selector, from.Spec.Selector) { - log.V(1).Info("reconciling AuthorizationPolicy due to selector change") - log.V(2).Info("difference in AuthorizationPolicy selector", "wanted", 
from.Spec.Selector, "existing", to.Spec.Selector) - requireUpdate = true - } - to.Spec.Selector = from.Spec.Selector - - if !reflect.DeepEqual(to.Spec.Action, from.Spec.Action) { - log.V(1).Info("reconciling AuthorizationPolicy due to action change") - log.V(2).Info("difference in AuthorizationPolicy action", "wanted", from.Spec.Action, "existing", to.Spec.Action) - requireUpdate = true - } - to.Spec.Action = from.Spec.Action - - if !reflect.DeepEqual(to.Spec.Rules, from.Spec.Rules) { - log.V(1).Info("reconciling AuthorizationPolicy due to rule change") - log.V(2).Info("difference in AuthorizationPolicy rules", "wanted", from.Spec.Rules, "existing", to.Spec.Rules) - requireUpdate = true - } - to.Spec.Rules = from.Spec.Rules - - return requireUpdate -} - -// CopyDestinationRule copies the owned fields from one DestinationRule to another -func CopyDestinationRule(from, to *istioNetworkingClient.DestinationRule, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling DestinationRule due to label change") - log.V(2).Info("difference in DestinationRule labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling DestinationRule due to label change") - log.V(2).Info("difference in DestinationRule labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling DestinationRule due to annotation change") - log.V(2).Info("difference in DestinationRule annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling DestinationRule due to annotation change") - log.V(2).Info("difference in DestinationRule annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = from.Annotations - - if !reflect.DeepEqual(to.Spec, from.Spec) { - log.V(1).Info("reconciling DestinationRule due to Spec change") - log.V(2).Info("difference in DestinationRule Specs", "wanted", from.Spec, "existing", to.Spec) - requireUpdate = true - } - to.Spec = from.Spec - - return requireUpdate -} - -// CopyRequestAuthentication copies the owned fields from one RequestAuthentication to another -func CopyRequestAuthentication(from, to *istioSecurityClient.RequestAuthentication, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling RequestAuthentication due to label change") - log.V(2).Info("difference in RequestAuthentication labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling RequestAuthentication due to label change") - log.V(2).Info("difference in RequestAuthentication labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling RequestAuthentication due to annotation change") - log.V(2).Info("difference in RequestAuthentication annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - 
log.V(1).Info("reconciling RequestAuthentication due to annotation change") - log.V(2).Info("difference in RequestAuthentication annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = from.Annotations - - // Don't copy the entire Spec, because we this can lead to unnecessary reconciles - if !reflect.DeepEqual(to.Spec.Selector, from.Spec.Selector) { - log.V(1).Info("reconciling RequestAuthentication due to selector change") - log.V(2).Info("difference in RequestAuthentication selector", "wanted", from.Spec.Selector, "existing", to.Spec.Selector) - requireUpdate = true - } - to.Spec.Selector = from.Spec.Selector - - if !reflect.DeepEqual(to.Spec.JwtRules, from.Spec.JwtRules) { - log.V(1).Info("reconciling RequestAuthentication due to JwtRule change") - log.V(2).Info("difference in RequestAuthentication JwtRules", "wanted", from.Spec.JwtRules, "existing", to.Spec.JwtRules) - requireUpdate = true - } - to.Spec.JwtRules = from.Spec.JwtRules - - return requireUpdate -} - -// CopyServiceAccount copies the owned fields from one Service Account to another -func CopyServiceAccount(from, to *corev1.ServiceAccount, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling ServiceAccount due to label change") - log.V(2).Info("difference in ServiceAccount labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling ServiceAccount due to label change") - log.V(2).Info("difference in ServiceAccount labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling ServiceAccount due to annotation change") - log.V(2).Info("difference in ServiceAccount annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling ServiceAccount due to label change") - log.V(2).Info("difference in ServiceAccount labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Annotations = from.Annotations - - // Don't copy the entire Spec, because we this will lead to unnecessary reconciles - if !reflect.DeepEqual(to.ImagePullSecrets, from.ImagePullSecrets) { - log.V(1).Info("reconciling ServiceAccount due to ImagePullSecrets change") - log.V(2).Info("difference in ServiceAccount ImagePullSecrets", "wanted", from.ImagePullSecrets, "existing", to.ImagePullSecrets) - requireUpdate = true - } - to.ImagePullSecrets = from.ImagePullSecrets - - if !reflect.DeepEqual(to.AutomountServiceAccountToken, from.AutomountServiceAccountToken) { - log.V(1).Info("reconciling ServiceAccount due to AutomountServiceAccountToken change") - log.V(2).Info("difference in ServiceAccount AutomountServiceAccountToken", "wanted", from.AutomountServiceAccountToken, "existing", to.AutomountServiceAccountToken) - requireUpdate = true - } - to.AutomountServiceAccountToken = from.AutomountServiceAccountToken - - return requireUpdate -} - -// CopyConfigMap copies the owned fields from one Service Account to another -func CopyConfigMap(from, to *corev1.ConfigMap, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling ConfigMap due to label change") - 
log.V(2).Info("difference in ConfigMap labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling ConfigMap due to label change") - log.V(2).Info("difference in ConfigMap labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling ConfigMap due to annotation change") - log.V(2).Info("difference in ConfigMap annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling ConfigMap due to annotation change") - log.V(2).Info("difference in ConfigMap annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = from.Annotations - - // Don't copy the entire Spec, because we this will lead to unnecessary reconciles - if !reflect.DeepEqual(to.Data, from.Data) { - log.V(1).Info("reconciling ConfigMap due to Data change") - log.V(2).Info("difference in ConfigMap Data", "wanted", from.Data, "existing", to.Data) - requireUpdate = true - } - to.Data = from.Data - - if !reflect.DeepEqual(to.BinaryData, from.BinaryData) { - log.V(1).Info("reconciling ConfigMap due to BinaryData change") - log.V(2).Info("difference in ConfigMap BinaryData", "wanted", from.BinaryData, "existing", to.BinaryData) - requireUpdate = true - } - to.BinaryData = from.BinaryData - - return requireUpdate -} - -// CopyRoleBinding copies the owned fields from one Role Binding to another -func CopyRoleBinding(from, to *rbacv1.RoleBinding, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling RoleBinding due to label change") - log.V(2).Info("difference in RoleBinding labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling RoleBinding due to label change") - log.V(2).Info("difference in RoleBinding labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling RoleBinding due to annotation change") - log.V(2).Info("difference in RoleBinding annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling RoleBinding due to annotation change") - log.V(2).Info("difference in RoleBinding annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = from.Annotations - - // Don't copy the entire Spec, because we this will lead to unnecessary reconciles - if !reflect.DeepEqual(to.RoleRef, from.RoleRef) { - log.V(1).Info("reconciling RoleBinding due to RoleRef change") - log.V(2).Info("difference in RoleBinding RoleRef", "wanted", from.RoleRef, "existing", to.RoleRef) - requireUpdate = true - } - to.RoleRef = from.RoleRef - - if !reflect.DeepEqual(to.Subjects, from.Subjects) { - log.V(1).Info("reconciling RoleBinding due to Subject change") - log.V(2).Info("difference in RoleBinding Subjects", "wanted", from.Subjects, "existing", to.Subjects) - requireUpdate = true - } - to.Subjects = from.Subjects - - 
return requireUpdate -} - -// CopyNetworkPolicy copies the owned fields from one NetworkPolicy to another -func CopyNetworkPolicy(from, to *networkv1.NetworkPolicy, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling NetworkPolicy due to label change") - log.V(2).Info("difference in NetworkPolicy labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling NetworkPolicy due to label change") - log.V(2).Info("difference in NetworkPolicy labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling NetworkPolicy due to annotation change") - log.V(2).Info("difference in NetworkPolicy annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling NetworkPolicy due to annotation change") - log.V(2).Info("difference in NetworkPolicy annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = from.Annotations - - if !reflect.DeepEqual(to.Spec, from.Spec) { - log.V(1).Info("reconciling NetworkPolicy due to spec change") - log.V(2).Info("difference in NetworkPolicy spec", "wanted", from.Spec, "existing", to.Spec) - requireUpdate = true - } - to.Spec = from.Spec - - return requireUpdate -} - -// CopyNamespace copies the owned fields from one Namespace to another -func CopyNamespace(from, to *corev1.Namespace, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling Namespace due to label change") - log.V(2).Info("difference in Namespace labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling Namespace due to label change") - log.V(2).Info("difference in Namespace labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling Namespace due to annotation change") - log.V(2).Info("difference in Namespace annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling Namespace due to annotation change") - log.V(2).Info("difference in Namespace annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = from.Annotations - - return requireUpdate -} - -// CopyPersistentVolumeClaim copies the owned fields from one PersistentVolumeClaim to another -func CopyPersistentVolumeClaim(from, to *corev1.PersistentVolumeClaim, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling PersistentVolumeClaim due to label change") - log.V(2).Info("difference in PersistentVolumeClaim labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling PersistentVolumeClaim due to label change") - log.V(2).Info("difference in 
PersistentVolumeClaim labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - // for k, v := range to.Annotations { - // if from.Annotations[k] != v { - // log.V(1).Info("reconciling PersistentVolumeClaim due to annotation change") - // log.V(2).Info("difference in PersistentVolumeClaim annotations", "wanted", from.Annotations, "existing", to.Annotations) - // requireUpdate = true - // } - // } - // if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - // log.V(1).Info("reconciling PersistentVolumeClaim due to annotation change") - // log.V(2).Info("difference in PersistentVolumeClaim annotations", "wanted", from.Annotations, "existing", to.Annotations) - // requireUpdate = true - // } - // to.Annotations = from.Annotations - - if !reflect.DeepEqual(to.Spec.Resources.Requests, from.Spec.Resources.Requests) { - log.V(1).Info("reconciling PersistentVolumeClaim due to resource requests change") - log.V(2).Info("difference in PersistentVolumeClaim resource requests", "wanted", from.Spec.Resources.Requests, "existing", to.Spec.Resources.Requests) - requireUpdate = true - } - to.Spec.Resources.Requests = from.Spec.Resources.Requests - - return requireUpdate -} - -// CopyPostgresql copies the owned fields from one Postgres instance to another -func CopyPostgresql(from, to *postgresv1.Postgresql, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling PostgreSQL Database due to label change") - log.V(2).Info("difference in PostgreSQL Database labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling PostgreSQL Database due to label change") - log.V(2).Info("difference in PostgreSQL Database labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling PostgreSQL Database due to annotation change") - log.V(2).Info("difference in PostgreSQL Database annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling PostgreSQL Database due to annotation change") - log.V(2).Info("difference in PostgreSQL Database annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = from.Annotations - - if !reflect.DeepEqual(to.Spec, from.Spec) { - log.V(1).Info("reconciling PostgreSQL Database due to spec change") - log.V(2).Info("difference in PostgreSQL Database spec", "wanted", from.Spec, "existing", to.Spec) - requireUpdate = true - } - to.Spec = from.Spec - - return requireUpdate -} - -// CopyKubeflowEnvironment copies the owned fields from one Kubeflow Environment to another -func CopyKubeflowEnvironment(from, to *platformv1alpha1.Environment, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling KubeflowEnvironment due to label change") - log.V(2).Info("difference in KubeflowEnvironment labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling KubeflowEnvironment due to label change") - log.V(2).Info("difference in KubeflowEnvironment labels", 
"wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling KubeflowEnvironment due to annotations change") - log.V(2).Info("difference in KubeflowEnvironment annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling KubeflowEnvironment due to annotations change") - log.V(2).Info("difference in KubeflowEnvironment annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = from.Annotations - - if !reflect.DeepEqual(to.Spec, from.Spec) { - log.V(1).Info("reconciling KubeflowEnvironment due to spec change") - log.V(2).Info("difference in KubeflowEnvironment spec", "wanted", from.Spec, "existing", to.Spec) - requireUpdate = true - } - to.Spec = from.Spec - - return requireUpdate -} - -// CopySubnamespaceAnchor copies the owned fields from one Subnamespace to another -func CopySubnamespaceAnchor(from, to *hncv1alpha2.SubnamespaceAnchor, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling SubnamespaceAnchor due to label change") - log.V(2).Info("difference in SubnamespaceAnchor labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling SubnamespaceAnchor due to label change") - log.V(2).Info("difference in SubnamespaceAnchor labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling SubnamespaceAnchor due to annotation change") - log.V(2).Info("difference in SubnamespaceAnchor annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling SubnamespaceAnchor due to annotation change") - log.V(2).Info("difference in SubnamespaceAnchor annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = from.Annotations - - return requireUpdate -} - -// CopyXPlaneIAMPolicy copies the owned fields from one CrossPlane IAM Policy to another -func CopyXPlaneIAMPolicy(from, to *crossplaneAWSIdentityv1beta1.Policy, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling XPlaneIAMPolicy due to label change") - log.V(2).Info("difference in XPlaneIAMPolicy labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling XPlaneIAMPolicy due to label change") - log.V(2).Info("difference in XPlaneIAMPolicy labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling XPlaneIAMPolicy due to annotation change") - log.V(2).Info("difference in XPlaneIAMPolicy annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling 
XPlaneIAMPolicy due to annotation change") - log.V(2).Info("difference in XPlaneIAMPolicy annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = from.Annotations - - if !reflect.DeepEqual(to.Spec, from.Spec) { - log.V(1).Info("reconciling XPlaneIAMPolicy due to Spec change") - log.V(2).Info("difference in XPlaneIAMPolicy Specs", "wanted", from.Spec, "existing", to.Spec) - requireUpdate = true - } - to.Spec = from.Spec - - return requireUpdate -} - -// CopyXPlaneIAMRole copies the owned fields from one CrossPlane IAM Role to another -func CopyXPlaneIAMRole(from, to *crossplaneAWSIdentityv1beta1.Role, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling XPlaneIAMRole due to label change") - log.V(2).Info("difference in XPlaneIAMRole labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling XPlaneIAMRole due to label change") - log.V(2).Info("difference in XPlaneIAMRole labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling XPlaneIAMRole due to annotation change") - log.V(2).Info("difference in XPlaneIAMRole annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling XPlaneIAMRole due to annotation change") - log.V(2).Info("difference in XPlaneIAMRole annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = from.Annotations - - if !reflect.DeepEqual(to.Spec, from.Spec) { - log.V(1).Info("reconciling XPlaneIAMRole due to Spec change") - log.V(2).Info("difference in XPlaneIAMRole Specs", "wanted", from.Spec, "existing", to.Spec) - requireUpdate = true - } - to.Spec = from.Spec - - return requireUpdate -} - -// CopyXPlaneIAMUser copies the owned fields from one CrossPlane IAM User to another -func CopyXPlaneIAMUser(from, to *crossplaneAWSIdentityv1beta1.User, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling XPlaneIAMUser due to label change") - log.V(2).Info("difference in XPlaneIAMUser labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling XPlaneIAMUser due to label change") - log.V(2).Info("difference in XPlaneIAMUser labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling XPlaneIAMUser due to annotation change") - log.V(2).Info("difference in XPlaneIAMUser annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling XPlaneIAMUser due to annotation change") - log.V(2).Info("difference in XPlaneIAMUser annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = from.Annotations - - if !reflect.DeepEqual(to.Spec, from.Spec) { - log.V(1).Info("reconciling XPlaneIAMUser due to Spec 
change") - log.V(2).Info("difference in XPlaneIAMUser Specs", "wanted", from.Spec, "existing", to.Spec) - requireUpdate = true - } - to.Spec = from.Spec - - return requireUpdate -} - -// CopyXPlaneIAMRolePolicyAttachement copies the owned fields from one CrossPlane IAM User Policy Attachement to another -func CopyXPlaneIAMRolePolicyAttachement(from, to *crossplaneAWSIdentityv1beta1.RolePolicyAttachment, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling XPlaneIAMRolePolicyAttachement due to label change") - log.V(2).Info("difference in XPlaneIAMRolePolicyAttachement labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling XPlaneIAMRolePolicyAttachement due to label change") - log.V(2).Info("difference in XPlaneIAMRolePolicyAttachement labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling XPlaneIAMRolePolicyAttachement due to annotation change") - log.V(2).Info("difference in XPlaneIAMRolePolicyAttachement annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling XPlaneIAMRolePolicyAttachement due to annotation change") - log.V(2).Info("difference in XPlaneIAMRolePolicyAttachement annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = from.Annotations - - if !reflect.DeepEqual(to.Spec, from.Spec) { - log.V(1).Info("reconciling XPlaneIAMRolePolicyAttachement due to Spec change") - log.V(2).Info("difference in XPlaneIAMRolePolicyAttachement Specs", "wanted", from.Spec, "existing", to.Spec) - requireUpdate = true - } - to.Spec = from.Spec - - return requireUpdate -} - -// CopyXPlaneIAMUserPolicyAttachement copies the owned fields from one CrossPlane IAM User Policy Attachement to another -func CopyXPlaneIAMUserPolicyAttachement(from, to *crossplaneAWSIdentityv1beta1.UserPolicyAttachment, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling XPlaneIAMUserPolicyAttachement due to label change") - log.V(2).Info("difference in XPlaneIAMUserPolicyAttachement labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling XPlaneIAMUserPolicyAttachement due to label change") - log.V(2).Info("difference in XPlaneIAMUserPolicyAttachement labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling XPlaneIAMUserPolicyAttachement due to annotation change") - log.V(2).Info("difference in XPlaneIAMUserPolicyAttachement annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling XPlaneIAMUserPolicyAttachement due to annotation change") - log.V(2).Info("difference in XPlaneIAMUserPolicyAttachement annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = 
from.Annotations - - if !reflect.DeepEqual(to.Spec, from.Spec) { - log.V(1).Info("reconciling XPlaneIAMUserPolicyAttachement due to Spec change") - log.V(2).Info("difference in XPlaneIAMUserPolicyAttachement Specs", "wanted", from.Spec, "existing", to.Spec) - requireUpdate = true - } - to.Spec = from.Spec - - return requireUpdate -} - -// CopyXPlaneIAMAccessKey copies the owned fields from one CrossPlane IAM Access Key to another -func CopyXPlaneIAMAccessKey(from, to *crossplaneAWSIdentityv1beta1.AccessKey, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling XPlaneIAMAccessKey due to label change") - log.V(2).Info("difference in XPlaneIAMAccessKey labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling XPlaneIAMAccessKey due to label change") - log.V(2).Info("difference in XPlaneIAMAccessKey labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling XPlaneIAMAccessKey due to annotation change") - log.V(2).Info("difference in XPlaneIAMAccessKey annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling XPlaneIAMAccessKey due to annotation change") - log.V(2).Info("difference in XPlaneIAMAccessKey annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = from.Annotations - - if !reflect.DeepEqual(to.Spec.DeletionPolicy, from.Spec.DeletionPolicy) { - log.V(1).Info("reconciling XPlaneIAMAccessKey due to DeletionPolicy change") - log.V(2).Info("difference in XPlaneIAMAccessKey DeletionPolicies", "wanted", from.Spec.DeletionPolicy, "existing", to.Spec.DeletionPolicy) - requireUpdate = true - } - to.Spec.DeletionPolicy = from.Spec.DeletionPolicy - - if !reflect.DeepEqual(to.Spec.ForProvider, from.Spec.ForProvider) { - log.V(1).Info("reconciling XPlaneIAMAccessKey due to ForProvider change") - log.V(2).Info("difference in XPlaneIAMAccessKey ForProviders", "wanted", from.Spec.ForProvider, "existing", to.Spec.ForProvider) - requireUpdate = true - } - to.Spec.ForProvider = from.Spec.ForProvider - - if !reflect.DeepEqual(to.Spec.ProviderConfigReference, from.Spec.ProviderConfigReference) { - log.V(1).Info("reconciling XPlaneIAMAccessKey due to ProviderConfigReference change") - log.V(2).Info("difference in XPlaneIAMAccessKey ProviderConfigReferences", "wanted", from.Spec.ProviderConfigReference, "existing", to.Spec.ProviderConfigReference) - requireUpdate = true - } - to.Spec.ProviderConfigReference = from.Spec.ProviderConfigReference - - return requireUpdate -} - -// CopyACKIAMPolicy copies the owned fields from one ACK IAM Policy to another -func CopyACKIAMPolicy(from, to *ackIAM.Policy, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling ACKIAMPolicy due to label change") - log.V(2).Info("difference in ACKIAMPolicy labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling ACKIAMPolicy due to label change") - log.V(2).Info("difference in ACKIAMPolicy labels", "wanted", from.Labels, 
"existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling ACKIAMPolicy due to annotation change") - log.V(2).Info("difference in ACKIAMPolicy annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling ACKIAMPolicy due to annotation change") - log.V(2).Info("difference in ACKIAMPolicy annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = from.Annotations - - if !reflect.DeepEqual(to.Spec, from.Spec) { - log.V(1).Info("reconciling ACKIAMPolicy due to Spec change") - log.V(2).Info("difference in ACKIAMPolicy Specs", "wanted", from.Spec, "existing", to.Spec) - requireUpdate = true - } - to.Spec = from.Spec - - return requireUpdate -} - -// CopyACKIAMRole copies the owned fields from one ACK IAM Role to another -func CopyACKIAMRole(from, to *ackIAM.Role, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling ACKIAMRole due to label change") - log.V(2).Info("difference in ACKIAMRole labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling ACKIAMRole due to label change") - log.V(2).Info("difference in ACKIAMRole labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling ACKIAMRole due to annotation change") - log.V(2).Info("difference in ACKIAMRole annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling ACKIAMRole due to annotation change") - log.V(2).Info("difference in ACKIAMRole annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = from.Annotations - - if !reflect.DeepEqual(to.Spec, from.Spec) { - log.V(1).Info("reconciling ACKIAMRole due to Spec change") - log.V(2).Info("difference in ACKIAMRole Specs", "wanted", from.Spec, "existing", to.Spec) - requireUpdate = true - } - to.Spec = from.Spec - - return requireUpdate -} - -// CopyPeerAuthentication copies the owned fields from one Istio PeerAuthentication to another -func CopyPeerAuthentication(from, to *istioSecurityClient.PeerAuthentication, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling Istio PeerAuthentication due to label change") - log.V(2).Info("difference in Istio PeerAuthentication labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling Istio PeerAuthentication due to label change") - log.V(2).Info("difference in Istio PeerAuthentication labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling Istio PeerAuthentication due to annotation change") - log.V(2).Info("difference in Istio PeerAuthentication annotations", "wanted", from.Annotations, "existing", 
to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling Istio PeerAuthentication due to annotation change") - log.V(2).Info("difference in Istio PeerAuthentication annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = from.Annotations - - if !reflect.DeepEqual(to.Spec, from.Spec) { - log.V(1).Info("reconciling Istio PeerAuthentication due to Spec change") - log.V(2).Info("difference in Istio PeerAuthentication Specs", "wanted", from.Spec, "existing", to.Spec) - requireUpdate = true - } - to.Spec = from.Spec - - return requireUpdate -} - -// CopyEnvoyFilter copies the owned fields from one Istio EnvoyFilter to another -func CopyEnvoyFilter(from, to *istioNetworkingClientv1alpha3.EnvoyFilter, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling Istio EnvoyFilter due to label change") - log.V(2).Info("difference in Istio EnvoyFilter labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling Istio EnvoyFilter due to label change") - log.V(2).Info("difference in Istio EnvoyFilter labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling Istio EnvoyFilter due to annotation change") - log.V(2).Info("difference in Istio EnvoyFilter annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling Istio EnvoyFilter due to annotation change") - log.V(2).Info("difference in Istio EnvoyFilter annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = from.Annotations - - if !reflect.DeepEqual(to.Spec, from.Spec) { - log.V(1).Info("reconciling Istio EnvoyFilter due to Spec change") - log.V(2).Info("difference in Istio EnvoyFilter Specs", "wanted", from.Spec, "existing", to.Spec) - requireUpdate = true - } - to.Spec = from.Spec - - return requireUpdate -} - -// CopyPodDefault copies the owned fields from one Kubeflow PodDefault to another -func CopyPodDefault(from, to *kfPodDefault.PodDefault, log logr.Logger) bool { - requireUpdate := false - for k, v := range to.Labels { - if from.Labels[k] != v { - log.V(1).Info("reconciling Kubeflow PodDefault due to label change") - log.V(2).Info("difference in Kubeflow PodDefault labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - } - if len(to.Labels) == 0 && len(from.Labels) != 0 { - log.V(1).Info("reconciling Kubeflow PodDefault due to label change") - log.V(2).Info("difference in Kubeflow PodDefault labels", "wanted", from.Labels, "existing", to.Labels) - requireUpdate = true - } - to.Labels = from.Labels - - for k, v := range to.Annotations { - if from.Annotations[k] != v { - log.V(1).Info("reconciling Kubeflow PodDefault due to annotation change") - log.V(2).Info("difference in Kubeflow PodDefault annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - } - if len(to.Annotations) == 0 && len(from.Annotations) != 0 { - log.V(1).Info("reconciling Kubeflow PodDefault due to annotation change") - log.V(2).Info("difference in Kubeflow 
PodDefault annotations", "wanted", from.Annotations, "existing", to.Annotations) - requireUpdate = true - } - to.Annotations = from.Annotations - - if !reflect.DeepEqual(to.Spec, from.Spec) { - log.V(1).Info("reconciling Kubeflow PodDefault due to Spec change") - log.V(2).Info("difference in Kubeflow PodDefault Specs", "wanted", from.Spec, "existing", to.Spec) - requireUpdate = true - } - to.Spec = from.Spec - - return requireUpdate -} diff --git a/vendor/github.com/pluralsh/controller-reconcile-helper/renovate.json b/vendor/github.com/pluralsh/controller-reconcile-helper/renovate.json deleted file mode 100644 index f45d8f11..00000000 --- a/vendor/github.com/pluralsh/controller-reconcile-helper/renovate.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "extends": [ - "config:base" - ] -} diff --git a/vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/config_types.go b/vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/config_types.go deleted file mode 100644 index f4b3160b..00000000 --- a/vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/config_types.go +++ /dev/null @@ -1,129 +0,0 @@ -/* -Copyright 2021. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
- -// ConfigSpec defines the desired state of Config -type ConfigSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file - - // Configs related to namespaces - Namespace NamespaceSpec `json:"namespace,omitempty"` - - // Network related configs - Network NetworkSpec `json:"network,omitempty"` - - // Security related configs - Security SecuritySpec `json:"security,omitempty"` - - // Identity related configs - Identity IdentitySpec `json:"identity,omitempty"` - - // Email addresses of Kubeflow cluster level admins - Admins []string `json:"admins,omitempty"` -} - -type NamespaceSpec struct { - // Prefix to add to all created namespaces - Prefix string `json:"prefix,omitempty"` - - // Labels to add to all created namespaces - DefaultLabels map[string]string `json:"defaultLabels,omitempty"` -} - -type NetworkSpec struct { - // Hostname of the Kubeflow deployment - Hostname string `json:"hostname,omitempty"` - - // Domain for the local cluster - //+kubebuilder:default:="cluster.local" - ClusterDomain string `json:"clusterDomain,omitempty"` - - // Istio related configs - Istio IstioSpec `json:"istio,omitempty"` -} - -type IstioSpec struct { - // Istio Gateway resource used for Kubeflow - ClusterGateway IstioGatewaySpec `json:"clusterGateway,omitempty"` -} - -type IstioGatewaySpec struct { - // Name of the Istio gateway to use for Kubeflow - Name string `json:"name,omitempty"` - - // Namespace of the Istio gateway to use for Kubeflow - Namespace string `json:"namespace,omitempty"` -} - -type SecuritySpec struct { - // Settings related to OIDC configuration - OIDC OIDCSpec `json:"oidc,omitempty"` -} - -type OIDCSpec struct { - // The OIDC issuer to setup with Istio - Issuer string `json:"issuer,omitempty"` - - // The JWKS URI for the OIDC issuer you would like to use with Istio - JwksURI string `json:"jwksURI,omitempty"` -} - -type IdentitySpec struct { - //+kubebuilder:validation:Optional - // Prefix found in the JWT email claim - UserIDPrefix string `json:"userIdPrefix,omitempty"` -} - -// ConfigStatus defines the observed state of Config -type ConfigStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file -} - -//+kubebuilder:object:root=true -//+kubebuilder:subresource:status -//+kubebuilder:resource:path=configs,scope=Cluster - -// Config is the Schema for the configs API -type Config struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec ConfigSpec `json:"spec,omitempty"` - Status ConfigStatus `json:"status,omitempty"` -} - -//+kubebuilder:object:root=true - -// ConfigList contains a list of Config -type ConfigList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []Config `json:"items"` -} - -func init() { - SchemeBuilder.Register(&Config{}, &ConfigList{}) -} diff --git a/vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/environment_types.go b/vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/environment_types.go deleted file mode 100644 index 6583aa5c..00000000 --- a/vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/environment_types.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2021. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - -type EnvironmentType string - -const ( - ProjectEnvironment EnvironmentType = "Project" - UserEnvironment EnvironmentType = "User" - PersonalEnvironment EnvironmentType = "Personal" -) - -// EnvironmentSpec defines the desired state of Environment -type EnvironmentSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file - - // +kubebuilder:validation:Enum=Project;User;Personal - Type EnvironmentType `json:"type,omitempty"` - // +kubebuilder:validation:Optional - ParentProject string `json:"parentProject,omitempty"` - Owners EnvironementOwners `json:"owners,omitempty"` - Permissions rbacv1.RoleRef `json:"permissions,omitempty"` -} - -type EnvironementOwners struct { - Users []string `json:"users,omitempty"` -} - -type EnvironmentCondition struct { - Type EnvironmentConditionType `json:"type,omitempty"` - Status string `json:"status,omitempty" description:"status of the condition, one of True, False, Unknown"` - Message string `json:"message,omitempty"` -} - -type EnvironmentConditionType string - -const ( - EnvironmentSucceed EnvironmentConditionType = "Successful" - EnvironmentFailed EnvironmentConditionType = "Failed" - EnvironmentUnknown EnvironmentConditionType = "Unknown" -) - -// EnvironmentStatus defines the observed state of Environment -type EnvironmentStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file - Conditions []EnvironmentCondition `json:"conditions,omitempty"` -} - -//+kubebuilder:object:root=true -//+kubebuilder:subresource:status -//+kubebuilder:resource:path=environments,scope=Cluster - -// Environment is the Schema for the environments API -type Environment struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec EnvironmentSpec `json:"spec,omitempty"` - Status EnvironmentStatus `json:"status,omitempty"` -} - -//+kubebuilder:object:root=true - -// EnvironmentList contains a list of Environment -type EnvironmentList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []Environment `json:"items"` -} - -func init() { - SchemeBuilder.Register(&Environment{}, &EnvironmentList{}) -} diff --git a/vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/groupversion_info.go b/vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/groupversion_info.go deleted file mode 100644 index 8f26a1a5..00000000 --- a/vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/groupversion_info.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2021. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package v1alpha1 contains API Schema definitions for the platform v1alpha1 API group -//+kubebuilder:object:generate=true -//+groupName=platform.kubeflow.org -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" -) - -var ( - // GroupVersion is group version used to register these objects - GroupVersion = schema.GroupVersion{Group: "platform.kubeflow.org", Version: "v1alpha1"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} - - // AddToScheme adds the types in this group-version to the given scheme. - AddToScheme = SchemeBuilder.AddToScheme -) diff --git a/vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/project_types.go b/vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/project_types.go deleted file mode 100644 index 8578f342..00000000 --- a/vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/project_types.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright 2021. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
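// Illustrative sketch (hypothetical values; assumes the rbacv1 and metav1
// imports above): a Project owned by one user and one group, whose
// Permissions reference the built-in "edit" ClusterRole (an assumption about
// how the controller consumes the RoleRef).
//
//	project := Project{
//		ObjectMeta: metav1.ObjectMeta{Name: "demo"},
//		Spec: ProjectSpec{
//			Owners:      ProjectOwners{Users: []string{"alice@example.com"}, Groups: []string{"ml-team"}},
//			Permissions: rbacv1.RoleRef{APIGroup: "rbac.authorization.k8s.io", Kind: "ClusterRole", Name: "edit"},
//		},
//	}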
- -// ProjectSpec defines the desired state of Project -type ProjectSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file - - Owners ProjectOwners `json:"owners,omitempty"` - Permissions rbacv1.RoleRef `json:"permissions,omitempty"` -} - -type ProjectOwners struct { - Users []string `json:"users,omitempty"` - // +kubebuilder:pruning:PreserveUnknownFields - Groups []string `json:"groups,omitempty"` -} - -type ProjectCondition struct { - Type string `json:"type,omitempty"` - Status string `json:"status,omitempty" description:"status of the condition, one of True, False, Unknown"` - Message string `json:"message,omitempty"` -} - -const ( - ProjectSucceed = "Successful" - ProjectFailed = "Failed" - ProjectUnknown = "Unknown" -) - -// ProjectStatus defines the observed state of Project -type ProjectStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file - Conditions []ProjectCondition `json:"conditions,omitempty"` -} - -//+kubebuilder:object:root=true -//+kubebuilder:subresource:status -//+kubebuilder:resource:path=projects,scope=Cluster - -// Project is the Schema for the projects API -type Project struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec ProjectSpec `json:"spec,omitempty"` - Status ProjectStatus `json:"status,omitempty"` -} - -//+kubebuilder:object:root=true - -// ProjectList contains a list of Project -type ProjectList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []Project `json:"items"` -} - -func init() { - SchemeBuilder.Register(&Project{}, &ProjectList{}) -} diff --git a/vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/resourcegroup_types.go b/vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/resourcegroup_types.go deleted file mode 100644 index 18498d22..00000000 --- a/vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/resourcegroup_types.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2021. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
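// Illustrative sketch (hypothetical values; assumes the corev1 and metav1
// imports above plus k8s.io/apimachinery/pkg/api/resource): a ResourceGroup
// that requests and caps CPU/memory and tolerates a GPU taint.
//
//	rg := ResourceGroup{
//		ObjectMeta: metav1.ObjectMeta{Name: "gpu-small"},
//		Spec: ResourceGroupSpec{
//			Resources: corev1.ResourceRequirements{
//				Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2"), corev1.ResourceMemory: resource.MustParse("8Gi")},
//				Limits:   corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("4"), corev1.ResourceMemory: resource.MustParse("16Gi")},
//			},
//			Tolerations: []corev1.Toleration{{Key: "nvidia.com/gpu", Operator: corev1.TolerationOpExists, Effect: corev1.TaintEffectNoSchedule}},
//		},
//	}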
- -// ResourceGroupSpec defines the desired state of ResourceGroup -type ResourceGroupSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file - - // The resource requests and limits for this Resource Group - Resources corev1.ResourceRequirements `json:"resources,omitempty"` - - // The affinity config for this Resource Group - //+kubebuilder:validation:Optional - Affinity corev1.Affinity `json:"affinity,omitempty"` - - // The tolerations for this Resource Group - //+kubebuilder:validation:Optional - Tolerations []corev1.Toleration `json:"tolerations,omitempty"` -} - -// ResourceGroupStatus defines the observed state of ResourceGroup -type ResourceGroupStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file -} - -//+kubebuilder:object:root=true -//+kubebuilder:subresource:status -//+kubebuilder:resource:path=resourcegroups,scope=Cluster - -// ResourceGroup is the Schema for the resourcegroups API -type ResourceGroup struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec ResourceGroupSpec `json:"spec,omitempty"` - Status ResourceGroupStatus `json:"status,omitempty"` -} - -//+kubebuilder:object:root=true - -// ResourceGroupList contains a list of ResourceGroup -type ResourceGroupList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []ResourceGroup `json:"items"` -} - -func init() { - SchemeBuilder.Register(&ResourceGroup{}, &ResourceGroupList{}) -} diff --git a/vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/storagegroup_types.go b/vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/storagegroup_types.go deleted file mode 100644 index 366d1aea..00000000 --- a/vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/storagegroup_types.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2021. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
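// Illustrative sketch (hypothetical values; assumes the metav1 import above):
// a StorageGroup offering fixed PVC sizes; each option must match the
// kubebuilder pattern declared below, e.g. "10Gi".
//
//	sg := StorageGroup{
//		ObjectMeta: metav1.ObjectMeta{Name: "standard"},
//		Spec: StorageGroupSpec{
//			StorageClassName: "gp2",
//			SizeConfig: StorageGroupSizeConfig{
//				Custom:  false,
//				Options: []StorageGroupSizeOption{"10Gi", "50Gi", "100Gi"},
//			},
//		},
//	}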
- -// StorageGroupSpec defines the desired state of StorageGroup -type StorageGroupSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file - - // Name of the StorageClass associated with this Storage Group - StorageClassName string `json:"storageClassName,omitempty"` - - // Configs for setting the allowed sizes of PVCs - SizeConfig StorageGroupSizeConfig `json:"sizeConfig,omitempty"` -} - -type StorageGroupSizeConfig struct { - // Allow users to set custom sizes for PVCs created using this StorageGroup - // +kubebuilder:default:=false - Custom bool `json:"custom,omitempty"` - - // List of sizes of PVCs the user can select from for this StorageGroup - Options []StorageGroupSizeOption `json:"options,omitempty"` -} - -// +kubebuilder:validation:Pattern=`^[0-9]{1,4}[MGT][IBib]$` -type StorageGroupSizeOption string - -// StorageGroupStatus defines the observed state of StorageGroup -type StorageGroupStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file -} - -//+kubebuilder:object:root=true -//+kubebuilder:subresource:status - -// StorageGroup is the Schema for the storagegroups API -type StorageGroup struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec StorageGroupSpec `json:"spec,omitempty"` - Status StorageGroupStatus `json:"status,omitempty"` -} - -//+kubebuilder:object:root=true - -// StorageGroupList contains a list of StorageGroup -type StorageGroupList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []StorageGroup `json:"items"` -} - -func init() { - SchemeBuilder.Register(&StorageGroup{}, &StorageGroupList{}) -} diff --git a/vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 18e70607..00000000 --- a/vendor/github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,715 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright 2021. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Config) DeepCopyInto(out *Config) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. 
-func (in *Config) DeepCopy() *Config { - if in == nil { - return nil - } - out := new(Config) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Config) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigList) DeepCopyInto(out *ConfigList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Config, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigList. -func (in *ConfigList) DeepCopy() *ConfigList { - if in == nil { - return nil - } - out := new(ConfigList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ConfigList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigSpec) DeepCopyInto(out *ConfigSpec) { - *out = *in - in.Namespace.DeepCopyInto(&out.Namespace) - out.Network = in.Network - out.Security = in.Security - out.Identity = in.Identity - if in.Admins != nil { - in, out := &in.Admins, &out.Admins - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSpec. -func (in *ConfigSpec) DeepCopy() *ConfigSpec { - if in == nil { - return nil - } - out := new(ConfigSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigStatus) DeepCopyInto(out *ConfigStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigStatus. -func (in *ConfigStatus) DeepCopy() *ConfigStatus { - if in == nil { - return nil - } - out := new(ConfigStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EnvironementOwners) DeepCopyInto(out *EnvironementOwners) { - *out = *in - if in.Users != nil { - in, out := &in.Users, &out.Users - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironementOwners. -func (in *EnvironementOwners) DeepCopy() *EnvironementOwners { - if in == nil { - return nil - } - out := new(EnvironementOwners) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Environment) DeepCopyInto(out *Environment) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Environment. 
-func (in *Environment) DeepCopy() *Environment { - if in == nil { - return nil - } - out := new(Environment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Environment) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EnvironmentCondition) DeepCopyInto(out *EnvironmentCondition) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentCondition. -func (in *EnvironmentCondition) DeepCopy() *EnvironmentCondition { - if in == nil { - return nil - } - out := new(EnvironmentCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EnvironmentList) DeepCopyInto(out *EnvironmentList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Environment, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentList. -func (in *EnvironmentList) DeepCopy() *EnvironmentList { - if in == nil { - return nil - } - out := new(EnvironmentList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *EnvironmentList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EnvironmentSpec) DeepCopyInto(out *EnvironmentSpec) { - *out = *in - in.Owners.DeepCopyInto(&out.Owners) - out.Permissions = in.Permissions -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentSpec. -func (in *EnvironmentSpec) DeepCopy() *EnvironmentSpec { - if in == nil { - return nil - } - out := new(EnvironmentSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EnvironmentStatus) DeepCopyInto(out *EnvironmentStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]EnvironmentCondition, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentStatus. -func (in *EnvironmentStatus) DeepCopy() *EnvironmentStatus { - if in == nil { - return nil - } - out := new(EnvironmentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IdentitySpec) DeepCopyInto(out *IdentitySpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentitySpec. -func (in *IdentitySpec) DeepCopy() *IdentitySpec { - if in == nil { - return nil - } - out := new(IdentitySpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *IstioGatewaySpec) DeepCopyInto(out *IstioGatewaySpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstioGatewaySpec. -func (in *IstioGatewaySpec) DeepCopy() *IstioGatewaySpec { - if in == nil { - return nil - } - out := new(IstioGatewaySpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IstioSpec) DeepCopyInto(out *IstioSpec) { - *out = *in - out.ClusterGateway = in.ClusterGateway -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstioSpec. -func (in *IstioSpec) DeepCopy() *IstioSpec { - if in == nil { - return nil - } - out := new(IstioSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamespaceSpec) DeepCopyInto(out *NamespaceSpec) { - *out = *in - if in.DefaultLabels != nil { - in, out := &in.DefaultLabels, &out.DefaultLabels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceSpec. -func (in *NamespaceSpec) DeepCopy() *NamespaceSpec { - if in == nil { - return nil - } - out := new(NamespaceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { - *out = *in - out.Istio = in.Istio -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. -func (in *NetworkSpec) DeepCopy() *NetworkSpec { - if in == nil { - return nil - } - out := new(NetworkSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OIDCSpec) DeepCopyInto(out *OIDCSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCSpec. -func (in *OIDCSpec) DeepCopy() *OIDCSpec { - if in == nil { - return nil - } - out := new(OIDCSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Project) DeepCopyInto(out *Project) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project. -func (in *Project) DeepCopy() *Project { - if in == nil { - return nil - } - out := new(Project) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Project) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProjectCondition) DeepCopyInto(out *ProjectCondition) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectCondition. 
-func (in *ProjectCondition) DeepCopy() *ProjectCondition { - if in == nil { - return nil - } - out := new(ProjectCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProjectList) DeepCopyInto(out *ProjectList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Project, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList. -func (in *ProjectList) DeepCopy() *ProjectList { - if in == nil { - return nil - } - out := new(ProjectList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ProjectList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProjectOwners) DeepCopyInto(out *ProjectOwners) { - *out = *in - if in.Users != nil { - in, out := &in.Users, &out.Users - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectOwners. -func (in *ProjectOwners) DeepCopy() *ProjectOwners { - if in == nil { - return nil - } - out := new(ProjectOwners) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) { - *out = *in - in.Owners.DeepCopyInto(&out.Owners) - out.Permissions = in.Permissions -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec. -func (in *ProjectSpec) DeepCopy() *ProjectSpec { - if in == nil { - return nil - } - out := new(ProjectSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ProjectCondition, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus. -func (in *ProjectStatus) DeepCopy() *ProjectStatus { - if in == nil { - return nil - } - out := new(ProjectStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceGroup) DeepCopyInto(out *ResourceGroup) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroup. 
-func (in *ResourceGroup) DeepCopy() *ResourceGroup { - if in == nil { - return nil - } - out := new(ResourceGroup) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceGroup) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceGroupList) DeepCopyInto(out *ResourceGroupList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ResourceGroup, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupList. -func (in *ResourceGroupList) DeepCopy() *ResourceGroupList { - if in == nil { - return nil - } - out := new(ResourceGroupList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceGroupList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceGroupSpec) DeepCopyInto(out *ResourceGroupSpec) { - *out = *in - in.Resources.DeepCopyInto(&out.Resources) - in.Affinity.DeepCopyInto(&out.Affinity) - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupSpec. -func (in *ResourceGroupSpec) DeepCopy() *ResourceGroupSpec { - if in == nil { - return nil - } - out := new(ResourceGroupSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceGroupStatus) DeepCopyInto(out *ResourceGroupStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupStatus. -func (in *ResourceGroupStatus) DeepCopy() *ResourceGroupStatus { - if in == nil { - return nil - } - out := new(ResourceGroupStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecuritySpec) DeepCopyInto(out *SecuritySpec) { - *out = *in - out.OIDC = in.OIDC -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecuritySpec. -func (in *SecuritySpec) DeepCopy() *SecuritySpec { - if in == nil { - return nil - } - out := new(SecuritySpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageGroup) DeepCopyInto(out *StorageGroup) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageGroup. 
-func (in *StorageGroup) DeepCopy() *StorageGroup { - if in == nil { - return nil - } - out := new(StorageGroup) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *StorageGroup) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageGroupList) DeepCopyInto(out *StorageGroupList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]StorageGroup, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageGroupList. -func (in *StorageGroupList) DeepCopy() *StorageGroupList { - if in == nil { - return nil - } - out := new(StorageGroupList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *StorageGroupList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageGroupSizeConfig) DeepCopyInto(out *StorageGroupSizeConfig) { - *out = *in - if in.Options != nil { - in, out := &in.Options, &out.Options - *out = make([]StorageGroupSizeOption, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageGroupSizeConfig. -func (in *StorageGroupSizeConfig) DeepCopy() *StorageGroupSizeConfig { - if in == nil { - return nil - } - out := new(StorageGroupSizeConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageGroupSpec) DeepCopyInto(out *StorageGroupSpec) { - *out = *in - in.SizeConfig.DeepCopyInto(&out.SizeConfig) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageGroupSpec. -func (in *StorageGroupSpec) DeepCopy() *StorageGroupSpec { - if in == nil { - return nil - } - out := new(StorageGroupSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageGroupStatus) DeepCopyInto(out *StorageGroupStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageGroupStatus. 
-func (in *StorageGroupStatus) DeepCopy() *StorageGroupStatus { - if in == nil { - return nil - } - out := new(StorageGroupStatus) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/spf13/afero/.gitignore b/vendor/github.com/spf13/afero/.gitignore deleted file mode 100644 index 9c1d9861..00000000 --- a/vendor/github.com/spf13/afero/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -sftpfs/file1 -sftpfs/test/ diff --git a/vendor/github.com/spf13/afero/.travis.yml b/vendor/github.com/spf13/afero/.travis.yml deleted file mode 100644 index e944f594..00000000 --- a/vendor/github.com/spf13/afero/.travis.yml +++ /dev/null @@ -1,26 +0,0 @@ -sudo: false -language: go -arch: - - amd64 - - ppc64le - -go: - - "1.14" - - "1.15" - - "1.16" - - tip - -os: - - linux - - osx - -matrix: - allow_failures: - - go: tip - fast_finish: true - -script: - - go build -v ./... - - go test -count=1 -cover -race -v ./... - - go vet ./... - - FILES=$(gofmt -s -l . zipfs sftpfs mem tarfs); if [[ -n "${FILES}" ]]; then echo "You have go format errors; gofmt your changes"; exit 1; fi diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt deleted file mode 100644 index 298f0e26..00000000 --- a/vendor/github.com/spf13/afero/LICENSE.txt +++ /dev/null @@ -1,174 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof.
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md deleted file mode 100644 index fb8eaaf8..00000000 --- a/vendor/github.com/spf13/afero/README.md +++ /dev/null @@ -1,430 +0,0 @@ -![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png) - -A FileSystem Abstraction System for Go - -[![Build Status](https://travis-ci.org/spf13/afero.svg)](https://travis-ci.org/spf13/afero) [![Build status](https://ci.appveyor.com/api/projects/status/github/spf13/afero?branch=master&svg=true)](https://ci.appveyor.com/project/spf13/afero) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) - -# Overview - -Afero is a filesystem framework providing a simple, uniform and universal API -interacting with any filesystem, as an abstraction layer providing interfaces, -types and methods. Afero has an exceptionally clean interface and simple design -without needless constructors or initialization methods. - -Afero is also a library providing a base set of interoperable backend -filesystems that make it easy to work with afero while retaining all the power -and benefit of the os and ioutil packages. - -Afero provides significant improvements over using the os package alone, most -notably the ability to create mock and testing filesystems without relying on the disk. - -It is suitable for use in any situation where you would consider using the OS -package as it provides an additional abstraction that makes it easy to use a -memory backed file system during testing. It also adds support for the http -filesystem for full interoperability. 
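Editorial note before the feature list that follows: the practical payoff of this vendored dependency is that code written against `afero.Fs` runs unchanged on a real disk or in memory. Below is a minimal sketch of that dependency-injection style, not part of the vendored README; `countBytes` is a hypothetical helper, while `afero.NewMemMapFs`, `afero.NewOsFs`, `afero.ReadFile`, and `afero.WriteFile` are real afero API.

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

// countBytes is a hypothetical helper: it depends on the afero.Fs
// interface rather than the os package, so the caller picks the backend.
func countBytes(fs afero.Fs, path string) (int, error) {
	data, err := afero.ReadFile(fs, path) // afero utility; works with any backend
	if err != nil {
		return 0, err
	}
	return len(data), nil
}

func main() {
	fs := afero.NewMemMapFs() // swap in afero.NewOsFs() to hit the real disk
	if err := afero.WriteFile(fs, "/tmp/foo", []byte("hello"), 0644); err != nil {
		panic(err)
	}
	n, err := countBytes(fs, "/tmp/foo")
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 5
}
```

In tests, the same `countBytes` can be driven entirely from a fresh in-memory filesystem, which is exactly the testing pattern the README develops below.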
- - -## Afero Features - -* A single consistent API for accessing a variety of filesystems -* Interoperation between a variety of file system types -* A set of interfaces to encourage and enforce interoperability between backends -* An atomic cross platform memory backed file system -* Support for compositional (union) file systems by combining multiple file systems acting as one -* Specialized backends which modify existing filesystems (Read Only, Regexp filtered) -* A set of utility functions ported from io, ioutil & hugo to be afero aware -* Wrapper for go 1.16 filesystem abstraction `io/fs.FS` - -# Using Afero - -Afero is easy to use and easier to adopt. - -A few different ways you could use Afero: - -* Use the interfaces alone to define your own file system. -* Wrapper for the OS packages. -* Define different filesystems for different parts of your application. -* Use Afero for mock filesystems while testing - -## Step 1: Install Afero - -First use go get to install the latest version of the library. - - $ go get github.com/spf13/afero - -Next include Afero in your application. -```go -import "github.com/spf13/afero" -``` - -## Step 2: Declare a backend - -First define a package variable and set it to a pointer to a filesystem. -```go -var AppFs = afero.NewMemMapFs() - -// or - -var AppFs = afero.NewOsFs() -``` -It is important to note that if you repeat the composite literal you -will be using a completely new and isolated filesystem. In the case of -OsFs it will still use the same underlying filesystem but will reduce -the ability to drop in other filesystems as desired. - -## Step 3: Use it like you would the OS package - -Throughout your application use any function and method like you normally -would. - -So if my application previously had: -```go -os.Open("/tmp/foo") -``` -We would replace it with: -```go -AppFs.Open("/tmp/foo") -``` - -`AppFs` being the variable we defined above. - - -## List of all available functions - -File System Methods Available: -```go -Chmod(name string, mode os.FileMode) : error -Chown(name string, uid, gid int) : error -Chtimes(name string, atime time.Time, mtime time.Time) : error -Create(name string) : File, error -Mkdir(name string, perm os.FileMode) : error -MkdirAll(path string, perm os.FileMode) : error -Name() : string -Open(name string) : File, error -OpenFile(name string, flag int, perm os.FileMode) : File, error -Remove(name string) : error -RemoveAll(path string) : error -Rename(oldname, newname string) : error -Stat(name string) : os.FileInfo, error -``` -File Interfaces and Methods Available: -```go -io.Closer -io.Reader -io.ReaderAt -io.Seeker -io.Writer -io.WriterAt - -Name() : string -Readdir(count int) : []os.FileInfo, error -Readdirnames(n int) : []string, error -Stat() : os.FileInfo, error -Sync() : error -Truncate(size int64) : error -WriteString(s string) : ret int, err error -``` -In some applications it may make sense to define a new package that -simply exports the file system variable for easy access from anywhere. - -## Using Afero's utility functions - -Afero provides a set of functions to make it easier to use the underlying file systems. -These functions have been primarily ported from io & ioutil with some developed for Hugo. - -The afero utilities support all afero compatible backends.
- -The list of utilities includes: - -```go -DirExists(path string) (bool, error) -Exists(path string) (bool, error) -FileContainsBytes(filename string, subslice []byte) (bool, error) -GetTempDir(subPath string) string -IsDir(path string) (bool, error) -IsEmpty(path string) (bool, error) -ReadDir(dirname string) ([]os.FileInfo, error) -ReadFile(filename string) ([]byte, error) -SafeWriteReader(path string, r io.Reader) (err error) -TempDir(dir, prefix string) (name string, err error) -TempFile(dir, prefix string) (f File, err error) -Walk(root string, walkFn filepath.WalkFunc) error -WriteFile(filename string, data []byte, perm os.FileMode) error -WriteReader(path string, r io.Reader) (err error) -``` -For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero) - -They are available via two different approaches. You can either call -them directly where the first parameter of each function will be the file -system, or you can declare a new `Afero`, a custom type used to bind these -functions as methods to a given filesystem. - -### Calling utilities directly - -```go -fs := new(afero.MemMapFs) -f, err := afero.TempFile(fs, "", "ioutil-test") - -``` - -### Calling via Afero - -```go -fs := afero.NewMemMapFs() -afs := &afero.Afero{Fs: fs} -f, err := afs.TempFile("", "ioutil-test") -``` - -## Using Afero for Testing - -There is a large benefit to using a mock filesystem for testing. It has a -completely blank state every time it is initialized and can be easily -reproducible regardless of OS. You could create files to your heart’s content -and the file access would be fast while also saving you from all the annoying -issues with deleting temporary files, Windows file locking, etc. The MemMapFs -backend is perfect for testing. - -* Much faster than performing I/O operations on disk -* Avoid security issues and permissions -* Far more control. 'rm -rf /' with confidence -* Test setup is far easier to do -* No test cleanup needed - -One way to accomplish this is to define a variable as mentioned above. -In your application this will be set to afero.NewOsFs(); during testing you -can set it to afero.NewMemMapFs(). - -It wouldn't be uncommon to have each test initialize a blank slate memory -backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere -appropriate in my application code. This approach ensures that tests are order -independent, with no test relying on the state left by an earlier test. - -Then in my tests I would initialize a new MemMapFs for each test: -```go -func TestExist(t *testing.T) { - appFS := afero.NewMemMapFs() - // create test files and directories - appFS.MkdirAll("src/a", 0755) - afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644) - afero.WriteFile(appFS, "src/c", []byte("file c"), 0644) - name := "src/c" - _, err := appFS.Stat(name) - if os.IsNotExist(err) { - t.Errorf("file \"%s\" does not exist.\n", name) - } -} -``` - -# Available Backends - -## Operating System Native - -### OsFs - -The first is simply a wrapper around the native OS calls. This makes it -very easy to use as all of the calls are the same as the existing OS -calls. It also makes it trivial to have your code use the OS during -operation and a mock filesystem during testing or as needed.
- -```go -appfs := afero.NewOsFs() -appfs.MkdirAll("src/a", 0755) -``` - -## Memory Backed Storage - -### MemMapFs - -Afero also provides a fully atomic memory backed filesystem perfect for use in -mocking and to avoid unnecessary disk io when persistence isn’t -necessary. It is fully concurrent and will work within go routines -safely. - -```go -mm := afero.NewMemMapFs() -mm.MkdirAll("src/a", 0755) -``` - -#### InMemoryFile - -As part of MemMapFs, Afero also provides an atomic, fully concurrent memory -backed file implementation. This can be used in other memory backed file -systems with ease. Plans are to add a radix tree memory stored file -system using InMemoryFile. - -## Network Interfaces - -### SftpFs - -Afero has experimental support for secure file transfer protocol (sftp), which can -be used to perform file operations over an encrypted channel. - -## Filtering Backends - -### BasePathFs - -The BasePathFs restricts all operations to a given path within an Fs. -The given file name to the operations on this Fs will be prepended with -the base path before calling the source Fs. - -```go -bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path") -``` - -### ReadOnlyFs - -A thin wrapper around the source Fs providing a read only view. - -```go -fs := afero.NewReadOnlyFs(afero.NewOsFs()) -_, err := fs.Create("/file.txt") -// err = syscall.EPERM -``` - -### RegexpFs - -A filtered view on file names: any file NOT matching -the passed regexp will be treated as non-existing. -Files not matching the regexp provided will not be created. -Directories are not filtered. - -```go -fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`)) -_, err := fs.Create("/file.html") -// err = syscall.ENOENT -``` - -### HttpFs - -Afero provides an http compatible backend which can wrap any of the existing -backends. - -The Http package requires a slightly specific version of Open which -returns an http.File type. - -Afero provides an httpFs file system which satisfies this requirement. -Any Afero FileSystem can be used as an httpFs. - -```go -httpFs := afero.NewHttpFs(<ExistingFS>) -fileserver := http.FileServer(httpFs.Dir(<PATH>)) -http.Handle("/", fileserver) -``` - -## Composite Backends - -Afero provides the ability to have two filesystems (or more) act as a single -file system. - -### CacheOnReadFs - -The CacheOnReadFs will lazily make copies of any accessed files from the base -layer into the overlay. Subsequent reads will be pulled from the overlay -directly, provided the request is within the cache duration of when it was -created in the overlay. - -If the base filesystem is writeable, any changes to files will be -done first to the base, then to the overlay layer. Write calls to open file -handles like `Write()` or `Truncate()` go to the overlay first. - -To write files to the overlay only, you can use the overlay Fs directly (not -via the union Fs). - -Cache files in the layer for the given time.Duration, a cache duration of 0 -means "forever" meaning the file will not be re-requested from the base ever. - -A read-only base will make the overlay also read-only but still copy files -from the base to the overlay when they're not present (or outdated) in the -caching layer. - -```go -base := afero.NewOsFs() -layer := afero.NewMemMapFs() -ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second) -``` - -### CopyOnWriteFs() - -The CopyOnWriteFs is a read only base file system with a potentially -writeable layer on top.
 - -Read operations will first look in the overlay and if not found there, will -serve the file from the base. - -Changes to the file system will only be made in the overlay. - -Any attempt to modify a file found only in the base will copy the file to the -overlay layer before modification (including opening a file with a writable -handle). - -Removing and renaming files present only in the base layer is not currently -permitted. If a file is present in the base layer and the overlay, only the -overlay will be removed/renamed. - -```go - base := afero.NewOsFs() - roBase := afero.NewReadOnlyFs(base) - ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs()) - - fh, _ := ufs.Create("/home/test/file2.txt") - fh.WriteString("This is a test") - fh.Close() -``` - -In this example all write operations will only occur in memory (MemMapFs) -leaving the base filesystem (OsFs) untouched. - - -## Desired/possible backends - -The following is a short list of possible backends we hope someone will -implement: - -* SSH -* S3 - -# About the project - -## What's in the name - -Afero comes from the Latin roots Ad-Facere. - -**"Ad"** is a prefix meaning "to". - -**"Facere"** is a form of the root "faciō" making "make or do". - -The literal meaning of afero is "to make" or "to do" which seems very fitting -for a library that allows one to make files and directories and do things with them. - -The English word that shares the same roots as Afero is "affair". Affair shares -the same concept but as a noun it means "something that is made or done" or "an -object of a particular type". - -It's also nice that unlike some of my other libraries (hugo, cobra, viper) it -Googles very well. - -## Release Notes - -See the [Releases Page](https://github.com/spf13/afero/releases). - -## Contributing - -1. Fork it -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Commit your changes (`git commit -am 'Add some feature'`) -4. Push to the branch (`git push origin my-new-feature`) -5. Create new Pull Request - -## Contributors - -Names in no particular order: - -* [spf13](https://github.com/spf13) -* [jaqx0r](https://github.com/jaqx0r) -* [mbertschler](https://github.com/mbertschler) -* [xor-gate](https://github.com/xor-gate) - -## License - -Afero is released under the Apache 2.0 license. See -[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt) diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go deleted file mode 100644 index 469ff7d2..00000000 --- a/vendor/github.com/spf13/afero/afero.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright © 2014 Steve Francia <spf@spf13.com>. -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package afero provides types and methods for interacting with the filesystem, -// as an abstraction layer. - -// Afero also provides a few implementations that are mostly interoperable.
One that -// uses the operating system filesystem, one that uses memory to store files -// (cross platform) and an interface that should be implemented if you want to -// provide your own filesystem. - -package afero - -import ( - "errors" - "io" - "os" - "time" -) - -type Afero struct { - Fs -} - -// File represents a file in the filesystem. -type File interface { - io.Closer - io.Reader - io.ReaderAt - io.Seeker - io.Writer - io.WriterAt - - Name() string - Readdir(count int) ([]os.FileInfo, error) - Readdirnames(n int) ([]string, error) - Stat() (os.FileInfo, error) - Sync() error - Truncate(size int64) error - WriteString(s string) (ret int, err error) -} - -// Fs is the filesystem interface. -// -// Any simulated or real filesystem should implement this interface. -type Fs interface { - // Create creates a file in the filesystem, returning the file and an - // error, if any happens. - Create(name string) (File, error) - - // Mkdir creates a directory in the filesystem, return an error if any - // happens. - Mkdir(name string, perm os.FileMode) error - - // MkdirAll creates a directory path and all parents that does not exist - // yet. - MkdirAll(path string, perm os.FileMode) error - - // Open opens a file, returning it or an error, if any happens. - Open(name string) (File, error) - - // OpenFile opens a file using the given flags and the given mode. - OpenFile(name string, flag int, perm os.FileMode) (File, error) - - // Remove removes a file identified by name, returning an error, if any - // happens. - Remove(name string) error - - // RemoveAll removes a directory path and any children it contains. It - // does not fail if the path does not exist (return nil). - RemoveAll(path string) error - - // Rename renames a file. - Rename(oldname, newname string) error - - // Stat returns a FileInfo describing the named file, or an error, if any - // happens. - Stat(name string) (os.FileInfo, error) - - // The name of this FileSystem - Name() string - - // Chmod changes the mode of the named file to mode. - Chmod(name string, mode os.FileMode) error - - // Chown changes the uid and gid of the named file. - Chown(name string, uid, gid int) error - - //Chtimes changes the access and modification times of the named file - Chtimes(name string, atime time.Time, mtime time.Time) error -} - -var ( - ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("Out of range") - ErrTooLarge = errors.New("Too large") - ErrFileNotFound = os.ErrNotExist - ErrFileExists = os.ErrExist - ErrDestinationExists = os.ErrExist -) diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml deleted file mode 100644 index 5d2f34bf..00000000 --- a/vendor/github.com/spf13/afero/appveyor.yml +++ /dev/null @@ -1,15 +0,0 @@ -version: '{build}' -clone_folder: C:\gopath\src\github.com\spf13\afero -environment: - GOPATH: C:\gopath -build_script: -- cmd: >- - go version - - go env - - go get -v github.com/spf13/afero/... - - go build -v github.com/spf13/afero/... -test_script: -- cmd: go test -count=1 -cover -race -v github.com/spf13/afero/... diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go deleted file mode 100644 index 4f983282..00000000 --- a/vendor/github.com/spf13/afero/basepath.go +++ /dev/null @@ -1,211 +0,0 @@ -package afero - -import ( - "os" - "path/filepath" - "runtime" - "strings" - "time" -) - -var _ Lstater = (*BasePathFs)(nil) - -// The BasePathFs restricts all operations to a given path within an Fs. 
-// The given file name to the operations on this Fs will be prepended with -// the base path before calling the base Fs. -// Any file name (after filepath.Clean()) outside this base path will be -// treated as non existing file. -// -// Note that it does not clean the error messages on return, so you may -// reveal the real path on errors. -type BasePathFs struct { - source Fs - path string -} - -type BasePathFile struct { - File - path string -} - -func (f *BasePathFile) Name() string { - sourcename := f.File.Name() - return strings.TrimPrefix(sourcename, filepath.Clean(f.path)) -} - -func NewBasePathFs(source Fs, path string) Fs { - return &BasePathFs{source: source, path: path} -} - -// on a file outside the base path it returns the given file name and an error, -// else the given file with the base path prepended -func (b *BasePathFs) RealPath(name string) (path string, err error) { - if err := validateBasePathName(name); err != nil { - return name, err - } - - bpath := filepath.Clean(b.path) - path = filepath.Clean(filepath.Join(bpath, name)) - if !strings.HasPrefix(path, bpath) { - return name, os.ErrNotExist - } - - return path, nil -} - -func validateBasePathName(name string) error { - if runtime.GOOS != "windows" { - // Not much to do here; - // the virtual file paths all look absolute on *nix. - return nil - } - - // On Windows a common mistake would be to provide an absolute OS path - // We could strip out the base part, but that would not be very portable. - if filepath.IsAbs(name) { - return os.ErrNotExist - } - - return nil -} - -func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chtimes", Path: name, Err: err} - } - return b.source.Chtimes(name, atime, mtime) -} - -func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chmod", Path: name, Err: err} - } - return b.source.Chmod(name, mode) -} - -func (b *BasePathFs) Chown(name string, uid, gid int) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chown", Path: name, Err: err} - } - return b.source.Chown(name, uid, gid) -} - -func (b *BasePathFs) Name() string { - return "BasePathFs" -} - -func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "stat", Path: name, Err: err} - } - return b.source.Stat(name) -} - -func (b *BasePathFs) Rename(oldname, newname string) (err error) { - if oldname, err = b.RealPath(oldname); err != nil { - return &os.PathError{Op: "rename", Path: oldname, Err: err} - } - if newname, err = b.RealPath(newname); err != nil { - return &os.PathError{Op: "rename", Path: newname, Err: err} - } - return b.source.Rename(oldname, newname) -} - -func (b *BasePathFs) RemoveAll(name string) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "remove_all", Path: name, Err: err} - } - return b.source.RemoveAll(name) -} - -func (b *BasePathFs) Remove(name string) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "remove", Path: name, Err: err} - } - return b.source.Remove(name) -} - -func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "openfile", Path: name, Err: err} - } - sourcef, err := 
b.source.OpenFile(name, flag, mode) - if err != nil { - return nil, err - } - return &BasePathFile{sourcef, b.path}, nil -} - -func (b *BasePathFs) Open(name string) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "open", Path: name, Err: err} - } - sourcef, err := b.source.Open(name) - if err != nil { - return nil, err - } - return &BasePathFile{File: sourcef, path: b.path}, nil -} - -func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - return b.source.Mkdir(name, mode) -} - -func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - return b.source.MkdirAll(name, mode) -} - -func (b *BasePathFs) Create(name string) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "create", Path: name, Err: err} - } - sourcef, err := b.source.Create(name) - if err != nil { - return nil, err - } - return &BasePathFile{File: sourcef, path: b.path}, nil -} - -func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - name, err := b.RealPath(name) - if err != nil { - return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err} - } - if lstater, ok := b.source.(Lstater); ok { - return lstater.LstatIfPossible(name) - } - fi, err := b.source.Stat(name) - return fi, false, err -} - -func (b *BasePathFs) SymlinkIfPossible(oldname, newname string) error { - oldname, err := b.RealPath(oldname) - if err != nil { - return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err} - } - newname, err = b.RealPath(newname) - if err != nil { - return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err} - } - if linker, ok := b.source.(Linker); ok { - return linker.SymlinkIfPossible(oldname, newname) - } - return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} -} - -func (b *BasePathFs) ReadlinkIfPossible(name string) (string, error) { - name, err := b.RealPath(name) - if err != nil { - return "", &os.PathError{Op: "readlink", Path: name, Err: err} - } - if reader, ok := b.source.(LinkReader); ok { - return reader.ReadlinkIfPossible(name) - } - return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} -} diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go deleted file mode 100644 index 71471aa2..00000000 --- a/vendor/github.com/spf13/afero/cacheOnReadFs.go +++ /dev/null @@ -1,311 +0,0 @@ -package afero - -import ( - "os" - "syscall" - "time" -) - -// If the cache duration is 0, cache time will be unlimited, i.e. once -// a file is in the layer, the base will never be read again for this file. -// -// For cache times greater than 0, the modification time of a file is -// checked. Note that a lot of file system implementations only allow a -// resolution of a second for timestamps... or as the godoc for os.Chtimes() -// states: "The underlying filesystem may truncate or round the values to a -// less precise time unit." -// -// This caching union will forward all write calls also to the base file -// system first. 
To prevent writing to the base Fs, wrap it in a read-only -// filter - Note: this will also make the overlay read-only, for writing files -// in the overlay, use the overlay Fs directly, not via the union Fs. -type CacheOnReadFs struct { - base Fs - layer Fs - cacheTime time.Duration -} - -func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs { - return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime} -} - -type cacheState int - -const ( - // not present in the overlay, unknown if it exists in the base: - cacheMiss cacheState = iota - // present in the overlay and in base, base file is newer: - cacheStale - // present in the overlay - with cache time == 0 it may exist in the base, - // with cacheTime > 0 it exists in the base and is same age or newer in the - // overlay - cacheHit - // happens if someone writes directly to the overlay without - // going through this union - cacheLocal -) - -func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) { - var lfi, bfi os.FileInfo - lfi, err = u.layer.Stat(name) - if err == nil { - if u.cacheTime == 0 { - return cacheHit, lfi, nil - } - if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) { - bfi, err = u.base.Stat(name) - if err != nil { - return cacheLocal, lfi, nil - } - if bfi.ModTime().After(lfi.ModTime()) { - return cacheStale, bfi, nil - } - } - return cacheHit, lfi, nil - } - - if err == syscall.ENOENT || os.IsNotExist(err) { - return cacheMiss, nil, nil - } - - return cacheMiss, nil, err -} - -func (u *CacheOnReadFs) copyToLayer(name string) error { - return copyToLayer(u.base, u.layer, name) -} - -func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chtimes(name, atime, mtime) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chtimes(name, atime, mtime) - } - if err != nil { - return err - } - return u.layer.Chtimes(name, atime, mtime) -} - -func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chmod(name, mode) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chmod(name, mode) - } - if err != nil { - return err - } - return u.layer.Chmod(name, mode) -} - -func (u *CacheOnReadFs) Chown(name string, uid, gid int) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chown(name, uid, gid) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chown(name, uid, gid) - } - if err != nil { - return err - } - return u.layer.Chown(name, uid, gid) -} - -func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) { - st, fi, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - switch st { - case cacheMiss: - return u.base.Stat(name) - default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo - return fi, nil - } -} - -func (u *CacheOnReadFs) Rename(oldname, newname string) error { - st, _, err := u.cacheStatus(oldname) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Rename(oldname, newname) - case cacheStale, cacheMiss: - if err 
:= u.copyToLayer(oldname); err != nil { - return err - } - err = u.base.Rename(oldname, newname) - } - if err != nil { - return err - } - return u.layer.Rename(oldname, newname) -} - -func (u *CacheOnReadFs) Remove(name string) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit, cacheStale, cacheMiss: - err = u.base.Remove(name) - } - if err != nil { - return err - } - return u.layer.Remove(name) -} - -func (u *CacheOnReadFs) RemoveAll(name string) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit, cacheStale, cacheMiss: - err = u.base.RemoveAll(name) - } - if err != nil { - return err - } - return u.layer.RemoveAll(name) -} - -func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - st, _, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - switch st { - case cacheLocal, cacheHit: - default: - if err := u.copyToLayer(name); err != nil { - return nil, err - } - } - if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - bfi, err := u.base.OpenFile(name, flag, perm) - if err != nil { - return nil, err - } - lfi, err := u.layer.OpenFile(name, flag, perm) - if err != nil { - bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...? - return nil, err - } - return &UnionFile{Base: bfi, Layer: lfi}, nil - } - return u.layer.OpenFile(name, flag, perm) -} - -func (u *CacheOnReadFs) Open(name string) (File, error) { - st, fi, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - - switch st { - case cacheLocal: - return u.layer.Open(name) - - case cacheMiss: - bfi, err := u.base.Stat(name) - if err != nil { - return nil, err - } - if bfi.IsDir() { - return u.base.Open(name) - } - if err := u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.Open(name) - - case cacheStale: - if !fi.IsDir() { - if err := u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.Open(name) - } - case cacheHit: - if !fi.IsDir() { - return u.layer.Open(name) - } - } - // the dirs from cacheHit, cacheStale fall down here: - bfile, _ := u.base.Open(name) - lfile, err := u.layer.Open(name) - if err != nil && bfile == nil { - return nil, err - } - return &UnionFile{Base: bfile, Layer: lfile}, nil -} - -func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error { - err := u.base.Mkdir(name, perm) - if err != nil { - return err - } - return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache -} - -func (u *CacheOnReadFs) Name() string { - return "CacheOnReadFs" -} - -func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error { - err := u.base.MkdirAll(name, perm) - if err != nil { - return err - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CacheOnReadFs) Create(name string) (File, error) { - bfh, err := u.base.Create(name) - if err != nil { - return nil, err - } - lfh, err := u.layer.Create(name) - if err != nil { - // oops, see comment about OS_TRUNC above, should we remove? 
then we have to - // remember if the file did not exist before - bfh.Close() - return nil, err - } - return &UnionFile{Base: bfh, Layer: lfh}, nil -} diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go deleted file mode 100644 index 18b45824..00000000 --- a/vendor/github.com/spf13/afero/const_bsds.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright © 2016 Steve Francia <spf@spf13.com>. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build aix darwin openbsd freebsd netbsd dragonfly - -package afero - -import ( - "syscall" -) - -const BADFD = syscall.EBADF diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go deleted file mode 100644 index 2b850e4d..00000000 --- a/vendor/github.com/spf13/afero/const_win_unix.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright © 2016 Steve Francia <spf@spf13.com>. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// +build !darwin -// +build !openbsd -// +build !freebsd -// +build !dragonfly -// +build !netbsd -// +build !aix - -package afero - -import ( - "syscall" -) - -const BADFD = syscall.EBADFD diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go deleted file mode 100644 index 6ff8f309..00000000 --- a/vendor/github.com/spf13/afero/copyOnWriteFs.go +++ /dev/null @@ -1,326 +0,0 @@ -package afero - -import ( - "fmt" - "os" - "path/filepath" - "syscall" - "time" -) - -var _ Lstater = (*CopyOnWriteFs)(nil) - -// The CopyOnWriteFs is a union filesystem: a read only base file system with -// a possibly writeable layer on top. Changes to the file system will only -// be made in the overlay: Changing an existing file in the base layer which -// is not present in the overlay will copy the file to the overlay ("changing" -// includes also calls to e.g. Chtimes(), Chmod() and Chown()). -// -// Reading directories is currently only supported via Open(), not OpenFile().
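Editorial aside, not part of the vendored sources: the copy-up behaviour the doc comment above describes is easy to demonstrate. The sketch below uses only public afero API (`NewMemMapFs`, `NewCopyOnWriteFs`, `afero.WriteFile`, `afero.Exists`); the file name is illustrative.

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewMemMapFs()
	layer := afero.NewMemMapFs()
	if err := afero.WriteFile(base, "config.txt", []byte("v1"), 0644); err != nil {
		panic(err)
	}

	ufs := afero.NewCopyOnWriteFs(base, layer)

	// Chmod counts as "changing" per the doc comment, so the file is
	// copied from the base into the overlay before being modified there.
	if err := ufs.Chmod("config.txt", 0600); err != nil {
		panic(err)
	}

	onLayer, _ := afero.Exists(layer, "config.txt")
	fmt.Println(onLayer) // true: the overlay now holds its own copy
}
```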
-type CopyOnWriteFs struct { - base Fs - layer Fs -} - -func NewCopyOnWriteFs(base Fs, layer Fs) Fs { - return &CopyOnWriteFs{base: base, layer: layer} -} - -// Returns true if the file is not in the overlay -func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { - if _, err := u.layer.Stat(name); err == nil { - return false, nil - } - _, err := u.base.Stat(name) - if err != nil { - if oerr, ok := err.(*os.PathError); ok { - if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { - return false, nil - } - } - if err == syscall.ENOENT { - return false, nil - } - } - return true, err -} - -func (u *CopyOnWriteFs) copyToLayer(name string) error { - return copyToLayer(u.base, u.layer, name) -} - -func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chtimes(name, atime, mtime) -} - -func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chmod(name, mode) -} - -func (u *CopyOnWriteFs) Chown(name string, uid, gid int) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chown(name, uid, gid) -} - -func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) { - fi, err := u.layer.Stat(name) - if err != nil { - isNotExist := u.isNotExist(err) - if isNotExist { - return u.base.Stat(name) - } - return nil, err - } - return fi, nil -} - -func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - llayer, ok1 := u.layer.(Lstater) - lbase, ok2 := u.base.(Lstater) - - if ok1 { - fi, b, err := llayer.LstatIfPossible(name) - if err == nil { - return fi, b, nil - } - - if !u.isNotExist(err) { - return nil, b, err - } - } - - if ok2 { - fi, b, err := lbase.LstatIfPossible(name) - if err == nil { - return fi, b, nil - } - if !u.isNotExist(err) { - return nil, b, err - } - } - - fi, err := u.Stat(name) - - return fi, false, err -} - -func (u *CopyOnWriteFs) SymlinkIfPossible(oldname, newname string) error { - if slayer, ok := u.layer.(Linker); ok { - return slayer.SymlinkIfPossible(oldname, newname) - } - - return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} -} - -func (u *CopyOnWriteFs) ReadlinkIfPossible(name string) (string, error) { - if rlayer, ok := u.layer.(LinkReader); ok { - return rlayer.ReadlinkIfPossible(name) - } - - if rbase, ok := u.base.(LinkReader); ok { - return rbase.ReadlinkIfPossible(name) - } - - return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} -} - -func (u *CopyOnWriteFs) isNotExist(err error) bool { - if e, ok := err.(*os.PathError); ok { - err = e.Err - } - if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR { - return true - } - return false -} - -// Renaming files present only in the base layer is not permitted -func (u *CopyOnWriteFs) Rename(oldname, newname string) error { - b, err := u.isBaseFile(oldname) - if err != nil { - return err - } - if b { - return syscall.EPERM - } - return u.layer.Rename(oldname, newname) -} - -// Removing files present only in the base layer is not permitted. 
If -// a file is present in the base layer and the overlay, only the overlay -// will be removed. -func (u *CopyOnWriteFs) Remove(name string) error { - err := u.layer.Remove(name) - switch err { - case syscall.ENOENT: - _, err = u.base.Stat(name) - if err == nil { - return syscall.EPERM - } - return syscall.ENOENT - default: - return err - } -} - -func (u *CopyOnWriteFs) RemoveAll(name string) error { - err := u.layer.RemoveAll(name) - switch err { - case syscall.ENOENT: - _, err = u.base.Stat(name) - if err == nil { - return syscall.EPERM - } - return syscall.ENOENT - default: - return err - } -} - -func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - b, err := u.isBaseFile(name) - if err != nil { - return nil, err - } - - if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - if b { - if err = u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.OpenFile(name, flag, perm) - } - - dir := filepath.Dir(name) - isaDir, err := IsDir(u.base, dir) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - if isaDir { - if err = u.layer.MkdirAll(dir, 0777); err != nil { - return nil, err - } - return u.layer.OpenFile(name, flag, perm) - } - - isaDir, err = IsDir(u.layer, dir) - if err != nil { - return nil, err - } - if isaDir { - return u.layer.OpenFile(name, flag, perm) - } - - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? - } - if b { - return u.base.OpenFile(name, flag, perm) - } - return u.layer.OpenFile(name, flag, perm) -} - -// This function handles the 9 different possibilities caused -// by the union which are the intersection of the following... -// layer: doesn't exist, exists as a file, and exists as a directory -// base: doesn't exist, exists as a file, and exists as a directory -func (u *CopyOnWriteFs) Open(name string) (File, error) { - // Since the overlay overrides the base we check that first - b, err := u.isBaseFile(name) - if err != nil { - return nil, err - } - - // If overlay doesn't exist, return the base (base state irrelevant) - if b { - return u.base.Open(name) - } - - // If overlay is a file, return it (base state irrelevant) - dir, err := IsDir(u.layer, name) - if err != nil { - return nil, err - } - if !dir { - return u.layer.Open(name) - } - - // Overlay is a directory, base state now matters. - // Base state has 3 states to check but 2 outcomes: - // A. It's a file or non-readable in the base (return just the overlay) - // B. It's an accessible directory in the base (return a UnionFile) - - // If base is file or nonreadable, return overlay - dir, err = IsDir(u.base, name) - if !dir || err != nil { - return u.layer.Open(name) - } - - // Both base & layer are directories - // Return union file (if opens are without error) - bfile, bErr := u.base.Open(name) - lfile, lErr := u.layer.Open(name) - - // If either have errors at this point something is very wrong. 
Return nil and the errors - if bErr != nil || lErr != nil { - return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr) - } - - return &UnionFile{Base: bfile, Layer: lfile}, nil -} - -func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error { - dir, err := IsDir(u.base, name) - if err != nil { - return u.layer.MkdirAll(name, perm) - } - if dir { - return ErrFileExists - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CopyOnWriteFs) Name() string { - return "CopyOnWriteFs" -} - -func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { - dir, err := IsDir(u.base, name) - if err != nil { - return u.layer.MkdirAll(name, perm) - } - if dir { - // This is in line with how os.MkdirAll behaves. - return nil - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CopyOnWriteFs) Create(name string) (File, error) { - return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) -} diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go deleted file mode 100644 index 2b86e30d..00000000 --- a/vendor/github.com/spf13/afero/httpFs.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "errors" - "net/http" - "os" - "path" - "path/filepath" - "strings" - "time" -) - -type httpDir struct { - basePath string - fs HttpFs -} - -func (d httpDir) Open(name string) (http.File, error) { - if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || - strings.Contains(name, "\x00") { - return nil, errors.New("http: invalid character in file path") - } - dir := string(d.basePath) - if dir == "" { - dir = "." 
- } - - f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) - if err != nil { - return nil, err - } - return f, nil -} - -type HttpFs struct { - source Fs -} - -func NewHttpFs(source Fs) *HttpFs { - return &HttpFs{source: source} -} - -func (h HttpFs) Dir(s string) *httpDir { - return &httpDir{basePath: s, fs: h} -} - -func (h HttpFs) Name() string { return "h HttpFs" } - -func (h HttpFs) Create(name string) (File, error) { - return h.source.Create(name) -} - -func (h HttpFs) Chmod(name string, mode os.FileMode) error { - return h.source.Chmod(name, mode) -} - -func (h HttpFs) Chown(name string, uid, gid int) error { - return h.source.Chown(name, uid, gid) -} - -func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return h.source.Chtimes(name, atime, mtime) -} - -func (h HttpFs) Mkdir(name string, perm os.FileMode) error { - return h.source.Mkdir(name, perm) -} - -func (h HttpFs) MkdirAll(path string, perm os.FileMode) error { - return h.source.MkdirAll(path, perm) -} - -func (h HttpFs) Open(name string) (http.File, error) { - f, err := h.source.Open(name) - if err == nil { - if httpfile, ok := f.(http.File); ok { - return httpfile, nil - } - } - return nil, err -} - -func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - return h.source.OpenFile(name, flag, perm) -} - -func (h HttpFs) Remove(name string) error { - return h.source.Remove(name) -} - -func (h HttpFs) RemoveAll(path string) error { - return h.source.RemoveAll(path) -} - -func (h HttpFs) Rename(oldname, newname string) error { - return h.source.Rename(oldname, newname) -} - -func (h HttpFs) Stat(name string) (os.FileInfo, error) { - return h.source.Stat(name) -} diff --git a/vendor/github.com/spf13/afero/iofs.go b/vendor/github.com/spf13/afero/iofs.go deleted file mode 100644 index c8034553..00000000 --- a/vendor/github.com/spf13/afero/iofs.go +++ /dev/null @@ -1,288 +0,0 @@ -// +build go1.16 - -package afero - -import ( - "io" - "io/fs" - "os" - "path" - "time" -) - -// IOFS adopts afero.Fs to stdlib io/fs.FS -type IOFS struct { - Fs -} - -func NewIOFS(fs Fs) IOFS { - return IOFS{Fs: fs} -} - -var ( - _ fs.FS = IOFS{} - _ fs.GlobFS = IOFS{} - _ fs.ReadDirFS = IOFS{} - _ fs.ReadFileFS = IOFS{} - _ fs.StatFS = IOFS{} - _ fs.SubFS = IOFS{} -) - -func (iofs IOFS) Open(name string) (fs.File, error) { - const op = "open" - - // by convention for fs.FS implementations we should perform this check - if !fs.ValidPath(name) { - return nil, iofs.wrapError(op, name, fs.ErrInvalid) - } - - file, err := iofs.Fs.Open(name) - if err != nil { - return nil, iofs.wrapError(op, name, err) - } - - // file should implement fs.ReadDirFile - if _, ok := file.(fs.ReadDirFile); !ok { - file = readDirFile{file} - } - - return file, nil -} - -func (iofs IOFS) Glob(pattern string) ([]string, error) { - const op = "glob" - - // afero.Glob does not perform this check but it's required for implementations - if _, err := path.Match(pattern, ""); err != nil { - return nil, iofs.wrapError(op, pattern, err) - } - - items, err := Glob(iofs.Fs, pattern) - if err != nil { - return nil, iofs.wrapError(op, pattern, err) - } - - return items, nil -} - -func (iofs IOFS) ReadDir(name string) ([]fs.DirEntry, error) { - items, err := ReadDir(iofs.Fs, name) - if err != nil { - return nil, iofs.wrapError("readdir", name, err) - } - - ret := make([]fs.DirEntry, len(items)) - for i := range items { - ret[i] = dirEntry{items[i]} - } - - return ret, nil -} - -func (iofs IOFS) ReadFile(name 
string) ([]byte, error) { - const op = "readfile" - - if !fs.ValidPath(name) { - return nil, iofs.wrapError(op, name, fs.ErrInvalid) - } - - bytes, err := ReadFile(iofs.Fs, name) - if err != nil { - return nil, iofs.wrapError(op, name, err) - } - - return bytes, nil -} - -func (iofs IOFS) Sub(dir string) (fs.FS, error) { return IOFS{NewBasePathFs(iofs.Fs, dir)}, nil } - -func (IOFS) wrapError(op, path string, err error) error { - if _, ok := err.(*fs.PathError); ok { - return err // don't need to wrap again - } - - return &fs.PathError{ - Op: op, - Path: path, - Err: err, - } -} - -// dirEntry provides adapter from os.FileInfo to fs.DirEntry -type dirEntry struct { - fs.FileInfo -} - -var _ fs.DirEntry = dirEntry{} - -func (d dirEntry) Type() fs.FileMode { return d.FileInfo.Mode().Type() } - -func (d dirEntry) Info() (fs.FileInfo, error) { return d.FileInfo, nil } - -// readDirFile provides adapter from afero.File to fs.ReadDirFile needed for correct Open -type readDirFile struct { - File -} - -var _ fs.ReadDirFile = readDirFile{} - -func (r readDirFile) ReadDir(n int) ([]fs.DirEntry, error) { - items, err := r.File.Readdir(n) - if err != nil { - return nil, err - } - - ret := make([]fs.DirEntry, len(items)) - for i := range items { - ret[i] = dirEntry{items[i]} - } - - return ret, nil -} - -// FromIOFS adopts io/fs.FS to use it as afero.Fs -// Note that io/fs.FS is read-only so all mutating methods will return fs.PathError with fs.ErrPermission -// To store modifications you may use afero.CopyOnWriteFs -type FromIOFS struct { - fs.FS -} - -var _ Fs = FromIOFS{} - -func (f FromIOFS) Create(name string) (File, error) { return nil, notImplemented("create", name) } - -func (f FromIOFS) Mkdir(name string, perm os.FileMode) error { return notImplemented("mkdir", name) } - -func (f FromIOFS) MkdirAll(path string, perm os.FileMode) error { - return notImplemented("mkdirall", path) -} - -func (f FromIOFS) Open(name string) (File, error) { - file, err := f.FS.Open(name) - if err != nil { - return nil, err - } - - return fromIOFSFile{File: file, name: name}, nil -} - -func (f FromIOFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - return f.Open(name) -} - -func (f FromIOFS) Remove(name string) error { - return notImplemented("remove", name) -} - -func (f FromIOFS) RemoveAll(path string) error { - return notImplemented("removeall", path) -} - -func (f FromIOFS) Rename(oldname, newname string) error { - return notImplemented("rename", oldname) -} - -func (f FromIOFS) Stat(name string) (os.FileInfo, error) { return fs.Stat(f.FS, name) } - -func (f FromIOFS) Name() string { return "fromiofs" } - -func (f FromIOFS) Chmod(name string, mode os.FileMode) error { - return notImplemented("chmod", name) -} - -func (f FromIOFS) Chown(name string, uid, gid int) error { - return notImplemented("chown", name) -} - -func (f FromIOFS) Chtimes(name string, atime time.Time, mtime time.Time) error { - return notImplemented("chtimes", name) -} - -type fromIOFSFile struct { - fs.File - name string -} - -func (f fromIOFSFile) ReadAt(p []byte, off int64) (n int, err error) { - readerAt, ok := f.File.(io.ReaderAt) - if !ok { - return -1, notImplemented("readat", f.name) - } - - return readerAt.ReadAt(p, off) -} - -func (f fromIOFSFile) Seek(offset int64, whence int) (int64, error) { - seeker, ok := f.File.(io.Seeker) - if !ok { - return -1, notImplemented("seek", f.name) - } - - return seeker.Seek(offset, whence) -} - -func (f fromIOFSFile) Write(p []byte) (n int, err error) { - return -1, 
notImplemented("write", f.name) -} - -func (f fromIOFSFile) WriteAt(p []byte, off int64) (n int, err error) { - return -1, notImplemented("writeat", f.name) -} - -func (f fromIOFSFile) Name() string { return f.name } - -func (f fromIOFSFile) Readdir(count int) ([]os.FileInfo, error) { - rdfile, ok := f.File.(fs.ReadDirFile) - if !ok { - return nil, notImplemented("readdir", f.name) - } - - entries, err := rdfile.ReadDir(count) - if err != nil { - return nil, err - } - - ret := make([]os.FileInfo, len(entries)) - for i := range entries { - ret[i], err = entries[i].Info() - - if err != nil { - return nil, err - } - } - - return ret, nil -} - -func (f fromIOFSFile) Readdirnames(n int) ([]string, error) { - rdfile, ok := f.File.(fs.ReadDirFile) - if !ok { - return nil, notImplemented("readdir", f.name) - } - - entries, err := rdfile.ReadDir(n) - if err != nil { - return nil, err - } - - ret := make([]string, len(entries)) - for i := range entries { - ret[i] = entries[i].Name() - } - - return ret, nil -} - -func (f fromIOFSFile) Sync() error { return nil } - -func (f fromIOFSFile) Truncate(size int64) error { - return notImplemented("truncate", f.name) -} - -func (f fromIOFSFile) WriteString(s string) (ret int, err error) { - return -1, notImplemented("writestring", f.name) -} - -func notImplemented(op, path string) error { - return &fs.PathError{Op: op, Path: path, Err: fs.ErrPermission} -} diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go deleted file mode 100644 index a403133e..00000000 --- a/vendor/github.com/spf13/afero/ioutil.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright ©2015 The Go Authors -// Copyright ©2015 Steve Francia -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "bytes" - "io" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "sync" - "time" -) - -// byName implements sort.Interface. -type byName []os.FileInfo - -func (f byName) Len() int { return len(f) } -func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } -func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } - -// ReadDir reads the directory named by dirname and returns -// a list of sorted directory entries. -func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) { - return ReadDir(a.Fs, dirname) -} - -func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) { - f, err := fs.Open(dirname) - if err != nil { - return nil, err - } - list, err := f.Readdir(-1) - f.Close() - if err != nil { - return nil, err - } - sort.Sort(byName(list)) - return list, nil -} - -// ReadFile reads the file named by filename and returns the contents. -// A successful call returns err == nil, not err == EOF. Because ReadFile -// reads the whole file, it does not treat an EOF from Read as an error -// to be reported. 
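// A short sketch of the ReadFile helper documented above, assuming an
// in-memory filesystem: because the whole file is consumed, a successful
// read reports err == nil, never io.EOF.
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	if err := afero.WriteFile(fs, "greeting.txt", []byte("hello, afero"), 0644); err != nil {
		panic(err)
	}

	data, err := afero.ReadFile(fs, "greeting.txt")
	fmt.Printf("data=%q err=%v\n", data, err) // data="hello, afero" err=<nil>
}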
-func (a Afero) ReadFile(filename string) ([]byte, error) { - return ReadFile(a.Fs, filename) -} - -func ReadFile(fs Fs, filename string) ([]byte, error) { - f, err := fs.Open(filename) - if err != nil { - return nil, err - } - defer f.Close() - // It's a good but not certain bet that FileInfo will tell us exactly how much to - // read, so let's try it but be prepared for the answer to be wrong. - var n int64 - - if fi, err := f.Stat(); err == nil { - // Don't preallocate a huge buffer, just in case. - if size := fi.Size(); size < 1e9 { - n = size - } - } - // As initial capacity for readAll, use n + a little extra in case Size is zero, - // and to avoid another allocation after Read has filled the buffer. The readAll - // call will read into its allocated internal buffer cheaply. If the size was - // wrong, we'll either waste some space off the end or reallocate as needed, but - // in the overwhelmingly common case we'll get it just right. - return readAll(f, n+bytes.MinRead) -} - -// readAll reads from r until an error or EOF and returns the data it read -// from the internal buffer allocated with a specified capacity. -func readAll(r io.Reader, capacity int64) (b []byte, err error) { - buf := bytes.NewBuffer(make([]byte, 0, capacity)) - // If the buffer overflows, we will get bytes.ErrTooLarge. - // Return that as an error. Any other panic remains. - defer func() { - e := recover() - if e == nil { - return - } - if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge { - err = panicErr - } else { - panic(e) - } - }() - _, err = buf.ReadFrom(r) - return buf.Bytes(), err -} - -// ReadAll reads from r until an error or EOF and returns the data it read. -// A successful call returns err == nil, not err == EOF. Because ReadAll is -// defined to read from src until EOF, it does not treat an EOF from Read -// as an error to be reported. -func ReadAll(r io.Reader) ([]byte, error) { - return readAll(r, bytes.MinRead) -} - -// WriteFile writes data to a file named by filename. -// If the file does not exist, WriteFile creates it with permissions perm; -// otherwise WriteFile truncates it before writing. -func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error { - return WriteFile(a.Fs, filename, data, perm) -} - -func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { - f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -// Random number state. -// We generate random temporary file names so that there's a good -// chance the file doesn't exist yet - keeps the number of tries in -// TempFile to a minimum. -var rand uint32 -var randmu sync.Mutex - -func reseed() uint32 { - return uint32(time.Now().UnixNano() + int64(os.Getpid())) -} - -func nextRandom() string { - randmu.Lock() - r := rand - if r == 0 { - r = reseed() - } - r = r*1664525 + 1013904223 // constants from Numerical Recipes - rand = r - randmu.Unlock() - return strconv.Itoa(int(1e9 + r%1e9))[1:] -} - -// TempFile creates a new temporary file in the directory dir, -// opens the file for reading and writing, and returns the resulting *os.File. -// The filename is generated by taking pattern and adding a random -// string to the end. If pattern includes a "*", the random string -// replaces the last "*". 
-// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. -func (a Afero) TempFile(dir, pattern string) (f File, err error) { - return TempFile(a.Fs, dir, pattern) -} - -func TempFile(fs Fs, dir, pattern string) (f File, err error) { - if dir == "" { - dir = os.TempDir() - } - - var prefix, suffix string - if pos := strings.LastIndex(pattern, "*"); pos != -1 { - prefix, suffix = pattern[:pos], pattern[pos+1:] - } else { - prefix = pattern - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+nextRandom()+suffix) - f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - break - } - return -} - -// TempDir creates a new temporary directory in the directory dir -// with a name beginning with prefix and returns the path of the -// new directory. If dir is the empty string, TempDir uses the -// default directory for temporary files (see os.TempDir). -// Multiple programs calling TempDir simultaneously -// will not choose the same directory. It is the caller's responsibility -// to remove the directory when no longer needed. -func (a Afero) TempDir(dir, prefix string) (name string, err error) { - return TempDir(a.Fs, dir, prefix) -} -func TempDir(fs Fs, dir, prefix string) (name string, err error) { - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - try := filepath.Join(dir, prefix+nextRandom()) - err = fs.Mkdir(try, 0700) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - if err == nil { - name = try - } - break - } - return -} diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go deleted file mode 100644 index 89c1bfc0..00000000 --- a/vendor/github.com/spf13/afero/lstater.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright © 2018 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" -) - -// Lstater is an optional interface in Afero. It is only implemented by the -// filesystems saying so. -// It will call Lstat if the filesystem iself is, or it delegates to, the os filesystem. -// Else it will call Stat. -// In addtion to the FileInfo, it will return a boolean telling whether Lstat was called or not. 
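// A sketch of probing for the optional Lstater interface described below,
// assuming OsFs (which delegates to os.Lstat and so reports true for the
// boolean); filesystems without symlink support simply fail the assertion
// or fall back to Stat.
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	var fs afero.Fs = afero.NewOsFs()
	if lfs, ok := fs.(afero.Lstater); ok {
		fi, usedLstat, err := lfs.LstatIfPossible(".")
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s: Lstat used? %v\n", fi.Name(), usedLstat)
	}
}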
-type Lstater interface { - LstatIfPossible(name string) (os.FileInfo, bool, error) -} diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go deleted file mode 100644 index 7db4b7de..00000000 --- a/vendor/github.com/spf13/afero/match.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2009 The Go Authors. All rights reserved. - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "path/filepath" - "sort" - "strings" -) - -// Glob returns the names of all files matching pattern or nil -// if there is no matching file. The syntax of patterns is the same -// as in Match. The pattern may describe hierarchical names such as -// /usr/*/bin/ed (assuming the Separator is '/'). -// -// Glob ignores file system errors such as I/O errors reading directories. -// The only possible returned error is ErrBadPattern, when pattern -// is malformed. -// -// This was adapted from (http://golang.org/pkg/path/filepath) and uses several -// built-ins from that package. -func Glob(fs Fs, pattern string) (matches []string, err error) { - if !hasMeta(pattern) { - // Lstat not supported by a ll filesystems. - if _, err = lstatIfPossible(fs, pattern); err != nil { - return nil, nil - } - return []string{pattern}, nil - } - - dir, file := filepath.Split(pattern) - switch dir { - case "": - dir = "." - case string(filepath.Separator): - // nothing - default: - dir = dir[0 : len(dir)-1] // chop off trailing separator - } - - if !hasMeta(dir) { - return glob(fs, dir, file, nil) - } - - var m []string - m, err = Glob(fs, dir) - if err != nil { - return - } - for _, d := range m { - matches, err = glob(fs, d, file, matches) - if err != nil { - return - } - } - return -} - -// glob searches for files matching pattern in the directory dir -// and appends them to matches. If the directory cannot be -// opened, it returns the existing matches. New matches are -// added in lexicographical order. -func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) { - m = matches - fi, err := fs.Stat(dir) - if err != nil { - return - } - if !fi.IsDir() { - return - } - d, err := fs.Open(dir) - if err != nil { - return - } - defer d.Close() - - names, _ := d.Readdirnames(-1) - sort.Strings(names) - - for _, n := range names { - matched, err := filepath.Match(pattern, n) - if err != nil { - return m, err - } - if matched { - m = append(m, filepath.Join(dir, n)) - } - } - return -} - -// hasMeta reports whether path contains any of the magic characters -// recognized by Match. -func hasMeta(path string) bool { - // TODO(niemeyer): Should other magic characters be added here? - return strings.ContainsAny(path, "*?[") -} diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go deleted file mode 100644 index e104013f..00000000 --- a/vendor/github.com/spf13/afero/mem/dir.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright © 2014 Steve Francia . 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mem - -type Dir interface { - Len() int - Names() []string - Files() []*FileData - Add(*FileData) - Remove(*FileData) -} - -func RemoveFromMemDir(dir *FileData, f *FileData) { - dir.memDir.Remove(f) -} - -func AddToMemDir(dir *FileData, f *FileData) { - dir.memDir.Add(f) -} - -func InitializeDir(d *FileData) { - if d.memDir == nil { - d.dir = true - d.memDir = &DirMap{} - } -} diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go deleted file mode 100644 index 03a57ee5..00000000 --- a/vendor/github.com/spf13/afero/mem/dirmap.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright © 2015 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mem - -import "sort" - -type DirMap map[string]*FileData - -func (m DirMap) Len() int { return len(m) } -func (m DirMap) Add(f *FileData) { m[f.name] = f } -func (m DirMap) Remove(f *FileData) { delete(m, f.name) } -func (m DirMap) Files() (files []*FileData) { - for _, f := range m { - files = append(files, f) - } - sort.Sort(filesSorter(files)) - return files -} - -// implement sort.Interface for []*FileData -type filesSorter []*FileData - -func (s filesSorter) Len() int { return len(s) } -func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name } - -func (m DirMap) Names() (names []string) { - for x := range m { - names = append(names, x) - } - return names -} diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go deleted file mode 100644 index 5a20730c..00000000 --- a/vendor/github.com/spf13/afero/mem/file.go +++ /dev/null @@ -1,338 +0,0 @@ -// Copyright © 2015 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mem - -import ( - "bytes" - "errors" - "io" - "os" - "path/filepath" - "sync" - "sync/atomic" - "time" -) - -const FilePathSeparator = string(filepath.Separator) - -type File struct { - // atomic requires 64-bit alignment for struct field access - at int64 - readDirCount int64 - closed bool - readOnly bool - fileData *FileData -} - -func NewFileHandle(data *FileData) *File { - return &File{fileData: data} -} - -func NewReadOnlyFileHandle(data *FileData) *File { - return &File{fileData: data, readOnly: true} -} - -func (f File) Data() *FileData { - return f.fileData -} - -type FileData struct { - sync.Mutex - name string - data []byte - memDir Dir - dir bool - mode os.FileMode - modtime time.Time - uid int - gid int -} - -func (d *FileData) Name() string { - d.Lock() - defer d.Unlock() - return d.name -} - -func CreateFile(name string) *FileData { - return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()} -} - -func CreateDir(name string) *FileData { - return &FileData{name: name, memDir: &DirMap{}, dir: true} -} - -func ChangeFileName(f *FileData, newname string) { - f.Lock() - f.name = newname - f.Unlock() -} - -func SetMode(f *FileData, mode os.FileMode) { - f.Lock() - f.mode = mode - f.Unlock() -} - -func SetModTime(f *FileData, mtime time.Time) { - f.Lock() - setModTime(f, mtime) - f.Unlock() -} - -func setModTime(f *FileData, mtime time.Time) { - f.modtime = mtime -} - -func SetUID(f *FileData, uid int) { - f.Lock() - f.uid = uid - f.Unlock() -} - -func SetGID(f *FileData, gid int) { - f.Lock() - f.gid = gid - f.Unlock() -} - -func GetFileInfo(f *FileData) *FileInfo { - return &FileInfo{f} -} - -func (f *File) Open() error { - atomic.StoreInt64(&f.at, 0) - atomic.StoreInt64(&f.readDirCount, 0) - f.fileData.Lock() - f.closed = false - f.fileData.Unlock() - return nil -} - -func (f *File) Close() error { - f.fileData.Lock() - f.closed = true - if !f.readOnly { - setModTime(f.fileData, time.Now()) - } - f.fileData.Unlock() - return nil -} - -func (f *File) Name() string { - return f.fileData.Name() -} - -func (f *File) Stat() (os.FileInfo, error) { - return &FileInfo{f.fileData}, nil -} - -func (f *File) Sync() error { - return nil -} - -func (f *File) Readdir(count int) (res []os.FileInfo, err error) { - if !f.fileData.dir { - return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")} - } - var outLength int64 - - f.fileData.Lock() - files := f.fileData.memDir.Files()[f.readDirCount:] - if count > 0 { - if len(files) < count { - outLength = int64(len(files)) - } else { - outLength = int64(count) - } - if len(files) == 0 { - err = io.EOF - } - } else { - outLength = int64(len(files)) - } - f.readDirCount += outLength - f.fileData.Unlock() - - res = make([]os.FileInfo, outLength) - for i := range res { - res[i] = &FileInfo{files[i]} - } - - return res, err -} - -func (f *File) Readdirnames(n int) (names []string, err error) { - fi, err := f.Readdir(n) - names = make([]string, len(fi)) - for i, f := range fi { - _, names[i] = filepath.Split(f.Name()) - } - return names, err -} - -func (f *File) Read(b []byte) (n int, err error) { - f.fileData.Lock() - defer f.fileData.Unlock() - if f.closed == true { - return 0, ErrFileClosed - } - if len(b) > 0 && int(f.at) == len(f.fileData.data) { - return 0, io.EOF - } - if int(f.at) > len(f.fileData.data) { - return 0, io.ErrUnexpectedEOF - } - if len(f.fileData.data)-int(f.at) >= len(b) { - n = len(b) - } else { - n = len(f.fileData.data) - int(f.at) - } - copy(b, 
f.fileData.data[f.at:f.at+int64(n)]) - atomic.AddInt64(&f.at, int64(n)) - return -} - -func (f *File) ReadAt(b []byte, off int64) (n int, err error) { - prev := atomic.LoadInt64(&f.at) - atomic.StoreInt64(&f.at, off) - n, err = f.Read(b) - atomic.StoreInt64(&f.at, prev) - return -} - -func (f *File) Truncate(size int64) error { - if f.closed == true { - return ErrFileClosed - } - if f.readOnly { - return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} - } - if size < 0 { - return ErrOutOfRange - } - f.fileData.Lock() - defer f.fileData.Unlock() - if size > int64(len(f.fileData.data)) { - diff := size - int64(len(f.fileData.data)) - f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...) - } else { - f.fileData.data = f.fileData.data[0:size] - } - setModTime(f.fileData, time.Now()) - return nil -} - -func (f *File) Seek(offset int64, whence int) (int64, error) { - if f.closed == true { - return 0, ErrFileClosed - } - switch whence { - case io.SeekStart: - atomic.StoreInt64(&f.at, offset) - case io.SeekCurrent: - atomic.AddInt64(&f.at, offset) - case io.SeekEnd: - atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset) - } - return f.at, nil -} - -func (f *File) Write(b []byte) (n int, err error) { - if f.closed == true { - return 0, ErrFileClosed - } - if f.readOnly { - return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} - } - n = len(b) - cur := atomic.LoadInt64(&f.at) - f.fileData.Lock() - defer f.fileData.Unlock() - diff := cur - int64(len(f.fileData.data)) - var tail []byte - if n+int(cur) < len(f.fileData.data) { - tail = f.fileData.data[n+int(cur):] - } - if diff > 0 { - f.fileData.data = append(f.fileData.data, append(bytes.Repeat([]byte{00}, int(diff)), b...)...) - f.fileData.data = append(f.fileData.data, tail...) - } else { - f.fileData.data = append(f.fileData.data[:cur], b...) - f.fileData.data = append(f.fileData.data, tail...) - } - setModTime(f.fileData, time.Now()) - - atomic.AddInt64(&f.at, int64(n)) - return -} - -func (f *File) WriteAt(b []byte, off int64) (n int, err error) { - atomic.StoreInt64(&f.at, off) - return f.Write(b) -} - -func (f *File) WriteString(s string) (ret int, err error) { - return f.Write([]byte(s)) -} - -func (f *File) Info() *FileInfo { - return &FileInfo{f.fileData} -} - -type FileInfo struct { - *FileData -} - -// Implements os.FileInfo -func (s *FileInfo) Name() string { - s.Lock() - _, name := filepath.Split(s.name) - s.Unlock() - return name -} -func (s *FileInfo) Mode() os.FileMode { - s.Lock() - defer s.Unlock() - return s.mode -} -func (s *FileInfo) ModTime() time.Time { - s.Lock() - defer s.Unlock() - return s.modtime -} -func (s *FileInfo) IsDir() bool { - s.Lock() - defer s.Unlock() - return s.dir -} -func (s *FileInfo) Sys() interface{} { return nil } -func (s *FileInfo) Size() int64 { - if s.IsDir() { - return int64(42) - } - s.Lock() - defer s.Unlock() - return int64(len(s.data)) -} - -var ( - ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("Out of range") - ErrTooLarge = errors.New("Too large") - ErrFileNotFound = os.ErrNotExist - ErrFileExists = os.ErrExist - ErrDestinationExists = os.ErrExist -) diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go deleted file mode 100644 index 5c265f92..00000000 --- a/vendor/github.com/spf13/afero/memmap.go +++ /dev/null @@ -1,404 +0,0 @@ -// Copyright © 2014 Steve Francia . 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "fmt" - "log" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/spf13/afero/mem" -) - -const chmodBits = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky // Only a subset of bits are allowed to be changed. Documented under os.Chmod() - -type MemMapFs struct { - mu sync.RWMutex - data map[string]*mem.FileData - init sync.Once -} - -func NewMemMapFs() Fs { - return &MemMapFs{} -} - -func (m *MemMapFs) getData() map[string]*mem.FileData { - m.init.Do(func() { - m.data = make(map[string]*mem.FileData) - // Root should always exist, right? - // TODO: what about windows? - root := mem.CreateDir(FilePathSeparator) - mem.SetMode(root, os.ModeDir|0755) - m.data[FilePathSeparator] = root - }) - return m.data -} - -func (*MemMapFs) Name() string { return "MemMapFS" } - -func (m *MemMapFs) Create(name string) (File, error) { - name = normalizePath(name) - m.mu.Lock() - file := mem.CreateFile(name) - m.getData()[name] = file - m.registerWithParent(file, 0) - m.mu.Unlock() - return mem.NewFileHandle(file), nil -} - -func (m *MemMapFs) unRegisterWithParent(fileName string) error { - f, err := m.lockfreeOpen(fileName) - if err != nil { - return err - } - parent := m.findParent(f) - if parent == nil { - log.Panic("parent of ", f.Name(), " is nil") - } - - parent.Lock() - mem.RemoveFromMemDir(parent, f) - parent.Unlock() - return nil -} - -func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { - pdir, _ := filepath.Split(f.Name()) - pdir = filepath.Clean(pdir) - pfile, err := m.lockfreeOpen(pdir) - if err != nil { - return nil - } - return pfile -} - -func (m *MemMapFs) registerWithParent(f *mem.FileData, perm os.FileMode) { - if f == nil { - return - } - parent := m.findParent(f) - if parent == nil { - pdir := filepath.Dir(filepath.Clean(f.Name())) - err := m.lockfreeMkdir(pdir, perm) - if err != nil { - //log.Println("Mkdir error:", err) - return - } - parent, err = m.lockfreeOpen(pdir) - if err != nil { - //log.Println("Open after Mkdir error:", err) - return - } - } - - parent.Lock() - mem.InitializeDir(parent) - mem.AddToMemDir(parent, f) - parent.Unlock() -} - -func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error { - name = normalizePath(name) - x, ok := m.getData()[name] - if ok { - // Only return ErrFileExists if it's a file, not a directory. 
- i := mem.FileInfo{FileData: x} - if !i.IsDir() { - return ErrFileExists - } - } else { - item := mem.CreateDir(name) - mem.SetMode(item, os.ModeDir|perm) - m.getData()[name] = item - m.registerWithParent(item, perm) - } - return nil -} - -func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { - perm &= chmodBits - name = normalizePath(name) - - m.mu.RLock() - _, ok := m.getData()[name] - m.mu.RUnlock() - if ok { - return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} - } - - m.mu.Lock() - item := mem.CreateDir(name) - mem.SetMode(item, os.ModeDir|perm) - m.getData()[name] = item - m.registerWithParent(item, perm) - m.mu.Unlock() - - return m.setFileMode(name, perm|os.ModeDir) -} - -func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error { - err := m.Mkdir(path, perm) - if err != nil { - if err.(*os.PathError).Err == ErrFileExists { - return nil - } - return err - } - return nil -} - -// Handle some relative paths -func normalizePath(path string) string { - path = filepath.Clean(path) - - switch path { - case ".": - return FilePathSeparator - case "..": - return FilePathSeparator - default: - return path - } -} - -func (m *MemMapFs) Open(name string) (File, error) { - f, err := m.open(name) - if f != nil { - return mem.NewReadOnlyFileHandle(f), err - } - return nil, err -} - -func (m *MemMapFs) openWrite(name string) (File, error) { - f, err := m.open(name) - if f != nil { - return mem.NewFileHandle(f), err - } - return nil, err -} - -func (m *MemMapFs) open(name string) (*mem.FileData, error) { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound} - } - return f, nil -} - -func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) { - name = normalizePath(name) - f, ok := m.getData()[name] - if ok { - return f, nil - } else { - return nil, ErrFileNotFound - } -} - -func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - perm &= chmodBits - chmod := false - file, err := m.openWrite(name) - if err == nil && (flag&os.O_EXCL > 0) { - return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileExists} - } - if os.IsNotExist(err) && (flag&os.O_CREATE > 0) { - file, err = m.Create(name) - chmod = true - } - if err != nil { - return nil, err - } - if flag == os.O_RDONLY { - file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data()) - } - if flag&os.O_APPEND > 0 { - _, err = file.Seek(0, os.SEEK_END) - if err != nil { - file.Close() - return nil, err - } - } - if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 { - err = file.Truncate(0) - if err != nil { - file.Close() - return nil, err - } - } - if chmod { - return file, m.setFileMode(name, perm) - } - return file, nil -} - -func (m *MemMapFs) Remove(name string) error { - name = normalizePath(name) - - m.mu.Lock() - defer m.mu.Unlock() - - if _, ok := m.getData()[name]; ok { - err := m.unRegisterWithParent(name) - if err != nil { - return &os.PathError{Op: "remove", Path: name, Err: err} - } - delete(m.getData(), name) - } else { - return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist} - } - return nil -} - -func (m *MemMapFs) RemoveAll(path string) error { - path = normalizePath(path) - m.mu.Lock() - m.unRegisterWithParent(path) - m.mu.Unlock() - - m.mu.RLock() - defer m.mu.RUnlock() - - for p := range m.getData() { - if strings.HasPrefix(p, path) { - m.mu.RUnlock() - m.mu.Lock() - delete(m.getData(), p) - m.mu.Unlock() - 
m.mu.RLock() - } - } - return nil -} - -func (m *MemMapFs) Rename(oldname, newname string) error { - oldname = normalizePath(oldname) - newname = normalizePath(newname) - - if oldname == newname { - return nil - } - - m.mu.RLock() - defer m.mu.RUnlock() - if _, ok := m.getData()[oldname]; ok { - m.mu.RUnlock() - m.mu.Lock() - m.unRegisterWithParent(oldname) - fileData := m.getData()[oldname] - delete(m.getData(), oldname) - mem.ChangeFileName(fileData, newname) - m.getData()[newname] = fileData - m.registerWithParent(fileData, 0) - m.mu.Unlock() - m.mu.RLock() - } else { - return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound} - } - return nil -} - -func (m *MemMapFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - fileInfo, err := m.Stat(name) - return fileInfo, false, err -} - -func (m *MemMapFs) Stat(name string) (os.FileInfo, error) { - f, err := m.Open(name) - if err != nil { - return nil, err - } - fi := mem.GetFileInfo(f.(*mem.File).Data()) - return fi, nil -} - -func (m *MemMapFs) Chmod(name string, mode os.FileMode) error { - mode &= chmodBits - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} - } - prevOtherBits := mem.GetFileInfo(f).Mode() & ^chmodBits - - mode = prevOtherBits | mode - return m.setFileMode(name, mode) -} - -func (m *MemMapFs) setFileMode(name string, mode os.FileMode) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} - } - - m.mu.Lock() - mem.SetMode(f, mode) - m.mu.Unlock() - - return nil -} - -func (m *MemMapFs) Chown(name string, uid, gid int) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chown", Path: name, Err: ErrFileNotFound} - } - - mem.SetUID(f, uid) - mem.SetGID(f, gid) - - return nil -} - -func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound} - } - - m.mu.Lock() - mem.SetModTime(f, mtime) - m.mu.Unlock() - - return nil -} - -func (m *MemMapFs) List() { - for _, x := range m.data { - y := mem.FileInfo{FileData: x} - fmt.Println(x.Name(), y.Size()) - } -} diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go deleted file mode 100644 index f1366321..00000000 --- a/vendor/github.com/spf13/afero/os.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" - "time" -) - -var _ Lstater = (*OsFs)(nil) - -// OsFs is a Fs implementation that uses functions provided by the os package. 
-// -// For details in any method, check the documentation of the os package -// (http://golang.org/pkg/os/). -type OsFs struct{} - -func NewOsFs() Fs { - return &OsFs{} -} - -func (OsFs) Name() string { return "OsFs" } - -func (OsFs) Create(name string) (File, error) { - f, e := os.Create(name) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) Mkdir(name string, perm os.FileMode) error { - return os.Mkdir(name, perm) -} - -func (OsFs) MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} - -func (OsFs) Open(name string) (File, error) { - f, e := os.Open(name) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - f, e := os.OpenFile(name, flag, perm) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) Remove(name string) error { - return os.Remove(name) -} - -func (OsFs) RemoveAll(path string) error { - return os.RemoveAll(path) -} - -func (OsFs) Rename(oldname, newname string) error { - return os.Rename(oldname, newname) -} - -func (OsFs) Stat(name string) (os.FileInfo, error) { - return os.Stat(name) -} - -func (OsFs) Chmod(name string, mode os.FileMode) error { - return os.Chmod(name, mode) -} - -func (OsFs) Chown(name string, uid, gid int) error { - return os.Chown(name, uid, gid) -} - -func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return os.Chtimes(name, atime, mtime) -} - -func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - fi, err := os.Lstat(name) - return fi, true, err -} - -func (OsFs) SymlinkIfPossible(oldname, newname string) error { - return os.Symlink(oldname, newname) -} - -func (OsFs) ReadlinkIfPossible(name string) (string, error) { - return os.Readlink(name) -} diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go deleted file mode 100644 index 18f60a0f..00000000 --- a/vendor/github.com/spf13/afero/path.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright ©2015 The Go Authors -// Copyright ©2015 Steve Francia -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" - "path/filepath" - "sort" -) - -// readDirNames reads the directory named by dirname and returns -// a sorted list of directory entries. 
-// adapted from https://golang.org/src/path/filepath/path.go -func readDirNames(fs Fs, dirname string) ([]string, error) { - f, err := fs.Open(dirname) - if err != nil { - return nil, err - } - names, err := f.Readdirnames(-1) - f.Close() - if err != nil { - return nil, err - } - sort.Strings(names) - return names, nil -} - -// walk recursively descends path, calling walkFn -// adapted from https://golang.org/src/path/filepath/path.go -func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { - err := walkFn(path, info, nil) - if err != nil { - if info.IsDir() && err == filepath.SkipDir { - return nil - } - return err - } - - if !info.IsDir() { - return nil - } - - names, err := readDirNames(fs, path) - if err != nil { - return walkFn(path, info, err) - } - - for _, name := range names { - filename := filepath.Join(path, name) - fileInfo, err := lstatIfPossible(fs, filename) - if err != nil { - if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { - return err - } - } else { - err = walk(fs, filename, fileInfo, walkFn) - if err != nil { - if !fileInfo.IsDir() || err != filepath.SkipDir { - return err - } - } - } - } - return nil -} - -// if the filesystem supports it, use Lstat, else use fs.Stat -func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) { - if lfs, ok := fs.(Lstater); ok { - fi, _, err := lfs.LstatIfPossible(path) - return fi, err - } - return fs.Stat(path) -} - -// Walk walks the file tree rooted at root, calling walkFn for each file or -// directory in the tree, including root. All errors that arise visiting files -// and directories are filtered by walkFn. The files are walked in lexical -// order, which makes the output deterministic but means that for very -// large directories Walk can be inefficient. -// Walk does not follow symbolic links. 
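// A sketch of the Walk helper documented above, assuming an in-memory tree:
// entries are visited in lexical order, and symbolic links are not followed.
package main

import (
	"fmt"
	"os"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	for _, name := range []string{"/a/one.txt", "/a/two.txt", "/b/three.txt"} {
		if err := afero.WriteFile(fs, name, []byte(name), 0644); err != nil {
			panic(err)
		}
	}

	err := afero.Walk(fs, "/", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path) // "/", "/a", "/a/one.txt", "/a/two.txt", "/b", "/b/three.txt"
		return nil
	})
	if err != nil {
		panic(err)
	}
}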
- -func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error { - return Walk(a.Fs, root, walkFn) -} - -func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error { - info, err := lstatIfPossible(fs, root) - if err != nil { - return walkFn(root, nil, err) - } - return walk(fs, root, info, walkFn) -} diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go deleted file mode 100644 index bd8f9264..00000000 --- a/vendor/github.com/spf13/afero/readonlyfs.go +++ /dev/null @@ -1,96 +0,0 @@ -package afero - -import ( - "os" - "syscall" - "time" -) - -var _ Lstater = (*ReadOnlyFs)(nil) - -type ReadOnlyFs struct { - source Fs -} - -func NewReadOnlyFs(source Fs) Fs { - return &ReadOnlyFs{source: source} -} - -func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) { - return ReadDir(r.source, name) -} - -func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Chown(n string, uid, gid int) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Name() string { - return "ReadOnlyFilter" -} - -func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) { - return r.source.Stat(name) -} - -func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - if lsf, ok := r.source.(Lstater); ok { - return lsf.LstatIfPossible(name) - } - fi, err := r.Stat(name) - return fi, false, err -} - -func (r *ReadOnlyFs) SymlinkIfPossible(oldname, newname string) error { - return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} -} - -func (r *ReadOnlyFs) ReadlinkIfPossible(name string) (string, error) { - if srdr, ok := r.source.(LinkReader); ok { - return srdr.ReadlinkIfPossible(name) - } - - return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} -} - -func (r *ReadOnlyFs) Rename(o, n string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) RemoveAll(p string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Remove(n string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - return nil, syscall.EPERM - } - return r.source.OpenFile(name, flag, perm) -} - -func (r *ReadOnlyFs) Open(n string) (File, error) { - return r.source.Open(n) -} - -func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Create(n string) (File, error) { - return nil, syscall.EPERM -} diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go deleted file mode 100644 index ac359c62..00000000 --- a/vendor/github.com/spf13/afero/regexpfs.go +++ /dev/null @@ -1,224 +0,0 @@ -package afero - -import ( - "os" - "regexp" - "syscall" - "time" -) - -// The RegexpFs filters files (not directories) by regular expression. Only -// files matching the given regexp will be allowed, all others get a ENOENT error ( -// "No such file or directory"). 
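// A sketch of the regexp filter described above, assuming an in-memory
// source filesystem: non-matching files are hidden behind ENOENT, while
// directories remain visible regardless of the pattern.
package main

import (
	"fmt"
	"regexp"

	"github.com/spf13/afero"
)

func main() {
	src := afero.NewMemMapFs()
	_ = afero.WriteFile(src, "notes.txt", []byte("keep"), 0644)
	_ = afero.WriteFile(src, "image.png", []byte{0x89}, 0644)

	fs := afero.NewRegexpFs(src, regexp.MustCompile(`\.txt$`))

	if _, err := fs.Stat("notes.txt"); err == nil {
		fmt.Println("notes.txt is visible")
	}
	if _, err := fs.Stat("image.png"); err != nil {
		fmt.Println("image.png is filtered:", err) // syscall.ENOENT
	}
}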
-// -type RegexpFs struct { - re *regexp.Regexp - source Fs -} - -func NewRegexpFs(source Fs, re *regexp.Regexp) Fs { - return &RegexpFs{source: source, re: re} -} - -type RegexpFile struct { - f File - re *regexp.Regexp -} - -func (r *RegexpFs) matchesName(name string) error { - if r.re == nil { - return nil - } - if r.re.MatchString(name) { - return nil - } - return syscall.ENOENT -} - -func (r *RegexpFs) dirOrMatches(name string) error { - dir, err := IsDir(r.source, name) - if err != nil { - return err - } - if dir { - return nil - } - return r.matchesName(name) -} - -func (r *RegexpFs) Chtimes(name string, a, m time.Time) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chtimes(name, a, m) -} - -func (r *RegexpFs) Chmod(name string, mode os.FileMode) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chmod(name, mode) -} - -func (r *RegexpFs) Chown(name string, uid, gid int) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chown(name, uid, gid) -} - -func (r *RegexpFs) Name() string { - return "RegexpFs" -} - -func (r *RegexpFs) Stat(name string) (os.FileInfo, error) { - if err := r.dirOrMatches(name); err != nil { - return nil, err - } - return r.source.Stat(name) -} - -func (r *RegexpFs) Rename(oldname, newname string) error { - dir, err := IsDir(r.source, oldname) - if err != nil { - return err - } - if dir { - return nil - } - if err := r.matchesName(oldname); err != nil { - return err - } - if err := r.matchesName(newname); err != nil { - return err - } - return r.source.Rename(oldname, newname) -} - -func (r *RegexpFs) RemoveAll(p string) error { - dir, err := IsDir(r.source, p) - if err != nil { - return err - } - if !dir { - if err := r.matchesName(p); err != nil { - return err - } - } - return r.source.RemoveAll(p) -} - -func (r *RegexpFs) Remove(name string) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Remove(name) -} - -func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - if err := r.dirOrMatches(name); err != nil { - return nil, err - } - return r.source.OpenFile(name, flag, perm) -} - -func (r *RegexpFs) Open(name string) (File, error) { - dir, err := IsDir(r.source, name) - if err != nil { - return nil, err - } - if !dir { - if err := r.matchesName(name); err != nil { - return nil, err - } - } - f, err := r.source.Open(name) - if err != nil { - return nil, err - } - return &RegexpFile{f: f, re: r.re}, nil -} - -func (r *RegexpFs) Mkdir(n string, p os.FileMode) error { - return r.source.Mkdir(n, p) -} - -func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error { - return r.source.MkdirAll(n, p) -} - -func (r *RegexpFs) Create(name string) (File, error) { - if err := r.matchesName(name); err != nil { - return nil, err - } - return r.source.Create(name) -} - -func (f *RegexpFile) Close() error { - return f.f.Close() -} - -func (f *RegexpFile) Read(s []byte) (int, error) { - return f.f.Read(s) -} - -func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) { - return f.f.ReadAt(s, o) -} - -func (f *RegexpFile) Seek(o int64, w int) (int64, error) { - return f.f.Seek(o, w) -} - -func (f *RegexpFile) Write(s []byte) (int, error) { - return f.f.Write(s) -} - -func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) { - return f.f.WriteAt(s, o) -} - -func (f *RegexpFile) Name() string { - return f.f.Name() -} - -func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err 
error) { - var rfi []os.FileInfo - rfi, err = f.f.Readdir(c) - if err != nil { - return nil, err - } - for _, i := range rfi { - if i.IsDir() || f.re.MatchString(i.Name()) { - fi = append(fi, i) - } - } - return fi, nil -} - -func (f *RegexpFile) Readdirnames(c int) (n []string, err error) { - fi, err := f.Readdir(c) - if err != nil { - return nil, err - } - for _, s := range fi { - n = append(n, s.Name()) - } - return n, nil -} - -func (f *RegexpFile) Stat() (os.FileInfo, error) { - return f.f.Stat() -} - -func (f *RegexpFile) Sync() error { - return f.f.Sync() -} - -func (f *RegexpFile) Truncate(s int64) error { - return f.f.Truncate(s) -} - -func (f *RegexpFile) WriteString(s string) (int, error) { - return f.f.WriteString(s) -} diff --git a/vendor/github.com/spf13/afero/symlink.go b/vendor/github.com/spf13/afero/symlink.go deleted file mode 100644 index d1c6ea53..00000000 --- a/vendor/github.com/spf13/afero/symlink.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright © 2018 Steve Francia. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "errors" -) - -// Symlinker is an optional interface in Afero. It is only implemented by the -// filesystems that say so. -// It indicates support for three symlink-related interfaces that implement the -// behaviors of the os methods: -// - Lstat -// - Symlink, and -// - Readlink -type Symlinker interface { - Lstater - Linker - LinkReader -} - -// Linker is an optional interface in Afero. It is only implemented by the -// filesystems that say so. -// It will call Symlink if the filesystem itself is, or it delegates to, the os filesystem, -// or the filesystem otherwise supports Symlinks. -type Linker interface { - SymlinkIfPossible(oldname, newname string) error -} - -// ErrNoSymlink is the error that will be wrapped in an os.LinkError if a file system -// does not support Symlinks either directly or through its delegated filesystem. -// As expressed by support for the Linker interface. -var ErrNoSymlink = errors.New("symlink not supported") - -// LinkReader is an optional interface in Afero. It is only implemented by the -// filesystems that say so. -type LinkReader interface { - ReadlinkIfPossible(name string) (string, error) -} - -// ErrNoReadlink is the error that will be wrapped in an os.PathError if a file system -// does not support the readlink operation either directly or through its delegated filesystem. -// As expressed by support for the LinkReader interface. -var ErrNoReadlink = errors.New("readlink not supported") diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go deleted file mode 100644 index 985363ee..00000000 --- a/vendor/github.com/spf13/afero/unionFile.go +++ /dev/null @@ -1,317 +0,0 @@ -package afero - -import ( - "io" - "os" - "path/filepath" - "syscall" -) - -// The UnionFile implements the afero.File interface and will be returned -// when reading a directory present at least in the overlay or opening a file -// for writing.
-// -// The calls to -// Readdir() and Readdirnames() merge the file os.FileInfo / names from the -// base and the overlay - for files present in both layers, only those -// from the overlay will be used. -// -// When opening files for writing (Create() / OpenFile() with the right flags) -// the operations will be done in both layers, starting with the overlay. A -// successful read in the overlay will move the cursor position in the base layer -// by the number of bytes read. -type UnionFile struct { - Base File - Layer File - Merger DirsMerger - off int - files []os.FileInfo -} - -func (f *UnionFile) Close() error { - // first close base, so we have a newer timestamp in the overlay. If we'd close - // the overlay first, we'd get a cacheStale the next time we access this file - // -> cache would be useless ;-) - if f.Base != nil { - f.Base.Close() - } - if f.Layer != nil { - return f.Layer.Close() - } - return BADFD -} - -func (f *UnionFile) Read(s []byte) (int, error) { - if f.Layer != nil { - n, err := f.Layer.Read(s) - if (err == nil || err == io.EOF) && f.Base != nil { - // advance the file position also in the base file, the next - // call may be a write at this position (or a seek with SEEK_CUR) - if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil { - // only overwrite err in case the seek fails: we need to - // report an eventual io.EOF to the caller - err = seekErr - } - } - return n, err - } - if f.Base != nil { - return f.Base.Read(s) - } - return 0, BADFD -} - -func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { - if f.Layer != nil { - n, err := f.Layer.ReadAt(s, o) - if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o+int64(n), os.SEEK_SET) - } - return n, err - } - if f.Base != nil { - return f.Base.ReadAt(s, o) - } - return 0, BADFD -} - -func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { - if f.Layer != nil { - pos, err = f.Layer.Seek(o, w) - if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o, w) - } - return pos, err - } - if f.Base != nil { - return f.Base.Seek(o, w) - } - return 0, BADFD -} - -func (f *UnionFile) Write(s []byte) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.Write(s) - if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? - _, err = f.Base.Write(s) - } - return n, err - } - if f.Base != nil { - return f.Base.Write(s) - } - return 0, BADFD -} - -func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.WriteAt(s, o) - if err == nil && f.Base != nil { - _, err = f.Base.WriteAt(s, o) - } - return n, err - } - if f.Base != nil { - return f.Base.WriteAt(s, o) - } - return 0, BADFD -} - -func (f *UnionFile) Name() string { - if f.Layer != nil { - return f.Layer.Name() - } - return f.Base.Name() -} - -// DirsMerger is how UnionFile weaves two directories together. -// It takes the FileInfo slices from the layer and the base and returns a -// single view. 
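A sketch of a custom merger satisfying the DirsMerger contract documented above. UnionFile's fields are exported, so a merger can be injected directly; this example (de-duplicate by name with the layer winning, then sort) is illustrative and not part of the vendored code:

package main

import (
	"os"
	"sort"

	"github.com/spf13/afero"
)

// sortedMerge keeps layer entries on name clashes and sorts the result.
func sortedMerge(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) {
	seen := make(map[string]bool)
	var out []os.FileInfo
	for _, fi := range lofi {
		seen[fi.Name()] = true
		out = append(out, fi)
	}
	for _, fi := range bofi {
		if !seen[fi.Name()] {
			out = append(out, fi)
		}
	}
	sort.Slice(out, func(i, j int) bool { return out[i].Name() < out[j].Name() })
	return out, nil
}

func main() {
	// In real use the UnionFile would come from a union filesystem such as
	// CopyOnWriteFs; here we only show where the merger is injected.
	var f afero.UnionFile
	f.Merger = sortedMerge // consulted by f.Readdir / f.Readdirnames
}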
-type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) - -var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) { - var files = make(map[string]os.FileInfo) - - for _, fi := range lofi { - files[fi.Name()] = fi - } - - for _, fi := range bofi { - if _, exists := files[fi.Name()]; !exists { - files[fi.Name()] = fi - } - } - - rfi := make([]os.FileInfo, len(files)) - - i := 0 - for _, fi := range files { - rfi[i] = fi - i++ - } - - return rfi, nil - -} - -// Readdir will weave the two directories together and -// return a single view of the overlayed directories. -// At the end of the directory view, the error is io.EOF if c > 0. -func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { - var merge DirsMerger = f.Merger - if merge == nil { - merge = defaultUnionMergeDirsFn - } - - if f.off == 0 { - var lfi []os.FileInfo - if f.Layer != nil { - lfi, err = f.Layer.Readdir(-1) - if err != nil { - return nil, err - } - } - - var bfi []os.FileInfo - if f.Base != nil { - bfi, err = f.Base.Readdir(-1) - if err != nil { - return nil, err - } - - } - merged, err := merge(lfi, bfi) - if err != nil { - return nil, err - } - f.files = append(f.files, merged...) - } - files := f.files[f.off:] - - if c <= 0 { - return files, nil - } - - if len(files) == 0 { - return nil, io.EOF - } - - if c > len(files) { - c = len(files) - } - - defer func() { f.off += c }() - return files[:c], nil -} - -func (f *UnionFile) Readdirnames(c int) ([]string, error) { - rfi, err := f.Readdir(c) - if err != nil { - return nil, err - } - var names []string - for _, fi := range rfi { - names = append(names, fi.Name()) - } - return names, nil -} - -func (f *UnionFile) Stat() (os.FileInfo, error) { - if f.Layer != nil { - return f.Layer.Stat() - } - if f.Base != nil { - return f.Base.Stat() - } - return nil, BADFD -} - -func (f *UnionFile) Sync() (err error) { - if f.Layer != nil { - err = f.Layer.Sync() - if err == nil && f.Base != nil { - err = f.Base.Sync() - } - return err - } - if f.Base != nil { - return f.Base.Sync() - } - return BADFD -} - -func (f *UnionFile) Truncate(s int64) (err error) { - if f.Layer != nil { - err = f.Layer.Truncate(s) - if err == nil && f.Base != nil { - err = f.Base.Truncate(s) - } - return err - } - if f.Base != nil { - return f.Base.Truncate(s) - } - return BADFD -} - -func (f *UnionFile) WriteString(s string) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.WriteString(s) - if err == nil && f.Base != nil { - _, err = f.Base.WriteString(s) - } - return n, err - } - if f.Base != nil { - return f.Base.WriteString(s) - } - return 0, BADFD -} - -func copyToLayer(base Fs, layer Fs, name string) error { - bfh, err := base.Open(name) - if err != nil { - return err - } - defer bfh.Close() - - // First make sure the directory exists - exists, err := Exists(layer, filepath.Dir(name)) - if err != nil { - return err - } - if !exists { - err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME? 
- if err != nil { - return err - } - } - - // Create the file on the overlay - lfh, err := layer.Create(name) - if err != nil { - return err - } - n, err := io.Copy(lfh, bfh) - if err != nil { - // If anything fails, clean up the file - layer.Remove(name) - lfh.Close() - return err - } - - bfi, err := bfh.Stat() - if err != nil || bfi.Size() != n { - layer.Remove(name) - lfh.Close() - return syscall.EIO - } - - err = lfh.Close() - if err != nil { - layer.Remove(name) - lfh.Close() - return err - } - return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime()) -} diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go deleted file mode 100644 index 4f253f48..00000000 --- a/vendor/github.com/spf13/afero/util.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright ©2015 Steve Francia -// Portions Copyright ©2015 The Hugo Authors -// Portions Copyright 2016-present Bjørn Erik Pedersen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "unicode" - - "golang.org/x/text/transform" - "golang.org/x/text/unicode/norm" -) - -// Filepath separator defined by os.Separator. -const FilePathSeparator = string(filepath.Separator) - -// Takes a reader and a path and writes the content -func (a Afero) WriteReader(path string, r io.Reader) (err error) { - return WriteReader(a.Fs, path, r) -} - -func WriteReader(fs Fs, path string, r io.Reader) (err error) { - dir, _ := filepath.Split(path) - ospath := filepath.FromSlash(dir) - - if ospath != "" { - err = fs.MkdirAll(ospath, 0777) // rwx, rw, r - if err != nil { - if err != os.ErrExist { - return err - } - } - } - - file, err := fs.Create(path) - if err != nil { - return - } - defer file.Close() - - _, err = io.Copy(file, r) - return -} - -// Same as WriteReader but checks to see if file/directory already exists. 
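A minimal sketch contrasting WriteReader with SafeWriteReader, assuming only the public afero API; the path and contents are invented:

package main

import (
	"fmt"
	"strings"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()

	// WriteReader creates missing parent directories and overwrites silently.
	afero.WriteReader(fs, "/out/data.txt", strings.NewReader("v1"))
	afero.WriteReader(fs, "/out/data.txt", strings.NewReader("v2")) // replaced

	// SafeWriteReader refuses to clobber an existing file.
	err := afero.SafeWriteReader(fs, "/out/data.txt", strings.NewReader("v3"))
	fmt.Println(err) // "/out/data.txt already exists"
}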
-func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) { - return SafeWriteReader(a.Fs, path, r) -} - -func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) { - dir, _ := filepath.Split(path) - ospath := filepath.FromSlash(dir) - - if ospath != "" { - err = fs.MkdirAll(ospath, 0777) // rwx, rw, r - if err != nil { - return - } - } - - exists, err := Exists(fs, path) - if err != nil { - return - } - if exists { - return fmt.Errorf("%v already exists", path) - } - - file, err := fs.Create(path) - if err != nil { - return - } - defer file.Close() - - _, err = io.Copy(file, r) - return -} - -func (a Afero) GetTempDir(subPath string) string { - return GetTempDir(a.Fs, subPath) -} - -// GetTempDir returns the default temp directory with trailing slash -// if subPath is not empty then it will be created recursively with mode 777 rwx rwx rwx -func GetTempDir(fs Fs, subPath string) string { - addSlash := func(p string) string { - if FilePathSeparator != p[len(p)-1:] { - p = p + FilePathSeparator - } - return p - } - dir := addSlash(os.TempDir()) - - if subPath != "" { - // preserve windows backslash :-( - if FilePathSeparator == "\\" { - subPath = strings.Replace(subPath, "\\", "____", -1) - } - dir = dir + UnicodeSanitize((subPath)) - if FilePathSeparator == "\\" { - dir = strings.Replace(dir, "____", "\\", -1) - } - - if exists, _ := Exists(fs, dir); exists { - return addSlash(dir) - } - - err := fs.MkdirAll(dir, 0777) - if err != nil { - panic(err) - } - dir = addSlash(dir) - } - return dir -} - -// Rewrite string to remove non-standard path characters -func UnicodeSanitize(s string) string { - source := []rune(s) - target := make([]rune, 0, len(source)) - - for _, r := range source { - if unicode.IsLetter(r) || - unicode.IsDigit(r) || - unicode.IsMark(r) || - r == '.' || - r == '/' || - r == '\\' || - r == '_' || - r == '-' || - r == '%' || - r == ' ' || - r == '#' { - target = append(target, r) - } - } - - return string(target) -} - -// Transform characters with accents into plain forms. -func NeuterAccents(s string) string { - t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC) - result, _, _ := transform.String(t, string(s)) - - return result -} - -func isMn(r rune) bool { - return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks -} - -func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) { - return FileContainsBytes(a.Fs, filename, subslice) -} - -// Check if a file contains a specified byte slice. -func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) { - f, err := fs.Open(filename) - if err != nil { - return false, err - } - defer f.Close() - - return readerContainsAny(f, subslice), nil -} - -func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) { - return FileContainsAnyBytes(a.Fs, filename, subslices) -} - -// Check if a file contains any of the specified byte slices. -func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) { - f, err := fs.Open(filename) - if err != nil { - return false, err - } - defer f.Close() - - return readerContainsAny(f, subslices...), nil -} - -// readerContains reports whether any of the subslices is within r. 
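For context, a usage sketch of the FileContainsBytes / FileContainsAnyBytes helpers above, both backed by the buffered readerContainsAny scanner that follows; the log file and needles are invented:

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	afero.WriteFile(fs, "/app.log", []byte("boot ok\npanic: oh no\n"), 0644)

	found, _ := afero.FileContainsBytes(fs, "/app.log", []byte("panic:"))
	fmt.Println(found) // true

	// Multi-needle variant backed by the same scanner.
	hit, _ := afero.FileContainsAnyBytes(fs, "/app.log",
		[][]byte{[]byte("FATAL"), []byte("panic:")})
	fmt.Println(hit) // true
}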
-func readerContainsAny(r io.Reader, subslices ...[]byte) bool { - - if r == nil || len(subslices) == 0 { - return false - } - - largestSlice := 0 - - for _, sl := range subslices { - if len(sl) > largestSlice { - largestSlice = len(sl) - } - } - - if largestSlice == 0 { - return false - } - - bufflen := largestSlice * 4 - halflen := bufflen / 2 - buff := make([]byte, bufflen) - var err error - var n, i int - - for { - i++ - if i == 1 { - n, err = io.ReadAtLeast(r, buff[:halflen], halflen) - } else { - if i != 2 { - // shift left to catch overlapping matches - copy(buff[:], buff[halflen:]) - } - n, err = io.ReadAtLeast(r, buff[halflen:], halflen) - } - - if n > 0 { - for _, sl := range subslices { - if bytes.Contains(buff, sl) { - return true - } - } - } - - if err != nil { - break - } - } - return false -} - -func (a Afero) DirExists(path string) (bool, error) { - return DirExists(a.Fs, path) -} - -// DirExists checks if a path exists and is a directory. -func DirExists(fs Fs, path string) (bool, error) { - fi, err := fs.Stat(path) - if err == nil && fi.IsDir() { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func (a Afero) IsDir(path string) (bool, error) { - return IsDir(a.Fs, path) -} - -// IsDir checks if a given path is a directory. -func IsDir(fs Fs, path string) (bool, error) { - fi, err := fs.Stat(path) - if err != nil { - return false, err - } - return fi.IsDir(), nil -} - -func (a Afero) IsEmpty(path string) (bool, error) { - return IsEmpty(a.Fs, path) -} - -// IsEmpty checks if a given file or directory is empty. -func IsEmpty(fs Fs, path string) (bool, error) { - if b, _ := Exists(fs, path); !b { - return false, fmt.Errorf("%q path does not exist", path) - } - fi, err := fs.Stat(path) - if err != nil { - return false, err - } - if fi.IsDir() { - f, err := fs.Open(path) - if err != nil { - return false, err - } - defer f.Close() - list, err := f.Readdir(-1) - return len(list) == 0, nil - } - return fi.Size() == 0, nil -} - -func (a Afero) Exists(path string) (bool, error) { - return Exists(a.Fs, path) -} - -// Check if a file or directory exists. -func Exists(fs Fs, path string) (bool, error) { - _, err := fs.Stat(path) - if err == nil { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string { - combinedPath := filepath.Join(basePathFs.path, relativePath) - if parent, ok := basePathFs.source.(*BasePathFs); ok { - return FullBaseFsPath(parent, combinedPath) - } - - return combinedPath -} diff --git a/vendor/github.com/zalando/postgres-operator/LICENSE b/vendor/github.com/zalando/postgres-operator/LICENSE deleted file mode 100644 index 7c0f459a..00000000 --- a/vendor/github.com/zalando/postgres-operator/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2021 Zalando SE - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/register.go b/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/register.go deleted file mode 100644 index f9939665..00000000 --- a/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/register.go +++ /dev/null @@ -1,6 +0,0 @@ -package acidzalando - -const ( - // GroupName is the group name for the operator CRDs - GroupName = "acid.zalan.do" -) diff --git a/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/const.go b/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/const.go deleted file mode 100644 index 3cb1c1ad..00000000 --- a/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/const.go +++ /dev/null @@ -1,19 +0,0 @@ -package v1 - -// ClusterStatusUnknown etc.: status of a Postgres cluster known to the operator -const ( - ClusterStatusUnknown = "" - ClusterStatusCreating = "Creating" - ClusterStatusUpdating = "Updating" - ClusterStatusUpdateFailed = "UpdateFailed" - ClusterStatusSyncFailed = "SyncFailed" - ClusterStatusAddFailed = "CreateFailed" - ClusterStatusRunning = "Running" - ClusterStatusInvalid = "Invalid" -) - -const ( - serviceNameMaxLength = 63 - clusterNameMaxLength = serviceNameMaxLength - len("-repl") - serviceNameRegexString = `^[a-z]([-a-z0-9]*[a-z0-9])?$` -) diff --git a/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/crds.go b/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/crds.go deleted file mode 100644 index 582b1379..00000000 --- a/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/crds.go +++ /dev/null @@ -1,1647 +0,0 @@ -package v1 - -import ( - acidzalando "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do" - "github.com/zalando/postgres-operator/pkg/util" - apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// CRDResource* define names necessary for the k8s CRD API -const ( - PostgresCRDResourceKind = "postgresql" - PostgresCRDResourcePlural = "postgresqls" - PostgresCRDResouceName = PostgresCRDResourcePlural + "." + acidzalando.GroupName - PostgresCRDResourceShort = "pg" - - OperatorConfigCRDResouceKind = "OperatorConfiguration" - OperatorConfigCRDResourcePlural = "operatorconfigurations" - OperatorConfigCRDResourceName = OperatorConfigCRDResourcePlural + "."
+ acidzalando.GroupName - OperatorConfigCRDResourceShort = "opconfig" -) - -// PostgresCRDResourceColumns definition of AdditionalPrinterColumns for postgresql CRD -var PostgresCRDResourceColumns = []apiextv1.CustomResourceColumnDefinition{ - { - Name: "Team", - Type: "string", - Description: "Team responsible for Postgres cluster", - JSONPath: ".spec.teamId", - }, - { - Name: "Version", - Type: "string", - Description: "PostgreSQL version", - JSONPath: ".spec.postgresql.version", - }, - { - Name: "Pods", - Type: "integer", - Description: "Number of Pods per Postgres cluster", - JSONPath: ".spec.numberOfInstances", - }, - { - Name: "Volume", - Type: "string", - Description: "Size of the bound volume", - JSONPath: ".spec.volume.size", - }, - { - Name: "CPU-Request", - Type: "string", - Description: "Requested CPU for Postgres containers", - JSONPath: ".spec.resources.requests.cpu", - }, - { - Name: "Memory-Request", - Type: "string", - Description: "Requested memory for Postgres containers", - JSONPath: ".spec.resources.requests.memory", - }, - { - Name: "Age", - Type: "date", - JSONPath: ".metadata.creationTimestamp", - }, - { - Name: "Status", - Type: "string", - Description: "Current sync status of postgresql resource", - JSONPath: ".status.PostgresClusterStatus", - }, -} - -// OperatorConfigCRDResourceColumns definition of AdditionalPrinterColumns for OperatorConfiguration CRD -var OperatorConfigCRDResourceColumns = []apiextv1.CustomResourceColumnDefinition{ - { - Name: "Image", - Type: "string", - Description: "Spilo image to be used for Pods", - JSONPath: ".configuration.docker_image", - }, - { - Name: "Cluster-Label", - Type: "string", - Description: "Label for K8s resources created by operator", - JSONPath: ".configuration.kubernetes.cluster_name_label", - }, - { - Name: "Service-Account", - Type: "string", - Description: "Name of service account to be used", - JSONPath: ".configuration.kubernetes.pod_service_account_name", - }, - { - Name: "Min-Instances", - Type: "integer", - Description: "Minimum number of instances per Postgres cluster", - JSONPath: ".configuration.min_instances", - }, - { - Name: "Age", - Type: "date", - JSONPath: ".metadata.creationTimestamp", - }, -} - -var min0 = 0.0 -var min1 = 1.0 -var minDisable = -1.0 - -// PostgresCRDResourceValidation to check applied manifest parameters -var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ - OpenAPIV3Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - Required: []string{"kind", "apiVersion", "spec"}, - Properties: map[string]apiextv1.JSONSchemaProps{ - "kind": { - Type: "string", - Enum: []apiextv1.JSON{ - { - Raw: []byte(`"postgresql"`), - }, - }, - }, - "apiVersion": { - Type: "string", - Enum: []apiextv1.JSON{ - { - Raw: []byte(`"acid.zalan.do/v1"`), - }, - }, - }, - "spec": { - Type: "object", - Required: []string{"numberOfInstances", "teamId", "postgresql", "volume"}, - Properties: map[string]apiextv1.JSONSchemaProps{ - "additionalVolumes": { - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - Required: []string{"name", "mountPath", "volumeSource"}, - Properties: map[string]apiextv1.JSONSchemaProps{ - "name": { - Type: "string", - }, - "mountPath": { - Type: "string", - }, - "targetContainers": { - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - "volumeSource": { - Type: "object", - XPreserveUnknownFields: util.True(), - }, - "subPath": { - Type: 
"string", - }, - }, - }, - }, - }, - "allowedSourceRanges": { - Type: "array", - Nullable: true, - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - Pattern: "^(\\d|[1-9]\\d|1\\d\\d|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d\\d|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d\\d|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d\\d|2[0-4]\\d|25[0-5])\\/(\\d|[1-2]\\d|3[0-2])$", - }, - }, - }, - "clone": { - Type: "object", - Required: []string{"cluster"}, - Properties: map[string]apiextv1.JSONSchemaProps{ - "cluster": { - Type: "string", - }, - "s3_endpoint": { - Type: "string", - }, - "s3_access_key_id": { - Type: "string", - }, - "s3_secret_access_key": { - Type: "string", - }, - "s3_force_path_style": { - Type: "boolean", - }, - "s3_wal_path": { - Type: "string", - }, - "timestamp": { - Type: "string", - Description: "Date-time format that specifies a timezone as an offset relative to UTC e.g. 1996-12-19T16:39:57-08:00", - Pattern: "^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])[Tt]([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\\.[0-9]+)?(([+-]([01][0-9]|2[0-3]):[0-5][0-9]))$", - }, - "uid": { - Type: "string", - Format: "uuid", - }, - }, - }, - "connectionPooler": { - Type: "object", - Properties: map[string]apiextv1.JSONSchemaProps{ - "dockerImage": { - Type: "string", - }, - "maxDBConnections": { - Type: "integer", - }, - "mode": { - Type: "string", - Enum: []apiextv1.JSON{ - { - Raw: []byte(`"session"`), - }, - { - Raw: []byte(`"transaction"`), - }, - }, - }, - "numberOfInstances": { - Type: "integer", - Minimum: &min1, - }, - "resources": { - Type: "object", - Required: []string{"requests", "limits"}, - Properties: map[string]apiextv1.JSONSchemaProps{ - "limits": { - Type: "object", - Required: []string{"cpu", "memory"}, - Properties: map[string]apiextv1.JSONSchemaProps{ - "cpu": { - Type: "string", - Description: "Decimal natural followed by m, or decimal natural followed by dot followed by up to three decimal digits (precision used by Kubernetes). Must be greater than 0", - Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$", - }, - "memory": { - Type: "string", - Description: "Plain integer or fixed-point integer using one of these suffixes: E, P, T, G, M, k (with or without a tailing i). Must be greater than 0", - Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", - }, - }, - }, - "requests": { - Type: "object", - Required: []string{"cpu", "memory"}, - Properties: map[string]apiextv1.JSONSchemaProps{ - "cpu": { - Type: "string", - Description: "Decimal natural followed by m, or decimal natural followed by dot followed by up to three decimal digits (precision used by Kubernetes). Must be greater than 0", - Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$", - }, - "memory": { - Type: "string", - Description: "Plain integer or fixed-point integer using one of these suffixes: E, P, T, G, M, k (with or without a tailing i). 
Must be greater than 0", - Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", - }, - }, - }, - }, - }, - "schema": { - Type: "string", - }, - "user": { - Type: "string", - }, - }, - }, - "databases": { - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - Description: "User names specified here as database owners must be declared in the users key of the spec key", - }, - }, - }, - "dockerImage": { - Type: "string", - }, - "enableConnectionPooler": { - Type: "boolean", - }, - "enableReplicaConnectionPooler": { - Type: "boolean", - }, - "enableLogicalBackup": { - Type: "boolean", - }, - "enableMasterLoadBalancer": { - Type: "boolean", - }, - "enableReplicaLoadBalancer": { - Type: "boolean", - }, - "enableShmVolume": { - Type: "boolean", - }, - "init_containers": { - Type: "array", - Description: "Deprecated", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - XPreserveUnknownFields: util.True(), - }, - }, - }, - "initContainers": { - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - XPreserveUnknownFields: util.True(), - }, - }, - }, - "logicalBackupSchedule": { - Type: "string", - Pattern: "^(\\d+|\\*)(/\\d+)?(\\s+(\\d+|\\*)(/\\d+)?){4}$", - }, - "maintenanceWindows": { - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - Pattern: "^\\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\\d):([0-5]?\\d)|(2[0-3]|[01]?\\d):([0-5]?\\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\\d):([0-5]?\\d)|(2[0-3]|[01]?\\d):([0-5]?\\d))\\ *$", - }, - }, - }, - "nodeAffinity": { - Type: "object", - Properties: map[string]apiextv1.JSONSchemaProps{ - "preferredDuringSchedulingIgnoredDuringExecution": { - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - Required: []string{"preference", "weight"}, - Properties: map[string]apiextv1.JSONSchemaProps{ - "preference": { - Type: "object", - Properties: map[string]apiextv1.JSONSchemaProps{ - "matchExpressions": { - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Allows: true, - }, - }, - }, - }, - "matchFields": { - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Allows: true, - }, - }, - }, - }, - }, - }, - "weight": { - Type: "integer", - Format: "int32", - }, - }, - }, - }, - }, - "requiredDuringSchedulingIgnoredDuringExecution": { - Type: "object", - Required: []string{"nodeSelectorTerms"}, - Properties: map[string]apiextv1.JSONSchemaProps{ - "nodeSelectorTerms": { - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - Properties: map[string]apiextv1.JSONSchemaProps{ - "matchExpressions": { - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Allows: true, - }, - }, - }, - }, - "matchFields": { - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Allows: true, - }, - }, - }, - }, - }, - }, - }, - }, 
- }, - }, - }, - }, - "numberOfInstances": { - Type: "integer", - Minimum: &min0, - }, - "patroni": { - Type: "object", - Properties: map[string]apiextv1.JSONSchemaProps{ - "initdb": { - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - "loop_wait": { - Type: "integer", - }, - "maximum_lag_on_failover": { - Type: "integer", - }, - "pg_hba": { - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - "retry_timeout": { - Type: "integer", - }, - "slots": { - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - }, - }, - "synchronous_mode": { - Type: "boolean", - }, - "synchronous_mode_strict": { - Type: "boolean", - }, - "ttl": { - Type: "integer", - }, - }, - }, - "podAnnotations": { - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - "pod_priority_class_name": { - Type: "string", - Description: "Deprecated", - }, - "podPriorityClassName": { - Type: "string", - }, - "postgresql": { - Type: "object", - Required: []string{"version"}, - Properties: map[string]apiextv1.JSONSchemaProps{ - "version": { - Type: "string", - Enum: []apiextv1.JSON{ - { - Raw: []byte(`"9.5"`), - }, - { - Raw: []byte(`"9.6"`), - }, - { - Raw: []byte(`"10"`), - }, - { - Raw: []byte(`"11"`), - }, - { - Raw: []byte(`"12"`), - }, - { - Raw: []byte(`"13"`), - }, - { - Raw: []byte(`"14"`), - }, - }, - }, - "parameters": { - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - }, - }, - "preparedDatabases": { - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - Properties: map[string]apiextv1.JSONSchemaProps{ - "defaultUsers": { - Type: "boolean", - }, - "extensions": { - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - "schemas": { - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - Properties: map[string]apiextv1.JSONSchemaProps{ - "defaultUsers": { - Type: "boolean", - }, - "defaultRoles": { - Type: "boolean", - }, - }, - }, - }, - }, - "secretNamespace": { - Type: "string", - }, - }, - }, - }, - }, - "replicaLoadBalancer": { - Type: "boolean", - Description: "Deprecated", - }, - "resources": { - Type: "object", - Required: []string{"requests", "limits"}, - Properties: map[string]apiextv1.JSONSchemaProps{ - "limits": { - Type: "object", - Required: []string{"cpu", "memory"}, - Properties: map[string]apiextv1.JSONSchemaProps{ - "cpu": { - Type: "string", - Description: "Decimal natural followed by m, or decimal natural followed by dot followed by up to three decimal digits (precision used by Kubernetes). Must be greater than 0", - Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$", - }, - "memory": { - Type: "string", - Description: "Plain integer or fixed-point integer using one of these suffixes: E, P, T, G, M, k (with or without a tailing i). 
Must be greater than 0", - Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", - }, - }, - }, - "requests": { - Type: "object", - Required: []string{"cpu", "memory"}, - Properties: map[string]apiextv1.JSONSchemaProps{ - "cpu": { - Type: "string", - Description: "Decimal natural followed by m, or decimal natural followed by dot followed by up to three decimal digits (precision used by Kubernetes). Must be greater than 0", - Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$", - }, - "memory": { - Type: "string", - Description: "Plain integer or fixed-point integer using one of these suffixes: E, P, T, G, M, k (with or without a tailing i). Must be greater than 0", - Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", - }, - }, - }, - }, - }, - "schedulerName": { - Type: "string", - }, - "serviceAnnotations": { - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - "sidecars": { - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - XPreserveUnknownFields: util.True(), - }, - }, - }, - "spiloRunAsUser": { - Type: "integer", - }, - "spiloRunAsGroup": { - Type: "integer", - }, - "spiloFSGroup": { - Type: "integer", - }, - "standby": { - Type: "object", - Required: []string{"s3_wal_path"}, - Properties: map[string]apiextv1.JSONSchemaProps{ - "s3_wal_path": { - Type: "string", - }, - }, - }, - "teamId": { - Type: "string", - }, - "tls": { - Type: "object", - Required: []string{"secretName"}, - Properties: map[string]apiextv1.JSONSchemaProps{ - "secretName": { - Type: "string", - }, - "certificateFile": { - Type: "string", - }, - "privateKeyFile": { - Type: "string", - }, - "caFile": { - Type: "string", - }, - "caSecretName": { - Type: "string", - }, - }, - }, - "tolerations": { - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - Required: []string{"key", "operator", "effect"}, - Properties: map[string]apiextv1.JSONSchemaProps{ - "key": { - Type: "string", - }, - "operator": { - Type: "string", - Enum: []apiextv1.JSON{ - { - Raw: []byte(`"Equal"`), - }, - { - Raw: []byte(`"Exists"`), - }, - }, - }, - "value": { - Type: "string", - }, - "effect": { - Type: "string", - Enum: []apiextv1.JSON{ - { - Raw: []byte(`"NoExecute"`), - }, - { - Raw: []byte(`"NoSchedule"`), - }, - { - Raw: []byte(`"PreferNoSchedule"`), - }, - }, - }, - "tolerationSeconds": { - Type: "integer", - }, - }, - }, - }, - }, - "useLoadBalancer": { - Type: "boolean", - Description: "Deprecated", - }, - "users": { - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "array", - Description: "Role flags specified here must not contradict each other", - Nullable: true, - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - Enum: []apiextv1.JSON{ - { - Raw: []byte(`"bypassrls"`), - }, - { - Raw: []byte(`"BYPASSRLS"`), - }, - { - Raw: []byte(`"nobypassrls"`), - }, - { - Raw: []byte(`"NOBYPASSRLS"`), - }, - { - Raw: []byte(`"createdb"`), - }, - { - Raw: []byte(`"CREATEDB"`), - }, - { - Raw: []byte(`"nocreatedb"`), - }, - { - Raw: []byte(`"NOCREATEDB"`), - }, - { - Raw: []byte(`"createrole"`), - }, - { - Raw: []byte(`"CREATEROLE"`), - }, - { - Raw: []byte(`"nocreaterole"`), - }, - { - Raw: []byte(`"NOCREATEROLE"`), - }, - { - Raw: []byte(`"inherit"`), - }, - { - Raw: []byte(`"INHERIT"`), - }, - { - Raw: 
[]byte(`"noinherit"`), - }, - { - Raw: []byte(`"NOINHERIT"`), - }, - { - Raw: []byte(`"login"`), - }, - { - Raw: []byte(`"LOGIN"`), - }, - { - Raw: []byte(`"nologin"`), - }, - { - Raw: []byte(`"NOLOGIN"`), - }, - { - Raw: []byte(`"replication"`), - }, - { - Raw: []byte(`"REPLICATION"`), - }, - { - Raw: []byte(`"noreplication"`), - }, - { - Raw: []byte(`"NOREPLICATION"`), - }, - { - Raw: []byte(`"superuser"`), - }, - { - Raw: []byte(`"SUPERUSER"`), - }, - { - Raw: []byte(`"nosuperuser"`), - }, - { - Raw: []byte(`"NOSUPERUSER"`), - }, - }, - }, - }, - }, - }, - }, - "volume": { - Type: "object", - Required: []string{"size"}, - Properties: map[string]apiextv1.JSONSchemaProps{ - "iops": { - Type: "integer", - }, - "selector": { - Type: "object", - Properties: map[string]apiextv1.JSONSchemaProps{ - "matchExpressions": { - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - Required: []string{"key", "operator", "values"}, - Properties: map[string]apiextv1.JSONSchemaProps{ - "key": { - Type: "string", - }, - "operator": { - Type: "string", - Enum: []apiextv1.JSON{ - { - Raw: []byte(`"In"`), - }, - { - Raw: []byte(`"NotIn"`), - }, - { - Raw: []byte(`"Exists"`), - }, - { - Raw: []byte(`"DoesNotExist"`), - }, - }, - }, - "values": { - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - }, - }, - }, - }, - "matchLabels": { - Type: "object", - XPreserveUnknownFields: util.True(), - }, - }, - }, - "size": { - Type: "string", - Description: "Value must not be zero", - Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", - }, - "storageClass": { - Type: "string", - }, - "subPath": { - Type: "string", - }, - "throughput": { - Type: "integer", - }, - }, - }, - }, - }, - "status": { - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - }, - }, -} - -// OperatorConfigCRDResourceValidation to check applied manifest parameters -var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{ - OpenAPIV3Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - Required: []string{"kind", "apiVersion", "configuration"}, - Properties: map[string]apiextv1.JSONSchemaProps{ - "kind": { - Type: "string", - Enum: []apiextv1.JSON{ - { - Raw: []byte(`"OperatorConfiguration"`), - }, - }, - }, - "apiVersion": { - Type: "string", - Enum: []apiextv1.JSON{ - { - Raw: []byte(`"acid.zalan.do/v1"`), - }, - }, - }, - "configuration": { - Type: "object", - Properties: map[string]apiextv1.JSONSchemaProps{ - "docker_image": { - Type: "string", - }, - "enable_crd_validation": { - Type: "boolean", - }, - "enable_lazy_spilo_upgrade": { - Type: "boolean", - }, - "enable_shm_volume": { - Type: "boolean", - }, - "enable_spilo_wal_path_compat": { - Type: "boolean", - }, - "etcd_host": { - Type: "string", - }, - "kubernetes_use_configmaps": { - Type: "boolean", - }, - "max_instances": { - Type: "integer", - Description: "-1 = disabled", - Minimum: &minDisable, - }, - "min_instances": { - Type: "integer", - Description: "-1 = disabled", - Minimum: &minDisable, - }, - "resync_period": { - Type: "string", - }, - "repair_period": { - Type: "string", - }, - "set_memory_request_to_limit": { - Type: "boolean", - }, - "sidecar_docker_images": { - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - "sidecars": 
{ - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - XPreserveUnknownFields: util.True(), - }, - }, - }, - "workers": { - Type: "integer", - Minimum: &min1, - }, - "users": { - Type: "object", - Properties: map[string]apiextv1.JSONSchemaProps{ - "replication_username": { - Type: "string", - }, - "super_username": { - Type: "string", - }, - }, - }, - "major_version_upgrade": { - Type: "object", - Properties: map[string]apiextv1.JSONSchemaProps{ - "major_version_upgrade_mode": { - Type: "string", - }, - "minimal_major_version": { - Type: "string", - }, - "target_major_version": { - Type: "string", - }, - }, - }, - "kubernetes": { - Type: "object", - Properties: map[string]apiextv1.JSONSchemaProps{ - "additional_pod_capabilities": { - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - "cluster_domain": { - Type: "string", - }, - "cluster_labels": { - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - "cluster_name_label": { - Type: "string", - }, - "custom_pod_annotations": { - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - "delete_annotation_date_key": { - Type: "string", - }, - "delete_annotation_name_key": { - Type: "string", - }, - "downscaler_annotations": { - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - "enable_cross_namespace_secret": { - Type: "boolean", - }, - "enable_init_containers": { - Type: "boolean", - }, - "enable_pod_antiaffinity": { - Type: "boolean", - }, - "enable_pod_disruption_budget": { - Type: "boolean", - }, - "enable_sidecars": { - Type: "boolean", - }, - "infrastructure_roles_secret_name": { - Type: "string", - }, - "infrastructure_roles_secrets": { - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - Required: []string{"secretname", "userkey", "passwordkey"}, - Properties: map[string]apiextv1.JSONSchemaProps{ - "secretname": { - Type: "string", - }, - "userkey": { - Type: "string", - }, - "passwordkey": { - Type: "string", - }, - "rolekey": { - Type: "string", - }, - "defaultuservalue": { - Type: "string", - }, - "defaultrolevalue": { - Type: "string", - }, - "details": { - Type: "string", - }, - "template": { - Type: "boolean", - }, - }, - }, - }, - }, - "inherited_annotations": { - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - "inherited_labels": { - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - "master_pod_move_timeout": { - Type: "string", - }, - "node_readiness_label": { - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - "oauth_token_secret_name": { - Type: "string", - }, - "pdb_name_format": { - Type: "string", - }, - "pod_antiaffinity_topology_key": { - Type: "string", - }, - "pod_environment_configmap": { - Type: "string", - }, - "pod_environment_secret": { - Type: "string", - }, - "pod_management_policy": { - Type: "string", - Enum: []apiextv1.JSON{ - { - Raw: []byte(`"ordered_ready"`), - }, - { - Raw: 
[]byte(`"parallel"`), - }, - }, - }, - "pod_priority_class_name": { - Type: "string", - }, - "pod_role_label": { - Type: "string", - }, - "pod_service_account_definition": { - Type: "string", - }, - "pod_service_account_name": { - Type: "string", - }, - "pod_service_account_role_binding_definition": { - Type: "string", - }, - "pod_terminate_grace_period": { - Type: "string", - }, - "secret_name_template": { - Type: "string", - }, - "spilo_runasuser": { - Type: "integer", - }, - "spilo_runasgroup": { - Type: "integer", - }, - "spilo_fsgroup": { - Type: "integer", - }, - "spilo_privileged": { - Type: "boolean", - }, - "spilo_allow_privilege_escalation": { - Type: "boolean", - }, - "storage_resize_mode": { - Type: "string", - Enum: []apiextv1.JSON{ - { - Raw: []byte(`"ebs"`), - }, - { - Raw: []byte(`"pvc"`), - }, - { - Raw: []byte(`"off"`), - }, - }, - }, - "toleration": { - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - "watched_namespace": { - Type: "string", - }, - }, - }, - "postgres_pod_resources": { - Type: "object", - Properties: map[string]apiextv1.JSONSchemaProps{ - "default_cpu_limit": { - Type: "string", - Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$", - }, - "default_cpu_request": { - Type: "string", - Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$", - }, - "default_memory_limit": { - Type: "string", - Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", - }, - "default_memory_request": { - Type: "string", - Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", - }, - "min_cpu_limit": { - Type: "string", - Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$", - }, - "min_memory_limit": { - Type: "string", - Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", - }, - }, - }, - "timeouts": { - Type: "object", - Properties: map[string]apiextv1.JSONSchemaProps{ - "pod_label_wait_timeout": { - Type: "string", - }, - "pod_deletion_wait_timeout": { - Type: "string", - }, - "ready_wait_interval": { - Type: "string", - }, - "ready_wait_timeout": { - Type: "string", - }, - "resource_check_interval": { - Type: "string", - }, - "resource_check_timeout": { - Type: "string", - }, - }, - }, - "load_balancer": { - Type: "object", - Properties: map[string]apiextv1.JSONSchemaProps{ - "custom_service_annotations": { - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - "db_hosted_zone": { - Type: "string", - }, - "enable_master_load_balancer": { - Type: "boolean", - }, - "enable_replica_load_balancer": { - Type: "boolean", - }, - "external_traffic_policy": { - Type: "string", - Enum: []apiextv1.JSON{ - { - Raw: []byte(`"Cluster"`), - }, - { - Raw: []byte(`"Local"`), - }, - }, - }, - "master_dns_name_format": { - Type: "string", - }, - "replica_dns_name_format": { - Type: "string", - }, - }, - }, - "aws_or_gcp": { - Type: "object", - Properties: map[string]apiextv1.JSONSchemaProps{ - "additional_secret_mount": { - Type: "string", - }, - "additional_secret_mount_path": { - Type: "string", - }, - "aws_region": { - Type: "string", - }, - "enable_ebs_gp3_migration": { - Type: "boolean", - }, - "enable_ebs_gp3_migration_max_size": { - Type: "integer", - }, - "gcp_credentials": { - Type: "string", - }, - "kube_iam_role": { - Type: "string", - }, - "log_s3_bucket": { - Type: "string", - }, - "wal_s3_bucket": { - Type: "string", - }, - }, - }, - "logical_backup": { - Type: "object", - Properties: 
map[string]apiextv1.JSONSchemaProps{ - "logical_backup_docker_image": { - Type: "string", - }, - "logical_backup_google_application_credentials": { - Type: "string", - }, - "logical_backup_job_prefix": { - Type: "string", - }, - "logical_backup_provider": { - Type: "string", - }, - "logical_backup_s3_access_key_id": { - Type: "string", - }, - "logical_backup_s3_bucket": { - Type: "string", - }, - "logical_backup_s3_endpoint": { - Type: "string", - }, - "logical_backup_s3_region": { - Type: "string", - }, - "logical_backup_s3_secret_access_key": { - Type: "string", - }, - "logical_backup_s3_sse": { - Type: "string", - }, - "logical_backup_schedule": { - Type: "string", - Pattern: "^(\\d+|\\*)(/\\d+)?(\\s+(\\d+|\\*)(/\\d+)?){4}$", - }, - }, - }, - "debug": { - Type: "object", - Properties: map[string]apiextv1.JSONSchemaProps{ - "debug_logging": { - Type: "boolean", - }, - "enable_database_access": { - Type: "boolean", - }, - }, - }, - "teams_api": { - Type: "object", - Properties: map[string]apiextv1.JSONSchemaProps{ - "enable_admin_role_for_users": { - Type: "boolean", - }, - "enable_postgres_team_crd": { - Type: "boolean", - }, - "enable_postgres_team_crd_superusers": { - Type: "boolean", - }, - "enable_team_member_deprecation": { - Type: "boolean", - }, - "enable_team_superuser": { - Type: "boolean", - }, - "enable_teams_api": { - Type: "boolean", - }, - "pam_configuration": { - Type: "string", - }, - "pam_role_name": { - Type: "string", - }, - "postgres_superuser_teams": { - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - "protected_role_names": { - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - "role_deletion_suffix": { - Type: "string", - }, - "team_admin_role": { - Type: "string", - }, - "team_api_role_configuration": { - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - "teams_api_url": { - Type: "string", - }, - }, - }, - "logging_rest_api": { - Type: "object", - Properties: map[string]apiextv1.JSONSchemaProps{ - "api_port": { - Type: "integer", - }, - "cluster_history_entries": { - Type: "integer", - }, - "ring_log_lines": { - Type: "integer", - }, - }, - }, - "scalyr": { - Type: "object", - Properties: map[string]apiextv1.JSONSchemaProps{ - "scalyr_api_key": { - Type: "string", - }, - "scalyr_cpu_limit": { - Type: "string", - Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$", - }, - "scalyr_cpu_request": { - Type: "string", - Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$", - }, - "scalyr_image": { - Type: "string", - }, - "scalyr_memory_limit": { - Type: "string", - Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", - }, - "scalyr_memory_request": { - Type: "string", - Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", - }, - "scalyr_server_url": { - Type: "string", - }, - }, - }, - "connection_pooler": { - Type: "object", - Properties: map[string]apiextv1.JSONSchemaProps{ - "connection_pooler_default_cpu_limit": { - Type: "string", - Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$", - }, - "connection_pooler_default_cpu_request": { - Type: "string", - Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$", - }, - "connection_pooler_default_memory_limit": { - Type: "string", - Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", - }, - "connection_pooler_default_memory_request": { - Type: "string", - Pattern: 
"^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", - }, - "connection_pooler_image": { - Type: "string", - }, - "connection_pooler_max_db_connections": { - Type: "integer", - }, - "connection_pooler_mode": { - Type: "string", - Enum: []apiextv1.JSON{ - { - Raw: []byte(`"session"`), - }, - { - Raw: []byte(`"transaction"`), - }, - }, - }, - "connection_pooler_number_of_instances": { - Type: "integer", - Minimum: &min1, - }, - "connection_pooler_schema": { - Type: "string", - }, - "connection_pooler_user": { - Type: "string", - }, - }, - }, - }, - }, - "status": { - Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - }, - }, -} - -func buildCRD(name, kind, plural, short string, columns []apiextv1.CustomResourceColumnDefinition, validation apiextv1.CustomResourceValidation) *apiextv1.CustomResourceDefinition { - return &apiextv1.CustomResourceDefinition{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: apiextv1.CustomResourceDefinitionSpec{ - Group: SchemeGroupVersion.Group, - Names: apiextv1.CustomResourceDefinitionNames{ - Plural: plural, - ShortNames: []string{short}, - Kind: kind, - Categories: []string{"all"}, - }, - Scope: apiextv1.NamespaceScoped, - Versions: []apiextv1.CustomResourceDefinitionVersion{ - { - Name: SchemeGroupVersion.Version, - Served: true, - Storage: true, - Subresources: &apiextv1.CustomResourceSubresources{ - Status: &apiextv1.CustomResourceSubresourceStatus{}, - }, - AdditionalPrinterColumns: columns, - Schema: &validation, - }, - }, - }, - } -} - -// PostgresCRD returns CustomResourceDefinition built from PostgresCRDResource -func PostgresCRD(enableValidation *bool) *apiextv1.CustomResourceDefinition { - postgresCRDvalidation := apiextv1.CustomResourceValidation{} - - if enableValidation != nil && *enableValidation { - postgresCRDvalidation = PostgresCRDResourceValidation - } - - return buildCRD(PostgresCRDResouceName, - PostgresCRDResourceKind, - PostgresCRDResourcePlural, - PostgresCRDResourceShort, - PostgresCRDResourceColumns, - postgresCRDvalidation) -} - -// ConfigurationCRD returns CustomResourceDefinition built from OperatorConfigCRDResource -func ConfigurationCRD(enableValidation *bool) *apiextv1.CustomResourceDefinition { - opconfigCRDvalidation := apiextv1.CustomResourceValidation{} - - if enableValidation != nil && *enableValidation { - opconfigCRDvalidation = OperatorConfigCRDResourceValidation - } - - return buildCRD(OperatorConfigCRDResourceName, - OperatorConfigCRDResouceKind, - OperatorConfigCRDResourcePlural, - OperatorConfigCRDResourceShort, - OperatorConfigCRDResourceColumns, - opconfigCRDvalidation) -} diff --git a/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/doc.go b/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/doc.go deleted file mode 100644 index 15937875..00000000 --- a/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Package v1 is the v1 version of the API. 
-// +k8s:deepcopy-gen=package,register - -// +groupName=acid.zalan.do - -package v1 diff --git a/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/marshal.go b/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/marshal.go deleted file mode 100644 index f4167ce9..00000000 --- a/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/marshal.go +++ /dev/null @@ -1,153 +0,0 @@ -package v1 - -import ( - "encoding/json" - "fmt" - "strings" - "time" -) - -type postgresqlCopy Postgresql -type postgresStatusCopy PostgresStatus - -// MarshalJSON converts a maintenance window definition to JSON. -func (m *MaintenanceWindow) MarshalJSON() ([]byte, error) { - if m.Everyday { - return []byte(fmt.Sprintf("\"%s-%s\"", - m.StartTime.Format("15:04"), - m.EndTime.Format("15:04"))), nil - } - - return []byte(fmt.Sprintf("\"%s:%s-%s\"", - m.Weekday.String()[:3], - m.StartTime.Format("15:04"), - m.EndTime.Format("15:04"))), nil -} - -// UnmarshalJSON converts a JSON to the maintenance window definition. -func (m *MaintenanceWindow) UnmarshalJSON(data []byte) error { - var ( - got MaintenanceWindow - err error - ) - - parts := strings.Split(string(data[1:len(data)-1]), "-") - if len(parts) != 2 { - return fmt.Errorf("incorrect maintenance window format") - } - - fromParts := strings.Split(parts[0], ":") - switch len(fromParts) { - case 3: - got.Everyday = false - got.Weekday, err = parseWeekday(fromParts[0]) - if err != nil { - return fmt.Errorf("could not parse weekday: %v", err) - } - - got.StartTime, err = parseTime(fromParts[1] + ":" + fromParts[2]) - case 2: - got.Everyday = true - got.StartTime, err = parseTime(fromParts[0] + ":" + fromParts[1]) - default: - return fmt.Errorf("incorrect maintenance window format") - } - if err != nil { - return fmt.Errorf("could not parse start time: %v", err) - } - - got.EndTime, err = parseTime(parts[1]) - if err != nil { - return fmt.Errorf("could not parse end time: %v", err) - } - - if got.EndTime.Before(&got.StartTime) { - return fmt.Errorf("'From' time must be prior to the 'To' time") - } - - *m = got - - return nil -} - -// UnmarshalJSON converts a JSON to the status subresource definition. -func (ps *PostgresStatus) UnmarshalJSON(data []byte) error { - var ( - tmp postgresStatusCopy - status string - ) - - err := json.Unmarshal(data, &tmp) - if err != nil { - metaErr := json.Unmarshal(data, &status) - if metaErr != nil { - return fmt.Errorf("could not parse status: %v; err %v", string(data), metaErr) - } - tmp.PostgresClusterStatus = status - } - *ps = PostgresStatus(tmp) - - return nil -} - -// UnmarshalJSON converts a JSON into the PostgreSQL object. 
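The MarshalJSON/UnmarshalJSON pair above gives MaintenanceWindow a compact string form: everyday windows serialize as "HH:MM-HH:MM" and weekday-bound windows as "Ddd:HH:MM-HH:MM", with times in Go's "15:04" layout and the weekday abbreviated to its first three letters. A hedged round-trip sketch, assuming the deleted package were still importable at its old path:

package main

import (
	"encoding/json"
	"fmt"

	v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
)

func main() {
	var w v1.MaintenanceWindow

	// Weekday-bound form: three-letter day prefix, then start and end times.
	if err := json.Unmarshal([]byte(`"Sat:01:00-06:00"`), &w); err == nil {
		fmt.Println(w.Everyday, w.Weekday) // false Saturday
	}

	// Everyday form: no weekday prefix.
	if err := json.Unmarshal([]byte(`"10:00-12:30"`), &w); err == nil {
		out, _ := json.Marshal(&w)
		fmt.Println(string(out)) // "10:00-12:30"
	}
}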
-func (p *Postgresql) UnmarshalJSON(data []byte) error { - var tmp postgresqlCopy - - err := json.Unmarshal(data, &tmp) - if err != nil { - metaErr := json.Unmarshal(data, &tmp.ObjectMeta) - if metaErr != nil { - return err - } - - tmp.Error = err.Error() - tmp.Status.PostgresClusterStatus = ClusterStatusInvalid - - *p = Postgresql(tmp) - - return nil - } - tmp2 := Postgresql(tmp) - - if clusterName, err := extractClusterName(tmp2.ObjectMeta.Name, tmp2.Spec.TeamID); err != nil { - tmp2.Error = err.Error() - tmp2.Status = PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid} - } else if err := validateCloneClusterDescription(tmp2.Spec.Clone); err != nil { - - tmp2.Error = err.Error() - tmp2.Status.PostgresClusterStatus = ClusterStatusInvalid - } else { - tmp2.Spec.ClusterName = clusterName - } - - *p = tmp2 - - return nil -} - -// UnmarshalJSON convert to Duration from byte slice of json -func (d *Duration) UnmarshalJSON(b []byte) error { - var ( - v interface{} - err error - ) - if err = json.Unmarshal(b, &v); err != nil { - return err - } - switch val := v.(type) { - case string: - t, err := time.ParseDuration(val) - if err != nil { - return err - } - *d = Duration(t) - return nil - case float64: - t := time.Duration(val) - *d = Duration(t) - return nil - default: - return fmt.Errorf("could not recognize type %T as a valid type to unmarshal to Duration", val) - } -} diff --git a/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go b/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go deleted file mode 100644 index 6d0dd136..00000000 --- a/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go +++ /dev/null @@ -1,249 +0,0 @@ -package v1 - -// Operator configuration CRD definition, please use snake_case for field names. - -import ( - "github.com/zalando/postgres-operator/pkg/util/config" - - "time" - - "github.com/zalando/postgres-operator/pkg/spec" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:onlyVerbs=get -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// OperatorConfiguration defines the specification for the OperatorConfiguration. -type OperatorConfiguration struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - - Configuration OperatorConfigurationData `json:"configuration"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// OperatorConfigurationList is used in the k8s API calls -type OperatorConfigurationList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - - Items []OperatorConfiguration `json:"items"` -} - -// PostgresUsersConfiguration defines the system users of Postgres. -type PostgresUsersConfiguration struct { - SuperUsername string `json:"super_username,omitempty"` - ReplicationUsername string `json:"replication_username,omitempty"` -} - -// MajorVersionUpgradeConfiguration defines how to execute major version upgrades of Postgres. 
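Note that Duration's unmarshaller above accepts two JSON shapes: a string handed to time.ParseDuration, or a bare number interpreted as nanoseconds (JSON numbers arrive as float64). Both forms below decode to ten minutes; a sketch under the same importability assumption as above:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
)

func main() {
	var d v1.Duration
	_ = json.Unmarshal([]byte(`"10m"`), &d) // string form, via time.ParseDuration
	fmt.Println(time.Duration(d))           // 10m0s

	_ = json.Unmarshal([]byte(`600000000000`), &d) // numeric form, nanoseconds
	fmt.Println(time.Duration(d))                  // 10m0s
}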
-type MajorVersionUpgradeConfiguration struct { - MajorVersionUpgradeMode string `json:"major_version_upgrade_mode" default:"off"` // off - no actions, manual - manifest triggers action, full - manifest and minimal version violation trigger upgrade - MinimalMajorVersion string `json:"minimal_major_version" default:"9.6"` - TargetMajorVersion string `json:"target_major_version" default:"14"` -} - -// KubernetesMetaConfiguration defines k8s conf required for all Postgres clusters and the operator itself -type KubernetesMetaConfiguration struct { - PodServiceAccountName string `json:"pod_service_account_name,omitempty"` - // TODO: change it to the proper json - PodServiceAccountDefinition string `json:"pod_service_account_definition,omitempty"` - PodServiceAccountRoleBindingDefinition string `json:"pod_service_account_role_binding_definition,omitempty"` - PodTerminateGracePeriod Duration `json:"pod_terminate_grace_period,omitempty"` - SpiloPrivileged bool `json:"spilo_privileged,omitempty"` - SpiloAllowPrivilegeEscalation *bool `json:"spilo_allow_privilege_escalation,omitempty"` - SpiloRunAsUser *int64 `json:"spilo_runasuser,omitempty"` - SpiloRunAsGroup *int64 `json:"spilo_runasgroup,omitempty"` - SpiloFSGroup *int64 `json:"spilo_fsgroup,omitempty"` - AdditionalPodCapabilities []string `json:"additional_pod_capabilities,omitempty"` - WatchedNamespace string `json:"watched_namespace,omitempty"` - PDBNameFormat config.StringTemplate `json:"pdb_name_format,omitempty"` - EnablePodDisruptionBudget *bool `json:"enable_pod_disruption_budget,omitempty"` - StorageResizeMode string `json:"storage_resize_mode,omitempty"` - EnableInitContainers *bool `json:"enable_init_containers,omitempty"` - EnableSidecars *bool `json:"enable_sidecars,omitempty"` - SecretNameTemplate config.StringTemplate `json:"secret_name_template,omitempty"` - ClusterDomain string `json:"cluster_domain,omitempty"` - OAuthTokenSecretName spec.NamespacedName `json:"oauth_token_secret_name,omitempty"` - InfrastructureRolesSecretName spec.NamespacedName `json:"infrastructure_roles_secret_name,omitempty"` - InfrastructureRolesDefs []*config.InfrastructureRole `json:"infrastructure_roles_secrets,omitempty"` - PodRoleLabel string `json:"pod_role_label,omitempty"` - ClusterLabels map[string]string `json:"cluster_labels,omitempty"` - InheritedLabels []string `json:"inherited_labels,omitempty"` - InheritedAnnotations []string `json:"inherited_annotations,omitempty"` - DownscalerAnnotations []string `json:"downscaler_annotations,omitempty"` - ClusterNameLabel string `json:"cluster_name_label,omitempty"` - DeleteAnnotationDateKey string `json:"delete_annotation_date_key,omitempty"` - DeleteAnnotationNameKey string `json:"delete_annotation_name_key,omitempty"` - NodeReadinessLabel map[string]string `json:"node_readiness_label,omitempty"` - CustomPodAnnotations map[string]string `json:"custom_pod_annotations,omitempty"` - // TODO: use a proper toleration structure? 
- PodToleration map[string]string `json:"toleration,omitempty"` - PodEnvironmentConfigMap spec.NamespacedName `json:"pod_environment_configmap,omitempty"` - PodEnvironmentSecret string `json:"pod_environment_secret,omitempty"` - PodPriorityClassName string `json:"pod_priority_class_name,omitempty"` - MasterPodMoveTimeout Duration `json:"master_pod_move_timeout,omitempty"` - EnablePodAntiAffinity bool `json:"enable_pod_antiaffinity,omitempty"` - PodAntiAffinityTopologyKey string `json:"pod_antiaffinity_topology_key,omitempty"` - PodManagementPolicy string `json:"pod_management_policy,omitempty"` - EnableCrossNamespaceSecret bool `json:"enable_cross_namespace_secret,omitempty"` -} - -// PostgresPodResourcesDefaults defines the spec of default resources -type PostgresPodResourcesDefaults struct { - DefaultCPURequest string `json:"default_cpu_request,omitempty"` - DefaultMemoryRequest string `json:"default_memory_request,omitempty"` - DefaultCPULimit string `json:"default_cpu_limit,omitempty"` - DefaultMemoryLimit string `json:"default_memory_limit,omitempty"` - MinCPULimit string `json:"min_cpu_limit,omitempty"` - MinMemoryLimit string `json:"min_memory_limit,omitempty"` -} - -// OperatorTimeouts defines the timeout of ResourceCheck, PodWait, ReadyWait -type OperatorTimeouts struct { - ResourceCheckInterval Duration `json:"resource_check_interval,omitempty"` - ResourceCheckTimeout Duration `json:"resource_check_timeout,omitempty"` - PodLabelWaitTimeout Duration `json:"pod_label_wait_timeout,omitempty"` - PodDeletionWaitTimeout Duration `json:"pod_deletion_wait_timeout,omitempty"` - ReadyWaitInterval Duration `json:"ready_wait_interval,omitempty"` - ReadyWaitTimeout Duration `json:"ready_wait_timeout,omitempty"` -} - -// LoadBalancerConfiguration defines the LB configuration -type LoadBalancerConfiguration struct { - DbHostedZone string `json:"db_hosted_zone,omitempty"` - EnableMasterLoadBalancer bool `json:"enable_master_load_balancer,omitempty"` - EnableReplicaLoadBalancer bool `json:"enable_replica_load_balancer,omitempty"` - CustomServiceAnnotations map[string]string `json:"custom_service_annotations,omitempty"` - MasterDNSNameFormat config.StringTemplate `json:"master_dns_name_format,omitempty"` - ReplicaDNSNameFormat config.StringTemplate `json:"replica_dns_name_format,omitempty"` - ExternalTrafficPolicy string `json:"external_traffic_policy" default:"Cluster"` -} - -// AWSGCPConfiguration defines the configuration for AWS -// TODO complete Google Cloud Platform (GCP) configuration -type AWSGCPConfiguration struct { - WALES3Bucket string `json:"wal_s3_bucket,omitempty"` - AWSRegion string `json:"aws_region,omitempty"` - WALGSBucket string `json:"wal_gs_bucket,omitempty"` - GCPCredentials string `json:"gcp_credentials,omitempty"` - WALAZStorageAccount string `json:"wal_az_storage_account,omitempty"` - LogS3Bucket string `json:"log_s3_bucket,omitempty"` - KubeIAMRole string `json:"kube_iam_role,omitempty"` - AdditionalSecretMount string `json:"additional_secret_mount,omitempty"` - AdditionalSecretMountPath string `json:"additional_secret_mount_path" default:"/meta/credentials"` - EnableEBSGp3Migration bool `json:"enable_ebs_gp3_migration" default:"false"` - EnableEBSGp3MigrationMaxSize int64 `json:"enable_ebs_gp3_migration_max_size" default:"1000"` -} - -// OperatorDebugConfiguration defines options for the debug mode -type OperatorDebugConfiguration struct { - DebugLogging bool `json:"debug_logging,omitempty"` - EnableDBAccess bool `json:"enable_database_access,omitempty"` -} - -// 
TeamsAPIConfiguration defines the configuration of TeamsAPI -type TeamsAPIConfiguration struct { - EnableTeamsAPI bool `json:"enable_teams_api,omitempty"` - TeamsAPIUrl string `json:"teams_api_url,omitempty"` - TeamAPIRoleConfiguration map[string]string `json:"team_api_role_configuration,omitempty"` - EnableTeamSuperuser bool `json:"enable_team_superuser,omitempty"` - EnableAdminRoleForUsers bool `json:"enable_admin_role_for_users,omitempty"` - TeamAdminRole string `json:"team_admin_role,omitempty"` - PamRoleName string `json:"pam_role_name,omitempty"` - PamConfiguration string `json:"pam_configuration,omitempty"` - ProtectedRoles []string `json:"protected_role_names,omitempty"` - PostgresSuperuserTeams []string `json:"postgres_superuser_teams,omitempty"` - EnablePostgresTeamCRD bool `json:"enable_postgres_team_crd,omitempty"` - EnablePostgresTeamCRDSuperusers bool `json:"enable_postgres_team_crd_superusers,omitempty"` - EnableTeamMemberDeprecation bool `json:"enable_team_member_deprecation,omitempty"` - RoleDeletionSuffix string `json:"role_deletion_suffix,omitempty"` -} - -// LoggingRESTAPIConfiguration defines Logging API conf -type LoggingRESTAPIConfiguration struct { - APIPort int `json:"api_port,omitempty"` - RingLogLines int `json:"ring_log_lines,omitempty"` - ClusterHistoryEntries int `json:"cluster_history_entries,omitempty"` -} - -// ScalyrConfiguration defines the configuration for ScalyrAPI -type ScalyrConfiguration struct { - ScalyrAPIKey string `json:"scalyr_api_key,omitempty"` - ScalyrImage string `json:"scalyr_image,omitempty"` - ScalyrServerURL string `json:"scalyr_server_url,omitempty"` - ScalyrCPURequest string `json:"scalyr_cpu_request,omitempty"` - ScalyrMemoryRequest string `json:"scalyr_memory_request,omitempty"` - ScalyrCPULimit string `json:"scalyr_cpu_limit,omitempty"` - ScalyrMemoryLimit string `json:"scalyr_memory_limit,omitempty"` -} - -// ConnectionPoolerConfiguration defines default configuration for connection pooler -type ConnectionPoolerConfiguration struct { - NumberOfInstances *int32 `json:"connection_pooler_number_of_instances,omitempty"` - Schema string `json:"connection_pooler_schema,omitempty"` - User string `json:"connection_pooler_user,omitempty"` - Image string `json:"connection_pooler_image,omitempty"` - Mode string `json:"connection_pooler_mode,omitempty"` - MaxDBConnections *int32 `json:"connection_pooler_max_db_connections,omitempty"` - DefaultCPURequest string `json:"connection_pooler_default_cpu_request,omitempty"` - DefaultMemoryRequest string `json:"connection_pooler_default_memory_request,omitempty"` - DefaultCPULimit string `json:"connection_pooler_default_cpu_limit,omitempty"` - DefaultMemoryLimit string `json:"connection_pooler_default_memory_limit,omitempty"` -} - -// OperatorLogicalBackupConfiguration defines configuration for logical backup -type OperatorLogicalBackupConfiguration struct { - Schedule string `json:"logical_backup_schedule,omitempty"` - DockerImage string `json:"logical_backup_docker_image,omitempty"` - BackupProvider string `json:"logical_backup_provider,omitempty"` - S3Bucket string `json:"logical_backup_s3_bucket,omitempty"` - S3Region string `json:"logical_backup_s3_region,omitempty"` - S3Endpoint string `json:"logical_backup_s3_endpoint,omitempty"` - S3AccessKeyID string `json:"logical_backup_s3_access_key_id,omitempty"` - S3SecretAccessKey string `json:"logical_backup_s3_secret_access_key,omitempty"` - S3SSE string `json:"logical_backup_s3_sse,omitempty"` - GoogleApplicationCredentials string 
`json:"logical_backup_google_application_credentials,omitempty"` - JobPrefix string `json:"logical_backup_job_prefix,omitempty"` -} - -// OperatorConfigurationData defines the operation config -type OperatorConfigurationData struct { - EnableCRDValidation *bool `json:"enable_crd_validation,omitempty"` - EnableLazySpiloUpgrade bool `json:"enable_lazy_spilo_upgrade,omitempty"` - EnablePgVersionEnvVar bool `json:"enable_pgversion_env_var,omitempty"` - EnableSpiloWalPathCompat bool `json:"enable_spilo_wal_path_compat,omitempty"` - EtcdHost string `json:"etcd_host,omitempty"` - KubernetesUseConfigMaps bool `json:"kubernetes_use_configmaps,omitempty"` - DockerImage string `json:"docker_image,omitempty"` - Workers uint32 `json:"workers,omitempty"` - MinInstances int32 `json:"min_instances,omitempty"` - MaxInstances int32 `json:"max_instances,omitempty"` - ResyncPeriod Duration `json:"resync_period,omitempty"` - RepairPeriod Duration `json:"repair_period,omitempty"` - SetMemoryRequestToLimit bool `json:"set_memory_request_to_limit,omitempty"` - ShmVolume *bool `json:"enable_shm_volume,omitempty"` - SidecarImages map[string]string `json:"sidecar_docker_images,omitempty"` // deprecated in favour of SidecarContainers - SidecarContainers []v1.Container `json:"sidecars,omitempty"` - PostgresUsersConfiguration PostgresUsersConfiguration `json:"users"` - MajorVersionUpgrade MajorVersionUpgradeConfiguration `json:"major_version_upgrade"` - Kubernetes KubernetesMetaConfiguration `json:"kubernetes"` - PostgresPodResources PostgresPodResourcesDefaults `json:"postgres_pod_resources"` - Timeouts OperatorTimeouts `json:"timeouts"` - LoadBalancer LoadBalancerConfiguration `json:"load_balancer"` - AWSGCP AWSGCPConfiguration `json:"aws_or_gcp"` - OperatorDebug OperatorDebugConfiguration `json:"debug"` - TeamsAPI TeamsAPIConfiguration `json:"teams_api"` - LoggingRESTAPI LoggingRESTAPIConfiguration `json:"logging_rest_api"` - Scalyr ScalyrConfiguration `json:"scalyr"` - LogicalBackup OperatorLogicalBackupConfiguration `json:"logical_backup"` - ConnectionPooler ConnectionPoolerConfiguration `json:"connection_pooler"` -} - -//Duration shortens this frequently used name -type Duration time.Duration diff --git a/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/postgres_team_type.go b/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/postgres_team_type.go deleted file mode 100644 index 5697c193..00000000 --- a/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/postgres_team_type.go +++ /dev/null @@ -1,33 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PostgresTeam defines Custom Resource Definition Object for team management. -type PostgresTeam struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec PostgresTeamSpec `json:"spec"` -} - -// PostgresTeamSpec defines the specification for the PostgresTeam TPR. -type PostgresTeamSpec struct { - AdditionalSuperuserTeams map[string][]string `json:"additionalSuperuserTeams,omitempty"` - AdditionalTeams map[string][]string `json:"additionalTeams,omitempty"` - AdditionalMembers map[string][]string `json:"additionalMembers,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PostgresTeamList defines a list of PostgresTeam definitions. 
-type PostgresTeamList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - - Items []PostgresTeam `json:"items"` -} diff --git a/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/postgresql_type.go b/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/postgresql_type.go deleted file mode 100644 index 079cb8b9..00000000 --- a/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/postgresql_type.go +++ /dev/null @@ -1,228 +0,0 @@ -package v1 - -// Postgres CRD definition, please use CamelCase for field names. - -import ( - "time" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Postgresql defines PostgreSQL Custom Resource Definition Object. -type Postgresql struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec PostgresSpec `json:"spec"` - Status PostgresStatus `json:"status"` - Error string `json:"-"` -} - -// PostgresSpec defines the specification for the PostgreSQL TPR. -type PostgresSpec struct { - PostgresqlParam `json:"postgresql"` - Volume `json:"volume,omitempty"` - Patroni `json:"patroni,omitempty"` - Resources `json:"resources,omitempty"` - - EnableConnectionPooler *bool `json:"enableConnectionPooler,omitempty"` - EnableReplicaConnectionPooler *bool `json:"enableReplicaConnectionPooler,omitempty"` - ConnectionPooler *ConnectionPooler `json:"connectionPooler,omitempty"` - - TeamID string `json:"teamId"` - DockerImage string `json:"dockerImage,omitempty"` - - SpiloRunAsUser *int64 `json:"spiloRunAsUser,omitempty"` - SpiloRunAsGroup *int64 `json:"spiloRunAsGroup,omitempty"` - SpiloFSGroup *int64 `json:"spiloFSGroup,omitempty"` - - // vars that enable load balancers are pointers because it is important to know if any of them is omitted from the Postgres manifest - // in that case the var evaluates to nil and the value is taken from the operator config - EnableMasterLoadBalancer *bool `json:"enableMasterLoadBalancer,omitempty"` - EnableReplicaLoadBalancer *bool `json:"enableReplicaLoadBalancer,omitempty"` - - // deprecated load balancer settings maintained for backward compatibility - // see "Load balancers" operator docs - UseLoadBalancer *bool `json:"useLoadBalancer,omitempty"` - ReplicaLoadBalancer *bool `json:"replicaLoadBalancer,omitempty"` - - // load balancers' source ranges are the same for master and replica services - AllowedSourceRanges []string `json:"allowedSourceRanges"` - - NumberOfInstances int32 `json:"numberOfInstances"` - Users map[string]UserFlags `json:"users,omitempty"` - MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"` - Clone *CloneDescription `json:"clone,omitempty"` - ClusterName string `json:"-"` - Databases map[string]string `json:"databases,omitempty"` - PreparedDatabases map[string]PreparedDatabase `json:"preparedDatabases,omitempty"` - SchedulerName *string `json:"schedulerName,omitempty"` - NodeAffinity *v1.NodeAffinity `json:"nodeAffinity,omitempty"` - Tolerations []v1.Toleration `json:"tolerations,omitempty"` - Sidecars []Sidecar `json:"sidecars,omitempty"` - InitContainers []v1.Container `json:"initContainers,omitempty"` - PodPriorityClassName string `json:"podPriorityClassName,omitempty"` - ShmVolume *bool `json:"enableShmVolume,omitempty"` - EnableLogicalBackup bool `json:"enableLogicalBackup,omitempty"` - LogicalBackupSchedule string 
`json:"logicalBackupSchedule,omitempty"` - StandbyCluster *StandbyDescription `json:"standby,omitempty"` - PodAnnotations map[string]string `json:"podAnnotations,omitempty"` - ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` - TLS *TLSDescription `json:"tls,omitempty"` - AdditionalVolumes []AdditionalVolume `json:"additionalVolumes,omitempty"` - - // deprecated json tags - InitContainersOld []v1.Container `json:"init_containers,omitempty"` - PodPriorityClassNameOld string `json:"pod_priority_class_name,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PostgresqlList defines a list of PostgreSQL clusters. -type PostgresqlList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - - Items []Postgresql `json:"items"` -} - -// PreparedDatabase describes elements to be bootstrapped -type PreparedDatabase struct { - PreparedSchemas map[string]PreparedSchema `json:"schemas,omitempty"` - DefaultUsers bool `json:"defaultUsers,omitempty" defaults:"false"` - Extensions map[string]string `json:"extensions,omitempty"` - SecretNamespace string `json:"secretNamespace,omitempty"` -} - -// PreparedSchema describes elements to be bootstrapped per schema -type PreparedSchema struct { - DefaultRoles *bool `json:"defaultRoles,omitempty" defaults:"true"` - DefaultUsers bool `json:"defaultUsers,omitempty" defaults:"false"` -} - -// MaintenanceWindow describes the time window when the operator is allowed to do maintenance on a cluster. -type MaintenanceWindow struct { - Everyday bool - Weekday time.Weekday - StartTime metav1.Time // Start time - EndTime metav1.Time // End time -} - -// Volume describes a single volume in the manifest. -type Volume struct { - Selector *metav1.LabelSelector `json:"selector,omitempty"` - Size string `json:"size"` - StorageClass string `json:"storageClass,omitempty"` - SubPath string `json:"subPath,omitempty"` - Iops *int64 `json:"iops,omitempty"` - Throughput *int64 `json:"throughput,omitempty"` - VolumeType string `json:"type,omitempty"` -} - -// AdditionalVolume specs additional optional volumes for statefulset -type AdditionalVolume struct { - Name string `json:"name"` - MountPath string `json:"mountPath"` - SubPath string `json:"subPath,omitempty"` - TargetContainers []string `json:"targetContainers"` - VolumeSource v1.VolumeSource `json:"volumeSource"` -} - -// PostgresqlParam describes PostgreSQL version and pairs of configuration parameter name - values. -type PostgresqlParam struct { - PgVersion string `json:"version"` - Parameters map[string]string `json:"parameters,omitempty"` -} - -// ResourceDescription describes CPU and memory resources defined for a cluster. -type ResourceDescription struct { - CPU string `json:"cpu"` - Memory string `json:"memory"` -} - -// Resources describes requests and limits for the cluster resouces. 
-// Resources describes requests and limits for the cluster resources. -type Resources struct { - ResourceRequests ResourceDescription `json:"requests,omitempty"` - ResourceLimits ResourceDescription `json:"limits,omitempty"` -} - -// Patroni contains Patroni-specific configuration -type Patroni struct { - InitDB map[string]string `json:"initdb,omitempty"` - PgHba []string `json:"pg_hba,omitempty"` - TTL uint32 `json:"ttl,omitempty"` - LoopWait uint32 `json:"loop_wait,omitempty"` - RetryTimeout uint32 `json:"retry_timeout,omitempty"` - MaximumLagOnFailover float32 `json:"maximum_lag_on_failover,omitempty"` // float32 because https://github.com/kubernetes/kubernetes/issues/30213 - Slots map[string]map[string]string `json:"slots,omitempty"` - SynchronousMode bool `json:"synchronous_mode,omitempty"` - SynchronousModeStrict bool `json:"synchronous_mode_strict,omitempty"` -} - -// StandbyDescription contains the s3 wal path -type StandbyDescription struct { - S3WalPath string `json:"s3_wal_path,omitempty"` -} - -// TLSDescription specifies TLS properties -type TLSDescription struct { - SecretName string `json:"secretName,omitempty"` - CertificateFile string `json:"certificateFile,omitempty"` - PrivateKeyFile string `json:"privateKeyFile,omitempty"` - CAFile string `json:"caFile,omitempty"` - CASecretName string `json:"caSecretName,omitempty"` -} - -// CloneDescription describes which cluster the new cluster should clone from and up to which point in time -type CloneDescription struct { - ClusterName string `json:"cluster,omitempty"` - UID string `json:"uid,omitempty"` - EndTimestamp string `json:"timestamp,omitempty"` - S3WalPath string `json:"s3_wal_path,omitempty"` - S3Endpoint string `json:"s3_endpoint,omitempty"` - S3AccessKeyId string `json:"s3_access_key_id,omitempty"` - S3SecretAccessKey string `json:"s3_secret_access_key,omitempty"` - S3ForcePathStyle *bool `json:"s3_force_path_style,omitempty" defaults:"false"` -} - -// Sidecar defines a container to be run in the same pod as the Postgres container. -type Sidecar struct { - Resources `json:"resources,omitempty"` - Name string `json:"name,omitempty"` - DockerImage string `json:"image,omitempty"` - Ports []v1.ContainerPort `json:"ports,omitempty"` - Env []v1.EnvVar `json:"env,omitempty"` -} - -// UserFlags defines flags (such as superuser, nologin) that could be assigned to individual users -type UserFlags []string - -// PostgresStatus contains the status of the PostgreSQL cluster (running, creation failed etc.) -type PostgresStatus struct { - PostgresClusterStatus string `json:"PostgresClusterStatus"` -} - -// ConnectionPooler defines options for the connection pooler -// -// TODO: prepared snippets of configuration, one can choose via type, e.g. -// pgbouncer-large (with higher resources) or odyssey-small (with smaller -// resources) -// Type string `json:"type,omitempty"` -// -// TODO: figure out what other important parameters of the connection pooler it -// makes sense to expose. E.g. pool size (min/max boundaries), max client -// connections etc.
-type ConnectionPooler struct { - NumberOfInstances *int32 `json:"numberOfInstances,omitempty"` - Schema string `json:"schema,omitempty"` - User string `json:"user,omitempty"` - Mode string `json:"mode,omitempty"` - DockerImage string `json:"dockerImage,omitempty"` - MaxDBConnections *int32 `json:"maxDBConnections,omitempty"` - - Resources `json:"resources,omitempty"` -} diff --git a/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/register.go b/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/register.go deleted file mode 100644 index 9dcbf2ba..00000000 --- a/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/register.go +++ /dev/null @@ -1,54 +0,0 @@ -package v1 - -import ( - acidzalando "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// APIVersion of the `postgresql` and `operator` CRDs -const ( - APIVersion = "v1" -) - -var ( - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - - // SchemeBuilder is an instance of runtime.SchemeBuilder, global for this package - SchemeBuilder runtime.SchemeBuilder - localSchemeBuilder = &SchemeBuilder - // AddToScheme is localSchemeBuilder.AddToScheme - AddToScheme = localSchemeBuilder.AddToScheme - // SchemeGroupVersion has GroupName and APIVersion - SchemeGroupVersion = schema.GroupVersion{Group: acidzalando.GroupName, Version: APIVersion} -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes) -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - // AddKnownTypes derives the type kind from the type name, which is always uppercase. - // For our CRDs we use lowercase names historically, therefore we have to supply the name separately.
- // TODO: Use uppercase CRDResourceKind of our types in the next major API version - scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("postgresql"), &Postgresql{}) - scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("postgresqlList"), &PostgresqlList{}) - scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("PostgresTeam"), &PostgresTeam{}) - scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("PostgresTeamList"), &PostgresTeamList{}) - scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("OperatorConfiguration"), - &OperatorConfiguration{}) - scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("OperatorConfigurationList"), - &OperatorConfigurationList{}) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/util.go b/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/util.go deleted file mode 100644 index a795ec68..00000000 --- a/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/util.go +++ /dev/null @@ -1,106 +0,0 @@ -package v1 - -import ( - "fmt" - "regexp" - "strings" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var ( - weekdays = map[string]int{"Sun": 0, "Mon": 1, "Tue": 2, "Wed": 3, "Thu": 4, "Fri": 5, "Sat": 6} - serviceNameRegex = regexp.MustCompile(serviceNameRegexString) -) - -// Clone is a convenience wrapper around DeepCopy -func (p *Postgresql) Clone() *Postgresql { - if p == nil { - return nil - } - return p.DeepCopy() -} - -func parseTime(s string) (metav1.Time, error) { - parts := strings.Split(s, ":") - if len(parts) != 2 { - return metav1.Time{}, fmt.Errorf("incorrect time format") - } - timeLayout := "15:04" - - tp, err := time.Parse(timeLayout, s) - if err != nil { - return metav1.Time{}, err - } - - return metav1.Time{Time: tp.UTC()}, nil - } - -func parseWeekday(s string) (time.Weekday, error) { - weekday, ok := weekdays[s] - if !ok { - return time.Weekday(0), fmt.Errorf("incorrect weekday") - } - - return time.Weekday(weekday), nil -} - -func extractClusterName(clusterName string, teamName string) (string, error) { - teamNameLen := len(teamName) - if len(clusterName) < teamNameLen+2 { - return "", fmt.Errorf("cluster name must match {TEAM}-{NAME} format.
Got cluster name '%v', team name '%v'", clusterName, teamName) - } - - if teamNameLen == 0 { - return "", fmt.Errorf("team name is empty") - } - - if strings.ToLower(clusterName[:teamNameLen+1]) != strings.ToLower(teamName)+"-" { - return "", fmt.Errorf("name must match {TEAM}-{NAME} format") - } - if len(clusterName) > clusterNameMaxLength { - return "", fmt.Errorf("name cannot be longer than %d characters", clusterNameMaxLength) - } - if !serviceNameRegex.MatchString(clusterName) { - return "", fmt.Errorf("name must conform to DNS-1035, regex used for validation is %q", - serviceNameRegexString) - } - - return clusterName[teamNameLen+1:], nil -} - -func validateCloneClusterDescription(clone *CloneDescription) error { - // when cloning from the basebackup (no end timestamp) check that the cluster name is a valid service name - if clone != nil && clone.ClusterName != "" && clone.EndTimestamp == "" { - if !serviceNameRegex.MatchString(clone.ClusterName) { - return fmt.Errorf("clone cluster name must conform to DNS-1035, regex used for validation is %q", - serviceNameRegexString) - } - if len(clone.ClusterName) > serviceNameMaxLength { - return fmt.Errorf("clone cluster name must be no longer than %d characters", serviceNameMaxLength) - } - } - return nil -} - -// Success reports whether the current status is not one of the failed states -func (postgresStatus PostgresStatus) Success() bool { - return postgresStatus.PostgresClusterStatus != ClusterStatusAddFailed && - postgresStatus.PostgresClusterStatus != ClusterStatusUpdateFailed && - postgresStatus.PostgresClusterStatus != ClusterStatusSyncFailed -} - -// Running reports whether the cluster status is Running -func (postgresStatus PostgresStatus) Running() bool { - return postgresStatus.PostgresClusterStatus == ClusterStatusRunning -} - -// Creating reports whether the cluster status is Creating -func (postgresStatus PostgresStatus) Creating() bool { - return postgresStatus.PostgresClusterStatus == ClusterStatusCreating -} - -func (postgresStatus PostgresStatus) String() string { - return postgresStatus.PostgresClusterStatus -} diff --git a/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go b/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go deleted file mode 100644 index c0be8fdf..00000000 --- a/vendor/github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,1227 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2021 Compose, Zalando SE - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE.
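The naming contract enforced by extractClusterName above deserves a concrete example: manifests must be named {TEAM}-{NAME}, the team prefix is matched case-insensitively, and for team "acid" the manifest "acid-minimal-cluster" yields the cluster name "minimal-cluster". Since the function is unexported, here is an equivalent standalone sketch (it deliberately omits the length and DNS-1035 checks):

package main

import (
	"fmt"
	"strings"
)

// extractClusterName mirrors the deleted helper above, minus the length and
// DNS-1035 validation, purely for illustration.
func extractClusterName(clusterName, teamName string) (string, error) {
	prefix := strings.ToLower(teamName) + "-"
	if teamName == "" || !strings.HasPrefix(strings.ToLower(clusterName), prefix) {
		return "", fmt.Errorf("name must match {TEAM}-{NAME} format")
	}
	return clusterName[len(prefix):], nil
}

func main() {
	fmt.Println(extractClusterName("acid-minimal-cluster", "acid")) // minimal-cluster <nil>
	fmt.Println(extractClusterName("base-minimal-cluster", "acid")) // error
}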
-*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - config "github.com/zalando/postgres-operator/pkg/util/config" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSGCPConfiguration) DeepCopyInto(out *AWSGCPConfiguration) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSGCPConfiguration. -func (in *AWSGCPConfiguration) DeepCopy() *AWSGCPConfiguration { - if in == nil { - return nil - } - out := new(AWSGCPConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AdditionalVolume) DeepCopyInto(out *AdditionalVolume) { - *out = *in - if in.TargetContainers != nil { - in, out := &in.TargetContainers, &out.TargetContainers - *out = make([]string, len(*in)) - copy(*out, *in) - } - in.VolumeSource.DeepCopyInto(&out.VolumeSource) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalVolume. -func (in *AdditionalVolume) DeepCopy() *AdditionalVolume { - if in == nil { - return nil - } - out := new(AdditionalVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CloneDescription) DeepCopyInto(out *CloneDescription) { - *out = *in - if in.S3ForcePathStyle != nil { - in, out := &in.S3ForcePathStyle, &out.S3ForcePathStyle - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloneDescription. -func (in *CloneDescription) DeepCopy() *CloneDescription { - if in == nil { - return nil - } - out := new(CloneDescription) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConnectionPooler) DeepCopyInto(out *ConnectionPooler) { - *out = *in - if in.NumberOfInstances != nil { - in, out := &in.NumberOfInstances, &out.NumberOfInstances - *out = new(int32) - **out = **in - } - if in.MaxDBConnections != nil { - in, out := &in.MaxDBConnections, &out.MaxDBConnections - *out = new(int32) - **out = **in - } - out.Resources = in.Resources - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPooler. -func (in *ConnectionPooler) DeepCopy() *ConnectionPooler { - if in == nil { - return nil - } - out := new(ConnectionPooler) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConnectionPoolerConfiguration) DeepCopyInto(out *ConnectionPoolerConfiguration) { - *out = *in - if in.NumberOfInstances != nil { - in, out := &in.NumberOfInstances, &out.NumberOfInstances - *out = new(int32) - **out = **in - } - if in.MaxDBConnections != nil { - in, out := &in.MaxDBConnections, &out.MaxDBConnections - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolerConfiguration. 
-func (in *ConnectionPoolerConfiguration) DeepCopy() *ConnectionPoolerConfiguration { - if in == nil { - return nil - } - out := new(ConnectionPoolerConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfiguration) { - *out = *in - if in.SpiloAllowPrivilegeEscalation != nil { - in, out := &in.SpiloAllowPrivilegeEscalation, &out.SpiloAllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.SpiloRunAsUser != nil { - in, out := &in.SpiloRunAsUser, &out.SpiloRunAsUser - *out = new(int64) - **out = **in - } - if in.SpiloRunAsGroup != nil { - in, out := &in.SpiloRunAsGroup, &out.SpiloRunAsGroup - *out = new(int64) - **out = **in - } - if in.SpiloFSGroup != nil { - in, out := &in.SpiloFSGroup, &out.SpiloFSGroup - *out = new(int64) - **out = **in - } - if in.AdditionalPodCapabilities != nil { - in, out := &in.AdditionalPodCapabilities, &out.AdditionalPodCapabilities - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.EnablePodDisruptionBudget != nil { - in, out := &in.EnablePodDisruptionBudget, &out.EnablePodDisruptionBudget - *out = new(bool) - **out = **in - } - if in.EnableInitContainers != nil { - in, out := &in.EnableInitContainers, &out.EnableInitContainers - *out = new(bool) - **out = **in - } - if in.EnableSidecars != nil { - in, out := &in.EnableSidecars, &out.EnableSidecars - *out = new(bool) - **out = **in - } - out.OAuthTokenSecretName = in.OAuthTokenSecretName - out.InfrastructureRolesSecretName = in.InfrastructureRolesSecretName - if in.InfrastructureRolesDefs != nil { - in, out := &in.InfrastructureRolesDefs, &out.InfrastructureRolesDefs - *out = make([]*config.InfrastructureRole, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(config.InfrastructureRole) - **out = **in - } - } - } - if in.ClusterLabels != nil { - in, out := &in.ClusterLabels, &out.ClusterLabels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.InheritedLabels != nil { - in, out := &in.InheritedLabels, &out.InheritedLabels - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.InheritedAnnotations != nil { - in, out := &in.InheritedAnnotations, &out.InheritedAnnotations - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.DownscalerAnnotations != nil { - in, out := &in.DownscalerAnnotations, &out.DownscalerAnnotations - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.NodeReadinessLabel != nil { - in, out := &in.NodeReadinessLabel, &out.NodeReadinessLabel - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.CustomPodAnnotations != nil { - in, out := &in.CustomPodAnnotations, &out.CustomPodAnnotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.PodToleration != nil { - in, out := &in.PodToleration, &out.PodToleration - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - out.PodEnvironmentConfigMap = in.PodEnvironmentConfigMap - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesMetaConfiguration. 
-func (in *KubernetesMetaConfiguration) DeepCopy() *KubernetesMetaConfiguration { - if in == nil { - return nil - } - out := new(KubernetesMetaConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LoadBalancerConfiguration) DeepCopyInto(out *LoadBalancerConfiguration) { - *out = *in - if in.CustomServiceAnnotations != nil { - in, out := &in.CustomServiceAnnotations, &out.CustomServiceAnnotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerConfiguration. -func (in *LoadBalancerConfiguration) DeepCopy() *LoadBalancerConfiguration { - if in == nil { - return nil - } - out := new(LoadBalancerConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LoggingRESTAPIConfiguration) DeepCopyInto(out *LoggingRESTAPIConfiguration) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingRESTAPIConfiguration. -func (in *LoggingRESTAPIConfiguration) DeepCopy() *LoggingRESTAPIConfiguration { - if in == nil { - return nil - } - out := new(LoggingRESTAPIConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MaintenanceWindow) DeepCopyInto(out *MaintenanceWindow) { - *out = *in - in.StartTime.DeepCopyInto(&out.StartTime) - in.EndTime.DeepCopyInto(&out.EndTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindow. -func (in *MaintenanceWindow) DeepCopy() *MaintenanceWindow { - if in == nil { - return nil - } - out := new(MaintenanceWindow) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MajorVersionUpgradeConfiguration) DeepCopyInto(out *MajorVersionUpgradeConfiguration) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MajorVersionUpgradeConfiguration. -func (in *MajorVersionUpgradeConfiguration) DeepCopy() *MajorVersionUpgradeConfiguration { - if in == nil { - return nil - } - out := new(MajorVersionUpgradeConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OperatorConfiguration) DeepCopyInto(out *OperatorConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Configuration.DeepCopyInto(&out.Configuration) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConfiguration. -func (in *OperatorConfiguration) DeepCopy() *OperatorConfiguration { - if in == nil { - return nil - } - out := new(OperatorConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *OperatorConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OperatorConfigurationData) DeepCopyInto(out *OperatorConfigurationData) { - *out = *in - if in.EnableCRDValidation != nil { - in, out := &in.EnableCRDValidation, &out.EnableCRDValidation - *out = new(bool) - **out = **in - } - if in.ShmVolume != nil { - in, out := &in.ShmVolume, &out.ShmVolume - *out = new(bool) - **out = **in - } - if in.SidecarImages != nil { - in, out := &in.SidecarImages, &out.SidecarImages - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.SidecarContainers != nil { - in, out := &in.SidecarContainers, &out.SidecarContainers - *out = make([]corev1.Container, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - out.PostgresUsersConfiguration = in.PostgresUsersConfiguration - out.MajorVersionUpgrade = in.MajorVersionUpgrade - in.Kubernetes.DeepCopyInto(&out.Kubernetes) - out.PostgresPodResources = in.PostgresPodResources - out.Timeouts = in.Timeouts - in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) - out.AWSGCP = in.AWSGCP - out.OperatorDebug = in.OperatorDebug - in.TeamsAPI.DeepCopyInto(&out.TeamsAPI) - out.LoggingRESTAPI = in.LoggingRESTAPI - out.Scalyr = in.Scalyr - out.LogicalBackup = in.LogicalBackup - in.ConnectionPooler.DeepCopyInto(&out.ConnectionPooler) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConfigurationData. -func (in *OperatorConfigurationData) DeepCopy() *OperatorConfigurationData { - if in == nil { - return nil - } - out := new(OperatorConfigurationData) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OperatorConfigurationList) DeepCopyInto(out *OperatorConfigurationList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]OperatorConfiguration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConfigurationList. -func (in *OperatorConfigurationList) DeepCopy() *OperatorConfigurationList { - if in == nil { - return nil - } - out := new(OperatorConfigurationList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *OperatorConfigurationList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OperatorDebugConfiguration) DeepCopyInto(out *OperatorDebugConfiguration) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorDebugConfiguration. -func (in *OperatorDebugConfiguration) DeepCopy() *OperatorDebugConfiguration { - if in == nil { - return nil - } - out := new(OperatorDebugConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *OperatorLogicalBackupConfiguration) DeepCopyInto(out *OperatorLogicalBackupConfiguration) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorLogicalBackupConfiguration. -func (in *OperatorLogicalBackupConfiguration) DeepCopy() *OperatorLogicalBackupConfiguration { - if in == nil { - return nil - } - out := new(OperatorLogicalBackupConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OperatorTimeouts) DeepCopyInto(out *OperatorTimeouts) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorTimeouts. -func (in *OperatorTimeouts) DeepCopy() *OperatorTimeouts { - if in == nil { - return nil - } - out := new(OperatorTimeouts) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Patroni) DeepCopyInto(out *Patroni) { - *out = *in - if in.InitDB != nil { - in, out := &in.InitDB, &out.InitDB - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.PgHba != nil { - in, out := &in.PgHba, &out.PgHba - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Slots != nil { - in, out := &in.Slots, &out.Slots - *out = make(map[string]map[string]string, len(*in)) - for key, val := range *in { - var outVal map[string]string - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - (*out)[key] = outVal - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Patroni. -func (in *Patroni) DeepCopy() *Patroni { - if in == nil { - return nil - } - out := new(Patroni) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PostgresPodResourcesDefaults) DeepCopyInto(out *PostgresPodResourcesDefaults) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresPodResourcesDefaults. -func (in *PostgresPodResourcesDefaults) DeepCopy() *PostgresPodResourcesDefaults { - if in == nil { - return nil - } - out := new(PostgresPodResourcesDefaults) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) { - *out = *in - in.PostgresqlParam.DeepCopyInto(&out.PostgresqlParam) - in.Volume.DeepCopyInto(&out.Volume) - in.Patroni.DeepCopyInto(&out.Patroni) - out.Resources = in.Resources - if in.EnableConnectionPooler != nil { - in, out := &in.EnableConnectionPooler, &out.EnableConnectionPooler - *out = new(bool) - **out = **in - } - if in.EnableReplicaConnectionPooler != nil { - in, out := &in.EnableReplicaConnectionPooler, &out.EnableReplicaConnectionPooler - *out = new(bool) - **out = **in - } - if in.ConnectionPooler != nil { - in, out := &in.ConnectionPooler, &out.ConnectionPooler - *out = new(ConnectionPooler) - (*in).DeepCopyInto(*out) - } - if in.SpiloRunAsUser != nil { - in, out := &in.SpiloRunAsUser, &out.SpiloRunAsUser - *out = new(int64) - **out = **in - } - if in.SpiloRunAsGroup != nil { - in, out := &in.SpiloRunAsGroup, &out.SpiloRunAsGroup - *out = new(int64) - **out = **in - } - if in.SpiloFSGroup != nil { - in, out := &in.SpiloFSGroup, &out.SpiloFSGroup - *out = new(int64) - **out = **in - } - if in.EnableMasterLoadBalancer != nil { - in, out := &in.EnableMasterLoadBalancer, &out.EnableMasterLoadBalancer - *out = new(bool) - **out = **in - } - if in.EnableReplicaLoadBalancer != nil { - in, out := &in.EnableReplicaLoadBalancer, &out.EnableReplicaLoadBalancer - *out = new(bool) - **out = **in - } - if in.UseLoadBalancer != nil { - in, out := &in.UseLoadBalancer, &out.UseLoadBalancer - *out = new(bool) - **out = **in - } - if in.ReplicaLoadBalancer != nil { - in, out := &in.ReplicaLoadBalancer, &out.ReplicaLoadBalancer - *out = new(bool) - **out = **in - } - if in.AllowedSourceRanges != nil { - in, out := &in.AllowedSourceRanges, &out.AllowedSourceRanges - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Users != nil { - in, out := &in.Users, &out.Users - *out = make(map[string]UserFlags, len(*in)) - for key, val := range *in { - var outVal []string - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make(UserFlags, len(*in)) - copy(*out, *in) - } - (*out)[key] = outVal - } - } - if in.MaintenanceWindows != nil { - in, out := &in.MaintenanceWindows, &out.MaintenanceWindows - *out = make([]MaintenanceWindow, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Clone != nil { - in, out := &in.Clone, &out.Clone - *out = new(CloneDescription) - (*in).DeepCopyInto(*out) - } - if in.Databases != nil { - in, out := &in.Databases, &out.Databases - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.PreparedDatabases != nil { - in, out := &in.PreparedDatabases, &out.PreparedDatabases - *out = make(map[string]PreparedDatabase, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - if in.SchedulerName != nil { - in, out := &in.SchedulerName, &out.SchedulerName - *out = new(string) - **out = **in - } - if in.NodeAffinity != nil { - in, out := &in.NodeAffinity, &out.NodeAffinity - *out = new(corev1.NodeAffinity) - (*in).DeepCopyInto(*out) - } - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]corev1.Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Sidecars != nil { - in, out := &in.Sidecars, &out.Sidecars - *out = make([]Sidecar, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.InitContainers != nil { - in, out := &in.InitContainers, &out.InitContainers - *out 
= make([]corev1.Container, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ShmVolume != nil { - in, out := &in.ShmVolume, &out.ShmVolume - *out = new(bool) - **out = **in - } - if in.StandbyCluster != nil { - in, out := &in.StandbyCluster, &out.StandbyCluster - *out = new(StandbyDescription) - **out = **in - } - if in.PodAnnotations != nil { - in, out := &in.PodAnnotations, &out.PodAnnotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.ServiceAnnotations != nil { - in, out := &in.ServiceAnnotations, &out.ServiceAnnotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.TLS != nil { - in, out := &in.TLS, &out.TLS - *out = new(TLSDescription) - **out = **in - } - if in.AdditionalVolumes != nil { - in, out := &in.AdditionalVolumes, &out.AdditionalVolumes - *out = make([]AdditionalVolume, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.InitContainersOld != nil { - in, out := &in.InitContainersOld, &out.InitContainersOld - *out = make([]corev1.Container, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSpec. -func (in *PostgresSpec) DeepCopy() *PostgresSpec { - if in == nil { - return nil - } - out := new(PostgresSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PostgresStatus) DeepCopyInto(out *PostgresStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresStatus. -func (in *PostgresStatus) DeepCopy() *PostgresStatus { - if in == nil { - return nil - } - out := new(PostgresStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PostgresTeam) DeepCopyInto(out *PostgresTeam) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTeam. -func (in *PostgresTeam) DeepCopy() *PostgresTeam { - if in == nil { - return nil - } - out := new(PostgresTeam) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PostgresTeam) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PostgresTeamList) DeepCopyInto(out *PostgresTeamList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PostgresTeam, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTeamList. 
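[editor's aside] The deleted DeepCopyInto methods above all follow the same generator idiom for pointer, map, and slice fields: shadow `in`/`out` with pointers to the field, allocate, then copy the pointee. A minimal hand-written sketch of that pattern, using a hypothetical struct rather than the operator's real types:

package v1

// Spec is a hypothetical stand-in for types like PostgresSpec above.
type Spec struct {
	EnablePooler *bool             // optional flag, copied via new(bool)
	Labels       map[string]string // copied key by key
}

// DeepCopyInto mirrors the generated pattern: aliasing in/out per field
// keeps each copy block self-contained and easy to generate.
func (in *Spec) DeepCopyInto(out *Spec) {
	*out = *in // copies value fields; pointer/map fields are fixed up below
	if in.EnablePooler != nil {
		in, out := &in.EnablePooler, &out.EnablePooler
		*out = new(bool)
		**out = **in
	}
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
}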
-func (in *PostgresTeamList) DeepCopy() *PostgresTeamList { - if in == nil { - return nil - } - out := new(PostgresTeamList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PostgresTeamList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PostgresTeamSpec) DeepCopyInto(out *PostgresTeamSpec) { - *out = *in - if in.AdditionalSuperuserTeams != nil { - in, out := &in.AdditionalSuperuserTeams, &out.AdditionalSuperuserTeams - *out = make(map[string][]string, len(*in)) - for key, val := range *in { - var outVal []string - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make([]string, len(*in)) - copy(*out, *in) - } - (*out)[key] = outVal - } - } - if in.AdditionalTeams != nil { - in, out := &in.AdditionalTeams, &out.AdditionalTeams - *out = make(map[string][]string, len(*in)) - for key, val := range *in { - var outVal []string - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make([]string, len(*in)) - copy(*out, *in) - } - (*out)[key] = outVal - } - } - if in.AdditionalMembers != nil { - in, out := &in.AdditionalMembers, &out.AdditionalMembers - *out = make(map[string][]string, len(*in)) - for key, val := range *in { - var outVal []string - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make([]string, len(*in)) - copy(*out, *in) - } - (*out)[key] = outVal - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTeamSpec. -func (in *PostgresTeamSpec) DeepCopy() *PostgresTeamSpec { - if in == nil { - return nil - } - out := new(PostgresTeamSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PostgresUsersConfiguration) DeepCopyInto(out *PostgresUsersConfiguration) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresUsersConfiguration. -func (in *PostgresUsersConfiguration) DeepCopy() *PostgresUsersConfiguration { - if in == nil { - return nil - } - out := new(PostgresUsersConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Postgresql) DeepCopyInto(out *Postgresql) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Postgresql. -func (in *Postgresql) DeepCopy() *Postgresql { - if in == nil { - return nil - } - out := new(Postgresql) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Postgresql) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PostgresqlList) DeepCopyInto(out *PostgresqlList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Postgresql, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlList. -func (in *PostgresqlList) DeepCopy() *PostgresqlList { - if in == nil { - return nil - } - out := new(PostgresqlList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PostgresqlList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PostgresqlParam) DeepCopyInto(out *PostgresqlParam) { - *out = *in - if in.Parameters != nil { - in, out := &in.Parameters, &out.Parameters - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlParam. -func (in *PostgresqlParam) DeepCopy() *PostgresqlParam { - if in == nil { - return nil - } - out := new(PostgresqlParam) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PreparedDatabase) DeepCopyInto(out *PreparedDatabase) { - *out = *in - if in.PreparedSchemas != nil { - in, out := &in.PreparedSchemas, &out.PreparedSchemas - *out = make(map[string]PreparedSchema, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - if in.Extensions != nil { - in, out := &in.Extensions, &out.Extensions - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreparedDatabase. -func (in *PreparedDatabase) DeepCopy() *PreparedDatabase { - if in == nil { - return nil - } - out := new(PreparedDatabase) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PreparedSchema) DeepCopyInto(out *PreparedSchema) { - *out = *in - if in.DefaultRoles != nil { - in, out := &in.DefaultRoles, &out.DefaultRoles - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreparedSchema. -func (in *PreparedSchema) DeepCopy() *PreparedSchema { - if in == nil { - return nil - } - out := new(PreparedSchema) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceDescription) DeepCopyInto(out *ResourceDescription) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDescription. -func (in *ResourceDescription) DeepCopy() *ResourceDescription { - if in == nil { - return nil - } - out := new(ResourceDescription) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
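[editor's aside] The DeepCopyObject methods in this file are what let these CRD types satisfy apimachinery's runtime.Object, which client-go and controller-runtime require for anything they cache or serialize. A self-contained sketch of that contract, with a hypothetical type (the compile-time assertion at the end is the usual way to pin it down):

package v1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// Demo is a hypothetical CRD type; the embedded TypeMeta supplies
// GetObjectKind, and DeepCopyObject supplies the other half of
// runtime.Object.
type Demo struct {
	metav1.TypeMeta `json:",inline"`
}

func (in *Demo) DeepCopy() *Demo {
	if in == nil {
		return nil
	}
	out := new(Demo)
	*out = *in
	return out
}

// DeepCopyObject follows the exact shape seen in the deleted file above.
func (in *Demo) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// Compile-time assertion that Demo satisfies runtime.Object.
var _ runtime.Object = &Demo{}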
-func (in *Resources) DeepCopyInto(out *Resources) { - *out = *in - out.ResourceRequests = in.ResourceRequests - out.ResourceLimits = in.ResourceLimits - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Resources. -func (in *Resources) DeepCopy() *Resources { - if in == nil { - return nil - } - out := new(Resources) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScalyrConfiguration) DeepCopyInto(out *ScalyrConfiguration) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalyrConfiguration. -func (in *ScalyrConfiguration) DeepCopy() *ScalyrConfiguration { - if in == nil { - return nil - } - out := new(ScalyrConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Sidecar) DeepCopyInto(out *Sidecar) { - *out = *in - out.Resources = in.Resources - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]corev1.ContainerPort, len(*in)) - copy(*out, *in) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]corev1.EnvVar, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sidecar. -func (in *Sidecar) DeepCopy() *Sidecar { - if in == nil { - return nil - } - out := new(Sidecar) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StandbyDescription) DeepCopyInto(out *StandbyDescription) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StandbyDescription. -func (in *StandbyDescription) DeepCopy() *StandbyDescription { - if in == nil { - return nil - } - out := new(StandbyDescription) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TLSDescription) DeepCopyInto(out *TLSDescription) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSDescription. -func (in *TLSDescription) DeepCopy() *TLSDescription { - if in == nil { - return nil - } - out := new(TLSDescription) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TeamsAPIConfiguration) DeepCopyInto(out *TeamsAPIConfiguration) { - *out = *in - if in.TeamAPIRoleConfiguration != nil { - in, out := &in.TeamAPIRoleConfiguration, &out.TeamAPIRoleConfiguration - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.ProtectedRoles != nil { - in, out := &in.ProtectedRoles, &out.ProtectedRoles - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.PostgresSuperuserTeams != nil { - in, out := &in.PostgresSuperuserTeams, &out.PostgresSuperuserTeams - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TeamsAPIConfiguration. 
-func (in *TeamsAPIConfiguration) DeepCopy() *TeamsAPIConfiguration { - if in == nil { - return nil - } - out := new(TeamsAPIConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in UserFlags) DeepCopyInto(out *UserFlags) { - { - in := &in - *out = make(UserFlags, len(*in)) - copy(*out, *in) - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserFlags. -func (in UserFlags) DeepCopy() UserFlags { - if in == nil { - return nil - } - out := new(UserFlags) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Volume) DeepCopyInto(out *Volume) { - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.Iops != nil { - in, out := &in.Iops, &out.Iops - *out = new(int64) - **out = **in - } - if in.Throughput != nil { - in, out := &in.Throughput, &out.Throughput - *out = new(int64) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. -func (in *Volume) DeepCopy() *Volume { - if in == nil { - return nil - } - out := new(Volume) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/zalando/postgres-operator/pkg/spec/types.go b/vendor/github.com/zalando/postgres-operator/pkg/spec/types.go deleted file mode 100644 index 533aae79..00000000 --- a/vendor/github.com/zalando/postgres-operator/pkg/spec/types.go +++ /dev/null @@ -1,216 +0,0 @@ -package spec - -import ( - "database/sql" - "encoding/json" - "fmt" - "io/ioutil" - "log" - "os" - "strings" - "time" - - "github.com/sirupsen/logrus" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/rest" -) - -// NamespacedName describes the namespace/name pairs used in Kubernetes names. -type NamespacedName types.NamespacedName - -const fileWithNamespace = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" - -// RoleOrigin contains the code of the origin of a role -type RoleOrigin int - -// The rolesOrigin constant values must be sorted by the role priority for -// resolveNameConflict(...) to work. -const ( - RoleOriginUnknown RoleOrigin = iota - RoleOriginManifest - RoleOriginInfrastructure - RoleOriginTeamsAPI - RoleOriginSystem - RoleOriginBootstrap - RoleConnectionPooler -) - -type syncUserOperation int - -// Possible values for the sync user operation (removal of users is not supported yet) -const ( - PGSyncUserAdd = iota - PGsyncUserAlter - PGSyncAlterSet // handle ALTER ROLE SET parameter = value - PGSyncUserRename -) - -// PgUser contains information about a single user. -type PgUser struct { - Origin RoleOrigin `yaml:"-"` - Name string `yaml:"-"` - Namespace string `yaml:"-"` - Password string `yaml:"-"` - Flags []string `yaml:"user_flags"` - MemberOf []string `yaml:"inrole"` - Parameters map[string]string `yaml:"db_parameters"` - AdminRole string `yaml:"admin_role"` - Deleted bool `yaml:"deleted"` -} - -func (user *PgUser) Valid() bool { - return user.Name != "" && user.Password != "" -} - -// PgUserMap maps user names to the definitions. -type PgUserMap map[string]PgUser - -// PgSyncUserRequest has information about a single request to sync a user. 
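[editor's aside] The spec/types.go file deleted here aliases apimachinery's NamespacedName so the operator can attach its own Decode and JSON behaviour (the methods appear just below). A usage sketch, assuming the removed spec package:

package main

import (
	"fmt"

	"github.com/zalando/postgres-operator/pkg/spec"
)

func main() {
	// String() delegates to types.NamespacedName, so this prints
	// "default/acid-minimal-cluster".
	n := spec.NamespacedName{Namespace: "default", Name: "acid-minimal-cluster"}
	fmt.Println(n.String())

	// Decode keeps an explicit namespace; for unqualified names it falls
	// back to GetOperatorNamespace, so set OPERATOR_NAMESPACE when
	// running this outside a pod.
	var m spec.NamespacedName
	if err := m.Decode("team-a/acid-minimal-cluster"); err != nil {
		panic(err)
	}
	fmt.Println(m) // team-a/acid-minimal-cluster
}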
-type PgSyncUserRequest struct { - Kind syncUserOperation - User PgUser -} - -// UserSyncer defines an interface for the implementations to sync users from the manifest to the DB. -type UserSyncer interface { - ProduceSyncRequests(dbUsers PgUserMap, newUsers PgUserMap) (req []PgSyncUserRequest) - ExecuteSyncRequests(req []PgSyncUserRequest, db *sql.DB) error -} - -// LogEntry describes log entry in the RingLogger -type LogEntry struct { - Time time.Time - Level logrus.Level - ClusterName *NamespacedName `json:",omitempty"` - Worker *uint32 `json:",omitempty"` - Message string -} - -// Diff describes diff -type Diff struct { - EventTime time.Time - ProcessTime time.Time - Diff []string -} - -// ControllerStatus describes status of the controller -type ControllerStatus struct { - LastSyncTime int64 - Clusters int - WorkerQueueSize map[int]int -} - -// QueueDump describes cache.FIFO queue -type QueueDump struct { - Keys []string - List []interface{} -} - -// ControllerConfig describes configuration of the controller -type ControllerConfig struct { - RestConfig *rest.Config `json:"-"` - InfrastructureRoles map[string]PgUser - - NoDatabaseAccess bool - NoTeamsAPI bool - CRDReadyWaitInterval time.Duration - CRDReadyWaitTimeout time.Duration - ConfigMapName NamespacedName - Namespace string - - EnableJsonLogging bool -} - -// cached value for the GetOperatorNamespace -var operatorNamespace string - -func (n NamespacedName) String() string { - return types.NamespacedName(n).String() -} - -// MarshalJSON defines marshaling rule for the namespaced name type. -func (n NamespacedName) MarshalJSON() ([]byte, error) { - return []byte("\"" + n.String() + "\""), nil -} - -// Decode converts a (possibly unqualified) string into the namespaced name object. -func (n *NamespacedName) Decode(value string) error { - return n.DecodeWorker(value, GetOperatorNamespace()) -} - -// UnmarshalJSON converts a byte slice to NamespacedName -func (n *NamespacedName) UnmarshalJSON(data []byte) error { - result := NamespacedName{} - var tmp string - if err := json.Unmarshal(data, &tmp); err != nil { - return err - } - if err := result.Decode(tmp); err != nil { - return err - } - *n = result - return nil -} - -// DecodeWorker separates the decode logic to (unit) test -// from obtaining the operator namespace that depends on k8s mounting files at runtime -func (n *NamespacedName) DecodeWorker(value, operatorNamespace string) error { - var ( - name types.NamespacedName - ) - - result := strings.SplitN(value, string(types.Separator), 2) - if len(result) < 2 { - name.Name = result[0] - } else { - name.Name = strings.TrimLeft(result[1], string(types.Separator)) - name.Namespace = result[0] - } - if name.Name == "" { - return fmt.Errorf("incorrect namespaced name: %v", value) - } - if name.Namespace == "" { - name.Namespace = operatorNamespace - } - - *n = NamespacedName(name) - - return nil -} - -func (r RoleOrigin) String() string { - switch r { - case RoleOriginUnknown: - return "unknown" - case RoleOriginManifest: - return "manifest role" - case RoleOriginInfrastructure: - return "infrastructure role" - case RoleOriginTeamsAPI: - return "teams API role" - case RoleOriginSystem: - return "system role" - case RoleOriginBootstrap: - return "bootstrapped role" - case RoleConnectionPooler: - return "connection pooler role" - default: - panic(fmt.Sprintf("bogus role origin value %d", r)) - } -} - -// GetOperatorNamespace assumes serviceaccount secret is mounted by kubernetes -// Placing this func here instead of pgk/util avoids 
circular import -func GetOperatorNamespace() string { - if operatorNamespace == "" { - if namespaceFromEnvironment := os.Getenv("OPERATOR_NAMESPACE"); namespaceFromEnvironment != "" { - return namespaceFromEnvironment - } - operatorNamespaceBytes, err := ioutil.ReadFile(fileWithNamespace) - if err != nil { - log.Fatalf("Unable to detect operator namespace from within its pod due to: %v", err) - } - operatorNamespace = string(operatorNamespaceBytes) - } - return operatorNamespace -} diff --git a/vendor/github.com/zalando/postgres-operator/pkg/util/config/config.go b/vendor/github.com/zalando/postgres-operator/pkg/util/config/config.go deleted file mode 100644 index 78e0a6c4..00000000 --- a/vendor/github.com/zalando/postgres-operator/pkg/util/config/config.go +++ /dev/null @@ -1,288 +0,0 @@ -package config - -import ( - "encoding/json" - "strings" - "time" - - "fmt" - - "github.com/zalando/postgres-operator/pkg/spec" - "github.com/zalando/postgres-operator/pkg/util/constants" - v1 "k8s.io/api/core/v1" -) - -// CRD describes CustomResourceDefinition specific configuration parameters -type CRD struct { - ReadyWaitInterval time.Duration `name:"ready_wait_interval" default:"4s"` - ReadyWaitTimeout time.Duration `name:"ready_wait_timeout" default:"30s"` - ResyncPeriod time.Duration `name:"resync_period" default:"30m"` - RepairPeriod time.Duration `name:"repair_period" default:"5m"` - EnableCRDValidation *bool `name:"enable_crd_validation" default:"true"` -} - -// Resources describes kubernetes resource specific configuration parameters -type Resources struct { - ResourceCheckInterval time.Duration `name:"resource_check_interval" default:"3s"` - ResourceCheckTimeout time.Duration `name:"resource_check_timeout" default:"10m"` - PodLabelWaitTimeout time.Duration `name:"pod_label_wait_timeout" default:"10m"` - PodDeletionWaitTimeout time.Duration `name:"pod_deletion_wait_timeout" default:"10m"` - PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"` - SpiloRunAsUser *int64 `name:"spilo_runasuser"` - SpiloRunAsGroup *int64 `name:"spilo_runasgroup"` - SpiloFSGroup *int64 `name:"spilo_fsgroup"` - PodPriorityClassName string `name:"pod_priority_class_name"` - ClusterDomain string `name:"cluster_domain" default:"cluster.local"` - SpiloPrivileged bool `name:"spilo_privileged" default:"false"` - SpiloAllowPrivilegeEscalation *bool `name:"spilo_allow_privilege_escalation" default:"true"` - AdditionalPodCapabilities []string `name:"additional_pod_capabilities" default:""` - ClusterLabels map[string]string `name:"cluster_labels" default:"application:spilo"` - InheritedLabels []string `name:"inherited_labels" default:""` - InheritedAnnotations []string `name:"inherited_annotations" default:""` - DownscalerAnnotations []string `name:"downscaler_annotations"` - ClusterNameLabel string `name:"cluster_name_label" default:"cluster-name"` - DeleteAnnotationDateKey string `name:"delete_annotation_date_key"` - DeleteAnnotationNameKey string `name:"delete_annotation_name_key"` - PodRoleLabel string `name:"pod_role_label" default:"spilo-role"` - PodToleration map[string]string `name:"toleration" default:""` - DefaultCPURequest string `name:"default_cpu_request" default:"100m"` - DefaultMemoryRequest string `name:"default_memory_request" default:"100Mi"` - DefaultCPULimit string `name:"default_cpu_limit" default:"1"` - DefaultMemoryLimit string `name:"default_memory_limit" default:"500Mi"` - MinCPULimit string `name:"min_cpu_limit" default:"250m"` - MinMemoryLimit string 
`name:"min_memory_limit" default:"250Mi"` - PodEnvironmentConfigMap spec.NamespacedName `name:"pod_environment_configmap"` - PodEnvironmentSecret string `name:"pod_environment_secret"` - NodeReadinessLabel map[string]string `name:"node_readiness_label" default:""` - MaxInstances int32 `name:"max_instances" default:"-1"` - MinInstances int32 `name:"min_instances" default:"-1"` - ShmVolume *bool `name:"enable_shm_volume" default:"true"` -} - -type InfrastructureRole struct { - // Name of a secret which describes the role, and optionally name of a - // configmap with an extra information - SecretName spec.NamespacedName - - UserKey string - PasswordKey string - RoleKey string - - DefaultUserValue string - DefaultRoleValue string - - // This field point out the detailed yaml definition of the role, if exists - Details string - - // Specify if a secret contains multiple fields in the following format: - // - // %(userkey)idx: ... - // %(passwordkey)idx: ... - // %(rolekey)idx: ... - // - // If it does, Name/Password/Role are interpreted not as unique field - // names, but as a template. - - Template bool -} - -// Auth describes authentication specific configuration parameters -type Auth struct { - SecretNameTemplate StringTemplate `name:"secret_name_template" default:"{username}.{cluster}.credentials.{tprkind}.{tprgroup}"` - PamRoleName string `name:"pam_role_name" default:"zalandos"` - PamConfiguration string `name:"pam_configuration" default:"https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees"` - TeamsAPIUrl string `name:"teams_api_url" default:"https://teams.example.com/api/"` - OAuthTokenSecretName spec.NamespacedName `name:"oauth_token_secret_name" default:"postgresql-operator"` - InfrastructureRolesSecretName spec.NamespacedName `name:"infrastructure_roles_secret_name"` - InfrastructureRoles []*InfrastructureRole `name:"-"` - InfrastructureRolesDefs string `name:"infrastructure_roles_secrets"` - SuperUsername string `name:"super_username" default:"postgres"` - ReplicationUsername string `name:"replication_username" default:"standby"` -} - -// Scalyr holds the configuration for the Scalyr Agent sidecar for log shipping: -type Scalyr struct { - ScalyrAPIKey string `name:"scalyr_api_key" default:""` - ScalyrImage string `name:"scalyr_image" default:""` - ScalyrServerURL string `name:"scalyr_server_url" default:"https://upload.eu.scalyr.com"` - ScalyrCPURequest string `name:"scalyr_cpu_request" default:"100m"` - ScalyrMemoryRequest string `name:"scalyr_memory_request" default:"50Mi"` - ScalyrCPULimit string `name:"scalyr_cpu_limit" default:"1"` - ScalyrMemoryLimit string `name:"scalyr_memory_limit" default:"500Mi"` -} - -// LogicalBackup defines configuration for logical backup -type LogicalBackup struct { - LogicalBackupSchedule string `name:"logical_backup_schedule" default:"30 00 * * *"` - LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"registry.opensource.zalan.do/acid/logical-backup:v1.7.1"` - LogicalBackupProvider string `name:"logical_backup_provider" default:"s3"` - LogicalBackupS3Bucket string `name:"logical_backup_s3_bucket" default:""` - LogicalBackupS3Region string `name:"logical_backup_s3_region" default:""` - LogicalBackupS3Endpoint string `name:"logical_backup_s3_endpoint" default:""` - LogicalBackupS3AccessKeyID string `name:"logical_backup_s3_access_key_id" default:""` - LogicalBackupS3SecretAccessKey string `name:"logical_backup_s3_secret_access_key" default:""` - LogicalBackupS3SSE string `name:"logical_backup_s3_sse" 
default:""` - LogicalBackupGoogleApplicationCredentials string `name:"logical_backup_google_application_credentials" default:""` - LogicalBackupJobPrefix string `name:"logical_backup_job_prefix" default:"logical-backup-"` -} - -// Operator options for connection pooler -type ConnectionPooler struct { - NumberOfInstances *int32 `name:"connection_pooler_number_of_instances" default:"2"` - Schema string `name:"connection_pooler_schema" default:"pooler"` - User string `name:"connection_pooler_user" default:"pooler"` - Image string `name:"connection_pooler_image" default:"registry.opensource.zalan.do/acid/pgbouncer"` - Mode string `name:"connection_pooler_mode" default:"transaction"` - MaxDBConnections *int32 `name:"connection_pooler_max_db_connections" default:"60"` - ConnectionPoolerDefaultCPURequest string `name:"connection_pooler_default_cpu_request" default:"500m"` - ConnectionPoolerDefaultMemoryRequest string `name:"connection_pooler_default_memory_request" default:"100Mi"` - ConnectionPoolerDefaultCPULimit string `name:"connection_pooler_default_cpu_limit" default:"1"` - ConnectionPoolerDefaultMemoryLimit string `name:"connection_pooler_default_memory_limit" default:"100Mi"` -} - -// Config describes operator config -type Config struct { - CRD - Resources - Auth - Scalyr - LogicalBackup - ConnectionPooler - - WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to' - KubernetesUseConfigMaps bool `name:"kubernetes_use_configmaps" default:"false"` - EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS - DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-14:2.1-p3"` - SidecarImages map[string]string `name:"sidecar_docker_images"` // deprecated in favour of SidecarContainers - SidecarContainers []v1.Container `name:"sidecars"` - PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"` - // value of this string must be valid JSON or YAML; see initPodServiceAccount - PodServiceAccountDefinition string `name:"pod_service_account_definition" default:""` - PodServiceAccountRoleBindingDefinition string `name:"pod_service_account_role_binding_definition" default:""` - MasterPodMoveTimeout time.Duration `name:"master_pod_move_timeout" default:"20m"` - DbHostedZone string `name:"db_hosted_zone" default:"db.example.com"` - AWSRegion string `name:"aws_region" default:"eu-central-1"` - WALES3Bucket string `name:"wal_s3_bucket"` - LogS3Bucket string `name:"log_s3_bucket"` - KubeIAMRole string `name:"kube_iam_role"` - WALGSBucket string `name:"wal_gs_bucket"` - GCPCredentials string `name:"gcp_credentials"` - WALAZStorageAccount string `name:"wal_az_storage_account"` - AdditionalSecretMount string `name:"additional_secret_mount"` - AdditionalSecretMountPath string `name:"additional_secret_mount_path" default:"/meta/credentials"` - EnableEBSGp3Migration bool `name:"enable_ebs_gp3_migration" default:"false"` - EnableEBSGp3MigrationMaxSize int64 `name:"enable_ebs_gp3_migration_max_size" default:"1000"` - DebugLogging bool `name:"debug_logging" default:"true"` - EnableDBAccess bool `name:"enable_database_access" default:"true"` - EnableTeamsAPI bool `name:"enable_teams_api" default:"true"` - EnableTeamSuperuser bool `name:"enable_team_superuser" default:"false"` - TeamAdminRole string `name:"team_admin_role" default:"admin"` - RoleDeletionSuffix string 
`name:"role_deletion_suffix" default:"_deleted"` - EnableTeamMemberDeprecation bool `name:"enable_team_member_deprecation" default:"false"` - EnableAdminRoleForUsers bool `name:"enable_admin_role_for_users" default:"true"` - EnablePostgresTeamCRD bool `name:"enable_postgres_team_crd" default:"false"` - EnablePostgresTeamCRDSuperusers bool `name:"enable_postgres_team_crd_superusers" default:"false"` - EnableMasterLoadBalancer bool `name:"enable_master_load_balancer" default:"true"` - EnableReplicaLoadBalancer bool `name:"enable_replica_load_balancer" default:"false"` - CustomServiceAnnotations map[string]string `name:"custom_service_annotations"` - CustomPodAnnotations map[string]string `name:"custom_pod_annotations"` - EnablePodAntiAffinity bool `name:"enable_pod_antiaffinity" default:"false"` - PodAntiAffinityTopologyKey string `name:"pod_antiaffinity_topology_key" default:"kubernetes.io/hostname"` - StorageResizeMode string `name:"storage_resize_mode" default:"pvc"` - EnableLoadBalancer *bool `name:"enable_load_balancer"` // deprecated and kept for backward compatibility - ExternalTrafficPolicy string `name:"external_traffic_policy" default:"Cluster"` - MasterDNSNameFormat StringTemplate `name:"master_dns_name_format" default:"{cluster}.{team}.{hostedzone}"` - ReplicaDNSNameFormat StringTemplate `name:"replica_dns_name_format" default:"{cluster}-repl.{team}.{hostedzone}"` - PDBNameFormat StringTemplate `name:"pdb_name_format" default:"postgres-{cluster}-pdb"` - EnablePodDisruptionBudget *bool `name:"enable_pod_disruption_budget" default:"true"` - EnableInitContainers *bool `name:"enable_init_containers" default:"true"` - EnableSidecars *bool `name:"enable_sidecars" default:"true"` - Workers uint32 `name:"workers" default:"8"` - APIPort int `name:"api_port" default:"8080"` - RingLogLines int `name:"ring_log_lines" default:"100"` - ClusterHistoryEntries int `name:"cluster_history_entries" default:"1000"` - TeamAPIRoleConfiguration map[string]string `name:"team_api_role_configuration" default:"log_statement:all"` - PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"` - PodManagementPolicy string `name:"pod_management_policy" default:"ordered_ready"` - ProtectedRoles []string `name:"protected_role_names" default:"admin"` - PostgresSuperuserTeams []string `name:"postgres_superuser_teams" default:""` - SetMemoryRequestToLimit bool `name:"set_memory_request_to_limit" default:"false"` - EnableLazySpiloUpgrade bool `name:"enable_lazy_spilo_upgrade" default:"false"` - EnableCrossNamespaceSecret bool `name:"enable_cross_namespace_secret" default:"false"` - EnablePgVersionEnvVar bool `name:"enable_pgversion_env_var" default:"true"` - EnableSpiloWalPathCompat bool `name:"enable_spilo_wal_path_compat" default:"false"` - MajorVersionUpgradeMode string `name:"major_version_upgrade_mode" default:"off"` - MinimalMajorVersion string `name:"minimal_major_version" default:"9.6"` - TargetMajorVersion string `name:"target_major_version" default:"14"` -} - -// MustMarshal marshals the config or panics -func (c Config) MustMarshal() string { - b, err := json.MarshalIndent(c, "", " ") - if err != nil { - panic(err) - } - - return string(b) -} - -// NewFromMap creates Config from the map -func NewFromMap(m map[string]string) *Config { - cfg := Config{} - fields, _ := structFields(&cfg) - - for _, structField := range fields { - key := strings.ToLower(structField.Name) - value, ok := m[key] - if !ok && structField.Default != "" { - value = structField.Default - } - - if value == "" 
{ - continue - } - err := processField(value, structField.Field) - if err != nil { - panic(err) - } - } - if err := validate(&cfg); err != nil { - panic(err) - } - - return &cfg -} - -// Copy creates a copy of the config -func Copy(c *Config) Config { - cfg := *c - - cfg.ClusterLabels = make(map[string]string, len(c.ClusterLabels)) - for k, v := range c.ClusterLabels { - cfg.ClusterLabels[k] = v - } - - return cfg -} - -func validate(cfg *Config) (err error) { - if cfg.MinInstances > 0 && cfg.MaxInstances > 0 && cfg.MinInstances > cfg.MaxInstances { - err = fmt.Errorf("minimum number of instances %d is set higher than the maximum number %d", - cfg.MinInstances, cfg.MaxInstances) - } - if cfg.Workers == 0 { - err = fmt.Errorf("number of workers should be higher than 0") - } - - if *cfg.ConnectionPooler.NumberOfInstances < constants.ConnectionPoolerMinInstances { - msg := "number of connection pooler instances should be higher than %d" - err = fmt.Errorf(msg, constants.ConnectionPoolerMinInstances) - } - - if cfg.ConnectionPooler.User == cfg.SuperUsername { - msg := "Connection pool user is not allowed to be the same as super user, username: %s" - err = fmt.Errorf(msg, cfg.ConnectionPooler.User) - } - - return -} diff --git a/vendor/github.com/zalando/postgres-operator/pkg/util/config/util.go b/vendor/github.com/zalando/postgres-operator/pkg/util/config/util.go deleted file mode 100644 index 4c1bdf7e..00000000 --- a/vendor/github.com/zalando/postgres-operator/pkg/util/config/util.go +++ /dev/null @@ -1,246 +0,0 @@ -package config - -import ( - "encoding/json" - "fmt" - "reflect" - "strconv" - "strings" - "time" -) - -type decoder interface { - Decode(value string) error -} - -type fieldInfo struct { - Name string - Default string - Field reflect.Value -} - -// StringTemplate is a convenience alias -type StringTemplate string - -func decoderFrom(field reflect.Value) (d decoder) { - // it may be impossible for a struct field to fail this check - if !field.CanInterface() { - return - } - - d, ok := field.Interface().(decoder) - if !ok && field.CanAddr() { - d, _ = field.Addr().Interface().(decoder) - } - - return d -} - -// taken from github.com/kelseyhightower/envconfig -func structFields(spec interface{}) ([]fieldInfo, error) { - s := reflect.ValueOf(spec).Elem() - - // over allocate an info array, we will extend if needed later - infos := make([]fieldInfo, 0, s.NumField()) - for i := 0; i < s.NumField(); i++ { - f := s.Field(i) - ftype := s.Type().Field(i) - - fieldName := ftype.Tag.Get("name") - if fieldName == "" { - fieldName = strings.ToLower(ftype.Name) - } - - // Capture information about the config variable - info := fieldInfo{ - Name: fieldName, - Field: f, - Default: ftype.Tag.Get("default"), - } - infos = append(infos, info) - - if f.Kind() == reflect.Struct { - // honor Decode if present - if decoderFrom(f) == nil { - embeddedPtr := f.Addr().Interface() - embeddedInfos, err := structFields(embeddedPtr) - if err != nil { - return nil, err - } - infos = append(infos[:len(infos)-1], embeddedInfos...) 
- - continue - } - } - } - - return infos, nil -} - -func processField(value string, field reflect.Value) error { - typ := field.Type() - - decoder := decoderFrom(field) - if decoder != nil { - return decoder.Decode(value) - } - - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - if field.IsNil() { - field.Set(reflect.New(typ)) - } - field = field.Elem() - } - - switch typ.Kind() { - case reflect.String: - field.SetString(value) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - var ( - val int64 - err error - ) - if field.Kind() == reflect.Int64 && typ.PkgPath() == "time" && typ.Name() == "Duration" { - var d time.Duration - d, err = time.ParseDuration(value) - val = int64(d) - } else { - val, err = strconv.ParseInt(value, 0, typ.Bits()) - } - if err != nil { - return err - } - - field.SetInt(val) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - val, err := strconv.ParseUint(value, 0, typ.Bits()) - if err != nil { - return err - } - field.SetUint(val) - case reflect.Bool: - val, err := strconv.ParseBool(value) - if err != nil { - return err - } - field.SetBool(val) - case reflect.Float32, reflect.Float64: - val, err := strconv.ParseFloat(value, typ.Bits()) - if err != nil { - return err - } - field.SetFloat(val) - case reflect.Slice: - vals := strings.Split(value, ",") - sl := reflect.MakeSlice(typ, len(vals), len(vals)) - for i, val := range vals { - err := processField(val, sl.Index(i)) - if err != nil { - return err - } - } - field.Set(sl) - case reflect.Map: - pairs, err := getMapPairsFromString(value) - if err != nil { - return fmt.Errorf("could not split value %q into map items: %v", value, err) - } - mp := reflect.MakeMap(typ) - for _, pair := range pairs { - kvpair := strings.Split(pair, ":") - if len(kvpair) != 2 { - return fmt.Errorf("invalid map item: %q", pair) - } - k := reflect.New(typ.Key()).Elem() - err := processField(kvpair[0], k) - if err != nil { - return err - } - v := reflect.New(typ.Elem()).Elem() - err = processField(kvpair[1], v) - if err != nil { - return err - } - mp.SetMapIndex(k, v) - } - field.Set(mp) - } - - return nil -} - -type parserState int - -const ( - plain parserState = iota - doubleQuoted - singleQuoted -) - -// Split the pair candidates by commas not located inside open quotes -// Escape characters are not supported for simplicity, as we don't -// expect to find them inside the map values for our use cases -func getMapPairsFromString(value string) (pairs []string, err error) { - pairs = make([]string, 0) - state := plain - var start, quote int - - for i, ch := range strings.Split(value, "") { - if (ch == `"` || ch == `'`) && i > 0 && value[i-1] == '\\' { - fmt.Printf("Parser warning: ecape character '\\' have no effect on quotes inside the configuration value %s\n", value) - } - if ch == `"` { - if state == plain { - state = doubleQuoted - quote = i - } else if state == doubleQuoted { - state = plain - quote = 0 - } - } - if ch == "'" { - if state == plain { - state = singleQuoted - quote = i - } else if state == singleQuoted { - state = plain - quote = 0 - } - } - if ch == "," && state == plain { - pairs = append(pairs, strings.Trim(value[start:i], " \t")) - start = i + 1 - } - } - if state != plain { - err = fmt.Errorf("unmatched quote starting at position %d", quote+1) - pairs = nil - } else { - pairs = append(pairs, strings.Trim(value[start:], " \t")) - } - return -} - -// Decode cast value to StringTemplate -func (f *StringTemplate) Decode(value string) error { - *f = 
StringTemplate(value) - - return nil -} - -// Format formatted string from StringTemplate -func (f *StringTemplate) Format(a ...string) string { - res := string(*f) - - for i := 0; i < len(a); i += 2 { - res = strings.Replace(res, "{"+a[i]+"}", a[i+1], -1) - } - - return res -} - -// MarshalJSON converts a StringTemplate to byte slice -func (f StringTemplate) MarshalJSON() ([]byte, error) { - return json.Marshal(string(f)) -} diff --git a/vendor/github.com/zalando/postgres-operator/pkg/util/constants/annotations.go b/vendor/github.com/zalando/postgres-operator/pkg/util/constants/annotations.go deleted file mode 100644 index fc5a84fa..00000000 --- a/vendor/github.com/zalando/postgres-operator/pkg/util/constants/annotations.go +++ /dev/null @@ -1,11 +0,0 @@ -package constants - -// Names and values in Kubernetes annotation for services, statefulsets and volumes -const ( - ZalandoDNSNameAnnotation = "external-dns.alpha.kubernetes.io/hostname" - ElbTimeoutAnnotationName = "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout" - ElbTimeoutAnnotationValue = "3600" - KubeIAmAnnotation = "iam.amazonaws.com/role" - VolumeStorateProvisionerAnnotation = "pv.kubernetes.io/provisioned-by" - PostgresqlControllerAnnotationKey = "acid.zalan.do/controller" -) diff --git a/vendor/github.com/zalando/postgres-operator/pkg/util/constants/aws.go b/vendor/github.com/zalando/postgres-operator/pkg/util/constants/aws.go deleted file mode 100644 index f1cfd597..00000000 --- a/vendor/github.com/zalando/postgres-operator/pkg/util/constants/aws.go +++ /dev/null @@ -1,17 +0,0 @@ -package constants - -import "time" - -// AWS specific constants used by other modules -const ( - // EBS related constants - EBSVolumeIDStart = "/vol-" - EBSProvisioner = "kubernetes.io/aws-ebs" - //https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_VolumeModification.html - EBSVolumeStateModifying = "modifying" - EBSVolumeStateOptimizing = "optimizing" - EBSVolumeStateFailed = "failed" - EBSVolumeStateCompleted = "completed" - EBSVolumeResizeWaitInterval = 2 * time.Second - EBSVolumeResizeWaitTimeout = 30 * time.Second -) diff --git a/vendor/github.com/zalando/postgres-operator/pkg/util/constants/kubernetes.go b/vendor/github.com/zalando/postgres-operator/pkg/util/constants/kubernetes.go deleted file mode 100644 index fd2712ac..00000000 --- a/vendor/github.com/zalando/postgres-operator/pkg/util/constants/kubernetes.go +++ /dev/null @@ -1,13 +0,0 @@ -package constants - -import "time" - -// General kubernetes-related constants -const ( - PostgresContainerName = "postgres" - K8sAPIPath = "/apis" - - QueueResyncPeriodPod = 5 * time.Minute - QueueResyncPeriodTPR = 5 * time.Minute - QueueResyncPeriodNode = 5 * time.Minute -) diff --git a/vendor/github.com/zalando/postgres-operator/pkg/util/constants/pooler.go b/vendor/github.com/zalando/postgres-operator/pkg/util/constants/pooler.go deleted file mode 100644 index ded795bb..00000000 --- a/vendor/github.com/zalando/postgres-operator/pkg/util/constants/pooler.go +++ /dev/null @@ -1,18 +0,0 @@ -package constants - -// Connection pooler specific constants -const ( - ConnectionPoolerUserName = "pooler" - ConnectionPoolerSchemaName = "pooler" - ConnectionPoolerDefaultType = "pgbouncer" - ConnectionPoolerDefaultMode = "transaction" - ConnectionPoolerDefaultCpuRequest = "500m" - ConnectionPoolerDefaultCpuLimit = "1" - ConnectionPoolerDefaultMemoryRequest = "100Mi" - ConnectionPoolerDefaultMemoryLimit = "100Mi" - - ConnectionPoolerContainer = 0 - ConnectionPoolerMaxDBConnections = 60 - 
ConnectionPoolerMaxClientConnections = 10000 - ConnectionPoolerMinInstances = 1 -) diff --git a/vendor/github.com/zalando/postgres-operator/pkg/util/constants/postgresql.go b/vendor/github.com/zalando/postgres-operator/pkg/util/constants/postgresql.go deleted file mode 100644 index 41bfdd66..00000000 --- a/vendor/github.com/zalando/postgres-operator/pkg/util/constants/postgresql.go +++ /dev/null @@ -1,18 +0,0 @@ -package constants - -import "time" - -// PostgreSQL specific constants -const ( - DataVolumeName = "pgdata" - PostgresDataMount = "/home/postgres/pgdata" - PostgresDataPath = PostgresDataMount + "/pgroot" - - PatroniPGParametersParameterName = "parameters" - - PostgresConnectRetryTimeout = 2 * time.Minute - PostgresConnectTimeout = 15 * time.Second - - ShmVolumeName = "dshm" - ShmVolumePath = "/dev/shm" -) diff --git a/vendor/github.com/zalando/postgres-operator/pkg/util/constants/roles.go b/vendor/github.com/zalando/postgres-operator/pkg/util/constants/roles.go deleted file mode 100644 index dd906fe8..00000000 --- a/vendor/github.com/zalando/postgres-operator/pkg/util/constants/roles.go +++ /dev/null @@ -1,22 +0,0 @@ -package constants - -// Roles specific constants -const ( - PasswordLength = 64 - SuperuserKeyName = "superuser" - ConnectionPoolerUserKeyName = "pooler" - ReplicationUserKeyName = "replication" - RoleFlagSuperuser = "SUPERUSER" - RoleFlagInherit = "INHERIT" - RoleFlagLogin = "LOGIN" - RoleFlagNoLogin = "NOLOGIN" - RoleFlagCreateRole = "CREATEROLE" - RoleFlagCreateDB = "CREATEDB" - RoleFlagReplication = "REPLICATION" - RoleFlagByPassRLS = "BYPASSRLS" - OwnerRoleNameSuffix = "_owner" - ReaderRoleNameSuffix = "_reader" - WriterRoleNameSuffix = "_writer" - UserRoleNameSuffix = "_user" - DefaultSearchPath = "\"$user\"" -) diff --git a/vendor/github.com/zalando/postgres-operator/pkg/util/constants/units.go b/vendor/github.com/zalando/postgres-operator/pkg/util/constants/units.go deleted file mode 100644 index e124e0b7..00000000 --- a/vendor/github.com/zalando/postgres-operator/pkg/util/constants/units.go +++ /dev/null @@ -1,6 +0,0 @@ -package constants - -// Measurement-unit definitions -const ( - Gigabyte = 1073741824 -) diff --git a/vendor/github.com/zalando/postgres-operator/pkg/util/util.go b/vendor/github.com/zalando/postgres-operator/pkg/util/util.go deleted file mode 100644 index a5292558..00000000 --- a/vendor/github.com/zalando/postgres-operator/pkg/util/util.go +++ /dev/null @@ -1,354 +0,0 @@ -package util - -import ( - "crypto/hmac" - "crypto/md5" // #nosec we need it to for PostgreSQL md5 passwords - cryptoRand "crypto/rand" - "crypto/sha256" - "encoding/base64" - "encoding/hex" - "fmt" - "math/big" - "math/rand" - "reflect" - "regexp" - "sort" - "strings" - "time" - - "github.com/motomux/pretty" - resource "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/zalando/postgres-operator/pkg/spec" - "golang.org/x/crypto/pbkdf2" -) - -const ( - md5prefix = "md5" - scramsha256prefix = "SCRAM-SHA-256" - saltlength = 16 - iterations = 4096 -) - -var passwordChars = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") - -func init() { - rand.Seed(time.Now().Unix()) -} - -// helper function to get bool pointers -func True() *bool { - b := true - return &b -} - -func False() *bool { - b := false - return &b -} - -// RandomPassword generates a secure, random alphanumeric password of a given length. 
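[editor's aside] The config package removed above (structFields/processField, adapted from kelseyhightower/envconfig) drives everything off `name`/`default` struct tags: NewFromMap looks each field up in the supplied map and falls back to the tag default. A self-contained sketch of that convention, with hypothetical fields, that only prints the resolved values rather than setting them (the real processField dispatches on the field kind):

package main

import (
	"fmt"
	"reflect"
)

// miniConfig mirrors the tag convention of the removed config structs;
// the fields and defaults here are hypothetical.
type miniConfig struct {
	Workers   uint32 `name:"workers" default:"8"`
	APIPort   int    `name:"api_port" default:"8080"`
	DebugMode bool   `name:"debug_logging" default:"true"`
}

func main() {
	m := map[string]string{"workers": "4"} // ConfigMap-style input
	cfg := miniConfig{}
	v := reflect.ValueOf(&cfg).Elem()
	for i := 0; i < v.NumField(); i++ {
		ft := v.Type().Field(i)
		val, ok := m[ft.Tag.Get("name")]
		if !ok {
			val = ft.Tag.Get("default") // fall back to the tag default
		}
		fmt.Printf("%s = %s\n", ft.Name, val)
	}
}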
-func RandomPassword(n int) string { - b := make([]byte, n) - for i := range b { - maxN := big.NewInt(int64(len(passwordChars))) - if n, err := cryptoRand.Int(cryptoRand.Reader, maxN); err != nil { - panic(fmt.Errorf("Unable to generate secure, random password: %v", err)) - } else { - b[i] = passwordChars[n.Int64()] - } - } - return string(b) -} - -// NameFromMeta converts a metadata object to the NamespacedName name representation. -func NameFromMeta(meta metav1.ObjectMeta) spec.NamespacedName { - return spec.NamespacedName{ - Namespace: meta.Namespace, - Name: meta.Name, - } -} - -type Hasher func(user spec.PgUser) string -type Random func(n int) string - -type Encryptor struct { - encrypt Hasher - random Random -} - -func NewEncryptor(encryption string) *Encryptor { - e := Encryptor{random: RandomPassword} - m := map[string]Hasher{ - "md5": e.PGUserPasswordMD5, - "scram-sha-256": e.PGUserPasswordScramSHA256, - } - hasher, ok := m[encryption] - if !ok { - hasher = e.PGUserPasswordMD5 - } - e.encrypt = hasher - return &e -} - -func (e *Encryptor) PGUserPassword(user spec.PgUser) string { - if (len(user.Password) == md5.Size*2+len(md5prefix) && user.Password[:3] == md5prefix) || - (len(user.Password) > len(scramsha256prefix) && user.Password[:len(scramsha256prefix)] == scramsha256prefix) || user.Password == "" { - // Avoid processing already encrypted or empty passwords - return user.Password - } - return e.encrypt(user) -} - -func (e *Encryptor) PGUserPasswordMD5(user spec.PgUser) string { - s := md5.Sum([]byte(user.Password + user.Name)) // #nosec, using md5 since PostgreSQL uses it for hashing passwords. - return md5prefix + hex.EncodeToString(s[:]) -} - -func (e *Encryptor) PGUserPasswordScramSHA256(user spec.PgUser) string { - salt := []byte(e.random(saltlength)) - key := pbkdf2.Key([]byte(user.Password), salt, iterations, 32, sha256.New) - mac := hmac.New(sha256.New, key) - mac.Write([]byte("Server Key")) - serverKey := mac.Sum(nil) - mac = hmac.New(sha256.New, key) - mac.Write([]byte("Client Key")) - clientKey := mac.Sum(nil) - storedKey := sha256.Sum256(clientKey) - pass := fmt.Sprintf("%s$%v:%s$%s:%s", - scramsha256prefix, - iterations, - base64.StdEncoding.EncodeToString(salt), - base64.StdEncoding.EncodeToString(storedKey[:]), - base64.StdEncoding.EncodeToString(serverKey), - ) - return pass -} - -// Diff returns diffs between 2 objects -func Diff(a, b interface{}) []string { - return pretty.Diff(a, b) -} - -// PrettyDiff shows the diff between 2 objects in an easy to understand format. It is mainly used for debugging output. -func PrettyDiff(a, b interface{}) string { - return strings.Join(Diff(a, b), "\n") -} - -// Compare two string slices while ignoring the order of elements -func IsEqualIgnoreOrder(a, b []string) bool { - if len(a) != len(b) { - return false - } - a_copy := make([]string, len(a)) - b_copy := make([]string, len(b)) - copy(a_copy, a) - copy(b_copy, b) - sort.Strings(a_copy) - sort.Strings(b_copy) - - return reflect.DeepEqual(a_copy, b_copy) -} - -// SliceReplaceElement -func StringSliceReplaceElement(s []string, a, b string) (result []string) { - tmp := make([]string, 0, len(s)) - for _, str := range s { - if str == a { - str = b - } - tmp = append(tmp, str) - } - return tmp -} - -// SubstractStringSlices finds elements in a that are not in b and return them as a result slice. 
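[editor's aside] NewEncryptor and PGUserPassword above are the operator's whole password-hashing surface: pick md5 or scram-sha-256, and already-hashed or empty passwords pass through untouched. A usage sketch against the removed util and spec packages:

package main

import (
	"fmt"

	"github.com/zalando/postgres-operator/pkg/spec"
	"github.com/zalando/postgres-operator/pkg/util"
)

func main() {
	user := spec.PgUser{Name: "app_owner", Password: util.RandomPassword(16)}

	// md5 hashes password+name, matching PostgreSQL's legacy scheme.
	md5enc := util.NewEncryptor("md5")
	fmt.Println(md5enc.PGUserPassword(user)) // "md5" + 32 hex chars

	// scram-sha-256 emits a full verifier:
	// SCRAM-SHA-256$4096:<salt>$<storedKey>:<serverKey>
	scram := util.NewEncryptor("scram-sha-256")
	fmt.Println(scram.PGUserPassword(user))

	// Feeding the result back in is a no-op: PGUserPassword detects the
	// prefix and returns already-encrypted passwords unchanged.
	hashed := user
	hashed.Password = scram.PGUserPassword(user)
	fmt.Println(scram.PGUserPassword(hashed) == hashed.Password) // true
}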
-func SubstractStringSlices(a []string, b []string) (result []string, equal bool) { - // Slices are assumed to contain unique elements only -OUTER: - for _, vala := range a { - for _, valb := range b { - if vala == valb { - continue OUTER - } - } - result = append(result, vala) - } - return result, len(result) == 0 -} - -// FindNamedStringSubmatch returns a map of strings holding the text of the matches of the r regular expression -func FindNamedStringSubmatch(r *regexp.Regexp, s string) map[string]string { - matches := r.FindStringSubmatch(s) - grNames := r.SubexpNames() - - if matches == nil { - return nil - } - - groupMatches := 0 - res := make(map[string]string, len(grNames)) - for i, n := range grNames { - if n == "" { - continue - } - - res[n] = matches[i] - groupMatches++ - } - - if groupMatches == 0 { - return nil - } - - return res -} - -// SliceContains -func SliceContains(slice interface{}, item interface{}) bool { - s := reflect.ValueOf(slice) - if s.Kind() != reflect.Slice { - panic("Invalid data-type") - } - for i := 0; i < s.Len(); i++ { - if s.Index(i).Interface() == item { - return true - } - } - return false -} - -// MapContains returns true if and only if haystack contains all the keys from the needle with matching corresponding values -func MapContains(haystack, needle map[string]string) bool { - if len(haystack) < len(needle) { - return false - } - - for k, v := range needle { - v2, ok := haystack[k] - if !ok || v2 != v { - return false - } - } - - return true -} - -// Coalesce returns the first argument if it is not null, otherwise the second one. -func Coalesce(val, defaultVal string) string { - if val == "" { - return defaultVal - } - return val -} - -// CoalesceStrArr returns the first argument if it is not null, otherwise the second one. -func CoalesceStrArr(val, defaultVal []string) []string { - if len(val) == 0 { - return defaultVal - } - return val -} - -// CoalesceStrMap returns the first argument if it is not null, otherwise the second one. -func CoalesceStrMap(val, defaultVal map[string]string) map[string]string { - if len(val) == 0 { - return defaultVal - } - return val -} - -// CoalesceInt works like coalesce but for int -func CoalesceInt(val, defaultVal int) int { - if val == 0 { - return defaultVal - } - return val -} - -// CoalesceInt32 works like coalesce but for *int32 -func CoalesceInt32(val, defaultVal *int32) *int32 { - if val == nil { - return defaultVal - } - return val -} - -// CoalesceUInt32 works like coalesce but for uint32 -func CoalesceUInt32(val, defaultVal uint32) uint32 { - if val == 0 { - return defaultVal - } - return val -} - -// CoalesceInt64 works like coalesce but for int64 -func CoalesceInt64(val, defaultVal int64) int64 { - if val == 0 { - return defaultVal - } - return val -} - -// CoalesceBool works like coalesce but for *bool -func CoalesceBool(val, defaultVal *bool) *bool { - if val == nil { - return defaultVal - } - return val -} - -// CoalesceDuration works like coalesce but for time.Duration -func CoalesceDuration(val time.Duration, defaultVal string) time.Duration { - if val == 0 { - duration, err := time.ParseDuration(defaultVal) - if err != nil { - panic(err) - } - return duration - } - return val -} - -// Test if any of the values is nil -func testNil(values ...*int32) bool { - for _, v := range values { - if v == nil { - return true - } - } - - return false -} - -// MaxInt32 : Return maximum of two integers provided via pointers. If one value -// is not defined, return the other one. 
If both are not defined, result is also -// undefined, caller needs to check for that. -func MaxInt32(a, b *int32) *int32 { - if testNil(a, b) { - return nil - } - - if *a > *b { - return a - } - - return b -} - -// IsSmallerQuantity : checks if first resource is of a smaller quantity than the second -func IsSmallerQuantity(requestStr, limitStr string) (bool, error) { - - request, err := resource.ParseQuantity(requestStr) - if err != nil { - return false, fmt.Errorf("could not parse request %v : %v", requestStr, err) - } - - limit, err2 := resource.ParseQuantity(limitStr) - if err2 != nil { - return false, fmt.Errorf("could not parse limit %v : %v", limitStr, err2) - } - - return request.Cmp(limit) == -1, nil -} diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go deleted file mode 100644 index 593f6530..00000000 --- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC -2898 / PKCS #5 v2.0. - -A key derivation function is useful when encrypting data based on a password -or any other not-fully-random data. It uses a pseudorandom function to derive -a secure encryption key based on the password. - -While v2.0 of the standard defines only one pseudorandom function to use, -HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved -Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To -choose, you can pass the `New` functions from the different SHA packages to -pbkdf2.Key. -*/ -package pbkdf2 // import "golang.org/x/crypto/pbkdf2" - -import ( - "crypto/hmac" - "hash" -) - -// Key derives a key from the password, salt and iteration count, returning a -// []byte of length keylen that can be used as cryptographic key. The key is -// derived based on the method described as PBKDF2 with the HMAC variant using -// the supplied hash function. -// -// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you -// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by -// doing: -// -// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) -// -// Remember to get a good random salt. At least 8 bytes is recommended by the -// RFC. -// -// Using a higher iteration count will increase the cost of an exhaustive -// search but will also make derivation proportionally slower. -func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { - prf := hmac.New(h, password) - hashLen := prf.Size() - numBlocks := (keyLen + hashLen - 1) / hashLen - - var buf [4]byte - dk := make([]byte, 0, numBlocks*hashLen) - U := make([]byte, hashLen) - for block := 1; block <= numBlocks; block++ { - // N.B.: || means concatenation, ^ means XOR - // for each block T_i = U_1 ^ U_2 ^ ... 
^ U_iter - // U_1 = PRF(password, salt || uint(i)) - prf.Reset() - prf.Write(salt) - buf[0] = byte(block >> 24) - buf[1] = byte(block >> 16) - buf[2] = byte(block >> 8) - buf[3] = byte(block) - prf.Write(buf[:4]) - dk = prf.Sum(dk) - T := dk[len(dk)-hashLen:] - copy(U, T) - - // U_n = PRF(password, U_(n-1)) - for n := 2; n <= iter; n++ { - prf.Reset() - prf.Write(U) - U = U[:0] - U = prf.Sum(U) - for x := range U { - T[x] ^= U[x] - } - } - } - return dk[:keyLen] -} diff --git a/vendor/modules.txt b/vendor/modules.txt index a5eb3fb2..1b33a304 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -26,7 +26,7 @@ github.com/aws-controllers-k8s/iam-controller/apis/v1alpha1 # github.com/aws-controllers-k8s/runtime v0.17.0 ## explicit; go 1.17 github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1 -# github.com/aws/aws-sdk-go v1.42.0 +# github.com/aws/aws-sdk-go v1.42.53 ## explicit; go 1.11 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/awserr @@ -80,14 +80,8 @@ github.com/cenkalti/backoff github.com/cespare/xxhash/v2 # github.com/crossplane/crossplane-runtime v0.15.1-0.20210930095326-d5661210733b ## explicit; go 1.16 -github.com/crossplane/crossplane-runtime/apis/common/v1 -github.com/crossplane/crossplane-runtime/pkg/errors -github.com/crossplane/crossplane-runtime/pkg/meta -github.com/crossplane/crossplane-runtime/pkg/reference -github.com/crossplane/crossplane-runtime/pkg/resource # github.com/crossplane/provider-aws v0.22.0 ## explicit; go 1.16 -github.com/crossplane/provider-aws/apis/iam/v1beta1 # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew @@ -171,7 +165,6 @@ github.com/jmespath/go-jmespath github.com/json-iterator/go # github.com/kr/text v0.2.0 ## explicit -github.com/kr/text # github.com/kubeflow/kubeflow/components/admission-webhook v0.0.0-20220513185335-8be0d987f765 ## explicit; go 1.17 github.com/kubeflow/kubeflow/components/admission-webhook/pkg/apis/settings/v1alpha1 @@ -192,7 +185,6 @@ github.com/modern-go/concurrent github.com/modern-go/reflect2 # github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d ## explicit -github.com/motomux/pretty # github.com/nxadm/tail v1.4.8 ## explicit; go 1.13 github.com/nxadm/tail @@ -238,10 +230,8 @@ github.com/onsi/gomega/types github.com/pkg/errors # github.com/pluralsh/controller-reconcile-helper v0.0.0-20220524100904-61632865a5a5 ## explicit; go 1.17 -github.com/pluralsh/controller-reconcile-helper # github.com/pluralsh/kubeflow-controller v0.0.0-20211119170150-da85f07f50f4 ## explicit; go 1.17 -github.com/pluralsh/kubeflow-controller/apis/platform/v1alpha1 # github.com/pmezard/go-difflib v1.0.0 ## explicit github.com/pmezard/go-difflib/difflib @@ -269,8 +259,6 @@ github.com/prometheus/procfs/internal/util github.com/sirupsen/logrus # github.com/spf13/afero v1.6.0 ## explicit; go 1.13 -github.com/spf13/afero -github.com/spf13/afero/mem # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag @@ -289,12 +277,6 @@ github.com/tidwall/match github.com/tidwall/pretty # github.com/zalando/postgres-operator v1.7.1 ## explicit; go 1.16 -github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do -github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1 -github.com/zalando/postgres-operator/pkg/spec -github.com/zalando/postgres-operator/pkg/util -github.com/zalando/postgres-operator/pkg/util/config -github.com/zalando/postgres-operator/pkg/util/constants # go.opencensus.io v0.23.0 ## explicit; go 1.13 go.opencensus.io @@ -329,7 +311,6 @@ go.uber.org/zap/internal/exit 
go.uber.org/zap/zapcore # golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 ## explicit; go 1.17 -golang.org/x/crypto/pbkdf2 golang.org/x/crypto/pkcs12 golang.org/x/crypto/pkcs12/internal/rc2 # golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd @@ -847,7 +828,6 @@ sigs.k8s.io/controller-runtime/pkg/webhook/conversion sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics # sigs.k8s.io/hierarchical-namespaces v0.9.0 ## explicit; go 1.17 -sigs.k8s.io/hierarchical-namespaces/api/v1alpha2 # sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 ## explicit; go 1.16 sigs.k8s.io/json diff --git a/vendor/sigs.k8s.io/hierarchical-namespaces/LICENSE b/vendor/sigs.k8s.io/hierarchical-namespaces/LICENSE deleted file mode 100644 index 8dada3ed..00000000 --- a/vendor/sigs.k8s.io/hierarchical-namespaces/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/sigs.k8s.io/hierarchical-namespaces/api/v1alpha2/groupversion_info.go b/vendor/sigs.k8s.io/hierarchical-namespaces/api/v1alpha2/groupversion_info.go deleted file mode 100644 index 73b6ea7d..00000000 --- a/vendor/sigs.k8s.io/hierarchical-namespaces/api/v1alpha2/groupversion_info.go +++ /dev/null @@ -1,35 +0,0 @@ -/* - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package v1 contains API Schema definitions for the hnc v1 API group -// +kubebuilder:object:generate=true -// +groupName=hnc.x-k8s.io -package v1alpha2 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" -) - -var ( - // GroupVersion is group version used to register these objects - GroupVersion = schema.GroupVersion{Group: "hnc.x-k8s.io", Version: "v1alpha2"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} - - // AddToScheme adds the types in this group-version to the given scheme. - AddToScheme = SchemeBuilder.AddToScheme -) diff --git a/vendor/sigs.k8s.io/hierarchical-namespaces/api/v1alpha2/hierarchy_types.go b/vendor/sigs.k8s.io/hierarchical-namespaces/api/v1alpha2/hierarchy_types.go deleted file mode 100644 index 78045d44..00000000 --- a/vendor/sigs.k8s.io/hierarchical-namespaces/api/v1alpha2/hierarchy_types.go +++ /dev/null @@ -1,240 +0,0 @@ -/* - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha2 - -import ( - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Constants for types and well-known names -const ( - Singleton = "hierarchy" - HierarchyConfigurations = "hierarchyconfigurations" -) - -// Constants for labels and annotations -const ( - MetaGroup = "hnc.x-k8s.io" - LabelInheritedFrom = MetaGroup + "/inherited-from" - FinalizerHasSubnamespace = MetaGroup + "/hasSubnamespace" - LabelTreeDepthSuffix = ".tree." + MetaGroup + "/depth" - AnnotationManagedBy = MetaGroup + "/managed-by" - AnnotationPropagatePrefix = "propagate." + MetaGroup - - AnnotationSelector = AnnotationPropagatePrefix + "/select" - AnnotationTreeSelector = AnnotationPropagatePrefix + "/treeSelect" - AnnotationNoneSelector = AnnotationPropagatePrefix + "/none" - - // LabelManagedByApps will eventually replace our own managed-by annotation (we didn't know - // about this standard label when we invented our own). - LabelManagedByApps = "app.kubernetes.io/managed-by" - - // LabelIncludedNamespace is the label added by HNC on the namespaces that - // should be enforced by our validators. - LabelIncludedNamespace = MetaGroup + "/included-namespace" -) - -const ( - // Condition types. - ConditionActivitiesHalted string = "ActivitiesHalted" - ConditionBadConfiguration string = "BadConfiguration" - - // Condition reasons. - ReasonAncestor string = "AncestorHaltActivities" - ReasonDeletingCRD string = "DeletingCRD" - ReasonInCycle string = "InCycle" - ReasonParentMissing string = "ParentMissing" - ReasonIllegalParent string = "IllegalParent" - ReasonAnchorMissing string = "SubnamespaceAnchorMissing" -) - -// AllConditions holds all the conditions by type and reason. Please keep this -// list in alphabetic order. This is specifically used to clear (set to 0) -// conditions in the metrics. -var AllConditions = map[string][]string{ - ConditionActivitiesHalted: { - ReasonAncestor, - ReasonDeletingCRD, - ReasonInCycle, - ReasonParentMissing, - ReasonIllegalParent, - }, - ConditionBadConfiguration: { - ReasonAnchorMissing, - }, -} - -const ( - // EventCannotPropagate is for events when a namespace contains an object that - // couldn't be propagated *out* of the namespace, to one or more of its - // descendants. If the object couldn't be propagated to *any* descendants - for - // example, because it has a finalizer on it (HNC can't propagate objects with - // finalizers), the error message will point to the object in this namespace. - // Otherwise, if it couldn't be propagated to *some* descendant, the error - // message will point to the descendant. - EventCannotPropagate string = "CannotPropagateObject" - // EventCannotUpdate is for events when a namespace has an object that couldn't - // be propagated *into* this namespace - that is, it couldn't be created in - // the first place, or it couldn't be updated. The error message will point to - // the source namespace. - EventCannotUpdate string = "CannotUpdateObject" - // EventCannotParseSelector is for events when an object has annotations that cannot be - // parsed into a valid selector. - EventCannotParseSelector string = "CannotParseSelector" -) - -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
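For orientation, here is a minimal, hypothetical Go sketch of how consumers typically used the HNC label constants deleted above. The label keys mirror the deleted hierarchy_types.go; the helper name and sample values are invented for illustration, assuming HNC's documented behavior of stamping propagated objects with the inherited-from label and namespaces with per-ancestor tree-depth labels.

package main

import "fmt"

// Label keys mirrored from the deleted hierarchy_types.go.
const (
	metaGroup            = "hnc.x-k8s.io"
	labelInheritedFrom   = metaGroup + "/inherited-from"
	labelTreeDepthSuffix = ".tree." + metaGroup + "/depth"
)

// isPropagatedFrom reports whether an object's labels mark it as an
// HNC-propagated copy originating in the given source namespace.
func isPropagatedFrom(labels map[string]string, source string) bool {
	return labels[labelInheritedFrom] == source
}

func main() {
	objLabels := map[string]string{labelInheritedFrom: "parent-ns"}
	fmt.Println(isPropagatedFrom(objLabels, "parent-ns")) // true

	// The tree-depth label key HNC writes on namespaces under root "prod":
	fmt.Println("prod" + labelTreeDepthSuffix) // prod.tree.hnc.x-k8s.io/depth
}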
- -// +kubebuilder:object:root=true -// +kubebuilder:storageversion - -// HierarchyConfiguration is the Schema for the hierarchyconfigurations API -type HierarchyConfiguration struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec HierarchyConfigurationSpec `json:"spec,omitempty"` - Status HierarchyConfigurationStatus `json:"status,omitempty"` -} - -// HierarchyConfigurationSpec defines the desired state of HierarchyConfiguration -type HierarchyConfigurationSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file - - // Parent indicates the parent of this namespace, if any. - Parent string `json:"parent,omitempty"` - - // AllowCascadingDeletion indicates if the subnamespaces of this namespace are - // allowed to be cascade-deleted. - AllowCascadingDeletion bool `json:"allowCascadingDeletion,omitempty"` -} - -// HierarchyConfigurationStatus defines the observed state of HierarchyConfiguration -type HierarchyConfigurationStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file - - // Children indicates the direct children of this namespace, if any. - Children []string `json:"children,omitempty"` - - // Conditions describes the errors, if any. - Conditions []Condition `json:"conditions,omitempty"` -} - -// +kubebuilder:object:root=true - -// HierarchyConfigurationList contains a list of HierarchyConfiguration -type HierarchyConfigurationList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []HierarchyConfiguration `json:"items"` -} - -// metav1.Condition was introduced in k8s.io/apimachinery v0.20.0-alpha.1 and we -// don't want to take a dependency on it yet, thus we copied the below struct from -// https://github.com/kubernetes/apimachinery/blob/master/pkg/apis/meta/v1/types.go: - -// Condition contains details for one aspect of the current state of this API Resource. -// --- -// This struct is intended for direct use as an array at the field path .status.conditions. For example, -// type FooStatus struct{ -// // Represents the observations of a foo's current state. -// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" -// // +patchMergeKey=type -// // +patchStrategy=merge -// // +listType=map -// // +listMapKey=type -// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` -// -// // other fields -// } -type Condition struct { - // type of condition in CamelCase or in foo.example.com/CamelCase. - // --- - // Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - // useful (see .node.status.conditions), the ability to deconflict is important. - // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - // +required - // +kubebuilder:validation:Required - // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` - // +kubebuilder:validation:MaxLength=316 - Type string `json:"type" protobuf:"bytes,1,opt,name=type"` - // status of the condition, one of True, False, Unknown.
- // +required - // +kubebuilder:validation:Required - // +kubebuilder:validation:Enum=True;False;Unknown - Status metav1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status"` - // observedGeneration represents the .metadata.generation that the condition was set based upon. - // For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - // with respect to the current state of the instance. - // +optional - // +kubebuilder:validation:Minimum=0 - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` - // lastTransitionTime is the last time the condition transitioned from one status to another. - // This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - // +required - // +kubebuilder:validation:Required - // +kubebuilder:validation:Type=string - // +kubebuilder:validation:Format=date-time - LastTransitionTime metav1.Time `json:"lastTransitionTime" protobuf:"bytes,4,opt,name=lastTransitionTime"` - // reason contains a programmatic identifier indicating the reason for the condition's last transition. - // Producers of specific condition types may define expected values and meanings for this field, - // and whether the values are considered a guaranteed API. - // The value should be a CamelCase string. - // This field may not be empty. - // +required - // +kubebuilder:validation:Required - // +kubebuilder:validation:MaxLength=1024 - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Pattern=`^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$` - Reason string `json:"reason" protobuf:"bytes,5,opt,name=reason"` - // message is a human readable message indicating details about the transition. - // This may be an empty string. - // +required - // +kubebuilder:validation:Required - // +kubebuilder:validation:MaxLength=32768 - Message string `json:"message" protobuf:"bytes,6,opt,name=message"` -} - -// NewCondition fills some required field with default values for schema -// validation, e.g. Status and LastTransitionTime. -func NewCondition(tp, reason, msg string) Condition { - return Condition{ - Type: tp, - Status: "True", - // Set time as an obviously wrong value 1970-01-01T00:00:00Z since we - // overwrite conditions every time. - LastTransitionTime: metav1.Unix(0, 0), - Reason: reason, - Message: msg, - } -} - -func (c Condition) String() string { - msg := c.Message - if len(msg) > 100 { - msg = msg[:100] + "..." - } - return fmt.Sprintf("%s (%s): %s", c.Type, c.Reason, msg) -} - -func init() { - SchemeBuilder.Register(&HierarchyConfiguration{}, &HierarchyConfigurationList{}) -} diff --git a/vendor/sigs.k8s.io/hierarchical-namespaces/api/v1alpha2/hnc_config.go b/vendor/sigs.k8s.io/hierarchical-namespaces/api/v1alpha2/hnc_config.go deleted file mode 100644 index a6de2ec9..00000000 --- a/vendor/sigs.k8s.io/hierarchical-namespaces/api/v1alpha2/hnc_config.go +++ /dev/null @@ -1,176 +0,0 @@ -/* - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha2 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Constants for resources and well-known names. -const ( - HNCConfigSingleton = "config" - HNCConfigSingletons = "hncconfigurations" - RBACGroup = "rbac.authorization.k8s.io" - RoleResource = "roles" - RoleKind = "Role" - RoleBindingResource = "rolebindings" - RoleBindingKind = "RoleBinding" -) - -// SynchronizationMode describes propagation mode of objects of the same kind. -// The only three modes currently supported are "Propagate", "Ignore", and "Remove". -// See detailed definition below. An unsupported mode will be treated as "ignore". -type SynchronizationMode string - -const ( - // Propagate objects from ancestors to descendants and deletes obsolete descendants. - Propagate SynchronizationMode = "Propagate" - - // Ignore the modification of this resource. New or changed objects will not be propagated, and - // obsolete objects will not be deleted. The inheritedFrom label is not removed. Any unknown mode - // is treated as Ignore. - Ignore SynchronizationMode = "Ignore" - - // Remove all existing propagated copies. - Remove SynchronizationMode = "Remove" -) - -const ( - // Condition types. - ConditionBadTypeConfiguration = "BadConfiguration" - ConditionOutOfSync = "OutOfSync" - // NamespaceCondition is set if there are namespace conditions, which are set - // in the HierarchyConfiguration objects. The condition reasons would be the - // condition types in HierarchyConfiguration, e.g. "ActivitiesHalted". - ConditionNamespace = "NamespaceCondition" - - // Condition reasons for BadConfiguration - ReasonMultipleConfigsForType = "MultipleConfigurationsForType" - ReasonResourceNotFound = "ResourceNotFound" - ReasonResourceNotNamespaced = "ResourceNotNamespaced" - - // Condition reason for OutOfSync, e.g. errors when creating a reconciler. - ReasonUnknown = "Unknown" -) - -// EnforcedTypes are the types enforced by HNC that they should not show up in -// the spec and only in the status. Any configurations of the enforced types in -// the spec would cause 'MultipleConfigurationsForType' condition. -var EnforcedTypes = []ResourceSpec{ - {Group: RBACGroup, Resource: RoleResource, Mode: Propagate}, - {Group: RBACGroup, Resource: RoleBindingResource, Mode: Propagate}, -} - -// IsEnforcedType returns true if configuration is on an enforced type. -func IsEnforcedType(grm ResourceSpec) bool { - for _, tp := range EnforcedTypes { - if tp.Group == grm.Group && tp.Resource == grm.Resource { - return true - } - } - return false -} - -// ResourceSpec defines the desired synchronization state of a specific resource. -type ResourceSpec struct { - // Group of the resource defined below. This is used to unambiguously identify - // the resource. It may be omitted for core resources (e.g. "secrets"). - Group string `json:"group,omitempty"` - // Resource to be configured. - Resource string `json:"resource"` - // Synchronization mode of the kind. If the field is empty, it will be treated - // as "Propagate". - // +optional - // +kubebuilder:validation:Enum=Propagate;Ignore;Remove - Mode SynchronizationMode `json:"mode,omitempty"` -} - -// ResourceStatus defines the actual synchronization state of a specific resource. -type ResourceStatus struct { - // The API group of the resource being synchronized. - Group string `json:"group"` - - // The API version used by HNC when propagating this resource. 
- Version string `json:"version"` - - // The resource being synchronized. - Resource string `json:"resource"` - - // Mode describes the synchronization mode of the kind. Typically, it will be the same as the mode - // in the spec, except when the reconciler has fallen behind or for resources with an enforced - // default synchronization mode, such as RBAC objects. - Mode SynchronizationMode `json:"mode,omitempty"` - - // Tracks the number of objects that are being propagated to descendant namespaces. The propagated - // objects are created by HNC. - // +kubebuilder:validation:Minimum=0 - // +optional - NumPropagatedObjects *int `json:"numPropagatedObjects,omitempty"` - - // Tracks the number of objects that are created by users. - // +kubebuilder:validation:Minimum=0 - // +optional - NumSourceObjects *int `json:"numSourceObjects,omitempty"` -} - -// +kubebuilder:object:root=true -// +kubebuilder:resource:path=hncconfigurations,scope=Cluster -// +kubebuilder:storageversion - -// HNCConfiguration is a cluster-wide configuration for HNC as a whole. See details in http://bit.ly/hnc-type-configuration -type HNCConfiguration struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec HNCConfigurationSpec `json:"spec,omitempty"` - Status HNCConfigurationStatus `json:"status,omitempty"` -} - -// HNCConfigurationSpec defines the desired state of HNC configuration. -type HNCConfigurationSpec struct { - // Resources defines the cluster-wide settings for resource synchronization. - // Note that 'roles' and 'rolebindings' are pre-configured by HNC with - // 'Propagate' mode and are omitted in the spec. Any configuration of 'roles' - // or 'rolebindings' are not allowed. To learn more, see - // https://github.com/kubernetes-sigs/hierarchical-namespaces/blob/master/docs/user-guide/how-to.md#admin-types - Resources []ResourceSpec `json:"resources,omitempty"` -} - -// HNCConfigurationStatus defines the observed state of HNC configuration. -type HNCConfigurationStatus struct { - // Resources indicates the observed synchronization states of the resources. - Resources []ResourceStatus `json:"resources,omitempty"` - - // Conditions describes the errors, if any. If there are any conditions with - // "ActivitiesHalted" reason, this means that HNC cannot function in the - // affected namespaces. The HierarchyConfiguration object in each of the - // affected namespaces will have more information. To learn more about - // conditions, see https://github.com/kubernetes-sigs/hierarchical-namespaces/blob/master/docs/user-guide/concepts.md#admin-conditions. - Conditions []Condition `json:"conditions,omitempty"` -} - -// +kubebuilder:object:root=true - -// HNCConfigurationList contains a list of HNCConfiguration. -type HNCConfigurationList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []HNCConfiguration `json:"items"` -} - -func init() { - SchemeBuilder.Register(&HNCConfiguration{}, &HNCConfigurationList{}) -} diff --git a/vendor/sigs.k8s.io/hierarchical-namespaces/api/v1alpha2/subnamespace_anchor.go b/vendor/sigs.k8s.io/hierarchical-namespaces/api/v1alpha2/subnamespace_anchor.go deleted file mode 100644 index d6ded962..00000000 --- a/vendor/sigs.k8s.io/hierarchical-namespaces/api/v1alpha2/subnamespace_anchor.go +++ /dev/null @@ -1,85 +0,0 @@ -/* - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha2 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Constants for the subnamespace anchor resource type and namespace annotation. -const ( - Anchors = "subnamespaceanchors" - AnchorKind = "SubnamespaceAnchor" - AnchorAPIVersion = MetaGroup + "/v1alpha2" - SubnamespaceOf = MetaGroup + "/subnamespace-of" -) - -// SubnamespaceAnchorState describes the state of the subnamespace. The state could be -// "Missing", "Ok", "Conflict" or "Forbidden". The definitions will be described below. -type SubnamespaceAnchorState string - -// Anchor states, which are documented in the comment to SubnamespaceAnchorStatus.State. -const ( - Missing SubnamespaceAnchorState = "Missing" - Ok SubnamespaceAnchorState = "Ok" - Conflict SubnamespaceAnchorState = "Conflict" - Forbidden SubnamespaceAnchorState = "Forbidden" -) - -// SubnamespaceAnchorStatus defines the observed state of SubnamespaceAnchor. -type SubnamespaceAnchorStatus struct { - // Describes the state of the subnamespace anchor. - // - // Currently, the supported values are: - // - // - "Missing": the subnamespace has not been created yet. This should be the default state when - // the anchor is just created. - // - // - "Ok": the subnamespace exists. This is the only good state of the anchor. - // - // - "Conflict": a namespace of the same name already exists. The admission controller will - // attempt to prevent this. - // - // - "Forbidden": the anchor was created in a namespace that doesn't allow children, such as - // kube-system or hnc-system. The admission controller will attempt to prevent this. - State SubnamespaceAnchorState `json:"status,omitempty"` -} - -// +kubebuilder:object:root=true -// +kubebuilder:resource:path=subnamespaceanchors,shortName=subns,scope=Namespaced -// +kubebuilder:storageversion - -// SubnamespaceAnchor is the Schema for the subnamespace API. -// See details at http://bit.ly/hnc-self-serve-ux. -type SubnamespaceAnchor struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Status SubnamespaceAnchorStatus `json:"status,omitempty"` -} - -// +kubebuilder:object:root=true - -// SubnamespaceAnchorList contains a list of SubnamespaceAnchor. -type SubnamespaceAnchorList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []SubnamespaceAnchor `json:"items"` -} - -func init() { - SchemeBuilder.Register(&SubnamespaceAnchor{}, &SubnamespaceAnchorList{}) -} diff --git a/vendor/sigs.k8s.io/hierarchical-namespaces/api/v1alpha2/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/hierarchical-namespaces/api/v1alpha2/zz_generated.deepcopy.go deleted file mode 100644 index c246cfa5..00000000 --- a/vendor/sigs.k8s.io/hierarchical-namespaces/api/v1alpha2/zz_generated.deepcopy.go +++ /dev/null @@ -1,363 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Condition) DeepCopyInto(out *Condition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. -func (in *Condition) DeepCopy() *Condition { - if in == nil { - return nil - } - out := new(Condition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HNCConfiguration) DeepCopyInto(out *HNCConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HNCConfiguration. -func (in *HNCConfiguration) DeepCopy() *HNCConfiguration { - if in == nil { - return nil - } - out := new(HNCConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *HNCConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HNCConfigurationList) DeepCopyInto(out *HNCConfigurationList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]HNCConfiguration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HNCConfigurationList. -func (in *HNCConfigurationList) DeepCopy() *HNCConfigurationList { - if in == nil { - return nil - } - out := new(HNCConfigurationList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *HNCConfigurationList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HNCConfigurationSpec) DeepCopyInto(out *HNCConfigurationSpec) { - *out = *in - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]ResourceSpec, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HNCConfigurationSpec. 
-func (in *HNCConfigurationSpec) DeepCopy() *HNCConfigurationSpec { - if in == nil { - return nil - } - out := new(HNCConfigurationSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HNCConfigurationStatus) DeepCopyInto(out *HNCConfigurationStatus) { - *out = *in - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]ResourceStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HNCConfigurationStatus. -func (in *HNCConfigurationStatus) DeepCopy() *HNCConfigurationStatus { - if in == nil { - return nil - } - out := new(HNCConfigurationStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HierarchyConfiguration) DeepCopyInto(out *HierarchyConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HierarchyConfiguration. -func (in *HierarchyConfiguration) DeepCopy() *HierarchyConfiguration { - if in == nil { - return nil - } - out := new(HierarchyConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *HierarchyConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HierarchyConfigurationList) DeepCopyInto(out *HierarchyConfigurationList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]HierarchyConfiguration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HierarchyConfigurationList. -func (in *HierarchyConfigurationList) DeepCopy() *HierarchyConfigurationList { - if in == nil { - return nil - } - out := new(HierarchyConfigurationList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *HierarchyConfigurationList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HierarchyConfigurationSpec) DeepCopyInto(out *HierarchyConfigurationSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HierarchyConfigurationSpec. 
-func (in *HierarchyConfigurationSpec) DeepCopy() *HierarchyConfigurationSpec { - if in == nil { - return nil - } - out := new(HierarchyConfigurationSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HierarchyConfigurationStatus) DeepCopyInto(out *HierarchyConfigurationStatus) { - *out = *in - if in.Children != nil { - in, out := &in.Children, &out.Children - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HierarchyConfigurationStatus. -func (in *HierarchyConfigurationStatus) DeepCopy() *HierarchyConfigurationStatus { - if in == nil { - return nil - } - out := new(HierarchyConfigurationStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec. -func (in *ResourceSpec) DeepCopy() *ResourceSpec { - if in == nil { - return nil - } - out := new(ResourceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceStatus) DeepCopyInto(out *ResourceStatus) { - *out = *in - if in.NumPropagatedObjects != nil { - in, out := &in.NumPropagatedObjects, &out.NumPropagatedObjects - *out = new(int) - **out = **in - } - if in.NumSourceObjects != nil { - in, out := &in.NumSourceObjects, &out.NumSourceObjects - *out = new(int) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceStatus. -func (in *ResourceStatus) DeepCopy() *ResourceStatus { - if in == nil { - return nil - } - out := new(ResourceStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SubnamespaceAnchor) DeepCopyInto(out *SubnamespaceAnchor) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Status = in.Status -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnamespaceAnchor. -func (in *SubnamespaceAnchor) DeepCopy() *SubnamespaceAnchor { - if in == nil { - return nil - } - out := new(SubnamespaceAnchor) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SubnamespaceAnchor) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SubnamespaceAnchorList) DeepCopyInto(out *SubnamespaceAnchorList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]SubnamespaceAnchor, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnamespaceAnchorList. -func (in *SubnamespaceAnchorList) DeepCopy() *SubnamespaceAnchorList { - if in == nil { - return nil - } - out := new(SubnamespaceAnchorList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SubnamespaceAnchorList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SubnamespaceAnchorStatus) DeepCopyInto(out *SubnamespaceAnchorStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnamespaceAnchorStatus. -func (in *SubnamespaceAnchorStatus) DeepCopy() *SubnamespaceAnchorStatus { - if in == nil { - return nil - } - out := new(SubnamespaceAnchorStatus) - in.DeepCopyInto(out) - return out -}
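Because this patch removes the vendored sigs.k8s.io/hierarchical-namespaces API (see the modules.txt hunk above), any code still importing it must be cleaned up. As a reference for that cleanup, here is a minimal sketch of the registration pattern these deleted files supported; the buildScheme helper is hypothetical, while AddToScheme comes from the deleted groupversion_info.go.

package main

import (
	"k8s.io/apimachinery/pkg/runtime"

	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	hncv1alpha2 "sigs.k8s.io/hierarchical-namespaces/api/v1alpha2"
)

// buildScheme shows how the now-removed HNC v1alpha2 types were typically
// registered alongside the core Kubernetes types.
func buildScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := clientgoscheme.AddToScheme(scheme); err != nil {
		return nil, err
	}
	// Registers HierarchyConfiguration, HNCConfiguration and SubnamespaceAnchor
	// (plus their List types) under hnc.x-k8s.io/v1alpha2 via the SchemeBuilder
	// from the deleted groupversion_info.go.
	if err := hncv1alpha2.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}

func main() {
	if _, err := buildScheme(); err != nil {
		panic(err)
	}
}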