diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 35eede302e..b8d198cfb8 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -4891,6 +4891,10 @@ "description": "SecretRef provides the secret reference needed to access the S3 source", "type": "string" }, + "serviceAccountName": { + "description": "ServiceAccountName provides the SAN needed if we want to use chain creds for S3 access (optional, if SecretRef supplied)", + "type": "string" + }, "url": { "description": "URL is the url of the S3 source", "type": "string", diff --git a/cmd/cdi-importer/importer.go b/cmd/cdi-importer/importer.go index 341f75ff95..731b954a2d 100644 --- a/cmd/cdi-importer/importer.go +++ b/cmd/cdi-importer/importer.go @@ -255,6 +255,7 @@ func newDataSource(source string, contentType string, volumeMode v1.PersistentVo ep, _ := util.ParseEnvVar(common.ImporterEndpoint, false) acc, _ := util.ParseEnvVar(common.ImporterAccessKeyID, false) sec, _ := util.ParseEnvVar(common.ImporterSecretKey, false) + s3ChainAuth, _ := strconv.ParseBool(os.Getenv(common.UseS3CredentialsChainAuth)) keyf, _ := util.ParseEnvVar(common.ImporterGoogleCredentialFileVar, false) diskID, _ := util.ParseEnvVar(common.ImporterDiskID, false) uuid, _ := util.ParseEnvVar(common.ImporterUUID, false) @@ -271,34 +272,45 @@ func newDataSource(source string, contentType string, volumeMode v1.PersistentVo case cc.SourceHTTP: ds, err := importer.NewHTTPDataSource(getHTTPEp(ep), acc, sec, certDir, cdiv1.DataVolumeContentType(contentType)) if err != nil { - errorCannotConnectDataSource(err, "http") + errorCannotConnectDataSource(err, cc.SourceHTTP) } return ds case cc.SourceImageio: ds, err := importer.NewImageioDataSource(ep, acc, sec, certDir, diskID, currentCheckpoint, previousCheckpoint) if err != nil { - errorCannotConnectDataSource(err, "imageio") + errorCannotConnectDataSource(err, cc.SourceImageio) } return ds case cc.SourceRegistry: ds := 
importer.NewRegistryDataSource(ep, acc, sec, certDir, insecureTLS) return ds case cc.SourceS3: - ds, err := importer.NewS3DataSource(ep, acc, sec, certDir) + var ( + ds *importer.S3DataSource + err error + ) + if s3ChainAuth { + // use this as a flag to say the user has a SAN set up with creds that IRSA will read + klog.Infof("Attempting to create your S3 Data Source with cloud provider creds.\n") + ds, err = importer.NewChainCredentialsS3DataSource(ep, certDir) + } else { + // default behaviour of using supplied access key and secret key to configure S3 client + ds, err = importer.NewS3DataSource(ep, acc, sec, certDir) + } if err != nil { - errorCannotConnectDataSource(err, "s3") + errorCannotConnectDataSource(err, cc.SourceS3) } return ds case cc.SourceGCS: ds, err := importer.NewGCSDataSource(ep, keyf) if err != nil { - errorCannotConnectDataSource(err, "gcs") + errorCannotConnectDataSource(err, cc.SourceGCS) } return ds case cc.SourceVDDK: ds, err := importer.NewVDDKDataSource(ep, acc, sec, thumbprint, uuid, backingFile, currentCheckpoint, previousCheckpoint, finalCheckpoint, volumeMode) if err != nil { - errorCannotConnectDataSource(err, "vddk") + errorCannotConnectDataSource(err, cc.SourceVDDK) } return ds default: diff --git a/pkg/apis/core/v1beta1/openapi_generated.go b/pkg/apis/core/v1beta1/openapi_generated.go index 47e937cb5c..1b50f4cb94 100644 --- a/pkg/apis/core/v1beta1/openapi_generated.go +++ b/pkg/apis/core/v1beta1/openapi_generated.go @@ -17983,6 +17983,13 @@ func schema_pkg_apis_core_v1beta1_DataVolumeSourceS3(ref common.ReferenceCallbac Format: "", }, }, + "serviceAccountName": { + SchemaProps: spec.SchemaProps{ + Description: "ServiceAccountName provides the SAN needed if we want to use chain creds for S3 access (optional, if SecretRef supplied)", + Type: []string{"string"}, + Format: "", + }, + }, "certConfigMap": { SchemaProps: spec.SchemaProps{ Description: "CertConfigMap is a configmap reference, containing a Certificate Authority(CA) public 
key, and a base64 encoded pem certificate", diff --git a/pkg/common/common.go b/pkg/common/common.go index 3e948fca63..bc354f405c 100644 --- a/pkg/common/common.go +++ b/pkg/common/common.go @@ -111,6 +111,8 @@ const ( ImporterAccessKeyID = "IMPORTER_ACCESS_KEY_ID" // ImporterSecretKey provides a constant to capture our env variable "IMPORTER_SECRET_KEY" ImporterSecretKey = "IMPORTER_SECRET_KEY" + // UseS3CredentialsChainAuth flags whether or not to use the default aws config chain + UseS3CredentialsChainAuth = "S3_CREDENTIALS_CHAIN_AUTH" // ImporterImageSize provides a constant to capture our env variable "IMPORTER_IMAGE_SIZE" ImporterImageSize = "IMPORTER_IMAGE_SIZE" // ImporterCertDirVar provides a constant to capture our env variable "IMPORTER_CERT_DIR" diff --git a/pkg/controller/import-controller.go b/pkg/controller/import-controller.go index 49f1ff898f..d0bbd703ee 100644 --- a/pkg/controller/import-controller.go +++ b/pkg/controller/import-controller.go @@ -102,6 +102,7 @@ type importPodEnvVar struct { extraHeaders []string secretExtraHeaders []string cacheMode string + s3ChainAuth bool } type importerPodArgs struct { @@ -117,6 +118,7 @@ type importerPodArgs struct { workloadNodePlacement *sdkapi.NodePlacement vddkImageName *string priorityClassName string + serviceAccountName string } // NewImportController creates a new instance of the import controller. 
@@ -912,15 +914,16 @@ func makeImporterPodSpec(args *importerPodArgs) *corev1.Pod { }, }, Spec: corev1.PodSpec{ - Containers: makeImporterContainerSpec(args), - InitContainers: makeImporterInitContainersSpec(args), - Volumes: makeImporterVolumeSpec(args), - RestartPolicy: corev1.RestartPolicyOnFailure, - NodeSelector: args.workloadNodePlacement.NodeSelector, - Tolerations: args.workloadNodePlacement.Tolerations, - Affinity: args.workloadNodePlacement.Affinity, - PriorityClassName: args.priorityClassName, - ImagePullSecrets: args.imagePullSecrets, + Containers: makeImporterContainerSpec(args), + InitContainers: makeImporterInitContainersSpec(args), + Volumes: makeImporterVolumeSpec(args), + RestartPolicy: corev1.RestartPolicyOnFailure, + NodeSelector: args.workloadNodePlacement.NodeSelector, + Tolerations: args.workloadNodePlacement.Tolerations, + Affinity: args.workloadNodePlacement.Affinity, + PriorityClassName: args.priorityClassName, + ImagePullSecrets: args.imagePullSecrets, + ServiceAccountName: args.serviceAccountName, }, } @@ -948,6 +951,7 @@ func makeImporterPodSpec(args *importerPodArgs) *corev1.Pod { } func makeImporterContainerSpec(args *importerPodArgs) []corev1.Container { + args.podEnvVar.s3ChainAuth = args.serviceAccountName != "" // prep podEnvVar for Import method below containers := []corev1.Container{ { Name: common.ImporterPodName, @@ -1263,6 +1267,10 @@ func makeImportEnv(podEnvVar *importPodEnvVar, uid types.UID) []corev1.EnvVar { Name: common.CacheMode, Value: podEnvVar.cacheMode, }, + { + Name: common.UseS3CredentialsChainAuth, + Value: strconv.FormatBool(podEnvVar.s3ChainAuth), + }, } if podEnvVar.secretName != "" && podEnvVar.source != cc.SourceGCS { env = append(env, corev1.EnvVar{ diff --git a/pkg/controller/import-controller_test.go b/pkg/controller/import-controller_test.go index 96fb5e14a7..edb510bb11 100644 --- a/pkg/controller/import-controller_test.go +++ b/pkg/controller/import-controller_test.go @@ -962,7 +962,8 @@ var _ = 
Describe("Import test env", func() { currentCheckpoint: "", previousCheckpoint: "", finalCheckpoint: "", - preallocation: false} + preallocation: false, + s3ChainAuth: false} Expect(reflect.DeepEqual(makeImportEnv(testEnvVar, mockUID), createImportTestEnv(testEnvVar, mockUID))).To(BeTrue()) }) }) @@ -1245,6 +1246,10 @@ func createImportTestEnv(podEnvVar *importPodEnvVar, uid string) []corev1.EnvVar Name: common.CacheMode, Value: podEnvVar.cacheMode, }, + { + Name: common.UseS3CredentialsChainAuth, + Value: strconv.FormatBool(podEnvVar.s3ChainAuth), + }, } if podEnvVar.secretName != "" { diff --git a/pkg/importer/s3-datasource.go b/pkg/importer/s3-datasource.go index 3b3f473610..236d1fdc91 100644 --- a/pkg/importer/s3-datasource.go +++ b/pkg/importer/s3-datasource.go @@ -21,8 +21,10 @@ import ( ) const ( - s3FolderSep = "/" - httpScheme = "http" + s3FolderSep = "/" + httpScheme = "http" + emptyAccessKey = "" + emptySecretKey = "" ) // S3Client is the interface to the used S3 client. @@ -53,7 +55,7 @@ type S3DataSource struct { } // NewS3DataSource creates a new instance of the S3DataSource -func NewS3DataSource(endpoint, accessKey, secKey string, certDir string) (*S3DataSource, error) { +func NewS3DataSource(endpoint string, accessKey string, secKey string, certDir string) (*S3DataSource, error) { ep, err := ParseEndpoint(endpoint) if err != nil { return nil, errors.Wrapf(err, fmt.Sprintf("unable to parse endpoint %q", endpoint)) @@ -70,6 +72,23 @@ func NewS3DataSource(endpoint, accessKey, secKey string, certDir string) (*S3Dat }, nil } +// NewChainCredentialsS3DataSource creates a new instance of the S3DataSource using chain credentials (wraps NewS3DataSource) +func NewChainCredentialsS3DataSource(endpoint, certDir string) (*S3DataSource, error) { + /* + Quick Note on IRSA credential chain: + When you initialize a new service client without providing any credential arguments, the SDK uses the default credential provider chain to find AWS credentials.
The SDK uses the first provider in the chain that returns credentials without an error. The default provider chain looks for credentials in the following order: + + - Environment variables. (* set when a `serviceAccountName` is supplied) + + - Shared credentials file. + + - If your application uses an ECS task definition or RunTask API operation, IAM role for tasks. + + - If your application is running on an Amazon EC2 instance, IAM role for Amazon EC2. + */ + return NewS3DataSource(endpoint, emptyAccessKey, emptySecretKey, certDir) +} + // Info is called to get initial information about the data. func (sd *S3DataSource) Info() (ProcessingPhase, error) { var err error @@ -140,7 +159,7 @@ func (sd *S3DataSource) Close() error { return err } -func createS3Reader(ep *url.URL, accessKey, secKey string, certDir string) (io.ReadCloser, error) { +func createS3Reader(ep *url.URL, accessKey string, secKey string, certDir string) (io.ReadCloser, error) { klog.V(3).Infoln("Using S3 client to get data") endpoint := ep.Host @@ -168,31 +187,38 @@ func createS3Reader(ep *url.URL, accessKey, secKey string, certDir string) (io.R return objectReader, nil } -func getS3Client(endpoint, accessKey, secKey string, certDir string, urlScheme string) (S3Client, error) { +func getS3Client(endpoint string, accessKey string, secKey string, certDir string, urlScheme string) (S3Client, error) { // Adding certs using CustomCABundle will overwrite the SystemCerts, so we opt by creating a custom HTTPClient httpClient, err := createHTTPClient(certDir) if err != nil { return nil, errors.Wrap(err, "Error creating http client for s3") } + var creds *credentials.Credentials + if accessKey != emptyAccessKey && secKey != emptySecretKey { + creds = credentials.NewStaticCredentials(accessKey, secKey, "") + } - creds := credentials.NewStaticCredentials(accessKey, secKey, "") region := extractRegion(endpoint) disableSSL := false // Disable SSL for http endpoint. 
This should cause the s3 client to create http requests. if urlScheme == httpScheme { disableSSL = true } - - sess, err := session.NewSession(&aws.Config{ + sessionConfig := &aws.Config{ Region: aws.String(region), Endpoint: aws.String(endpoint), - Credentials: creds, S3ForcePathStyle: aws.Bool(true), HTTPClient: httpClient, DisableSSL: &disableSSL, - }, - ) + } + if creds != nil { + sessionConfig.Credentials = creds + } else { + // recommended value to set when relying on credential chains + sessionConfig.CredentialsChainVerboseErrors = aws.Bool(true) + } + sess, err := session.NewSession(sessionConfig) if err != nil { return nil, err } diff --git a/pkg/operator/resources/crds_generated.go b/pkg/operator/resources/crds_generated.go index d60c2fa0be..6da1386b76 100644 --- a/pkg/operator/resources/crds_generated.go +++ b/pkg/operator/resources/crds_generated.go @@ -6096,6 +6096,11 @@ spec: description: SecretRef provides the secret reference needed to access the S3 source type: string + serviceAccountName: + description: ServiceAccountName provides the SAN needed + if we want to use chain creds for S3 access (optional, + if SecretRef supplied) + type: string url: description: URL is the url of the S3 source type: string @@ -7051,6 +7056,11 @@ spec: description: SecretRef provides the secret reference needed to access the S3 source type: string + serviceAccountName: + description: ServiceAccountName provides the SAN needed if + we want to use chain creds for S3 access (optional, if SecretRef + supplied) + type: string url: description: URL is the url of the S3 source type: string @@ -8075,6 +8085,11 @@ spec: description: SecretRef provides the secret reference needed to access the S3 source type: string + serviceAccountName: + description: ServiceAccountName provides the SAN needed if + we want to use chain creds for S3 access (optional, if SecretRef + supplied) + type: string url: description: URL is the url of the S3 source type: string diff --git 
a/staging/src/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types.go b/staging/src/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types.go index 194e531b04..3ac7476813 100644 --- a/staging/src/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types.go +++ b/staging/src/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types.go @@ -169,6 +169,9 @@ type DataVolumeSourceS3 struct { URL string `json:"url"` //SecretRef provides the secret reference needed to access the S3 source SecretRef string `json:"secretRef,omitempty"` + //ServiceAccountName provides the SAN needed if we want to use chain creds for S3 access (optional, if SecretRef supplied) + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` // CertConfigMap is a configmap reference, containing a Certificate Authority(CA) public key, and a base64 encoded pem certificate // +optional CertConfigMap string `json:"certConfigMap,omitempty"` diff --git a/staging/src/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_swagger_generated.go b/staging/src/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_swagger_generated.go index fb5ab9c6a9..4526ea4dc4 100644 --- a/staging/src/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_swagger_generated.go +++ b/staging/src/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_swagger_generated.go @@ -82,10 +82,11 @@ func (DataVolumeSourceUpload) SwaggerDoc() map[string]string { func (DataVolumeSourceS3) SwaggerDoc() map[string]string { return map[string]string{ - "": "DataVolumeSourceS3 provides the parameters to create a Data Volume from an S3 source", - "url": "URL is the url of the S3 source", - "secretRef": "SecretRef provides the secret reference needed to access the S3 source", - "certConfigMap": "CertConfigMap is a configmap reference, containing a Certificate Authority(CA) public key, and a 
base64 encoded pem certificate\n+optional", + "": "DataVolumeSourceS3 provides the parameters to create a Data Volume from an S3 source", + "url": "URL is the url of the S3 source", + "secretRef": "SecretRef provides the secret reference needed to access the S3 source", + "serviceAccountName": "ServiceAccountName provides the SAN needed if we want to use chain creds for S3 access (optional, if SecretRef supplied)\n+optional", + "certConfigMap": "CertConfigMap is a configmap reference, containing a Certificate Authority(CA) public key, and a base64 encoded pem certificate\n+optional", } }