From 9bfe48baeb32a5065c0239eab6540ad4d030e222 Mon Sep 17 00:00:00 2001 From: Mickael Stanislas Date: Thu, 19 Oct 2023 17:13:36 +0200 Subject: [PATCH] feat: add `cloudavenue_s3_bucket_lifecycle_configuration` resource/datasource --- .changelog/579.txt | 19 + .../s3_bucket_lifecycle_configuration.md | 118 ++++ .../s3_bucket_lifecycle_configuration.md | 287 ++++++++++ .../data-source.tf | 3 + go.mod | 10 +- go.sum | 22 +- internal/helpers/testsacc/testsacc.go | 27 +- internal/provider/provider_datasources.go | 1 + internal/provider/provider_resources.go | 1 + internal/provider/s3/base.go | 9 +- ...cket_lifecycle_configuration_datasource.go | 103 ++++ ...bucket_lifecycle_configuration_resource.go | 524 +++++++++++++++++ .../bucket_lifecycle_configuration_schema.go | 348 ++++++++++++ ...ket_lifecycle_configuration_schema_test.go | 61 ++ .../bucket_lifecycle_configuration_types.go | 282 ++++++++++ internal/testsacc/acctest.go | 29 +- internal/testsacc/acctest_datasources_test.go | 1 + internal/testsacc/acctest_resources_test.go | 1 + ...lifecycle_configuration_datasource_test.go | 61 ++ ...t_lifecycle_configuration_resource_test.go | 530 ++++++++++++++++++ ..._versioning_configuration_resource_test.go | 1 + ...cket_versioning_configuration.md copy.tmpl | 25 + .../s3_bucket_lifecycle_configuration.md.tmpl | 189 +++++++ 23 files changed, 2623 insertions(+), 29 deletions(-) create mode 100644 .changelog/579.txt create mode 100644 docs/data-sources/s3_bucket_lifecycle_configuration.md create mode 100644 docs/resources/s3_bucket_lifecycle_configuration.md create mode 100644 examples/data-sources/cloudavenue_s3_bucket_lifecycle_configuration/data-source.tf create mode 100644 internal/provider/s3/bucket_lifecycle_configuration_datasource.go create mode 100644 internal/provider/s3/bucket_lifecycle_configuration_resource.go create mode 100644 internal/provider/s3/bucket_lifecycle_configuration_schema.go create mode 100644 
internal/provider/s3/bucket_lifecycle_configuration_schema_test.go create mode 100644 internal/provider/s3/bucket_lifecycle_configuration_types.go create mode 100644 internal/testsacc/s3_bucket_lifecycle_configuration_datasource_test.go create mode 100644 internal/testsacc/s3_bucket_lifecycle_configuration_resource_test.go create mode 100644 templates/data-sources/s3_bucket_versioning_configuration.md copy.tmpl create mode 100644 templates/resources/s3_bucket_lifecycle_configuration.md.tmpl diff --git a/.changelog/579.txt b/.changelog/579.txt new file mode 100644 index 000000000..75128ce43 --- /dev/null +++ b/.changelog/579.txt @@ -0,0 +1,19 @@ +```release-note:dependency +deps: bumps github.com/FrangipaneTeam/terraform-plugin-framework-supertypes from 0.1.0 to 0.2.0 +``` + +```release-note:dependency +deps: bumps github.com/FrangipaneTeam/terraform-plugin-framework-superschema from 1.5.5 to 1.6.0 +``` + +```release-note:dependency +deps: bumps github.com/orange-cloudavenue/cloudavenue-sdk-go from 0.3.0 to 0.3.1 +``` + +```release-note:new-resource +`resource/cloudavenue_s3_lifecycle_configuration` is a new resource type that allows to manage S3 lifecycle configuration. +``` + +```release-note:new-data-source +`datasource/cloudavenue_s3_lifecycle_configuration` is a new data source type that allows to retrieve S3 lifecycle configuration. +``` diff --git a/docs/data-sources/s3_bucket_lifecycle_configuration.md b/docs/data-sources/s3_bucket_lifecycle_configuration.md new file mode 100644 index 000000000..34321a55a --- /dev/null +++ b/docs/data-sources/s3_bucket_lifecycle_configuration.md @@ -0,0 +1,118 @@ +--- +page_title: "cloudavenue_s3_bucket_lifecycle_configuration Data Source - cloudavenue" +subcategory: "S3 (Object Storage)" +description: |- + The cloudavenue_s3_bucket_lifecycle_configuration data source allows you to retrieve information about an ... 
+--- + +# cloudavenue_s3_bucket_lifecycle_configuration (Data Source) + +The `cloudavenue_s3_bucket_lifecycle_configuration` data source allows you to retrieve information about an ... + +## Example Usage + +```terraform +data "cloudavenue_s3_bucket_lifecycle_configuration" "example" { + bucket = cloudavenue_s3_bucket.example.name +} +``` + + +## Schema + +### Required + +- `bucket` (String) The name of the bucket. + +### Optional + +- `timeouts` (Attributes) (see [below for nested schema](#nestedatt--timeouts)) + +### Read-Only + +- `id` (String) The ID is a bucket name. +- `rules` (Attributes List) Rules that define lifecycle configuration. (see [below for nested schema](#nestedatt--rules)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `rules` + +Read-Only: + +- `abort_incomplete_multipart_upload` (Attributes) Configuration block that specifies the days since the initiation of an incomplete multipart upload that Amazon S3 will wait before permanently removing all parts of the upload. (see [below for nested schema](#nestedatt--rules--abort_incomplete_multipart_upload)) +- `expiration` (Attributes) Configuration block that specifies the expiration for the lifecycle of the object in the form of date, days and, whether the object has a delete marker. (see [below for nested schema](#nestedatt--rules--expiration)) +- `filter` (Attributes) Configuration block used to identify objects that a Lifecycle Rule applies to. (see [below for nested schema](#nestedatt--rules--filter)) +- `id` (String) Unique identifier for the rule. +- `noncurrent_version_expiration` (Attributes) Configuration block that specifies when noncurrent object versions expire. 
(see [below for nested schema](#nestedatt--rules--noncurrent_version_expiration)) +- `status` (String) Whether the rule is currently being applied. + + +### Nested Schema for `rules.abort_incomplete_multipart_upload` + +Optional: + +- `days_after_initiation` (Number) Number of days after which Amazon S3 aborts an incomplete multipart upload. + + + +### Nested Schema for `rules.expiration` + +Read-Only: + +- `date` (String) Date the object is to be moved or deleted. The date value must be in [RFC3339 full-date format](https://datatracker.ietf.org/doc/html/rfc3339#section-5.6) e.g. `2023-10-10T00:00:00Z`. +- `days` (Number) Lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer. +- `expired_object_delete_marker` (Boolean) Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to `true`, the delete marker will be expired; if set to `false` the policy takes no action. + + + +### Nested Schema for `rules.filter` + +Read-Only: + +- `and` (Attributes) Configuration block used to apply a logical AND to two or more predicates. The Lifecycle Rule will apply to any object matching all the predicates configured inside the and block. (see [below for nested schema](#nestedatt--rules--filter--and)) +- `prefix` (String) Match objects with this prefix. +- `tag` (Attributes) Specifies object tag key and value. (see [below for nested schema](#nestedatt--rules--filter--tag)) + + +### Nested Schema for `rules.filter.and` + +Read-Only: + +- `prefix` (String) Match objects with this prefix. +- `tags` (Attributes List) Specifies object tag key and value. (see [below for nested schema](#nestedatt--rules--filter--and--tags)) + + +### Nested Schema for `rules.filter.and.tags` + +Read-Only: + +- `key` (String) Object tag key. +- `value` (String) Object tag value. + + + + +### Nested Schema for `rules.filter.tag` + +Read-Only: + +- `key` (String) Object tag key. +- `value` (String) Object tag value. 
+ + + + +### Nested Schema for `rules.noncurrent_version_expiration` + +Read-Only: + +- `newer_noncurrent_versions` (Number) Number of noncurrent versions Amazon S3 will retain. +- `noncurrent_days` (Number) Number of days an object is noncurrent before Amazon S3 can perform the associated action. Must be a positive integer. + diff --git a/docs/resources/s3_bucket_lifecycle_configuration.md b/docs/resources/s3_bucket_lifecycle_configuration.md new file mode 100644 index 000000000..474ef4ff7 --- /dev/null +++ b/docs/resources/s3_bucket_lifecycle_configuration.md @@ -0,0 +1,287 @@ +--- +page_title: "cloudavenue_s3_bucket_lifecycle_configuration Resource - cloudavenue" +subcategory: "S3 (Object Storage)" +description: |- + The cloudavenue_s3_bucket_lifecycle_configuration resource allows you to manage ... +--- + +# cloudavenue_s3_bucket_lifecycle_configuration (Resource) + +Provides an independent configuration resource for S3 bucket [lifecycle configuration](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html). + +The `cloudavenue_s3_bucket_lifecycle_configuration` resource allows you to manage ... + +An S3 Lifecycle configuration consists of one or more Lifecycle rules. Each rule consists of the following: + +* An ID that identifies the rule. The ID must be unique within the configuration. +* A Status that indicates whether the rule is currently being applied. +* A Filter that identifies a subset of objects to which the rule applies. +* One or more Lifecycle actions that you want Amazon S3 to perform on the objects identified by the Filter. + +For more information about Lifecycle configuration, see [Lifecycle Configuration Elements](https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html). + + ~> **NOTE** S3 Buckets only support a single lifecycle configuration. Declaring multiple `cloudavenue_s3_bucket_lifecycle_configuration` resources to the same S3 Bucket will cause a perpetual difference in configuration. 
+ + ~> **NOTE** Lifecycle configurations may take some time to fully propagate to all CloudAvenue S3 systems. Running Terraform operations shortly after creating a lifecycle configuration may result in changes that affect configuration idempotence. See the Amazon S3 User Guide on [setting lifecycle configuration on a bucket](https://docs.aws.amazon.com/AmazonS3/latest/userguide/how-to-set-lifecycle-configuration-intro.html). + +## Examples Usage + +### Specifying a filter using key prefixes + +The Lifecycle rule applies to a subset of objects based on the key name prefix (`logs/`). + +```hcl +resource "cloudavenue_s3_bucket_lifecycle_configuration" "example" { + bucket = cloudavenue_s3_bucket.example.name + + rules = [{ + id = "rule_id_1" + + filter = { + prefix = "logs/" + } + + noncurrent_version_expiration = { + noncurrent_days = 90 + } + + status = "Enabled" + }] +} +``` + +If you want to apply a Lifecycle action to a subset of objects based on different key name prefixes, specify separate rules. + +```hcl +resource "cloudavenue_s3_bucket_lifecycle_configuration" "example" { + bucket = cloudavenue_s3_bucket.example.name + + rules = [ + { + id = "rule_id_1" + + filter = { + prefix = "config/" + } + + noncurrent_version_expiration = { + noncurrent_days = 180 + } + + status = "Enabled" + }, + { + id = "rule_id_2" + + filter = { + prefix = "cache/" + } + + noncurrent_version_expiration = { + noncurrent_days = 10 + } + + status = "Enabled" + } + ] +} +``` + +### Specifying a filter based on tag + +The Lifecycle rule applies to a subset of objects based on the tag key and value (`tag1` and `value1`). 
+ +```hcl +resource "cloudavenue_s3_bucket_lifecycle_configuration" "example" { + bucket = cloudavenue_s3_bucket.example.name + + rules = [{ + id = "rule_id_1" + + filter = { + tag = { + key = "tag1" + value = "value1" + } + } + + expiration = { + days = 90 + } + + status = "Enabled" + }] +} +``` + +### Specifying a filter based on tags range and prefix + +The Lifecycle rule applies to a subset of objects based on the tag key and value (`tag1` and `value1`) and the key name prefix (`logs/`). + +```hcl +resource "cloudavenue_s3_bucket_lifecycle_configuration" "example" { + bucket = cloudavenue_s3_bucket.example.name + + rules = [{ + id = "rule_id_1" + + filter = { + and { + prefix = "logs/" + tags = [ + { + key = "tag1" + value = "value1" + } + ] + } + } + + expiration = { + days = 90 + } + + status = "Enabled" + }] +} +``` + +### Creating a Lifecycle Configuration for a bucket with versioning + +```hcl +resource "cloudavenue_s3_bucket" "example" { + name = "example" +} + +resource "cloudavenue_s3_bucket_versioning_configuration" "example" { + bucket = cloudavenue_s3_bucket.example.name + status = "Enabled" +} + +resource "cloudavenue_s3_bucket_lifecycle_configuration" "example" { + bucket = cloudavenue_s3_bucket_versioning_configuration.example.bucket + + rules = [{ + id = "rule_id_1" + + filter = { + prefix = "logs/" + } + + expiration { + days = 90 + } + + status = "Enabled" + }] +} +``` + + +## Schema + +### Required + +- `bucket` (String) (ForceNew) The name of the bucket. +- `rules` (Attributes List) Rules that define lifecycle configuration. List must contain at least 1 elements. (see [below for nested schema](#nestedatt--rules)) + +### Optional + +- `timeouts` (Attributes) (see [below for nested schema](#nestedatt--timeouts)) + +### Read-Only + +- `id` (String) The ID is a bucket name. + + +### Nested Schema for `rules` + +Required: + +- `filter` (Attributes) Configuration block used to identify objects that a Lifecycle Rule applies to. 
(see [below for nested schema](#nestedatt--rules--filter)) +- `id` (String) Unique identifier for the rule. String length must be between 1 and 255. + +Optional: + +- `abort_incomplete_multipart_upload` (Attributes) Configuration block that specifies the days since the initiation of an incomplete multipart upload that Amazon S3 will wait before permanently removing all parts of the upload. (see [below for nested schema](#nestedatt--rules--abort_incomplete_multipart_upload)) +- `expiration` (Attributes) Configuration block that specifies the expiration for the lifecycle of the object in the form of date, days and, whether the object has a delete marker. Ensure that if an attribute is set, these are not set: "[<.expiration,<.noncurrent_version_expiration]". (see [below for nested schema](#nestedatt--rules--expiration)) +- `noncurrent_version_expiration` (Attributes) Configuration block that specifies when noncurrent object versions expire. Ensure that if an attribute is set, these are not set: "[<.expiration,<.noncurrent_version_expiration]". (see [below for nested schema](#nestedatt--rules--noncurrent_version_expiration)) +- `status` (String) Whether the rule is currently being applied. Value must be one of : `Enabled`, `Disabled`. Value defaults to `Enabled`. + + +### Nested Schema for `rules.filter` + +Optional: + +- `and` (Attributes) Configuration block used to apply a logical AND to two or more predicates. The Lifecycle Rule will apply to any object matching all the predicates configured inside the and block. (see [below for nested schema](#nestedatt--rules--filter--and)) +- `prefix` (String) Match objects with this prefix. +- `tag` (Attributes) Specifies object tag key and value. (see [below for nested schema](#nestedatt--rules--filter--tag)) + + +### Nested Schema for `rules.filter.and` + +Optional: + +- `prefix` (String) Match objects with this prefix. +- `tags` (Attributes List) Specifies object tag key and value. 
(see [below for nested schema](#nestedatt--rules--filter--and--tags)) + + +### Nested Schema for `rules.filter.and.tags` + +Required: + +- `key` (String) Object tag key. +- `value` (String) Object tag value. + + + + +### Nested Schema for `rules.filter.tag` + +Required: + +- `key` (String) Object tag key. +- `value` (String) Object tag value. + + + + +### Nested Schema for `rules.abort_incomplete_multipart_upload` + +Optional: + +- `days_after_initiation` (Number) Number of days after which Amazon S3 aborts an incomplete multipart upload. + + + +### Nested Schema for `rules.expiration` + +Optional: + +- `date` (String) Date the object is to be moved or deleted. The date value must be in [RFC3339 full-date format](https://datatracker.ietf.org/doc/html/rfc3339#section-5.6) e.g. `2023-10-10T00:00:00Z`. Ensure that one and only one attribute from this collection is set : `date`, `days`, `expired_object_delete_marker`. +- `days` (Number) Lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer. Ensure that one and only one attribute from this collection is set : `date`, `days`, `expired_object_delete_marker`. +- `expired_object_delete_marker` (Boolean) Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to `true`, the delete marker will be expired; if set to `false` the policy takes no action. Ensure that one and only one attribute from this collection is set : `date`, `days`, `expired_object_delete_marker`. Ensure that if an attribute is set, these are not set: "[<.<.filter.tag,<.<.filter.and.tags]". + + + +### Nested Schema for `rules.noncurrent_version_expiration` + +Optional: + +- `newer_noncurrent_versions` (Number) Number of noncurrent versions Amazon S3 will retain. Value must be at least 0. +- `noncurrent_days` (Number) Number of days an object is noncurrent before Amazon S3 can perform the associated action. Must be a positive integer. Value must be at least 1. 
+ + + + +### Nested Schema for `timeouts` + +Optional: + +- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). +- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled. +- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + -> **Timeout** Default timeout is **5 minutes**. 
+ diff --git a/examples/data-sources/cloudavenue_s3_bucket_lifecycle_configuration/data-source.tf b/examples/data-sources/cloudavenue_s3_bucket_lifecycle_configuration/data-source.tf new file mode 100644 index 000000000..9546ddb59 --- /dev/null +++ b/examples/data-sources/cloudavenue_s3_bucket_lifecycle_configuration/data-source.tf @@ -0,0 +1,3 @@ +data "cloudavenue_s3_bucket_lifecycle_configuration" "example" { + bucket = cloudavenue_s3_bucket.example.name +} diff --git a/go.mod b/go.mod index d979e2e11..566d18849 100644 --- a/go.mod +++ b/go.mod @@ -5,8 +5,8 @@ go 1.20 require ( github.com/FrangipaneTeam/terraform-analytic-tool v0.0.12 github.com/FrangipaneTeam/terraform-plugin-framework-planmodifiers v1.3.4 - github.com/FrangipaneTeam/terraform-plugin-framework-superschema v1.5.5 - github.com/FrangipaneTeam/terraform-plugin-framework-supertypes v0.1.0 + github.com/FrangipaneTeam/terraform-plugin-framework-superschema v1.6.0 + github.com/FrangipaneTeam/terraform-plugin-framework-supertypes v0.2.0 github.com/FrangipaneTeam/terraform-plugin-framework-validators v1.8.1 github.com/antihax/optional v1.0.0 github.com/aws/aws-sdk-go v1.45.28 @@ -14,14 +14,14 @@ require ( github.com/google/uuid v1.3.1 github.com/hashicorp/aws-sdk-go-base v1.1.0 github.com/hashicorp/terraform-plugin-docs v0.16.0 - github.com/hashicorp/terraform-plugin-framework v1.4.1 + github.com/hashicorp/terraform-plugin-framework v1.4.2 github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 github.com/hashicorp/terraform-plugin-go v0.19.0 github.com/hashicorp/terraform-plugin-log v0.9.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.29.0 github.com/iancoleman/strcase v0.3.0 - github.com/orange-cloudavenue/cloudavenue-sdk-go v0.3.0 + github.com/orange-cloudavenue/cloudavenue-sdk-go v0.3.1 github.com/orange-cloudavenue/infrapi-sdk-go v0.1.4-0.20231005074857-89878ea119fb github.com/rs/zerolog v1.31.0 github.com/thanhpk/randstr 
v1.0.6 @@ -52,7 +52,7 @@ require ( github.com/go-redis/redis/v8 v8.11.5 // indirect github.com/go-resty/resty/v2 v2.10.0 // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect diff --git a/go.sum b/go.sum index 94fb5a7c3..0c015b934 100644 --- a/go.sum +++ b/go.sum @@ -3,10 +3,10 @@ github.com/FrangipaneTeam/terraform-analytic-tool v0.0.12 h1:rbh0EtyILnuyu07RuOh github.com/FrangipaneTeam/terraform-analytic-tool v0.0.12/go.mod h1:j3TxedNm9WrKKseOSBKxnNtquuXa0FQChO/QdcvPKtg= github.com/FrangipaneTeam/terraform-plugin-framework-planmodifiers v1.3.4 h1:FGb+DIj8AtUehYLt7b0t1rjqK/7sggkoZwR4tqdQjcc= github.com/FrangipaneTeam/terraform-plugin-framework-planmodifiers v1.3.4/go.mod h1:ngKE3iJWLWPLZ64umTr5ndnqd9mvMxHUZPk9pCkGSJY= -github.com/FrangipaneTeam/terraform-plugin-framework-superschema v1.5.5 h1:O53/WxECIU5QYDv4ORW/qX2wyHDfrCTApM6dc3L6hrQ= -github.com/FrangipaneTeam/terraform-plugin-framework-superschema v1.5.5/go.mod h1:X5jERZJ0j8vkPPYUDa0JNJlaF2nb01VBYvxSPTUod18= -github.com/FrangipaneTeam/terraform-plugin-framework-supertypes v0.1.0 h1:QHcnFyAQy/yWcBargdCt8Eprnh7G7vOYOw7z/4LOlM8= -github.com/FrangipaneTeam/terraform-plugin-framework-supertypes v0.1.0/go.mod h1:GGgBv/UI8efmEok0VJWSxp3m6GdkETqzJf5SWH83aDs= +github.com/FrangipaneTeam/terraform-plugin-framework-superschema v1.6.0 h1:G2StO1BSiYMAVVynB6Vsy64rn+UZa0VpZwOvDk9IQ+g= +github.com/FrangipaneTeam/terraform-plugin-framework-superschema v1.6.0/go.mod h1:Rro2AUhgh2SHp3P8cu+pQinotCbkohb7MM4nZ15nP8M= +github.com/FrangipaneTeam/terraform-plugin-framework-supertypes v0.2.0 h1:lcJY8AEbpbYp/M/jPUdYZArsZKAkM2LECSYDfKOiIiQ= +github.com/FrangipaneTeam/terraform-plugin-framework-supertypes v0.2.0/go.mod h1:Aux7edspqsudNKr9YFgCkAgUwFj44RyOV123XolOzjY= 
github.com/FrangipaneTeam/terraform-plugin-framework-validators v1.8.1 h1:C17IEM0/4sxsTN0IpwDdgncea/cxfZVlWESIWvlEuBo= github.com/FrangipaneTeam/terraform-plugin-framework-validators v1.8.1/go.mod h1:GkF0MeJVDma0RHglpXxSzZ3yv7ftneMgI1KwHrJBOWA= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= @@ -95,8 +95,8 @@ github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= @@ -138,8 +138,8 @@ github.com/hashicorp/terraform-json v0.17.1 h1:eMfvh/uWggKmY7Pmb3T85u86E2EQg6EQH github.com/hashicorp/terraform-json v0.17.1/go.mod h1:Huy6zt6euxaY9knPAFKjUITn8QxUFIe9VuSzb4zn/0o= github.com/hashicorp/terraform-plugin-docs v0.16.0 h1:UmxFr3AScl6Wged84jndJIfFccGyBZn52KtMNsS12dI= github.com/hashicorp/terraform-plugin-docs v0.16.0/go.mod h1:M3ZrlKBJAbPMtNOPwHicGi1c+hZUh7/g0ifT/z7TVfA= -github.com/hashicorp/terraform-plugin-framework v1.4.1 h1:ZC29MoB3Nbov6axHdgPbMz7799pT5H8kIrM8YAsaVrs= -github.com/hashicorp/terraform-plugin-framework v1.4.1/go.mod h1:XC0hPcQbBvlbxwmjxuV/8sn8SbZRg4XwGMs22f+kqV0= +github.com/hashicorp/terraform-plugin-framework v1.4.2 
h1:P7a7VP1GZbjc4rv921Xy5OckzhoiO3ig6SGxwelD2sI= +github.com/hashicorp/terraform-plugin-framework v1.4.2/go.mod h1:GWl3InPFZi2wVQmdVnINPKys09s9mLmTZr95/ngLnbY= github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 h1:gm5b1kHgFFhaKFhm4h2TgvMUlNzFAtUqlcOWnWPm+9E= github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1/go.mod h1:MsjL1sQ9L7wGwzJ5RjcI6FzEMdyoBnw+XK8ZnOvQOLY= github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 h1:HOjBuMbOEzl7snOdOoUfE2Jgeto6JOjLVQ39Ls2nksc= @@ -228,8 +228,8 @@ github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= -github.com/orange-cloudavenue/cloudavenue-sdk-go v0.3.0 h1:S5ZOzQ7Iyk6/iAebdVH+H/gNs5EokUG/27l2z4TMb4w= -github.com/orange-cloudavenue/cloudavenue-sdk-go v0.3.0/go.mod h1:XyfayrWLUBIxh49bIZUnJUUo0B2E7Vloxvo+6Zn0K2w= +github.com/orange-cloudavenue/cloudavenue-sdk-go v0.3.1 h1:pLR9VPuZznhFOrYBcB6FYoMjzmm+FMy1DaIPHG+hAhc= +github.com/orange-cloudavenue/cloudavenue-sdk-go v0.3.1/go.mod h1:XyfayrWLUBIxh49bIZUnJUUo0B2E7Vloxvo+6Zn0K2w= github.com/orange-cloudavenue/infrapi-sdk-go v0.1.4-0.20231005074857-89878ea119fb h1:1/Wc21Tp9RnDOUTjKBm9x3wi+UgUkDc2bv0fHJc5f2o= github.com/orange-cloudavenue/infrapi-sdk-go v0.1.4-0.20231005074857-89878ea119fb/go.mod h1:pGa9mB6s+weCi5QtNe5nicp7yL0C/e+i+3wHRh4cjBE= github.com/peterhellberg/link v1.2.0 h1:UA5pg3Gp/E0F2WdX7GERiNrPQrM1K6CVJUUWfHa4t6c= @@ -271,7 +271,7 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= 
-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/thanhpk/randstr v1.0.6 h1:psAOktJFD4vV9NEVb3qkhRSMvYh4ORRaj1+w/hn4B+o= github.com/thanhpk/randstr v1.0.6/go.mod h1:M/H2P1eNLZzlDwAzpkkkUvoyNNMbzRGhESZuEQk3r0U= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= diff --git a/internal/helpers/testsacc/testsacc.go b/internal/helpers/testsacc/testsacc.go index 07c6b18ca..e535968c4 100644 --- a/internal/helpers/testsacc/testsacc.go +++ b/internal/helpers/testsacc/testsacc.go @@ -19,6 +19,11 @@ type ( ListOfDependencies []ResourceName DependenciesConfigResponse []func() TFData + // TODO : Add TFACCLog management. + TFACCLog struct { + Level string `env:"LEVEL,default=info"` + } + TestACC interface { // GetResourceName returns the name of the resource under test. GetResourceName() string @@ -46,6 +51,9 @@ type ( // Import returns the Terraform configurations to use for the import test. Imports []TFImport + // Destroy will create a destroy plan if set to true. + Destroy bool + // CacheDependenciesConfig is used to cache the dependencies config. CacheDependenciesConfig TFData @@ -306,12 +314,16 @@ func (t Test) GenerateSteps(ctx context.Context, testName TestName, testACC Test listOfChecks := t.CommonChecks listOfChecks = append(listOfChecks, t.Create.Checks...) + // lastConfigGenerated is the last Terraform configuration generated. 
(Used for destroy step) + var lastConfigGenerated string + // * Compute dependencies config t.ComputeDependenciesConfig(testACC) // * Create step + lastConfigGenerated = t.Create.Generate(ctx, t.CacheDependenciesConfig) createTestStep := resource.TestStep{ - Config: t.Create.Generate(ctx, t.CacheDependenciesConfig), + Config: lastConfigGenerated, Check: resource.ComposeAggregateTestCheckFunc( listOfChecks..., ), @@ -336,8 +348,9 @@ func (t Test) GenerateSteps(ctx context.Context, testName TestName, testACC Test listOfChecks := t.CommonChecks listOfChecks = append(listOfChecks, update.Checks...) + lastConfigGenerated = update.Generate(ctx, t.CacheDependenciesConfig) updateTestStep := resource.TestStep{ - Config: update.Generate(ctx, t.CacheDependenciesConfig), + Config: lastConfigGenerated, Check: resource.ComposeAggregateTestCheckFunc( listOfChecks..., ), @@ -379,6 +392,16 @@ func (t Test) GenerateSteps(ctx context.Context, testName TestName, testACC Test } } + // * Destroy step + if t.Destroy { + destroyTestStep := resource.TestStep{ + Config: lastConfigGenerated, + Destroy: true, + } + + steps = append(steps, destroyTestStep) + } + return } diff --git a/internal/provider/provider_datasources.go b/internal/provider/provider_datasources.go index 1b7823518..9e8249bac 100644 --- a/internal/provider/provider_datasources.go +++ b/internal/provider/provider_datasources.go @@ -89,5 +89,6 @@ func (p *cloudavenueProvider) DataSources(_ context.Context) []func() datasource s3.NewBucketDataSource, s3.NewBucketVersioningConfigurationDatasource, s3.NewBucketCorsConfigurationDatasource, + s3.NewBucketLifecycleConfigurationDataSource, } } diff --git a/internal/provider/provider_resources.go b/internal/provider/provider_resources.go index edc2f0a45..b5f102ac1 100644 --- a/internal/provider/provider_resources.go +++ b/internal/provider/provider_resources.go @@ -82,5 +82,6 @@ func (p *cloudavenueProvider) Resources(_ context.Context) []func() resource.Res 
s3.NewBucketVersioningConfigurationResource, s3.NewBucketResource, s3.NewBucketCorsConfigurationResource, + s3.NewBucketLifecycleConfigurationResource, } } diff --git a/internal/provider/s3/base.go b/internal/provider/s3/base.go index 4e659274d..1e56626cb 100644 --- a/internal/provider/s3/base.go +++ b/internal/provider/s3/base.go @@ -75,11 +75,16 @@ type RetryWhenConfig[T any] struct { Function func() (T, error) } +const ( + lifeCycleStatusEnabled = "Enabled" + lifeCycleStatusDisabled = "Disabled" +) + var ErrRetryWhenTimeout = errors.New("timeout reached") // retryWhen executes the function passed in the configuration object until the timeout is reached or the context is cancelled. // It will retry if the shouldRetry function returns true. It will stop if the shouldRetry function returns false. -func retryWhen[T any](ctx context.Context, config *RetryWhenConfig[T], shouldRetry func(error) bool) (T, error) { //nolint: ireturn,unused +func retryWhen[T any](ctx context.Context, config *RetryWhenConfig[T], shouldRetry func(error) bool) (T, error) { //nolint: ireturn retryInterval := config.Interval if DefaultWaitRetryInterval != nil { retryInterval = *DefaultWaitRetryInterval @@ -106,7 +111,7 @@ func retryWhen[T any](ctx context.Context, config *RetryWhenConfig[T], shouldRet } // retryWhenAWSErrCodeEquals retries a function when it returns a specific AWS error. -func retryWhenAWSErrCodeEquals[T any](ctx context.Context, codes []string, config *RetryWhenConfig[T]) (T, error) { //nolint: ireturn,unused +func retryWhenAWSErrCodeEquals[T any](ctx context.Context, codes []string, config *RetryWhenConfig[T]) (T, error) { //nolint: ireturn return retryWhen(ctx, config, func(err error) bool { return tfawserr.ErrCodeEquals(err, codes...) 
}) diff --git a/internal/provider/s3/bucket_lifecycle_configuration_datasource.go b/internal/provider/s3/bucket_lifecycle_configuration_datasource.go new file mode 100644 index 000000000..9fbbe8633 --- /dev/null +++ b/internal/provider/s3/bucket_lifecycle_configuration_datasource.go @@ -0,0 +1,103 @@ +// Package s3 provides a Terraform datasource. +package s3 + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/service/s3" + + "github.com/hashicorp/terraform-plugin-framework/diag" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + + "github.com/orange-cloudavenue/terraform-provider-cloudavenue/internal/client" + "github.com/orange-cloudavenue/terraform-provider-cloudavenue/internal/metrics" +) + +var ( + _ datasource.DataSource = &BucketLifecycleConfigurationDataSource{} + _ datasource.DataSourceWithConfigure = &BucketLifecycleConfigurationDataSource{} +) + +func NewBucketLifecycleConfigurationDataSource() datasource.DataSource { + return &BucketLifecycleConfigurationDataSource{} +} + +type BucketLifecycleConfigurationDataSource struct { + client *client.CloudAvenue + + s3Client *s3.S3 +} + +// Init Initializes the data source. 
+func (d *BucketLifecycleConfigurationDataSource) Init(ctx context.Context, dm *BucketLifecycleConfigurationDatasourceModel) (diags diag.Diagnostics) { + d.s3Client = d.client.CAVSDK.V1.S3() + + return +} + +func (d *BucketLifecycleConfigurationDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_" + categoryName + "_bucket_lifecycle_configuration" +} + +func (d *BucketLifecycleConfigurationDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = bucketLifecycleConfigurationSchema(ctx).GetDataSource(ctx) +} + +func (d *BucketLifecycleConfigurationDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(*client.CloudAvenue) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *client.CloudAvenue, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + d.client = client +} + +func (d *BucketLifecycleConfigurationDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + defer metrics.New("data.cloudavenue_s3_bucket_lifecycle_configuration", d.client.GetOrgName(), metrics.Read)() + + config := &BucketLifecycleConfigurationDatasourceModel{} + + // Read Terraform configuration data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, config)...) + if resp.Diagnostics.HasError() { + return + } + + // Init the resource + resp.Diagnostics.Append(d.Init(ctx, config)...) + if resp.Diagnostics.HasError() { + return + } + + /* + Implement the data source read logic here. 
+ */ + + data, _, diags := genericReadLifeCycleConfiguration(ctx, &readLifeCycleConfigurationConfig[*BucketLifecycleConfigurationDatasourceModel]{ + Client: d.s3Client, + Timeout: func() (time.Duration, diag.Diagnostics) { + return config.Timeouts.Read(ctx, defaultReadTimeout) + }, + BucketName: func() *string { + return config.Bucket.GetPtr() + }, + }, config) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} diff --git a/internal/provider/s3/bucket_lifecycle_configuration_resource.go b/internal/provider/s3/bucket_lifecycle_configuration_resource.go new file mode 100644 index 000000000..ed7cad405 --- /dev/null +++ b/internal/provider/s3/bucket_lifecycle_configuration_resource.go @@ -0,0 +1,524 @@ +package s3 + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + + "github.com/hashicorp/terraform-plugin-framework/resource" + + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + + "github.com/orange-cloudavenue/terraform-provider-cloudavenue/internal/client" + "github.com/orange-cloudavenue/terraform-provider-cloudavenue/internal/metrics" + "github.com/orange-cloudavenue/terraform-provider-cloudavenue/pkg/utils" +) + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ resource.Resource = &BucketLifecycleConfigurationResource{} + _ resource.ResourceWithConfigure = &BucketLifecycleConfigurationResource{} + _ resource.ResourceWithImportState = &BucketLifecycleConfigurationResource{} + // _ resource.ResourceWithModifyPlan = &BucketLifecycleConfigurationResource{} + // _ resource.ResourceWithUpgradeState = &BucketLifecycleConfigurationResource{} + // _ resource.ResourceWithValidateConfig = &BucketLifecycleConfigurationResource{}. +) + +// NewBucketLifecycleConfigurationResource is a helper function to simplify the provider implementation. +func NewBucketLifecycleConfigurationResource() resource.Resource { + return &BucketLifecycleConfigurationResource{} +} + +// BucketLifecycleConfigurationResource is the resource implementation. +type BucketLifecycleConfigurationResource struct { + client *client.CloudAvenue + + s3Client *s3.S3 +} + +// If the resource don't have same schema/structure as the data source, you can use the following code: +// type BucketLifecycleConfigurationResourceModel struct { +// ID types.String `tfsdk:"id"` +// } + +// Init Initializes the resource. +func (r *BucketLifecycleConfigurationResource) Init(ctx context.Context, rm *BucketLifecycleConfigurationModel) (diags diag.Diagnostics) { + r.s3Client = r.client.CAVSDK.V1.S3() + + return +} + +// Metadata returns the resource type name. +func (r *BucketLifecycleConfigurationResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_" + categoryName + "_bucket_lifecycle_configuration" +} + +// Schema defines the schema for the resource. 
+func (r *BucketLifecycleConfigurationResource) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = bucketLifecycleConfigurationSchema(ctx).GetResource(ctx) +} + +func (r *BucketLifecycleConfigurationResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(*client.CloudAvenue) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Resource Configure Type", + fmt.Sprintf("Expected *client.CloudAvenue, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + r.client = client +} + +// Create creates the resource and sets the initial Terraform state. +func (r *BucketLifecycleConfigurationResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + defer metrics.New("cloudavenue_s3_bucket_lifecycle_configuration", r.client.GetOrgName(), metrics.Create)() + + plan := &BucketLifecycleConfigurationModel{} + + // Retrieve values from plan + resp.Diagnostics.Append(req.Plan.Get(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } + + // Init the resource + resp.Diagnostics.Append(r.Init(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } + + /* + Implement the resource creation logic here. + */ + + createTimeout, diags := plan.Timeouts.Create(ctx, defaultCreateTimeout) + diags.Append(diags...) + if diags.HasError() { + return + } + + ctx, cancel := context.WithTimeout(ctx, createTimeout) + defer cancel() + + // use generic createOrUpdate function + resp.Diagnostics.Append(r.createOrUpdateLifeCycle(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } + + // Use generic read function to refresh the state + state, _, d := r.read(ctx, plan) + if d.HasError() { + resp.Diagnostics.Append(d...) 
+ return + } + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, state)...) +} + +// Read refreshes the Terraform state with the latest data. +func (r *BucketLifecycleConfigurationResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + defer metrics.New("cloudavenue_s3_bucket_lifecycle_configuration", r.client.GetOrgName(), metrics.Read)() + + state := &BucketLifecycleConfigurationModel{} + + // Get current state + resp.Diagnostics.Append(req.State.Get(ctx, state)...) + if resp.Diagnostics.HasError() { + return + } + + // Init the resource + resp.Diagnostics.Append(r.Init(ctx, state)...) + if resp.Diagnostics.HasError() { + return + } + + // Refresh the state + stateRefreshed, found, d := r.read(ctx, state) + if !found { + resp.State.RemoveResource(ctx) + return + } + if d.HasError() { + resp.Diagnostics.Append(d...) + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, stateRefreshed)...) +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *BucketLifecycleConfigurationResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + defer metrics.New("cloudavenue_s3_bucket_lifecycle_configuration", r.client.GetOrgName(), metrics.Update)() + + var ( + plan = &BucketLifecycleConfigurationModel{} + state = &BucketLifecycleConfigurationModel{} + ) + + // Get current plan and state + resp.Diagnostics.Append(req.Plan.Get(ctx, plan)...) + resp.Diagnostics.Append(req.State.Get(ctx, state)...) + if resp.Diagnostics.HasError() { + return + } + + // Init the resource + resp.Diagnostics.Append(r.Init(ctx, state)...) + if resp.Diagnostics.HasError() { + return + } + + /* + Implement the resource update here + */ + + updateTimeout, diags := plan.Timeouts.Update(ctx, defaultUpdateTimeout) + diags.Append(diags...) 
+ if diags.HasError() { + return + } + + ctx, cancel := context.WithTimeout(ctx, updateTimeout) + defer cancel() + + // use generic createOrUpdate function + resp.Diagnostics.Append(r.createOrUpdateLifeCycle(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } + + // Use generic read function to refresh the state + stateRefreshed, _, d := r.read(ctx, plan) + if d.HasError() { + resp.Diagnostics.Append(d...) + return + } + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, stateRefreshed)...) +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *BucketLifecycleConfigurationResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + defer metrics.New("cloudavenue_s3_bucket_lifecycle_configuration", r.client.GetOrgName(), metrics.Delete)() + + state := &BucketLifecycleConfigurationModel{} + + // Get current state + resp.Diagnostics.Append(req.State.Get(ctx, state)...) + if resp.Diagnostics.HasError() { + return + } + + // Init the resource + resp.Diagnostics.Append(r.Init(ctx, state)...) + if resp.Diagnostics.HasError() { + return + } + + /* + Implement the resource deletion here + */ + + deleteTimeout, diags := state.Timeouts.Update(ctx, defaultUpdateTimeout) + diags.Append(diags...) 
+ if diags.HasError() { + return + } + + ctx, cancel := context.WithTimeout(ctx, deleteTimeout) + defer cancel() + + if _, err := r.s3Client.DeleteBucketLifecycleWithContext(ctx, &s3.DeleteBucketLifecycleInput{ + Bucket: state.Bucket.GetPtr(), + }); err != nil { + resp.Diagnostics.AddError("Error deleting S3 Bucket Lifecycle Configuration", err.Error()) + } +} + +func (r *BucketLifecycleConfigurationResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + defer metrics.New("cloudavenue_s3_bucket_lifecycle_configuration", r.client.GetOrgName(), metrics.Import)() + + // * Import basic + resource.ImportStatePassthroughID(ctx, path.Root("bucket"), req, resp) +} + +// * CustomFuncs + +// read is a generic read function that can be used by the resource Create, Read and Update functions. +func (r *BucketLifecycleConfigurationResource) read(ctx context.Context, planOrState *BucketLifecycleConfigurationModel) (stateRefreshed *BucketLifecycleConfigurationModel, found bool, diags diag.Diagnostics) { + return genericReadLifeCycleConfiguration(ctx, &readLifeCycleConfigurationConfig[*BucketLifecycleConfigurationModel]{ + Client: r.s3Client, + Timeout: func() (time.Duration, diag.Diagnostics) { + return planOrState.Timeouts.Read(ctx, defaultReadTimeout) + }, + BucketName: func() *string { + return planOrState.Bucket.GetPtr() + }, + }, planOrState) +} + +// createOrUpdate is a generic create or update function that can be used by the resource Create and Update functions. +func (r *BucketLifecycleConfigurationResource) createOrUpdateLifeCycle(ctx context.Context, planOrState *BucketLifecycleConfigurationModel) (diags diag.Diagnostics) { + /* + Implement the resource create or update here + */ + // lifeCycleRules := &BucketLifecycleConfigurationModelRules{} + // diags.Append(planOrState.Rules.Get(ctx, lifeCycleRules, false)...) 
+ // if diags.HasError() { + // return + // } + + lifeCycleRules, d := planOrState.Rules.Get(ctx) + diags.Append(d...) + if diags.HasError() { + return + } + + rules := make([]*s3.LifecycleRule, 0) + + for _, rule := range lifeCycleRules { + s3Rule := &s3.LifecycleRule{ + ID: rule.ID.GetPtr(), + Status: rule.Status.GetPtr(), + } + + // * AbortIncompleteMultipartUpload + if rule.AbortIncompleteMultipartUpload.IsKnown() { + abortIncompleteMultipartUpload, d := rule.AbortIncompleteMultipartUpload.Get(ctx) + diags.Append(d...) + if diags.HasError() { + return + } + + s3Rule.AbortIncompleteMultipartUpload = &s3.AbortIncompleteMultipartUpload{ + DaysAfterInitiation: abortIncompleteMultipartUpload.DaysAfterInitiation.GetPtr(), + } + } + + // * Expiration + if rule.Expiration.IsKnown() { + expiration, d := rule.Expiration.Get(ctx) + diags.Append(d...) + if diags.HasError() { + return + } + + s3Rule.Expiration = &s3.LifecycleExpiration{} + + if expiration.Days.IsKnown() { + s3Rule.Expiration.Days = expiration.Days.GetPtr() + } + + if expiration.ExpiredObjectDeleteMarker.IsKnown() { + s3Rule.Expiration.ExpiredObjectDeleteMarker = expiration.ExpiredObjectDeleteMarker.GetPtr() + } + + if expiration.Date.IsKnown() { + t, err := time.Parse(time.RFC3339, expiration.Date.Get()) + if err != nil { + diags.AddError("Error parsing S3 Bucket Lifecycle Rule Expiration date", err.Error()) + return + } + s3Rule.Expiration.Date = utils.TakePointer(t) + } + } + + // * Filter + if rule.Filter.IsKnown() { + filter, d := rule.Filter.Get(ctx) + diags.Append(d...) + if diags.HasError() { + return + } + + s3Rule.Filter = &s3.LifecycleRuleFilter{ + Prefix: filter.Prefix.GetPtr(), + } + + // ? Tag + if filter.Tag.IsKnown() { + tag, d := filter.Tag.Get(ctx) + diags.Append(d...) + if diags.HasError() { + return + } + + s3Rule.Filter.Tag = &s3.Tag{ + Key: tag.Key.GetPtr(), + Value: tag.Value.GetPtr(), + } + } + + // ? And + if !filter.And.IsNull() { + and, d := filter.And.Get(ctx) + diags.Append(d...) 
+ if diags.HasError() { + return + } + + s3Rule.Filter.And = &s3.LifecycleRuleAndOperator{ + Prefix: and.Prefix.GetPtr(), + } + + // ? And.Tags + if and.Tags.IsKnown() { + tags, d := and.Tags.Get(ctx) + diags.Append(d...) + if diags.HasError() { + return + } + + s3Rule.Filter.And.Tags = make([]*s3.Tag, 0) + + for _, t := range tags { + s3Rule.Filter.And.Tags = append(s3Rule.Filter.And.Tags, &s3.Tag{ + Key: t.Key.GetPtr(), + Value: t.Value.GetPtr(), + }) + } + } + } + } + + // * NoncurrentVersionExpiration + if rule.NoncurrentVersionExpiration.IsKnown() { + noncurrentVersionExpiration, d := rule.NoncurrentVersionExpiration.Get(ctx) + diags.Append(d...) + if diags.HasError() { + return + } + + s3Rule.NoncurrentVersionExpiration = &s3.NoncurrentVersionExpiration{ + NoncurrentDays: noncurrentVersionExpiration.NoncurrentDays.GetPtr(), + NewerNoncurrentVersions: noncurrentVersionExpiration.NewerNoncurrentVersions.GetPtr(), + } + } + + rules = append(rules, s3Rule) + } + + input := &s3.PutBucketLifecycleConfigurationInput{ + Bucket: planOrState.Bucket.GetPtr(), + LifecycleConfiguration: &s3.BucketLifecycleConfiguration{ + Rules: rules, + }, + } + + createTimeout, d := planOrState.Timeouts.Create(ctx, defaultCreateTimeout) + if d.HasError() { + diags.Append(d...) 
+ return + } + + if _, err := retryWhenAWSErrCodeEquals(ctx, []string{s3.ErrCodeNoSuchBucket}, &RetryWhenConfig[*s3.PutBucketLifecycleConfigurationOutput]{ + Timeout: createTimeout, + Interval: 15 * time.Second, + Function: func() (*s3.PutBucketLifecycleConfigurationOutput, error) { + return r.s3Client.PutBucketLifecycleConfigurationWithContext(ctx, input) + }, + }); err != nil { + diags.AddError("Error putting S3 Bucket Lifecycle Configuration", err.Error()) + return + } + + if err := waitForLifecycleConfigurationRulesStatus(ctx, r.s3Client, planOrState.Bucket.Get(), rules); err != nil { + diags.AddError("Error waiting for S3 Lifecycle Configuration for bucket to reach expected rules status", err.Error()) + } + + return +} + +const ( + lifecycleConfigurationExtraRetryDelay = 5 * time.Second + lifecycleConfigurationRulesPropagationTimeout = 10 * time.Minute + lifecycleConfigurationRulesSteadyTimeout = 2 * time.Minute + + // General timeout for S3 bucket changes to propagate. + // See https://docs.aws.amazon.com/AmazonS3/latest/userguide/Welcome.html#ConsistencyModel. + s3BucketPropagationTimeout = 2 * time.Minute // nosemgrep:ci.s3-in-const-name, ci.s3-in-var-name + + // LifecycleConfigurationRulesStatusReady occurs when all configured rules reach their desired state (Enabled or Disabled). + LifecycleConfigurationRulesStatusReady = "READY" + // LifecycleConfigurationRulesStatusNotReady occurs when all configured rules have not reached their desired state (Enabled or Disabled). 
+ LifecycleConfigurationRulesStatusNotReady = "NOT_READY" +) + +func waitForLifecycleConfigurationRulesStatus(ctx context.Context, conn *s3.S3, bucket string, rules []*s3.LifecycleRule) error { + stateConf := &retry.StateChangeConf{ + Pending: []string{"", LifecycleConfigurationRulesStatusNotReady}, + Target: []string{LifecycleConfigurationRulesStatusReady}, + Refresh: lifecycleConfigurationRulesStatus(ctx, conn, bucket, rules), + Timeout: lifecycleConfigurationRulesPropagationTimeout, + MinTimeout: 10 * time.Second, + ContinuousTargetOccurence: 3, + NotFoundChecks: 20, + } + + _, err := stateConf.WaitForStateContext(ctx) + + return err +} + +func lifecycleConfigurationRulesStatus(ctx context.Context, conn *s3.S3, bucket string, rules []*s3.LifecycleRule) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + input := &s3.GetBucketLifecycleConfigurationInput{ + Bucket: aws.String(bucket), + } + + output, err := conn.GetBucketLifecycleConfigurationWithContext(ctx, input) + + if tfawserr.ErrCodeEquals(err, ErrCodeNoSuchLifecycleConfiguration, s3.ErrCodeNoSuchBucket) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + if output == nil { + return nil, "", &retry.NotFoundError{ + Message: "Empty result", + LastRequest: input, + } + } + + for _, expectedRule := range rules { + found := false + + for _, actualRule := range output.Rules { + if aws.StringValue(actualRule.ID) != aws.StringValue(expectedRule.ID) { + continue + } + found = true + if aws.StringValue(actualRule.Status) != aws.StringValue(expectedRule.Status) { + return output, LifecycleConfigurationRulesStatusNotReady, nil + } + } + + if !found { + return output, LifecycleConfigurationRulesStatusNotReady, nil + } + } + + return output, LifecycleConfigurationRulesStatusReady, nil + } +} diff --git a/internal/provider/s3/bucket_lifecycle_configuration_schema.go b/internal/provider/s3/bucket_lifecycle_configuration_schema.go new file mode 100644 index 
000000000..64b6db7ba --- /dev/null +++ b/internal/provider/s3/bucket_lifecycle_configuration_schema.go @@ -0,0 +1,348 @@ +package s3 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + + schemaD "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + schemaR "github.com/hashicorp/terraform-plugin-framework/resource/schema" + + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + + "github.com/hashicorp/terraform-plugin-framework-validators/boolvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/objectvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + + superschema "github.com/FrangipaneTeam/terraform-plugin-framework-superschema" +) + +func bucketLifecycleConfigurationSchema(_ context.Context) superschema.Schema { + return superschema.Schema{ + Resource: superschema.SchemaDetails{ + MarkdownDescription: "The `cloudavenue_s3_bucket_lifecycle_configuration` resource allows you to manage ...", + }, + DataSource: superschema.SchemaDetails{ + MarkdownDescription: "The `cloudavenue_s3_bucket_lifecycle_configuration` data source allows you to retrieve information about an ...", + }, + Attributes: map[string]superschema.Attribute{ + "timeouts": superschema.TimeoutAttribute{ + Resource: &superschema.ResourceTimeoutAttribute{ + Create: true, + Update: true, + Delete: true, + Read: true, + }, + DataSource: &superschema.DatasourceTimeoutAttribute{ + Read: true, + }, + }, + "id": superschema.SuperStringAttribute{ + Common: &schemaR.StringAttribute{ + 
Computed: true, + MarkdownDescription: "The ID is a bucket name.", + }, + }, + "bucket": superschema.SuperStringAttribute{ + Common: &schemaR.StringAttribute{ + MarkdownDescription: "The name of the bucket.", + Required: true, + }, + Resource: &schemaR.StringAttribute{ + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + "rules": superschema.SuperListNestedAttributeOf[BucketLifecycleConfigurationModelRule]{ + Common: &schemaR.ListNestedAttribute{ + MarkdownDescription: "Rules that define lifecycle configuration.", + }, + Resource: &schemaR.ListNestedAttribute{ + Required: true, + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + }, + DataSource: &schemaD.ListNestedAttribute{ + Computed: true, + }, + Attributes: superschema.Attributes{ + "abort_incomplete_multipart_upload": superschema.SuperSingleNestedAttributeOf[BucketLifecycleConfigurationModelAbortIncompleteMultipartUpload]{ + Common: &schemaR.SingleNestedAttribute{ + MarkdownDescription: "Configuration block that specifies the days since the initiation of an incomplete multipart upload that Amazon S3 will wait before permanently removing all parts of the upload", + }, + Resource: &schemaR.SingleNestedAttribute{ + Optional: true, + }, + DataSource: &schemaD.SingleNestedAttribute{ + Computed: true, + }, + Attributes: superschema.Attributes{ + "days_after_initiation": superschema.SuperInt64Attribute{ + Common: &schemaR.Int64Attribute{ + MarkdownDescription: "Number of days after which Amazon S3 aborts an incomplete multipart upload.", + Optional: true, + }, + }, + }, + }, + "expiration": superschema.SuperSingleNestedAttributeOf[BucketLifecycleConfigurationModelExpiration]{ + Common: &schemaR.SingleNestedAttribute{ + MarkdownDescription: "Configuration block that specifies the expiration for the lifecycle of the object in the form of date, days and, whether the object has a delete marker.", + }, + Resource: &schemaR.SingleNestedAttribute{ + Optional: 
true, + Validators: []validator.Object{ + objectvalidator.ConflictsWith(path.MatchRelative().AtParent().AtName("expiration"), path.MatchRelative().AtParent().AtName("noncurrent_version_expiration")), + }, + }, + DataSource: &schemaD.SingleNestedAttribute{ + Computed: true, + }, + Attributes: superschema.Attributes{ + "date": superschema.SuperStringAttribute{ + Common: &schemaR.StringAttribute{ + MarkdownDescription: "Date the object is to be moved or deleted. The date value must be in [RFC3339 full-date format](https://datatracker.ietf.org/doc/html/rfc3339#section-5.6) e.g. `2023-10-10T00:00:00Z`", + }, + Resource: &schemaR.StringAttribute{ + Optional: true, + Validators: []validator.String{ + stringvalidator.ExactlyOneOf(path.MatchRelative().AtParent().AtName("date"), path.MatchRelative().AtParent().AtName("days"), path.MatchRelative().AtParent().AtName("expired_object_delete_marker")), + // TODO : Add validator for RFC3339 full-date format. + }, + }, + DataSource: &schemaD.StringAttribute{ + Computed: true, + }, + }, + "days": superschema.SuperInt64Attribute{ + Common: &schemaR.Int64Attribute{ + MarkdownDescription: "Lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer.", + Computed: true, // API return 0 if not set. + }, + Resource: &schemaR.Int64Attribute{ + Optional: true, + Validators: []validator.Int64{ + int64validator.ExactlyOneOf(path.MatchRelative().AtParent().AtName("date"), path.MatchRelative().AtParent().AtName("days"), path.MatchRelative().AtParent().AtName("expired_object_delete_marker")), + }, + }, + }, + "expired_object_delete_marker": superschema.SuperBoolAttribute{ + Common: &schemaR.BoolAttribute{ + MarkdownDescription: "Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to `true`, the delete marker will be expired; if set to `false` the policy takes no action", + Computed: true, // API return false if not set. 
+ }, + Resource: &schemaR.BoolAttribute{ + Optional: true, + Validators: []validator.Bool{ + boolvalidator.ExactlyOneOf(path.MatchRelative().AtParent().AtName("date"), path.MatchRelative().AtParent().AtName("days"), path.MatchRelative().AtParent().AtName("expired_object_delete_marker")), + boolvalidator.ConflictsWith(path.MatchRelative().AtParent().AtParent().AtName("filter").AtName("tag"), path.MatchRelative().AtParent().AtParent().AtName("filter").AtName("and").AtName("tags")), + }, + }, + }, + }, + }, + "filter": superschema.SuperSingleNestedAttributeOf[BucketLifecycleConfigurationModelFilter]{ + Common: &schemaR.SingleNestedAttribute{ + MarkdownDescription: "Configuration block used to identify objects that a Lifecycle Rule applies to.", + }, + Resource: &schemaR.SingleNestedAttribute{ + Required: true, + }, + DataSource: &schemaD.SingleNestedAttribute{ + Computed: true, + }, + Attributes: superschema.Attributes{ + "and": superschema.SuperSingleNestedAttributeOf[BucketLifecycleConfigurationModelAnd]{ + Common: &schemaR.SingleNestedAttribute{ + MarkdownDescription: "Configuration block used to apply a logical AND to two or more predicates. 
The Lifecycle Rule will apply to any object matching all the predicates configured inside the and block.", + }, + Resource: &schemaR.SingleNestedAttribute{ + Optional: true, + }, + DataSource: &schemaD.SingleNestedAttribute{ + Computed: true, + }, + Attributes: superschema.Attributes{ + "tags": superschema.SuperListNestedAttributeOf[BucketLifecycleConfigurationModelTag]{ + Common: &schemaR.ListNestedAttribute{ + MarkdownDescription: "Specifies object tag key and value.", + }, + Resource: &schemaR.ListNestedAttribute{ + Optional: true, + }, + DataSource: &schemaD.ListNestedAttribute{ + Computed: true, + }, + Attributes: superschema.Attributes{ + "key": superschema.SuperStringAttribute{ + Common: &schemaR.StringAttribute{ + MarkdownDescription: "Object tag key.", + }, + Resource: &schemaR.StringAttribute{ + Required: true, + }, + DataSource: &schemaD.StringAttribute{ + Computed: true, + }, + }, + "value": superschema.SuperStringAttribute{ + Common: &schemaR.StringAttribute{ + MarkdownDescription: "Object tag value.", + }, + Resource: &schemaR.StringAttribute{ + Required: true, + }, + DataSource: &schemaD.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "prefix": superschema.SuperStringAttribute{ + Common: &schemaR.StringAttribute{ + MarkdownDescription: "Match objects with this prefix.", + }, + Resource: &schemaR.StringAttribute{ + Optional: true, + }, + DataSource: &schemaD.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "tag": superschema.SuperSingleNestedAttributeOf[BucketLifecycleConfigurationModelTag]{ + Common: &schemaR.SingleNestedAttribute{ + MarkdownDescription: "Specifies object tag key and value.", + }, + Resource: &schemaR.SingleNestedAttribute{ + Optional: true, + }, + DataSource: &schemaD.SingleNestedAttribute{ + Computed: true, + }, + Attributes: superschema.Attributes{ + "key": superschema.SuperStringAttribute{ + Common: &schemaR.StringAttribute{ + MarkdownDescription: "Object tag key.", + }, + Resource: &schemaR.StringAttribute{ + 
Required: true, + }, + DataSource: &schemaD.StringAttribute{ + Computed: true, + }, + }, + "value": superschema.SuperStringAttribute{ + Common: &schemaR.StringAttribute{ + MarkdownDescription: "Object tag value.", + }, + Resource: &schemaR.StringAttribute{ + Required: true, + }, + DataSource: &schemaD.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "prefix": superschema.SuperStringAttribute{ + Common: &schemaR.StringAttribute{ + MarkdownDescription: "Match objects with this prefix.", + }, + Resource: &schemaR.StringAttribute{ + Optional: true, + }, + DataSource: &schemaD.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "id": superschema.SuperStringAttribute{ + Common: &schemaR.StringAttribute{ + MarkdownDescription: "Unique identifier for the rule.", + }, + Resource: &schemaR.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 255), + }, + }, + DataSource: &schemaD.StringAttribute{ + Computed: true, + }, + }, + // TODO OneOf Expiration, NoncurrentVersionExpiration + "noncurrent_version_expiration": superschema.SuperSingleNestedAttributeOf[BucketLifecycleConfigurationModelNoncurrentVersionExpiration]{ + Common: &schemaR.SingleNestedAttribute{ + MarkdownDescription: "Configuration block that specifies when noncurrent object versions expire.", + }, + Resource: &schemaR.SingleNestedAttribute{ + Optional: true, + Validators: []validator.Object{ + objectvalidator.ConflictsWith(path.MatchRelative().AtParent().AtName("expiration"), path.MatchRelative().AtParent().AtName("noncurrent_version_expiration")), + }, + }, + DataSource: &schemaD.SingleNestedAttribute{ + Computed: true, + }, + Attributes: superschema.Attributes{ + "noncurrent_days": superschema.SuperInt64Attribute{ + Common: &schemaR.Int64Attribute{ + MarkdownDescription: "Number of days an object is noncurrent before Amazon S3 can perform the associated action. 
Must be a positive integer.", + }, + Resource: &schemaR.Int64Attribute{ + Optional: true, + Validators: []validator.Int64{ + int64validator.AtLeast(1), + }, + }, + DataSource: &schemaD.Int64Attribute{ + Computed: true, + }, + }, + "newer_noncurrent_versions": superschema.SuperInt64Attribute{ + Common: &schemaR.Int64Attribute{ + MarkdownDescription: "Number of noncurrent versions Amazon S3 will retain.", + }, + Resource: &schemaR.Int64Attribute{ + Optional: true, + Validators: []validator.Int64{ + int64validator.AtLeast(0), + }, + }, + DataSource: &schemaD.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "status": superschema.SuperStringAttribute{ + Common: &schemaR.StringAttribute{ + MarkdownDescription: "Whether the rule is currently being applied", + Computed: true, + }, + Resource: &schemaR.StringAttribute{ + Optional: true, + Default: stringdefault.StaticString(lifeCycleStatusEnabled), + Validators: []validator.String{ + stringvalidator.OneOf(lifeCycleStatusEnabled, lifeCycleStatusDisabled), + }, + }, + DataSource: &schemaD.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + } +} diff --git a/internal/provider/s3/bucket_lifecycle_configuration_schema_test.go b/internal/provider/s3/bucket_lifecycle_configuration_schema_test.go new file mode 100644 index 000000000..f6ec896d9 --- /dev/null +++ b/internal/provider/s3/bucket_lifecycle_configuration_schema_test.go @@ -0,0 +1,61 @@ +package s3_test + +import ( + "context" + "testing" + + // The fwresource import alias is so there is no collistion + // with the more typical acceptance testing import: + // "github.com/hashicorp/terraform-plugin-testing/helper/resource". + // The fwdatasource import alias is so there is no collistion + // fwdatasource "github.com/hashicorp/terraform-plugin-framework/datasource". 
+ fwresource "github.com/hashicorp/terraform-plugin-framework/resource" + + "github.com/orange-cloudavenue/terraform-provider-cloudavenue/internal/provider/s3" +) + +// Unit test for the schema of the resource cloudavenue_s3_BucketLifecycleConfiguration. +func TestBucketLifecycleConfigurationResourceSchema(t *testing.T) { + t.Parallel() + + ctx := context.Background() + schemaResponse := &fwresource.SchemaResponse{} + + // Instantiate the resource.Resource and call its Schema method + s3.NewBucketLifecycleConfigurationResource().Schema(ctx, fwresource.SchemaRequest{}, schemaResponse) + + if schemaResponse.Diagnostics.HasError() { + t.Fatalf("Schema method diagnostics: %+v", schemaResponse.Diagnostics) + } + + // Validate the schema + diagnostics := schemaResponse.Schema.ValidateImplementation(ctx) + + if diagnostics.HasError() { + t.Fatalf("Schema validation diagnostics: %+v", diagnostics) + } +} + +// Unit test for the schema of the datasource cloudavenue_s3_BucketLifecycleConfiguration +/* +func TestBucketLifecycleConfigurationDataSourceSchema(t *testing.T) { + t.Parallel() + + ctx := context.Background() + schemaResponse := &fwdatasource.SchemaResponse{} + + // Instantiate the datasource.Datasource and call its Schema method + s3.NewBucketLifecycleConfigurationDataSource().Schema(ctx, fwdatasource.SchemaRequest{}, schemaResponse) + + if schemaResponse.Diagnostics.HasError() { + t.Fatalf("Schema method diagnostics: %+v", schemaResponse.Diagnostics) + } + + // Validate the schema + diagnostics := schemaResponse.Schema.ValidateImplementation(ctx) + + if diagnostics.HasError() { + t.Fatalf("Schema validation diagnostics: %+v", diagnostics) + } +} +*/ diff --git a/internal/provider/s3/bucket_lifecycle_configuration_types.go b/internal/provider/s3/bucket_lifecycle_configuration_types.go new file mode 100644 index 000000000..943beede6 --- /dev/null +++ b/internal/provider/s3/bucket_lifecycle_configuration_types.go @@ -0,0 +1,282 @@ +package s3 + +import ( + "context" + 
"fmt" + "reflect" + "time" + + "github.com/aws/aws-sdk-go/service/s3" + + "github.com/hashicorp/terraform-plugin-framework/diag" + + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + timeoutsD "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + timeoutsR "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + + supertypes "github.com/FrangipaneTeam/terraform-plugin-framework-supertypes" + + "github.com/orange-cloudavenue/terraform-provider-cloudavenue/pkg/utils" +) + +type ( + BucketLifecycleConfigurationDatasourceModel struct { + Timeouts timeoutsD.Value `tfsdk:"timeouts"` + Bucket supertypes.StringValue `tfsdk:"bucket"` + ID supertypes.StringValue `tfsdk:"id"` + Rules supertypes.ListNestedObjectValueOf[BucketLifecycleConfigurationModelRule] `tfsdk:"rules"` + } + BucketLifecycleConfigurationModel struct { + Timeouts timeoutsR.Value `tfsdk:"timeouts"` + Bucket supertypes.StringValue `tfsdk:"bucket"` + ID supertypes.StringValue `tfsdk:"id"` + Rules supertypes.ListNestedObjectValueOf[BucketLifecycleConfigurationModelRule] `tfsdk:"rules"` + } + + BucketLifecycleConfigurationModelRules []*BucketLifecycleConfigurationModelRule + BucketLifecycleConfigurationModelRule struct { + AbortIncompleteMultipartUpload supertypes.SingleNestedObjectValueOf[BucketLifecycleConfigurationModelAbortIncompleteMultipartUpload] `tfsdk:"abort_incomplete_multipart_upload"` + Expiration supertypes.SingleNestedObjectValueOf[BucketLifecycleConfigurationModelExpiration] `tfsdk:"expiration"` + Filter supertypes.SingleNestedObjectValueOf[BucketLifecycleConfigurationModelFilter] `tfsdk:"filter"` + ID supertypes.StringValue `tfsdk:"id"` + NoncurrentVersionExpiration supertypes.SingleNestedObjectValueOf[BucketLifecycleConfigurationModelNoncurrentVersionExpiration] `tfsdk:"noncurrent_version_expiration"` + Status supertypes.StringValue `tfsdk:"status"` + } + 
BucketLifecycleConfigurationModelAbortIncompleteMultipartUpload struct { + DaysAfterInitiation supertypes.Int64Value `tfsdk:"days_after_initiation"` + } + BucketLifecycleConfigurationModelExpiration struct { + Date supertypes.StringValue `tfsdk:"date"` + Days supertypes.Int64Value `tfsdk:"days"` + ExpiredObjectDeleteMarker supertypes.BoolValue `tfsdk:"expired_object_delete_marker"` + } + BucketLifecycleConfigurationModelFilter struct { + And supertypes.SingleNestedObjectValueOf[BucketLifecycleConfigurationModelAnd] `tfsdk:"and"` + Tag supertypes.SingleNestedObjectValueOf[BucketLifecycleConfigurationModelTag] `tfsdk:"tag"` + Prefix supertypes.StringValue `tfsdk:"prefix"` + } + BucketLifecycleConfigurationModelAnd struct { + Tags supertypes.ListNestedObjectValueOf[BucketLifecycleConfigurationModelTag] `tfsdk:"tags"` + Prefix supertypes.StringValue `tfsdk:"prefix"` + } + BucketLifecycleConfigurationModelTag struct { + Key supertypes.StringValue `tfsdk:"key"` + Value supertypes.StringValue `tfsdk:"value"` + } + BucketLifecycleConfigurationModelNoncurrentVersionExpiration struct { + NewerNoncurrentVersions supertypes.Int64Value `tfsdk:"newer_noncurrent_versions"` + NoncurrentDays supertypes.Int64Value `tfsdk:"noncurrent_days"` + } +) + +// ToBucketLifecycleConfigurationModel converts a *s3.GetBucketLifecycleConfigurationOutput to a slice of BucketLifecycleConfigurationModelRule. 
+func ToBucketLifecycleConfigurationModel(ctx context.Context, lifecycle *s3.GetBucketLifecycleConfigurationOutput) (values BucketLifecycleConfigurationModelRules, diags diag.Diagnostics) { + rulesRefreshed := BucketLifecycleConfigurationModelRules{} + + if len(lifecycle.Rules) > 0 { + for _, rule := range lifecycle.Rules { + ruleRefreshed := &BucketLifecycleConfigurationModelRule{ + ID: supertypes.NewStringNull(), + Status: supertypes.NewStringNull(), + AbortIncompleteMultipartUpload: supertypes.NewSingleNestedObjectValueOfNull[BucketLifecycleConfigurationModelAbortIncompleteMultipartUpload](ctx), + Expiration: supertypes.NewSingleNestedObjectValueOfNull[BucketLifecycleConfigurationModelExpiration](ctx), + Filter: supertypes.NewSingleNestedObjectValueOfNull[BucketLifecycleConfigurationModelFilter](ctx), + NoncurrentVersionExpiration: supertypes.NewSingleNestedObjectValueOfNull[BucketLifecycleConfigurationModelNoncurrentVersionExpiration](ctx), + } + + ruleRefreshed.ID.SetPtr(rule.ID) + ruleRefreshed.Status.SetPtr(rule.Status) + + if rule.AbortIncompleteMultipartUpload != nil { + abortIncompleteMultipartUpload := &BucketLifecycleConfigurationModelAbortIncompleteMultipartUpload{ + DaysAfterInitiation: supertypes.NewInt64Null(), + } + abortIncompleteMultipartUpload.DaysAfterInitiation.SetPtr(rule.AbortIncompleteMultipartUpload.DaysAfterInitiation) + diags.Append(ruleRefreshed.AbortIncompleteMultipartUpload.Set(ctx, abortIncompleteMultipartUpload)...) 
+ } + + if rule.Expiration != nil { + expiration := &BucketLifecycleConfigurationModelExpiration{ + Date: supertypes.NewStringNull(), + Days: supertypes.NewInt64Null(), + ExpiredObjectDeleteMarker: supertypes.NewBoolNull(), + } + if rule.Expiration.Date != nil { + expiration.Date.Set(rule.Expiration.Date.Format(time.RFC3339)) + } + expiration.Days.SetPtr(rule.Expiration.Days) + expiration.ExpiredObjectDeleteMarker.SetPtr(rule.Expiration.ExpiredObjectDeleteMarker) + + diags.Append(ruleRefreshed.Expiration.Set(ctx, expiration)...) + } + + if rule.Filter != nil { + filter := &BucketLifecycleConfigurationModelFilter{ + And: supertypes.NewSingleNestedObjectValueOfNull[BucketLifecycleConfigurationModelAnd](ctx), + Tag: supertypes.NewSingleNestedObjectValueOfNull[BucketLifecycleConfigurationModelTag](ctx), + Prefix: supertypes.NewStringNull(), + } + filter.Prefix.SetPtr(rule.Filter.Prefix) + + if rule.Filter.Tag != nil { + tag := &BucketLifecycleConfigurationModelTag{ + Key: supertypes.NewStringNull(), + Value: supertypes.NewStringNull(), + } + + tag.Key.SetPtr(rule.Filter.Tag.Key) + tag.Value.SetPtr(rule.Filter.Tag.Value) + diags.Append(filter.Tag.Set(ctx, tag)...) + } + + if rule.Filter.And != nil { + and := &BucketLifecycleConfigurationModelAnd{ + Tags: supertypes.NewListNestedObjectValueOfNull[BucketLifecycleConfigurationModelTag](ctx), + Prefix: supertypes.NewStringNull(), + } + and.Prefix.SetPtr(rule.Filter.And.Prefix) + tags := make([]*BucketLifecycleConfigurationModelTag, 0) + if rule.Filter.And.Tags != nil && len(rule.Filter.And.Tags) > 0 { + for _, tag := range rule.Filter.And.Tags { + tagRefreshed := &BucketLifecycleConfigurationModelTag{ + Key: supertypes.NewStringNull(), + Value: supertypes.NewStringNull(), + } + + tagRefreshed.Key.SetPtr(tag.Key) + tagRefreshed.Value.SetPtr(tag.Value) + tags = append(tags, tagRefreshed) + } + } + diags.Append(and.Tags.Set(ctx, tags)...) + diags.Append(filter.And.Set(ctx, and)...) 
+ } + diags.Append(ruleRefreshed.Filter.Set(ctx, filter)...) + } + + if rule.NoncurrentVersionExpiration != nil { + noncurrentVersionExpiration := &BucketLifecycleConfigurationModelNoncurrentVersionExpiration{ + NewerNoncurrentVersions: supertypes.NewInt64Null(), + NoncurrentDays: supertypes.NewInt64Null(), + } + noncurrentVersionExpiration.NewerNoncurrentVersions.SetPtr(rule.NoncurrentVersionExpiration.NewerNoncurrentVersions) + noncurrentVersionExpiration.NoncurrentDays.SetPtr(rule.NoncurrentVersionExpiration.NoncurrentDays) + + diags.Append(ruleRefreshed.NoncurrentVersionExpiration.Set(ctx, noncurrentVersionExpiration)...) + } + + if diags.HasError() { + return rulesRefreshed, diags + } + + rulesRefreshed = append(rulesRefreshed, ruleRefreshed) + } + } + + return rulesRefreshed, diags +} + +type readLifeCycleConfigurationResourceDatasource interface { + *BucketLifecycleConfigurationModel | *BucketLifecycleConfigurationDatasourceModel + SetID(*string) + SetRules(context.Context, BucketLifecycleConfigurationModelRules) diag.Diagnostics + Copy() any +} + +type readLifeCycleConfigurationConfig[T readLifeCycleConfigurationResourceDatasource] struct { + Timeout func() (time.Duration, diag.Diagnostics) + Client *s3.S3 + BucketName func() *string +} + +// genericReadLifeCycleConfiguration is a generic function that reads the lifecycle configuration of a bucket. +func genericReadLifeCycleConfiguration[T readLifeCycleConfigurationResourceDatasource](ctx context.Context, config *readLifeCycleConfigurationConfig[T], planOrState T) (stateRefreshed T, found bool, diags diag.Diagnostics) { + stateRefreshed = planOrState.Copy().(T) + + readTimeout, d := config.Timeout() + if d.HasError() { + diags.Append(d...) 
+ return + } + + var lastOutput, lifecycle *s3.GetBucketLifecycleConfigurationOutput + + err := retry.RetryContext(ctx, readTimeout, func() *retry.RetryError { + var err error + + time.Sleep(lifecycleConfigurationExtraRetryDelay) + + lifecycle, err = config.Client.GetBucketLifecycleConfigurationWithContext(ctx, &s3.GetBucketLifecycleConfigurationInput{ + Bucket: config.BucketName(), + }) + + if tfawserr.ErrCodeEquals(err, ErrCodeNoSuchLifecycleConfiguration, s3.ErrCodeNoSuchBucket) { + return retry.RetryableError(err) + } + + if err != nil { + return retry.NonRetryableError(err) + } + + if lastOutput == nil || !reflect.DeepEqual(*lastOutput, *lifecycle) { + lastOutput = lifecycle + return retry.RetryableError(fmt.Errorf("bucket lifecycle configuration has not stabilized; trying again")) + } + + return nil + }) + + if err != nil && !tfawserr.ErrMessageContains(err, ErrCodeNoSuchLifecycleConfiguration, "") { + diags.AddError("Error retrieving bucket lifecycle configuration", err.Error()) + return stateRefreshed, false, diags + } + + rulesRefreshed, d := ToBucketLifecycleConfigurationModel(ctx, lifecycle) + if d.HasError() { + diags.Append(d...) + return stateRefreshed, true, diags + } + + if len(rulesRefreshed) == 0 { + diags.AddError("Error retrieving bucket lifecycle configuration", "no rules found") + return stateRefreshed, false, diags + } + + stateRefreshed.SetID(config.BucketName()) + diags.Append(stateRefreshed.SetRules(ctx, rulesRefreshed)...) + + return stateRefreshed, true, diags +} + +// BucketLifecycleConfigurationModel SetID. +func (rm *BucketLifecycleConfigurationModel) SetID(id *string) { + rm.ID.SetPtr(id) +} + +// BucketLifecycleConfigurationModel SetRules. +func (rm *BucketLifecycleConfigurationModel) SetRules(ctx context.Context, rules BucketLifecycleConfigurationModelRules) (diags diag.Diagnostics) { + return rm.Rules.Set(ctx, rules) +} + +// BucketLifecycleConfigurationDatasourceModel SetID. 
+func (rm *BucketLifecycleConfigurationDatasourceModel) SetID(id *string) { + rm.ID.SetPtr(id) +} + +// BucketLifecycleConfigurationDatasourceModel SetRules. +func (rm *BucketLifecycleConfigurationDatasourceModel) SetRules(ctx context.Context, rules BucketLifecycleConfigurationModelRules) (diags diag.Diagnostics) { + return rm.Rules.Set(ctx, rules) +} + +func (rm *BucketLifecycleConfigurationModel) Copy() any { + x := &BucketLifecycleConfigurationModel{} + utils.ModelCopy(rm, x) + return x +} + +func (rm *BucketLifecycleConfigurationDatasourceModel) Copy() any { + x := &BucketLifecycleConfigurationDatasourceModel{} + utils.ModelCopy(rm, x) + return x +} diff --git a/internal/testsacc/acctest.go b/internal/testsacc/acctest.go index 76e4b8733..62441c318 100644 --- a/internal/testsacc/acctest.go +++ b/internal/testsacc/acctest.go @@ -112,21 +112,32 @@ func (r resourceConfig) GetDefaultChecks() []resource.TestCheckFunc { // GetSpecificChecks returns the checks for the test named. func (r resourceConfig) GetSpecificChecks(testName string) []resource.TestCheckFunc { - var x []resource.TestCheckFunc + // var x []resource.TestCheckFunc - if test, ok := localCacheResource[r.GetResourceName()+"."+testName]; ok { - x = test.Create.Checks - } else { - t := r.Tests(context.Background())[testsacc.TestName(testName)]( + // if test, ok := localCacheResource[r.GetResourceName()+"."+testName]; ok { + // x = test.Create.Checks + // } else { + // t := r.Tests(context.Background())[testsacc.TestName(testName)]( + // context.Background(), + // r.GetResourceName()+"."+testName, + // ) + // x = t.Create.Checks + + // localCacheResource[r.GetResourceName()+"."+testName] = t + // } + + // return x + + t, ok := localCacheResource[r.GetResourceName()+"."+testName] + if !ok { + t = r.Tests(context.Background())[testsacc.TestName(testName)]( context.Background(), r.GetResourceName()+"."+testName, ) - x = t.Create.Checks - + t.ComputeDependenciesConfig(r.TestACC) 
localCacheResource[r.GetResourceName()+"."+testName] = t } - - return x + return t.Create.Checks } // AddConstantConfig returns the create configuration from constant. diff --git a/internal/testsacc/acctest_datasources_test.go b/internal/testsacc/acctest_datasources_test.go index eb4cacfd2..337c58b29 100644 --- a/internal/testsacc/acctest_datasources_test.go +++ b/internal/testsacc/acctest_datasources_test.go @@ -20,5 +20,6 @@ func GetDataSourceConfig() map[testsacc.ResourceName]func() resourceConfig { S3BucketVersioningConfigurationDatasourceName: NewResourceConfig(NewS3BucketVersioningConfigurationDatasourceTest()), S3BucketDatasourceName: NewResourceConfig(NewS3BucketDatasourceTest()), S3BucketCorsConfigurationDataSourceName: NewResourceConfig(NewS3BucketCorsConfigurationDataSourceTest()), + S3BucketLifecycleConfigurationDataSourceName: NewResourceConfig(NewS3BucketLifecycleConfigurationDataSourceTest()), } } diff --git a/internal/testsacc/acctest_resources_test.go b/internal/testsacc/acctest_resources_test.go index a7b81c73b..094f3587d 100644 --- a/internal/testsacc/acctest_resources_test.go +++ b/internal/testsacc/acctest_resources_test.go @@ -29,5 +29,6 @@ func GetResourceConfig() map[testsacc.ResourceName]func() resourceConfig { S3BucketResourceName: NewResourceConfig(NewS3BucketResourceTest()), S3BucketVersioningConfigurationResourceName: NewResourceConfig(NewS3BucketVersioningConfigurationResourceTest()), S3BucketCorsConfigurationResourceName: NewResourceConfig(NewS3BucketCorsConfigurationResourceTest()), + S3BucketLifecycleConfigurationResourceName: NewResourceConfig(NewS3BucketLifecycleConfigurationResourceTest()), } } diff --git a/internal/testsacc/s3_bucket_lifecycle_configuration_datasource_test.go b/internal/testsacc/s3_bucket_lifecycle_configuration_datasource_test.go new file mode 100644 index 000000000..4cd931e3c --- /dev/null +++ b/internal/testsacc/s3_bucket_lifecycle_configuration_datasource_test.go @@ -0,0 +1,61 @@ +package testsacc + +import 
( + "context" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/orange-cloudavenue/terraform-provider-cloudavenue/internal/helpers/testsacc" +) + +var _ testsacc.TestACC = &S3BucketLifecycleConfigurationDataSource{} + +const ( + S3BucketLifecycleConfigurationDataSourceName = testsacc.ResourceName("data.cloudavenue_s3_bucket_lifecycle_configuration") +) + +type S3BucketLifecycleConfigurationDataSource struct{} + +func NewS3BucketLifecycleConfigurationDataSourceTest() testsacc.TestACC { + return &S3BucketLifecycleConfigurationDataSource{} +} + +// GetResourceName returns the name of the resource. +func (r *S3BucketLifecycleConfigurationDataSource) GetResourceName() string { + return S3BucketLifecycleConfigurationDataSourceName.String() +} + +func (r *S3BucketLifecycleConfigurationDataSource) DependenciesConfig() (resp testsacc.DependenciesConfigResponse) { + // Add dependencies config to the resource + resp.Append(GetResourceConfig()[S3BucketLifecycleConfigurationResourceName]().GetDefaultConfig) + return +} + +func (r *S3BucketLifecycleConfigurationDataSource) Tests(ctx context.Context) map[testsacc.TestName]func(ctx context.Context, resourceName string) testsacc.Test { + return map[testsacc.TestName]func(ctx context.Context, resourceName string) testsacc.Test{ + // * Test One (example) + "example": func(_ context.Context, _ string) testsacc.Test { + return testsacc.Test{ + // ! 
Create testing + Create: testsacc.TFConfig{ + TFConfig: ` + data "cloudavenue_s3_bucket_lifecycle_configuration" "example" { + bucket = cloudavenue_s3_bucket_lifecycle_configuration.example.bucket + }`, + // Here use resource config test to test the data source + // the field example is the name of the test + Checks: GetResourceConfig()[S3BucketLifecycleConfigurationResourceName]().GetDefaultChecks(), + }, + } + }, + } +} + +func TestAccS3BucketLifecycleConfigurationDataSource(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { TestAccPreCheck(t) }, + ProtoV6ProviderFactories: TestAccProtoV6ProviderFactories, + Steps: testsacc.GenerateTests(&S3BucketLifecycleConfigurationDataSource{}), + }) +} diff --git a/internal/testsacc/s3_bucket_lifecycle_configuration_resource_test.go b/internal/testsacc/s3_bucket_lifecycle_configuration_resource_test.go new file mode 100644 index 000000000..6e19ab7a4 --- /dev/null +++ b/internal/testsacc/s3_bucket_lifecycle_configuration_resource_test.go @@ -0,0 +1,530 @@ +package testsacc + +import ( + "context" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/orange-cloudavenue/terraform-provider-cloudavenue/internal/helpers/testsacc" +) + +var _ testsacc.TestACC = &S3BucketLifecycleConfigurationResource{} + +const ( + S3BucketLifecycleConfigurationResourceName = testsacc.ResourceName("cloudavenue_s3_bucket_lifecycle_configuration") +) + +type S3BucketLifecycleConfigurationResource struct{} + +func NewS3BucketLifecycleConfigurationResourceTest() testsacc.TestACC { + return &S3BucketLifecycleConfigurationResource{} +} + +// GetResourceName returns the name of the resource. 
+func (r *S3BucketLifecycleConfigurationResource) GetResourceName() string { + return S3BucketLifecycleConfigurationResourceName.String() +} + +func (r *S3BucketLifecycleConfigurationResource) DependenciesConfig() (resp testsacc.DependenciesConfigResponse) { + resp.Append(GetResourceConfig()[S3BucketVersioningConfigurationResourceName]().GetDefaultConfig) + return +} + +func (r *S3BucketLifecycleConfigurationResource) Tests(ctx context.Context) map[testsacc.TestName]func(ctx context.Context, resourceName string) testsacc.Test { + return map[testsacc.TestName]func(ctx context.Context, resourceName string) testsacc.Test{ + // * First test named "example" + // * Specifying a filter using key prefixes + "example": func(_ context.Context, resourceName string) testsacc.Test { + return testsacc.Test{ + CommonChecks: []resource.TestCheckFunc{ + resource.TestCheckResourceAttrSet(resourceName, "bucket"), + }, + // ! Create testing + Create: testsacc.TFConfig{ + TFConfig: testsacc.GenerateFromTemplate(resourceName, ` + resource "cloudavenue_s3_bucket_lifecycle_configuration" "example" { + bucket = cloudavenue_s3_bucket.examplewithobjectlock.name + + rules = [{ + id = {{ generate . "rule_id" }} + + filter = { + prefix = "logs/" + } + + noncurrent_version_expiration = { + noncurrent_days = 90 + } + + status = "Enabled" + }] + }`), + Checks: []resource.TestCheckFunc{ + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rules.0.id", testsacc.GetValueFromTemplate(resourceName, "rule_id")), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.prefix", "logs/"), + resource.TestCheckResourceAttr(resourceName, "rules.0.noncurrent_version_expiration.noncurrent_days", "90"), + resource.TestCheckResourceAttr(resourceName, "rules.0.status", "Enabled"), + }, + }, + // ! 
Updates testing + Updates: []testsacc.TFConfig{ + { + TFConfig: testsacc.GenerateFromTemplate(resourceName, ` + resource "cloudavenue_s3_bucket_lifecycle_configuration" "example" { + bucket = cloudavenue_s3_bucket.examplewithobjectlock.name + + rules = [{ + id = {{ get . "rule_id" }} + + filter = { + prefix = "config/" + } + + noncurrent_version_expiration = { + noncurrent_days = 180 + } + + status = "Enabled" + }] + }`), + Checks: []resource.TestCheckFunc{ + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rules.0.id", testsacc.GetValueFromTemplate(resourceName, "rule_id")), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.prefix", "config/"), + resource.TestCheckResourceAttr(resourceName, "rules.0.noncurrent_version_expiration.noncurrent_days", "180"), + resource.TestCheckResourceAttr(resourceName, "rules.0.status", "Enabled"), + }, + }, + { + TFConfig: testsacc.GenerateFromTemplate(resourceName, ` + resource "cloudavenue_s3_bucket_lifecycle_configuration" "example" { + bucket = cloudavenue_s3_bucket.examplewithobjectlock.name + + rules = [{ + id = {{ get . "rule_id" }} + + filter = { + prefix = "config/" + } + + noncurrent_version_expiration = { + noncurrent_days = 180 + } + + status = "Enabled" + }, + { + id = {{ generate . 
"rule_id_2" }} + + filter = { + prefix = "cache/" + } + + noncurrent_version_expiration = { + noncurrent_days = 1 + } + + status = "Enabled" + }] + }`), + Checks: []resource.TestCheckFunc{ + resource.TestCheckResourceAttr(resourceName, "rules.#", "2"), + resource.TestCheckResourceAttr(resourceName, "rules.0.id", testsacc.GetValueFromTemplate(resourceName, "rule_id")), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.prefix", "config/"), + resource.TestCheckResourceAttr(resourceName, "rules.0.noncurrent_version_expiration.noncurrent_days", "180"), + resource.TestCheckResourceAttr(resourceName, "rules.0.status", "Enabled"), + resource.TestCheckResourceAttr(resourceName, "rules.1.id", testsacc.GetValueFromTemplate(resourceName, "rule_id_2")), + resource.TestCheckResourceAttr(resourceName, "rules.1.filter.prefix", "cache/"), + resource.TestCheckResourceAttr(resourceName, "rules.1.noncurrent_version_expiration.noncurrent_days", "1"), + resource.TestCheckResourceAttr(resourceName, "rules.1.status", "Enabled"), + }, + }, + }, + // ! Imports testing + Imports: []testsacc.TFImport{ + { + ImportStateIDBuilder: []string{"bucket"}, + ImportState: true, + ImportStateVerify: true, + }, + }, + // ! Destroy + Destroy: true, + } + }, + // * Example with a filter object size greater than + "example_filter_tag": func(_ context.Context, resourceName string) testsacc.Test { + return testsacc.Test{ + CommonChecks: []resource.TestCheckFunc{ + resource.TestCheckResourceAttrSet(resourceName, "bucket"), + }, + // ! Create testing + Create: testsacc.TFConfig{ + TFConfig: testsacc.GenerateFromTemplate(resourceName, ` + resource "cloudavenue_s3_bucket_lifecycle_configuration" "example_filter_tag" { + bucket = cloudavenue_s3_bucket.examplewithobjectlock.name + + rules = [{ + id = {{ generate . 
"rule_id" }} + + filter = { + tag = { + key = "tag1" + value = "value1" + } + } + + noncurrent_version_expiration = { + noncurrent_days = 90 + } + + status = "Enabled" + }] + }`), + Checks: []resource.TestCheckFunc{ + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rules.0.id", testsacc.GetValueFromTemplate(resourceName, "rule_id")), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.tag.key", "tag1"), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.tag.value", "value1"), + resource.TestCheckResourceAttr(resourceName, "rules.0.noncurrent_version_expiration.noncurrent_days", "90"), + resource.TestCheckResourceAttr(resourceName, "rules.0.status", "Enabled"), + }, + }, + // ! Updates testing + Updates: []testsacc.TFConfig{ + { + TFConfig: testsacc.GenerateFromTemplate(resourceName, ` + resource "cloudavenue_s3_bucket_lifecycle_configuration" "example_filter_tag" { + bucket = cloudavenue_s3_bucket.examplewithobjectlock.name + + rules = [{ + id = {{ get . "rule_id" }} + + filter = { + tag = { + key = "tag1-updated" + value = "value1-updated" + } + } + + noncurrent_version_expiration = { + noncurrent_days = 180 + } + + status = "Enabled" + }] + }`), + Checks: []resource.TestCheckFunc{ + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rules.0.id", testsacc.GetValueFromTemplate(resourceName, "rule_id")), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.tag.key", "tag1-updated"), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.tag.value", "value1-updated"), + resource.TestCheckResourceAttr(resourceName, "rules.0.noncurrent_version_expiration.noncurrent_days", "180"), + resource.TestCheckResourceAttr(resourceName, "rules.0.status", "Enabled"), + }, + }, + }, + // ! 
Imports testing + Imports: []testsacc.TFImport{ + { + ImportStateIDBuilder: []string{"bucket"}, + ImportState: true, + ImportStateVerify: true, + }, + }, + // ! Destroy + Destroy: true, + } + }, + // * Example Specifying a filter based on object size range and prefix + "example_filter_tags_and_prefix": func(_ context.Context, resourceName string) testsacc.Test { + return testsacc.Test{ + CommonChecks: []resource.TestCheckFunc{ + resource.TestCheckResourceAttrSet(resourceName, "bucket"), + }, + // ! Create testing + Create: testsacc.TFConfig{ + TFConfig: testsacc.GenerateFromTemplate(resourceName, ` + resource "cloudavenue_s3_bucket_lifecycle_configuration" "example_filter_tags_and_prefix" { + bucket = cloudavenue_s3_bucket.examplewithobjectlock.name + + rules = [{ + id = {{ generate . "rule_id" }} + + filter = { + and = { + prefix = "logs/" + tags = [ + { + key = "tag1" + value = "value1" + } + ] + } + } + + noncurrent_version_expiration = { + noncurrent_days = 180 + } + + status = "Enabled" + }] + }`), + Checks: []resource.TestCheckFunc{ + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rules.0.id", testsacc.GetValueFromTemplate(resourceName, "rule_id")), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.and.tags.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.and.tags.0.key", "tag1"), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.and.tags.0.value", "value1"), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.and.prefix", "logs/"), + resource.TestCheckResourceAttr(resourceName, "rules.0.status", "Enabled"), + }, + }, + // ! Updates testing + Updates: []testsacc.TFConfig{ + { + TFConfig: testsacc.GenerateFromTemplate(resourceName, ` + resource "cloudavenue_s3_bucket_lifecycle_configuration" "example_filter_tags_and_prefix" { + bucket = cloudavenue_s3_bucket.examplewithobjectlock.name + + rules = [{ + id = {{ get . 
"rule_id" }} + + filter = { + and = { + prefix = "log/" + tags = [ + { + key = "tag1" + value = "value1" + }, + { + key = "tag2" + value = "value2" + } + ] + } + } + + noncurrent_version_expiration = { + noncurrent_days = 180 + } + + status = "Enabled" + }] + }`), + Checks: []resource.TestCheckFunc{ + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rules.0.id", testsacc.GetValueFromTemplate(resourceName, "rule_id")), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.and.tags.#", "2"), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.and.tags.#", "2"), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.and.tags.0.key", "tag1"), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.and.tags.0.value", "value1"), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.and.tags.1.key", "tag2"), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.and.tags.1.value", "value2"), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.and.prefix", "log/"), + resource.TestCheckResourceAttr(resourceName, "rules.0.status", "Enabled"), + }, + }, + { + TFConfig: testsacc.GenerateFromTemplate(resourceName, ` + resource "cloudavenue_s3_bucket_lifecycle_configuration" "example_filter_tags_and_prefix" { + bucket = cloudavenue_s3_bucket.examplewithobjectlock.name + + rules = [{ + id = {{ get . "rule_id" }} + + filter = { + and = { + prefix = "log/" + tags = [ + { + key = "tag1" + value = "value1" + }, + { + key = "tag2" + value = "value2" + } + ] + } + } + + noncurrent_version_expiration = { + noncurrent_days = 180 + } + + status = "Enabled" + }, + { + id = {{ generate . 
"rule_id_2" }} + + filter = { + and = { + prefix = "cache/" + tags = [ + { + key = "tag1-cache" + value = "value1-cache" + } + ] + } + } + + noncurrent_version_expiration = { + noncurrent_days = 180 + } + + status = "Enabled" + }] + + + }`), + Checks: []resource.TestCheckFunc{ + resource.TestCheckResourceAttr(resourceName, "rules.#", "2"), + // ? 0 + resource.TestCheckResourceAttr(resourceName, "rules.0.id", testsacc.GetValueFromTemplate(resourceName, "rule_id")), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.and.tags.#", "2"), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.and.tags.0.key", "tag1"), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.and.tags.0.value", "value1"), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.and.tags.1.key", "tag2"), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.and.tags.1.value", "value2"), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.and.prefix", "log/"), + resource.TestCheckResourceAttr(resourceName, "rules.0.status", "Enabled"), + // ? 1 + resource.TestCheckResourceAttr(resourceName, "rules.1.id", testsacc.GetValueFromTemplate(resourceName, "rule_id_2")), + resource.TestCheckResourceAttr(resourceName, "rules.1.filter.and.tags.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rules.1.filter.and.tags.0.key", "tag1-cache"), + resource.TestCheckResourceAttr(resourceName, "rules.1.filter.and.tags.0.value", "value1-cache"), + resource.TestCheckResourceAttr(resourceName, "rules.1.filter.and.prefix", "cache/"), + resource.TestCheckResourceAttr(resourceName, "rules.1.status", "Enabled"), + }, + }, + }, + // ! Imports testing + Imports: []testsacc.TFImport{ + { + ImportStateIDBuilder: []string{"bucket"}, + ImportState: true, + ImportStateVerify: true, + }, + }, + // ! 
Destroy + Destroy: true, + } + }, + // * Example Lifecycle Configuration for a bucket with versioning + "example_with_versioning": func(_ context.Context, resourceName string) testsacc.Test { + return testsacc.Test{ + CommonChecks: []resource.TestCheckFunc{ + resource.TestCheckResourceAttrSet(resourceName, "bucket"), + }, + // ! Create testing + Create: testsacc.TFConfig{ + TFConfig: testsacc.GenerateFromTemplate(resourceName, ` + resource "cloudavenue_s3_bucket_lifecycle_configuration" "example_with_versioning" { + bucket = cloudavenue_s3_bucket_versioning_configuration.example.bucket + + rules = [{ + id = {{ generate . "rule_id" }} + + filter = { + prefix = "logs/" + } + + expiration = { + days = 90 + } + + status = "Enabled" + }] + }`), + Checks: []resource.TestCheckFunc{ + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rules.0.id", testsacc.GetValueFromTemplate(resourceName, "rule_id")), + resource.TestCheckResourceAttr(resourceName, "rules.0.expiration.days", "90"), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.prefix", "logs/"), + resource.TestCheckResourceAttr(resourceName, "rules.0.status", "Enabled"), + }, + }, + // ! Updates testing + Updates: []testsacc.TFConfig{ + { + TFConfig: testsacc.GenerateFromTemplate(resourceName, ` + resource "cloudavenue_s3_bucket_lifecycle_configuration" "example_with_versioning" { + bucket = cloudavenue_s3_bucket_versioning_configuration.example.bucket + + rules = [{ + id = {{ generate . 
"rule_id" }} + + filter = { + prefix = "logs/" + } + + expiration = { + date = "2027-01-01T00:00:00Z" + } + + status = "Enabled" + }] + }`), + Checks: []resource.TestCheckFunc{ + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rules.0.id", testsacc.GetValueFromTemplate(resourceName, "rule_id")), + resource.TestCheckResourceAttr(resourceName, "rules.0.expiration.date", "2027-01-01T00:00:00Z"), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.prefix", "logs/"), + resource.TestCheckResourceAttr(resourceName, "rules.0.status", "Enabled"), + }, + }, + { + TFConfig: testsacc.GenerateFromTemplate(resourceName, ` + resource "cloudavenue_s3_bucket_lifecycle_configuration" "example_with_versioning" { + bucket = cloudavenue_s3_bucket_versioning_configuration.example.bucket + + rules = [{ + id = {{ generate . "rule_id" }} + + filter = { + prefix = "logs/" + } + + expiration = { + expired_object_delete_marker = true + } + + status = "Enabled" + }] + }`), + Checks: []resource.TestCheckFunc{ + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rules.0.id", testsacc.GetValueFromTemplate(resourceName, "rule_id")), + resource.TestCheckResourceAttr(resourceName, "rules.0.expiration.expired_object_delete_marker", "true"), + resource.TestCheckResourceAttr(resourceName, "rules.0.filter.prefix", "logs/"), + resource.TestCheckResourceAttr(resourceName, "rules.0.status", "Enabled"), + }, + }, + }, + // ! Imports testing + Imports: []testsacc.TFImport{ + { + ImportStateIDBuilder: []string{"bucket"}, + ImportState: true, + ImportStateVerify: true, + }, + }, + // ! 
Destroy + Destroy: true, + } + }, + } +} + +func TestAccS3BucketLifecycleConfigurationResource(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { TestAccPreCheck(t) }, + ProtoV6ProviderFactories: TestAccProtoV6ProviderFactories, + Steps: testsacc.GenerateTests(&S3BucketLifecycleConfigurationResource{}), + }) +} diff --git a/internal/testsacc/s3_bucket_versioning_configuration_resource_test.go b/internal/testsacc/s3_bucket_versioning_configuration_resource_test.go index 1002b236c..bf63c64f9 100644 --- a/internal/testsacc/s3_bucket_versioning_configuration_resource_test.go +++ b/internal/testsacc/s3_bucket_versioning_configuration_resource_test.go @@ -27,6 +27,7 @@ func (r *S3BucketVersioningConfigurationResource) GetResourceName() string { } func (r *S3BucketVersioningConfigurationResource) DependenciesConfig() (resp testsacc.DependenciesConfigResponse) { + resp.Append(GetResourceConfig()[S3BucketResourceName]().GetSpecificConfig("examplewithobjectlock")) resp.Append(GetResourceConfig()[S3BucketResourceName]().GetDefaultConfig) return } diff --git a/templates/data-sources/s3_bucket_versioning_configuration.md copy.tmpl b/templates/data-sources/s3_bucket_versioning_configuration.md copy.tmpl new file mode 100644 index 000000000..62ff04eeb --- /dev/null +++ b/templates/data-sources/s3_bucket_versioning_configuration.md copy.tmpl @@ -0,0 +1,25 @@ +--- +page_title: "{{.Name}} {{.Type}} - {{.ProviderName}}" +subcategory: "S3 (Object Storage)" +description: |- +{{ .Description | plainmarkdown | trimspace | prefixlines " " }} +--- + +# {{.Name}} ({{.Type}}) + +{{ .Description | trimspace }} + +{{ if .HasExample -}} +## Example Usage + +{{ tffile .ExampleFile }} +{{- end }} + +{{ .SchemaMarkdown | trimspace }} + +{{ if .HasImport -}} +## Import + +Import is supported using the following syntax: +{{ codefile "shell" .ImportFile }} +{{- end }} \ No newline at end of file diff --git a/templates/resources/s3_bucket_lifecycle_configuration.md.tmpl 
b/templates/resources/s3_bucket_lifecycle_configuration.md.tmpl new file mode 100644 index 000000000..128dce2f0 --- /dev/null +++ b/templates/resources/s3_bucket_lifecycle_configuration.md.tmpl @@ -0,0 +1,189 @@ +--- +page_title: "{{.Name}} {{.Type}} - {{.ProviderName}}" +subcategory: "S3 (Object Storage)" +description: |- +{{ .Description | plainmarkdown | trimspace | prefixlines " " }} +--- + +# {{.Name}} ({{.Type}}) + +Provides an independent configuration resource for S3 bucket [lifecycle configuration](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html). + +{{ .Description | trimspace }} + +An S3 Lifecycle configuration consists of one or more Lifecycle rules. Each rule consists of the following: + +* An ID that identifies the rule. The ID must be unique within the configuration. +* A Status that indicates whether the rule is currently being applied. +* A Filter that identifies a subset of objects to which the rule applies. +* One or more Lifecycle actions that you want Amazon S3 to perform on the objects identified by the Filter. + +For more information about Lifecycle configuration, see [Lifecycle Configuration Elements](https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html). + + ~> **NOTE** S3 Buckets only support a single lifecycle configuration. Declaring multiple `cloudavenue_s3_bucket_lifecycle_configuration` resources to the same S3 Bucket will cause a perpetual difference in configuration. + + ~> **NOTE** Lifecycle configurations may take some time to fully propagate to all CloudAvenue S3 systems. Running Terraform operations shortly after creating a lifecycle configuration may result in changes that affect configuration idempotence. See the Amazon S3 User Guide on [setting lifecycle configuration on a bucket](https://docs.aws.amazon.com/AmazonS3/latest/userguide/how-to-set-lifecycle-configuration-intro.html). 
+ +## Example Usage + +### Specifying a filter using key prefixes + +The Lifecycle rule applies to a subset of objects based on the key name prefix (`logs/`). + +```hcl +resource "cloudavenue_s3_bucket_lifecycle_configuration" "example" { + bucket = cloudavenue_s3_bucket.example.name + + rules = [{ + id = "rule_id_1" + + filter = { + prefix = "logs/" + } + + noncurrent_version_expiration = { + noncurrent_days = 90 + } + + status = "Enabled" + }] +} +``` + +If you want to apply a Lifecycle action to a subset of objects based on different key name prefixes, specify separate rules. + +```hcl +resource "cloudavenue_s3_bucket_lifecycle_configuration" "example" { + bucket = cloudavenue_s3_bucket.example.name + + rules = [ + { + id = "rule_id_1" + + filter = { + prefix = "config/" + } + + noncurrent_version_expiration = { + noncurrent_days = 180 + } + + status = "Enabled" + }, + { + id = "rule_id_2" + + filter = { + prefix = "cache/" + } + + noncurrent_version_expiration = { + noncurrent_days = 10 + } + + status = "Enabled" + } + ] +} +``` + +### Specifying a filter based on tag + +The Lifecycle rule applies to a subset of objects based on the tag key and value (`tag1` and `value1`). + +```hcl +resource "cloudavenue_s3_bucket_lifecycle_configuration" "example" { + bucket = cloudavenue_s3_bucket.example.name + + rules = [{ + id = "rule_id_1" + + filter = { + tag = { + key = "tag1" + value = "value1" + } + } + + expiration = { + days = 90 + } + + status = "Enabled" + }] +} +``` + +### Specifying a filter based on tags and prefix + +The Lifecycle rule applies to a subset of objects based on the tag key and value (`tag1` and `value1`) and the key name prefix (`logs/`).
+ +```hcl +resource "cloudavenue_s3_bucket_lifecycle_configuration" "example" { + bucket = cloudavenue_s3_bucket.example.name + + rules = [{ + id = "rule_id_1" + + filter = { + and = { + prefix = "logs/" + tags = [ + { + key = "tag1" + value = "value1" + } + ] + } + } + + expiration = { + days = 90 + } + + status = "Enabled" + }] +} +``` + +### Creating a Lifecycle Configuration for a bucket with versioning + +```hcl +resource "cloudavenue_s3_bucket" "example" { + name = "example" +} + +resource "cloudavenue_s3_bucket_versioning_configuration" "example" { + bucket = cloudavenue_s3_bucket.example.name + status = "Enabled" +} + +resource "cloudavenue_s3_bucket_lifecycle_configuration" "example" { + bucket = cloudavenue_s3_bucket_versioning_configuration.example.bucket + + rules = [{ + id = "rule_id_1" + + filter = { + prefix = "logs/" + } + + expiration = { + days = 90 + } + + status = "Enabled" + }] +} +``` + +{{ .SchemaMarkdown | trimspace }} + + -> **Timeout** Default timeout is **5 minutes**. + +{{ if .HasImport -}} +## Import + +Import is supported using the following syntax: +{{ codefile "shell" .ImportFile }} +{{- end }} \ No newline at end of file