From 4215c4e36b2529325e4643a20b8c0d8619a6d496 Mon Sep 17 00:00:00 2001 From: Tao <104055472+teowa@users.noreply.github.com> Date: Sat, 20 Jan 2024 03:38:58 +0800 Subject: [PATCH] New Resource: `azurerm_redhat_openshift_cluster` (#24375) --- .github/labeler-issue-triage.yml | 3 + .github/labeler-pull-request-triage.yml | 5 + .teamcity/components/generated/services.kt | 1 + go.mod | 3 + go.sum | 9 +- internal/clients/client.go | 5 + internal/provider/services.go | 2 + .../services/redhatopenshift/client/client.go | 24 + .../redhat_openshift_cluster_resource.go | 785 ++++++++++++++++++ .../redhat_openshift_cluster_resource_test.go | 780 +++++++++++++++++ .../services/redhatopenshift/registration.go | 35 + .../validate/cluster_version.go | 20 + .../validate/cluster_version_test.go | 55 ++ .../2023-09-04/openshiftclusters/README.md | 148 ++++ .../2023-09-04/openshiftclusters/client.go | 26 + .../2023-09-04/openshiftclusters/constants.go | 268 ++++++ .../id_provideropenshiftcluster.go | 125 +++ .../method_createorupdate.go | 74 ++ .../openshiftclusters/method_delete.go | 70 ++ .../openshiftclusters/method_get.go | 51 ++ .../openshiftclusters/method_list.go | 90 ++ .../method_listadmincredentials.go | 52 ++ .../method_listbyresourcegroup.go | 90 ++ .../method_listcredentials.go | 52 ++ .../openshiftclusters/method_update.go | 74 ++ .../model_apiserverprofile.go | 10 + .../openshiftclusters/model_clusterprofile.go | 12 + .../openshiftclusters/model_consoleprofile.go | 8 + .../openshiftclusters/model_ingressprofile.go | 10 + .../openshiftclusters/model_masterprofile.go | 11 + .../openshiftclusters/model_networkprofile.go | 11 + .../model_openshiftcluster.go | 18 + .../model_openshiftclusteradminkubeconfig.go | 8 + .../model_openshiftclustercredentials.go | 9 + .../model_openshiftclusterproperties.go | 17 + .../model_openshiftclusterupdate.go | 14 + .../model_serviceprincipalprofile.go | 9 + .../openshiftclusters/model_workerprofile.go | 14 + .../openshiftclusters/predicates.go | 32 + .../2023-09-04/openshiftclusters/version.go | 12 + vendor/modules.txt | 7 + website/allowed-subcategories | 1 + .../r/redhat_openshift_cluster.html.markdown | 282 +++++++ 43 files changed, 3330 insertions(+), 2 deletions(-) create mode 100644 internal/services/redhatopenshift/client/client.go create mode 100644 internal/services/redhatopenshift/redhat_openshift_cluster_resource.go create mode 100644 internal/services/redhatopenshift/redhat_openshift_cluster_resource_test.go create mode 100644 internal/services/redhatopenshift/registration.go create mode 100644 internal/services/redhatopenshift/validate/cluster_version.go create mode 100644 internal/services/redhatopenshift/validate/cluster_version_test.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/README.md create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/client.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/constants.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/id_provideropenshiftcluster.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/method_createorupdate.go create mode 100644 
vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/method_delete.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/method_get.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/method_list.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/method_listadmincredentials.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/method_listbyresourcegroup.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/method_listcredentials.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/method_update.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/model_apiserverprofile.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/model_clusterprofile.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/model_consoleprofile.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/model_ingressprofile.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/model_masterprofile.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/model_networkprofile.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/model_openshiftcluster.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/model_openshiftclusteradminkubeconfig.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/model_openshiftclustercredentials.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/model_openshiftclusterproperties.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/model_openshiftclusterupdate.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/model_serviceprincipalprofile.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/model_workerprofile.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/predicates.go create mode 100644 vendor/github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters/version.go create mode 100644 website/docs/r/redhat_openshift_cluster.html.markdown diff --git a/.github/labeler-issue-triage.yml b/.github/labeler-issue-triage.yml index 5b087549eb32..973b89f18081 100644 --- a/.github/labeler-issue-triage.yml +++ b/.github/labeler-issue-triage.yml @@ -282,6 +282,9 @@ service/purview: service/recovery-services: - 
'### (|New or )Affected Resource\(s\)\/Data Source\(s\)((.|\n)*)azurerm_(backup_|recovery_services_vault|site_recovery_)((.|\n)*)###' +service/redhatopenshift: + - '### (|New or )Affected Resource\(s\)\/Data Source\(s\)((.|\n)*)azurerm_redhat_openshift_cluster((.|\n)*)###' + service/redis: - '### (|New or )Affected Resource\(s\)\/Data Source\(s\)((.|\n)*)azurerm_redis_((.|\n)*)###' diff --git a/.github/labeler-pull-request-triage.yml b/.github/labeler-pull-request-triage.yml index 41be0f5af977..62a6d3345e8a 100644 --- a/.github/labeler-pull-request-triage.yml +++ b/.github/labeler-pull-request-triage.yml @@ -474,6 +474,11 @@ service/recovery-services: - any-glob-to-any-file: - internal/services/recoveryservices/**/* +service/redhatopenshift: +- changed-files: + - any-glob-to-any-file: + - internal/services/redhatopenshift/**/* + service/redis: - changed-files: - any-glob-to-any-file: diff --git a/.teamcity/components/generated/services.kt b/.teamcity/components/generated/services.kt index 25cce5b39f70..cb6224c45a54 100644 --- a/.teamcity/components/generated/services.kt +++ b/.teamcity/components/generated/services.kt @@ -102,6 +102,7 @@ var services = mapOf( "privatednsresolver" to "Private DNS Resolver", "purview" to "Purview", "recoveryservices" to "Recovery Services", + "redhatopenshift" to "Red Hat OpenShift", "redis" to "Redis", "redisenterprise" to "Redis Enterprise", "relay" to "Relay", diff --git a/go.mod b/go.mod index 2c9e2570b65b..c483c806d636 100644 --- a/go.mod +++ b/go.mod @@ -62,12 +62,14 @@ require ( github.com/hashicorp/terraform-registry-address v0.2.2 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect github.com/hashicorp/yamux v0.1.1 // indirect + github.com/kr/text v0.2.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.19 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect github.com/oklog/run v1.1.0 // indirect github.com/rickb777/plural v1.4.1 // indirect github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect @@ -84,6 +86,7 @@ require ( google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/grpc v1.58.3 // indirect google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect software.sslmate.com/src/go-pkcs12 v0.2.1 // indirect ) diff --git a/go.sum b/go.sum index c52d66c710d3..bef74653d350 100644 --- a/go.sum +++ b/go.sum @@ -61,6 +61,7 @@ github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7N github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/dave/jennifer v1.6.0 h1:MQ/6emI2xM7wt0tJzJzyUik2Q3Tcn2eE0vtYgh4GPVI= github.com/dave/jennifer v1.6.0/go.mod h1:AxTG893FiZKqxy3FP1kL80VMshSMuz2G+EgvszgGRnk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -177,8 +178,9 @@ github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF github.com/kr/pretty v0.1.0 
h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= @@ -206,6 +208,8 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -367,8 +371,9 @@ google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= diff --git a/internal/clients/client.go b/internal/clients/client.go index 2e7e06cf93ce..c6969f44dbf6 100644 --- a/internal/clients/client.go +++ b/internal/clients/client.go @@ -118,6 +118,7 @@ import ( dnsresolver "github.com/hashicorp/terraform-provider-azurerm/internal/services/privatednsresolver/client" purview "github.com/hashicorp/terraform-provider-azurerm/internal/services/purview/client" recoveryServices "github.com/hashicorp/terraform-provider-azurerm/internal/services/recoveryservices/client" + redhatopenshift "github.com/hashicorp/terraform-provider-azurerm/internal/services/redhatopenshift/client" redis "github.com/hashicorp/terraform-provider-azurerm/internal/services/redis/client" redisenterprise 
"github.com/hashicorp/terraform-provider-azurerm/internal/services/redisenterprise/client" relay "github.com/hashicorp/terraform-provider-azurerm/internal/services/relay/client" @@ -251,6 +252,7 @@ type Client struct { PrivateDnsResolver *dnsresolver.Client Purview *purview.Client RecoveryServices *recoveryServices.Client + RedHatOpenShift *redhatopenshift.Client Redis *redis_2023_08_01.Client RedisEnterprise *redisenterprise.Client Relay *relay.Client @@ -558,6 +560,9 @@ func (client *Client) Build(ctx context.Context, o *common.ClientOptions) error if client.RecoveryServices, err = recoveryServices.NewClient(o); err != nil { return fmt.Errorf("building clients for RecoveryServices: %+v", err) } + if client.RedHatOpenShift, err = redhatopenshift.NewClient(o); err != nil { + return fmt.Errorf("building clients for RedHatOpenShift: %+v", err) + } if client.Redis, err = redis.NewClient(o); err != nil { return fmt.Errorf("building clients for Redis: %+v", err) } diff --git a/internal/provider/services.go b/internal/provider/services.go index 6c092f7c39be..0ffc6157af19 100644 --- a/internal/provider/services.go +++ b/internal/provider/services.go @@ -103,6 +103,7 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/internal/services/privatednsresolver" "github.com/hashicorp/terraform-provider-azurerm/internal/services/purview" "github.com/hashicorp/terraform-provider-azurerm/internal/services/recoveryservices" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/redhatopenshift" "github.com/hashicorp/terraform-provider-azurerm/internal/services/redis" "github.com/hashicorp/terraform-provider-azurerm/internal/services/redisenterprise" "github.com/hashicorp/terraform-provider-azurerm/internal/services/relay" @@ -191,6 +192,7 @@ func SupportedTypedServices() []sdk.TypedServiceRegistration { policy.Registration{}, privatednsresolver.Registration{}, recoveryservices.Registration{}, + redhatopenshift.Registration{}, resource.Registration{}, sentinel.Registration{}, serviceconnector.Registration{}, diff --git a/internal/services/redhatopenshift/client/client.go b/internal/services/redhatopenshift/client/client.go new file mode 100644 index 000000000000..d804595844c0 --- /dev/null +++ b/internal/services/redhatopenshift/client/client.go @@ -0,0 +1,24 @@ +package client + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters" + "github.com/hashicorp/terraform-provider-azurerm/internal/common" +) + +type Client struct { + OpenShiftClustersClient *openshiftclusters.OpenShiftClustersClient +} + +func NewClient(o *common.ClientOptions) (*Client, error) { + openShiftClustersClient, err := openshiftclusters.NewOpenShiftClustersClientWithBaseURI(o.Environment.ResourceManager) + if err != nil { + return nil, fmt.Errorf("instantiating OpenShiftClustersClient: %+v", err) + } + o.Configure(openShiftClustersClient.Client, o.Authorizers.ResourceManager) + + return &Client{ + OpenShiftClustersClient: openShiftClustersClient, + }, nil +} diff --git a/internal/services/redhatopenshift/redhat_openshift_cluster_resource.go b/internal/services/redhatopenshift/redhat_openshift_cluster_resource.go new file mode 100644 index 000000000000..b2ae92711d72 --- /dev/null +++ b/internal/services/redhatopenshift/redhat_openshift_cluster_resource.go @@ -0,0 +1,785 @@ +package redhatopenshift + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/pointer" + "github.com/hashicorp/go-azure-helpers/lang/response" + 
"github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonschema" + "github.com/hashicorp/go-azure-helpers/resourcemanager/location" + "github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" + commonValidate "github.com/hashicorp/terraform-provider-azurerm/helpers/validate" + "github.com/hashicorp/terraform-provider-azurerm/internal/sdk" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/redhatopenshift/validate" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/suppress" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" +) + +var _ sdk.ResourceWithUpdate = RedHatOpenShiftCluster{} + +type RedHatOpenShiftCluster struct{} + +type RedHatOpenShiftClusterModel struct { + Tags map[string]string `tfschema:"tags"` + Name string `tfschema:"name"` + Location string `tfschema:"location"` + ResourceGroup string `tfschema:"resource_group_name"` + ConsoleUrl string `tfschema:"console_url"` + ServicePrincipal []ServicePrincipal `tfschema:"service_principal"` + ClusterProfile []ClusterProfile `tfschema:"cluster_profile"` + NetworkProfile []NetworkProfile `tfschema:"network_profile"` + MainProfile []MainProfile `tfschema:"main_profile"` + WorkerProfile []WorkerProfile `tfschema:"worker_profile"` + ApiServerProfile []ApiServerProfile `tfschema:"api_server_profile"` + IngressProfile []IngressProfile `tfschema:"ingress_profile"` +} + +type ServicePrincipal struct { + ClientId string `tfschema:"client_id"` + ClientSecret string `tfschema:"client_secret"` +} + +type ClusterProfile struct { + PullSecret string `tfschema:"pull_secret"` + Domain string `tfschema:"domain"` + ResourceGroupId string `tfschema:"resource_group_id"` + Version string `tfschema:"version"` + FipsEnabled bool `tfschema:"fips_enabled"` +} + +type NetworkProfile struct { + OutboundType string `tfschema:"outbound_type"` + PodCidr string `tfschema:"pod_cidr"` + ServiceCidr string `tfschema:"service_cidr"` +} + +type MainProfile struct { + SubnetId string `tfschema:"subnet_id"` + VmSize string `tfschema:"vm_size"` + DiskEncryptionSetId string `tfschema:"disk_encryption_set_id"` + EncryptionAtHostEnabled bool `tfschema:"encryption_at_host_enabled"` +} + +type WorkerProfile struct { + VmSize string `tfschema:"vm_size"` + SubnetId string `tfschema:"subnet_id"` + DiskEncryptionSetId string `tfschema:"disk_encryption_set_id"` + DiskSizeGb int `tfschema:"disk_size_gb"` + NodeCount int `tfschema:"node_count"` + EncryptionAtHostEnabled bool `tfschema:"encryption_at_host_enabled"` +} + +type IngressProfile struct { + Visibility string `tfschema:"visibility"` + IpAddress string `tfschema:"ip_address"` + Name string `tfschema:"name"` +} + +type ApiServerProfile struct { + Visibility string `tfschema:"visibility"` + IpAddress string `tfschema:"ip_address"` + Url string `tfschema:"url"` +} + +func (r RedHatOpenShiftCluster) Arguments() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + ForceNew: true, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "location": commonschema.Location(), + + "resource_group_name": commonschema.ResourceGroupName(), + + "cluster_profile": { + Type: pluginsdk.TypeList, + 
Required: true, + MaxItems: 1, + ForceNew: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "domain": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "version": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.ClusterVersion, + }, + "fips_enabled": { + Type: pluginsdk.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + }, + "pull_secret": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "resource_group_id": { + Type: pluginsdk.TypeString, + Computed: true, + }, + }, + }, + }, + + "service_principal": { + Type: pluginsdk.TypeList, + Required: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "client_id": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.IsUUID, + }, + "client_secret": { + Type: pluginsdk.TypeString, + Required: true, + Sensitive: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + }, + }, + + "network_profile": { + Type: pluginsdk.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "pod_cidr": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: commonValidate.CIDR, + }, + "service_cidr": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: commonValidate.CIDR, + }, + "outbound_type": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + Default: string(openshiftclusters.OutboundTypeLoadbalancer), + ValidateFunc: validation.StringInSlice( + openshiftclusters.PossibleValuesForOutboundType(), + false, + ), + }, + }, + }, + }, + + "main_profile": { + Type: pluginsdk.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "subnet_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + }, + "vm_size": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: suppress.CaseDifference, + ValidateFunc: validation.StringIsNotEmpty, + }, + "encryption_at_host_enabled": { + Type: pluginsdk.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + }, + "disk_encryption_set_id": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + }, + }, + }, + }, + + "worker_profile": { + Type: pluginsdk.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "vm_size": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: suppress.CaseDifference, + ValidateFunc: validation.StringIsNotEmpty, + }, + "disk_size_gb": { + Type: pluginsdk.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(128), + }, + "node_count": { + Type: pluginsdk.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(3, 60), + }, + "subnet_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + }, + "encryption_at_host_enabled": { + Type: pluginsdk.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + }, + "disk_encryption_set_id": { + Type: pluginsdk.TypeString, + Optional: true, + 
ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + }, + }, + }, + }, + + "api_server_profile": { + Type: pluginsdk.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "visibility": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice(openshiftclusters.PossibleValuesForVisibility(), false), + }, + "ip_address": { + Type: schema.TypeString, + Computed: true, + }, + "url": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "ingress_profile": { + Type: pluginsdk.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "visibility": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice(openshiftclusters.PossibleValuesForVisibility(), false), + }, + "ip_address": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "tags": commonschema.Tags(), + } +} + +func (r RedHatOpenShiftCluster) Attributes() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{ + "console_url": { + Type: pluginsdk.TypeString, + Computed: true, + }, + } +} + +func (r RedHatOpenShiftCluster) ModelObject() interface{} { + return &RedHatOpenShiftClusterModel{} +} + +func (r RedHatOpenShiftCluster) ResourceType() string { + return "azurerm_redhat_openshift_cluster" +} + +func (r RedHatOpenShiftCluster) IDValidationFunc() pluginsdk.SchemaValidateFunc { + return openshiftclusters.ValidateProviderOpenShiftClusterID +} + +func (r RedHatOpenShiftCluster) Create() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 90 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.RedHatOpenShift.OpenShiftClustersClient + subscriptionId := metadata.Client.Account.SubscriptionId + + var config RedHatOpenShiftClusterModel + if err := metadata.Decode(&config); err != nil { + return fmt.Errorf("decoding %+v", err) + } + + id := openshiftclusters.NewProviderOpenShiftClusterID(subscriptionId, config.ResourceGroup, config.Name) + + existing, err := client.Get(ctx, id) + if err != nil { + if !response.WasNotFound(existing.HttpResponse) { + return fmt.Errorf("checking for presence of existing %s: %s", id, err) + } + } + + if !response.WasNotFound(existing.HttpResponse) { + return metadata.ResourceRequiresImport(r.ResourceType(), id) + } + + parameters := openshiftclusters.OpenShiftCluster{ + Name: pointer.To(id.OpenShiftClusterName), + Location: location.Normalize(config.Location), + Properties: &openshiftclusters.OpenShiftClusterProperties{ + ClusterProfile: expandOpenshiftClusterProfile(config.ClusterProfile, id.SubscriptionId), + ServicePrincipalProfile: expandOpenshiftServicePrincipalProfile(config.ServicePrincipal), + NetworkProfile: expandOpenshiftNetworkProfile(config.NetworkProfile), + MasterProfile: expandOpenshiftMainProfile(config.MainProfile), + WorkerProfiles: expandOpenshiftWorkerProfiles(config.WorkerProfile), + ApiserverProfile: expandOpenshiftApiServerProfile(config.ApiServerProfile), + IngressProfiles: expandOpenshiftIngressProfiles(config.IngressProfile), + }, + Tags: pointer.To(config.Tags), + } + + if err = client.CreateOrUpdateThenPoll(ctx, id, parameters); err != nil { + return fmt.Errorf("creating %s: %+v", id, err) + } + + metadata.SetID(id) + + return nil + }, + } +} + +func (r 
RedHatOpenShiftCluster) Update() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 90 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.RedHatOpenShift.OpenShiftClustersClient + + id, err := openshiftclusters.ParseProviderOpenShiftClusterID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + var state RedHatOpenShiftClusterModel + if err := metadata.Decode(&state); err != nil { + return fmt.Errorf("decoding: %+v", err) + } + + parameter := openshiftclusters.OpenShiftClusterUpdate{} + + if metadata.ResourceData.HasChange("tags") { + parameter.Tags = pointer.To(state.Tags) + } + + if metadata.ResourceData.HasChange("service_principal") { + parameter.Properties = &openshiftclusters.OpenShiftClusterProperties{ + ServicePrincipalProfile: expandOpenshiftServicePrincipalProfile(state.ServicePrincipal), + } + } + + if err := client.UpdateThenPoll(ctx, *id, parameter); err != nil { + return fmt.Errorf("updating %s: %+v", id, err) + } + + return nil + }, + } +} + +func (r RedHatOpenShiftCluster) Read() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 5 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.RedHatOpenShift.OpenShiftClustersClient + + id, err := openshiftclusters.ParseProviderOpenShiftClusterID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, *id) + if err != nil { + if response.WasNotFound(resp.HttpResponse) { + return metadata.MarkAsGone(id) + } + + return fmt.Errorf("retrieving %s: %+v", id, err) + } + + var config RedHatOpenShiftClusterModel + if err := metadata.Decode(&config); err != nil { + return fmt.Errorf("decoding %+v", err) + } + + state := RedHatOpenShiftClusterModel{ + Name: id.OpenShiftClusterName, + ResourceGroup: id.ResourceGroupName, + } + + if model := resp.Model; model != nil { + state.Location = location.Normalize(model.Location) + state.Tags = pointer.From(model.Tags) + + if props := model.Properties; props != nil { + state.ClusterProfile = flattenOpenShiftClusterProfile(props.ClusterProfile, config) + state.ServicePrincipal = flattenOpenShiftServicePrincipalProfile(props.ServicePrincipalProfile, config) + state.NetworkProfile = flattenOpenShiftNetworkProfile(props.NetworkProfile) + state.MainProfile = flattenOpenShiftMainProfile(props.MasterProfile) + state.ApiServerProfile = flattenOpenShiftAPIServerProfile(props.ApiserverProfile) + state.IngressProfile = flattenOpenShiftIngressProfiles(props.IngressProfiles) + + workerProfiles, err := flattenOpenShiftWorkerProfiles(props.WorkerProfiles) + if err != nil { + return fmt.Errorf("flattening worker profiles: %+v", err) + } + state.WorkerProfile = workerProfiles + + if props.ConsoleProfile != nil { + state.ConsoleUrl = pointer.From(props.ConsoleProfile.Url) + } + } + } + + return metadata.Encode(&state) + }, + } +} + +func (r RedHatOpenShiftCluster) Delete() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 90 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + id, err := openshiftclusters.ParseProviderOpenShiftClusterID(metadata.ResourceData.Id()) + if err != nil { + return fmt.Errorf("while parsing resource ID: %+v", err) + } + + client := metadata.Client.RedHatOpenShift.OpenShiftClustersClient + + if err := client.DeleteThenPoll(ctx, *id); err != nil { + return fmt.Errorf("deleting %s: %+v", *id, err) + } + + return nil + }, + } +} + +func 
expandOpenshiftClusterProfile(input []ClusterProfile, subscriptionId string) *openshiftclusters.ClusterProfile { + if len(input) == 0 { + return nil + } + + fipsValidatedModules := openshiftclusters.FipsValidatedModulesDisabled + if input[0].FipsEnabled { + fipsValidatedModules = openshiftclusters.FipsValidatedModulesEnabled + } + + return &openshiftclusters.ClusterProfile{ + // the api needs a ResourceGroupId value and the portal doesn't allow you to set it but the portal returns the + // resource id being `aro-{domain}` so we'll follow that here. + ResourceGroupId: pointer.To(commonids.NewResourceGroupID(subscriptionId, fmt.Sprintf("aro-%s", input[0].Domain)).ID()), + Domain: pointer.To(input[0].Domain), + PullSecret: pointer.To(input[0].PullSecret), + FipsValidatedModules: pointer.To(fipsValidatedModules), + Version: pointer.To(input[0].Version), + } +} + +func flattenOpenShiftClusterProfile(profile *openshiftclusters.ClusterProfile, config RedHatOpenShiftClusterModel) []ClusterProfile { + if profile == nil { + return []ClusterProfile{} + } + + // pull secret isn't returned by the API so pass the existing value along + pullSecret := "" + if len(config.ClusterProfile) != 0 { + pullSecret = config.ClusterProfile[0].PullSecret + } + + fipsEnabled := false + if profile.FipsValidatedModules != nil { + fipsEnabled = *profile.FipsValidatedModules == openshiftclusters.FipsValidatedModulesEnabled + } + + return []ClusterProfile{ + { + PullSecret: pullSecret, + Domain: pointer.From(profile.Domain), + FipsEnabled: fipsEnabled, + ResourceGroupId: pointer.From(profile.ResourceGroupId), + Version: pointer.From(profile.Version), + }, + } +} + +func expandOpenshiftServicePrincipalProfile(input []ServicePrincipal) *openshiftclusters.ServicePrincipalProfile { + if len(input) == 0 { + return nil + } + + return &openshiftclusters.ServicePrincipalProfile{ + ClientId: pointer.To(input[0].ClientId), + ClientSecret: pointer.To(input[0].ClientSecret), + } +} + +func flattenOpenShiftServicePrincipalProfile(profile *openshiftclusters.ServicePrincipalProfile, config RedHatOpenShiftClusterModel) []ServicePrincipal { + if profile == nil { + return []ServicePrincipal{} + } + + // client secret isn't returned by the API so pass the existing value along + clientSecret := "" + if len(config.ServicePrincipal) != 0 { + clientSecret = config.ServicePrincipal[0].ClientSecret + } + + return []ServicePrincipal{ + { + ClientId: pointer.From(profile.ClientId), + ClientSecret: clientSecret, + }, + } +} + +func expandOpenshiftNetworkProfile(input []NetworkProfile) *openshiftclusters.NetworkProfile { + if len(input) == 0 { + return nil + } + + return &openshiftclusters.NetworkProfile{ + OutboundType: pointer.To(openshiftclusters.OutboundType(input[0].OutboundType)), + PodCidr: pointer.To(input[0].PodCidr), + ServiceCidr: pointer.To(input[0].ServiceCidr), + } +} + +func flattenOpenShiftNetworkProfile(profile *openshiftclusters.NetworkProfile) []NetworkProfile { + if profile == nil { + return []NetworkProfile{} + } + + return []NetworkProfile{ + { + OutboundType: string(pointer.From(profile.OutboundType)), + PodCidr: pointer.From(profile.PodCidr), + ServiceCidr: pointer.From(profile.ServiceCidr), + }, + } +} + +func expandOpenshiftMainProfile(input []MainProfile) *openshiftclusters.MasterProfile { + if len(input) == 0 { + return nil + } + + encryptionAtHost := openshiftclusters.EncryptionAtHostDisabled + if input[0].EncryptionAtHostEnabled { + encryptionAtHost = openshiftclusters.EncryptionAtHostEnabled + } + + return 
&openshiftclusters.MasterProfile{ + VMSize: pointer.To(input[0].VmSize), + SubnetId: pointer.To(input[0].SubnetId), + EncryptionAtHost: pointer.To(encryptionAtHost), + DiskEncryptionSetId: pointer.To(input[0].DiskEncryptionSetId), + } +} + +func flattenOpenShiftMainProfile(profile *openshiftclusters.MasterProfile) []MainProfile { + if profile == nil { + return []MainProfile{} + } + + encryptionAtHostEnabled := false + if profile.EncryptionAtHost != nil { + encryptionAtHostEnabled = *profile.EncryptionAtHost == openshiftclusters.EncryptionAtHostEnabled + } + + return []MainProfile{ + { + VmSize: pointer.From(profile.VMSize), + SubnetId: pointer.From(profile.SubnetId), + EncryptionAtHostEnabled: encryptionAtHostEnabled, + DiskEncryptionSetId: pointer.From(profile.DiskEncryptionSetId), + }, + } +} + +func expandOpenshiftWorkerProfiles(input []WorkerProfile) *[]openshiftclusters.WorkerProfile { + if len(input) == 0 { + return nil + } + + profiles := make([]openshiftclusters.WorkerProfile, 0) + + encryptionAtHost := openshiftclusters.EncryptionAtHostDisabled + if input[0].EncryptionAtHostEnabled { + encryptionAtHost = openshiftclusters.EncryptionAtHostEnabled + } + + profile := openshiftclusters.WorkerProfile{ + Name: pointer.To("worker"), + VMSize: pointer.To(input[0].VmSize), + DiskSizeGB: pointer.To(int64(input[0].DiskSizeGb)), + SubnetId: pointer.To(input[0].SubnetId), + Count: pointer.To(int64(input[0].NodeCount)), + EncryptionAtHost: pointer.To(encryptionAtHost), + DiskEncryptionSetId: pointer.To(input[0].DiskEncryptionSetId), + } + + profiles = append(profiles, profile) + + return &profiles +} + +func flattenOpenShiftWorkerProfiles(profiles *[]openshiftclusters.WorkerProfile) ([]WorkerProfile, error) { + if profiles == nil || len(*profiles) == 0 { + return []WorkerProfile{}, nil + } + + rawProfiles := *profiles + profile := rawProfiles[0] + + encryptionAtHostEnabled := false + if profile.EncryptionAtHost != nil { + encryptionAtHostEnabled = *profile.EncryptionAtHost == openshiftclusters.EncryptionAtHostEnabled + } + + subnetIdString := "" + if profile.SubnetId != nil { + subnetId, err := commonids.ParseSubnetIDInsensitively(*profile.SubnetId) + if err != nil { + return []WorkerProfile{}, fmt.Errorf("parsing subnet id: %+v", err) + } + subnetIdString = subnetId.ID() + } + + return []WorkerProfile{ + { + NodeCount: int(pointer.From(profile.Count)), + VmSize: pointer.From(profile.VMSize), + DiskSizeGb: int(pointer.From(profile.DiskSizeGB)), + SubnetId: subnetIdString, + EncryptionAtHostEnabled: encryptionAtHostEnabled, + DiskEncryptionSetId: pointer.From(profile.DiskEncryptionSetId), + }, + }, nil +} + +func expandOpenshiftApiServerProfile(input []ApiServerProfile) *openshiftclusters.APIServerProfile { + if len(input) == 0 { + return nil + } + + visibility := openshiftclusters.Visibility(input[0].Visibility) + + return &openshiftclusters.APIServerProfile{ + Visibility: &visibility, + } +} + +func flattenOpenShiftAPIServerProfile(profile *openshiftclusters.APIServerProfile) []ApiServerProfile { + if profile == nil { + return []ApiServerProfile{} + } + + return []ApiServerProfile{ + { + Visibility: string(pointer.From(profile.Visibility)), + Url: pointer.From(profile.Url), + IpAddress: pointer.From(profile.IP), + }, + } +} + +func expandOpenshiftIngressProfiles(input []IngressProfile) *[]openshiftclusters.IngressProfile { + if len(input) == 0 { + return nil + } + + profiles := make([]openshiftclusters.IngressProfile, 0) + + profile := openshiftclusters.IngressProfile{ + Name: 
pointer.To("default"), + Visibility: pointer.To(openshiftclusters.Visibility(input[0].Visibility)), + } + + profiles = append(profiles, profile) + + return &profiles +} + +func flattenOpenShiftIngressProfiles(profiles *[]openshiftclusters.IngressProfile) []IngressProfile { + if profiles == nil { + return []IngressProfile{} + } + + results := make([]IngressProfile, 0) + + for _, profile := range *profiles { + results = append(results, IngressProfile{ + Visibility: string(pointer.From(profile.Visibility)), + IpAddress: pointer.From(profile.IP), + Name: pointer.From(profile.Name), + }) + } + + return results +} diff --git a/internal/services/redhatopenshift/redhat_openshift_cluster_resource_test.go b/internal/services/redhatopenshift/redhat_openshift_cluster_resource_test.go new file mode 100644 index 000000000000..e49e2ea491c8 --- /dev/null +++ b/internal/services/redhatopenshift/redhat_openshift_cluster_resource_test.go @@ -0,0 +1,780 @@ +package redhatopenshift_test + +import ( + "context" + "fmt" + "os" + "testing" + + "github.com/hashicorp/go-azure-sdk/resource-manager/redhatopenshift/2023-09-04/openshiftclusters" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" + "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/utils" +) + +type OpenShiftClusterResource struct{} + +func TestAccOpenShiftCluster_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_redhat_openshift_cluster", "test") + r := OpenShiftClusterResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("service_principal.0.client_secret"), + }) +} + +func TestAccOpenShiftCluster_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_redhat_openshift_cluster", "test") + r := OpenShiftClusterResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("service_principal.0.client_secret"), + { + Config: r.update(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("service_principal.0.client_secret"), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("service_principal.0.client_secret"), + }) +} + +func TestAccOpenShiftCluster_private(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_redhat_openshift_cluster", "test") + r := OpenShiftClusterResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.private(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("service_principal.0.client_secret"), + }) +} + +func TestAccOpenShiftCluster_userDefinedRouting(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_redhat_openshift_cluster", "test") + r := OpenShiftClusterResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.userDefinedRouting(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + 
data.ImportStep("service_principal.0.client_secret"), + }) +} + +func TestAccOpenShiftCluster_encryptionAtHost(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_redhat_openshift_cluster", "test") + r := OpenShiftClusterResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.encryptionAtHost(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("service_principal.0.client_secret"), + }) +} + +func TestAccOpenShiftCluster_pullSecret(t *testing.T) { + // the pull secret can be generated from https://console.redhat.com/openshift/install/pull-secret + pullSecret := os.Getenv("ARM_TEST_ARO_PULL_SECRET") + if pullSecret == "" { + t.Skip("skip the test due to missing environment variable ARM_TEST_ARO_PULL_SECRET") + } + + data := acceptance.BuildTestData(t, "azurerm_redhat_openshift_cluster", "test") + r := OpenShiftClusterResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.pullSecret(data, pullSecret), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("service_principal.0.client_secret", "cluster_profile.0.pull_secret"), + }) +} + +func TestAccOpenShiftCluster_basicWithFipsEnabled(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_redhat_openshift_cluster", "test") + r := OpenShiftClusterResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basicWithFipsEnabled(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("service_principal.0.client_secret"), + }) +} + +func TestAccOpenShiftCluster_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_redhat_openshift_cluster", "test") + r := OpenShiftClusterResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func (t OpenShiftClusterResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := openshiftclusters.ParseProviderOpenShiftClusterID(state.ID) + if err != nil { + return nil, err + } + + resp, err := clients.RedHatOpenShift.OpenShiftClustersClient.Get(ctx, *id) + if err != nil { + return nil, fmt.Errorf("reading Red Hat Openshift Cluster (%s): %+v", id.String(), err) + } + + return utils.Bool(resp.Model != nil), nil +} + +func (r OpenShiftClusterResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%[1]s + +resource "azurerm_redhat_openshift_cluster" "test" { + name = "acctestaro%[2]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + cluster_profile { + domain = "aro-%[3]s.com" + version = "4.13.23" + } + + network_profile { + pod_cidr = "10.128.0.0/14" + service_cidr = "172.30.0.0/16" + } + + main_profile { + vm_size = "Standard_D8s_v3" + subnet_id = azurerm_subnet.main_subnet.id + } + + api_server_profile { + visibility = "Public" + } + + ingress_profile { + visibility = "Public" + } + + worker_profile { + vm_size = "Standard_D4s_v3" + disk_size_gb = 128 + node_count = 3 + subnet_id = azurerm_subnet.worker_subnet.id + } + + service_principal { + client_id = azuread_application.test.application_id + client_secret = azuread_service_principal_password.test.value + } + + 
depends_on = [ + "azurerm_role_assignment.role_network1", + "azurerm_role_assignment.role_network2", + ] +} + `, r.template(data), data.RandomInteger, data.RandomString) +} + +func (r OpenShiftClusterResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` + %[1]s + +resource "azurerm_redhat_openshift_cluster" "import" { + name = azurerm_redhat_openshift_cluster.test.name + resource_group_name = azurerm_redhat_openshift_cluster.test.resource_group_name + location = azurerm_redhat_openshift_cluster.test.location + + cluster_profile { + domain = azurerm_redhat_openshift_cluster.test.cluster_profile.0.domain + version = azurerm_redhat_openshift_cluster.test.cluster_profile.0.version + } + + network_profile { + pod_cidr = azurerm_redhat_openshift_cluster.test.network_profile.0.pod_cidr + service_cidr = azurerm_redhat_openshift_cluster.test.network_profile.0.service_cidr + } + + main_profile { + vm_size = azurerm_redhat_openshift_cluster.test.main_profile.0.vm_size + subnet_id = azurerm_redhat_openshift_cluster.test.main_profile.0.subnet_id + } + + api_server_profile { + visibility = azurerm_redhat_openshift_cluster.test.api_server_profile.0.visibility + } + + ingress_profile { + visibility = azurerm_redhat_openshift_cluster.test.ingress_profile.0.visibility + } + + worker_profile { + vm_size = azurerm_redhat_openshift_cluster.test.worker_profile.0.vm_size + disk_size_gb = azurerm_redhat_openshift_cluster.test.worker_profile.0.disk_size_gb + node_count = azurerm_redhat_openshift_cluster.test.worker_profile.0.node_count + subnet_id = azurerm_redhat_openshift_cluster.test.worker_profile.0.subnet_id + } + + service_principal { + client_id = azurerm_redhat_openshift_cluster.test.service_principal.0.client_id + client_secret = azurerm_redhat_openshift_cluster.test.service_principal.0.client_secret + } + + depends_on = [ + "azurerm_role_assignment.role_network1", + "azurerm_role_assignment.role_network2", + ] +} + `, r.basic(data)) +} + +func (r OpenShiftClusterResource) update(data acceptance.TestData) string { + return fmt.Sprintf(` +%[1]s + +resource "azuread_application" "test2" { + display_name = "acctest-aro-2-%[2]d" +} + +resource "azuread_service_principal" "test2" { + application_id = azuread_application.test2.application_id +} + +resource "azuread_service_principal_password" "test2" { + service_principal_id = azuread_service_principal.test2.object_id +} + +resource "azurerm_role_assignment" "role_network3" { + scope = azurerm_virtual_network.test.id + role_definition_name = "Network Contributor" + principal_id = azuread_service_principal.test2.object_id +} + +resource "azurerm_redhat_openshift_cluster" "test" { + name = "acctestaro%[2]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + cluster_profile { + domain = "aro-%[3]s.com" + version = "4.13.23" + } + + network_profile { + pod_cidr = "10.128.0.0/14" + service_cidr = "172.30.0.0/16" + } + + main_profile { + vm_size = "Standard_D8s_v3" + subnet_id = azurerm_subnet.main_subnet.id + } + + api_server_profile { + visibility = "Public" + } + + ingress_profile { + visibility = "Public" + } + + worker_profile { + vm_size = "Standard_D4s_v3" + disk_size_gb = 128 + node_count = 3 + subnet_id = azurerm_subnet.worker_subnet.id + } + + service_principal { + client_id = azuread_application.test2.application_id + client_secret = azuread_service_principal_password.test2.value + } + + tags = { + foo = "bar" + } + + depends_on = [ + 
"azurerm_role_assignment.role_network1", + "azurerm_role_assignment.role_network2", + "azurerm_role_assignment.role_network3", + ] +} + `, r.template(data), data.RandomInteger, data.RandomString) +} + +func (r OpenShiftClusterResource) pullSecret(data acceptance.TestData, pullSecret string) string { + return fmt.Sprintf(` +%[1]s + +resource "azurerm_redhat_openshift_cluster" "test" { + name = "acctestaro%[2]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + cluster_profile { + domain = "aro-%[3]s.com" + version = "4.13.23" + pull_secret = < **Note:** All arguments including the client secret will be stored in the raw state as plain-text. [Read more about sensitive data in state](/docs/state/sensitive-data.html). + +## Example Usage + +```hcl +data "azurerm_client_config" "example" {} + +data "azuread_client_config" "example" {} + +resource "azuread_application" "example" { + display_name = "example-aro" +} + +resource "azuread_service_principal" "example" { + application_id = azuread_application.example.application_id +} + +resource "azuread_service_principal_password" "example" { + service_principal_id = azuread_service_principal.example.object_id +} + +data "azuread_service_principal" "redhatopenshift" { + // This is the Azure Red Hat OpenShift RP service principal id, do NOT delete it + application_id = "f1dd0a37-89c6-4e07-bcd1-ffd3d43d8875" +} + +resource "azurerm_role_assignment" "role_network1" { + scope = azurerm_virtual_network.example.id + role_definition_name = "Network Contributor" + principal_id = azuread_service_principal.example.object_id +} + +resource "azurerm_role_assignment" "role_network2" { + scope = azurerm_virtual_network.example.id + role_definition_name = "Network Contributor" + principal_id = data.azuread_service_principal.redhatopenshift.object_id +} + +resource "azurerm_resource_group" "example" { + name = "example-resources" + location = "West US" +} + +resource "azurerm_virtual_network" "example" { + name = "example-vnet" + address_space = ["10.0.0.0/22"] + location = azurerm_resource_group.example.location + resource_group_name = azurerm_resource_group.example.name +} + +resource "azurerm_subnet" "main_subnet" { + name = "main-subnet" + resource_group_name = azurerm_resource_group.example.name + virtual_network_name = azurerm_virtual_network.example.name + address_prefixes = ["10.0.0.0/23"] + service_endpoints = ["Microsoft.Storage", "Microsoft.ContainerRegistry"] +} + +resource "azurerm_subnet" "worker_subnet" { + name = "worker-subnet" + resource_group_name = azurerm_resource_group.example.name + virtual_network_name = azurerm_virtual_network.example.name + address_prefixes = ["10.0.2.0/23"] + service_endpoints = ["Microsoft.Storage", "Microsoft.ContainerRegistry"] +} + +resource "azurerm_redhat_openshift_cluster" "example" { + name = "examplearo" + location = azurerm_resource_group.example.location + resource_group_name = azurerm_resource_group.example.name + + cluster_profile { + domain = "aro-example.com" + } + + network_profile { + pod_cidr = "10.128.0.0/14" + service_cidr = "172.30.0.0/16" + } + + main_profile { + vm_size = "Standard_D8s_v3" + subnet_id = azurerm_subnet.main_subnet.id + } + + api_server_profile { + visibility = "Public" + } + + ingress_profile { + visibility = "Public" + } + + worker_profile { + vm_size = "Standard_D4s_v3" + disk_size_gb = 128 + node_count = 3 + subnet_id = azurerm_subnet.worker_subnet.id + } + + service_principal { + client_id = 
azuread_application.example.application_id + client_secret = azuread_service_principal_password.example.value + } + + depends_on = [ + "azurerm_role_assignment.role_network1", + "azurerm_role_assignment.role_network2", + ] +} + +output "console_url" { + value = azurerm_redhat_openshift_cluster.example.console_url +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the Azure Red Hat OpenShift Cluster to create. Changing this forces a new resource to be created. + +* `location` - (Required) The location where the Azure Red Hat OpenShift Cluster should be created. Changing this forces a new resource to be created. + +* `resource_group_name` - (Required) Specifies the Resource Group where the Azure Red Hat OpenShift Cluster should exist. Changing this forces a new resource to be created. + +* `service_principal` - (Required) A `service_principal` block as defined below. + +* `main_profile` - (Required) A `main_profile` block as defined below. + +* `worker_profile` - (Required) A `worker_profile` block as defined below. + +* `cluster_profile` - (Required) A `cluster_profile` block as defined below. + +* `api_server_profile` - (Required) An `api_server_profile` block as defined below. + +* `ingress_profile` - (Required) An `ingress_profile` block as defined below. + +* `network_profile` - (Required) A `network_profile` block as defined below. + +* `tags` - (Optional) A mapping of tags to assign to the resource. + +--- + +A `service_principal` block supports the following: + +* `client_id` - (Required) The Client ID for the Service Principal. + +* `client_secret` - (Required) The Client Secret for the Service Principal. + +~> **Note:** Currently a service principal cannot be associated with more than one ARO cluster in the same Azure subscription. + +--- + +A `main_profile` block supports the following: + +* `subnet_id` - (Required) The ID of the subnet where main nodes will be hosted. Changing this forces a new resource to be created. + +* `vm_size` - (Required) The size of the Virtual Machines for the main nodes. Changing this forces a new resource to be created. + +* `encryption_at_host_enabled` - (Optional) Whether main virtual machines are encrypted at host. Defaults to `false`. Changing this forces a new resource to be created. + +~> **Note:** `encryption_at_host_enabled` is only available for certain VM sizes and the `EncryptionAtHost` feature must be enabled for your subscription. Please see the [Azure documentation](https://learn.microsoft.com/azure/virtual-machines/disks-enable-host-based-encryption-portal?tabs=azure-powershell) for more information. + +* `disk_encryption_set_id` - (Optional) The resource ID of an associated disk encryption set. Changing this forces a new resource to be created. + +--- + +A `worker_profile` block supports the following: + +* `subnet_id` - (Required) The ID of the subnet where worker nodes will be hosted. Changing this forces a new resource to be created. + +* `vm_size` - (Required) The size of the Virtual Machines for the worker nodes. Changing this forces a new resource to be created. + +* `disk_size_gb` - (Required) The internal OS disk size of the worker Virtual Machines in GB. Changing this forces a new resource to be created. + +* `node_count` - (Required) The initial number of worker nodes which should exist in the cluster. Changing this forces a new resource to be created. + +* `encryption_at_host_enabled` - (Optional) Whether worker virtual machines are encrypted at host. Defaults to `false`. 
Changing this forces a new resource to be created. + +~> **Note:** `encryption_at_host_enabled` is only available for certain VM sizes and the `EncryptionAtHost` feature must be enabled for your subscription. Please see the [Azure documentation](https://learn.microsoft.com/azure/virtual-machines/disks-enable-host-based-encryption-portal?tabs=azure-powershell) for more information. + +* `disk_encryption_set_id` - (Optional) The resource ID of an associated disk encryption set. Changing this forces a new resource to be created. + +--- + +A `cluster_profile` block supports the following: + +* `version` - (Required) The version of the OpenShift cluster. Changing this forces a new resource to be created. + +* `domain` - (Required) The custom domain for the cluster. For more info, see [Prepare a custom domain for your cluster](https://docs.microsoft.com/azure/openshift/tutorial-create-cluster#prepare-a-custom-domain-for-your-cluster-optional). Changing this forces a new resource to be created. + +* `pull_secret` - (Optional) The Red Hat pull secret for the cluster. For more info, see [Get a Red Hat pull secret](https://learn.microsoft.com/azure/openshift/tutorial-create-cluster#get-a-red-hat-pull-secret-optional). Changing this forces a new resource to be created. + +* `fips_enabled` - (Optional) Whether Federal Information Processing Standard (FIPS) validated cryptographic modules are used. Defaults to `false`. Changing this forces a new resource to be created. + +--- + +A `network_profile` block supports the following: + +* `pod_cidr` - (Required) The CIDR to use for pod IP addresses. Changing this forces a new resource to be created. + +* `service_cidr` - (Required) The network range used by the OpenShift service. Changing this forces a new resource to be created. + +* `outbound_type` - (Optional) The outbound (egress) routing method. Possible values are `Loadbalancer` and `UserDefinedRouting`. Defaults to `Loadbalancer`. Changing this forces a new resource to be created. + +--- + +An `api_server_profile` block supports the following: + +* `visibility` - (Required) Cluster API server visibility. Possible values are `Public` and `Private`. Changing this forces a new resource to be created. + +--- + +An `ingress_profile` block supports the following: + +* `visibility` - (Required) Cluster Ingress visibility. Possible values are `Public` and `Private`. Changing this forces a new resource to be created. + +--- + +## Attributes Reference + +The following attributes are exported: + +* `console_url` - The Red Hat OpenShift cluster console URL. + +* `cluster_profile` - A `cluster_profile` block as defined below. + +* `api_server_profile` - An `api_server_profile` block as defined below. + +* `ingress_profile` - An `ingress_profile` block as defined below. + +--- + +A `cluster_profile` block exports the following: + +* `resource_group_id` - The ID of the managed Resource Group that hosts the cluster's infrastructure resources. + +--- + +An `api_server_profile` block exports the following: + +* `ip_address` - The IP Address the API Server Profile is associated with. + +* `url` - The URL the API Server Profile is associated with. + +--- + +An `ingress_profile` block exports the following: + +* `name` - The name of the Ingress Profile. + +* `ip_address` - The IP Address the Ingress Profile is associated with. 
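For reference, the exported attributes can be surfaced as root module outputs. The sketch below is illustrative only and assumes the `azurerm_redhat_openshift_cluster.example` resource from the Example Usage section above:

```hcl
# Illustrative outputs; assumes the `azurerm_redhat_openshift_cluster.example`
# resource shown in the Example Usage section above.
output "api_server_url" {
  value = azurerm_redhat_openshift_cluster.example.api_server_profile[0].url
}

output "ingress_ip_address" {
  value = azurerm_redhat_openshift_cluster.example.ingress_profile[0].ip_address
}

output "managed_resource_group_id" {
  value = azurerm_redhat_openshift_cluster.example.cluster_profile[0].resource_group_id
}
```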
+ +## Timeouts + +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/language/resources/syntax.html#operation-timeouts) for certain actions: + +* `create` - (Defaults to 90 minutes) Used when creating the Red Hat OpenShift cluster. +* `update` - (Defaults to 90 minutes) Used when updating the Red Hat OpenShift cluster. +* `read` - (Defaults to 5 minutes) Used when retrieving the Red Hat OpenShift cluster. +* `delete` - (Defaults to 90 minutes) Used when deleting the Red Hat OpenShift cluster. + +## Import + +Red Hat OpenShift Clusters can be imported using the `resource id`, e.g. + +```shell +terraform import azurerm_redhat_openshift_cluster.cluster1 /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/group1/providers/Microsoft.RedHatOpenShift/openShiftClusters/cluster1 +```
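On Terraform v1.5 and later, a declarative `import` block can be used instead of the CLI command; a minimal sketch using the same example ID:

```hcl
# Declarative alternative to `terraform import` (requires Terraform v1.5+).
import {
  to = azurerm_redhat_openshift_cluster.cluster1
  id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/group1/providers/Microsoft.RedHatOpenShift/openShiftClusters/cluster1"
}
```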