diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 0ba73c5..b230914 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -4,6 +4,8 @@ on:
paths-ignore:
- 'README.md'
push:
+ branches:
+ - "main"
paths-ignore:
- 'README.md'
permissions:
diff --git a/docs/resources/cluster.md b/docs/resources/cluster.md
index f8fe6b6..2cdbaa1 100644
--- a/docs/resources/cluster.md
+++ b/docs/resources/cluster.md
@@ -138,7 +138,7 @@ Required:
Optional:
- `args` (List of String) Arguments to pass to the command when executing it.
-- `env` (Map of String) Defines environment variables to expose to the process.
+- `env` (Map of String) Defines environment variables to expose to the process.
@@ -186,7 +186,7 @@ Required:
Optional:
- `args` (List of String) Arguments to pass to the command when executing it.
-- `env` (Map of String) Defines environment variables to expose to the process.
+- `env` (Map of String) Defines environment variables to expose to the process.
diff --git a/docs/resources/infrastructure_stack.md b/docs/resources/infrastructure_stack.md
new file mode 100644
index 0000000..0b19547
--- /dev/null
+++ b/docs/resources/infrastructure_stack.md
@@ -0,0 +1,137 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "plural_infrastructure_stack Resource - terraform-provider-plural"
+subcategory: ""
+description: |-
+
+---
+
+# plural_infrastructure_stack (Resource)
+
+
+
+
+
+
+## Schema
+
+### Required
+
+- `cluster_id` (String) The cluster on which the stack will be applied.
+- `configuration` (Attributes) Stack configuration. (see [below for nested schema](#nestedatt--configuration))
+- `name` (String) Human-readable name of this stack.
+- `repository` (Attributes) Repository information used to pull stack. (see [below for nested schema](#nestedatt--repository))
+- `type` (String) A type for the stack, specifies the tool to use to apply it. Allowed values include `ANSIBLE` and `TERRAFORM`.
+
+### Optional
+
+- `approval` (Boolean) Determines whether to require approval.
+- `bindings` (Attributes) Read and write policies of this stack. (see [below for nested schema](#nestedatt--bindings))
+- `detach` (Boolean) Determines behavior during resource destruction, if true it will detach resource instead of deleting it.
+- `environment` (Attributes Set) Defines environment variables for the stack. (see [below for nested schema](#nestedatt--environment))
+- `files` (Map of String) File path-content map.
+- `job_spec` (Attributes) Kubernetes job spec used to run the stack. (see [below for nested schema](#nestedatt--job_spec))
+
+### Read-Only
+
+- `id` (String) Internal identifier of this stack.
+
+
+### Nested Schema for `configuration`
+
+Required:
+
+- `version` (String) The semver of the tool you wish to use.
+
+Optional:
+
+- `image` (String) Optional custom image you might want to use.
+
+
+
+### Nested Schema for `repository`
+
+Required:
+
+- `folder` (String) The folder where manifests live.
+- `id` (String) ID of the repository to pull from.
+- `ref` (String) A general git ref, either a branch name or commit sha understandable by "git checkout <ref>".
+
+
+
+### Nested Schema for `bindings`
+
+Optional:
+
+- `read` (Attributes Set) Read policies of this stack. (see [below for nested schema](#nestedatt--bindings--read))
+- `write` (Attributes Set) Write policies of this stack. (see [below for nested schema](#nestedatt--bindings--write))
+
+
+### Nested Schema for `bindings.read`
+
+Optional:
+
+- `group_id` (String)
+- `id` (String)
+- `user_id` (String)
+
+
+
+### Nested Schema for `bindings.write`
+
+Optional:
+
+- `group_id` (String)
+- `id` (String)
+- `user_id` (String)
+
+
+
+
+### Nested Schema for `environment`
+
+Required:
+
+- `name` (String) Environment variable name.
+- `value` (String) Environment variable value.
+
+Optional:
+
+- `secret` (Boolean) Indicates if environment variable is secret.
+
+
+
+### Nested Schema for `job_spec`
+
+Required:
+
+- `namespace` (String) Namespace where job will be deployed.
+
+Optional:
+
+- `annotations` (Map of String) Kubernetes annotations applied to the job.
+- `containers` (Attributes Set) (see [below for nested schema](#nestedatt--job_spec--containers))
+- `labels` (Map of String) Kubernetes labels applied to the job.
+- `raw` (String) If you'd rather define the job spec via straight Kubernetes YAML.
+- `service_account` (String) Kubernetes service account for this job.
+
+
+### Nested Schema for `job_spec.containers`
+
+Required:
+
+- `image` (String)
+
+Optional:
+
+- `args` (List of String) Arguments to pass to the command when executing it.
+- `env` (Map of String) Defines environment variables to expose to the process.
+- `env_from` (Attributes Set) (see [below for nested schema](#nestedatt--job_spec--containers--env_from))
+
+
+### Nested Schema for `job_spec.containers.env_from`
+
+Required:
+
+- `config_map` (String)
+- `secret` (String)
diff --git a/example/aws/main.tf b/example/cluster/aws/main.tf
similarity index 100%
rename from example/aws/main.tf
rename to example/cluster/aws/main.tf
diff --git a/example/azure/main.tf b/example/cluster/azure/main.tf
similarity index 100%
rename from example/azure/main.tf
rename to example/cluster/azure/main.tf
diff --git a/example/byok/main.tf b/example/cluster/byok/main.tf
similarity index 94%
rename from example/byok/main.tf
rename to example/cluster/byok/main.tf
index 5cd75a7..cffceb5 100644
--- a/example/byok/main.tf
+++ b/example/cluster/byok/main.tf
@@ -21,9 +21,9 @@ resource "plural_cluster" "byok" {
metadata = jsonencode({
test1 = "test"
test2 = false
- test3 = jsonencode({
+ test3 = {
abc = false
- })
+ }
})
helm_repo_url = "https://pluralsh.github.io/deployment-operator"
tags = {
diff --git a/example/gcp/main.tf b/example/cluster/gcp/main.tf
similarity index 100%
rename from example/gcp/main.tf
rename to example/cluster/gcp/main.tf
diff --git a/example/cluster_data/main.tf b/example/cluster_data/main.tf
deleted file mode 100644
index 01c6f19..0000000
--- a/example/cluster_data/main.tf
+++ /dev/null
@@ -1,16 +0,0 @@
-terraform {
- required_providers {
- plural = {
- source = "pluralsh/plural"
- version = "0.0.1"
- }
- }
-}
-
-provider "plural" {
- use_cli = true
-}
-
-data "plural_cluster" "cluster" {
- handle = "mgmt"
-}
\ No newline at end of file
diff --git a/example/stack/main.tf b/example/stack/main.tf
new file mode 100644
index 0000000..b8e2174
--- /dev/null
+++ b/example/stack/main.tf
@@ -0,0 +1,162 @@
+terraform {
+ required_providers {
+ plural = {
+ source = "pluralsh/plural"
+ version = "0.0.1"
+ }
+ }
+}
+
+provider "plural" {
+ use_cli = true
+}
+
+data "plural_cluster" "cluster" {
+ handle = "mgmt"
+}
+
+data "plural_git_repository" "repository" {
+ url = "https://github.com/zreigz/tf-hello.git"
+}
+
+resource "random_string" "random" {
+ length = 5
+ upper = false
+ special = false
+}
+
+resource "plural_infrastructure_stack" "stack-full" {
+ name = "stack-tf-full-${random_string.random.result}"
+ type = "TERRAFORM"
+ approval = true
+ detach = true
+ cluster_id = data.plural_cluster.cluster.id
+ repository = {
+ id = data.plural_git_repository.repository.id
+ ref = "main"
+ folder = "terraform"
+ }
+ configuration = {
+ image = "hashicorp/terraform:1.8.1"
+ version = "1.8.1"
+ }
+ files = {
+ "test.yml" = "value: 123"
+ }
+ environment = [
+ {
+ name = "USERNAME"
+ value = "joe"
+ },
+ {
+ name = "PASSWORD"
+ value = "test"
+ secret = true
+ }
+ ]
+ job_spec = {
+ namespace = "default"
+ labels = {
+ test = "123"
+ }
+ service_account = "default"
+ containers = [
+ {
+ image = "perl:5.34.0"
+ args = ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
+ env = {}
+ env_from = []
+ }
+ ]
+ }
+ bindings = {
+ read = []
+ write = []
+ }
+}
+
+resource "plural_infrastructure_stack" "stack-raw" {
+ name = "stack-tf-raw-${random_string.random.result}"
+ type = "TERRAFORM"
+ approval = true
+ detach = true
+ cluster_id = data.plural_cluster.cluster.id
+ repository = {
+ id = data.plural_git_repository.repository.id
+ ref = "main"
+ folder = "terraform"
+ }
+ configuration = {
+ image = "hashicorp/terraform:1.8.1"
+ version = "1.8.1"
+ }
+ files = {
+ "test.yml" = "value: 123"
+ }
+ environment = [
+ {
+ name = "USERNAME"
+ value = "joe"
+ },
+ {
+ name = "PASSWORD"
+ value = "test"
+ secret = true
+ }
+ ]
+ job_spec = {
+ namespace = "default"
+ raw = yamlencode({
+ containers = [
+ {
+ name = "pi"
+ image = "perl:5.34.0"
+ command = ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
+ }
+ ]
+ restartPolicy = "Never"
+ })
+ }
+ bindings = {
+ read = []
+ write = []
+ }
+}
+
+resource "plural_infrastructure_stack" "stack-empty" {
+ name = "stack-tf-empty-${random_string.random.result}"
+ type = "TERRAFORM"
+ approval = true
+ detach = true
+ cluster_id = data.plural_cluster.cluster.id
+ repository = {
+ id = data.plural_git_repository.repository.id
+ ref = "main"
+ folder = "terraform"
+ }
+ configuration = {
+ version = "1.8.1"
+ }
+ files = {}
+ environment = []
+ job_spec = {
+ namespace = "default"
+ raw = yamlencode({ test = true })
+ }
+ bindings = {}
+}
+
+resource "plural_infrastructure_stack" "stack-minimal" {
+ name = "stack-tf-minimal-${random_string.random.result}"
+ type = "TERRAFORM"
+ detach = true
+ cluster_id = data.plural_cluster.cluster.id
+ repository = {
+ id = data.plural_git_repository.repository.id
+ ref = "main"
+ folder = "terraform"
+ }
+ configuration = {
+ version = "1.8.1"
+ }
+}
diff --git a/go.mod b/go.mod
index 60cca6e..3d30a28 100644
--- a/go.mod
+++ b/go.mod
@@ -12,13 +12,14 @@ require (
github.com/hashicorp/terraform-plugin-framework-validators v0.12.0
github.com/hashicorp/terraform-plugin-log v0.9.0
github.com/mitchellh/go-homedir v1.1.0
- github.com/pluralsh/console-client-go v0.1.17
+ github.com/pluralsh/console-client-go v0.4.0
github.com/pluralsh/plural-cli v0.8.5-0.20240216094552-efc34ee6de37
github.com/pluralsh/polly v0.1.7
github.com/samber/lo v1.38.1
github.com/sirupsen/logrus v1.9.3
gopkg.in/yaml.v2 v2.4.0
helm.sh/helm/v3 v3.11.2
+ k8s.io/api v0.26.4
k8s.io/apimachinery v0.26.4
k8s.io/client-go v0.26.4
sigs.k8s.io/yaml v1.3.0
@@ -335,7 +336,6 @@ require (
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
honnef.co/go/tools v0.4.6 // indirect
- k8s.io/api v0.26.4 // indirect
k8s.io/apiextensions-apiserver v0.26.1 // indirect
k8s.io/apiserver v0.26.1 // indirect
k8s.io/cli-runtime v0.26.1 // indirect
diff --git a/go.sum b/go.sum
index dcdc0a2..850b0bc 100644
--- a/go.sum
+++ b/go.sum
@@ -856,8 +856,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
-github.com/pluralsh/console-client-go v0.1.17 h1:QMtnWdRvV13/sND/CFjFBUR8nyg3JJgwXReSyM6bK7A=
-github.com/pluralsh/console-client-go v0.1.17/go.mod h1:eyCiLA44YbXiYyJh8303jk5JdPkt9McgCo5kBjk4lKo=
+github.com/pluralsh/console-client-go v0.4.0 h1:lgKaVGi8jB7S8wFF6L3P6H/4Xc88e4FozhyW58O1w3Q=
+github.com/pluralsh/console-client-go v0.4.0/go.mod h1:eyCiLA44YbXiYyJh8303jk5JdPkt9McgCo5kBjk4lKo=
github.com/pluralsh/gqlclient v1.11.0 h1:FfXW7FiEJLHOfTAa7NxDb8jb3aMZNIpCAcG+bg8uHYA=
github.com/pluralsh/gqlclient v1.11.0/go.mod h1:qSXKUlio1F2DRPy8el4oFYsmpKbkUYspgPB87T4it5I=
github.com/pluralsh/plural-cli v0.8.5-0.20240216094552-efc34ee6de37 h1:DBnaKvKmbTbKwbkrh/2gJBwyHYfaXdxeT3UGh+94K4g=
diff --git a/internal/common/cluster_bindings.go b/internal/common/cluster_bindings.go
index f861197..14e1180 100644
--- a/internal/common/cluster_bindings.go
+++ b/internal/common/cluster_bindings.go
@@ -1,29 +1,94 @@
package common
import (
+ "context"
+
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
console "github.com/pluralsh/console-client-go"
)
type ClusterBindings struct {
- Read []*ClusterPolicyBinding `tfsdk:"read"`
- Write []*ClusterPolicyBinding `tfsdk:"write"`
+ Read types.Set `tfsdk:"read"`
+ Write types.Set `tfsdk:"write"`
}
-func (cb *ClusterBindings) ReadAttributes() []*console.PolicyBindingAttributes {
+func (cb *ClusterBindings) ReadAttributes(ctx context.Context, d diag.Diagnostics) []*console.PolicyBindingAttributes {
if cb == nil {
- return []*console.PolicyBindingAttributes{}
+ return nil
+ }
+
+ return clusterPolicyBindingAttributes(cb.Read, ctx, d)
+}
+
+func (cb *ClusterBindings) WriteAttributes(ctx context.Context, d diag.Diagnostics) []*console.PolicyBindingAttributes {
+ if cb == nil {
+ return nil
+ }
+
+ return clusterPolicyBindingAttributes(cb.Write, ctx, d)
+}
+
+func clusterPolicyBindingAttributes(bindings types.Set, ctx context.Context, d diag.Diagnostics) []*console.PolicyBindingAttributes {
+ if bindings.IsNull() {
+ return nil
}
- return clusterPolicyBindingAttributes(cb.Read)
+ result := make([]*console.PolicyBindingAttributes, 0, len(bindings.Elements()))
+ elements := make([]ClusterPolicyBinding, len(bindings.Elements()))
+ d.Append(bindings.ElementsAs(ctx, &elements, false)...)
+
+ for _, binding := range elements {
+ result = append(result, &console.PolicyBindingAttributes{
+ ID: binding.ID.ValueStringPointer(),
+ UserID: binding.UserID.ValueStringPointer(),
+ GroupID: binding.GroupID.ValueStringPointer(),
+ })
+ }
+
+ return result
}
-func (cb *ClusterBindings) WriteAttributes() []*console.PolicyBindingAttributes {
+func (cb *ClusterBindings) From(readBindings []*console.PolicyBindingFragment, writeBindings []*console.PolicyBindingFragment, ctx context.Context, d diag.Diagnostics) {
if cb == nil {
- return []*console.PolicyBindingAttributes{}
+ return
}
- return clusterPolicyBindingAttributes(cb.Write)
+ cb.Read = clusterBindingsFrom(readBindings, cb.Read, ctx, d)
+ cb.Write = clusterBindingsFrom(writeBindings, cb.Write, ctx, d)
+}
+
+func clusterBindingsFrom(bindings []*console.PolicyBindingFragment, config types.Set, ctx context.Context, d diag.Diagnostics) types.Set {
+ if len(bindings) == 0 {
+ // Rewriting config to state to avoid inconsistent result errors.
+ // This could happen, for example, when sending "nil" to API and "[]" is returned as a result.
+ return config
+ }
+
+ values := make([]attr.Value, len(bindings))
+ for i, binding := range bindings {
+ value := ClusterPolicyBinding{
+ ID: types.StringPointerValue(binding.ID),
+ }
+
+ if binding.User != nil {
+ value.UserID = types.StringValue(binding.User.ID)
+ }
+
+ if binding.Group != nil {
+ value.GroupID = types.StringValue(binding.Group.ID)
+ }
+
+ objValue, diags := types.ObjectValueFrom(ctx, ClusterPolicyBindingAttrTypes, value)
+ values[i] = objValue
+ d.Append(diags...)
+ }
+
+ setValue, diags := types.SetValue(basetypes.ObjectType{AttrTypes: ClusterPolicyBindingAttrTypes}, values)
+ d.Append(diags...)
+ return setValue
}
type ClusterPolicyBinding struct {
@@ -32,19 +97,16 @@ type ClusterPolicyBinding struct {
UserID types.String `tfsdk:"user_id"`
}
-func (c *ClusterPolicyBinding) Attributes() *console.PolicyBindingAttributes {
- return &console.PolicyBindingAttributes{
- ID: c.ID.ValueStringPointer(),
- UserID: c.UserID.ValueStringPointer(),
- GroupID: c.GroupID.ValueStringPointer(),
- }
+var ClusterPolicyBindingAttrTypes = map[string]attr.Type{
+ "group_id": types.StringType,
+ "id": types.StringType,
+ "user_id": types.StringType,
}
-func clusterPolicyBindingAttributes(bindings []*ClusterPolicyBinding) []*console.PolicyBindingAttributes {
- result := make([]*console.PolicyBindingAttributes, len(bindings))
- for i, b := range bindings {
- result[i] = b.Attributes()
+func (cpb *ClusterPolicyBinding) Attributes() *console.PolicyBindingAttributes {
+ return &console.PolicyBindingAttributes{
+ ID: cpb.ID.ValueStringPointer(),
+ UserID: cpb.UserID.ValueStringPointer(),
+ GroupID: cpb.GroupID.ValueStringPointer(),
}
-
- return result
}
diff --git a/internal/common/cluster_node_pool.go b/internal/common/cluster_node_pool.go
index 0de8c68..08c59ce 100644
--- a/internal/common/cluster_node_pool.go
+++ b/internal/common/cluster_node_pool.go
@@ -141,7 +141,7 @@ func ClusterNodePoolsFrom(nodePools []*console.NodePoolFragment, configNodePools
MinSize: types.Int64Value(nodePool.MinSize),
MaxSize: types.Int64Value(nodePool.MaxSize),
InstanceType: types.StringValue(nodePool.InstanceType),
- Labels: clusterNodePoolLabelsFrom(nodePool, ctx, d),
+ Labels: MapFrom(nodePool.Labels, ctx, d),
Taints: clusterNodePoolTaintsFrom(nodePool, ctx, d),
CloudSettings: configNodePoolsElements[nodePool.Name].CloudSettings, // Rewriting config to state to avoid unknown values.
}).Element()
@@ -154,16 +154,6 @@ func ClusterNodePoolsFrom(nodePools []*console.NodePoolFragment, configNodePools
return mapValue
}
-func clusterNodePoolLabelsFrom(nodePool *console.NodePoolFragment, ctx context.Context, d diag.Diagnostics) types.Map {
- if len(nodePool.Labels) == 0 {
- return types.MapNull(types.StringType)
- }
-
- mapValue, diags := types.MapValueFrom(ctx, types.StringType, nodePool.Labels)
- d.Append(diags...)
- return mapValue
-}
-
func clusterNodePoolTaintsFrom(nodePool *console.NodePoolFragment, ctx context.Context, d diag.Diagnostics) types.Set {
if len(nodePool.Taints) == 0 {
return types.SetNull(basetypes.ObjectType{AttrTypes: NodePoolTaintAttrTypes})
diff --git a/internal/common/map.go b/internal/common/map.go
new file mode 100644
index 0000000..86d2049
--- /dev/null
+++ b/internal/common/map.go
@@ -0,0 +1,30 @@
+package common
+
+import (
+ "context"
+
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+func MapFrom(values map[string]any, ctx context.Context, d diag.Diagnostics) types.Map {
+ if values == nil {
+ return types.MapNull(types.StringType)
+ }
+
+ mapValue, diags := types.MapValueFrom(ctx, types.StringType, values)
+ d.Append(diags...)
+ return mapValue
+}
+
+func MapFromWithConfig(values map[string]any, config types.Map, ctx context.Context, d diag.Diagnostics) types.Map {
+ if len(values) == 0 {
+ // Rewriting config to state to avoid inconsistent result errors.
+ // This could happen, for example, when sending "nil" to API and "[]" is returned as a result.
+ return config
+ }
+
+ mapValue, diags := types.MapValueFrom(ctx, types.StringType, values)
+ d.Append(diags...)
+ return mapValue
+}
diff --git a/internal/defaults/env.go b/internal/defaults/env.go
index 11b1a54..b1c104b 100644
--- a/internal/defaults/env.go
+++ b/internal/defaults/env.go
@@ -38,7 +38,7 @@ func (d envDefaultValue[_]) MarkdownDescription(_ context.Context) string {
}
func (d envDefaultValue[T]) DefaultString(_ context.Context, _ defaults.StringRequest, resp *defaults.StringResponse) {
- value := interface{}(d.defaultValue)
+ value := any(d.defaultValue)
if v := os.Getenv(d.envVar); len(v) > 0 {
value = v
}
@@ -48,7 +48,7 @@ func (d envDefaultValue[T]) DefaultString(_ context.Context, _ defaults.StringRe
}
func (d envDefaultValue[T]) DefaultBool(_ context.Context, _ defaults.BoolRequest, resp *defaults.BoolResponse) {
- value := interface{}(d.defaultValue)
+ value := any(d.defaultValue)
if v := os.Getenv(d.envVar); len(v) > 0 {
value = v == "true"
}
diff --git a/internal/provider/provider.go b/internal/provider/provider.go
index 3dd3ae4..0bb22fb 100644
--- a/internal/provider/provider.go
+++ b/internal/provider/provider.go
@@ -184,6 +184,7 @@ func (p *PluralProvider) Resources(_ context.Context) []func() resource.Resource
r.NewProviderResource,
r.NewServiceDeploymentResource,
r.NewServiceContextResource,
+ r.NewInfrastructureStackResource,
}
}
diff --git a/internal/resource/cluster_kubeconfig.go b/internal/resource/cluster_kubeconfig.go
index f1cc462..7978ce1 100644
--- a/internal/resource/cluster_kubeconfig.go
+++ b/internal/resource/cluster_kubeconfig.go
@@ -66,7 +66,7 @@ func newKubeconfig(ctx context.Context, kubeconfig *Kubeconfig, namespace *strin
loader := &clientcmd.ClientConfigLoadingRules{}
if !lo.IsEmpty(kubeconfig.ConfigPath.ValueString()) {
- tflog.Info(ctx, "using kubeconfig", map[string]interface{}{
+ tflog.Info(ctx, "using kubeconfig", map[string]any{
"kubeconfig": kubeconfig.ConfigPath.ValueString(),
})
@@ -79,7 +79,7 @@ func newKubeconfig(ctx context.Context, kubeconfig *Kubeconfig, namespace *strin
if !lo.IsEmpty(kubeconfig.ConfigContext.ValueString()) || !lo.IsEmpty(kubeconfig.ConfigContextAuthInfo.ValueString()) || !lo.IsEmpty(kubeconfig.ConfigContextCluster.ValueString()) {
if !lo.IsEmpty(kubeconfig.ConfigContext.ValueString()) {
overrides.CurrentContext = kubeconfig.ConfigContext.ValueString()
- tflog.Info(ctx, "using custom current context", map[string]interface{}{
+ tflog.Info(ctx, "using custom current context", map[string]any{
"context": overrides.CurrentContext,
})
}
@@ -91,7 +91,7 @@ func newKubeconfig(ctx context.Context, kubeconfig *Kubeconfig, namespace *strin
if !lo.IsEmpty(kubeconfig.ConfigContextCluster.ValueString()) {
overrides.Context.Cluster = kubeconfig.ConfigContextCluster.ValueString()
}
- tflog.Info(ctx, "using overridden context", map[string]interface{}{
+ tflog.Info(ctx, "using overridden context", map[string]any{
"context": overrides.Context,
})
}
diff --git a/internal/resource/cluster_model.go b/internal/resource/cluster_model.go
index 4eff890..716d09b 100644
--- a/internal/resource/cluster_model.go
+++ b/internal/resource/cluster_model.go
@@ -76,8 +76,8 @@ func (c *cluster) Attributes(ctx context.Context, d diag.Diagnostics) console.Cl
Version: c.Version.ValueStringPointer(),
Protect: c.Protect.ValueBoolPointer(),
CloudSettings: c.CloudSettings.Attributes(),
- ReadBindings: c.Bindings.ReadAttributes(),
- WriteBindings: c.Bindings.WriteAttributes(),
+ ReadBindings: c.Bindings.ReadAttributes(ctx, d),
+ WriteBindings: c.Bindings.WriteAttributes(ctx, d),
Tags: c.TagsAttribute(ctx, d),
NodePools: c.NodePoolsAttribute(ctx, d),
Metadata: c.Metadata.ValueStringPointer(),
diff --git a/internal/resource/cluster_operator_handler.go b/internal/resource/cluster_operator_handler.go
index 4cc0efe..326a7a2 100644
--- a/internal/resource/cluster_operator_handler.go
+++ b/internal/resource/cluster_operator_handler.go
@@ -40,7 +40,7 @@ type OperatorHandler struct {
repoUrl string
// additional values used on install
- vals map[string]interface{}
+ vals map[string]any
// Preconfigured helm actions and chart
chart *chart.Chart
@@ -152,9 +152,9 @@ func (oh *OperatorHandler) listReleases(state action.ListStates) ([]*release.Rel
return client.Run()
}
-func (oh *OperatorHandler) values(token string) (map[string]interface{}, error) {
- globalVals := map[string]interface{}{}
- vals := map[string]interface{}{
+func (oh *OperatorHandler) values(token string) (map[string]any, error) {
+ globalVals := map[string]any{}
+ vals := map[string]any{
"secrets": map[string]string{
"deployToken": token,
},
@@ -232,7 +232,7 @@ func (oh *OperatorHandler) Uninstall() error {
}
func NewOperatorHandler(ctx context.Context, client *client.Client, kubeconfig *Kubeconfig, repoUrl string, values *string, consoleUrl string) (*OperatorHandler, error) {
- vals := map[string]interface{}{}
+ vals := map[string]any{}
if values != nil {
if err := yaml.Unmarshal([]byte(*values), &vals); err != nil {
return nil, err
diff --git a/internal/resource/cluster_schema.go b/internal/resource/cluster_schema.go
index dae73df..50560c1 100644
--- a/internal/resource/cluster_schema.go
+++ b/internal/resource/cluster_schema.go
@@ -235,19 +235,13 @@ func (r *clusterResource) schema() schema.Schema {
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"group_id": schema.StringAttribute{
- Description: "",
- MarkdownDescription: "",
- Optional: true,
+ Optional: true,
},
"id": schema.StringAttribute{
- Description: "",
- MarkdownDescription: "",
- Optional: true,
+ Optional: true,
},
"user_id": schema.StringAttribute{
- Description: "",
- MarkdownDescription: "",
- Optional: true,
+ Optional: true,
},
},
},
@@ -259,19 +253,13 @@ func (r *clusterResource) schema() schema.Schema {
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"group_id": schema.StringAttribute{
- Description: "",
- MarkdownDescription: "",
- Optional: true,
+ Optional: true,
},
"id": schema.StringAttribute{
- Description: "",
- MarkdownDescription: "",
- Optional: true,
+ Optional: true,
},
"user_id": schema.StringAttribute{
- Description: "",
- MarkdownDescription: "",
- Optional: true,
+ Optional: true,
},
},
},
@@ -477,8 +465,8 @@ func (r *clusterResource) kubeconfigSchema(deprecated bool) schema.SingleNestedA
ElementType: types.StringType,
},
"env": schema.MapAttribute{
- Description: "Defines environment variables to expose to the process.",
- MarkdownDescription: "Defines environment variables to expose to the process.",
+ Description: "Defines environment variables to expose to the process.",
+ MarkdownDescription: "Defines environment variables to expose to the process.",
Optional: true,
ElementType: types.StringType,
},
diff --git a/internal/resource/infrastructure_stack.go b/internal/resource/infrastructure_stack.go
new file mode 100644
index 0000000..5bee9ae
--- /dev/null
+++ b/internal/resource/infrastructure_stack.go
@@ -0,0 +1,141 @@
+package resource
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "terraform-provider-plural/internal/client"
+ "terraform-provider-plural/internal/common"
+
+ "github.com/hashicorp/terraform-plugin-framework/path"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "k8s.io/apimachinery/pkg/util/wait"
+)
+
+var _ resource.Resource = &InfrastructureStackResource{}
+var _ resource.ResourceWithImportState = &InfrastructureStackResource{}
+
+func NewInfrastructureStackResource() resource.Resource {
+ return &InfrastructureStackResource{}
+}
+
+// InfrastructureStackResource defines the infrastructure stack resource implementation.
+type InfrastructureStackResource struct {
+ client *client.Client
+}
+
+func (r *InfrastructureStackResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
+ resp.TypeName = req.ProviderTypeName + "_infrastructure_stack"
+}
+
+func (r *InfrastructureStackResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
+ resp.Schema = r.schema()
+}
+
+func (r *InfrastructureStackResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
+ if req.ProviderData == nil {
+ return
+ }
+
+ data, ok := req.ProviderData.(*common.ProviderData)
+ if !ok {
+ resp.Diagnostics.AddError(
+ "Unexpected Infrastructure Stack Resource Configure Type",
+ fmt.Sprintf("Expected *common.ProviderData, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+ )
+
+ return
+ }
+
+ r.client = data.Client
+}
+
+func (r *InfrastructureStackResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
+ data := new(infrastructureStack)
+ resp.Diagnostics.Append(req.Plan.Get(ctx, data)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ sd, err := r.client.CreateStack(ctx, data.Attributes(ctx, resp.Diagnostics))
+ if err != nil {
+ resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to create infrastructure stack, got error: %s", err))
+ return
+ }
+
+ data.From(sd.CreateStack, ctx, resp.Diagnostics)
+ resp.Diagnostics.Append(resp.State.Set(ctx, data)...)
+}
+
+func (r *InfrastructureStackResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
+ data := new(infrastructureStack)
+ resp.Diagnostics.Append(req.State.Get(ctx, data)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ response, err := r.client.GetInfrastructureStack(ctx, data.Id.ValueString())
+ if err != nil {
+ resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to read infrastructure stack, got error: %s", err))
+ return
+ }
+
+ data.From(response.InfrastructureStack, ctx, resp.Diagnostics)
+ resp.Diagnostics.Append(resp.State.Set(ctx, data)...)
+}
+
+func (r *InfrastructureStackResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
+ data := new(infrastructureStack)
+ resp.Diagnostics.Append(req.Plan.Get(ctx, data)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ _, err := r.client.UpdateStack(ctx, data.Id.ValueString(), data.Attributes(ctx, resp.Diagnostics))
+ if err != nil {
+ resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to update infrastructure stack, got error: %s", err))
+ return
+ }
+
+ resp.Diagnostics.Append(resp.State.Set(ctx, data)...)
+}
+
+func (r *InfrastructureStackResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
+ data := new(infrastructureStack)
+ resp.Diagnostics.Append(req.State.Get(ctx, data)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ if data.Detach.ValueBool() {
+ _, err := r.client.DetachStack(ctx, data.Id.ValueString())
+ if err != nil {
+ resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to detach infrastructure stack, got error: %s", err))
+ return
+ }
+ } else {
+ _, err := r.client.DeleteStack(ctx, data.Id.ValueString())
+ if err != nil {
+ resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to delete infrastructure stack, got error: %s", err))
+ return
+ }
+
+ err = wait.WaitForWithContext(ctx, client.Ticker(5*time.Second), func(ctx context.Context) (bool, error) {
+ _, err := r.client.GetInfrastructureStack(ctx, data.Id.ValueString())
+ if client.IsNotFound(err) {
+ return true, nil
+ }
+
+ return false, err
+ })
+ if err != nil {
+			resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Error while waiting for infrastructure stack to be deleted, got error: %s", err))
+ return
+ }
+ }
+}
+
+func (r *InfrastructureStackResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+ resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
+}
diff --git a/internal/resource/infrastructure_stack_model.go b/internal/resource/infrastructure_stack_model.go
new file mode 100644
index 0000000..e4cf801
--- /dev/null
+++ b/internal/resource/infrastructure_stack_model.go
@@ -0,0 +1,442 @@
+package resource
+
+import (
+ "context"
+
+ "terraform-provider-plural/internal/common"
+
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+ gqlclient "github.com/pluralsh/console-client-go"
+ "github.com/pluralsh/polly/algorithms"
+)
+
+type infrastructureStack struct {
+ Id types.String `tfsdk:"id"`
+ Name types.String `tfsdk:"name"`
+ Type types.String `tfsdk:"type"`
+ Approval types.Bool `tfsdk:"approval"`
+ Detach types.Bool `tfsdk:"detach"`
+ ClusterId types.String `tfsdk:"cluster_id"`
+ Repository *InfrastructureStackRepository `tfsdk:"repository"`
+ Configuration *InfrastructureStackConfiguration `tfsdk:"configuration"`
+ Files types.Map `tfsdk:"files"`
+ Environment types.Set `tfsdk:"environment"`
+ JobSpec *InfrastructureStackJobSpec `tfsdk:"job_spec"`
+ Bindings *common.ClusterBindings `tfsdk:"bindings"`
+}
+
+func (is *infrastructureStack) Attributes(ctx context.Context, d diag.Diagnostics) gqlclient.StackAttributes {
+ return gqlclient.StackAttributes{
+ Name: is.Name.ValueString(),
+ Type: gqlclient.StackType(is.Type.ValueString()),
+ RepositoryID: is.Repository.Id.ValueString(),
+ ClusterID: is.ClusterId.ValueString(),
+ Git: is.Repository.Attributes(),
+ JobSpec: is.JobSpec.Attributes(ctx, d),
+ Configuration: is.Configuration.Attributes(),
+ Approval: is.Approval.ValueBoolPointer(),
+ ReadBindings: is.Bindings.ReadAttributes(ctx, d),
+ WriteBindings: is.Bindings.WriteAttributes(ctx, d),
+ Files: is.FilesAttributes(ctx, d),
+ Environment: is.EnvironmentAttributes(ctx, d),
+ }
+}
+
+func (is *infrastructureStack) FilesAttributes(ctx context.Context, d diag.Diagnostics) []*gqlclient.StackFileAttributes {
+ if is.Files.IsNull() {
+ return nil
+ }
+
+ result := make([]*gqlclient.StackFileAttributes, 0)
+ elements := make(map[string]types.String, len(is.Files.Elements()))
+ d.Append(is.Files.ElementsAs(ctx, &elements, false)...)
+
+ for k, v := range elements {
+ result = append(result, &gqlclient.StackFileAttributes{Path: k, Content: v.ValueString()})
+ }
+
+ return result
+}
+
+func (is *infrastructureStack) EnvironmentAttributes(ctx context.Context, d diag.Diagnostics) []*gqlclient.StackEnvironmentAttributes {
+ if is.Environment.IsNull() {
+ return nil
+ }
+
+ result := make([]*gqlclient.StackEnvironmentAttributes, 0, len(is.Environment.Elements()))
+ elements := make([]InfrastructureStackEnvironment, len(is.Environment.Elements()))
+ d.Append(is.Environment.ElementsAs(ctx, &elements, false)...)
+
+ for _, env := range elements {
+ result = append(result, &gqlclient.StackEnvironmentAttributes{
+ Name: env.Name.ValueString(),
+ Value: env.Value.ValueString(),
+ Secret: env.Secret.ValueBoolPointer(),
+ })
+ }
+
+ return result
+}
+
+func (is *infrastructureStack) From(stack *gqlclient.InfrastructureStackFragment, ctx context.Context, d diag.Diagnostics) {
+ is.Id = types.StringPointerValue(stack.ID)
+ is.Name = types.StringValue(stack.Name)
+ is.Type = types.StringValue(string(stack.Type))
+ is.Approval = types.BoolPointerValue(stack.Approval)
+ is.ClusterId = types.StringValue(stack.Cluster.ID)
+ is.Repository.From(stack.Repository, stack.Git)
+ is.Configuration.From(stack.Configuration)
+ is.Files = infrastructureStackFilesFrom(stack.Files, is.Files, d)
+ is.Environment = infrastructureStackEnvironmentsFrom(stack.Environment, is.Environment, ctx, d)
+ is.Bindings.From(stack.ReadBindings, stack.WriteBindings, ctx, d)
+ is.JobSpec.From(stack.JobSpec, ctx, d)
+}
+
+func infrastructureStackFilesFrom(files []*gqlclient.StackFileFragment, config types.Map, d diag.Diagnostics) types.Map {
+ if len(files) == 0 {
+ // Rewriting config to state to avoid inconsistent result errors.
+ // This could happen, for example, when sending "nil" to API and "[]" is returned as a result.
+ return config
+ }
+
+ resultMap := make(map[string]attr.Value, len(files))
+ for _, file := range files {
+ resultMap[file.Path] = types.StringValue(file.Content)
+ }
+
+ result, tagsDiagnostics := types.MapValue(types.StringType, resultMap)
+ d.Append(tagsDiagnostics...)
+
+ return result
+}
+
+func infrastructureStackEnvironmentsFrom(envs []*gqlclient.StackEnvironmentFragment, config types.Set, ctx context.Context, d diag.Diagnostics) types.Set {
+ if len(envs) == 0 {
+ // Rewriting config to state to avoid inconsistent result errors.
+ // This could happen, for example, when sending "nil" to API and "[]" is returned as a result.
+ return config
+ }
+
+ values := make([]attr.Value, len(envs))
+ for i, file := range envs {
+ objValue, diags := types.ObjectValueFrom(ctx, InfrastructureStackEnvironmentAttrTypes, InfrastructureStackEnvironment{
+ Name: types.StringValue(file.Name),
+ Value: types.StringValue(file.Value),
+ Secret: types.BoolPointerValue(file.Secret),
+ })
+ values[i] = objValue
+ d.Append(diags...)
+ }
+
+ setValue, diags := types.SetValue(basetypes.ObjectType{AttrTypes: InfrastructureStackEnvironmentAttrTypes}, values)
+ d.Append(diags...)
+ return setValue
+}
+
+type InfrastructureStackRepository struct {
+ Id types.String `tfsdk:"id"`
+ Ref types.String `tfsdk:"ref"`
+ Folder types.String `tfsdk:"folder"`
+}
+
+func (isr *InfrastructureStackRepository) Attributes() gqlclient.GitRefAttributes {
+ if isr == nil {
+ return gqlclient.GitRefAttributes{}
+ }
+
+ return gqlclient.GitRefAttributes{
+ Ref: isr.Ref.ValueString(),
+ Folder: isr.Folder.ValueString(),
+ }
+}
+
+func (isr *InfrastructureStackRepository) From(repository *gqlclient.GitRepositoryFragment, ref *gqlclient.GitRefFragment) {
+ if isr == nil {
+ return
+ }
+
+ isr.Id = types.StringValue(repository.ID)
+
+ if ref == nil {
+ return
+ }
+
+ isr.Ref = types.StringValue(ref.Ref)
+ isr.Folder = types.StringValue(ref.Folder)
+}
+
+type InfrastructureStackConfiguration struct {
+ Image types.String `tfsdk:"image"`
+ Version types.String `tfsdk:"version"`
+}
+
+func (isc *InfrastructureStackConfiguration) Attributes() gqlclient.StackConfigurationAttributes {
+ if isc == nil {
+ return gqlclient.StackConfigurationAttributes{}
+ }
+
+ return gqlclient.StackConfigurationAttributes{
+ Image: isc.Image.ValueStringPointer(),
+ Version: isc.Version.ValueString(),
+ }
+}
+
+func (isc *InfrastructureStackConfiguration) From(configuration *gqlclient.StackConfigurationFragment) {
+ if isc == nil || configuration == nil {
+ return
+ }
+
+ isc.Image = types.StringPointerValue(configuration.Image)
+ isc.Version = types.StringValue(configuration.Version)
+}
+
+type InfrastructureStackEnvironment struct {
+ Name types.String `tfsdk:"name"`
+ Value types.String `tfsdk:"value"`
+ Secret types.Bool `tfsdk:"secret"`
+}
+
+var InfrastructureStackEnvironmentAttrTypes = map[string]attr.Type{
+ "name": types.StringType,
+ "value": types.StringType,
+ "secret": types.BoolType,
+}
+
+type InfrastructureStackBindings struct {
+ Read []*InfrastructureStackPolicyBinding `tfsdk:"read"`
+ Write []*InfrastructureStackPolicyBinding `tfsdk:"write"`
+}
+
+type InfrastructureStackPolicyBinding struct {
+ GroupID types.String `tfsdk:"group_id"`
+ ID types.String `tfsdk:"id"`
+ UserID types.String `tfsdk:"user_id"`
+}
+
+type InfrastructureStackJobSpec struct {
+ Namespace types.String `tfsdk:"namespace"`
+ Raw types.String `tfsdk:"raw"`
+ Containers types.Set `tfsdk:"containers"`
+ Labels types.Map `tfsdk:"labels"`
+ Annotations types.Map `tfsdk:"annotations"`
+ ServiceAccount types.String `tfsdk:"service_account"`
+}
+
+func (isjs *InfrastructureStackJobSpec) Attributes(ctx context.Context, d diag.Diagnostics) *gqlclient.GateJobAttributes {
+ if isjs == nil {
+ return nil
+ }
+
+ return &gqlclient.GateJobAttributes{
+ Namespace: isjs.Namespace.ValueString(),
+ Raw: isjs.Raw.ValueStringPointer(),
+ Containers: isjs.ContainersAttributes(ctx, d),
+ Labels: isjs.LabelsAttributes(ctx, d),
+ Annotations: isjs.AnnotationsAttributes(ctx, d),
+ ServiceAccount: isjs.ServiceAccount.ValueStringPointer(),
+ }
+}
+
+func (isjs *InfrastructureStackJobSpec) LabelsAttributes(ctx context.Context, d diag.Diagnostics) *string {
+ if isjs.Labels.IsNull() {
+ return nil
+ }
+
+ elements := make(map[string]types.String, len(isjs.Labels.Elements()))
+ d.Append(isjs.Labels.ElementsAs(ctx, &elements, false)...)
+ return common.AttributesJson(elements, d)
+}
+
+func (isjs *InfrastructureStackJobSpec) AnnotationsAttributes(ctx context.Context, d diag.Diagnostics) *string {
+ if isjs.Annotations.IsNull() {
+ return nil
+ }
+
+ elements := make(map[string]types.String, len(isjs.Annotations.Elements()))
+ d.Append(isjs.Annotations.ElementsAs(ctx, &elements, false)...)
+ return common.AttributesJson(elements, d)
+}
+
+func (isjs *InfrastructureStackJobSpec) ContainersAttributes(ctx context.Context, d diag.Diagnostics) []*gqlclient.ContainerAttributes {
+ if isjs.Containers.IsNull() {
+ return nil
+ }
+
+ result := make([]*gqlclient.ContainerAttributes, 0, len(isjs.Containers.Elements()))
+ elements := make([]InfrastructureStackContainerSpec, len(isjs.Containers.Elements()))
+ d.Append(isjs.Containers.ElementsAs(ctx, &elements, false)...)
+
+ for _, container := range elements {
+ result = append(result, container.Attributes(ctx, d))
+ }
+
+ return result
+}
+
+func (isjs *InfrastructureStackJobSpec) From(spec *gqlclient.JobGateSpecFragment, ctx context.Context, d diag.Diagnostics) {
+ if isjs == nil {
+ return
+ }
+
+ isjs.Namespace = types.StringValue(spec.Namespace)
+ isjs.Raw = types.StringPointerValue(spec.Raw)
+ isjs.Containers = infrastructureStackJobSpecContainersFrom(spec.Containers, isjs.Containers, ctx, d)
+ isjs.Labels = common.MapFromWithConfig(spec.Labels, isjs.Labels, ctx, d)
+ isjs.Annotations = common.MapFromWithConfig(spec.Annotations, isjs.Annotations, ctx, d)
+ isjs.ServiceAccount = types.StringPointerValue(spec.ServiceAccount)
+}
+
+func infrastructureStackJobSpecContainersFrom(containers []*gqlclient.ContainerSpecFragment, config types.Set, ctx context.Context, d diag.Diagnostics) types.Set {
+ if len(containers) == 0 {
+ // Rewriting config to state to avoid inconsistent result errors.
+ // This could happen, for example, when sending "nil" to API and "[]" is returned as a result.
+ return config
+ }
+
+ values := make([]attr.Value, len(containers))
+ for i, container := range containers {
+ objValue, diags := types.ObjectValueFrom(ctx, InfrastructureStackContainerSpecAttrTypes, InfrastructureStackContainerSpec{
+ Image: types.StringValue(container.Image),
+ Args: infrastructureStackContainerSpecArgsFrom(container.Args, ctx, d),
+ Env: infrastructureStackContainerSpecEnvFrom(container.Env, d),
+ EnvFrom: infrastructureStackContainerSpecEnvFromFrom(container.EnvFrom, ctx, d),
+ })
+ values[i] = objValue
+ d.Append(diags...)
+ }
+
+ setValue, diags := types.SetValue(basetypes.ObjectType{AttrTypes: InfrastructureStackContainerSpecAttrTypes}, values)
+ d.Append(diags...)
+ return setValue
+}
+
+func infrastructureStackContainerSpecArgsFrom(values []*string, ctx context.Context, d diag.Diagnostics) types.List {
+ if values == nil {
+ return types.ListNull(types.StringType)
+ }
+
+ listValue, diags := types.ListValueFrom(ctx, types.StringType, values)
+ d.Append(diags...)
+ return listValue
+}
+
+func infrastructureStackContainerSpecEnvFrom(env []*gqlclient.ContainerSpecFragment_Env, d diag.Diagnostics) types.Map {
+ if env == nil {
+ return types.MapNull(types.StringType)
+ }
+
+ resultMap := map[string]attr.Value{}
+ for _, v := range env {
+ resultMap[v.Name] = types.StringValue(v.Value)
+ }
+
+ result, diags := types.MapValue(types.StringType, resultMap)
+ d.Append(diags...)
+
+ return result
+}
+
+func infrastructureStackContainerSpecEnvFromFrom(envFroms []*gqlclient.ContainerSpecFragment_EnvFrom, ctx context.Context, d diag.Diagnostics) types.Set {
+ if envFroms == nil {
+ return types.SetNull(basetypes.ObjectType{AttrTypes: InfrastructureStackContainerEnvFromAttrTypes})
+ }
+
+ values := make([]attr.Value, len(envFroms))
+ for i, envFrom := range envFroms {
+ objValue, diags := types.ObjectValueFrom(ctx, InfrastructureStackContainerEnvFromAttrTypes, InfrastructureStackContainerEnvFrom{
+ ConfigMap: types.StringValue(envFrom.ConfigMap),
+ Secret: types.StringValue(envFrom.Secret),
+ })
+ values[i] = objValue
+ d.Append(diags...)
+ }
+
+ setValue, diags := types.SetValue(basetypes.ObjectType{AttrTypes: InfrastructureStackContainerEnvFromAttrTypes}, values)
+ d.Append(diags...)
+ return setValue
+}
+
+type InfrastructureStackContainerSpec struct {
+ Image types.String `tfsdk:"image"`
+ Args types.List `tfsdk:"args"`
+ Env types.Map `tfsdk:"env"`
+ EnvFrom types.Set `tfsdk:"env_from"`
+}
+
+var InfrastructureStackContainerSpecAttrTypes = map[string]attr.Type{
+ "image": types.StringType,
+ "args": types.ListType{ElemType: types.StringType},
+ "env": types.MapType{ElemType: types.StringType},
+ "env_from": types.SetType{ElemType: types.ObjectType{AttrTypes: InfrastructureStackContainerEnvFromAttrTypes}},
+}
+
+func (iscs *InfrastructureStackContainerSpec) Attributes(ctx context.Context, d diag.Diagnostics) *gqlclient.ContainerAttributes {
+ if iscs == nil {
+ return nil
+ }
+
+ return &gqlclient.ContainerAttributes{
+ Image: iscs.Image.ValueString(),
+ Args: iscs.ArgsAttributes(ctx, d),
+ Env: iscs.EnvAttributes(ctx, d),
+ EnvFrom: iscs.EnvFromAttributes(ctx, d),
+ }
+}
+
+func (isjs *InfrastructureStackContainerSpec) ArgsAttributes(ctx context.Context, d diag.Diagnostics) []*string {
+ if isjs.Args.IsNull() {
+ return nil
+ }
+
+ elements := make([]types.String, len(isjs.Args.Elements()))
+ d.Append(isjs.Args.ElementsAs(ctx, &elements, false)...)
+ return algorithms.Map(elements, func(v types.String) *string { return v.ValueStringPointer() })
+}
+
+func (isjs *InfrastructureStackContainerSpec) EnvAttributes(ctx context.Context, d diag.Diagnostics) []*gqlclient.EnvAttributes {
+ if isjs.Env.IsNull() {
+ return nil
+ }
+
+ result := make([]*gqlclient.EnvAttributes, 0)
+ elements := make(map[string]types.String, len(isjs.Env.Elements()))
+ d.Append(isjs.Env.ElementsAs(ctx, &elements, false)...)
+
+ for k, v := range elements {
+ result = append(result, &gqlclient.EnvAttributes{Name: k, Value: v.ValueString()})
+ }
+
+ return result
+}
+
+func (isjs *InfrastructureStackContainerSpec) EnvFromAttributes(ctx context.Context, d diag.Diagnostics) []*gqlclient.EnvFromAttributes {
+ if isjs.EnvFrom.IsNull() {
+ return nil
+ }
+
+ result := make([]*gqlclient.EnvFromAttributes, 0, len(isjs.EnvFrom.Elements()))
+ elements := make([]InfrastructureStackContainerEnvFrom, len(isjs.EnvFrom.Elements()))
+ d.Append(isjs.EnvFrom.ElementsAs(ctx, &elements, false)...)
+
+ for _, envFrom := range elements {
+ result = append(result, &gqlclient.EnvFromAttributes{
+ Secret: envFrom.Secret.ValueString(),
+ ConfigMap: envFrom.ConfigMap.ValueString(),
+ })
+ }
+
+ return result
+}
+
+type InfrastructureStackContainerEnvFrom struct {
+ Secret types.String `tfsdk:"secret"`
+ ConfigMap types.String `tfsdk:"config_map"`
+}
+
+var InfrastructureStackContainerEnvFromAttrTypes = map[string]attr.Type{
+ "secret": types.StringType,
+ "config_map": types.StringType,
+}
diff --git a/internal/resource/infrastructure_stack_schema.go b/internal/resource/infrastructure_stack_schema.go
new file mode 100644
index 0000000..49577bc
--- /dev/null
+++ b/internal/resource/infrastructure_stack_schema.go
@@ -0,0 +1,258 @@
+package resource
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform-plugin-framework-validators/mapvalidator"
+ "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator"
+ "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
+ "github.com/hashicorp/terraform-plugin-framework/path"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/objectplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ gqlclient "github.com/pluralsh/console-client-go"
+)
+
+func (r *InfrastructureStackResource) schema() schema.Schema {
+ return schema.Schema{
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Description: "Internal identifier of this stack.",
+ MarkdownDescription: "Internal identifier of this stack.",
+ Computed: true,
+ PlanModifiers: []planmodifier.String{stringplanmodifier.UseStateForUnknown()},
+ },
+ "name": schema.StringAttribute{
+ Description: "Human-readable name of this stack.",
+ MarkdownDescription: "Human-readable name of this stack.",
+ Required: true,
+ PlanModifiers: []planmodifier.String{stringplanmodifier.UseStateForUnknown()},
+ },
+ "type": schema.StringAttribute{
+ Description: fmt.Sprintf("A type for the stack, specifies the tool to use to apply it. Allowed values include \"%s\" and \"%s\".", gqlclient.StackTypeAnsible, gqlclient.StackTypeTerraform),
+ MarkdownDescription: fmt.Sprintf("A type for the stack, specifies the tool to use to apply it. Allowed values include `%s` and `%s`.", gqlclient.StackTypeAnsible, gqlclient.StackTypeTerraform),
+ Required: true,
+ PlanModifiers: []planmodifier.String{stringplanmodifier.UseStateForUnknown()},
+ Validators: []validator.String{stringvalidator.OneOf(string(gqlclient.StackTypeAnsible), string(gqlclient.StackTypeTerraform))},
+ },
+ "approval": schema.BoolAttribute{
+ Description: "Determines whether to require approval.",
+ MarkdownDescription: "Determines whether to require approval.",
+ Optional: true,
+ },
+ "detach": schema.BoolAttribute{
+ Description: "Determines behavior during resource destruction, if true it will detach resource instead of deleting it.",
+ MarkdownDescription: "Determines behavior during resource destruction, if true it will detach resource instead of deleting it.",
+ Optional: true,
+ Computed: true,
+ Default: booldefault.StaticBool(false),
+ },
+ "cluster_id": schema.StringAttribute{
+ Description: "The cluster on which the stack will be applied.",
+ MarkdownDescription: "The cluster on which the stack will be applied.",
+ Required: true,
+ PlanModifiers: []planmodifier.String{stringplanmodifier.UseStateForUnknown()},
+ },
+ "repository": schema.SingleNestedAttribute{
+ Description: "Repository information used to pull stack.",
+ MarkdownDescription: "Repository information used to pull stack.",
+ Required: true,
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Description: "ID of the repository to pull from.",
+ MarkdownDescription: "ID of the repository to pull from.",
+ Required: true,
+ },
+ "ref": schema.StringAttribute{
+					Description:         "A general git ref, either a branch name or commit sha understandable by \"git checkout <ref>\".",
+					MarkdownDescription: "A general git ref, either a branch name or commit sha understandable by `git checkout <ref>`.",
+ Required: true,
+ },
+ "folder": schema.StringAttribute{
+ Description: "The folder where manifests live.",
+ MarkdownDescription: "The folder where manifests live.",
+ Required: true,
+ },
+ },
+ },
+ "configuration": schema.SingleNestedAttribute{
+ Description: "Stack configuration.",
+ MarkdownDescription: "Stack configuration.",
+ Required: true,
+ Attributes: map[string]schema.Attribute{
+ "image": schema.StringAttribute{
+ Description: "Optional custom image you might want to use.",
+ MarkdownDescription: "Optional custom image you might want to use.",
+ Optional: true,
+ },
+ "version": schema.StringAttribute{
+ Description: "The semver of the tool you wish to use.",
+ MarkdownDescription: "The semver of the tool you wish to use.",
+ Required: true,
+ },
+ },
+ },
+ "files": schema.MapAttribute{
+ Description: "File path-content map.",
+ MarkdownDescription: "File path-content map.",
+ Optional: true,
+ ElementType: types.StringType,
+ },
+ "environment": schema.SetNestedAttribute{
+ Description: "Defines environment variables for the stack.",
+ MarkdownDescription: "Defines environment variables for the stack.",
+ Optional: true,
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "name": schema.StringAttribute{
+ Description: "Environment variable name.",
+ MarkdownDescription: "Environment variable name.",
+ Required: true,
+ },
+ "value": schema.StringAttribute{
+ Description: "Environment variable value.",
+ MarkdownDescription: "Environment variable value.",
+ Required: true,
+ },
+ "secret": schema.BoolAttribute{
+ Description: "Indicates if environment variable is secret.",
+ MarkdownDescription: "Indicates if environment variable is secret.",
+ Optional: true,
+ },
+ },
+ },
+ },
+ "job_spec": schema.SingleNestedAttribute{
+				Description:         "Kubernetes job specification used to run this stack.",
+				MarkdownDescription: "Kubernetes job specification used to run this stack.",
+ Optional: true,
+ Attributes: map[string]schema.Attribute{
+ "namespace": schema.StringAttribute{
+ Description: "Namespace where job will be deployed.",
+ MarkdownDescription: "Namespace where job will be deployed.",
+ Required: true,
+ },
+ "raw": schema.StringAttribute{
+ Description: "If you'd rather define the job spec via straight Kubernetes YAML.",
+ MarkdownDescription: "If you'd rather define the job spec via straight Kubernetes YAML.",
+ Optional: true,
+ PlanModifiers: []planmodifier.String{stringplanmodifier.UseStateForUnknown()},
+ Validators: []validator.String{
+ stringvalidator.LengthAtLeast(1),
+ stringvalidator.ExactlyOneOf(path.MatchRelative().AtParent().AtName("containers")),
+ stringvalidator.ConflictsWith(path.MatchRelative().AtParent().AtName("containers")),
+ },
+ },
+ "labels": schema.MapAttribute{
+ Description: "Kubernetes labels applied to the job.",
+ MarkdownDescription: "Kubernetes labels applied to the job.",
+ ElementType: types.StringType,
+ Optional: true,
+ Validators: []validator.Map{mapvalidator.ConflictsWith(path.MatchRelative().AtParent().AtName("raw"))},
+ },
+ "annotations": schema.MapAttribute{
+ Description: "Kubernetes annotations applied to the job.",
+ MarkdownDescription: "Kubernetes annotations applied to the job.",
+ ElementType: types.StringType,
+ Optional: true,
+ Validators: []validator.Map{mapvalidator.ConflictsWith(path.MatchRelative().AtParent().AtName("raw"))},
+ },
+ "service_account": schema.StringAttribute{
+ Description: "Kubernetes service account for this job.",
+ MarkdownDescription: "Kubernetes service account for this job.",
+ Optional: true,
+ Validators: []validator.String{stringvalidator.ConflictsWith(path.MatchRelative().AtParent().AtName("raw"))},
+ },
+ "containers": schema.SetNestedAttribute{
+ Optional: true,
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "image": schema.StringAttribute{
+ Required: true,
+ },
+ "args": schema.ListAttribute{
+ Description: "Arguments to pass to the command when executing it.",
+ MarkdownDescription: "Arguments to pass to the command when executing it.",
+ Optional: true,
+ ElementType: types.StringType,
+ },
+ "env": schema.MapAttribute{
+ Description: "Defines environment variables to expose to the process.",
+ MarkdownDescription: "Defines environment variables to expose to the process.",
+ Optional: true,
+ ElementType: types.StringType,
+ },
+ "env_from": schema.SetNestedAttribute{
+ Optional: true,
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "secret": schema.StringAttribute{
+ Required: true,
+ },
+ "config_map": schema.StringAttribute{
+ Required: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ Validators: []validator.Set{
+ setvalidator.SizeAtLeast(1),
+ setvalidator.ExactlyOneOf(path.MatchRelative().AtParent().AtName("raw")),
+ setvalidator.ConflictsWith(path.MatchRelative().AtParent().AtName("raw")),
+ },
+ },
+ },
+ },
+ "bindings": schema.SingleNestedAttribute{
+ Description: "Read and write policies of this stack.",
+ MarkdownDescription: "Read and write policies of this stack.",
+ Optional: true,
+ Attributes: map[string]schema.Attribute{
+ "read": schema.SetNestedAttribute{
+ Description: "Read policies of this stack.",
+ MarkdownDescription: "Read policies of this stack.",
+ Optional: true,
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "group_id": schema.StringAttribute{
+ Optional: true,
+ },
+ "id": schema.StringAttribute{
+ Optional: true,
+ },
+ "user_id": schema.StringAttribute{
+ Optional: true,
+ },
+ },
+ },
+ },
+ "write": schema.SetNestedAttribute{
+ Description: "Write policies of this stack.",
+ MarkdownDescription: "Write policies of this stack.",
+ Optional: true,
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "group_id": schema.StringAttribute{
+ Optional: true,
+ },
+ "id": schema.StringAttribute{
+ Optional: true,
+ },
+ "user_id": schema.StringAttribute{
+ Optional: true,
+ },
+ },
+ },
+ },
+ },
+ PlanModifiers: []planmodifier.Object{objectplanmodifier.UseStateForUnknown()},
+ },
+ },
+ }
+}
diff --git a/internal/resource/rbac.go b/internal/resource/rbac.go
index c432633..4d8a0a6 100644
--- a/internal/resource/rbac.go
+++ b/internal/resource/rbac.go
@@ -126,7 +126,7 @@ func (r *rbacResource) Create(ctx context.Context, req resource.CreateRequest, r
return
}
- _, err := r.client.UpdateRbac(ctx, data.Attributes(), data.ServiceId.ValueStringPointer(), data.ClusterId.ValueStringPointer(), nil)
+ _, err := r.client.UpdateRbac(ctx, data.Attributes(ctx, resp.Diagnostics), data.ServiceId.ValueStringPointer(), data.ClusterId.ValueStringPointer(), nil)
if err != nil {
resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to update rbac, got error: %s", err))
return
@@ -146,7 +146,7 @@ func (r *rbacResource) Update(ctx context.Context, req resource.UpdateRequest, r
return
}
- _, err := r.client.UpdateRbac(ctx, data.Attributes(), data.ServiceId.ValueStringPointer(), data.ClusterId.ValueStringPointer(), nil)
+ _, err := r.client.UpdateRbac(ctx, data.Attributes(ctx, resp.Diagnostics), data.ServiceId.ValueStringPointer(), data.ClusterId.ValueStringPointer(), nil)
if err != nil {
resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to update rbac, got error: %s", err))
return
diff --git a/internal/resource/rbac_model.go b/internal/resource/rbac_model.go
index 3f10ce0..b89b4bb 100644
--- a/internal/resource/rbac_model.go
+++ b/internal/resource/rbac_model.go
@@ -1,8 +1,11 @@
package resource
import (
+ "context"
+
"terraform-provider-plural/internal/common"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
gqlclient "github.com/pluralsh/console-client-go"
)
@@ -13,9 +16,9 @@ type rbac struct {
Bindings *common.ClusterBindings `tfsdk:"rbac"`
}
-func (g *rbac) Attributes() gqlclient.RbacAttributes {
+func (g *rbac) Attributes(ctx context.Context, d diag.Diagnostics) gqlclient.RbacAttributes {
return gqlclient.RbacAttributes{
- ReadBindings: g.Bindings.ReadAttributes(),
- WriteBindings: g.Bindings.WriteAttributes(),
+ ReadBindings: g.Bindings.ReadAttributes(ctx, d),
+ WriteBindings: g.Bindings.WriteAttributes(ctx, d),
}
}
diff --git a/internal/resource/service_context_model.go b/internal/resource/service_context_model.go
index 6c36f2f..7a55c92 100644
--- a/internal/resource/service_context_model.go
+++ b/internal/resource/service_context_model.go
@@ -19,17 +19,7 @@ type serviceContext struct {
func (sc *serviceContext) From(scf *console.ServiceContextFragment, ctx context.Context, d diag.Diagnostics) {
sc.Id = types.StringValue(scf.ID)
- sc.Configuration = serviceContextConfigurationFrom(scf.Configuration, ctx, d)
-}
-
-func serviceContextConfigurationFrom(configuration map[string]any, ctx context.Context, d diag.Diagnostics) types.Map {
- if len(configuration) == 0 {
- return types.MapNull(types.StringType)
- }
-
- mapValue, diags := types.MapValueFrom(ctx, types.StringType, configuration)
- d.Append(diags...)
- return mapValue
+ sc.Configuration = common.MapFrom(scf.Configuration, ctx, d)
}
func (sc *serviceContext) Attributes(ctx context.Context, d diag.Diagnostics) console.ServiceContextAttributes {
diff --git a/internal/validator/also_requires_if.go b/internal/validator/also_requires_if.go
index 4180e28..1affbea 100644
--- a/internal/validator/also_requires_if.go
+++ b/internal/validator/also_requires_if.go
@@ -117,7 +117,6 @@ func (a alsoRequiresIfValidator) ValidateString(ctx context.Context, req validat
resp.Diagnostics.Append(validateResp.Diagnostics...)
}
-// AlsoRequiresIf todo.
func AlsoRequiresIf(f RequiresIf, expressions ...path.Expression) validator.String {
return &alsoRequiresIfValidator{
PathExpressions: expressions,
]