Skip to content

Commit

Permalink
fix: fixes #77 import for k8s node pool and rollback of #71 fix
Browse files Browse the repository at this point in the history
* fix: fixes #77 import for k8s node pool fixed with nil check

* rollback to the node pool behavior before the fix of issue #71
  • Loading branch information
iblindu authored Aug 10, 2021
1 parent 2410fe7 commit 6ef6e9f
Show file tree
Hide file tree
Showing 5 changed files with 10 additions and 24 deletions.
5 changes: 5 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,8 @@
## 6.0.0-beta.5

- rollback to the node pool behavior before the fix of issue #71
- issue #77 - fix import for k8s nodepool

## 6.0.0-beta.4

- fix: issue #71 - recreate nodepool on change of specifications
Expand Down
4 changes: 0 additions & 4 deletions docs/resources/k8s_node_pool.md
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,3 @@ terraform import ionoscloud_k8s_node_pool.demo {k8s_cluster_uuid}/{k8s_nodepool_
```

This can be helpful when you want to import Kubernetes node pools which you have already created manually, or by other means outside of Terraform, towards the goal of managing them via Terraform.

## Notes

Please note that every time the arguments of the nodepool are modified, the resource is destroyed and recreated.
17 changes: 1 addition & 16 deletions ionoscloud/resource_k8s_node_pool.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ func resourcek8sNodePool() *schema.Resource {
return &schema.Resource{
CreateContext: resourcek8sNodePoolCreate,
ReadContext: resourcek8sNodePoolRead,
UpdateContext: nil,
UpdateContext: resourcek8sNodePoolUpdate,
DeleteContext: resourcek8sNodePoolDelete,
Importer: &schema.ResourceImporter{
StateContext: resourceK8sNodepoolImport,
Expand All @@ -29,7 +29,6 @@ func resourcek8sNodePool() *schema.Resource {
Description: "The desired name for the node pool",
Required: true,
ValidateFunc: validation.All(validation.StringIsNotWhiteSpace),
ForceNew: true,
},
"k8s_version": {
Type: schema.TypeString,
Expand All @@ -52,7 +51,6 @@ func resourcek8sNodePool() *schema.Resource {
}
return false
},
ForceNew: true,
},
"auto_scaling": {
Type: schema.TypeList,
Expand All @@ -73,7 +71,6 @@ func resourcek8sNodePool() *schema.Resource {
},
},
},
ForceNew: true,
},
"lans": {
Type: schema.TypeList,
Expand Down Expand Up @@ -113,7 +110,6 @@ func resourcek8sNodePool() *schema.Resource {
},
},
},
ForceNew: true,
},
"maintenance_window": {
Type: schema.TypeList,
Expand All @@ -134,66 +130,56 @@ func resourcek8sNodePool() *schema.Resource {
},
},
},
ForceNew: true,
},
"datacenter_id": {
Type: schema.TypeString,
Description: "The UUID of the VDC",
Required: true,
ValidateFunc: validation.All(validation.StringIsNotWhiteSpace),
ForceNew: true,
},
"k8s_cluster_id": {
Type: schema.TypeString,
Description: "The UUID of an existing kubernetes cluster",
Required: true,
ValidateFunc: validation.All(validation.StringIsNotWhiteSpace),
ForceNew: true,
},
"cpu_family": {
Type: schema.TypeString,
Description: "CPU Family",
Required: true,
ValidateFunc: validation.All(validation.StringIsNotWhiteSpace),
ForceNew: true,
},
"availability_zone": {
Type: schema.TypeString,
Description: "The compute availability zone in which the nodes should exist",
Required: true,
ValidateFunc: validation.All(validation.StringIsNotWhiteSpace),
ForceNew: true,
},
"storage_type": {
Type: schema.TypeString,
Description: "Storage type to use",
Required: true,
ValidateFunc: validation.All(validation.StringIsNotWhiteSpace),
ForceNew: true,
},
"node_count": {
Type: schema.TypeInt,
Description: "The number of nodes in this node pool",
Required: true,
ForceNew: true,
},
"cores_count": {
Type: schema.TypeInt,
Description: "CPU cores count",
Required: true,
ForceNew: true,
},
"ram_size": {
Type: schema.TypeInt,
Description: "The amount of RAM in MB",
Required: true,
ForceNew: true,
},
"storage_size": {
Type: schema.TypeInt,
Description: "The total allocated storage capacity of a node in GB",
Required: true,
ForceNew: true,
},
"public_ips": {
Type: schema.TypeList,
Expand All @@ -202,7 +188,6 @@ func resourcek8sNodePool() *schema.Resource {
Elem: &schema.Schema{
Type: schema.TypeString,
},
ForceNew: true,
},
},
Timeouts: &resourceDefaultTimeouts,
Expand Down
6 changes: 3 additions & 3 deletions ionoscloud/resource_k8s_node_pool_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -51,10 +51,10 @@ func TestAcck8sNodepool_Basic(t *testing.T) {
),
},
{
Config: fmt.Sprintf(testAccCheckk8sNodepoolConfigUpdate, publicIp1, publicIp2, publicIp3),
Config: fmt.Sprintf(testAccCheckk8sNodepoolConfigUpdate, k8sNodepoolName, publicIp1, publicIp2, publicIp3),
Check: resource.ComposeTestCheckFunc(
testAccCheckk8sNodepoolExists("ionoscloud_k8s_node_pool.terraform_acctest", &k8sNodepool),
resource.TestCheckResourceAttr("ionoscloud_k8s_node_pool.terraform_acctest", "name", "updated"),
resource.TestCheckResourceAttr("ionoscloud_k8s_node_pool.terraform_acctest", "name", k8sNodepoolName),
resource.TestCheckResourceAttr("ionoscloud_k8s_node_pool.terraform_acctest", "public_ips.0", publicIp1),
resource.TestCheckResourceAttr("ionoscloud_k8s_node_pool.terraform_acctest", "public_ips.1", publicIp2),
resource.TestCheckResourceAttr("ionoscloud_k8s_node_pool.terraform_acctest", "public_ips.2", publicIp3),
Expand Down Expand Up @@ -247,7 +247,7 @@ resource "ionoscloud_k8s_cluster" "terraform_acctest" {
}
resource "ionoscloud_k8s_node_pool" "terraform_acctest" {
name = "updated"
name = "%s"
k8s_version = "${ionoscloud_k8s_cluster.terraform_acctest.k8s_version}"
auto_scaling {
min_node_count = 1
Expand Down
2 changes: 1 addition & 1 deletion ionoscloud/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -158,7 +158,7 @@ func resourceK8sNodepoolImport(ctx context.Context, d *schema.ResourceData, meta
log.Printf("[INFO] Setting Public IPs for k8s node pool %s to %+v...", d.Id(), d.Get("public_ips"))
}

if k8sNodepool.Properties.AutoScaling != nil && (*k8sNodepool.Properties.AutoScaling.MinNodeCount != 0 && *k8sNodepool.Properties.AutoScaling.MaxNodeCount != 0) {
if k8sNodepool.Properties.AutoScaling != nil && k8sNodepool.Properties.AutoScaling.MinNodeCount != nil && k8sNodepool.Properties.AutoScaling.MaxNodeCount != nil && (*k8sNodepool.Properties.AutoScaling.MinNodeCount != 0 && *k8sNodepool.Properties.AutoScaling.MaxNodeCount != 0) {
if err := d.Set("auto_scaling", []map[string]int32{
{
"min_node_count": *k8sNodepool.Properties.AutoScaling.MinNodeCount,
Expand Down

0 comments on commit 6ef6e9f

Please sign in to comment.