Fix issue #77 and roll back the fix for issue #71 (#79)
* roll back to the node pool behaviour from before the fix of issue #71

* fix #77 - fix import for k8s nodepool

* modified tests to use ip_blocks instead of env variables (the pattern is sketched below)
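
As a rough sketch of that last change (resource names here are illustrative; the real test configurations appear in the diff below), the public IPs now come from an `ionoscloud_ipblock` resource instead of `TF_ACC_IONOS_PUBLIC_IP_*` environment variables:

```hcl
# Sketch only: reserve a block of public IPs through the provider instead of
# injecting them via environment variables.
resource "ionoscloud_datacenter" "example" {
  name     = "example"
  location = "us/las"
}

resource "ionoscloud_ipblock" "example" {
  location = ionoscloud_datacenter.example.location
  size     = 3
  name     = "example_ipblock"
}

output "node_pool_public_ips" {
  # These are the values the test configs now pass to public_ips on
  # ionoscloud_k8s_node_pool, e.g. ips[0] and ips[1].
  value = [ionoscloud_ipblock.example.ips[0], ionoscloud_ipblock.example.ips[1]]
}
```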
iblindu authored Aug 11, 2021
1 parent 868018a commit 56ac039
Showing 9 changed files with 52 additions and 108 deletions.
5 changes: 5 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,8 @@
## 5.2.8

- rollback to the node pool behaviour before the fix of issue #71
- issue #77 - fix import for k8s nodepool

## 5.2.7

- fix: issue #71 - recreate nodepool on change of specifications
4 changes: 0 additions & 4 deletions docs/resources/k8s_node_pool.md
@@ -72,7 +72,3 @@ terraform import ionoscloud_k8s_node_pool.demo {k8s_cluster_uuid}/{k8s_nodepool_
```

This can be helpful when you want to import Kubernetes node pools that you have already created manually, or by other means outside of Terraform, so that they can be managed through Terraform.

## Notes

Please note that every time the arguments of the nodepool are modified, the resource is destroyed and recreated.
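
For illustration, a minimal sketch of the import workflow described above; every identifier and attribute value below is a placeholder and must be replaced with the settings of the node pool that already exists:

```hcl
# Hypothetical resource block mirroring a node pool created outside Terraform;
# the attribute values must match the real node pool's configuration.
resource "ionoscloud_k8s_node_pool" "demo" {
  name              = "demo"
  k8s_version       = "1.18.9"
  datacenter_id     = "<datacenter_uuid>"
  k8s_cluster_id    = "<k8s_cluster_uuid>"
  cpu_family        = "INTEL_XEON"
  availability_zone = "AUTO"
  storage_type      = "SSD"
  node_count        = 1
  cores_count       = 2
  ram_size          = 2048
  storage_size      = 40
}

# Then pull it into state with:
#   terraform import ionoscloud_k8s_node_pool.demo {k8s_cluster_uuid}/{k8s_nodepool_id}
```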
1 change: 0 additions & 1 deletion ionoscloud/data_source_private_crossconnect.go
@@ -183,7 +183,6 @@ func dataSourcePccRead(d *schema.ResourceData, meta interface{}) error {

if idOk {
/* search by ID */
fmt.Printf("searching for ID %s\n", id.(string))
pcc, _, err = client.PrivateCrossConnectApi.PccsFindById(ctx, id.(string)).Execute()
if err != nil {
return fmt.Errorf("an error occurred while fetching the pcc with ID %s: %s", id.(string), err)
13 changes: 1 addition & 12 deletions ionoscloud/import_k8s_node_pool_test.go
@@ -4,7 +4,6 @@ package ionoscloud

import (
"fmt"
"os"
"testing"

"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
@@ -13,24 +12,14 @@ import (

func TestAcck8sNodepool_ImportBasic(t *testing.T) {
resourceName := "terraform_acctest"
publicIp1 := os.Getenv("TF_ACC_IONOS_PUBLIC_IP_1")
if publicIp1 == "" {
t.Errorf("TF_ACC_IONOS_PUBLIC_1 not set; please set it to a valid public IP for the us/las zone")
t.FailNow()
}
publicIp2 := os.Getenv("TF_ACC_IONOS_PUBLIC_IP_2")
if publicIp2 == "" {
t.Errorf("TF_ACC_IONOS_PUBLIC_2 not set; please set it to a valid public IP for the us/las zone")
t.FailNow()
}

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
ProviderFactories: testAccProviderFactories,
CheckDestroy: testAccCheckk8sNodepoolDestroyCheck,
Steps: []resource.TestStep{
{
Config: fmt.Sprintf(testAccCheckk8sNodepoolConfigBasic, resourceName, publicIp1, publicIp2),
Config: fmt.Sprintf(testAccCheckk8sNodepoolConfigBasic, resourceName),
},
{
ResourceName: fmt.Sprintf("ionoscloud_k8s_node_pool.%s", resourceName),
18 changes: 2 additions & 16 deletions ionoscloud/resource_k8s_node_pool.go
@@ -18,7 +18,7 @@ func resourcek8sNodePool() *schema.Resource {
return &schema.Resource{
CreateContext: resourcek8sNodePoolCreate,
ReadContext: resourcek8sNodePoolRead,
UpdateContext: nil,
UpdateContext: resourcek8sNodePoolUpdate,
DeleteContext: resourcek8sNodePoolDelete,
Importer: &schema.ResourceImporter{
StateContext: resourceK8sNodepoolImport,
@@ -29,7 +29,6 @@
Description: "The desired name for the node pool",
Required: true,
ValidateFunc: validation.All(validation.StringIsNotWhiteSpace),
ForceNew: true,
},
"k8s_version": {
Type: schema.TypeString,
@@ -53,7 +52,6 @@
}
return false
},
ForceNew: true,
},
"auto_scaling": {
Type: schema.TypeList,
@@ -74,7 +72,6 @@
},
},
},
ForceNew: true,
},
"lans": {
Type: schema.TypeList,
@@ -83,7 +80,6 @@
Elem: &schema.Schema{
Type: schema.TypeInt,
},
ForceNew: true,
},
"maintenance_window": {
Type: schema.TypeList,
@@ -104,66 +100,56 @@
},
},
},
ForceNew: true,
},
"datacenter_id": {
Type: schema.TypeString,
Description: "The UUID of the VDC",
Required: true,
ValidateFunc: validation.All(validation.StringIsNotWhiteSpace),
ForceNew: true,
},
"k8s_cluster_id": {
Type: schema.TypeString,
Description: "The UUID of an existing kubernetes cluster",
Required: true,
ValidateFunc: validation.All(validation.StringIsNotWhiteSpace),
ForceNew: true,
},
"cpu_family": {
Type: schema.TypeString,
Description: "CPU Family",
Required: true,
ValidateFunc: validation.All(validation.StringIsNotWhiteSpace),
ForceNew: true,
},
"availability_zone": {
Type: schema.TypeString,
Description: "The compute availability zone in which the nodes should exist",
Required: true,
ValidateFunc: validation.All(validation.StringIsNotWhiteSpace),
ForceNew: true,
},
"storage_type": {
Type: schema.TypeString,
Description: "Storage type to use",
Required: true,
ValidateFunc: validation.All(validation.StringIsNotWhiteSpace),
ForceNew: true,
},
"node_count": {
Type: schema.TypeInt,
Description: "The number of nodes in this node pool",
Required: true,
ForceNew: true,
},
"cores_count": {
Type: schema.TypeInt,
Description: "CPU cores count",
Required: true,
ForceNew: true,
},
"ram_size": {
Type: schema.TypeInt,
Description: "The amount of RAM in MB",
Required: true,
ForceNew: true,
},
"storage_size": {
Type: schema.TypeInt,
Description: "The total allocated storage capacity of a node in GB",
Required: true,
ForceNew: true,
},
"public_ips": {
Type: schema.TypeList,
@@ -172,7 +158,6 @@
Elem: &schema.Schema{
Type: schema.TypeString,
},
ForceNew: true,
},
},
Timeouts: &resourceDefaultTimeouts,
@@ -684,6 +669,7 @@ func resourcek8sNodePoolUpdate(ctx context.Context, d *schema.ResourceData, meta
break
}

time.Sleep(SleepInterval * 3)
select {
case <-time.After(SleepInterval):
log.Printf("[INFO] trying again ...")
52 changes: 21 additions & 31 deletions ionoscloud/resource_k8s_node_pool_test.go
@@ -7,7 +7,6 @@ import (
"fmt"
ionoscloud "github.com/ionos-cloud/sdk-go/v5"
"log"
"os"
"testing"

"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
@@ -18,22 +17,6 @@ func TestAcck8sNodepool_Basic(t *testing.T) {
var k8sNodepool ionoscloud.KubernetesNodePool
k8sNodepoolName := "terraform_acctest"

publicIp1 := os.Getenv("TF_ACC_IONOS_PUBLIC_IP_1")
if publicIp1 == "" {
t.Errorf("TF_ACC_IONOS_PUBLIC_1 not set; please set it to a valid public IP for the us/las zone")
t.FailNow()
}
publicIp2 := os.Getenv("TF_ACC_IONOS_PUBLIC_IP_2")
if publicIp2 == "" {
t.Errorf("TF_ACC_IONOS_PUBLIC_2 not set; please set it to a valid public IP for the us/las zone")
t.FailNow()
}
publicIp3 := os.Getenv("TF_ACC_IONOS_PUBLIC_IP_3")
if publicIp3 == "" {
t.Errorf("TF_ACC_IONOS_PUBLIC_3 not set; please set it to a valid public IP for the us/las zone")
t.FailNow()
}

resource.Test(t, resource.TestCase{
PreCheck: func() {
testAccPreCheck(t)
@@ -42,22 +25,22 @@
CheckDestroy: testAccCheckk8sNodepoolDestroyCheck,
Steps: []resource.TestStep{
{
Config: fmt.Sprintf(testAccCheckk8sNodepoolConfigBasic, k8sNodepoolName, publicIp1, publicIp2),
Config: fmt.Sprintf(testAccCheckk8sNodepoolConfigBasic, k8sNodepoolName),
Check: resource.ComposeTestCheckFunc(
testAccCheckk8sNodepoolExists("ionoscloud_k8s_node_pool.terraform_acctest", &k8sNodepool),
resource.TestCheckResourceAttr("ionoscloud_k8s_node_pool.terraform_acctest", "name", k8sNodepoolName),
resource.TestCheckResourceAttr("ionoscloud_k8s_node_pool.terraform_acctest", "public_ips.0", publicIp1),
resource.TestCheckResourceAttr("ionoscloud_k8s_node_pool.terraform_acctest", "public_ips.1", publicIp2),
resource.TestCheckResourceAttrPair("ionoscloud_k8s_node_pool.terraform_acctest", "public_ips.0", "ionoscloud_ipblock.terraform_acctest", "ips.0"),
resource.TestCheckResourceAttrPair("ionoscloud_k8s_node_pool.terraform_acctest", "public_ips.1", "ionoscloud_ipblock.terraform_acctest", "ips.1"),
),
},
{
Config: fmt.Sprintf(testAccCheckk8sNodepoolConfigUpdate, publicIp1, publicIp2, publicIp3),
Config: fmt.Sprintf(testAccCheckk8sNodepoolConfigUpdate, k8sNodepoolName),
Check: resource.ComposeTestCheckFunc(
testAccCheckk8sNodepoolExists("ionoscloud_k8s_node_pool.terraform_acctest", &k8sNodepool),
resource.TestCheckResourceAttr("ionoscloud_k8s_node_pool.terraform_acctest", "name", "updated"),
resource.TestCheckResourceAttr("ionoscloud_k8s_node_pool.terraform_acctest", "public_ips.0", publicIp1),
resource.TestCheckResourceAttr("ionoscloud_k8s_node_pool.terraform_acctest", "public_ips.1", publicIp2),
resource.TestCheckResourceAttr("ionoscloud_k8s_node_pool.terraform_acctest", "public_ips.2", publicIp3),
resource.TestCheckResourceAttr("ionoscloud_k8s_node_pool.terraform_acctest", "name", k8sNodepoolName),
resource.TestCheckResourceAttrPair("ionoscloud_k8s_node_pool.terraform_acctest", "public_ips.0", "ionoscloud_ipblock.terraform_acctest", "ips.0"),
resource.TestCheckResourceAttrPair("ionoscloud_k8s_node_pool.terraform_acctest", "public_ips.1", "ionoscloud_ipblock.terraform_acctest", "ips.1"),
resource.TestCheckResourceAttrPair("ionoscloud_k8s_node_pool.terraform_acctest", "public_ips.2", "ionoscloud_ipblock.terraform_acctest", "ips.2"),
),
},
},
@@ -170,7 +153,11 @@ resource "ionoscloud_datacenter" "terraform_acctest" {
location = "us/las"
description = "Datacenter created through terraform"
}
resource "ionoscloud_ipblock" "terraform_acctest" {
location = ionoscloud_datacenter.terraform_acctest.location
size = 3
name = "terraform_acctest"
}
resource "ionoscloud_k8s_cluster" "terraform_acctest" {
name = "terraform_acctest"
k8s_version = "1.18.9"
Expand All @@ -179,7 +166,6 @@ resource "ionoscloud_k8s_cluster" "terraform_acctest" {
time = "09:00:00Z"
}
}
resource "ionoscloud_k8s_node_pool" "terraform_acctest" {
name = "%s"
k8s_version = "${ionoscloud_k8s_cluster.terraform_acctest.k8s_version}"
@@ -196,7 +182,7 @@ resource "ionoscloud_k8s_node_pool" "terraform_acctest" {
cores_count = 2
ram_size = 2048
storage_size = 40
public_ips = [ "%s", "%s" ]
public_ips = [ ionoscloud_ipblock.terraform_acctest.ips[0], ionoscloud_ipblock.terraform_acctest.ips[1] ]
}`

const testAccCheckk8sNodepoolConfigUpdate = `
@@ -205,7 +191,11 @@ resource "ionoscloud_datacenter" "terraform_acctest" {
location = "us/las"
description = "Datacenter created through terraform"
}
resource "ionoscloud_ipblock" "terraform_acctest" {
location = ionoscloud_datacenter.terraform_acctest.location
size = 3
name = "terraform_acctest"
}
resource "ionoscloud_k8s_cluster" "terraform_acctest" {
name = "terraform_acctest"
k8s_version = "1.18.9"
Expand All @@ -216,7 +206,7 @@ resource "ionoscloud_k8s_cluster" "terraform_acctest" {
}
resource "ionoscloud_k8s_node_pool" "terraform_acctest" {
name = "updated"
name = "%s"
k8s_version = "${ionoscloud_k8s_cluster.terraform_acctest.k8s_version}"
auto_scaling {
min_node_count = 1
@@ -235,7 +225,7 @@ resource "ionoscloud_k8s_node_pool" "terraform_acctest" {
cores_count = 2
ram_size = 2048
storage_size = 40
public_ips = [ "%s", "%s", "%s" ]
public_ips = [ ionoscloud_ipblock.terraform_acctest.ips[0], ionoscloud_ipblock.terraform_acctest.ips[1], ionoscloud_ipblock.terraform_acctest.ips[2] ]
}`

const testAccCheckk8sNodepoolConfigVersion = `
26 changes: 10 additions & 16 deletions ionoscloud/resource_nic_test.go
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
ionoscloud "github.com/ionos-cloud/sdk-go/v5"
"os"
"testing"

"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
@@ -44,17 +43,6 @@ func TestAccNic_Basic(t *testing.T) {

func TestAccNic_Ips(t *testing.T) {
var nic ionoscloud.Nic
publicIp1 := os.Getenv("TF_ACC_IONOS_PUBLIC_IP_1")
if publicIp1 == "" {
t.Errorf("TF_ACC_IONOS_PUBLIC_1 not set; please set it to a valid public IP for the us/las zone")
t.FailNow()
}
publicIp2 := os.Getenv("TF_ACC_IONOS_PUBLIC_IP_2")
if publicIp2 == "" {
t.Errorf("TF_ACC_IONOS_PUBLIC_2 not set; please set it to a valid public IP for the us/las zone")
t.FailNow()
}

resource.Test(t, resource.TestCase{
PreCheck: func() {
testAccPreCheck(t)
@@ -63,12 +51,12 @@
CheckDestroy: testAccCheckNicDestroyCheck,
Steps: []resource.TestStep{
{
Config: fmt.Sprintf(testaccchecknicconfigIps, publicIp1, publicIp2),
Config: fmt.Sprintf(testaccchecknicconfigIps),
Check: resource.ComposeTestCheckFunc(
testAccCheckNICExists("ionoscloud_nic.database_nic", &nic),
resource.TestCheckResourceAttrSet("ionoscloud_nic.database_nic", "mac"),
resource.TestCheckResourceAttr("ionoscloud_nic.database_nic", "ips.0", publicIp1),
resource.TestCheckResourceAttr("ionoscloud_nic.database_nic", "ips.1", publicIp2),
resource.TestCheckResourceAttrPair("ionoscloud_nic.database_nic", "ips.0", "ionoscloud_ipblock.webserver", "ips.0"),
resource.TestCheckResourceAttrPair("ionoscloud_nic.database_nic", "ips.1", "ionoscloud_ipblock.webserver", "ips.1"),
),
},
},
@@ -235,6 +223,12 @@ resource "ionoscloud_datacenter" "foobar" {
location = "us/las"
}
resource "ionoscloud_ipblock" "webserver" {
location = ionoscloud_datacenter.foobar.location
size = 2
name = "webserver_ipblock"
}
resource "ionoscloud_server" "webserver" {
name = "webserver"
datacenter_id = "${ionoscloud_datacenter.foobar.id}"
@@ -263,6 +257,6 @@ resource "ionoscloud_nic" "database_nic" {
lan = 2
dhcp = false
firewall_active = true
ips = ["%s","%s"]
ips = [ ionoscloud_ipblock.webserver.ips[0], ionoscloud_ipblock.webserver.ips[1] ]
name = "test_nic"
}`