diff --git a/docs/resources/cce_cluster_v3.md b/docs/resources/cce_cluster_v3.md
index 5d1982eb..6223255c 100644
--- a/docs/resources/cce_cluster_v3.md
+++ b/docs/resources/cce_cluster_v3.md
@@ -120,6 +120,10 @@ The following arguments are supported:
 * `masters` - (Optional, List, ForceNew) Advanced configuration of master nodes. Changing this creates a new cluster.
   The [masters](#cce_masters) object structure is documented below.
 
+* `hibernate` - (Optional, Bool) Specifies whether to hibernate the CCE cluster. Defaults to **false**. While a
+  cluster is hibernated, resources such as workloads cannot be created or managed in it, and the cluster itself
+  cannot be deleted.
+
 The `masters` block supports:

diff --git a/flexibleengine/resource_flexibleengine_cce_cluster_v3.go b/flexibleengine/resource_flexibleengine_cce_cluster_v3.go
index 3f435c5f..508a6690 100644
--- a/flexibleengine/resource_flexibleengine_cce_cluster_v3.go
+++ b/flexibleengine/resource_flexibleengine_cce_cluster_v3.go
@@ -159,6 +159,10 @@ func resourceCCEClusterV3() *schema.Resource {
             Optional: true,
             ForceNew: true,
         },
+        "hibernate": {
+            Type:     schema.TypeBool,
+            Optional: true,
+        },
         "status": {
             Type:     schema.TypeString,
             Computed: true,
@@ -345,9 +349,10 @@ func resourceCCEClusterV3Create(d *schema.ResourceData, meta interface{}) error
     log.Printf("[DEBUG] Waiting for flexibleengine CCE cluster (%s) to become available", create.Metadata.Id)
     stateConf := &resource.StateChangeConf{
-        Pending:      []string{"Creating"},
-        Target:       []string{"Available"},
-        Refresh:      waitForCCEClusterActive(cceClient, create.Metadata.Id),
+        // The statuses of the pending phase include "Creating".
+        Pending:      []string{"PENDING"},
+        Target:       []string{"COMPLETED"},
+        Refresh:      clusterStateRefreshFunc(cceClient, create.Metadata.Id, []string{"Available"}),
         Timeout:      d.Timeout(schema.TimeoutCreate),
         Delay:        120 * time.Second,
         PollInterval: 10 * time.Second,
@@ -356,8 +361,15 @@ func resourceCCEClusterV3Create(d *schema.ResourceData, meta interface{}) error
     _, err = stateConf.WaitForState()
 
     d.SetId(create.Metadata.Id)
-    return resourceCCEClusterV3Read(d, meta)
 
+    // hibernate the cluster right after creation if requested
+    if d.Get("hibernate").(bool) {
+        err = resourceClusterHibernate(d, cceClient)
+        if err != nil {
+            return err
+        }
+    }
+
+    return resourceCCEClusterV3Read(d, meta)
 }
 
 func resourceCCEClusterV3Read(d *schema.ResourceData, meta interface{}) error {
@@ -498,6 +510,20 @@ func resourceCCEClusterV3Update(d *schema.ResourceData, meta interface{}) error
         }
     }
 
+    if d.HasChange("hibernate") {
+        if d.Get("hibernate").(bool) {
+            err = resourceClusterHibernate(d, cceClient)
+            if err != nil {
+                return err
+            }
+        } else {
+            err = resourceClusterAwake(d, cceClient)
+            if err != nil {
+                return err
+            }
+        }
+    }
+
     return resourceCCEClusterV3Read(d, meta)
 }
 
@@ -512,9 +538,10 @@ func resourceCCEClusterV3Delete(d *schema.ResourceData, meta interface{}) error
         return fmt.Errorf("Error deleting flexibleengine CCE Cluster: %s", err)
     }
     stateConf := &resource.StateChangeConf{
-        Pending:      []string{"Deleting", "Available", "Unavailable"},
-        Target:       []string{"Deleted"},
-        Refresh:      waitForCCEClusterDelete(cceClient, d.Id()),
+        // The statuses of the pending phase include "Deleting", "Available" and "Unavailable".
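+        // "PENDING" and "COMPLETED" are pseudo-statuses computed by clusterStateRefreshFunc below;
+        // with a nil target list, only the 404 returned after deletion maps to "COMPLETED".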
+        Pending:      []string{"PENDING"},
+        Target:       []string{"COMPLETED"},
+        Refresh:      clusterStateRefreshFunc(cceClient, d.Id(), nil),
         Timeout:      d.Timeout(schema.TimeoutDelete),
         Delay:        60 * time.Second,
         PollInterval: 10 * time.Second,
@@ -530,37 +557,6 @@ func resourceCCEClusterV3Delete(d *schema.ResourceData, meta interface{}) error
     return nil
 }
 
-func waitForCCEClusterActive(cceClient *golangsdk.ServiceClient, clusterId string) resource.StateRefreshFunc {
-    return func() (interface{}, string, error) {
-        n, err := clusters.Get(cceClient, clusterId).Extract()
-        if err != nil {
-            return nil, "", err
-        }
-
-        return n, n.Status.Phase, nil
-    }
-}
-
-func waitForCCEClusterDelete(cceClient *golangsdk.ServiceClient, clusterId string) resource.StateRefreshFunc {
-    return func() (interface{}, string, error) {
-        log.Printf("[DEBUG] Attempting to delete CCE cluster %s.\n", clusterId)
-
-        r, err := clusters.Get(cceClient, clusterId).Extract()
-
-        if err != nil {
-            if _, ok := err.(golangsdk.ErrDefault404); ok {
-                log.Printf("[DEBUG] Successfully deleted flexibleengine CCE cluster %s", clusterId)
-                return r, "Deleted", nil
-            }
-        }
-        if r.Status.Phase == "Deleting" {
-            return r, "Deleting", nil
-        }
-        log.Printf("[DEBUG] flexibleengine CCE cluster %s still available.\n", clusterId)
-        return r, "Available", nil
-    }
-}
-
 func resourceCCEClusterV3EipAction(cceClient, eipClient *golangsdk.ServiceClient,
     clusterID, eip, action string) error {
     eipID, err := getEipIDbyAddress(eipClient, eip)
@@ -605,3 +601,78 @@ func getEipIDbyAddress(client *golangsdk.ServiceClient, address string) (string,
     return allEips[0].ID, nil
 }
+
+func resourceClusterHibernate(d *schema.ResourceData, cceClient *golangsdk.ServiceClient) error {
+    clusterID := d.Id()
+    err := clusters.Operation(cceClient, clusterID, "hibernate").ExtractErr()
+    if err != nil {
+        return fmt.Errorf("error hibernating CCE cluster: %s", err)
+    }
+
+    log.Printf("[DEBUG] Waiting for CCE cluster (%s) to become hibernated", clusterID)
+    stateConf := &resource.StateChangeConf{
+        // The statuses of the pending phase include "Available" and "Hibernating".
+        Pending:      []string{"PENDING"},
+        Target:       []string{"COMPLETED"},
+        Refresh:      clusterStateRefreshFunc(cceClient, clusterID, []string{"Hibernation"}),
+        Timeout:      d.Timeout(schema.TimeoutUpdate),
+        Delay:        20 * time.Second,
+        PollInterval: 20 * time.Second,
+    }
+
+    _, err = stateConf.WaitForState()
+    if err != nil {
+        return fmt.Errorf("error hibernating CCE cluster: %s", err)
+    }
+    return nil
+}
+
+func resourceClusterAwake(d *schema.ResourceData, cceClient *golangsdk.ServiceClient) error {
+    clusterID := d.Id()
+    err := clusters.Operation(cceClient, clusterID, "awake").ExtractErr()
+    if err != nil {
+        return fmt.Errorf("error awaking CCE cluster: %s", err)
+    }
+
+    log.Printf("[DEBUG] Waiting for CCE cluster (%s) to become available", clusterID)
+    stateConf := &resource.StateChangeConf{
+        // The statuses of the pending phase include "Awaking".
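+        // Waking the cluster up restarts its master nodes, which presumably accounts for the
+        // longer initial delay here than in resourceClusterHibernate.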
+        Pending:      []string{"PENDING"},
+        Target:       []string{"COMPLETED"},
+        Refresh:      clusterStateRefreshFunc(cceClient, clusterID, []string{"Available"}),
+        Timeout:      d.Timeout(schema.TimeoutUpdate),
+        Delay:        100 * time.Second,
+        PollInterval: 20 * time.Second,
+    }
+
+    _, err = stateConf.WaitForState()
+    if err != nil {
+        return fmt.Errorf("error awaking CCE cluster: %s", err)
+    }
+    return nil
+}
+
+// clusterStateRefreshFunc polls the cluster and reports "COMPLETED" once its phase matches one of
+// the given targets (or the cluster is gone), "ERROR" on an invalid phase, and "PENDING" otherwise.
+func clusterStateRefreshFunc(cceClient *golangsdk.ServiceClient, clusterId string,
+    targets []string) resource.StateRefreshFunc {
+    return func() (interface{}, string, error) {
+        log.Printf("[DEBUG] Expecting the status of CCE cluster to be one of: %v", targets)
+        resp, err := clusters.Get(cceClient, clusterId).Extract()
+        if err != nil {
+            if _, ok := err.(golangsdk.ErrDefault404); ok {
+                log.Printf("[DEBUG] The cluster (%s) has been deleted", clusterId)
+                return resp, "COMPLETED", nil
+            }
+            return nil, "ERROR", err
+        }
+
+        invalidStatuses := []string{"Error", "Shelved", "Unknow"}
+        if isStrContainsSliceElement(resp.Status.Phase, invalidStatuses, true, true) {
+            return resp, "ERROR", fmt.Errorf("unexpected status: %s", resp.Status.Phase)
+        }
+
+        if strSliceContains(targets, resp.Status.Phase) {
+            return resp, "COMPLETED", nil
+        }
+        return resp, "PENDING", nil
+    }
+}
diff --git a/flexibleengine/resource_flexibleengine_cce_cluster_v3_test.go b/flexibleengine/resource_flexibleengine_cce_cluster_v3_test.go
index c21239d0..c2a41333 100644
--- a/flexibleengine/resource_flexibleengine_cce_cluster_v3_test.go
+++ b/flexibleengine/resource_flexibleengine_cce_cluster_v3_test.go
@@ -193,3 +193,92 @@ resource "flexibleengine_cce_cluster_v3" "cluster_1" {
   eip = flexibleengine_vpc_eip.update.address
 }`, testAccCCEClusterV3_Base(cceName), cceName)
 }
+
+func TestAccCluster_hibernate(t *testing.T) {
+    var cluster clusters.Clusters
+
+    rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(5))
+    resourceName := "flexibleengine_cce_cluster_v3.test"
+
+    resource.ParallelTest(t, resource.TestCase{
+        PreCheck:          func() { testAccPreCheck(t) },
+        ProviderFactories: TestAccProviderFactories,
+        CheckDestroy:      testAccCheckCCEClusterV3Destroy,
+        Steps: []resource.TestStep{
+            {
+                Config: testAccCluster_basic(rName),
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckCCEClusterV3Exists(resourceName, &cluster),
+                    resource.TestCheckResourceAttr(resourceName, "name", rName),
+                    resource.TestCheckResourceAttr(resourceName, "status", "Available"),
+                ),
+            },
+            {
+                Config: testAccCluster_hibernate(rName),
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckCCEClusterV3Exists(resourceName, &cluster),
+                    resource.TestCheckResourceAttr(resourceName, "name", rName),
+                    resource.TestCheckResourceAttr(resourceName, "status", "Hibernation"),
+                ),
+            },
+            {
+                Config: testAccCluster_awake(rName),
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckCCEClusterV3Exists(resourceName, &cluster),
+                    resource.TestCheckResourceAttr(resourceName, "name", rName),
+                    resource.TestCheckResourceAttr(resourceName, "status", "Available"),
+                ),
+            },
+        },
+    })
+}
+
+func testAccCluster_basic(rName string) string {
+    return fmt.Sprintf(`
+%s
+
+resource "flexibleengine_cce_cluster_v3" "test" {
+  name                   = "%s"
+  flavor_id              = "cce.s1.small"
+  cluster_type           = "VirtualMachine"
+  vpc_id                 = flexibleengine_vpc_v1.test.id
+  subnet_id              = flexibleengine_vpc_subnet_v1.test.id
+  container_network_type = "overlay_l2"
+  service_network_cidr   = "10.248.0.0/16"
+}
+`, testAccCCEClusterV3_Base(rName), rName)
+}
+
+func testAccCluster_hibernate(rName string) string {
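+    // Identical to testAccCluster_basic, but with hibernate enabled.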
+    return fmt.Sprintf(`
+%s
+
+resource "flexibleengine_cce_cluster_v3" "test" {
+  name                   = "%s"
+  flavor_id              = "cce.s1.small"
+  cluster_type           = "VirtualMachine"
+  vpc_id                 = flexibleengine_vpc_v1.test.id
+  subnet_id              = flexibleengine_vpc_subnet_v1.test.id
+  container_network_type = "overlay_l2"
+  service_network_cidr   = "10.248.0.0/16"
+  hibernate              = true
+}
+`, testAccCCEClusterV3_Base(rName), rName)
+}
+
+func testAccCluster_awake(rName string) string {
+    return fmt.Sprintf(`
+%s
+
+resource "flexibleengine_cce_cluster_v3" "test" {
+  name                   = "%s"
+  flavor_id              = "cce.s1.small"
+  cluster_type           = "VirtualMachine"
+  vpc_id                 = flexibleengine_vpc_v1.test.id
+  subnet_id              = flexibleengine_vpc_subnet_v1.test.id
+  container_network_type = "overlay_l2"
+  service_network_cidr   = "10.248.0.0/16"
+  hibernate              = false
+}
+`, testAccCCEClusterV3_Base(rName), rName)
+}
diff --git a/flexibleengine/utils.go b/flexibleengine/utils.go
index 0b92868a..7fa7e58b 100644
--- a/flexibleengine/utils.go
+++ b/flexibleengine/utils.go
@@ -200,3 +200,23 @@ func IsUUIDFormat(str string) bool {
     }
     return true
 }
+
+// isStrContainsSliceElement returns true if str exactly matches an element of sl (when isExact is
+// true), or if str contains one of the elements of sl as a substring (when isExact is false).
+// The comparison optionally ignores case.
+func isStrContainsSliceElement(str string, sl []string, ignoreCase, isExact bool) bool {
+    if ignoreCase {
+        str = strings.ToLower(str)
+    }
+    for _, s := range sl {
+        if ignoreCase {
+            s = strings.ToLower(s)
+        }
+        if isExact && s == str {
+            return true
+        }
+        if !isExact && strings.Contains(str, s) {
+            return true
+        }
+    }
+    return false
+}
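+
+// Example usage (illustrative):
+//
+//  isStrContainsSliceElement("Unavailable", []string{"available"}, true, false) // true: case-insensitive substring
+//  isStrContainsSliceElement("Error", []string{"Error", "Shelved"}, true, true) // true: exact match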