From a8ed6b94d8b856636bc92118dd808b0ea16db130 Mon Sep 17 00:00:00 2001 From: marhode Date: Fri, 27 Oct 2023 16:07:15 +0200 Subject: [PATCH 01/94] Initial commit with changes to introduce auto scaling node pools --- modules/ionos-k8s-cluster/locals.tf | 3 +- modules/ionos-k8s-cluster/main.tf | 145 +++++++++++++++++++++++++ modules/ionos-k8s-cluster/variables.tf | 18 +++ 3 files changed, 165 insertions(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 1b2e32b..09d99d5 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -6,7 +6,8 @@ locals { # Number of nodes per nodepool. # Note that one nodepool is created in each availability zone. # Example: With 2 zones, the actual total node count is twice as high as the number stated here. - node_count = var.node_count + #node_count = var.node_count + node_count = var.auto_scaling ? (var.min_node_count - 1) : (var.node_count != null ? var.node_count : 1) #What should the default node count be? # This cannot be changed, after the nodepool is created, because all worker nodes must be equal at any time. core_count = var.core_count # This cannot be changed, after the nodepool is created, because all worker nodes must be equal at any time. diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index a9cd238..c24c091 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -8,6 +8,10 @@ resource "ionoscloud_k8s_cluster" "cluster" { api_subnet_allow_list = local.api_subnet_allow_list } +#---- +#Node Pool 1 +#---- + resource "ionoscloud_k8s_node_pool" "nodepool_zone1" { count = local.nodepool_per_zone_count availability_zone = "ZONE_1" @@ -60,6 +64,77 @@ resource "ionoscloud_k8s_node_pool" "nodepool_zone1" { public_ips = local.public_ip_pool_zone1 != null ? slice(local.public_ip_pool_zone1[count.index], 0, local.node_count + 1) : [] } +#---- +#Node Pool 1 autoscaling +#node_count adjusted to act as a 2 in 1 nodepool with partially enabled autoscaling activated +#---- + +resource "ionoscloud_k8s_node_pool" "nodepool_zone1_autoscaling" { + count = var.auto_scaling ? 1 : 0 + availability_zone = "ZONE_1" + name = "${local.cluster_name}-zone1-nodepool-autoscaling-${count.index}" + k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version + allow_replace = var.allow_node_pool_replacement + # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks + # it iterates through the list of var.associated_lans and sets the appropriate lan id + # it also sets one or multiple route to the lan, if a not empty entry exists in routes_list(var.associated_lans) + dynamic "lans" { + for_each = var.associated_lans + content { + id = lans.value["id"] + dynamic "routes" { + # if there is an entry in the routes_list, iterate through the values in the routes_list to create the routes + # lans.key = works like count.index, returns the iteration number of current lan -> 0,1,2,3,4... + for_each = var.associated_lans[lans.key].routes_list == null || length(var.associated_lans[lans.key].routes_list[0]) == 0 ? [] : var.associated_lans[lans.key].routes_list + + content { + # graps the values from the objects of the routes_list + network = routes.value["network"] + gateway_ip = routes.value["gateway_ip"] + } + } + } + } + + maintenance_window { + # The maintenance of the nodepools starts 1 hour after the cluster (control plane) + # The maintenance window for one nodepool is 4 hours long. 
They are shifted by 4 hours so the don't overlap (in the same zone) + # Example: + # Maintenance start (local.maintenance_hour): 2am + # Control plane (see cluster resource) starts at 2am + # Maintenance of ...-zone1-nodepool-0 starts at 3am (+1h) + # Maintenance of ...-zone1-nodepool-1 starts at 7am (+4h) + # Maintenance of ...-zone1-nodepool-2 starts at 11am (+4h) ... + # if the number of hour exceeds 24, the maintenance shall start on the next day + day_of_the_week = (local.maintenance_hour + 1 + count.index * 4) < 24 ? local.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, local.maintenance_day, null) + time = format("%02d:00:00Z", (local.maintenance_hour + 1 + count.index * 4) % 24) + } + + datacenter_id = var.datacenter_id + k8s_cluster_id = ionoscloud_k8s_cluster.cluster.id + cpu_family = local.cpu_family + storage_type = "SSD" + auto_scaling{ + min_node_count = 1 + max_node_count = var.max_node_count - local.node_count + } + node_count = local.node_count + cores_count = local.core_count + ram_size = local.ram_size + storage_size = 100 + public_ips = local.public_ip_pool_zone1 != null ? slice(local.public_ip_pool_zone1[count.index], 0, local.node_count + 1) : [] + #Ignore node count changes because of autoscaling to avoid unneeded updates + lifecycle { + ignore_changes = [ + node_count + ] + } +} + +#---- +#Node Pool 2 +#---- + resource "ionoscloud_k8s_node_pool" "nodepool_zone2" { count = local.nodepool_per_zone_count @@ -115,6 +190,76 @@ resource "ionoscloud_k8s_node_pool" "nodepool_zone2" { public_ips = local.public_ip_pool_zone2 != null ? slice(local.public_ip_pool_zone2[count.index], 0, local.node_count + 1) : [] } +#---- +#Node Pool 2 autoscaling +#node_count adjusted to act as a 2 in 1 nodepool with partially enabled autoscaling activated +#---- + +resource "ionoscloud_k8s_node_pool" "nodepool_zone2_autoscaling" { + count = var.auto_scaling ? 1 : 0 + + availability_zone = "ZONE_2" + name = "${local.cluster_name}-zone2-nodepool-autoscaling-${count.index}" + k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version + allow_replace = var.allow_node_pool_replacement + # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks + # it iterates through the list of var.associated_lans and sets the appropriate lan id + # it also sets one or multiple route to the lan, if a not empty entry exists in routes_list(var.associated_lans) + dynamic "lans" { + for_each = var.associated_lans + content { + id = lans.value["id"] + dynamic "routes" { + # if there is an entry in the routes_list, iterate through the values in the routes_list to create the routes + # lans.key = works like count.index, returns the iteration number of current lan -> 0,1,2,3,4... + for_each = var.associated_lans[lans.key].routes_list == null || length(var.associated_lans[lans.key].routes_list[0]) == 0 ? [] : var.associated_lans[lans.key].routes_list + + content { + # graps the values from the objects of the routes_list + network = routes.value["network"] + gateway_ip = routes.value["gateway_ip"] + } + } + } + } + + maintenance_window { + # The maintenance of the nodepools starts 1 hour after the cluster (control plane) + # The maintenance window for one nodepool is 4 hours long. 
They are shifted by 4 hours so the don't overlap (in the same zone) + # Additionally the zones are shifted by 2 hours, so this one starts 2 hours later + # Example: + # Maintenance start (local.maintenance_hour): 2am + # Control plane (see cluster resource) starts at 2am + # Zone1 (see nodepool_zone1 resource) starts at 3am (+1h) + # Maintenance of ...-zone2-nodepool-0 starts at 5am (+2h) + # Maintenance of ...-zone2-nodepool-1 starts at 9am (+4h) + # Maintenance of ...-zone2-nodepool-2 starts at 1pm (+4h) ... + # if the number of hour exceeds 24, the maintenance shall start on the next day + day_of_the_week = (local.maintenance_hour + 1 + 2 + count.index * 4) < 24 ? local.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, local.maintenance_day, null) + time = format("%02d:00:00Z", (local.maintenance_hour + 1 + 2 + count.index * 4) % 24) + } + + datacenter_id = var.datacenter_id + k8s_cluster_id = ionoscloud_k8s_cluster.cluster.id + cpu_family = local.cpu_family + storage_type = "SSD" + auto_scaling{ + min_node_count = 1 + max_node_count = var.max_node_count - local.node_count + } + node_count = local.node_count + cores_count = local.core_count + ram_size = local.ram_size + storage_size = 100 + public_ips = local.public_ip_pool_zone2 != null ? slice(local.public_ip_pool_zone2[count.index], 0, local.node_count + 1) : [] + #Ignore node count changes because of autoscaling to avoid unneeded updates + lifecycle { + ignore_changes = [ + node_count + ] + } +} + resource "ionoscloud_ipblock" "ippools_zone1" { count = var.create_public_ip_pools ? var.nodepool_per_zone_count : 0 name = "${local.cluster_name}-zone1-nodepool-${count.index}" diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index bb89914..52234f7 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -107,3 +107,21 @@ variable "storage_size" { type = number default = 100 } + +variable "auto_scaling" { + type = bool + description = "This value is used activate auto scaling the k8s cluster node pools." + default = false +} + +variable "min_node_count" { + type = number + description = "This value is used to set the minimum number of nodes for auto scaling the k8s cluster node pools." + default = null +} + +variable "max_node_count" { + type = number + description = "This value is used to set the maximum number of nodes for auto scaling the k8s cluster node pools." + default = null +} \ No newline at end of file From 91fd096e54656765dfed1557bdca5b5325699bbe Mon Sep 17 00:00:00 2001 From: marhode Date: Wed, 1 Nov 2023 10:52:48 +0100 Subject: [PATCH 02/94] Test changes --- modules/ionos-k8s-cluster/locals.tf | 2 +- modules/ionos-k8s-cluster/main.tf | 62 ++++++++++++++++++++++++++ modules/ionos-k8s-cluster/variables.tf | 31 +++++++++++++ 3 files changed, 94 insertions(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 09d99d5..4b87b3e 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -7,7 +7,7 @@ locals { # Note that one nodepool is created in each availability zone. # Example: With 2 zones, the actual total node count is twice as high as the number stated here. #node_count = var.node_count - node_count = var.auto_scaling ? (var.min_node_count - 1) : (var.node_count != null ? 
var.node_count : 1) #What should the default node count be? + node_count = var.custom_nodepools != null ? (var.min_node_count - 1) : (var.node_count != null ? var.node_count : 1) #What should the default node count be? # This cannot be changed, after the nodepool is created, because all worker nodes must be equal at any time. core_count = var.core_count # This cannot be changed, after the nodepool is created, because all worker nodes must be equal at any time. diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index c24c091..e90eb5e 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -8,6 +8,68 @@ resource "ionoscloud_k8s_cluster" "cluster" { api_subnet_allow_list = local.api_subnet_allow_list } + +#---- +#Test Pool 1 +#---- + +resource "ionoscloud_k8s_node_pool" "nodepool_zone1" { + for_each = {for i, pool in var.custom_nodepools : i => custom_nodepools } + availability_zone = each.value.availability_zone + name = "${local.cluster_name}-zone1-nodepool-${count.index}" + k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version + allow_replace = var.allow_node_pool_replacement + # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks + # it iterates through the list of var.associated_lans and sets the appropriate lan id + # it also sets one or multiple route to the lan, if a not empty entry exists in routes_list(var.associated_lans) + dynamic "lans" { + for_each = var.associated_lans + content { + id = lans.value["id"] + dynamic "routes" { + # if there is an entry in the routes_list, iterate through the values in the routes_list to create the routes + # lans.key = works like count.index, returns the iteration number of current lan -> 0,1,2,3,4... + for_each = var.associated_lans[lans.key].routes_list == null || length(var.associated_lans[lans.key].routes_list[0]) == 0 ? [] : var.associated_lans[lans.key].routes_list + + content { + # graps the values from the objects of the routes_list + network = routes.value["network"] + gateway_ip = routes.value["gateway_ip"] + } + } + } + } + + maintenance_window { + day_of_the_week = (local.maintenance_hour + 1 + count.index * 4) < 24 ? local.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, local.maintenance_day, null) + time = format("%02d:00:00Z", (local.maintenance_hour + 1 + count.index * 4) % 24) + } + + datacenter_id = var.datacenter_id + k8s_cluster_id = ionoscloud_k8s_cluster.cluster.id + cpu_family = local.cpu_family + storage_type = "SSD" + node_count = each.value.node_count + cores_count = each.value.core_count + ram_size = each.value.ram_size + storage_size = 100 + public_ips = local.public_ip_pool_zone1 != null ? slice(local.public_ip_pool_zone1[count.index], 0, local.node_count + 1) : [] + + dynamic "auto_scaling" { + for_each = each.value.auto_scaling == true ? 
[1] : [] + content { + min_node_count = each.value.min_node_count + max_node_count = each.value.max_node_count + } + } + + lifecycle { + ignore_changes = each.value.ignore_changes + } +} + + + #---- #Node Pool 1 #---- diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index 52234f7..ffc9624 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -120,6 +120,37 @@ variable "min_node_count" { default = null } +variable "custom_nodepools" { + type = list(object({ + name = string + auto_scaling = optional(bool) + node_count = number + min_node_count= number + max_node_count= number + ram_size = number + core_count = number + purpose = string + availability_zone = string + ignore_changes = string + count = optional(number, 1) + }) + ) + description = "This object describes nodepool configurations for dynamic creation of nodepools with a specific purpose and resources." + default = list(object({ + auto_scaling = false + min_node_count = null + max_node_count = null + count = var.nodepool_per_zone_count + node_count = var.node_count + ram_size = var.ram_size != null ? var.ram_size : 16384 + core_count = var.core_count + availability_zone = var.availability_zone + ignore_changes = var.auto_scaling ? [node_count] : [] + node_count = var.node_count != null ? var.node_count : 1 + }) + ) +} + variable "max_node_count" { type = number description = "This value is used to set the maximum number of nodes for auto scaling the k8s cluster node pools." From fd8f71c7d330f8a46a967d6e2640f7c3fc3a2187 Mon Sep 17 00:00:00 2001 From: marhode Date: Wed, 1 Nov 2023 12:45:23 +0100 Subject: [PATCH 03/94] Made changes to the nodepools for scaling testing --- modules/ionos-k8s-cluster/locals.tf | 4 +- modules/ionos-k8s-cluster/main.tf | 475 ++++++++++++++----------- modules/ionos-k8s-cluster/variables.tf | 49 +-- 3 files changed, 294 insertions(+), 234 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 4b87b3e..3f80808 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -6,8 +6,8 @@ locals { # Number of nodes per nodepool. # Note that one nodepool is created in each availability zone. # Example: With 2 zones, the actual total node count is twice as high as the number stated here. - #node_count = var.node_count - node_count = var.custom_nodepools != null ? (var.min_node_count - 1) : (var.node_count != null ? var.node_count : 1) #What should the default node count be? + node_count = var.node_count + #node_count = var.custom_nodepools != null ? (var.min_node_count - 1) : (var.node_count != null ? var.node_count : 1) #What should the default node count be? # This cannot be changed, after the nodepool is created, because all worker nodes must be equal at any time. core_count = var.core_count # This cannot be changed, after the nodepool is created, because all worker nodes must be equal at any time. 
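For orientation, the custom_nodepools variable added above is a list of objects, one entry per pool flavour. A purely illustrative caller-side value might look like the sketch below; the attribute names follow the patch-02 definition of the variable, the schema is still being reworked in the following commits, and every value shown is invented for the example:

# Hypothetical example input for var.custom_nodepools (not part of the patch):
custom_nodepools = [
  {
    name              = "autoscaled-workers"   # invented pool name
    purpose           = "worker"               # invented purpose label
    auto_scaling      = true
    node_count        = 2
    min_node_count    = 2
    max_node_count    = 6
    core_count        = 4
    ram_size          = 16384
    availability_zone = "ZONE_1"
    ignore_changes    = "node_count"           # string-typed at this point in the series
    count             = 1
  }
]
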
diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index e90eb5e..1448ad9 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -13,10 +13,11 @@ resource "ionoscloud_k8s_cluster" "cluster" { #Test Pool 1 #---- -resource "ionoscloud_k8s_node_pool" "nodepool_zone1" { +resource "ionoscloud_k8s_node_pool" "nodepool_zone1_scalingtest" { for_each = {for i, pool in var.custom_nodepools : i => custom_nodepools } - availability_zone = each.value.availability_zone - name = "${local.cluster_name}-zone1-nodepool-${count.index}" + count = each.value.nodepool_per_zone_count + availability_zone = "ZONE_1" + name = "${local.cluster_name}-zone1-nodepool${count.index}-${each.value.purpose}" k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version allow_replace = var.allow_node_pool_replacement # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks @@ -68,16 +69,15 @@ resource "ionoscloud_k8s_node_pool" "nodepool_zone1" { } } - - #---- -#Node Pool 1 +#Test Pool 2 #---- -resource "ionoscloud_k8s_node_pool" "nodepool_zone1" { - count = local.nodepool_per_zone_count - availability_zone = "ZONE_1" - name = "${local.cluster_name}-zone1-nodepool-${count.index}" +resource "ionoscloud_k8s_node_pool" "nodepool_zone2_scalingtest" { + for_each = {for i, pool in var.custom_nodepools : i => custom_nodepools } + count = each.value.nodepool_per_zone_count + availability_zone = "ZONE_2" + name = "${local.cluster_name}-zone2-nodepool${count.index}-${each.value.purpose}" k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version allow_replace = var.allow_node_pool_replacement # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks @@ -102,225 +102,284 @@ resource "ionoscloud_k8s_node_pool" "nodepool_zone1" { } maintenance_window { - # The maintenance of the nodepools starts 1 hour after the cluster (control plane) - # The maintenance window for one nodepool is 4 hours long. They are shifted by 4 hours so the don't overlap (in the same zone) - # Example: - # Maintenance start (local.maintenance_hour): 2am - # Control plane (see cluster resource) starts at 2am - # Maintenance of ...-zone1-nodepool-0 starts at 3am (+1h) - # Maintenance of ...-zone1-nodepool-1 starts at 7am (+4h) - # Maintenance of ...-zone1-nodepool-2 starts at 11am (+4h) ... - # if the number of hour exceeds 24, the maintenance shall start on the next day day_of_the_week = (local.maintenance_hour + 1 + count.index * 4) < 24 ? local.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, local.maintenance_day, null) - time = format("%02d:00:00Z", (local.maintenance_hour + 1 + count.index * 4) % 24) + time = format("%02d:00:00Z", (local.maintenance_hour + 1 + 2 + count.index * 4) % 24) } datacenter_id = var.datacenter_id k8s_cluster_id = ionoscloud_k8s_cluster.cluster.id cpu_family = local.cpu_family storage_type = "SSD" - node_count = local.node_count - cores_count = local.core_count - ram_size = local.ram_size + node_count = each.value.node_count + cores_count = each.value.core_count + ram_size = each.value.ram_size storage_size = 100 public_ips = local.public_ip_pool_zone1 != null ? 
slice(local.public_ip_pool_zone1[count.index], 0, local.node_count + 1) : [] -} -#---- -#Node Pool 1 autoscaling -#node_count adjusted to act as a 2 in 1 nodepool with partially enabled autoscaling activated -#---- - -resource "ionoscloud_k8s_node_pool" "nodepool_zone1_autoscaling" { - count = var.auto_scaling ? 1 : 0 - availability_zone = "ZONE_1" - name = "${local.cluster_name}-zone1-nodepool-autoscaling-${count.index}" - k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version - allow_replace = var.allow_node_pool_replacement - # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks - # it iterates through the list of var.associated_lans and sets the appropriate lan id - # it also sets one or multiple route to the lan, if a not empty entry exists in routes_list(var.associated_lans) - dynamic "lans" { - for_each = var.associated_lans + dynamic "auto_scaling" { + for_each = each.value.auto_scaling == true ? [1] : [] content { - id = lans.value["id"] - dynamic "routes" { - # if there is an entry in the routes_list, iterate through the values in the routes_list to create the routes - # lans.key = works like count.index, returns the iteration number of current lan -> 0,1,2,3,4... - for_each = var.associated_lans[lans.key].routes_list == null || length(var.associated_lans[lans.key].routes_list[0]) == 0 ? [] : var.associated_lans[lans.key].routes_list - - content { - # graps the values from the objects of the routes_list - network = routes.value["network"] - gateway_ip = routes.value["gateway_ip"] - } - } + min_node_count = each.value.min_node_count + max_node_count = each.value.max_node_count } } - maintenance_window { - # The maintenance of the nodepools starts 1 hour after the cluster (control plane) - # The maintenance window for one nodepool is 4 hours long. They are shifted by 4 hours so the don't overlap (in the same zone) - # Example: - # Maintenance start (local.maintenance_hour): 2am - # Control plane (see cluster resource) starts at 2am - # Maintenance of ...-zone1-nodepool-0 starts at 3am (+1h) - # Maintenance of ...-zone1-nodepool-1 starts at 7am (+4h) - # Maintenance of ...-zone1-nodepool-2 starts at 11am (+4h) ... - # if the number of hour exceeds 24, the maintenance shall start on the next day - day_of_the_week = (local.maintenance_hour + 1 + count.index * 4) < 24 ? local.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, local.maintenance_day, null) - time = format("%02d:00:00Z", (local.maintenance_hour + 1 + count.index * 4) % 24) - } - - datacenter_id = var.datacenter_id - k8s_cluster_id = ionoscloud_k8s_cluster.cluster.id - cpu_family = local.cpu_family - storage_type = "SSD" - auto_scaling{ - min_node_count = 1 - max_node_count = var.max_node_count - local.node_count - } - node_count = local.node_count - cores_count = local.core_count - ram_size = local.ram_size - storage_size = 100 - public_ips = local.public_ip_pool_zone1 != null ? 
slice(local.public_ip_pool_zone1[count.index], 0, local.node_count + 1) : [] - #Ignore node count changes because of autoscaling to avoid unneeded updates lifecycle { - ignore_changes = [ - node_count - ] - } -} - -#---- -#Node Pool 2 -#---- - -resource "ionoscloud_k8s_node_pool" "nodepool_zone2" { - count = local.nodepool_per_zone_count - - availability_zone = "ZONE_2" - name = "${local.cluster_name}-zone2-nodepool-${count.index}" - k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version - allow_replace = var.allow_node_pool_replacement - # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks - # it iterates through the list of var.associated_lans and sets the appropriate lan id - # it also sets one or multiple route to the lan, if a not empty entry exists in routes_list(var.associated_lans) - dynamic "lans" { - for_each = var.associated_lans - content { - id = lans.value["id"] - dynamic "routes" { - # if there is an entry in the routes_list, iterate through the values in the routes_list to create the routes - # lans.key = works like count.index, returns the iteration number of current lan -> 0,1,2,3,4... - for_each = var.associated_lans[lans.key].routes_list == null || length(var.associated_lans[lans.key].routes_list[0]) == 0 ? [] : var.associated_lans[lans.key].routes_list - - content { - # graps the values from the objects of the routes_list - network = routes.value["network"] - gateway_ip = routes.value["gateway_ip"] - } - } - } - } - - maintenance_window { - # The maintenance of the nodepools starts 1 hour after the cluster (control plane) - # The maintenance window for one nodepool is 4 hours long. They are shifted by 4 hours so the don't overlap (in the same zone) - # Additionally the zones are shifted by 2 hours, so this one starts 2 hours later - # Example: - # Maintenance start (local.maintenance_hour): 2am - # Control plane (see cluster resource) starts at 2am - # Zone1 (see nodepool_zone1 resource) starts at 3am (+1h) - # Maintenance of ...-zone2-nodepool-0 starts at 5am (+2h) - # Maintenance of ...-zone2-nodepool-1 starts at 9am (+4h) - # Maintenance of ...-zone2-nodepool-2 starts at 1pm (+4h) ... - # if the number of hour exceeds 24, the maintenance shall start on the next day - day_of_the_week = (local.maintenance_hour + 1 + 2 + count.index * 4) < 24 ? local.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, local.maintenance_day, null) - time = format("%02d:00:00Z", (local.maintenance_hour + 1 + 2 + count.index * 4) % 24) + ignore_changes = each.value.ignore_changes } - - datacenter_id = var.datacenter_id - k8s_cluster_id = ionoscloud_k8s_cluster.cluster.id - cpu_family = local.cpu_family - storage_type = "SSD" - node_count = local.node_count - cores_count = local.core_count - ram_size = local.ram_size - storage_size = 100 - public_ips = local.public_ip_pool_zone2 != null ? slice(local.public_ip_pool_zone2[count.index], 0, local.node_count + 1) : [] } #---- -#Node Pool 2 autoscaling -#node_count adjusted to act as a 2 in 1 nodepool with partially enabled autoscaling activated +#Node Pool 1 #---- -resource "ionoscloud_k8s_node_pool" "nodepool_zone2_autoscaling" { - count = var.auto_scaling ? 
1 : 0 - - availability_zone = "ZONE_2" - name = "${local.cluster_name}-zone2-nodepool-autoscaling-${count.index}" - k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version - allow_replace = var.allow_node_pool_replacement - # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks - # it iterates through the list of var.associated_lans and sets the appropriate lan id - # it also sets one or multiple route to the lan, if a not empty entry exists in routes_list(var.associated_lans) - dynamic "lans" { - for_each = var.associated_lans - content { - id = lans.value["id"] - dynamic "routes" { - # if there is an entry in the routes_list, iterate through the values in the routes_list to create the routes - # lans.key = works like count.index, returns the iteration number of current lan -> 0,1,2,3,4... - for_each = var.associated_lans[lans.key].routes_list == null || length(var.associated_lans[lans.key].routes_list[0]) == 0 ? [] : var.associated_lans[lans.key].routes_list - - content { - # graps the values from the objects of the routes_list - network = routes.value["network"] - gateway_ip = routes.value["gateway_ip"] - } - } - } - } - - maintenance_window { - # The maintenance of the nodepools starts 1 hour after the cluster (control plane) - # The maintenance window for one nodepool is 4 hours long. They are shifted by 4 hours so the don't overlap (in the same zone) - # Additionally the zones are shifted by 2 hours, so this one starts 2 hours later - # Example: - # Maintenance start (local.maintenance_hour): 2am - # Control plane (see cluster resource) starts at 2am - # Zone1 (see nodepool_zone1 resource) starts at 3am (+1h) - # Maintenance of ...-zone2-nodepool-0 starts at 5am (+2h) - # Maintenance of ...-zone2-nodepool-1 starts at 9am (+4h) - # Maintenance of ...-zone2-nodepool-2 starts at 1pm (+4h) ... - # if the number of hour exceeds 24, the maintenance shall start on the next day - day_of_the_week = (local.maintenance_hour + 1 + 2 + count.index * 4) < 24 ? local.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, local.maintenance_day, null) - time = format("%02d:00:00Z", (local.maintenance_hour + 1 + 2 + count.index * 4) % 24) - } - - datacenter_id = var.datacenter_id - k8s_cluster_id = ionoscloud_k8s_cluster.cluster.id - cpu_family = local.cpu_family - storage_type = "SSD" - auto_scaling{ - min_node_count = 1 - max_node_count = var.max_node_count - local.node_count - } - node_count = local.node_count - cores_count = local.core_count - ram_size = local.ram_size - storage_size = 100 - public_ips = local.public_ip_pool_zone2 != null ? 
slice(local.public_ip_pool_zone2[count.index], 0, local.node_count + 1) : [] - #Ignore node count changes because of autoscaling to avoid unneeded updates - lifecycle { - ignore_changes = [ - node_count - ] - } -} +# resource "ionoscloud_k8s_node_pool" "nodepool_zone1" { +# count = local.nodepool_per_zone_count +# availability_zone = "ZONE_1" +# name = "${local.cluster_name}-zone1-nodepool-${count.index}" +# k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version +# allow_replace = var.allow_node_pool_replacement +# # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks +# # it iterates through the list of var.associated_lans and sets the appropriate lan id +# # it also sets one or multiple route to the lan, if a not empty entry exists in routes_list(var.associated_lans) +# dynamic "lans" { +# for_each = var.associated_lans +# content { +# id = lans.value["id"] +# dynamic "routes" { +# # if there is an entry in the routes_list, iterate through the values in the routes_list to create the routes +# # lans.key = works like count.index, returns the iteration number of current lan -> 0,1,2,3,4... +# for_each = var.associated_lans[lans.key].routes_list == null || length(var.associated_lans[lans.key].routes_list[0]) == 0 ? [] : var.associated_lans[lans.key].routes_list + +# content { +# # graps the values from the objects of the routes_list +# network = routes.value["network"] +# gateway_ip = routes.value["gateway_ip"] +# } +# } +# } +# } + +# maintenance_window { +# # The maintenance of the nodepools starts 1 hour after the cluster (control plane) +# # The maintenance window for one nodepool is 4 hours long. They are shifted by 4 hours so the don't overlap (in the same zone) +# # Example: +# # Maintenance start (local.maintenance_hour): 2am +# # Control plane (see cluster resource) starts at 2am +# # Maintenance of ...-zone1-nodepool-0 starts at 3am (+1h) +# # Maintenance of ...-zone1-nodepool-1 starts at 7am (+4h) +# # Maintenance of ...-zone1-nodepool-2 starts at 11am (+4h) ... +# # if the number of hour exceeds 24, the maintenance shall start on the next day +# day_of_the_week = (local.maintenance_hour + 1 + count.index * 4) < 24 ? local.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, local.maintenance_day, null) +# time = format("%02d:00:00Z", (local.maintenance_hour + 1 + count.index * 4) % 24) +# } + +# datacenter_id = var.datacenter_id +# k8s_cluster_id = ionoscloud_k8s_cluster.cluster.id +# cpu_family = local.cpu_family +# storage_type = "SSD" +# node_count = local.node_count +# cores_count = local.core_count +# ram_size = local.ram_size +# storage_size = 100 +# public_ips = local.public_ip_pool_zone1 != null ? slice(local.public_ip_pool_zone1[count.index], 0, local.node_count + 1) : [] +# } + +# #---- +# #Node Pool 1 autoscaling +# #node_count adjusted to act as a 2 in 1 nodepool with partially enabled autoscaling activated +# #---- + +# resource "ionoscloud_k8s_node_pool" "nodepool_zone1_autoscaling" { +# count = var.auto_scaling ? 
1 : 0 +# availability_zone = "ZONE_1" +# name = "${local.cluster_name}-zone1-nodepool-autoscaling-${count.index}" +# k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version +# allow_replace = var.allow_node_pool_replacement +# # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks +# # it iterates through the list of var.associated_lans and sets the appropriate lan id +# # it also sets one or multiple route to the lan, if a not empty entry exists in routes_list(var.associated_lans) +# dynamic "lans" { +# for_each = var.associated_lans +# content { +# id = lans.value["id"] +# dynamic "routes" { +# # if there is an entry in the routes_list, iterate through the values in the routes_list to create the routes +# # lans.key = works like count.index, returns the iteration number of current lan -> 0,1,2,3,4... +# for_each = var.associated_lans[lans.key].routes_list == null || length(var.associated_lans[lans.key].routes_list[0]) == 0 ? [] : var.associated_lans[lans.key].routes_list + +# content { +# # graps the values from the objects of the routes_list +# network = routes.value["network"] +# gateway_ip = routes.value["gateway_ip"] +# } +# } +# } +# } + +# maintenance_window { +# # The maintenance of the nodepools starts 1 hour after the cluster (control plane) +# # The maintenance window for one nodepool is 4 hours long. They are shifted by 4 hours so the don't overlap (in the same zone) +# # Example: +# # Maintenance start (local.maintenance_hour): 2am +# # Control plane (see cluster resource) starts at 2am +# # Maintenance of ...-zone1-nodepool-0 starts at 3am (+1h) +# # Maintenance of ...-zone1-nodepool-1 starts at 7am (+4h) +# # Maintenance of ...-zone1-nodepool-2 starts at 11am (+4h) ... +# # if the number of hour exceeds 24, the maintenance shall start on the next day +# day_of_the_week = (local.maintenance_hour + 1 + count.index * 4) < 24 ? local.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, local.maintenance_day, null) +# time = format("%02d:00:00Z", (local.maintenance_hour + 1 + count.index * 4) % 24) +# } + +# datacenter_id = var.datacenter_id +# k8s_cluster_id = ionoscloud_k8s_cluster.cluster.id +# cpu_family = local.cpu_family +# storage_type = "SSD" +# auto_scaling{ +# min_node_count = 1 +# max_node_count = var.max_node_count - local.node_count +# } +# node_count = local.node_count +# cores_count = local.core_count +# ram_size = local.ram_size +# storage_size = 100 +# public_ips = local.public_ip_pool_zone1 != null ? 
slice(local.public_ip_pool_zone1[count.index], 0, local.node_count + 1) : [] +# #Ignore node count changes because of autoscaling to avoid unneeded updates +# lifecycle { +# ignore_changes = [ +# node_count +# ] +# } +# } + +# #---- +# #Node Pool 2 +# #---- + +# resource "ionoscloud_k8s_node_pool" "nodepool_zone2" { +# count = local.nodepool_per_zone_count + +# availability_zone = "ZONE_2" +# name = "${local.cluster_name}-zone2-nodepool-${count.index}" +# k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version +# allow_replace = var.allow_node_pool_replacement +# # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks +# # it iterates through the list of var.associated_lans and sets the appropriate lan id +# # it also sets one or multiple route to the lan, if a not empty entry exists in routes_list(var.associated_lans) +# dynamic "lans" { +# for_each = var.associated_lans +# content { +# id = lans.value["id"] +# dynamic "routes" { +# # if there is an entry in the routes_list, iterate through the values in the routes_list to create the routes +# # lans.key = works like count.index, returns the iteration number of current lan -> 0,1,2,3,4... +# for_each = var.associated_lans[lans.key].routes_list == null || length(var.associated_lans[lans.key].routes_list[0]) == 0 ? [] : var.associated_lans[lans.key].routes_list + +# content { +# # graps the values from the objects of the routes_list +# network = routes.value["network"] +# gateway_ip = routes.value["gateway_ip"] +# } +# } +# } +# } + +# maintenance_window { +# # The maintenance of the nodepools starts 1 hour after the cluster (control plane) +# # The maintenance window for one nodepool is 4 hours long. They are shifted by 4 hours so the don't overlap (in the same zone) +# # Additionally the zones are shifted by 2 hours, so this one starts 2 hours later +# # Example: +# # Maintenance start (local.maintenance_hour): 2am +# # Control plane (see cluster resource) starts at 2am +# # Zone1 (see nodepool_zone1 resource) starts at 3am (+1h) +# # Maintenance of ...-zone2-nodepool-0 starts at 5am (+2h) +# # Maintenance of ...-zone2-nodepool-1 starts at 9am (+4h) +# # Maintenance of ...-zone2-nodepool-2 starts at 1pm (+4h) ... +# # if the number of hour exceeds 24, the maintenance shall start on the next day +# day_of_the_week = (local.maintenance_hour + 1 + 2 + count.index * 4) < 24 ? local.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, local.maintenance_day, null) +# time = format("%02d:00:00Z", (local.maintenance_hour + 1 + 2 + count.index * 4) % 24) +# } + +# datacenter_id = var.datacenter_id +# k8s_cluster_id = ionoscloud_k8s_cluster.cluster.id +# cpu_family = local.cpu_family +# storage_type = "SSD" +# node_count = local.node_count +# cores_count = local.core_count +# ram_size = local.ram_size +# storage_size = 100 +# public_ips = local.public_ip_pool_zone2 != null ? slice(local.public_ip_pool_zone2[count.index], 0, local.node_count + 1) : [] +# } + +# #---- +# #Node Pool 2 autoscaling +# #node_count adjusted to act as a 2 in 1 nodepool with partially enabled autoscaling activated +# #---- + +# resource "ionoscloud_k8s_node_pool" "nodepool_zone2_autoscaling" { +# count = var.auto_scaling ? 
1 : 0 + +# availability_zone = "ZONE_2" +# name = "${local.cluster_name}-zone2-nodepool-autoscaling-${count.index}" +# k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version +# allow_replace = var.allow_node_pool_replacement +# # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks +# # it iterates through the list of var.associated_lans and sets the appropriate lan id +# # it also sets one or multiple route to the lan, if a not empty entry exists in routes_list(var.associated_lans) +# dynamic "lans" { +# for_each = var.associated_lans +# content { +# id = lans.value["id"] +# dynamic "routes" { +# # if there is an entry in the routes_list, iterate through the values in the routes_list to create the routes +# # lans.key = works like count.index, returns the iteration number of current lan -> 0,1,2,3,4... +# for_each = var.associated_lans[lans.key].routes_list == null || length(var.associated_lans[lans.key].routes_list[0]) == 0 ? [] : var.associated_lans[lans.key].routes_list + +# content { +# # graps the values from the objects of the routes_list +# network = routes.value["network"] +# gateway_ip = routes.value["gateway_ip"] +# } +# } +# } +# } + +# maintenance_window { +# # The maintenance of the nodepools starts 1 hour after the cluster (control plane) +# # The maintenance window for one nodepool is 4 hours long. They are shifted by 4 hours so the don't overlap (in the same zone) +# # Additionally the zones are shifted by 2 hours, so this one starts 2 hours later +# # Example: +# # Maintenance start (local.maintenance_hour): 2am +# # Control plane (see cluster resource) starts at 2am +# # Zone1 (see nodepool_zone1 resource) starts at 3am (+1h) +# # Maintenance of ...-zone2-nodepool-0 starts at 5am (+2h) +# # Maintenance of ...-zone2-nodepool-1 starts at 9am (+4h) +# # Maintenance of ...-zone2-nodepool-2 starts at 1pm (+4h) ... +# # if the number of hour exceeds 24, the maintenance shall start on the next day +# day_of_the_week = (local.maintenance_hour + 1 + 2 + count.index * 4) < 24 ? local.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, local.maintenance_day, null) +# time = format("%02d:00:00Z", (local.maintenance_hour + 1 + 2 + count.index * 4) % 24) +# } + +# datacenter_id = var.datacenter_id +# k8s_cluster_id = ionoscloud_k8s_cluster.cluster.id +# cpu_family = local.cpu_family +# storage_type = "SSD" +# auto_scaling{ +# min_node_count = 1 +# max_node_count = var.max_node_count - local.node_count +# } +# node_count = local.node_count +# cores_count = local.core_count +# ram_size = local.ram_size +# storage_size = 100 +# public_ips = local.public_ip_pool_zone2 != null ? slice(local.public_ip_pool_zone2[count.index], 0, local.node_count + 1) : [] +# #Ignore node count changes because of autoscaling to avoid unneeded updates +# lifecycle { +# ignore_changes = [ +# node_count +# ] +# } +# } resource "ionoscloud_ipblock" "ippools_zone1" { count = var.create_public_ip_pools ? 
var.nodepool_per_zone_count : 0 diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index ffc9624..a7de565 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -108,51 +108,52 @@ variable "storage_size" { default = 100 } -variable "auto_scaling" { - type = bool - description = "This value is used activate auto scaling the k8s cluster node pools." - default = false -} -variable "min_node_count" { - type = number - description = "This value is used to set the minimum number of nodes for auto scaling the k8s cluster node pools." - default = null -} +# variable "auto_scaling" { +# type = bool +# description = "This value is used activate auto scaling the k8s cluster node pools." +# default = false +# } + +# variable "min_node_count" { +# type = number +# description = "This value is used to set the minimum number of nodes for auto scaling the k8s cluster node pools." +# default = null +# } variable "custom_nodepools" { type = list(object({ name = string - auto_scaling = optional(bool) + auto_scaling = optional(bool, false) node_count = number - min_node_count= number - max_node_count= number + nodepool_per_zone_count = optional(number, 1) + zone_count = number + min_node_count= optional(number, null) + max_node_count= optional(number, null) ram_size = number core_count = number purpose = string availability_zone = string ignore_changes = string - count = optional(number, 1) }) ) description = "This object describes nodepool configurations for dynamic creation of nodepools with a specific purpose and resources." default = list(object({ - auto_scaling = false - min_node_count = null - max_node_count = null - count = var.nodepool_per_zone_count + nodepool_per_zone_count = var.nodepool_per_zone_count node_count = var.node_count ram_size = var.ram_size != null ? var.ram_size : 16384 + zone_count = var.zone_count core_count = var.core_count + purpose = "legacy" availability_zone = var.availability_zone - ignore_changes = var.auto_scaling ? [node_count] : [] + ignore_changes = [] node_count = var.node_count != null ? var.node_count : 1 }) ) } -variable "max_node_count" { - type = number - description = "This value is used to set the maximum number of nodes for auto scaling the k8s cluster node pools." - default = null -} \ No newline at end of file +# variable "max_node_count" { +# type = number +# description = "This value is used to set the maximum number of nodes for auto scaling the k8s cluster node pools." 
+# default = null +# } \ No newline at end of file From 2fb90f7d9872ab69ba1e3571bb06aa180c562a50 Mon Sep 17 00:00:00 2001 From: marhode Date: Thu, 2 Nov 2023 15:32:14 +0100 Subject: [PATCH 04/94] Additions to test the terraform nodepool creation --- modules/ionos-k8s-cluster/locals.tf | 3 ++ modules/ionos-k8s-cluster/main.tf | 47 ++++++++++---------------- modules/ionos-k8s-cluster/variables.tf | 11 +++--- 3 files changed, 26 insertions(+), 35 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 3f80808..e862bbc 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -19,5 +19,8 @@ locals { maintenance_day = var.maintenance_day maintenance_hour = var.maintenance_hour api_subnet_allow_list = var.api_subnet_allow_list + #if n.auto_scaling == true + availabilityzone_split = toset(flatten([for n in var.custom_nodepools : [for x in n.availabilityzones : merge(n,{availabilityzone = x})] ])) + nodepool_per_zone_creator = toset(flatten([for n in local.availabilityzone_split : [for x in range(nodepool_per_zone_count) : merge(n,{nodepool_index = x})] ])) } diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 1448ad9..f04181b 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -8,16 +8,14 @@ resource "ionoscloud_k8s_cluster" "cluster" { api_subnet_allow_list = local.api_subnet_allow_list } - #---- -#Test Pool 1 +#Test Pool 1 scaling #---- -resource "ionoscloud_k8s_node_pool" "nodepool_zone1_scalingtest" { - for_each = {for i, pool in var.custom_nodepools : i => custom_nodepools } - count = each.value.nodepool_per_zone_count - availability_zone = "ZONE_1" - name = "${local.cluster_name}-zone1-nodepool${count.index}-${each.value.purpose}" +resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { + for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}${np.nodepool_index}" => np if np.auto_scaling == true} + availability_zone = each.value.availabilityzone + name = each.key k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version allow_replace = var.allow_node_pool_replacement # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks @@ -56,28 +54,29 @@ resource "ionoscloud_k8s_node_pool" "nodepool_zone1_scalingtest" { storage_size = 100 public_ips = local.public_ip_pool_zone1 != null ? slice(local.public_ip_pool_zone1[count.index], 0, local.node_count + 1) : [] - dynamic "auto_scaling" { - for_each = each.value.auto_scaling == true ? 
[1] : [] - content { + auto_scaling { min_node_count = each.value.min_node_count max_node_count = each.value.max_node_count - } } lifecycle { - ignore_changes = each.value.ignore_changes + ignore_changes = [ node_count ] } } #---- -#Test Pool 2 +#Test Pool 2 legacy #---- -resource "ionoscloud_k8s_node_pool" "nodepool_zone2_scalingtest" { - for_each = {for i, pool in var.custom_nodepools : i => custom_nodepools } - count = each.value.nodepool_per_zone_count - availability_zone = "ZONE_2" - name = "${local.cluster_name}-zone2-nodepool${count.index}-${each.value.purpose}" +resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { + for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}${np.nodepool_index}" => np if np.auto_scaling == true} + availability_zone = each.value.availabilityzone + #for_each = { for k, v in var.custom_nodepools : k => v if var.auto_scaling } + #for_each = { for k in compact([for k, v in var.mymap: v.condition ? k : ""]): k => var.mymap[k] } + #conditional create is just another count, if auto_scaling=true set count to nodepools_per_zone_count + #for_each = { for pool in var.custom_nodepools : pool.site_name => pool if var.environment != "prod"} + #count = each.value.nodepool_per_zone_count + name = "${local.cluster_name}-${each.value.availability_zone}-nodepool${count.index}-${each.value.purpose}" k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version allow_replace = var.allow_node_pool_replacement # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks @@ -115,18 +114,6 @@ resource "ionoscloud_k8s_node_pool" "nodepool_zone2_scalingtest" { ram_size = each.value.ram_size storage_size = 100 public_ips = local.public_ip_pool_zone1 != null ? slice(local.public_ip_pool_zone1[count.index], 0, local.node_count + 1) : [] - - dynamic "auto_scaling" { - for_each = each.value.auto_scaling == true ? [1] : [] - content { - min_node_count = each.value.min_node_count - max_node_count = each.value.max_node_count - } - } - - lifecycle { - ignore_changes = each.value.ignore_changes - } } #---- diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index a7de565..a7a6748 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -121,13 +121,13 @@ variable "storage_size" { # default = null # } +#It is required to define each resource per availability zone on it's own (One definition for zone 1 and one definition for zone 2) variable "custom_nodepools" { type = list(object({ name = string auto_scaling = optional(bool, false) node_count = number nodepool_per_zone_count = optional(number, 1) - zone_count = number min_node_count= optional(number, null) max_node_count= optional(number, null) ram_size = number @@ -135,19 +135,20 @@ variable "custom_nodepools" { purpose = string availability_zone = string ignore_changes = string + availabilityzones = list(string) }) ) description = "This object describes nodepool configurations for dynamic creation of nodepools with a specific purpose and resources." default = list(object({ nodepool_per_zone_count = var.nodepool_per_zone_count node_count = var.node_count - ram_size = var.ram_size != null ? var.ram_size : 16384 - zone_count = var.zone_count + ram_size = var.ram_size core_count = var.core_count purpose = "legacy" availability_zone = var.availability_zone - ignore_changes = [] - node_count = var.node_count != null ? 
var.node_count : 1 + node_count = var.node_count + availabilityzones = ["ZONE_1", "ZONE_2"] + allow_node_pool_replacement = var.allow_node_pool_replacement }) ) } From c002756cb1d5bee039ba6c6c6556bd3b64838b84 Mon Sep 17 00:00:00 2001 From: marhode Date: Fri, 3 Nov 2023 08:50:44 +0100 Subject: [PATCH 05/94] Fix of error due to variable definition and expansion of the for loop to match requirements --- modules/ionos-k8s-cluster/locals.tf | 18 +++++++++++++++++- modules/ionos-k8s-cluster/main.tf | 12 ++++++------ modules/ionos-k8s-cluster/variables.tf | 23 ++++++++++++++--------- 3 files changed, 37 insertions(+), 16 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index e862bbc..0cc4115 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -19,8 +19,24 @@ locals { maintenance_day = var.maintenance_day maintenance_hour = var.maintenance_hour api_subnet_allow_list = var.api_subnet_allow_list + + #Loop through our nodepool list to detect empty values and fill them with legacy values + custom_nodepools = {for np in var.custom_nodepools : np => { + np.nodepool_per_zone_count = np.nodepool_per_zone_count != null ? np.nodepool_per_zone_count : var.nodepool_per_zone_count + np.node_count = np.node_count != null ? np.node_count : var.node_count + np.ram_size = np.ram_size != null ? np.ram_size : var.ram_size + np.core_count = np.core_count != null ? np.core_count : var.core_count + np.allow_node_pool_replacement = np.allow_node_pool_replacement != null ? np.allow_node_pool_replacement : var.allow_node_pool_replacement + np.datacenter_location = np.datacenter_location != null ? np.datacenter_location : var.datacenter_location + np.associated_lans = np.associated_lans != null ? np.associated_lans : var.associated_lans + np.maintenance_day = np.maintenance_day != null ? np.maintenance_day : var.maintenance_day + np.maintenance_hour = np.maintenance_hour != null ? 
np.maintenance_hour : var.maintenance_hour + } + + } + #if n.auto_scaling == true - availabilityzone_split = toset(flatten([for n in var.custom_nodepools : [for x in n.availabilityzones : merge(n,{availabilityzone = x})] ])) + availabilityzone_split = toset(flatten([for n in local.custom_nodepools : [for x in n.availabilityzones : merge(n,{availabilityzone = x})] ])) nodepool_per_zone_creator = toset(flatten([for n in local.availabilityzone_split : [for x in range(nodepool_per_zone_count) : merge(n,{nodepool_index = x})] ])) } diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index f04181b..ebf4426 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -17,18 +17,18 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { availability_zone = each.value.availabilityzone name = each.key k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version - allow_replace = var.allow_node_pool_replacement + allow_replace = each.value.allow_node_pool_replacement # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks # it iterates through the list of var.associated_lans and sets the appropriate lan id # it also sets one or multiple route to the lan, if a not empty entry exists in routes_list(var.associated_lans) dynamic "lans" { - for_each = var.associated_lans + for_each = each.value.associated_lans content { id = lans.value["id"] dynamic "routes" { # if there is an entry in the routes_list, iterate through the values in the routes_list to create the routes # lans.key = works like count.index, returns the iteration number of current lan -> 0,1,2,3,4... - for_each = var.associated_lans[lans.key].routes_list == null || length(var.associated_lans[lans.key].routes_list[0]) == 0 ? [] : var.associated_lans[lans.key].routes_list + for_each = each.value.associated_lans[lans.key].routes_list == null || length(each.value.associated_lans[lans.key].routes_list[0]) == 0 ? [] : each.value.associated_lans[lans.key].routes_list content { # graps the values from the objects of the routes_list @@ -38,10 +38,10 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { } } } - + #TODO we cant use count.index anymore maintenance_window { - day_of_the_week = (local.maintenance_hour + 1 + count.index * 4) < 24 ? local.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, local.maintenance_day, null) - time = format("%02d:00:00Z", (local.maintenance_hour + 1 + count.index * 4) % 24) + day_of_the_week = (each.value.maintenance_hour + 1 + count.index * 4) < 24 ? 
each.value.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, each.value.maintenance_day, null) + time = format("%02d:00:00Z", (each.value.maintenance_hour + 1 + count.index * 4) % 24) } datacenter_id = var.datacenter_id diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index a7a6748..4428aa4 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -133,22 +133,27 @@ variable "custom_nodepools" { ram_size = number core_count = number purpose = string - availability_zone = string - ignore_changes = string availabilityzones = list(string) + datacenter_location = string + allow_node_pool_replacement = bool + associated_lans = list(object) + maintenance_day = string + maintenance_hour = number }) ) description = "This object describes nodepool configurations for dynamic creation of nodepools with a specific purpose and resources." default = list(object({ - nodepool_per_zone_count = var.nodepool_per_zone_count - node_count = var.node_count - ram_size = var.ram_size - core_count = var.core_count + nodepool_per_zone_count = null + node_count = null + ram_size = null + core_count = null purpose = "legacy" - availability_zone = var.availability_zone - node_count = var.node_count availabilityzones = ["ZONE_1", "ZONE_2"] - allow_node_pool_replacement = var.allow_node_pool_replacement + allow_node_pool_replacement = null + datacenter_location = null + associated_lans = null + maintenance_day = null + maintenance_hour = null }) ) } From 4bcf10f79049855af407a3922624c7b3dda6a0a5 Mon Sep 17 00:00:00 2001 From: marhode Date: Fri, 3 Nov 2023 09:39:55 +0100 Subject: [PATCH 06/94] Fix for object creation constructor --- modules/ionos-k8s-cluster/locals.tf | 4 +++- modules/ionos-k8s-cluster/variables.tf | 5 ++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 0cc4115..0a78229 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -21,6 +21,7 @@ locals { api_subnet_allow_list = var.api_subnet_allow_list #Loop through our nodepool list to detect empty values and fill them with legacy values + #Only required for downward compatibility and legacy nodepools custom_nodepools = {for np in var.custom_nodepools : np => { np.nodepool_per_zone_count = np.nodepool_per_zone_count != null ? np.nodepool_per_zone_count : var.nodepool_per_zone_count np.node_count = np.node_count != null ? np.node_count : var.node_count @@ -35,8 +36,9 @@ locals { } - #if n.auto_scaling == true + #availabilityzone_split duplicates objects with each of their Availability zones once. if [ZONE1, ZONE2] we get 2 objects with one of those each. 
availabilityzone_split = toset(flatten([for n in local.custom_nodepools : [for x in n.availabilityzones : merge(n,{availabilityzone = x})] ])) + #nodepool_per_zone_creator this duplicates the objects in each availability zone to the amount of nodepool_per_zone_count nodepool_per_zone_creator = toset(flatten([for n in local.availabilityzone_split : [for x in range(nodepool_per_zone_count) : merge(n,{nodepool_index = x})] ])) } diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index 4428aa4..9169846 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -136,7 +136,10 @@ variable "custom_nodepools" { availabilityzones = list(string) datacenter_location = string allow_node_pool_replacement = bool - associated_lans = list(object) + associated_lans = list(object({ + id = number + routes_list = list(any) + })) maintenance_day = string maintenance_hour = number }) From f12b253631a67e2206c935fea2fdb3594f00aa00 Mon Sep 17 00:00:00 2001 From: marhode Date: Fri, 3 Nov 2023 10:06:56 +0100 Subject: [PATCH 07/94] Terraform function call fix --- modules/ionos-k8s-cluster/variables.tf | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index 9169846..2b88f57 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -145,7 +145,8 @@ variable "custom_nodepools" { }) ) description = "This object describes nodepool configurations for dynamic creation of nodepools with a specific purpose and resources." - default = list(object({ + default = [{ + name = "Test" nodepool_per_zone_count = null node_count = null ram_size = null @@ -157,8 +158,8 @@ variable "custom_nodepools" { associated_lans = null maintenance_day = null maintenance_hour = null - }) - ) + }] + } # variable "max_node_count" { From 5ddddaa7ff979e3f22359a528c37baf204ceb849 Mon Sep 17 00:00:00 2001 From: marhode Date: Fri, 3 Nov 2023 13:59:21 +0100 Subject: [PATCH 08/94] Fixed the ip pool creation the new nodepool creation --- modules/ionos-k8s-cluster/locals.tf | 32 ++++++++++----------- modules/ionos-k8s-cluster/main.tf | 39 +++++++++++++------------- modules/ionos-k8s-cluster/output.tf | 12 ++++---- modules/ionos-k8s-cluster/variables.tf | 8 ++++-- 4 files changed, 47 insertions(+), 44 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 0a78229..0d64371 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -9,36 +9,34 @@ locals { node_count = var.node_count #node_count = var.custom_nodepools != null ? (var.min_node_count - 1) : (var.node_count != null ? var.node_count : 1) #What should the default node count be? # This cannot be changed, after the nodepool is created, because all worker nodes must be equal at any time. - core_count = var.core_count + #core_count = var.core_count # This cannot be changed, after the nodepool is created, because all worker nodes must be equal at any time. ram_size = var.ram_size != null ? var.ram_size : 16384 # The number of nodepools per zone. nodepool_per_zone_count = var.nodepool_per_zone_count - public_ip_pool_zone1 = var.create_public_ip_pools ? ionoscloud_ipblock.ippools_zone1[*].ips : var.public_ip_pool_zone1 - public_ip_pool_zone2 = var.create_public_ip_pools ? 
ionoscloud_ipblock.ippools_zone2[*].ips : var.public_ip_pool_zone2 - maintenance_day = var.maintenance_day - maintenance_hour = var.maintenance_hour + public_ip_pools = var.create_public_ip_pools ? ionoscloud_ipblock.ippools_scaling[*].ips : var.public_ip_pools + #maintenance_day = var.maintenance_day + #maintenance_hour = var.maintenance_hour api_subnet_allow_list = var.api_subnet_allow_list #Loop through our nodepool list to detect empty values and fill them with legacy values #Only required for downward compatibility and legacy nodepools custom_nodepools = {for np in var.custom_nodepools : np => { - np.nodepool_per_zone_count = np.nodepool_per_zone_count != null ? np.nodepool_per_zone_count : var.nodepool_per_zone_count - np.node_count = np.node_count != null ? np.node_count : var.node_count - np.ram_size = np.ram_size != null ? np.ram_size : var.ram_size - np.core_count = np.core_count != null ? np.core_count : var.core_count - np.allow_node_pool_replacement = np.allow_node_pool_replacement != null ? np.allow_node_pool_replacement : var.allow_node_pool_replacement - np.datacenter_location = np.datacenter_location != null ? np.datacenter_location : var.datacenter_location - np.associated_lans = np.associated_lans != null ? np.associated_lans : var.associated_lans - np.maintenance_day = np.maintenance_day != null ? np.maintenance_day : var.maintenance_day - np.maintenance_hour = np.maintenance_hour != null ? np.maintenance_hour : var.maintenance_hour - } - + np.nodepool_per_zone_count = np.nodepool_per_zone_count != null ? np.nodepool_per_zone_count : var.nodepool_per_zone_count + np.node_count = np.node_count != null ? np.node_count : var.node_count + np.ram_size = np.ram_size != null ? np.ram_size : var.ram_size + np.core_count = np.core_count != null ? np.core_count : var.core_count + np.allow_node_pool_replacement = np.allow_node_pool_replacement != null ? np.allow_node_pool_replacement : var.allow_node_pool_replacement + np.associated_lans = np.associated_lans != null ? np.associated_lans : var.associated_lans + np.maintenance_day = np.maintenance_day != null ? np.maintenance_day : var.maintenance_day + np.maintenance_hour = np.maintenance_hour != null ? np.maintenance_hour : var.maintenance_hour + } } + #availabilityzone_split duplicates objects with each of their Availability zones once. if [ZONE1, ZONE2] we get 2 objects with one of those each. 
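  # Side note on the null-fallback pattern above (an aside, not part of the module code):
  # each "np.x != null ? np.x : var.x" pair is the usual Terraform idiom for per-pool
  # overrides and could also be written as, for example,
  #   node_count = coalesce(np.node_count, var.node_count)
  # assuming the module-level variable itself is never null; coalesce() errors when all of
  # its arguments are null, so the explicit conditional used here is the more defensive form.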
availabilityzone_split = toset(flatten([for n in local.custom_nodepools : [for x in n.availabilityzones : merge(n,{availabilityzone = x})] ])) #nodepool_per_zone_creator this duplicates the objects in each availability zone to the amount of nodepool_per_zone_count - nodepool_per_zone_creator = toset(flatten([for n in local.availabilityzone_split : [for x in range(nodepool_per_zone_count) : merge(n,{nodepool_index = x})] ])) + nodepool_per_zone_creator = toset(flatten([for n in local.availabilityzone_split : [for x in range(n.nodepool_per_zone_count) : merge(n,{nodepool_index = x})] ])) } diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index ebf4426..147136d 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -2,8 +2,8 @@ resource "ionoscloud_k8s_cluster" "cluster" { name = local.cluster_name k8s_version = var.k8s_version maintenance_window { - day_of_the_week = local.maintenance_day - time = format("%02d:00:00Z", local.maintenance_hour) + day_of_the_week = var.maintenance_day + time = format("%02d:00:00Z", var.maintenance_hour) } api_subnet_allow_list = local.api_subnet_allow_list } @@ -52,7 +52,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = 100 - public_ips = local.public_ip_pool_zone1 != null ? slice(local.public_ip_pool_zone1[count.index], 0, local.node_count + 1) : [] + public_ips = local.public_ip_pools != null ? local.public_ip_pools[each.key] : [] auto_scaling { min_node_count = each.value.min_node_count @@ -69,14 +69,14 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { #---- resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { - for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}${np.nodepool_index}" => np if np.auto_scaling == true} + for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}${np.nodepool_index}" => np if np.auto_scaling == false} availability_zone = each.value.availabilityzone #for_each = { for k, v in var.custom_nodepools : k => v if var.auto_scaling } #for_each = { for k in compact([for k, v in var.mymap: v.condition ? k : ""]): k => var.mymap[k] } #conditional create is just another count, if auto_scaling=true set count to nodepools_per_zone_count #for_each = { for pool in var.custom_nodepools : pool.site_name => pool if var.environment != "prod"} #count = each.value.nodepool_per_zone_count - name = "${local.cluster_name}-${each.value.availability_zone}-nodepool${count.index}-${each.value.purpose}" + name = each.key k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version allow_replace = var.allow_node_pool_replacement # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks @@ -101,8 +101,8 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { } maintenance_window { - day_of_the_week = (local.maintenance_hour + 1 + count.index * 4) < 24 ? local.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, local.maintenance_day, null) - time = format("%02d:00:00Z", (local.maintenance_hour + 1 + 2 + count.index * 4) % 24) + day_of_the_week = (each.value.maintenance_hour + 1 + count.index * 4) < 24 ? 
each.value.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, each.value.maintenance_day, null) + time = format("%02d:00:00Z", (each.value.maintenance_hour + 1 + 2 + count.index * 4) % 24) } datacenter_id = var.datacenter_id @@ -113,7 +113,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = 100 - public_ips = local.public_ip_pool_zone1 != null ? slice(local.public_ip_pool_zone1[count.index], 0, local.node_count + 1) : [] + public_ips = local.public_ip_pools != null ? local.public_ip_pools[each.key] : [] } #---- @@ -359,7 +359,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { # cores_count = local.core_count # ram_size = local.ram_size # storage_size = 100 -# public_ips = local.public_ip_pool_zone2 != null ? slice(local.public_ip_pool_zone2[count.index], 0, local.node_count + 1) : [] +# public_ips = local.public_ip_pool_legacy != null ? slice(local.public_ip_pool_legacy[index(keys(local.nodepool_per_zone_creator), each.key)], 0, each.value.node_count + 1) : [] # #Ignore node count changes because of autoscaling to avoid unneeded updates # lifecycle { # ignore_changes = [ @@ -368,16 +368,17 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { # } # } -resource "ionoscloud_ipblock" "ippools_zone1" { - count = var.create_public_ip_pools ? var.nodepool_per_zone_count : 0 - name = "${local.cluster_name}-zone1-nodepool-${count.index}" +resource "ionoscloud_ipblock" "ippools_scaling" { + for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}${np.nodepool_index}" => np } + name = each.key location = var.datacenter_location - size = var.node_count + 1 + size = value.each.auto_scaling ? each.value.max_node_count + 1 : each.value.node_count + 1 } -resource "ionoscloud_ipblock" "ippools_zone2" { - count = var.create_public_ip_pools ? var.nodepool_per_zone_count : 0 - name = "${local.cluster_name}-zone2-nodepool-${count.index}" - location = var.datacenter_location - size = var.node_count + 1 -} +# resource "ionoscloud_ipblock" "ippools_legacy" { +# for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}${np.nodepool_index}" => np if np.auto_scaling == false} +# #count = var.create_public_ip_pools ? 
var.nodepool_per_zone_count : 0 +# name = each.key +# location = var.datacenter_location +# size = each.value.node_count + 1 +# } diff --git a/modules/ionos-k8s-cluster/output.tf b/modules/ionos-k8s-cluster/output.tf index d09fe75..cbd065d 100644 --- a/modules/ionos-k8s-cluster/output.tf +++ b/modules/ionos-k8s-cluster/output.tf @@ -7,9 +7,9 @@ output "cluster_k8s_version" { output "cluster_id" { value = ionoscloud_k8s_cluster.cluster.id } -output "nodepool_zone1_id" { - value = ionoscloud_k8s_node_pool.nodepool_zone1[*].id -} -output "nodepool_zone2_id" { - value = ionoscloud_k8s_node_pool.nodepool_zone2[*].id -} \ No newline at end of file +# output "nodepool_zone1_id" { +# value = ionoscloud_k8s_node_pool.nodepool_zone1[*].id +# } +# output "nodepool_zone2_id" { +# value = ionoscloud_k8s_node_pool.nodepool_zone2[*].id +# } \ No newline at end of file diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index 2b88f57..4c45a51 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -55,6 +55,11 @@ variable "nodepool_per_zone_count" { default = 0 } +variable "public_ip_pools" { + type = list(list(string)) + default = null +} + variable "public_ip_pool_zone1" { type = list(list(string)) default = null @@ -134,8 +139,8 @@ variable "custom_nodepools" { core_count = number purpose = string availabilityzones = list(string) - datacenter_location = string allow_node_pool_replacement = bool + public_ip_pool = list(list(string)) associated_lans = list(object({ id = number routes_list = list(any) @@ -154,7 +159,6 @@ variable "custom_nodepools" { purpose = "legacy" availabilityzones = ["ZONE_1", "ZONE_2"] allow_node_pool_replacement = null - datacenter_location = null associated_lans = null maintenance_day = null maintenance_hour = null From a3afb82284cb9fa3168c728a070673f1bcad4678 Mon Sep 17 00:00:00 2001 From: marhode Date: Fri, 3 Nov 2023 14:12:34 +0100 Subject: [PATCH 09/94] Fix variables in nodepool object --- modules/ionos-k8s-cluster/variables.tf | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index 4c45a51..658288e 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -140,7 +140,6 @@ variable "custom_nodepools" { purpose = string availabilityzones = list(string) allow_node_pool_replacement = bool - public_ip_pool = list(list(string)) associated_lans = list(object({ id = number routes_list = list(any) @@ -151,7 +150,7 @@ variable "custom_nodepools" { ) description = "This object describes nodepool configurations for dynamic creation of nodepools with a specific purpose and resources." 
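  # Example shape of a caller-supplied entry (illustrative values only; with a plain
  # object() type every declared attribute still has to be set, using null wherever the
  # legacy fallback from locals.tf should apply):
  #   custom_nodepools = [{
  #     purpose                     = "scaling"
  #     auto_scaling                = true
  #     min_node_count              = 1
  #     max_node_count              = 5
  #     node_count                  = null
  #     nodepool_per_zone_count     = 1
  #     ram_size                    = 16384
  #     core_count                  = 4
  #     availabilityzones           = ["ZONE_1"]
  #     allow_node_pool_replacement = null
  #     associated_lans             = null
  #     maintenance_day             = null
  #     maintenance_hour            = null
  #   }]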
default = [{ - name = "Test" + name = "Legacy" nodepool_per_zone_count = null node_count = null ram_size = null From 8c72f614b05227718875a5fcfd878402df26f3c5 Mon Sep 17 00:00:00 2001 From: marhode Date: Fri, 3 Nov 2023 14:25:42 +0100 Subject: [PATCH 10/94] Fixed typo --- modules/ionos-k8s-cluster/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 147136d..0f2e06f 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -372,7 +372,7 @@ resource "ionoscloud_ipblock" "ippools_scaling" { for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}${np.nodepool_index}" => np } name = each.key location = var.datacenter_location - size = value.each.auto_scaling ? each.value.max_node_count + 1 : each.value.node_count + 1 + size = each.value.auto_scaling ? each.value.max_node_count + 1 : each.value.node_count + 1 } # resource "ionoscloud_ipblock" "ippools_legacy" { From 471d36ffb52ede16fd07bd372ad3aa5af7bd5cf7 Mon Sep 17 00:00:00 2001 From: marhode Date: Fri, 3 Nov 2023 14:46:03 +0100 Subject: [PATCH 11/94] Removed count for now --- modules/ionos-k8s-cluster/main.tf | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 0f2e06f..f216cc9 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -40,8 +40,8 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { } #TODO we cant use count.index anymore maintenance_window { - day_of_the_week = (each.value.maintenance_hour + 1 + count.index * 4) < 24 ? each.value.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, each.value.maintenance_day, null) - time = format("%02d:00:00Z", (each.value.maintenance_hour + 1 + count.index * 4) % 24) + day_of_the_week = (each.value.maintenance_hour + 1 + 1 * 4) < 24 ? each.value.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, each.value.maintenance_day, null) + time = format("%02d:00:00Z", (each.value.maintenance_hour + 1 + 1 * 4) % 24) } datacenter_id = var.datacenter_id @@ -101,8 +101,8 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { } maintenance_window { - day_of_the_week = (each.value.maintenance_hour + 1 + count.index * 4) < 24 ? each.value.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, each.value.maintenance_day, null) - time = format("%02d:00:00Z", (each.value.maintenance_hour + 1 + 2 + count.index * 4) % 24) + day_of_the_week = (each.value.maintenance_hour + 1 + 1 * 4) < 24 ? 
each.value.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, each.value.maintenance_day, null) + time = format("%02d:00:00Z", (each.value.maintenance_hour + 1 + 2 * 4) % 24) } datacenter_id = var.datacenter_id From 25893ed9c7a566c4870a3774ab68331c0cdd40df Mon Sep 17 00:00:00 2001 From: marhode Date: Mon, 6 Nov 2023 10:40:02 +0100 Subject: [PATCH 12/94] Fixes towards legacy object creation to be filled with correct variables --- modules/ionos-k8s-cluster/locals.tf | 24 ++- modules/ionos-k8s-cluster/main.tf | 269 ++----------------------- modules/ionos-k8s-cluster/variables.tf | 41 +--- 3 files changed, 38 insertions(+), 296 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 0d64371..b3c9544 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -22,20 +22,26 @@ locals { #Loop through our nodepool list to detect empty values and fill them with legacy values #Only required for downward compatibility and legacy nodepools custom_nodepools = {for np in var.custom_nodepools : np => { - np.nodepool_per_zone_count = np.nodepool_per_zone_count != null ? np.nodepool_per_zone_count : var.nodepool_per_zone_count - np.node_count = np.node_count != null ? np.node_count : var.node_count - np.ram_size = np.ram_size != null ? np.ram_size : var.ram_size - np.core_count = np.core_count != null ? np.core_count : var.core_count - np.allow_node_pool_replacement = np.allow_node_pool_replacement != null ? np.allow_node_pool_replacement : var.allow_node_pool_replacement - np.associated_lans = np.associated_lans != null ? np.associated_lans : var.associated_lans - np.maintenance_day = np.maintenance_day != null ? np.maintenance_day : var.maintenance_day - np.maintenance_hour = np.maintenance_hour != null ? np.maintenance_hour : var.maintenance_hour + name = np.name + purpose = np.purpose + auto_scaling = np.auto_scaling + min_node_count = np.min_node_count + max_node_count = np.max_node_count + availability_zones = np.availability_zones + nodepool_per_zone_count = np.nodepool_per_zone_count != null ? np.nodepool_per_zone_count : var.nodepool_per_zone_count + node_count = np.node_count != null ? np.node_count : var.node_count + ram_size = np.ram_size != null ? np.ram_size : var.ram_size + core_count = np.core_count != null ? np.core_count : var.core_count + allow_node_pool_replacement = np.allow_node_pool_replacement != null ? np.allow_node_pool_replacement : var.allow_node_pool_replacement + associated_lans = np.associated_lans != null ? np.associated_lans : var.associated_lans + maintenance_day = np.maintenance_day != null ? np.maintenance_day : var.maintenance_day + maintenance_hour = np.maintenance_hour != null ? np.maintenance_hour : var.maintenance_hour } } #availabilityzone_split duplicates objects with each of their Availability zones once. if [ZONE1, ZONE2] we get 2 objects with one of those each. 
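  # Net effect of the normalization above (illustrative): an entry that only fills in
  # purpose, auto_scaling, min_node_count, max_node_count and availability_zones, and
  # leaves the remaining attributes null, comes out with node_count, ram_size, core_count,
  # associated_lans and the maintenance settings copied from the corresponding
  # module-level variables, so existing callers keep their old defaults.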
- availabilityzone_split = toset(flatten([for n in local.custom_nodepools : [for x in n.availabilityzones : merge(n,{availabilityzone = x})] ])) + availabilityzone_split = toset(flatten([for n in local.custom_nodepools : [for x in n.availability_zones : merge(n,{availabilityzone = x})] ])) #nodepool_per_zone_creator this duplicates the objects in each availability zone to the amount of nodepool_per_zone_count nodepool_per_zone_creator = toset(flatten([for n in local.availabilityzone_split : [for x in range(n.nodepool_per_zone_count) : merge(n,{nodepool_index = x})] ])) } diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index f216cc9..0f56bbb 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -38,7 +38,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { } } } - #TODO we cant use count.index anymore + #TODO we cant use count.index anymore and need a proper solution: + 1 + count.index * 4 maintenance_window { day_of_the_week = (each.value.maintenance_hour + 1 + 1 * 4) < 24 ? each.value.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, each.value.maintenance_day, null) time = format("%02d:00:00Z", (each.value.maintenance_hour + 1 + 1 * 4) % 24) @@ -116,257 +116,6 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { public_ips = local.public_ip_pools != null ? local.public_ip_pools[each.key] : [] } -#---- -#Node Pool 1 -#---- - -# resource "ionoscloud_k8s_node_pool" "nodepool_zone1" { -# count = local.nodepool_per_zone_count -# availability_zone = "ZONE_1" -# name = "${local.cluster_name}-zone1-nodepool-${count.index}" -# k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version -# allow_replace = var.allow_node_pool_replacement -# # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks -# # it iterates through the list of var.associated_lans and sets the appropriate lan id -# # it also sets one or multiple route to the lan, if a not empty entry exists in routes_list(var.associated_lans) -# dynamic "lans" { -# for_each = var.associated_lans -# content { -# id = lans.value["id"] -# dynamic "routes" { -# # if there is an entry in the routes_list, iterate through the values in the routes_list to create the routes -# # lans.key = works like count.index, returns the iteration number of current lan -> 0,1,2,3,4... -# for_each = var.associated_lans[lans.key].routes_list == null || length(var.associated_lans[lans.key].routes_list[0]) == 0 ? [] : var.associated_lans[lans.key].routes_list - -# content { -# # graps the values from the objects of the routes_list -# network = routes.value["network"] -# gateway_ip = routes.value["gateway_ip"] -# } -# } -# } -# } - -# maintenance_window { -# # The maintenance of the nodepools starts 1 hour after the cluster (control plane) -# # The maintenance window for one nodepool is 4 hours long. They are shifted by 4 hours so the don't overlap (in the same zone) -# # Example: -# # Maintenance start (local.maintenance_hour): 2am -# # Control plane (see cluster resource) starts at 2am -# # Maintenance of ...-zone1-nodepool-0 starts at 3am (+1h) -# # Maintenance of ...-zone1-nodepool-1 starts at 7am (+4h) -# # Maintenance of ...-zone1-nodepool-2 starts at 11am (+4h) ... 
-# # if the number of hour exceeds 24, the maintenance shall start on the next day -# day_of_the_week = (local.maintenance_hour + 1 + count.index * 4) < 24 ? local.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, local.maintenance_day, null) -# time = format("%02d:00:00Z", (local.maintenance_hour + 1 + count.index * 4) % 24) -# } - -# datacenter_id = var.datacenter_id -# k8s_cluster_id = ionoscloud_k8s_cluster.cluster.id -# cpu_family = local.cpu_family -# storage_type = "SSD" -# node_count = local.node_count -# cores_count = local.core_count -# ram_size = local.ram_size -# storage_size = 100 -# public_ips = local.public_ip_pool_zone1 != null ? slice(local.public_ip_pool_zone1[count.index], 0, local.node_count + 1) : [] -# } - -# #---- -# #Node Pool 1 autoscaling -# #node_count adjusted to act as a 2 in 1 nodepool with partially enabled autoscaling activated -# #---- - -# resource "ionoscloud_k8s_node_pool" "nodepool_zone1_autoscaling" { -# count = var.auto_scaling ? 1 : 0 -# availability_zone = "ZONE_1" -# name = "${local.cluster_name}-zone1-nodepool-autoscaling-${count.index}" -# k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version -# allow_replace = var.allow_node_pool_replacement -# # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks -# # it iterates through the list of var.associated_lans and sets the appropriate lan id -# # it also sets one or multiple route to the lan, if a not empty entry exists in routes_list(var.associated_lans) -# dynamic "lans" { -# for_each = var.associated_lans -# content { -# id = lans.value["id"] -# dynamic "routes" { -# # if there is an entry in the routes_list, iterate through the values in the routes_list to create the routes -# # lans.key = works like count.index, returns the iteration number of current lan -> 0,1,2,3,4... -# for_each = var.associated_lans[lans.key].routes_list == null || length(var.associated_lans[lans.key].routes_list[0]) == 0 ? [] : var.associated_lans[lans.key].routes_list - -# content { -# # graps the values from the objects of the routes_list -# network = routes.value["network"] -# gateway_ip = routes.value["gateway_ip"] -# } -# } -# } -# } - -# maintenance_window { -# # The maintenance of the nodepools starts 1 hour after the cluster (control plane) -# # The maintenance window for one nodepool is 4 hours long. They are shifted by 4 hours so the don't overlap (in the same zone) -# # Example: -# # Maintenance start (local.maintenance_hour): 2am -# # Control plane (see cluster resource) starts at 2am -# # Maintenance of ...-zone1-nodepool-0 starts at 3am (+1h) -# # Maintenance of ...-zone1-nodepool-1 starts at 7am (+4h) -# # Maintenance of ...-zone1-nodepool-2 starts at 11am (+4h) ... -# # if the number of hour exceeds 24, the maintenance shall start on the next day -# day_of_the_week = (local.maintenance_hour + 1 + count.index * 4) < 24 ? 
local.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, local.maintenance_day, null) -# time = format("%02d:00:00Z", (local.maintenance_hour + 1 + count.index * 4) % 24) -# } - -# datacenter_id = var.datacenter_id -# k8s_cluster_id = ionoscloud_k8s_cluster.cluster.id -# cpu_family = local.cpu_family -# storage_type = "SSD" -# auto_scaling{ -# min_node_count = 1 -# max_node_count = var.max_node_count - local.node_count -# } -# node_count = local.node_count -# cores_count = local.core_count -# ram_size = local.ram_size -# storage_size = 100 -# public_ips = local.public_ip_pool_zone1 != null ? slice(local.public_ip_pool_zone1[count.index], 0, local.node_count + 1) : [] -# #Ignore node count changes because of autoscaling to avoid unneeded updates -# lifecycle { -# ignore_changes = [ -# node_count -# ] -# } -# } - -# #---- -# #Node Pool 2 -# #---- - -# resource "ionoscloud_k8s_node_pool" "nodepool_zone2" { -# count = local.nodepool_per_zone_count - -# availability_zone = "ZONE_2" -# name = "${local.cluster_name}-zone2-nodepool-${count.index}" -# k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version -# allow_replace = var.allow_node_pool_replacement -# # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks -# # it iterates through the list of var.associated_lans and sets the appropriate lan id -# # it also sets one or multiple route to the lan, if a not empty entry exists in routes_list(var.associated_lans) -# dynamic "lans" { -# for_each = var.associated_lans -# content { -# id = lans.value["id"] -# dynamic "routes" { -# # if there is an entry in the routes_list, iterate through the values in the routes_list to create the routes -# # lans.key = works like count.index, returns the iteration number of current lan -> 0,1,2,3,4... -# for_each = var.associated_lans[lans.key].routes_list == null || length(var.associated_lans[lans.key].routes_list[0]) == 0 ? [] : var.associated_lans[lans.key].routes_list - -# content { -# # graps the values from the objects of the routes_list -# network = routes.value["network"] -# gateway_ip = routes.value["gateway_ip"] -# } -# } -# } -# } - -# maintenance_window { -# # The maintenance of the nodepools starts 1 hour after the cluster (control plane) -# # The maintenance window for one nodepool is 4 hours long. They are shifted by 4 hours so the don't overlap (in the same zone) -# # Additionally the zones are shifted by 2 hours, so this one starts 2 hours later -# # Example: -# # Maintenance start (local.maintenance_hour): 2am -# # Control plane (see cluster resource) starts at 2am -# # Zone1 (see nodepool_zone1 resource) starts at 3am (+1h) -# # Maintenance of ...-zone2-nodepool-0 starts at 5am (+2h) -# # Maintenance of ...-zone2-nodepool-1 starts at 9am (+4h) -# # Maintenance of ...-zone2-nodepool-2 starts at 1pm (+4h) ... -# # if the number of hour exceeds 24, the maintenance shall start on the next day -# day_of_the_week = (local.maintenance_hour + 1 + 2 + count.index * 4) < 24 ? 
local.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, local.maintenance_day, null) -# time = format("%02d:00:00Z", (local.maintenance_hour + 1 + 2 + count.index * 4) % 24) -# } - -# datacenter_id = var.datacenter_id -# k8s_cluster_id = ionoscloud_k8s_cluster.cluster.id -# cpu_family = local.cpu_family -# storage_type = "SSD" -# node_count = local.node_count -# cores_count = local.core_count -# ram_size = local.ram_size -# storage_size = 100 -# public_ips = local.public_ip_pool_zone2 != null ? slice(local.public_ip_pool_zone2[count.index], 0, local.node_count + 1) : [] -# } - -# #---- -# #Node Pool 2 autoscaling -# #node_count adjusted to act as a 2 in 1 nodepool with partially enabled autoscaling activated -# #---- - -# resource "ionoscloud_k8s_node_pool" "nodepool_zone2_autoscaling" { -# count = var.auto_scaling ? 1 : 0 - -# availability_zone = "ZONE_2" -# name = "${local.cluster_name}-zone2-nodepool-autoscaling-${count.index}" -# k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version -# allow_replace = var.allow_node_pool_replacement -# # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks -# # it iterates through the list of var.associated_lans and sets the appropriate lan id -# # it also sets one or multiple route to the lan, if a not empty entry exists in routes_list(var.associated_lans) -# dynamic "lans" { -# for_each = var.associated_lans -# content { -# id = lans.value["id"] -# dynamic "routes" { -# # if there is an entry in the routes_list, iterate through the values in the routes_list to create the routes -# # lans.key = works like count.index, returns the iteration number of current lan -> 0,1,2,3,4... -# for_each = var.associated_lans[lans.key].routes_list == null || length(var.associated_lans[lans.key].routes_list[0]) == 0 ? [] : var.associated_lans[lans.key].routes_list - -# content { -# # graps the values from the objects of the routes_list -# network = routes.value["network"] -# gateway_ip = routes.value["gateway_ip"] -# } -# } -# } -# } - -# maintenance_window { -# # The maintenance of the nodepools starts 1 hour after the cluster (control plane) -# # The maintenance window for one nodepool is 4 hours long. They are shifted by 4 hours so the don't overlap (in the same zone) -# # Additionally the zones are shifted by 2 hours, so this one starts 2 hours later -# # Example: -# # Maintenance start (local.maintenance_hour): 2am -# # Control plane (see cluster resource) starts at 2am -# # Zone1 (see nodepool_zone1 resource) starts at 3am (+1h) -# # Maintenance of ...-zone2-nodepool-0 starts at 5am (+2h) -# # Maintenance of ...-zone2-nodepool-1 starts at 9am (+4h) -# # Maintenance of ...-zone2-nodepool-2 starts at 1pm (+4h) ... -# # if the number of hour exceeds 24, the maintenance shall start on the next day -# day_of_the_week = (local.maintenance_hour + 1 + 2 + count.index * 4) < 24 ? 
local.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, local.maintenance_day, null) -# time = format("%02d:00:00Z", (local.maintenance_hour + 1 + 2 + count.index * 4) % 24) -# } - -# datacenter_id = var.datacenter_id -# k8s_cluster_id = ionoscloud_k8s_cluster.cluster.id -# cpu_family = local.cpu_family -# storage_type = "SSD" -# auto_scaling{ -# min_node_count = 1 -# max_node_count = var.max_node_count - local.node_count -# } -# node_count = local.node_count -# cores_count = local.core_count -# ram_size = local.ram_size -# storage_size = 100 -# public_ips = local.public_ip_pool_legacy != null ? slice(local.public_ip_pool_legacy[index(keys(local.nodepool_per_zone_creator), each.key)], 0, each.value.node_count + 1) : [] -# #Ignore node count changes because of autoscaling to avoid unneeded updates -# lifecycle { -# ignore_changes = [ -# node_count -# ] -# } -# } resource "ionoscloud_ipblock" "ippools_scaling" { for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}${np.nodepool_index}" => np } @@ -375,10 +124,16 @@ resource "ionoscloud_ipblock" "ippools_scaling" { size = each.value.auto_scaling ? each.value.max_node_count + 1 : each.value.node_count + 1 } -# resource "ionoscloud_ipblock" "ippools_legacy" { -# for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}${np.nodepool_index}" => np if np.auto_scaling == false} -# #count = var.create_public_ip_pools ? var.nodepool_per_zone_count : 0 -# name = each.key +# resource "ionoscloud_ipblock" "ippools_zone1" { +# count = var.create_public_ip_pools ? var.nodepool_per_zone_count : 0 +# name = "${local.cluster_name}-zone1-nodepool-${count.index}" +# location = var.datacenter_location +# size = var.node_count + 1 +# } + +# resource "ionoscloud_ipblock" "ippools_zone2" { +# count = var.create_public_ip_pools ? var.nodepool_per_zone_count : 0 +# name = "${local.cluster_name}-zone2-nodepool-${count.index}" # location = var.datacenter_location -# size = each.value.node_count + 1 +# size = var.node_count + 1 # } diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index 658288e..1bd635c 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -60,15 +60,15 @@ variable "public_ip_pools" { default = null } -variable "public_ip_pool_zone1" { - type = list(list(string)) - default = null -} +# variable "public_ip_pool_zone1" { +# type = list(list(string)) +# default = null +# } -variable "public_ip_pool_zone2" { - type = list(list(string)) - default = null -} +# variable "public_ip_pool_zone2" { +# type = list(list(string)) +# default = null +# } variable "create_public_ip_pools" { type = bool @@ -113,19 +113,6 @@ variable "storage_size" { default = 100 } - -# variable "auto_scaling" { -# type = bool -# description = "This value is used activate auto scaling the k8s cluster node pools." -# default = false -# } - -# variable "min_node_count" { -# type = number -# description = "This value is used to set the minimum number of nodes for auto scaling the k8s cluster node pools." 
-# default = null -# } - #It is required to define each resource per availability zone on it's own (One definition for zone 1 and one definition for zone 2) variable "custom_nodepools" { type = list(object({ @@ -138,7 +125,7 @@ variable "custom_nodepools" { ram_size = number core_count = number purpose = string - availabilityzones = list(string) + availability_zones = list(string) allow_node_pool_replacement = bool associated_lans = list(object({ id = number @@ -156,17 +143,11 @@ variable "custom_nodepools" { ram_size = null core_count = null purpose = "legacy" - availabilityzones = ["ZONE_1", "ZONE_2"] + availability_zones = ["ZONE_1", "ZONE_2"] allow_node_pool_replacement = null associated_lans = null maintenance_day = null maintenance_hour = null }] -} - -# variable "max_node_count" { -# type = number -# description = "This value is used to set the maximum number of nodes for auto scaling the k8s cluster node pools." -# default = null -# } \ No newline at end of file +} \ No newline at end of file From 4008c1d4400e42f7508139465c586641cb5d35d5 Mon Sep 17 00:00:00 2001 From: marhode Date: Mon, 6 Nov 2023 12:18:34 +0100 Subject: [PATCH 13/94] Changed definition of custom_nodepools in locals --- modules/ionos-k8s-cluster/locals.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index b3c9544..05f312d 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -21,7 +21,7 @@ locals { #Loop through our nodepool list to detect empty values and fill them with legacy values #Only required for downward compatibility and legacy nodepools - custom_nodepools = {for np in var.custom_nodepools : np => { + custom_nodepools = [ for np in var.custom_nodepools : { name = np.name purpose = np.purpose auto_scaling = np.auto_scaling @@ -37,7 +37,7 @@ locals { maintenance_day = np.maintenance_day != null ? np.maintenance_day : var.maintenance_day maintenance_hour = np.maintenance_hour != null ? np.maintenance_hour : var.maintenance_hour } - } + ] #availabilityzone_split duplicates objects with each of their Availability zones once. if [ZONE1, ZONE2] we get 2 objects with one of those each. From ba91cb8aba0842e007ff760581c5e71322fe23a8 Mon Sep 17 00:00:00 2001 From: marhode Date: Mon, 6 Nov 2023 12:31:57 +0100 Subject: [PATCH 14/94] Typo in availabilityzone fixed --- modules/ionos-k8s-cluster/locals.tf | 2 +- modules/ionos-k8s-cluster/main.tf | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 05f312d..8277942 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -41,7 +41,7 @@ locals { #availabilityzone_split duplicates objects with each of their Availability zones once. if [ZONE1, ZONE2] we get 2 objects with one of those each. 
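  # For orientation (sketch only, the cluster name is invented): the resources in main.tf
  # build their for_each keys as
  #   "${local.cluster_name}-${np.availability_zone}-${np.purpose}${np.nodepool_index}"
  # so a cluster named "demo" with purpose "scaling", zones ZONE_1/ZONE_2 and
  # nodepool_per_zone_count = 2 ends up with the keys
  #   demo-ZONE_1-scaling0, demo-ZONE_1-scaling1, demo-ZONE_2-scaling0, demo-ZONE_2-scaling1
  # which are also used as the nodepool and IP block names (name = each.key).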
- availabilityzone_split = toset(flatten([for n in local.custom_nodepools : [for x in n.availability_zones : merge(n,{availabilityzone = x})] ])) + availabilityzone_split = toset(flatten([for n in local.custom_nodepools : [for x in n.availability_zones : merge(n,{availability_zone = x})] ])) #nodepool_per_zone_creator this duplicates the objects in each availability zone to the amount of nodepool_per_zone_count nodepool_per_zone_creator = toset(flatten([for n in local.availabilityzone_split : [for x in range(n.nodepool_per_zone_count) : merge(n,{nodepool_index = x})] ])) } diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 0f56bbb..a93abff 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -14,7 +14,7 @@ resource "ionoscloud_k8s_cluster" "cluster" { resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}${np.nodepool_index}" => np if np.auto_scaling == true} - availability_zone = each.value.availabilityzone + availability_zone = each.value.availability_zone name = each.key k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version allow_replace = each.value.allow_node_pool_replacement @@ -70,7 +70,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}${np.nodepool_index}" => np if np.auto_scaling == false} - availability_zone = each.value.availabilityzone + availability_zone = each.value.availability_zone #for_each = { for k, v in var.custom_nodepools : k => v if var.auto_scaling } #for_each = { for k in compact([for k, v in var.mymap: v.condition ? k : ""]): k => var.mymap[k] } #conditional create is just another count, if auto_scaling=true set count to nodepools_per_zone_count From 4986ce17349855120ddf4d7e24dc3e589d9c7df2 Mon Sep 17 00:00:00 2001 From: marhode Date: Mon, 6 Nov 2023 15:34:43 +0100 Subject: [PATCH 15/94] Rename ippools resource --- modules/ionos-k8s-cluster/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index a93abff..63b890d 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -117,7 +117,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { } -resource "ionoscloud_ipblock" "ippools_scaling" { +resource "ionoscloud_ipblock" "ippools" { for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}${np.nodepool_index}" => np } name = each.key location = var.datacenter_location From 65d0b3179d4178db9c1275769423a8c8b80920bb Mon Sep 17 00:00:00 2001 From: marhode Date: Mon, 6 Nov 2023 15:42:53 +0100 Subject: [PATCH 16/94] Fixed naming of ippools --- modules/ionos-k8s-cluster/locals.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 8277942..04a96de 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -14,7 +14,7 @@ locals { ram_size = var.ram_size != null ? var.ram_size : 16384 # The number of nodepools per zone. nodepool_per_zone_count = var.nodepool_per_zone_count - public_ip_pools = var.create_public_ip_pools ? 
ionoscloud_ipblock.ippools_scaling[*].ips : var.public_ip_pools + public_ip_pools = var.create_public_ip_pools ? ionoscloud_ipblock.ippools[*].ips : var.public_ip_pools #maintenance_day = var.maintenance_day #maintenance_hour = var.maintenance_hour api_subnet_allow_list = var.api_subnet_allow_list From 8482d5ae27847b665f4292d20d2581de95d41572 Mon Sep 17 00:00:00 2001 From: marhode Date: Tue, 7 Nov 2023 09:19:18 +0100 Subject: [PATCH 17/94] Enable to use both legacy and scaling nodepools --- modules/ionos-k8s-cluster/locals.tf | 38 ++++++++++++++++---------- modules/ionos-k8s-cluster/main.tf | 14 ---------- modules/ionos-k8s-cluster/variables.tf | 5 ++++ 3 files changed, 28 insertions(+), 29 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 04a96de..194ebb3 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -3,25 +3,33 @@ locals { # Valid choices depend on the datacenter location: # de/txl, de/fra: INTEL_SKYLAKE cpu_family = var.cpu_family - # Number of nodes per nodepool. - # Note that one nodepool is created in each availability zone. - # Example: With 2 zones, the actual total node count is twice as high as the number stated here. - node_count = var.node_count - #node_count = var.custom_nodepools != null ? (var.min_node_count - 1) : (var.node_count != null ? var.node_count : 1) #What should the default node count be? - # This cannot be changed, after the nodepool is created, because all worker nodes must be equal at any time. - #core_count = var.core_count - # This cannot be changed, after the nodepool is created, because all worker nodes must be equal at any time. - ram_size = var.ram_size != null ? var.ram_size : 16384 - # The number of nodepools per zone. - nodepool_per_zone_count = var.nodepool_per_zone_count + public_ip_pools = var.create_public_ip_pools ? ionoscloud_ipblock.ippools[*].ips : var.public_ip_pools - #maintenance_day = var.maintenance_day - #maintenance_hour = var.maintenance_hour api_subnet_allow_list = var.api_subnet_allow_list + #Create legacy object for possible merging into the nodepool list + legacy_onject = [{ + name = "Legacy" + nodepool_per_zone_count = null + node_count = null + ram_size = null + core_count = null + purpose = "legacy" + availability_zones = ["ZONE_1", "ZONE_2"] + allow_node_pool_replacement = null + associated_lans = null + maintenance_day = null + maintenance_hour = null + }] + + #check if both legacy and scaling should be used, if so merge legacy object into the object list if needed + #if false: No need to do anything because it is either legacy or scaling + #if true: check if first object is legacy, if not only scaling objects are in the list => merge legacy into it + legacy_check = enable_legacy_and_scaling == false ? var.custom_nodepools : (var.custom_nodepools[0].purpose != "legacy" ? 
merge(var.custom_nodepools, legacy_onject) : var.custom_nodepools) + #Loop through our nodepool list to detect empty values and fill them with legacy values - #Only required for downward compatibility and legacy nodepools - custom_nodepools = [ for np in var.custom_nodepools : { + #Only required for downward compatibility and legacy nodepools (If no downward compatibility is required just use var.custom_nodepools to loop over) + custom_nodepools = [ for np in local.legacy_check : { name = np.name purpose = np.purpose auto_scaling = np.auto_scaling diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 63b890d..730f686 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -123,17 +123,3 @@ resource "ionoscloud_ipblock" "ippools" { location = var.datacenter_location size = each.value.auto_scaling ? each.value.max_node_count + 1 : each.value.node_count + 1 } - -# resource "ionoscloud_ipblock" "ippools_zone1" { -# count = var.create_public_ip_pools ? var.nodepool_per_zone_count : 0 -# name = "${local.cluster_name}-zone1-nodepool-${count.index}" -# location = var.datacenter_location -# size = var.node_count + 1 -# } - -# resource "ionoscloud_ipblock" "ippools_zone2" { -# count = var.create_public_ip_pools ? var.nodepool_per_zone_count : 0 -# name = "${local.cluster_name}-zone2-nodepool-${count.index}" -# location = var.datacenter_location -# size = var.node_count + 1 -# } diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index 1bd635c..5402b02 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -113,6 +113,11 @@ variable "storage_size" { default = 100 } +variable "enable_legacy_and_scaling" { + type = bool + default = false +} + #It is required to define each resource per availability zone on it's own (One definition for zone 1 and one definition for zone 2) variable "custom_nodepools" { type = list(object({ From 873022bdc135ea9c23f401a17ab0c47f0c9b3fe7 Mon Sep 17 00:00:00 2001 From: marhode Date: Tue, 7 Nov 2023 09:29:30 +0100 Subject: [PATCH 18/94] Fixes to variable scopes --- modules/ionos-k8s-cluster/locals.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 194ebb3..5da9d5b 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -25,7 +25,7 @@ locals { #check if both legacy and scaling should be used, if so merge legacy object into the object list if needed #if false: No need to do anything because it is either legacy or scaling #if true: check if first object is legacy, if not only scaling objects are in the list => merge legacy into it - legacy_check = enable_legacy_and_scaling == false ? var.custom_nodepools : (var.custom_nodepools[0].purpose != "legacy" ? merge(var.custom_nodepools, legacy_onject) : var.custom_nodepools) + legacy_check = var.enable_legacy_and_scaling == false ? var.custom_nodepools : (var.custom_nodepools[0].purpose != "legacy" ? 
merge(var.custom_nodepools, local.legacy_onject) : var.custom_nodepools) #Loop through our nodepool list to detect empty values and fill them with legacy values #Only required for downward compatibility and legacy nodepools (If no downward compatibility is required just use var.custom_nodepools to loop over) From bdc44cea6e6ddc82994743f408fb9daf254e7dbf Mon Sep 17 00:00:00 2001 From: marhode Date: Tue, 7 Nov 2023 09:51:07 +0100 Subject: [PATCH 19/94] Fix to the legacy object for merging --- modules/ionos-k8s-cluster/locals.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 5da9d5b..73f2879 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -25,7 +25,7 @@ locals { #check if both legacy and scaling should be used, if so merge legacy object into the object list if needed #if false: No need to do anything because it is either legacy or scaling #if true: check if first object is legacy, if not only scaling objects are in the list => merge legacy into it - legacy_check = var.enable_legacy_and_scaling == false ? var.custom_nodepools : (var.custom_nodepools[0].purpose != "legacy" ? merge(var.custom_nodepools, local.legacy_onject) : var.custom_nodepools) + legacy_check = var.enable_legacy_and_scaling == false ? var.custom_nodepools : (var.custom_nodepools[0].purpose != "legacy" ? merge(var.custom_nodepools, tolist(local.legacy_onject)) : var.custom_nodepools) #Loop through our nodepool list to detect empty values and fill them with legacy values #Only required for downward compatibility and legacy nodepools (If no downward compatibility is required just use var.custom_nodepools to loop over) From d0f1965c3a99798423c4629c75e9725aa1789097 Mon Sep 17 00:00:00 2001 From: marhode Date: Tue, 7 Nov 2023 10:06:21 +0100 Subject: [PATCH 20/94] Test for merge of legacy and scaling lists --- modules/ionos-k8s-cluster/locals.tf | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 73f2879..91e12c9 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -8,7 +8,7 @@ locals { api_subnet_allow_list = var.api_subnet_allow_list #Create legacy object for possible merging into the nodepool list - legacy_onject = [{ + legacy_object = { name = "Legacy" nodepool_per_zone_count = null node_count = null @@ -20,12 +20,12 @@ locals { associated_lans = null maintenance_day = null maintenance_hour = null - }] + } #check if both legacy and scaling should be used, if so merge legacy object into the object list if needed #if false: No need to do anything because it is either legacy or scaling #if true: check if first object is legacy, if not only scaling objects are in the list => merge legacy into it - legacy_check = var.enable_legacy_and_scaling == false ? var.custom_nodepools : (var.custom_nodepools[0].purpose != "legacy" ? merge(var.custom_nodepools, tolist(local.legacy_onject)) : var.custom_nodepools) + legacy_check = var.enable_legacy_and_scaling == false ? var.custom_nodepools : (var.custom_nodepools[0].purpose != "legacy" ? 
merge(var.custom_nodepools, local.legacy_object) : var.custom_nodepools) #Loop through our nodepool list to detect empty values and fill them with legacy values #Only required for downward compatibility and legacy nodepools (If no downward compatibility is required just use var.custom_nodepools to loop over) From badd726020d18bcbe5072638e6371a20536ca60f Mon Sep 17 00:00:00 2001 From: marhode Date: Tue, 7 Nov 2023 10:18:00 +0100 Subject: [PATCH 21/94] Changed merge to concat of two lists --- modules/ionos-k8s-cluster/locals.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 91e12c9..094e80c 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -25,7 +25,7 @@ locals { #check if both legacy and scaling should be used, if so merge legacy object into the object list if needed #if false: No need to do anything because it is either legacy or scaling #if true: check if first object is legacy, if not only scaling objects are in the list => merge legacy into it - legacy_check = var.enable_legacy_and_scaling == false ? var.custom_nodepools : (var.custom_nodepools[0].purpose != "legacy" ? merge(var.custom_nodepools, local.legacy_object) : var.custom_nodepools) + legacy_check = var.enable_legacy_and_scaling == false ? var.custom_nodepools : (var.custom_nodepools[0].purpose != "legacy" ? concat(var.custom_nodepools, tolist(local.legacy_object)) : var.custom_nodepools) #Loop through our nodepool list to detect empty values and fill them with legacy values #Only required for downward compatibility and legacy nodepools (If no downward compatibility is required just use var.custom_nodepools to loop over) From 2e044d1c189862c9fb131ee5f6e73b460e4bbcc3 Mon Sep 17 00:00:00 2001 From: marhode Date: Tue, 7 Nov 2023 10:23:10 +0100 Subject: [PATCH 22/94] turn object tolist for concat --- modules/ionos-k8s-cluster/locals.tf | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 094e80c..743c15c 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -8,7 +8,7 @@ locals { api_subnet_allow_list = var.api_subnet_allow_list #Create legacy object for possible merging into the nodepool list - legacy_object = { + legacy_object = tolist(object({ name = "Legacy" nodepool_per_zone_count = null node_count = null @@ -20,12 +20,12 @@ locals { associated_lans = null maintenance_day = null maintenance_hour = null - } + })) #check if both legacy and scaling should be used, if so merge legacy object into the object list if needed #if false: No need to do anything because it is either legacy or scaling #if true: check if first object is legacy, if not only scaling objects are in the list => merge legacy into it - legacy_check = var.enable_legacy_and_scaling == false ? var.custom_nodepools : (var.custom_nodepools[0].purpose != "legacy" ? concat(var.custom_nodepools, tolist(local.legacy_object)) : var.custom_nodepools) + legacy_check = var.enable_legacy_and_scaling == false ? var.custom_nodepools : (var.custom_nodepools[0].purpose != "legacy" ? 
concat(var.custom_nodepools, local.legacy_object) : var.custom_nodepools) #Loop through our nodepool list to detect empty values and fill them with legacy values #Only required for downward compatibility and legacy nodepools (If no downward compatibility is required just use var.custom_nodepools to loop over) From a4387a53239c9ed136239eb81a417580d40a7fc5 Mon Sep 17 00:00:00 2001 From: marhode Date: Tue, 7 Nov 2023 10:29:07 +0100 Subject: [PATCH 23/94] Fixed syntax error --- modules/ionos-k8s-cluster/locals.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 743c15c..d6c9c57 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -8,7 +8,7 @@ locals { api_subnet_allow_list = var.api_subnet_allow_list #Create legacy object for possible merging into the nodepool list - legacy_object = tolist(object({ + legacy_object = tolist([{ name = "Legacy" nodepool_per_zone_count = null node_count = null @@ -20,7 +20,7 @@ locals { associated_lans = null maintenance_day = null maintenance_hour = null - })) + }]) #check if both legacy and scaling should be used, if so merge legacy object into the object list if needed #if false: No need to do anything because it is either legacy or scaling From 7ef35ab3f55540adbd1d2861ec3e075e5366aaf7 Mon Sep 17 00:00:00 2001 From: marhode Date: Tue, 7 Nov 2023 10:54:05 +0100 Subject: [PATCH 24/94] Test for conditional fix --- modules/ionos-k8s-cluster/locals.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index d6c9c57..7a9dfd3 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -25,7 +25,7 @@ locals { #check if both legacy and scaling should be used, if so merge legacy object into the object list if needed #if false: No need to do anything because it is either legacy or scaling #if true: check if first object is legacy, if not only scaling objects are in the list => merge legacy into it - legacy_check = var.enable_legacy_and_scaling == false ? var.custom_nodepools : (var.custom_nodepools[0].purpose != "legacy" ? concat(var.custom_nodepools, local.legacy_object) : var.custom_nodepools) + legacy_check = var.enable_legacy_and_scaling == false ? var.custom_nodepools : (var.custom_nodepools[0].purpose != "legacy" ? 
tolist(concat(var.custom_nodepools, local.legacy_object)) : var.custom_nodepools) #Loop through our nodepool list to detect empty values and fill them with legacy values #Only required for downward compatibility and legacy nodepools (If no downward compatibility is required just use var.custom_nodepools to loop over) From 41e8294d85eab96a1ab3ee45e085e44dd9785506 Mon Sep 17 00:00:00 2001 From: marhode Date: Tue, 7 Nov 2023 11:55:11 +0100 Subject: [PATCH 25/94] Try setunion to combine both object lists together --- modules/ionos-k8s-cluster/locals.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 7a9dfd3..3455946 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -25,7 +25,7 @@ locals { #check if both legacy and scaling should be used, if so merge legacy object into the object list if needed #if false: No need to do anything because it is either legacy or scaling #if true: check if first object is legacy, if not only scaling objects are in the list => merge legacy into it - legacy_check = var.enable_legacy_and_scaling == false ? var.custom_nodepools : (var.custom_nodepools[0].purpose != "legacy" ? tolist(concat(var.custom_nodepools, local.legacy_object)) : var.custom_nodepools) + legacy_check = var.enable_legacy_and_scaling == false ? var.custom_nodepools : (var.custom_nodepools[0].purpose != "legacy" ? setunion(var.custom_nodepools, local.legacy_object) : var.custom_nodepools) #Loop through our nodepool list to detect empty values and fill them with legacy values #Only required for downward compatibility and legacy nodepools (If no downward compatibility is required just use var.custom_nodepools to loop over) From fae9c1d353e63cf1f016427ef167775021f87faa Mon Sep 17 00:00:00 2001 From: marhode Date: Tue, 7 Nov 2023 11:59:34 +0100 Subject: [PATCH 26/94] Added missing variables to object --- modules/ionos-k8s-cluster/locals.tf | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 3455946..de5b75f 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -10,8 +10,12 @@ locals { #Create legacy object for possible merging into the nodepool list legacy_object = tolist([{ name = "Legacy" + auto_scaling = false nodepool_per_zone_count = null node_count = null + nodepool_per_zone_count = null + min_node_count= null + max_node_count= null ram_size = null core_count = null purpose = "legacy" From 613e6e8d0f31905da9c2d4d79c7f263c54e5ccf2 Mon Sep 17 00:00:00 2001 From: marhode Date: Tue, 7 Nov 2023 12:24:37 +0100 Subject: [PATCH 27/94] added tolist --- modules/ionos-k8s-cluster/locals.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index de5b75f..4822c3f 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -29,7 +29,7 @@ locals { #check if both legacy and scaling should be used, if so merge legacy object into the object list if needed #if false: No need to do anything because it is either legacy or scaling #if true: check if first object is legacy, if not only scaling objects are in the list => merge legacy into it - legacy_check = var.enable_legacy_and_scaling == false ? var.custom_nodepools : (var.custom_nodepools[0].purpose != "legacy" ? 
setunion(var.custom_nodepools, local.legacy_object) : var.custom_nodepools) + legacy_check = var.enable_legacy_and_scaling == false ? var.custom_nodepools : (var.custom_nodepools[0].purpose != "legacy" ? tolist(concat(var.custom_nodepools, local.legacy_object)) : var.custom_nodepools) #Loop through our nodepool list to detect empty values and fill them with legacy values #Only required for downward compatibility and legacy nodepools (If no downward compatibility is required just use var.custom_nodepools to loop over) From 2106b919297f2e0e39a13d12b7415058f9c1f212 Mon Sep 17 00:00:00 2001 From: marhode Date: Tue, 7 Nov 2023 12:37:27 +0100 Subject: [PATCH 28/94] Fixed associated lans --- modules/ionos-k8s-cluster/locals.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 4822c3f..76ddd6d 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -21,7 +21,7 @@ locals { purpose = "legacy" availability_zones = ["ZONE_1", "ZONE_2"] allow_node_pool_replacement = null - associated_lans = null + associated_lans = var.associated_lans maintenance_day = null maintenance_hour = null }]) From 5512de37a66278417a0dd881fbc0093afbb51433 Mon Sep 17 00:00:00 2001 From: marhode Date: Wed, 8 Nov 2023 12:05:02 +0100 Subject: [PATCH 29/94] Small readability adjustments --- modules/ionos-k8s-cluster/locals.tf | 6 +++--- modules/ionos-k8s-cluster/main.tf | 6 +++--- modules/ionos-k8s-cluster/variables.tf | 2 ++ 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 76ddd6d..dfe3bb5 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -7,7 +7,7 @@ locals { public_ip_pools = var.create_public_ip_pools ? ionoscloud_ipblock.ippools[*].ips : var.public_ip_pools api_subnet_allow_list = var.api_subnet_allow_list - #Create legacy object for possible merging into the nodepool list + #Create legacy object for possible merging into the nodepool list(Only used when both legacy and custom nodespools are in use) legacy_object = tolist([{ name = "Legacy" auto_scaling = false @@ -26,7 +26,7 @@ locals { maintenance_hour = null }]) - #check if both legacy and scaling should be used, if so merge legacy object into the object list if needed + #check if both legacy and scaling should be used, if so merge legacy object into the object list if needed (default = false) #if false: No need to do anything because it is either legacy or scaling #if true: check if first object is legacy, if not only scaling objects are in the list => merge legacy into it legacy_check = var.enable_legacy_and_scaling == false ? var.custom_nodepools : (var.custom_nodepools[0].purpose != "legacy" ? tolist(concat(var.custom_nodepools, local.legacy_object)) : var.custom_nodepools) @@ -52,7 +52,7 @@ locals { ] - #availabilityzone_split duplicates objects with each of their Availability zones once. if [ZONE1, ZONE2] we get 2 objects with one of those each. + #availabilityzone_split duplicates objects with each of their Availability zones once. if [ZONE1, ZONE2] we get 2 objects with one of those zones each. 
availabilityzone_split = toset(flatten([for n in local.custom_nodepools : [for x in n.availability_zones : merge(n,{availability_zone = x})] ])) #nodepool_per_zone_creator this duplicates the objects in each availability zone to the amount of nodepool_per_zone_count nodepool_per_zone_creator = toset(flatten([for n in local.availabilityzone_split : [for x in range(n.nodepool_per_zone_count) : merge(n,{nodepool_index = x})] ])) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 730f686..66c84d9 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -13,7 +13,7 @@ resource "ionoscloud_k8s_cluster" "cluster" { #---- resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { - for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}${np.nodepool_index}" => np if np.auto_scaling == true} + for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}-${np.nodepool_index}" => np if np.auto_scaling == true} availability_zone = each.value.availability_zone name = each.key k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version @@ -69,7 +69,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { #---- resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { - for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}${np.nodepool_index}" => np if np.auto_scaling == false} + for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}-${np.nodepool_index}" => np if np.auto_scaling == false} availability_zone = each.value.availability_zone #for_each = { for k, v in var.custom_nodepools : k => v if var.auto_scaling } #for_each = { for k in compact([for k, v in var.mymap: v.condition ? k : ""]): k => var.mymap[k] } @@ -118,7 +118,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { resource "ionoscloud_ipblock" "ippools" { - for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}${np.nodepool_index}" => np } + for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}-${np.nodepool_index}" => np } name = each.key location = var.datacenter_location size = each.value.auto_scaling ? 
each.value.max_node_count + 1 : each.value.node_count + 1 diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index 5402b02..1d43e7e 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -98,6 +98,7 @@ variable "api_subnet_allow_list" { default = null } +#Not needed anymore, we work with a list of zones now variable "availability_zone" { type = string default = "ZONE_1" @@ -113,6 +114,7 @@ variable "storage_size" { default = 100 } +#Determins if both should be used, otherwise only one will be used where custom_nodepools overwrite legacy ones variable "enable_legacy_and_scaling" { type = bool default = false From 8bba887c141e4a2421809bb53c5d64f0a6e61691 Mon Sep 17 00:00:00 2001 From: marhode Date: Thu, 9 Nov 2023 12:24:25 +0100 Subject: [PATCH 30/94] Testing purpose legacy min and max node count equals node count --- modules/ionos-k8s-cluster/locals.tf | 14 ++++++++++---- modules/ionos-k8s-cluster/main.tf | 14 +++++++------- modules/ionos-k8s-cluster/variables.tf | 11 ++++++++++- 3 files changed, 27 insertions(+), 12 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index dfe3bb5..458578b 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -10,9 +10,9 @@ locals { #Create legacy object for possible merging into the nodepool list(Only used when both legacy and custom nodespools are in use) legacy_object = tolist([{ name = "Legacy" - auto_scaling = false + auto_scaling = true nodepool_per_zone_count = null - node_count = null + node_count = var.node_count nodepool_per_zone_count = null min_node_count= null max_node_count= null @@ -24,6 +24,9 @@ locals { associated_lans = var.associated_lans maintenance_day = null maintenance_hour = null + storage_type = null + storage_size = null + cpu_family = null }]) #check if both legacy and scaling should be used, if so merge legacy object into the object list if needed (default = false) @@ -37,8 +40,8 @@ locals { name = np.name purpose = np.purpose auto_scaling = np.auto_scaling - min_node_count = np.min_node_count - max_node_count = np.max_node_count + min_node_count = np.purpose == "legacy" ? np.node_count : np.min_node_count + max_node_count = np.purpose == "legacy" ? np.node_count : np.max_node_count availability_zones = np.availability_zones nodepool_per_zone_count = np.nodepool_per_zone_count != null ? np.nodepool_per_zone_count : var.nodepool_per_zone_count node_count = np.node_count != null ? np.node_count : var.node_count @@ -48,6 +51,9 @@ locals { associated_lans = np.associated_lans != null ? np.associated_lans : var.associated_lans maintenance_day = np.maintenance_day != null ? np.maintenance_day : var.maintenance_day maintenance_hour = np.maintenance_hour != null ? np.maintenance_hour : var.maintenance_hour + storage_type = np.storage_type != null ? np.storage_type : var.storage_type + storage_size = np.storage_size != null ? np.storage_size : var.storage_size + cpu_family = np.cpu_family != null ? 
np.cpu_family : var.cpu_family } ] diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 66c84d9..9e788ea 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -47,11 +47,11 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { datacenter_id = var.datacenter_id k8s_cluster_id = ionoscloud_k8s_cluster.cluster.id cpu_family = local.cpu_family - storage_type = "SSD" + storage_type = each.value.storage_type node_count = each.value.node_count cores_count = each.value.core_count ram_size = each.value.ram_size - storage_size = 100 + storage_size = each.value.storage_size public_ips = local.public_ip_pools != null ? local.public_ip_pools[each.key] : [] auto_scaling { @@ -78,18 +78,18 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { #count = each.value.nodepool_per_zone_count name = each.key k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version - allow_replace = var.allow_node_pool_replacement + allow_replace = each.value.allow_node_pool_replacement # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks # it iterates through the list of var.associated_lans and sets the appropriate lan id # it also sets one or multiple route to the lan, if a not empty entry exists in routes_list(var.associated_lans) dynamic "lans" { - for_each = var.associated_lans + for_each = each.value.associated_lans content { id = lans.value["id"] dynamic "routes" { # if there is an entry in the routes_list, iterate through the values in the routes_list to create the routes # lans.key = works like count.index, returns the iteration number of current lan -> 0,1,2,3,4... - for_each = var.associated_lans[lans.key].routes_list == null || length(var.associated_lans[lans.key].routes_list[0]) == 0 ? [] : var.associated_lans[lans.key].routes_list + for_each = each.value.associated_lans[lans.key].routes_list == null || length(each.value.associated_lans[lans.key].routes_list[0]) == 0 ? [] : each.value.associated_lans[lans.key].routes_list content { # graps the values from the objects of the routes_list @@ -108,11 +108,11 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { datacenter_id = var.datacenter_id k8s_cluster_id = ionoscloud_k8s_cluster.cluster.id cpu_family = local.cpu_family - storage_type = "SSD" + storage_type = each.value.storage_type node_count = each.value.node_count cores_count = each.value.core_count ram_size = each.value.ram_size - storage_size = 100 + storage_size = each.value.storage_size public_ips = local.public_ip_pools != null ? local.public_ip_pools[each.key] : [] } diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index 1d43e7e..a6a6999 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -124,7 +124,7 @@ variable "enable_legacy_and_scaling" { variable "custom_nodepools" { type = list(object({ name = string - auto_scaling = optional(bool, false) + auto_scaling = optional(bool, true) node_count = number nodepool_per_zone_count = optional(number, 1) min_node_count= optional(number, null) @@ -140,12 +140,18 @@ variable "custom_nodepools" { })) maintenance_day = string maintenance_hour = number + storage_type = string + storage_size = number + cpu_family = string }) ) description = "This object describes nodepool configurations for dynamic creation of nodepools with a specific purpose and resources." 
default = [{ name = "Legacy" + auto_scaling = true nodepool_per_zone_count = null + min_node_count= null + max_node_count= null node_count = null ram_size = null core_count = null @@ -155,6 +161,9 @@ variable "custom_nodepools" { associated_lans = null maintenance_day = null maintenance_hour = null + storage_type = null + storage_size = null + cpu_family = null }] } \ No newline at end of file From 3182dbad9e0843b9c1e45029636c0c50547f46b3 Mon Sep 17 00:00:00 2001 From: marhode Date: Thu, 9 Nov 2023 13:16:14 +0100 Subject: [PATCH 31/94] Undo last test because it doesn't work --- modules/ionos-k8s-cluster/locals.tf | 9 ++++----- modules/ionos-k8s-cluster/variables.tf | 4 ++-- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 458578b..d6945c8 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -10,10 +10,9 @@ locals { #Create legacy object for possible merging into the nodepool list(Only used when both legacy and custom nodespools are in use) legacy_object = tolist([{ name = "Legacy" - auto_scaling = true - nodepool_per_zone_count = null - node_count = var.node_count + auto_scaling = false nodepool_per_zone_count = null + node_count = null min_node_count= null max_node_count= null ram_size = null core_count = null purpose = "legacy" @@ -40,8 +39,8 @@ locals { name = np.name purpose = np.purpose auto_scaling = np.auto_scaling - min_node_count = np.purpose == "legacy" ? np.node_count : np.min_node_count - max_node_count = np.purpose == "legacy" ? np.node_count : np.max_node_count + min_node_count = np.min_node_count + max_node_count = np.max_node_count availability_zones = np.availability_zones nodepool_per_zone_count = np.nodepool_per_zone_count != null ? np.nodepool_per_zone_count : var.nodepool_per_zone_count node_count = np.node_count != null ? np.node_count : var.node_count diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index a6a6999..8f5eea5 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -124,7 +124,7 @@ variable "custom_nodepools" { type = list(object({ name = string - auto_scaling = optional(bool, true) + auto_scaling = optional(bool, false) node_count = number nodepool_per_zone_count = optional(number, 1) min_node_count= optional(number, null) @@ -148,7 +148,7 @@ variable "custom_nodepools" { description = "This object describes nodepool configurations for dynamic creation of nodepools with a specific purpose and resources." default = [{ name = "Legacy" - auto_scaling = true + auto_scaling = false nodepool_per_zone_count = null min_node_count= null max_node_count= null node_count = null ram_size = null core_count = null From 6648900a3cf4be46f2333730ac02b4c8da3a533a Mon Sep 17 00:00:00 2001 From: marhode Date: Thu, 9 Nov 2023 13:35:14 +0100 Subject: [PATCH 32/94] Small fix --- modules/ionos-k8s-cluster/variables.tf | 3 --- 1 file changed, 3 deletions(-) diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index 8f5eea5..ad6c4d9 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -148,10 +148,7 @@ variable "custom_nodepools" { description = "This object describes nodepool configurations for dynamic creation of nodepools with a specific purpose and resources." 

default = [{ name = "Legacy" - auto_scaling = false nodepool_per_zone_count = null - min_node_count= null - max_node_count= null node_count = null ram_size = null core_count = null From 5e7cf74246e198f0e011afa99c5975e7cbf188db Mon Sep 17 00:00:00 2001 From: marhode Date: Thu, 9 Nov 2023 14:32:53 +0100 Subject: [PATCH 33/94] Simplified check for legacy and scaling deployment --- modules/ionos-k8s-cluster/locals.tf | 5 ++++- modules/ionos-k8s-cluster/variables.tf | 17 +---------------- 2 files changed, 5 insertions(+), 17 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index d6945c8..c2cece5 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -28,10 +28,13 @@ locals { cpu_family = null }]) + #Check if the custom_nodepool list is empty, if so use the legacy object + nodepool_list_check = var.custom_nodepools == [] ? local.legacy_object : var.custom_nodepools + #check if both legacy and scaling should be used, if so merge legacy object into the object list if needed (default = false) #if false: No need to do anything because it is either legacy or scaling #if true: check if first object is legacy, if not only scaling objects are in the list => merge legacy into it - legacy_check = var.enable_legacy_and_scaling == false ? var.custom_nodepools : (var.custom_nodepools[0].purpose != "legacy" ? tolist(concat(var.custom_nodepools, local.legacy_object)) : var.custom_nodepools) + legacy_check = var.enable_legacy_and_scaling == false ? local.nodepool_list_check : tolist(concat(var.custom_nodepools, local.legacy_object)) #Loop through our nodepool list to detect empty values and fill them with legacy values #Only required for downward compatibility and legacy nodepools (If no downward compatibility is required just use var.custom_nodepools to loop over) diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index ad6c4d9..6c74743 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -146,21 +146,6 @@ variable "custom_nodepools" { }) ) description = "This object describes nodepool configurations for dynamic creation of nodepools with a specific purpose and resources." - default = [{ - name = "Legacy" - nodepool_per_zone_count = null - node_count = null - ram_size = null - core_count = null - purpose = "legacy" - availability_zones = ["ZONE_1", "ZONE_2"] - allow_node_pool_replacement = null - associated_lans = null - maintenance_day = null - maintenance_hour = null - storage_type = null - storage_size = null - cpu_family = null - }] + default = [] } \ No newline at end of file From f09ba9b844673e32dcc6947ac35d64cd24fd5553 Mon Sep 17 00:00:00 2001 From: marhode Date: Thu, 9 Nov 2023 14:50:53 +0100 Subject: [PATCH 34/94] Test with legacy only --- modules/ionos-k8s-cluster/locals.tf | 5 +---- modules/ionos-k8s-cluster/variables.tf | 17 ++++++++++++++++- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index c2cece5..d6945c8 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -28,13 +28,10 @@ locals { cpu_family = null }]) - #Check if the custom_nodepool list is empty, if so use the legacy object - nodepool_list_check = var.custom_nodepools == [] ? 
local.legacy_object : var.custom_nodepools - #check if both legacy and scaling should be used, if so merge legacy object into the object list if needed (default = false) #if false: No need to do anything because it is either legacy or scaling #if true: check if first object is legacy, if not only scaling objects are in the list => merge legacy into it - legacy_check = var.enable_legacy_and_scaling == false ? local.nodepool_list_check : tolist(concat(var.custom_nodepools, local.legacy_object)) + legacy_check = var.enable_legacy_and_scaling == false ? var.custom_nodepools : (var.custom_nodepools[0].purpose != "legacy" ? tolist(concat(var.custom_nodepools, local.legacy_object)) : var.custom_nodepools) #Loop through our nodepool list to detect empty values and fill them with legacy values #Only required for downward compatibility and legacy nodepools (If no downward compatibility is required just use var.custom_nodepools to loop over) diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index 6c74743..ad6c4d9 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -146,6 +146,21 @@ variable "custom_nodepools" { }) ) description = "This object describes nodepool configurations for dynamic creation of nodepools with a specific purpose and resources." - default = [] + default = [{ + name = "Legacy" + nodepool_per_zone_count = null + node_count = null + ram_size = null + core_count = null + purpose = "legacy" + availability_zones = ["ZONE_1", "ZONE_2"] + allow_node_pool_replacement = null + associated_lans = null + maintenance_day = null + maintenance_hour = null + storage_type = null + storage_size = null + cpu_family = null + }] } \ No newline at end of file From 1d43bf6638a2bbf9d2fbce34045c1bcf270d4f8e Mon Sep 17 00:00:00 2001 From: marhode Date: Thu, 9 Nov 2023 15:11:29 +0100 Subject: [PATCH 35/94] Test with legacy and scaling = false --- modules/ionos-k8s-cluster/variables.tf | 31 ++++++++++++++------------ 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index ad6c4d9..3e62b57 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -147,20 +147,23 @@ variable "custom_nodepools" { ) description = "This object describes nodepool configurations for dynamic creation of nodepools with a specific purpose and resources." 
default = [{ - name = "Legacy" - nodepool_per_zone_count = null - node_count = null - ram_size = null - core_count = null - purpose = "legacy" - availability_zones = ["ZONE_1", "ZONE_2"] - allow_node_pool_replacement = null - associated_lans = null - maintenance_day = null - maintenance_hour = null - storage_type = null - storage_size = null - cpu_family = null + name = "Legacy" + auto_scaling = false + nodepool_per_zone_count = null + node_count = null + min_node_count= null + max_node_count= null + ram_size = null + core_count = null + purpose = "legacy" + availability_zones = ["ZONE_1", "ZONE_2"] + allow_node_pool_replacement = null + associated_lans = null + maintenance_day = null + maintenance_hour = null + storage_type = null + storage_size = null + cpu_family = null }] } \ No newline at end of file From ca8365632c04014c8960ecaaa632906222574885 Mon Sep 17 00:00:00 2001 From: marhode Date: Fri, 10 Nov 2023 12:34:03 +0100 Subject: [PATCH 36/94] Test nodepool labels --- modules/ionos-k8s-cluster/main.tf | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 9e788ea..21052f8 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -13,7 +13,7 @@ resource "ionoscloud_k8s_cluster" "cluster" { #---- resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { - for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}-${np.nodepool_index}" => np if np.auto_scaling == true} + for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-purpose-${np.purpose}-${np.nodepool_index}" => np if np.auto_scaling == true} availability_zone = each.value.availability_zone name = each.key k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version @@ -59,6 +59,10 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { max_node_count = each.value.max_node_count } + labels = { + "purpose" = each.value.purpose + } + lifecycle { ignore_changes = [ node_count ] } From e566fdca80d278926ae683a205540af80fabd93e Mon Sep 17 00:00:00 2001 From: marhode Date: Mon, 13 Nov 2023 10:36:48 +0100 Subject: [PATCH 37/94] Corrected nodepool name --- modules/ionos-k8s-cluster/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 21052f8..2b7a3f8 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -13,7 +13,7 @@ resource "ionoscloud_k8s_cluster" "cluster" { #---- resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { - for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-purpose-${np.purpose}-${np.nodepool_index}" => np if np.auto_scaling == true} + for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}-${np.nodepool_index}" => np if np.auto_scaling == true} availability_zone = each.value.availability_zone name = each.key k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version From dc5e74fdcce202d4df213fe0f52ca9c3620fac58 Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Thu, 16 Nov 2023 15:43:18 +0000 Subject: [PATCH 38/94] changed legacy nodepool name --- modules/ionos-k8s-cluster/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 2b7a3f8..f8f346a 100644 --- 
a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -80,7 +80,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { #conditional create is just another count, if auto_scaling=true set count to nodepools_per_zone_count #for_each = { for pool in var.custom_nodepools : pool.site_name => pool if var.environment != "prod"} #count = each.value.nodepool_per_zone_count - name = each.key + name = each.value.availability_zone == "ZONE_1" ? "${local.cluster_name}-zone1-nodepool-0":"${local.cluster_name}-zone2-nodepool-0" #each.key k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version allow_replace = each.value.allow_node_pool_replacement # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks From 01ddd831ceb787749d2ccfc06b9f1743a1183828 Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Mon, 20 Nov 2023 07:12:21 +0000 Subject: [PATCH 39/94] changed legacy nodepool name --- modules/ionos-k8s-cluster/main.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index f8f346a..640807a 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -73,14 +73,14 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { #---- resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { - for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}-${np.nodepool_index}" => np if np.auto_scaling == false} + for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}-${np.nodepool_index}" => np if np.auto_scaling == false} # & zone = 1 // zone =2 availability_zone = each.value.availability_zone #for_each = { for k, v in var.custom_nodepools : k => v if var.auto_scaling } #for_each = { for k in compact([for k, v in var.mymap: v.condition ? k : ""]): k => var.mymap[k] } #conditional create is just another count, if auto_scaling=true set count to nodepools_per_zone_count #for_each = { for pool in var.custom_nodepools : pool.site_name => pool if var.environment != "prod"} #count = each.value.nodepool_per_zone_count - name = each.value.availability_zone == "ZONE_1" ? "${local.cluster_name}-zone1-nodepool-0":"${local.cluster_name}-zone2-nodepool-0" #each.key + name = each.value.availability_zone == "ZONE_1" ? 
"${local.cluster_name}-zone1-nodepool-000000000":"${local.cluster_name}-zone2-nodepool-0000000000000" #each.key k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version allow_replace = each.value.allow_node_pool_replacement # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks From 47cc49e2a36b318043349a0fc2f454191a8ccf5c Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Mon, 20 Nov 2023 07:20:32 +0000 Subject: [PATCH 40/94] added moved block --- modules/ionos-k8s-cluster/main.tf | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 640807a..774d99c 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -80,7 +80,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { #conditional create is just another count, if auto_scaling=true set count to nodepools_per_zone_count #for_each = { for pool in var.custom_nodepools : pool.site_name => pool if var.environment != "prod"} #count = each.value.nodepool_per_zone_count - name = each.value.availability_zone == "ZONE_1" ? "${local.cluster_name}-zone1-nodepool-000000000":"${local.cluster_name}-zone2-nodepool-0000000000000" #each.key + name = each.value.availability_zone == "ZONE_1" ? "${local.cluster_name}-zone1-nodepool-0":"${local.cluster_name}-zone2-nodepool-0" #each.key k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version allow_replace = each.value.allow_node_pool_replacement # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks @@ -120,6 +120,15 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { public_ips = local.public_ip_pools != null ? local.public_ip_pools[each.key] : [] } +moved { + from = module.ionos_k8s_cluster.ionoscloud_k8s_node_pool.nodepool_zone1[0] + to = module.ionos_k8s_cluster.ionoscloud_k8s_node_pool.nodepool_legacy["infra-dev-schulcloud-ops-5426-ZONE_1-legacy-0"] +} + +moved { + from = module.ionos_k8s_cluster.ionoscloud_k8s_node_pool.nodepool_zone2[0] + to = module.ionos_k8s_cluster.ionoscloud_k8s_node_pool.nodepool_legacy["infra-dev-schulcloud-ops-5426-ZONE_2-legacy-0"] +} resource "ionoscloud_ipblock" "ippools" { for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}-${np.nodepool_index}" => np } From 057d82732900ea4678727395229ff2fee03eb3fe Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Mon, 20 Nov 2023 08:33:15 +0000 Subject: [PATCH 41/94] removed moved block --- modules/ionos-k8s-cluster/main.tf | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 774d99c..03fd28b 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -120,16 +120,6 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { public_ips = local.public_ip_pools != null ? 
local.public_ip_pools[each.key] : [] } -moved { - from = module.ionos_k8s_cluster.ionoscloud_k8s_node_pool.nodepool_zone1[0] - to = module.ionos_k8s_cluster.ionoscloud_k8s_node_pool.nodepool_legacy["infra-dev-schulcloud-ops-5426-ZONE_1-legacy-0"] -} - -moved { - from = module.ionos_k8s_cluster.ionoscloud_k8s_node_pool.nodepool_zone2[0] - to = module.ionos_k8s_cluster.ionoscloud_k8s_node_pool.nodepool_legacy["infra-dev-schulcloud-ops-5426-ZONE_2-legacy-0"] -} - resource "ionoscloud_ipblock" "ippools" { for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}-${np.nodepool_index}" => np } name = each.key From 908a05bbf30b864664aaef6f2c1563ef9a67a0da Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Wed, 22 Nov 2023 07:17:42 +0000 Subject: [PATCH 42/94] enabled outputs and variables for compatibility --- modules/ionos-k8s-cluster/output.tf | 12 ++++++------ modules/ionos-k8s-cluster/variables.tf | 18 +++++++++--------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/modules/ionos-k8s-cluster/output.tf b/modules/ionos-k8s-cluster/output.tf index cbd065d..d09fe75 100644 --- a/modules/ionos-k8s-cluster/output.tf +++ b/modules/ionos-k8s-cluster/output.tf @@ -7,9 +7,9 @@ output "cluster_k8s_version" { output "cluster_id" { value = ionoscloud_k8s_cluster.cluster.id } -# output "nodepool_zone1_id" { -# value = ionoscloud_k8s_node_pool.nodepool_zone1[*].id -# } -# output "nodepool_zone2_id" { -# value = ionoscloud_k8s_node_pool.nodepool_zone2[*].id -# } \ No newline at end of file +output "nodepool_zone1_id" { + value = ionoscloud_k8s_node_pool.nodepool_zone1[*].id +} +output "nodepool_zone2_id" { + value = ionoscloud_k8s_node_pool.nodepool_zone2[*].id +} \ No newline at end of file diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index 3e62b57..ac592bc 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -60,15 +60,15 @@ variable "public_ip_pools" { default = null } -# variable "public_ip_pool_zone1" { -# type = list(list(string)) -# default = null -# } - -# variable "public_ip_pool_zone2" { -# type = list(list(string)) -# default = null -# } +variable "public_ip_pool_zone1" { + type = list(list(string)) + default = null +} + +variable "public_ip_pool_zone2" { + type = list(list(string)) + default = null +} variable "create_public_ip_pools" { type = bool From 2d04b1136748df1742799710ce754c1d6a5755f5 Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Wed, 22 Nov 2023 07:34:52 +0000 Subject: [PATCH 43/94] remove variables --- modules/ionos-k8s-cluster/output.tf | 12 ++++++------ modules/ionos-k8s-cluster/variables.tf | 18 +++++++++--------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/modules/ionos-k8s-cluster/output.tf b/modules/ionos-k8s-cluster/output.tf index d09fe75..cbd065d 100644 --- a/modules/ionos-k8s-cluster/output.tf +++ b/modules/ionos-k8s-cluster/output.tf @@ -7,9 +7,9 @@ output "cluster_k8s_version" { output "cluster_id" { value = ionoscloud_k8s_cluster.cluster.id } -output "nodepool_zone1_id" { - value = ionoscloud_k8s_node_pool.nodepool_zone1[*].id -} -output "nodepool_zone2_id" { - value = ionoscloud_k8s_node_pool.nodepool_zone2[*].id -} \ No newline at end of file +# output "nodepool_zone1_id" { +# value = ionoscloud_k8s_node_pool.nodepool_zone1[*].id +# } +# output "nodepool_zone2_id" { +# value = ionoscloud_k8s_node_pool.nodepool_zone2[*].id +# } \ No newline at end of file diff --git 
a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index ac592bc..3e62b57 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -60,15 +60,15 @@ variable "public_ip_pools" { default = null } -variable "public_ip_pool_zone1" { - type = list(list(string)) - default = null -} - -variable "public_ip_pool_zone2" { - type = list(list(string)) - default = null -} +# variable "public_ip_pool_zone1" { +# type = list(list(string)) +# default = null +# } + +# variable "public_ip_pool_zone2" { +# type = list(list(string)) +# default = null +# } variable "create_public_ip_pools" { type = bool From 9cc5837ce259261bda36a9e36582767f3de2fef9 Mon Sep 17 00:00:00 2001 From: marhode Date: Thu, 23 Nov 2023 09:31:03 +0100 Subject: [PATCH 44/94] Added ip pool creation code to be downward compatible with sc-legacy --- modules/ionos-k8s-cluster/locals.tf | 13 ++++++++++++- modules/ionos-k8s-cluster/main.tf | 17 +++++++---------- modules/ionos-k8s-cluster/variables.tf | 20 ++++++++++++-------- 3 files changed, 31 insertions(+), 19 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index d6945c8..09e9db8 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -26,6 +26,8 @@ locals { storage_type = null storage_size = null cpu_family = null + create_public_ip_pools = null + public_ips = [] }]) #check if both legacy and scaling should be used, if so merge legacy object into the object list if needed (default = false) @@ -53,13 +55,22 @@ locals { storage_type = np.storage_type != null ? np.storage_type : var.storage_type storage_size = np.storage_size != null ? np.storage_size : var.storage_size cpu_family = np.cpu_family != null ? np.cpu_family : var.cpu_family + create_public_ip_pools = np.create_public_ip_pools != null ? np.create_public_ip_pools : var.create_public_ip_pools + public_ips = np.public_ips != [] ? np.public_ips : [] } ] #availabilityzone_split duplicates objects with each of their Availability zones once. if [ZONE1, ZONE2] we get 2 objects with one of those zones each. availabilityzone_split = toset(flatten([for n in local.custom_nodepools : [for x in n.availability_zones : merge(n,{availability_zone = x})] ])) + + #Does this work as copy? + nodepools_with_ips = [ for np in local.availabilityzone_split : { + public_ips = np.create_public_ip_pools == false ? [] : np.availability_zone == "ZONE_1" ? 
var.public_ip_pool_zone1 : var.public_ip_pool_zone2 + } + ] + #nodepool_per_zone_creator this duplicates the objects in each availability zone to the amount of nodepool_per_zone_count - nodepool_per_zone_creator = toset(flatten([for n in local.availabilityzone_split : [for x in range(n.nodepool_per_zone_count) : merge(n,{nodepool_index = x})] ])) + nodepool_per_zone_creator = toset(flatten([for n in local.nodepools_with_ips : [for x in range(n.nodepool_per_zone_count) : merge(n,{nodepool_index = x})] ])) } diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 03fd28b..5ae0b4a 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -9,7 +9,7 @@ resource "ionoscloud_k8s_cluster" "cluster" { } #---- -#Test Pool 1 scaling +# Scaling Nodepool Definition #---- resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { @@ -52,7 +52,8 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = local.public_ip_pools != null ? local.public_ip_pools[each.key] : [] + public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips + auto_scaling { min_node_count = each.value.min_node_count @@ -69,17 +70,13 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { } #---- -#Test Pool 2 legacy +# Non-Scaling Nodepool Definition #---- resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}-${np.nodepool_index}" => np if np.auto_scaling == false} # & zone = 1 // zone =2 availability_zone = each.value.availability_zone - #for_each = { for k, v in var.custom_nodepools : k => v if var.auto_scaling } - #for_each = { for k in compact([for k, v in var.mymap: v.condition ? k : ""]): k => var.mymap[k] } - #conditional create is just another count, if auto_scaling=true set count to nodepools_per_zone_count - #for_each = { for pool in var.custom_nodepools : pool.site_name => pool if var.environment != "prod"} - #count = each.value.nodepool_per_zone_count + name = each.value.availability_zone == "ZONE_1" ? "${local.cluster_name}-zone1-nodepool-0":"${local.cluster_name}-zone2-nodepool-0" #each.key k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version allow_replace = each.value.allow_node_pool_replacement @@ -117,11 +114,11 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = local.public_ip_pools != null ? local.public_ip_pools[each.key] : [] + public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips } resource "ionoscloud_ipblock" "ippools" { - for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}-${np.nodepool_index}" => np } + for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}-${np.nodepool_index}" => np if np.create_public_ip_pools == true} name = each.key location = var.datacenter_location size = each.value.auto_scaling ? 
each.value.max_node_count + 1 : each.value.node_count + 1 diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index 3e62b57..565d6c9 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -60,15 +60,15 @@ variable "public_ip_pools" { default = null } -# variable "public_ip_pool_zone1" { -# type = list(list(string)) -# default = null -# } +variable "public_ip_pool_zone1" { + type = list(list(string)) + default = null +} -# variable "public_ip_pool_zone2" { -# type = list(list(string)) -# default = null -# } +variable "public_ip_pool_zone2" { + type = list(list(string)) + default = null +} variable "create_public_ip_pools" { type = bool @@ -143,6 +143,8 @@ variable "custom_nodepools" { storage_type = string storage_size = number cpu_family = string + create_public_ip_pools = bool + public_ips = list(strings) }) ) description = "This object describes nodepool configurations for dynamic creation of nodepools with a specific purpose and resources." @@ -164,6 +166,8 @@ variable "custom_nodepools" { storage_type = null storage_size = null cpu_family = null + create_public_ip_pools = null + public_ips = [] }] } \ No newline at end of file From a9aefb928570ec71692df04b885aa5da082e03d2 Mon Sep 17 00:00:00 2001 From: marhode Date: Thu, 23 Nov 2023 09:36:36 +0100 Subject: [PATCH 45/94] Added optional ip list --- modules/ionos-k8s-cluster/variables.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index 565d6c9..214450a 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -144,7 +144,7 @@ variable "custom_nodepools" { storage_size = number cpu_family = string create_public_ip_pools = bool - public_ips = list(strings) + public_ips = optional(list(strings), []) }) ) description = "This object describes nodepool configurations for dynamic creation of nodepools with a specific purpose and resources." From 996a7131866e07aa59deac22d03de70cc2d639dc Mon Sep 17 00:00:00 2001 From: marhode Date: Thu, 23 Nov 2023 10:03:53 +0100 Subject: [PATCH 46/94] Fixed typo --- modules/ionos-k8s-cluster/variables.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index 214450a..cae68fa 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -144,7 +144,7 @@ variable "custom_nodepools" { storage_size = number cpu_family = string create_public_ip_pools = bool - public_ips = optional(list(strings), []) + public_ips = optional(list(string), []) }) ) description = "This object describes nodepool configurations for dynamic creation of nodepools with a specific purpose and resources." 
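For illustration of how these nodepool locals expand (the cluster name and nodepool values below are assumed examples, not taken from this module): given cluster_name = "example" and a single custom_nodepools entry with purpose = "worker", auto_scaling = true, availability_zones = ["ZONE_1", "ZONE_2"] and nodepool_per_zone_count = 2, the chain from availabilityzone_split to nodepool_per_zone_creator produces one object per zone and per index, and the for_each keys come out roughly as sketched here:
# assumed input: one nodepool object, purpose = "worker", two zones, nodepool_per_zone_count = 2
# availabilityzone_split    -> one copy of the object per availability zone
# nodepool_per_zone_creator -> one copy per zone and per nodepool_index 0..nodepool_per_zone_count-1
# resulting for_each keys:
#   "example-ZONE_1-worker-0"
#   "example-ZONE_1-worker-1"
#   "example-ZONE_2-worker-0"
#   "example-ZONE_2-worker-1"
# ionoscloud_k8s_node_pool.nodepool_scaling takes the entries with auto_scaling == true,
# ionoscloud_k8s_node_pool.nodepool_legacy takes those with auto_scaling == false,
# and ionoscloud_ipblock.ippools is created only for entries with create_public_ip_pools == true.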
From 58441b5c58a5516ce36ad7fdec26f7f553859489 Mon Sep 17 00:00:00 2001 From: marhode Date: Thu, 23 Nov 2023 10:36:02 +0100 Subject: [PATCH 47/94] Added capabilities to include custom ip lists --- modules/ionos-k8s-cluster/locals.tf | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 09e9db8..1e68c91 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -35,9 +35,12 @@ locals { #if true: check if first object is legacy, if not only scaling objects are in the list => merge legacy into it legacy_check = var.enable_legacy_and_scaling == false ? var.custom_nodepools : (var.custom_nodepools[0].purpose != "legacy" ? tolist(concat(var.custom_nodepools, local.legacy_object)) : var.custom_nodepools) + #availabilityzone_split duplicates objects with each of their Availability zones once. if [ZONE1, ZONE2] we get 2 objects with one of those zones each. + availabilityzone_split = toset(flatten([for n in local.legacy_check : [for x in n.availability_zones : merge(n,{availability_zone = x})] ])) + #Loop through our nodepool list to detect empty values and fill them with legacy values #Only required for downward compatibility and legacy nodepools (If no downward compatibility is required just use var.custom_nodepools to loop over) - custom_nodepools = [ for np in local.legacy_check : { + custom_nodepools = [ for np in local.availabilityzone_split : { name = np.name purpose = np.purpose auto_scaling = np.auto_scaling @@ -56,21 +59,11 @@ locals { storage_size = np.storage_size != null ? np.storage_size : var.storage_size cpu_family = np.cpu_family != null ? np.cpu_family : var.cpu_family create_public_ip_pools = np.create_public_ip_pools != null ? np.create_public_ip_pools : var.create_public_ip_pools - public_ips = np.public_ips != [] ? np.public_ips : [] + public_ips = np.create_public_ip_pools == false ? [] : np.purpose == "legacy" ? (np.availability_zone == "ZONE_1" ? var.public_ip_pool_zone1 : var.public_ip_pool_zone2) : np.public_ips } ] - - #availabilityzone_split duplicates objects with each of their Availability zones once. if [ZONE1, ZONE2] we get 2 objects with one of those zones each. - availabilityzone_split = toset(flatten([for n in local.custom_nodepools : [for x in n.availability_zones : merge(n,{availability_zone = x})] ])) - - #Does this work as copy? - nodepools_with_ips = [ for np in local.availabilityzone_split : { - public_ips = np.create_public_ip_pools == false ? [] : np.availability_zone == "ZONE_1" ? 
var.public_ip_pool_zone1 : var.public_ip_pool_zone2 - } - ] - #nodepool_per_zone_creator this duplicates the objects in each availability zone to the amount of nodepool_per_zone_count - nodepool_per_zone_creator = toset(flatten([for n in local.nodepools_with_ips : [for x in range(n.nodepool_per_zone_count) : merge(n,{nodepool_index = x})] ])) + nodepool_per_zone_creator = toset(flatten([for n in local.custom_nodepools : [for x in range(n.nodepool_per_zone_count) : merge(n,{nodepool_index = x})] ])) } From 47b3582ce3f64343a3c654779c3ca3b7a205baba Mon Sep 17 00:00:00 2001 From: marhode Date: Thu, 23 Nov 2023 11:27:51 +0100 Subject: [PATCH 48/94] Removed bracelets --- modules/ionos-k8s-cluster/locals.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 1e68c91..11ca706 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -59,7 +59,7 @@ locals { storage_size = np.storage_size != null ? np.storage_size : var.storage_size cpu_family = np.cpu_family != null ? np.cpu_family : var.cpu_family create_public_ip_pools = np.create_public_ip_pools != null ? np.create_public_ip_pools : var.create_public_ip_pools - public_ips = np.create_public_ip_pools == false ? [] : np.purpose == "legacy" ? (np.availability_zone == "ZONE_1" ? var.public_ip_pool_zone1 : var.public_ip_pool_zone2) : np.public_ips + public_ips = np.create_public_ip_pools == false ? [] : np.purpose == "legacy" ? np.availability_zone == "ZONE_1" ? var.public_ip_pool_zone1 : var.public_ip_pool_zone2 : np.public_ips } ] From 9cc6d9c11f7f01b27eebf52f4cd10146bf05941d Mon Sep 17 00:00:00 2001 From: marhode Date: Thu, 23 Nov 2023 12:52:42 +0100 Subject: [PATCH 49/94] Changed the name of non-scaling nodepools --- modules/ionos-k8s-cluster/main.tf | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 5ae0b4a..9f587b0 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -74,10 +74,10 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { #---- resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { - for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}-${np.nodepool_index}" => np if np.auto_scaling == false} # & zone = 1 // zone =2 + for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}-${np.nodepool_index}" => np if np.auto_scaling == false} availability_zone = each.value.availability_zone - - name = each.value.availability_zone == "ZONE_1" ? "${local.cluster_name}-zone1-nodepool-0":"${local.cluster_name}-zone2-nodepool-0" #each.key + #The name needs to be changed, not only legacy pools have auto_scaling= false and thus we need an additional check + name = each.value.purpose != "legacy" ? each.key : each.value.availability_zone == "ZONE_1" ? 
"${local.cluster_name}-zone1-nodepool-${np.nodepool_index}":"${local.cluster_name}-zone2-nodepool-${np.nodepool_index}" k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version allow_replace = each.value.allow_node_pool_replacement # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks From 1ab4b6fb7dac32f13c4645b6e368928c87d1cbb1 Mon Sep 17 00:00:00 2001 From: marhode Date: Thu, 23 Nov 2023 12:57:53 +0100 Subject: [PATCH 50/94] removed index due to not found fail --- modules/ionos-k8s-cluster/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 9f587b0..735fe70 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -77,7 +77,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}-${np.nodepool_index}" => np if np.auto_scaling == false} availability_zone = each.value.availability_zone #The name needs to be changed, not only legacy pools have auto_scaling= false and thus we need an additional check - name = each.value.purpose != "legacy" ? each.key : each.value.availability_zone == "ZONE_1" ? "${local.cluster_name}-zone1-nodepool-${np.nodepool_index}":"${local.cluster_name}-zone2-nodepool-${np.nodepool_index}" + name = each.value.purpose != "legacy" ? each.key : each.value.availability_zone == "ZONE_1" ? "${local.cluster_name}-zone1-nodepool-0":"${local.cluster_name}-zone2-nodepool-0" k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version allow_replace = each.value.allow_node_pool_replacement # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks From d3eb599b1760cdd10f863c5981738df1927b9b02 Mon Sep 17 00:00:00 2001 From: marhode Date: Thu, 23 Nov 2023 13:11:08 +0100 Subject: [PATCH 51/94] Fixed inconsistency of types in ip pools --- modules/ionos-k8s-cluster/locals.tf | 2 +- modules/ionos-k8s-cluster/variables.tf | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 11ca706..c10e314 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -59,7 +59,7 @@ locals { storage_size = np.storage_size != null ? np.storage_size : var.storage_size cpu_family = np.cpu_family != null ? np.cpu_family : var.cpu_family create_public_ip_pools = np.create_public_ip_pools != null ? np.create_public_ip_pools : var.create_public_ip_pools - public_ips = np.create_public_ip_pools == false ? [] : np.purpose == "legacy" ? np.availability_zone == "ZONE_1" ? var.public_ip_pool_zone1 : var.public_ip_pool_zone2 : np.public_ips + public_ips = np.create_public_ip_pools == false ? null : np.purpose == "legacy" ? np.availability_zone == "ZONE_1" ? 
var.public_ip_pool_zone1 : var.public_ip_pool_zone2 : np.public_ips } ] diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index cae68fa..d08412d 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -144,7 +144,7 @@ variable "custom_nodepools" { storage_size = number cpu_family = string create_public_ip_pools = bool - public_ips = optional(list(string), []) + public_ips = optional(list(list(string)), null) }) ) description = "This object describes nodepool configurations for dynamic creation of nodepools with a specific purpose and resources." @@ -167,7 +167,7 @@ variable "custom_nodepools" { storage_size = null cpu_family = null create_public_ip_pools = null - public_ips = [] + public_ips = null }] } \ No newline at end of file From 793a549c1c60b5428f592111479b1885ce31908c Mon Sep 17 00:00:00 2001 From: marhode Date: Thu, 23 Nov 2023 13:12:03 +0100 Subject: [PATCH 52/94] Missed one null --- modules/ionos-k8s-cluster/locals.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index c10e314..8310e39 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -27,7 +27,7 @@ locals { storage_size = null cpu_family = null create_public_ip_pools = null - public_ips = [] + public_ips = null }]) #check if both legacy and scaling should be used, if so merge legacy object into the object list if needed (default = false) From 6d5f4562a06b53843f496cef105b9e9312839bdc Mon Sep 17 00:00:00 2001 From: marhode Date: Thu, 23 Nov 2023 13:41:45 +0100 Subject: [PATCH 53/94] Changed the default public_ips to list of empty list --- modules/ionos-k8s-cluster/locals.tf | 5 ++--- modules/ionos-k8s-cluster/variables.tf | 10 ++-------- 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 8310e39..04ad6b8 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -4,7 +4,6 @@ locals { # de/txl, de/fra: INTEL_SKYLAKE cpu_family = var.cpu_family - public_ip_pools = var.create_public_ip_pools ? ionoscloud_ipblock.ippools[*].ips : var.public_ip_pools api_subnet_allow_list = var.api_subnet_allow_list #Create legacy object for possible merging into the nodepool list(Only used when both legacy and custom nodespools are in use) @@ -27,7 +26,7 @@ locals { storage_size = null cpu_family = null create_public_ip_pools = null - public_ips = null + public_ips = [[]] }]) #check if both legacy and scaling should be used, if so merge legacy object into the object list if needed (default = false) @@ -59,7 +58,7 @@ locals { storage_size = np.storage_size != null ? np.storage_size : var.storage_size cpu_family = np.cpu_family != null ? np.cpu_family : var.cpu_family create_public_ip_pools = np.create_public_ip_pools != null ? np.create_public_ip_pools : var.create_public_ip_pools - public_ips = np.create_public_ip_pools == false ? null : np.purpose == "legacy" ? np.availability_zone == "ZONE_1" ? var.public_ip_pool_zone1 : var.public_ip_pool_zone2 : np.public_ips + public_ips = np.create_public_ip_pools == false ? [[]] : np.purpose == "legacy" ? np.availability_zone == "ZONE_1" ? 
var.public_ip_pool_zone1 : var.public_ip_pool_zone2 : np.public_ips } ] diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index d08412d..a43a79f 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -55,11 +55,6 @@ variable "nodepool_per_zone_count" { default = 0 } -variable "public_ip_pools" { - type = list(list(string)) - default = null -} - variable "public_ip_pool_zone1" { type = list(list(string)) default = null @@ -144,7 +139,7 @@ variable "custom_nodepools" { storage_size = number cpu_family = string create_public_ip_pools = bool - public_ips = optional(list(list(string)), null) + public_ips = list(list(string)) }) ) description = "This object describes nodepool configurations for dynamic creation of nodepools with a specific purpose and resources." @@ -167,7 +162,6 @@ variable "custom_nodepools" { storage_size = null cpu_family = null create_public_ip_pools = null - public_ips = null + public_ips = [[]] }] - } \ No newline at end of file From a4e5e02ad7f653183ebbbd9d3e8ddb0415cb338a Mon Sep 17 00:00:00 2001 From: marhode Date: Thu, 23 Nov 2023 14:04:01 +0100 Subject: [PATCH 54/94] fix typo --- modules/ionos-k8s-cluster/locals.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 04ad6b8..c4fed7d 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -58,7 +58,7 @@ locals { storage_size = np.storage_size != null ? np.storage_size : var.storage_size cpu_family = np.cpu_family != null ? np.cpu_family : var.cpu_family create_public_ip_pools = np.create_public_ip_pools != null ? np.create_public_ip_pools : var.create_public_ip_pools - public_ips = np.create_public_ip_pools == false ? [[]] : np.purpose == "legacy" ? np.availability_zone == "ZONE_1" ? var.public_ip_pool_zone1 : var.public_ip_pool_zone2 : np.public_ips + public_ips = np.create_public_ip_pools == false ? [[]] : np.purpose == "legacy" ? np.availability_zones == "ZONE_1" ? var.public_ip_pool_zone1 : var.public_ip_pool_zone2 : np.public_ips } ] From 0326e0ab5a4e0c65d012723307aa1eaef947d0b6 Mon Sep 17 00:00:00 2001 From: marhode Date: Thu, 23 Nov 2023 14:08:03 +0100 Subject: [PATCH 55/94] Changed variable to availability_zone --- modules/ionos-k8s-cluster/locals.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index c4fed7d..786fb9d 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -45,7 +45,7 @@ locals { auto_scaling = np.auto_scaling min_node_count = np.min_node_count max_node_count = np.max_node_count - availability_zones = np.availability_zones + availability_zone = np.availability_zone nodepool_per_zone_count = np.nodepool_per_zone_count != null ? np.nodepool_per_zone_count : var.nodepool_per_zone_count node_count = np.node_count != null ? np.node_count : var.node_count ram_size = np.ram_size != null ? np.ram_size : var.ram_size @@ -58,7 +58,7 @@ locals { storage_size = np.storage_size != null ? np.storage_size : var.storage_size cpu_family = np.cpu_family != null ? np.cpu_family : var.cpu_family create_public_ip_pools = np.create_public_ip_pools != null ? np.create_public_ip_pools : var.create_public_ip_pools - public_ips = np.create_public_ip_pools == false ? [[]] : np.purpose == "legacy" ? np.availability_zones == "ZONE_1" ? 
var.public_ip_pool_zone1 : var.public_ip_pool_zone2 : np.public_ips + public_ips = np.create_public_ip_pools == false ? [[]] : np.purpose == "legacy" ? np.availability_zone == "ZONE_1" ? var.public_ip_pool_zone1 : var.public_ip_pool_zone2 : np.public_ips } ] From 123b0d58620a3eca35f99405630b333b3e4f02be Mon Sep 17 00:00:00 2001 From: marhode Date: Thu, 23 Nov 2023 14:37:35 +0100 Subject: [PATCH 56/94] Test for ip lists inconsistence --- modules/ionos-k8s-cluster/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 735fe70..7ffc56e 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -52,7 +52,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips + public_ips = each.value.create_public_ip_pools == true ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips[0] auto_scaling { From 7a123af3b4117478e821d152cfcfb253a30415e8 Mon Sep 17 00:00:00 2001 From: marhode Date: Thu, 23 Nov 2023 14:42:07 +0100 Subject: [PATCH 57/94] Added second addition to ip pool list selection --- modules/ionos-k8s-cluster/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 7ffc56e..cdf69e4 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -114,7 +114,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips + public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips[0] } resource "ionoscloud_ipblock" "ippools" { From ff3ed9ee8f603af34e8d9636eea8caee947c9bac Mon Sep 17 00:00:00 2001 From: marhode Date: Thu, 23 Nov 2023 15:26:33 +0100 Subject: [PATCH 58/94] Changed list of list of ips to map of list of list of list --- modules/ionos-k8s-cluster/locals.tf | 4 ++-- modules/ionos-k8s-cluster/main.tf | 4 ++-- modules/ionos-k8s-cluster/variables.tf | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 786fb9d..4b72fc6 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -26,7 +26,7 @@ locals { storage_size = null cpu_family = null create_public_ip_pools = null - public_ips = [[]] + public_ips = {ZONE_1=[[]], ZONE_2=[[]]} }]) #check if both legacy and scaling should be used, if so merge legacy object into the object list if needed (default = false) @@ -58,7 +58,7 @@ locals { storage_size = np.storage_size != null ? np.storage_size : var.storage_size cpu_family = np.cpu_family != null ? np.cpu_family : var.cpu_family create_public_ip_pools = np.create_public_ip_pools != null ? np.create_public_ip_pools : var.create_public_ip_pools - public_ips = np.create_public_ip_pools == false ? [[]] : np.purpose == "legacy" ? np.availability_zone == "ZONE_1" ? var.public_ip_pool_zone1 : var.public_ip_pool_zone2 : np.public_ips + public_ips = np.create_public_ip_pools == true ? null : np.purpose == "legacy" ? 
np.availability_zone == "ZONE_1" ? var.public_ip_pool_zone1 : var.public_ip_pool_zone2 : np.public_ips[np.var.availability_zone] } ] diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index cdf69e4..8931f17 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -52,7 +52,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools == true ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips[0] + public_ips = each.value.create_public_ip_pools == true ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips[each.value.nodepool_index] auto_scaling { @@ -114,7 +114,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips[0] + public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips == null ? [] : each.value.public_ips[each.value.nodepool_index] } resource "ionoscloud_ipblock" "ippools" { diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index a43a79f..d117101 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -139,7 +139,7 @@ variable "custom_nodepools" { storage_size = number cpu_family = string create_public_ip_pools = bool - public_ips = list(list(string)) + public_ips = map(list(list(string))) }) ) description = "This object describes nodepool configurations for dynamic creation of nodepools with a specific purpose and resources." @@ -162,6 +162,6 @@ variable "custom_nodepools" { storage_size = null cpu_family = null create_public_ip_pools = null - public_ips = [[]] + public_ips = {ZONE_1=[[]], ZONE_2=[[]]} }] } \ No newline at end of file From 5877e7a5d5366eb60e6040507be505642c5629ef Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Thu, 23 Nov 2023 14:40:59 +0000 Subject: [PATCH 59/94] changed public_ips definition --- modules/ionos-k8s-cluster/variables.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index d117101..c66f273 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -139,7 +139,7 @@ variable "custom_nodepools" { storage_size = number cpu_family = string create_public_ip_pools = bool - public_ips = map(list(list(string))) + public_ips = map(list(list)) }) ) description = "This object describes nodepool configurations for dynamic creation of nodepools with a specific purpose and resources." From 0f4f57397d8c8c029f8d17334c49f245dde9cbe9 Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Thu, 23 Nov 2023 15:36:19 +0000 Subject: [PATCH 60/94] fixed typing of lists --- modules/ionos-k8s-cluster/locals.tf | 2 +- modules/ionos-k8s-cluster/variables.tf | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index 4b72fc6..e2bbafa 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -58,7 +58,7 @@ locals { storage_size = np.storage_size != null ? 
np.storage_size : var.storage_size cpu_family = np.cpu_family != null ? np.cpu_family : var.cpu_family create_public_ip_pools = np.create_public_ip_pools != null ? np.create_public_ip_pools : var.create_public_ip_pools - public_ips = np.create_public_ip_pools == true ? null : np.purpose == "legacy" ? np.availability_zone == "ZONE_1" ? var.public_ip_pool_zone1 : var.public_ip_pool_zone2 : np.public_ips[np.var.availability_zone] + public_ips = np.create_public_ip_pools == true ? [[]] : np.purpose == "legacy" ? (np.availability_zone == "ZONE_1" ? var.public_ip_pool_zone1 : var.public_ip_pool_zone2) : np.public_ips[np.availability_zone] } ] diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index c66f273..2e62d26 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -139,7 +139,7 @@ variable "custom_nodepools" { storage_size = number cpu_family = string create_public_ip_pools = bool - public_ips = map(list(list)) + public_ips = map(list(list(string))) }) ) description = "This object describes nodepool configurations for dynamic creation of nodepools with a specific purpose and resources." @@ -162,6 +162,6 @@ variable "custom_nodepools" { storage_size = null cpu_family = null create_public_ip_pools = null - public_ips = {ZONE_1=[[]], ZONE_2=[[]]} + public_ips = {ZONE_1=[[""]], ZONE_2=[[""]]} }] } \ No newline at end of file From ffb6fa915b115d455d7fe7988882deedb52fe90e Mon Sep 17 00:00:00 2001 From: marhode Date: Fri, 24 Nov 2023 10:26:55 +0100 Subject: [PATCH 61/94] Added slice to ip pool list --- modules/ionos-k8s-cluster/main.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 8931f17..eb1af81 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -52,7 +52,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools == true ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips[each.value.nodepool_index] + public_ips = each.value.create_public_ip_pools == true ? ionoscloud_ipblock.ippools[each.key].ips : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.max_node_count) auto_scaling { @@ -114,7 +114,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips == null ? [] : each.value.public_ips[each.value.nodepool_index] + public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips == null ? 
[] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.node_count) } resource "ionoscloud_ipblock" "ippools" { From ec432babce1e8245b1658ee4a65ec43a95bf6477 Mon Sep 17 00:00:00 2001 From: marhode Date: Fri, 24 Nov 2023 10:59:31 +0100 Subject: [PATCH 62/94] Added +1 to the slice of ip lists --- modules/ionos-k8s-cluster/main.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index eb1af81..1766eb7 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -52,7 +52,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools == true ? ionoscloud_ipblock.ippools[each.key].ips : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.max_node_count) + public_ips = each.value.create_public_ip_pools == true ? ionoscloud_ipblock.ippools[each.key].ips : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.max_node_count+1) auto_scaling { @@ -114,7 +114,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips == null ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.node_count) + public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips == null ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.node_count+1) } resource "ionoscloud_ipblock" "ippools" { From a2a81a782f29b4898d8fec19b652ed8f192b5080 Mon Sep 17 00:00:00 2001 From: marhode Date: Fri, 24 Nov 2023 13:37:15 +0100 Subject: [PATCH 63/94] Adjusted maintenance times --- modules/ionos-k8s-cluster/main.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 1766eb7..a1c6c66 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -41,7 +41,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { #TODO we cant use count.index anymore and need a proper solution: + 1 + count.index * 4 maintenance_window { day_of_the_week = (each.value.maintenance_hour + 1 + 1 * 4) < 24 ? each.value.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, each.value.maintenance_day, null) - time = format("%02d:00:00Z", (each.value.maintenance_hour + 1 + 1 * 4) % 24) + time = format("%02d:00:00Z", (each.value.maintenance_hour + 1 + each.value.nodepool_index * 4) % 24) } datacenter_id = var.datacenter_id @@ -103,7 +103,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { maintenance_window { day_of_the_week = (each.value.maintenance_hour + 1 + 1 * 4) < 24 ? 
each.value.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, each.value.maintenance_day, null) - time = format("%02d:00:00Z", (each.value.maintenance_hour + 1 + 2 * 4) % 24) + time = format("%02d:00:00Z", (each.value.maintenance_hour + 1 + each.value.nodepool_index * 4) % 24) } datacenter_id = var.datacenter_id From 81a99a8637766f36875a5f856c339ee65d6a97e7 Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Fri, 24 Nov 2023 13:45:16 +0000 Subject: [PATCH 64/94] increment nodepool counter in name --- modules/ionos-k8s-cluster/main.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 1766eb7..0fb7c97 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -74,10 +74,10 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { #---- resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { - for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}-${np.nodepool_index}" => np if np.auto_scaling == false} + for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}-${each.value.nodepool_index}" => np if np.auto_scaling == false} availability_zone = each.value.availability_zone #The name needs to be changed, not only legacy pools have auto_scaling= false and thus we need an additional check - name = each.value.purpose != "legacy" ? each.key : each.value.availability_zone == "ZONE_1" ? "${local.cluster_name}-zone1-nodepool-0":"${local.cluster_name}-zone2-nodepool-0" + name = each.value.purpose != "legacy" ? each.key : each.value.availability_zone == "ZONE_1" ? "${local.cluster_name}-zone1-nodepool-${each.value.nodepool_index}":"${local.cluster_name}-zone2-nodepool-${each.value.nodepool_index}" k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version allow_replace = each.value.allow_node_pool_replacement # the lans are created as a dynamic block - they help to dynamically construct repeatable nested blocks From 68e860d43f25e69650407c850b387d5cf4ff0bdf Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Fri, 24 Nov 2023 13:45:59 +0000 Subject: [PATCH 65/94] removed cluster name from key --- modules/ionos-k8s-cluster/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 0fb7c97..a27ed3c 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -74,7 +74,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { #---- resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { - for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}-${each.value.nodepool_index}" => np if np.auto_scaling == false} + for_each = {for np in local.nodepool_per_zone_creator : "${np.availability_zone}-${np.purpose}-${each.value.nodepool_index}" => np if np.auto_scaling == false} availability_zone = each.value.availability_zone #The name needs to be changed, not only legacy pools have auto_scaling= false and thus we need an additional check name = each.value.purpose != "legacy" ? each.key : each.value.availability_zone == "ZONE_1" ? 
"${local.cluster_name}-zone1-nodepool-${each.value.nodepool_index}":"${local.cluster_name}-zone2-nodepool-${each.value.nodepool_index}" From 1a63db8918d4f19fe4d00191c6756c8a50d8de7c Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Fri, 24 Nov 2023 13:53:15 +0000 Subject: [PATCH 66/94] changed for-each loop --- modules/ionos-k8s-cluster/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index a2b902d..0af0059 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -74,7 +74,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { #---- resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { - for_each = {for np in local.nodepool_per_zone_creator : "${np.availability_zone}-${np.purpose}-${each.value.nodepool_index}" => np if np.auto_scaling == false} + for_each = {for np in local.nodepool_per_zone_creator : "${np.availability_zone}-${np.purpose}-${np.nodepool_index}" => np if np.auto_scaling == false} availability_zone = each.value.availability_zone #The name needs to be changed, not only legacy pools have auto_scaling= false and thus we need an additional check name = each.value.purpose != "legacy" ? each.key : each.value.availability_zone == "ZONE_1" ? "${local.cluster_name}-zone1-nodepool-${each.value.nodepool_index}":"${local.cluster_name}-zone2-nodepool-${each.value.nodepool_index}" From 3f8de6dbd3d32090d64468c76d4f522b09b84fbf Mon Sep 17 00:00:00 2001 From: marhode Date: Mon, 27 Nov 2023 08:27:32 +0100 Subject: [PATCH 67/94] Removed cluster name from keys to match between ip and nodepools --- modules/ionos-k8s-cluster/main.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 0af0059..1f6983c 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -13,7 +13,7 @@ resource "ionoscloud_k8s_cluster" "cluster" { #---- resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { - for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}-${np.nodepool_index}" => np if np.auto_scaling == true} + for_each = {for np in local.nodepool_per_zone_creator : "${np.availability_zone}-${np.purpose}-${np.nodepool_index}" => np if np.auto_scaling == true} availability_zone = each.value.availability_zone name = each.key k8s_version = ionoscloud_k8s_cluster.cluster.k8s_version @@ -118,7 +118,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { } resource "ionoscloud_ipblock" "ippools" { - for_each = {for np in local.nodepool_per_zone_creator : "${local.cluster_name}-${np.availability_zone}-${np.purpose}-${np.nodepool_index}" => np if np.create_public_ip_pools == true} + for_each = {for np in local.nodepool_per_zone_creator : "${np.availability_zone}-${np.purpose}-${np.nodepool_index}" => np if np.create_public_ip_pools == true} name = each.key location = var.datacenter_location size = each.value.auto_scaling ? 
each.value.max_node_count + 1 : each.value.node_count + 1 From ac0091c841c430795de20f2377800e70c57f64ae Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Mon, 27 Nov 2023 10:34:36 +0000 Subject: [PATCH 68/94] fixed nodepool-count problem --- modules/ionos-k8s-cluster/locals.tf | 2 +- modules/ionos-k8s-cluster/variables.tf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index e2bbafa..b2fb02d 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -63,6 +63,6 @@ locals { ] #nodepool_per_zone_creator this duplicates the objects in each availability zone to the amount of nodepool_per_zone_count - nodepool_per_zone_creator = toset(flatten([for n in local.custom_nodepools : [for x in range(n.nodepool_per_zone_count) : merge(n,{nodepool_index = x})] ])) + nodepool_per_zone_creator = toset(flatten([for n in local.custom_nodepools : [for x in range(0, n.nodepool_per_zone_count) : merge(n,{nodepool_index = x})] ])) } diff --git a/modules/ionos-k8s-cluster/variables.tf b/modules/ionos-k8s-cluster/variables.tf index 2e62d26..6bf29d1 100644 --- a/modules/ionos-k8s-cluster/variables.tf +++ b/modules/ionos-k8s-cluster/variables.tf @@ -121,7 +121,7 @@ variable "custom_nodepools" { name = string auto_scaling = optional(bool, false) node_count = number - nodepool_per_zone_count = optional(number, 1) + nodepool_per_zone_count = number min_node_count= optional(number, null) max_node_count= optional(number, null) ram_size = number From 485c794cc190cbe7b6b3ede032e473307d6dabae Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Mon, 27 Nov 2023 15:45:34 +0000 Subject: [PATCH 69/94] terraform-docs: automated action --- modules/ionos-k8s-cluster/README.md | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/modules/ionos-k8s-cluster/README.md b/modules/ionos-k8s-cluster/README.md index 3f50f38..29b06a6 100644 --- a/modules/ionos-k8s-cluster/README.md +++ b/modules/ionos-k8s-cluster/README.md @@ -21,9 +21,11 @@ No modules. | [allow\_node\_pool\_replacement](#input\_allow\_node\_pool\_replacement) | When set to true, allows the update of immutable fields by first destroying and then re-creating the node pool. | `bool` | `false` | no | | [api\_subnet\_allow\_list](#input\_api\_subnet\_allow\_list) | n/a | `list(string)` | `null` | no | | [associated\_lans](#input\_associated\_lans) | The lans as objects in a list [{lan[0] with id and routes\_list, lan[1] with id and routes\_list}, ...] |
list(object({
id = number
routes_list = list(any)
}))
| `[]` | no | -| [availability\_zone](#input\_availability\_zone) | n/a | `string` | `"ZONE_1"` | no | +| [availability\_zone](#input\_availability\_zone) | Not needed anymore, we work with a list of zones now | `string` | `"ZONE_1"` | no | | [cpu\_family](#input\_cpu\_family) | Valid cpu family | `string` | `"INTEL_SKYLAKE"` | no | | [create\_public\_ip\_pools](#input\_create\_public\_ip\_pools) | n/a | `bool` | `false` | no | +| [custom\_nodepools](#input\_custom\_nodepools) | This object describes nodepool configurations for dynamic creation of nodepools with a specific purpose and resources. |
list(object({
name = string
auto_scaling = optional(bool, false)
node_count = number
nodepool_per_zone_count = number
min_node_count = optional(number, null)
max_node_count = optional(number, null)
ram_size = number
core_count = number
purpose = string
availability_zones = list(string)
allow_node_pool_replacement = bool
associated_lans = list(object({
id = number
routes_list = list(any)
}))
maintenance_day = string
maintenance_hour = number
storage_type = string
storage_size = number
cpu_family = string
create_public_ip_pools = bool
public_ips = map(list(list(string)))
})
)
|
[
{
"allow_node_pool_replacement": null,
"associated_lans": null,
"auto_scaling": false,
"availability_zones": [
"ZONE_1",
"ZONE_2"
],
"core_count": null,
"cpu_family": null,
"create_public_ip_pools": null,
"maintenance_day": null,
"maintenance_hour": null,
"max_node_count": null,
"min_node_count": null,
"name": "Legacy",
"node_count": null,
"nodepool_per_zone_count": null,
"public_ips": {
"ZONE_1": [
[
""
]
],
"ZONE_2": [
[
""
]
]
},
"purpose": "legacy",
"ram_size": null,
"storage_size": null,
"storage_type": null
}
]
| no | +| [enable\_legacy\_and\_scaling](#input\_enable\_legacy\_and\_scaling) | Determins if both should be used, otherwise only one will be used where custom\_nodepools overwrite legacy ones | `bool` | `false` | no | | [k8s\_version](#input\_k8s\_version) | Kubernetes version | `string` | `"1.24.15"` | no | | [maintenance\_day](#input\_maintenance\_day) | On which day to do the maintenance | `string` | `"Saturday"` | no | | [maintenance\_hour](#input\_maintenance\_hour) | On which hour to do the maintenance | `number` | `3` | no | @@ -39,10 +41,6 @@ No modules. | [cluster\_id](#output\_cluster\_id) | n/a | | [cluster\_k8s\_version](#output\_cluster\_k8s\_version) | n/a | | [cluster\_name](#output\_cluster\_name) | n/a | -| [nodepool\_zone1\_id](#output\_nodepool\_zone1\_id) | n/a | -| [nodepool\_zone1\_ips](#output\_nodepool\_zone1\_ips) | n/a | -| [nodepool\_zone2\_id](#output\_nodepool\_zone2\_id) | n/a | -| [nodepool\_zone2\_ips](#output\_nodepool\_zone2\_ips) | n/a | ## Requirements | Name | Version | @@ -53,9 +51,8 @@ No modules. | Name | Type | |------|------| -| [ionoscloud_ipblock.ippools_zone1](https://registry.terraform.io/providers/ionos-cloud/ionoscloud/6.3.6/docs/resources/ipblock) | resource | -| [ionoscloud_ipblock.ippools_zone2](https://registry.terraform.io/providers/ionos-cloud/ionoscloud/6.3.6/docs/resources/ipblock) | resource | +| [ionoscloud_ipblock.ippools](https://registry.terraform.io/providers/ionos-cloud/ionoscloud/6.3.6/docs/resources/ipblock) | resource | | [ionoscloud_k8s_cluster.cluster](https://registry.terraform.io/providers/ionos-cloud/ionoscloud/6.3.6/docs/resources/k8s_cluster) | resource | -| [ionoscloud_k8s_node_pool.nodepool_zone1](https://registry.terraform.io/providers/ionos-cloud/ionoscloud/6.3.6/docs/resources/k8s_node_pool) | resource | -| [ionoscloud_k8s_node_pool.nodepool_zone2](https://registry.terraform.io/providers/ionos-cloud/ionoscloud/6.3.6/docs/resources/k8s_node_pool) | resource | +| [ionoscloud_k8s_node_pool.nodepool_legacy](https://registry.terraform.io/providers/ionos-cloud/ionoscloud/6.3.6/docs/resources/k8s_node_pool) | resource | +| [ionoscloud_k8s_node_pool.nodepool_scaling](https://registry.terraform.io/providers/ionos-cloud/ionoscloud/6.3.6/docs/resources/k8s_node_pool) | resource | \ No newline at end of file From 1a878869a8229a07d177f1f682ab1071168daf44 Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Wed, 29 Nov 2023 14:21:27 +0000 Subject: [PATCH 70/94] changed name for readability --- modules/ionos-k8s-cluster/locals.tf | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf index b2fb02d..8a2da46 100644 --- a/modules/ionos-k8s-cluster/locals.tf +++ b/modules/ionos-k8s-cluster/locals.tf @@ -34,12 +34,12 @@ locals { #if true: check if first object is legacy, if not only scaling objects are in the list => merge legacy into it legacy_check = var.enable_legacy_and_scaling == false ? var.custom_nodepools : (var.custom_nodepools[0].purpose != "legacy" ? tolist(concat(var.custom_nodepools, local.legacy_object)) : var.custom_nodepools) - #availabilityzone_split duplicates objects with each of their Availability zones once. if [ZONE1, ZONE2] we get 2 objects with one of those zones each. - availabilityzone_split = toset(flatten([for n in local.legacy_check : [for x in n.availability_zones : merge(n,{availability_zone = x})] ])) + #availability_zone_split duplicates objects with each of their Availability zones once. 
if [ZONE1, ZONE2] we get 2 objects with one of those zones each. + availability_zone_split = toset(flatten([for n in local.legacy_check : [for x in n.availability_zones : merge(n,{availability_zone = x})] ])) #Loop through our nodepool list to detect empty values and fill them with legacy values #Only required for downward compatibility and legacy nodepools (If no downward compatibility is required just use var.custom_nodepools to loop over) - custom_nodepools = [ for np in local.availabilityzone_split : { + custom_nodepools = [ for np in local.availability_zone_split : { name = np.name purpose = np.purpose auto_scaling = np.auto_scaling From a75716c541c187805aa20f02a81d8573faa2e336 Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Thu, 30 Nov 2023 09:35:33 +0000 Subject: [PATCH 71/94] add ids to output --- modules/ionos-k8s-cluster/output.tf | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/output.tf b/modules/ionos-k8s-cluster/output.tf index 40b0d39..faea513 100644 --- a/modules/ionos-k8s-cluster/output.tf +++ b/modules/ionos-k8s-cluster/output.tf @@ -6,4 +6,16 @@ output "cluster_k8s_version" { } output "cluster_id" { value = ionoscloud_k8s_cluster.cluster.id -} \ No newline at end of file +} +output "nodepool_zone1_id" { + value = values(ionoscloud_k8s_node_pool.nodepool_legacy)[*].id +} +# output "nodepool_zone2_id" { +# value = ionoscloud_k8s_node_pool.nodepool_zone2[*].id +# } +# output "nodepool_zone1_ips" { +# value = concat(ionoscloud_ipblock.ippools_zone1[*].ips) +# } +# output "nodepool_zone2_ips" { +# value = concat(ionoscloud_ipblock.ippools_zone2[*].ips) +# } \ No newline at end of file From 529e9c56fe6a9ddaf811c5d5e9db289d7bcb97ad Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 30 Nov 2023 09:36:09 +0000 Subject: [PATCH 72/94] terraform-docs: automated action --- modules/ionos-k8s-cluster/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/ionos-k8s-cluster/README.md b/modules/ionos-k8s-cluster/README.md index 29b06a6..cd1fb3e 100644 --- a/modules/ionos-k8s-cluster/README.md +++ b/modules/ionos-k8s-cluster/README.md @@ -41,6 +41,7 @@ No modules. 
| [cluster\_id](#output\_cluster\_id) | n/a | | [cluster\_k8s\_version](#output\_cluster\_k8s\_version) | n/a | | [cluster\_name](#output\_cluster\_name) | n/a | +| [nodepool\_zone1\_id](#output\_nodepool\_zone1\_id) | n/a | ## Requirements | Name | Version | From 567d4a795ea5260ffbffbeee3a6d43e75bbb64f2 Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Thu, 30 Nov 2023 09:49:22 +0000 Subject: [PATCH 73/94] added ids for scaling nodepools --- modules/ionos-k8s-cluster/output.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/output.tf b/modules/ionos-k8s-cluster/output.tf index faea513..5840f07 100644 --- a/modules/ionos-k8s-cluster/output.tf +++ b/modules/ionos-k8s-cluster/output.tf @@ -8,7 +8,7 @@ output "cluster_id" { value = ionoscloud_k8s_cluster.cluster.id } output "nodepool_zone1_id" { - value = values(ionoscloud_k8s_node_pool.nodepool_legacy)[*].id + value = concat(values(ionoscloud_k8s_node_pool.nodepool_legacy)[*].id, values(ionoscloud_k8s_node_pool.nodepool_scaling)[*].id) } # output "nodepool_zone2_id" { # value = ionoscloud_k8s_node_pool.nodepool_zone2[*].id From cb399e1b626fe5427739c35964080dfe4e756978 Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Thu, 30 Nov 2023 10:15:24 +0000 Subject: [PATCH 74/94] fixed public ips for empty list --- modules/ionos-k8s-cluster/main.tf | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 1f6983c..59008cd 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -52,8 +52,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools == true ? ionoscloud_ipblock.ippools[each.key].ips : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.max_node_count+1) - + public_ips = each.value.create_public_ip_pools == true ? ionoscloud_ipblock.ippools[each.key].ips : (each.value.max_node_count > length(each.value.public_ips) ? 
null : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.max_node_count+1)) auto_scaling { min_node_count = each.value.min_node_count From 5735a4af9fa9380189397dff295ae1f3474eb06c Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Thu, 30 Nov 2023 10:23:33 +0000 Subject: [PATCH 75/94] added output for ippools --- modules/ionos-k8s-cluster/output.tf | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/modules/ionos-k8s-cluster/output.tf b/modules/ionos-k8s-cluster/output.tf index 5840f07..10b3d29 100644 --- a/modules/ionos-k8s-cluster/output.tf +++ b/modules/ionos-k8s-cluster/output.tf @@ -10,12 +10,6 @@ output "cluster_id" { output "nodepool_zone1_id" { value = concat(values(ionoscloud_k8s_node_pool.nodepool_legacy)[*].id, values(ionoscloud_k8s_node_pool.nodepool_scaling)[*].id) } -# output "nodepool_zone2_id" { -# value = ionoscloud_k8s_node_pool.nodepool_zone2[*].id -# } -# output "nodepool_zone1_ips" { -# value = concat(ionoscloud_ipblock.ippools_zone1[*].ips) -# } -# output "nodepool_zone2_ips" { -# value = concat(ionoscloud_ipblock.ippools_zone2[*].ips) -# } \ No newline at end of file +output "nodepool_zone1_ips" { + value = concat(ionoscloud_ipblock.ippools[*].ips) +} From abf3376d1272119e4eed7c352d006ed48c9619b0 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 30 Nov 2023 10:24:03 +0000 Subject: [PATCH 76/94] terraform-docs: automated action --- modules/ionos-k8s-cluster/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/ionos-k8s-cluster/README.md b/modules/ionos-k8s-cluster/README.md index cd1fb3e..191b69b 100644 --- a/modules/ionos-k8s-cluster/README.md +++ b/modules/ionos-k8s-cluster/README.md @@ -42,6 +42,7 @@ No modules. | [cluster\_k8s\_version](#output\_cluster\_k8s\_version) | n/a | | [cluster\_name](#output\_cluster\_name) | n/a | | [nodepool\_zone1\_id](#output\_nodepool\_zone1\_id) | n/a | +| [nodepool\_zone1\_ips](#output\_nodepool\_zone1\_ips) | n/a | ## Requirements | Name | Version | From 87c71f65e35281ef860c3b4ab477e1f81acdeaf4 Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Thu, 30 Nov 2023 10:29:37 +0000 Subject: [PATCH 77/94] updated ip output --- modules/ionos-k8s-cluster/output.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/output.tf b/modules/ionos-k8s-cluster/output.tf index 10b3d29..9382b8c 100644 --- a/modules/ionos-k8s-cluster/output.tf +++ b/modules/ionos-k8s-cluster/output.tf @@ -11,5 +11,5 @@ output "nodepool_zone1_id" { value = concat(values(ionoscloud_k8s_node_pool.nodepool_legacy)[*].id, values(ionoscloud_k8s_node_pool.nodepool_scaling)[*].id) } output "nodepool_zone1_ips" { - value = concat(ionoscloud_ipblock.ippools[*].ips) + value = concat(values(ionoscloud_ipblock.ippools)[*].ips) } From 90c590c3a0ce55d2b831f17d5a9a775670edddbb Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Thu, 30 Nov 2023 10:46:09 +0000 Subject: [PATCH 78/94] extended public ips condition for custom nodes --- modules/ionos-k8s-cluster/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 59008cd..9b1ac66 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -113,7 +113,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools ? 
ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips == null ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.node_count+1) + public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips == null || each.value.node_count > length(each.value.public_ips) ? null : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.node_count+1) } resource "ionoscloud_ipblock" "ippools" { From 80368f2cf00d379006aeaf1efaca66199cc342fe Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Thu, 30 Nov 2023 10:50:44 +0000 Subject: [PATCH 79/94] fixed ippools if no public ippools needed --- modules/ionos-k8s-cluster/main.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 9b1ac66..5b91cbe 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -52,7 +52,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools == true ? ionoscloud_ipblock.ippools[each.key].ips : (each.value.max_node_count > length(each.value.public_ips) ? null : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.max_node_count+1)) + public_ips = each.value.create_public_ip_pools == true ? ionoscloud_ipblock.ippools[each.key].ips : (each.value.max_node_count > length(each.value.public_ips) ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.max_node_count+1)) auto_scaling { min_node_count = each.value.min_node_count @@ -113,7 +113,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips == null || each.value.node_count > length(each.value.public_ips) ? null : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.node_count+1) + public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips == null || each.value.node_count > length(each.value.public_ips) ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.node_count+1) } resource "ionoscloud_ipblock" "ippools" { From d72dac959bd35d1ec12d2f3ac3b734f7a0236d7a Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Thu, 30 Nov 2023 11:16:05 +0000 Subject: [PATCH 80/94] fix public ips --- modules/ionos-k8s-cluster/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 5b91cbe..bd062b1 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -113,7 +113,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips == null || each.value.node_count > length(each.value.public_ips) ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.node_count+1) + public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips == null ? 
[] : each.value.node_count > length(each.value.public_ips) ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.node_count+1) } resource "ionoscloud_ipblock" "ippools" { From 40251bd7cdf393239ac1e309af46fb0be6dd0f10 Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Thu, 30 Nov 2023 11:28:57 +0000 Subject: [PATCH 81/94] test slice function --- modules/ionos-k8s-cluster/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index bd062b1..06a9b70 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -113,7 +113,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips == null ? [] : each.value.node_count > length(each.value.public_ips) ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.node_count+1) + public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips == null ? [] : each.value.node_count > length(each.value.public_ips) ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.node_count) #slice(public_ips[0] = [""],0,2) } resource "ionoscloud_ipblock" "ippools" { From d366c7170a8f4c78444605151c3a12c9eb49479c Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Thu, 30 Nov 2023 11:30:46 +0000 Subject: [PATCH 82/94] test slice function --- modules/ionos-k8s-cluster/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 06a9b70..dcb19f1 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -52,7 +52,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools == true ? ionoscloud_ipblock.ippools[each.key].ips : (each.value.max_node_count > length(each.value.public_ips) ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.max_node_count+1)) + public_ips = each.value.create_public_ip_pools == true ? ionoscloud_ipblock.ippools[each.key].ips : (each.value.max_node_count > length(each.value.public_ips) ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.max_node_count)) auto_scaling { min_node_count = each.value.min_node_count From b89702df14ce3c728917309c2064a7664ac28fd5 Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Thu, 30 Nov 2023 11:34:15 +0000 Subject: [PATCH 83/94] test slice function --- modules/ionos-k8s-cluster/main.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index dcb19f1..e04ca19 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -52,7 +52,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools == true ? ionoscloud_ipblock.ippools[each.key].ips : (each.value.max_node_count > length(each.value.public_ips) ? 
[] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.max_node_count)) + public_ips = each.value.create_public_ip_pools == true ? ionoscloud_ipblock.ippools[each.key].ips : (each.value.max_node_count > length(each.value.public_ips) ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.max_node_count+1)) auto_scaling { min_node_count = each.value.min_node_count @@ -113,7 +113,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips == null ? [] : each.value.node_count > length(each.value.public_ips) ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.node_count) #slice(public_ips[0] = [""],0,2) + public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips == null ? [] : each.value.node_count > length(each.value.public_ips) ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.node_count+1) #slice(public_ips[0] = [""],0,2) } resource "ionoscloud_ipblock" "ippools" { From 681f400555b95864846a08f96fe1596efdf967c4 Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Thu, 30 Nov 2023 12:29:53 +0000 Subject: [PATCH 84/94] test slice function --- modules/ionos-k8s-cluster/main.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index e04ca19..dcb19f1 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -52,7 +52,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools == true ? ionoscloud_ipblock.ippools[each.key].ips : (each.value.max_node_count > length(each.value.public_ips) ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.max_node_count+1)) + public_ips = each.value.create_public_ip_pools == true ? ionoscloud_ipblock.ippools[each.key].ips : (each.value.max_node_count > length(each.value.public_ips) ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.max_node_count)) auto_scaling { min_node_count = each.value.min_node_count @@ -113,7 +113,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips == null ? [] : each.value.node_count > length(each.value.public_ips) ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.node_count+1) #slice(public_ips[0] = [""],0,2) + public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips == null ? [] : each.value.node_count > length(each.value.public_ips) ? 
[] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.node_count) #slice(public_ips[0] = [""],0,2) } resource "ionoscloud_ipblock" "ippools" { From 3573d23b074423b69dae01b97926fe312ae01f2c Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Thu, 30 Nov 2023 12:42:21 +0000 Subject: [PATCH 85/94] added index for public ip conditions --- modules/ionos-k8s-cluster/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index dcb19f1..22683bb 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -113,7 +113,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips == null ? [] : each.value.node_count > length(each.value.public_ips) ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.node_count) #slice(public_ips[0] = [""],0,2) + public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips[each.value.nodepool_index] == null ? [] : each.value.node_count > length(each.value.public_ips[each.value.nodepool_index]) ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.node_count) #slice(public_ips[0] = [""],0,2) } resource "ionoscloud_ipblock" "ippools" { From a791b2d28b7f3c94f1a06a6986a3ec3d3294c9b2 Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Thu, 30 Nov 2023 12:50:53 +0000 Subject: [PATCH 86/94] public ip generation --- modules/ionos-k8s-cluster/main.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 22683bb..7aea471 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -52,7 +52,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools == true ? ionoscloud_ipblock.ippools[each.key].ips : (each.value.max_node_count > length(each.value.public_ips) ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.max_node_count)) + public_ips = each.value.create_public_ip_pools == true ? ionoscloud_ipblock.ippools[each.key].ips : (each.value.max_node_count > length(each.value.public_ips[each.value.nodepool_index]) ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.max_node_count)) auto_scaling { min_node_count = each.value.min_node_count @@ -113,7 +113,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips[each.value.nodepool_index] == null ? [] : each.value.node_count > length(each.value.public_ips[each.value.nodepool_index]) ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.node_count) #slice(public_ips[0] = [""],0,2) + public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips == null ? [] : each.value.node_count > length(each.value.public_ips[each.value.nodepool_index]) ? 
[] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.node_count) #slice(public_ips[0] = [""],0,2) } resource "ionoscloud_ipblock" "ippools" { From 90f2290a723e28c581ccc034ae8028eeed616cd4 Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Thu, 30 Nov 2023 12:54:17 +0000 Subject: [PATCH 87/94] test nodepool output --- modules/ionos-k8s-cluster/output.tf | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/ionos-k8s-cluster/output.tf b/modules/ionos-k8s-cluster/output.tf index 9382b8c..f3d4b71 100644 --- a/modules/ionos-k8s-cluster/output.tf +++ b/modules/ionos-k8s-cluster/output.tf @@ -13,3 +13,6 @@ output "nodepool_zone1_id" { output "nodepool_zone1_ips" { value = concat(values(ionoscloud_ipblock.ippools)[*].ips) } +output "nodepool_legacy" { + value = ionoscloud_k8s_node_pool.nodepool_legacy +} \ No newline at end of file From 2641cec2eca96ce5567558f92d226f38ad5f9df5 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 30 Nov 2023 12:54:41 +0000 Subject: [PATCH 88/94] terraform-docs: automated action --- modules/ionos-k8s-cluster/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/ionos-k8s-cluster/README.md b/modules/ionos-k8s-cluster/README.md index 191b69b..ab0073b 100644 --- a/modules/ionos-k8s-cluster/README.md +++ b/modules/ionos-k8s-cluster/README.md @@ -41,6 +41,7 @@ No modules. | [cluster\_id](#output\_cluster\_id) | n/a | | [cluster\_k8s\_version](#output\_cluster\_k8s\_version) | n/a | | [cluster\_name](#output\_cluster\_name) | n/a | +| [nodepool\_legacy](#output\_nodepool\_legacy) | n/a | | [nodepool\_zone1\_id](#output\_nodepool\_zone1\_id) | n/a | | [nodepool\_zone1\_ips](#output\_nodepool\_zone1\_ips) | n/a | ## Requirements From 2b4459d3a056284c60f2774031073db56376a626 Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Thu, 30 Nov 2023 13:30:04 +0000 Subject: [PATCH 89/94] public ips --- modules/ionos-k8s-cluster/main.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf index 7aea471..d679532 100644 --- a/modules/ionos-k8s-cluster/main.tf +++ b/modules/ionos-k8s-cluster/main.tf @@ -52,7 +52,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools == true ? ionoscloud_ipblock.ippools[each.key].ips : (each.value.max_node_count > length(each.value.public_ips[each.value.nodepool_index]) ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.max_node_count)) + public_ips = each.value.create_public_ip_pools == true ? ionoscloud_ipblock.ippools[each.key].ips : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.max_node_count+1) auto_scaling { min_node_count = each.value.min_node_count @@ -113,7 +113,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_legacy" { cores_count = each.value.core_count ram_size = each.value.ram_size storage_size = each.value.storage_size - public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips == null ? [] : each.value.node_count > length(each.value.public_ips[each.value.nodepool_index]) ? [] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.node_count) #slice(public_ips[0] = [""],0,2) + public_ips = each.value.create_public_ip_pools ? ionoscloud_ipblock.ippools[each.key].ips : each.value.public_ips == null ? 
[] : slice(each.value.public_ips[each.value.nodepool_index], 0, each.value.node_count+1) } resource "ionoscloud_ipblock" "ippools" { From 8d52a6bc20cb85438c8c30b55aa530ed9eb0823b Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Thu, 30 Nov 2023 13:30:14 +0000 Subject: [PATCH 90/94] removed debugging output --- modules/ionos-k8s-cluster/output.tf | 3 --- 1 file changed, 3 deletions(-) diff --git a/modules/ionos-k8s-cluster/output.tf b/modules/ionos-k8s-cluster/output.tf index f3d4b71..9382b8c 100644 --- a/modules/ionos-k8s-cluster/output.tf +++ b/modules/ionos-k8s-cluster/output.tf @@ -13,6 +13,3 @@ output "nodepool_zone1_id" { output "nodepool_zone1_ips" { value = concat(values(ionoscloud_ipblock.ippools)[*].ips) } -output "nodepool_legacy" { - value = ionoscloud_k8s_node_pool.nodepool_legacy -} \ No newline at end of file From a9bdf71e9960c5ea45dee450af6b77dac054292a Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 30 Nov 2023 13:30:42 +0000 Subject: [PATCH 91/94] terraform-docs: automated action --- modules/ionos-k8s-cluster/README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/README.md b/modules/ionos-k8s-cluster/README.md index ab0073b..191b69b 100644 --- a/modules/ionos-k8s-cluster/README.md +++ b/modules/ionos-k8s-cluster/README.md @@ -41,7 +41,6 @@ No modules. | [cluster\_id](#output\_cluster\_id) | n/a | | [cluster\_k8s\_version](#output\_cluster\_k8s\_version) | n/a | | [cluster\_name](#output\_cluster\_name) | n/a | -| [nodepool\_legacy](#output\_nodepool\_legacy) | n/a | | [nodepool\_zone1\_id](#output\_nodepool\_zone1\_id) | n/a | | [nodepool\_zone1\_ips](#output\_nodepool\_zone1\_ips) | n/a | ## Requirements From d59722a22f3680f0ab1a5231101f69b7056ee537 Mon Sep 17 00:00:00 2001 From: Maximilian Greve Date: Thu, 30 Nov 2023 13:49:29 +0000 Subject: [PATCH 92/94] added outputs for legacy compatibility --- modules/ionos-k8s-cluster/output.tf | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/modules/ionos-k8s-cluster/output.tf b/modules/ionos-k8s-cluster/output.tf index 9382b8c..27ff852 100644 --- a/modules/ionos-k8s-cluster/output.tf +++ b/modules/ionos-k8s-cluster/output.tf @@ -10,6 +10,12 @@ output "cluster_id" { output "nodepool_zone1_id" { value = concat(values(ionoscloud_k8s_node_pool.nodepool_legacy)[*].id, values(ionoscloud_k8s_node_pool.nodepool_scaling)[*].id) } +output "nodepool_zone2_id" { + value = [] +} output "nodepool_zone1_ips" { - value = concat(values(ionoscloud_ipblock.ippools)[*].ips) + value = concat(values(ionoscloud_k8s_node_pool.nodepool_legacy)[*].public_ips,values(ionoscloud_k8s_node_pool.nodepool_scaling)[*].id) +} +output "nodepool_zone2_ips" { + value = [] } From 6627a74f4796d77c649ddd89f5506530c746740c Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 30 Nov 2023 13:50:24 +0000 Subject: [PATCH 93/94] terraform-docs: automated action --- modules/ionos-k8s-cluster/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/ionos-k8s-cluster/README.md b/modules/ionos-k8s-cluster/README.md index 191b69b..518fd9a 100644 --- a/modules/ionos-k8s-cluster/README.md +++ b/modules/ionos-k8s-cluster/README.md @@ -43,6 +43,8 @@ No modules. 
From 6627a74f4796d77c649ddd89f5506530c746740c Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Thu, 30 Nov 2023 13:50:24 +0000
Subject: [PATCH 93/94] terraform-docs: automated action

---
 modules/ionos-k8s-cluster/README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/modules/ionos-k8s-cluster/README.md b/modules/ionos-k8s-cluster/README.md
index 191b69b..518fd9a 100644
--- a/modules/ionos-k8s-cluster/README.md
+++ b/modules/ionos-k8s-cluster/README.md
@@ -43,6 +43,8 @@ No modules.
 | [cluster\_name](#output\_cluster\_name) | n/a |
 | [nodepool\_zone1\_id](#output\_nodepool\_zone1\_id) | n/a |
 | [nodepool\_zone1\_ips](#output\_nodepool\_zone1\_ips) | n/a |
+| [nodepool\_zone2\_id](#output\_nodepool\_zone2\_id) | n/a |
+| [nodepool\_zone2\_ips](#output\_nodepool\_zone2\_ips) | n/a |
 ## Requirements

 | Name | Version |
From 4a4693053fb40291bd56bbb84de1f1b209fda19b Mon Sep 17 00:00:00 2001
From: Maximilian Greve
Date: Thu, 30 Nov 2023 14:02:20 +0000
Subject: [PATCH 94/94] cleanup

---
 modules/ionos-k8s-cluster/locals.tf | 17 ++++++++---------
 modules/ionos-k8s-cluster/main.tf   |  2 +-
 2 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/modules/ionos-k8s-cluster/locals.tf b/modules/ionos-k8s-cluster/locals.tf
index 8a2da46..64b7c6b 100644
--- a/modules/ionos-k8s-cluster/locals.tf
+++ b/modules/ionos-k8s-cluster/locals.tf
@@ -6,7 +6,7 @@ locals {
   api_subnet_allow_list = var.api_subnet_allow_list

-  #Create legacy object for possible merging into the nodepool list(Only used when both legacy and custom nodespools are in use)
+  # Create legacy object for possible merging into the nodepool list (only used when both legacy and custom nodepools are in use)
   legacy_object = tolist([{
     name = "Legacy"
     auto_scaling = false
@@ -29,16 +29,16 @@ locals {
     public_ips = {ZONE_1=[[]], ZONE_2=[[]]}
   }])

-  #check if both legacy and scaling should be used, if so merge legacy object into the object list if needed (default = false)
-  #if false: No need to do anything because it is either legacy or scaling
-  #if true: check if first object is legacy, if not only scaling objects are in the list => merge legacy into it
+  # Check if both legacy and scaling should be used; if so, merge the legacy object into the object list if needed (default = false)
+  # if false: no need to do anything because it is either legacy or scaling
+  # if true: check if the first object is legacy; if not, only scaling objects are in the list => merge legacy into it
   legacy_check = var.enable_legacy_and_scaling == false ? var.custom_nodepools : (var.custom_nodepools[0].purpose != "legacy" ? tolist(concat(var.custom_nodepools, local.legacy_object)) : var.custom_nodepools)

-  #availability_zone_split duplicates objects with each of their Availability zones once. if [ZONE1, ZONE2] we get 2 objects with one of those zones each.
+  # availability_zone_split duplicates objects with each of their availability zones once. If [ZONE1, ZONE2] we get 2 objects with one of those zones each.
   availability_zone_split = toset(flatten([for n in local.legacy_check : [for x in n.availability_zones : merge(n,{availability_zone = x})] ]))

-  #Loop through our nodepool list to detect empty values and fill them with legacy values
-  #Only required for downward compatibility and legacy nodepools (If no downward compatibility is required just use var.custom_nodepools to loop over)
+  # Loop through our nodepool list to detect empty values and fill them with legacy values
+  # Only required for downward compatibility and legacy nodepools (if no downward compatibility is required, just use var.custom_nodepools to loop over)
   custom_nodepools = [ for np in local.availability_zone_split : {
     name = np.name
     purpose = np.purpose
@@ -62,7 +62,6 @@ locals {
     }
   ]

-  #nodepool_per_zone_creator this duplicates the objects in each availability zone to the amount of nodepool_per_zone_count
+  # nodepool_per_zone_creator duplicates the objects in each availability zone nodepool_per_zone_count times
   nodepool_per_zone_creator = toset(flatten([for n in local.custom_nodepools : [for x in range(0, n.nodepool_per_zone_count) : merge(n,{nodepool_index = x})] ]))
 }
-
diff --git a/modules/ionos-k8s-cluster/main.tf b/modules/ionos-k8s-cluster/main.tf
index d679532..c200509 100644
--- a/modules/ionos-k8s-cluster/main.tf
+++ b/modules/ionos-k8s-cluster/main.tf
@@ -38,7 +38,7 @@ resource "ionoscloud_k8s_node_pool" "nodepool_scaling" {
       }
     }
   }
-  #TODO we cant use count.index anymore and need a proper solution: + 1 + count.index * 4
+
   maintenance_window {
     day_of_the_week = (each.value.maintenance_hour + 1 + 1 * 4) < 24 ? each.value.maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, each.value.maintenance_day, null)
     time = format("%02d:00:00Z", (each.value.maintenance_hour + 1 + each.value.nodepool_index * 4) % 24)
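The maintenance_window expressions in the last hunk keep the staggering scheme: one hour after the cluster's maintenance_hour, plus a four-hour offset per nodepool, with both the weekday and the hour wrapping once the sum passes midnight. Note that day_of_the_week still uses the fixed offset 1 * 4 while time uses nodepool_index * 4, so the concern raised in the removed TODO about count.index appears only partially resolved. A small sketch of the wrap-around arithmetic, with illustrative values that are not part of the module:

    locals {
      # Assumed example inputs, for illustration only.
      example_maintenance_hour = 21
      example_maintenance_day  = "Monday"
      example_nodepool_index   = 1
      # 21 + 1 + 1 * 4 = 26, so the window rolls over to the next day at 26 % 24 = 02:00.
      example_day  = (local.example_maintenance_hour + 1 + local.example_nodepool_index * 4) < 24 ? local.example_maintenance_day : lookup({ "Monday" = "Tuesday", "Tuesday" = "Wednesday", "Wednesday" = "Thursday", "Thursday" = "Friday", "Friday" = "Saturday", "Saturday" = "Sunday", "Sunday" = "Monday" }, local.example_maintenance_day, null)
      example_time = format("%02d:00:00Z", (local.example_maintenance_hour + 1 + local.example_nodepool_index * 4) % 24)
      # => example_day = "Tuesday", example_time = "02:00:00Z"
    }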