diff --git a/.github/workflows/checkov.yml b/.github/workflows/checkov.yml
new file mode 100644
index 0000000..8984c18
--- /dev/null
+++ b/.github/workflows/checkov.yml
@@ -0,0 +1,25 @@
+
+name: Static security analysis for Terraform
+
+permissions: read-all
+
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+    branches:
+      - master
+jobs:
+  checkov-job:
+    runs-on: ubuntu-latest
+    name: checkov-action
+    steps:
+      - name: Checkout repo
+        uses: actions/checkout@v2
+
+      - name: Run Checkov action
+        id: checkov
+        uses: bridgecrewio/checkov-action@master
+        with:
+          directory: ./
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..2b35d38
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,34 @@
+# Local .terraform directories
+**/.terraform/*
+
+# .tfstate files
+*.tfstate
+*.tfstate.*
+.terraform.*
+# Crash log files
+crash.log
+
+# Exclude all .tfvars files, which are likely to contain sensitive data, such as
+# passwords, private keys, and other secrets. These should not be part of version
+# control as they are data points which are potentially sensitive and subject
+# to change depending on the environment.
+#
+*.tfvars
+
+# Ignore override files as they are usually used to override resources locally and so
+# are not checked in
+override.tf
+override.tf.json
+*_override.tf
+*_override.tf.json
+
+# Include override files you do wish to add to version control using negated pattern
+#
+# !example_override.tf
+
+# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
+# example: *tfplan*
+
+# Ignore CLI configuration files
+.terraformrc
+terraform.rc
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..b707d79
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,11 @@
+repos:
+- repo: https://github.com/antonbabenko/pre-commit-terraform
+  rev: v1.77.1
+  hooks:
+    - id: terraform_fmt
+    - id: terraform_validate
+    - id: terraform_checkov
+    - id: terraform_docs
+      args:
+        - '--args=--lockfile=false'
+
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..020b162
--- /dev/null
+++ b/README.md
@@ -0,0 +1,123 @@
+# Terraform module for Azure AKS
+
+## How to use it as a module
+
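+A minimal sketch of a module call; the source path and the network IDs are placeholders (see [examples/simple](examples/simple) for a complete, working configuration):
+
+```hcl
+module "aks" {
+  source = "path/to/this/module" # placeholder: local path, git URL or registry source
+
+  name                = "demo-aks"
+  location            = "East US"
+  resource_group_name = "demo-rg"
+  vnet_id             = "<vnet-id>"
+
+  default_node_pool = [
+    {
+      name           = "default"
+      node_count     = 1
+      vm_size        = "Standard_DS2_v2"
+      vnet_subnet_id = "<subnet-id>"
+    }
+  ]
+
+  identity = [{ type = "SystemAssigned" }]
+}
+```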
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 0.13.1 |
+| [azurerm](#requirement\_azurerm) | >= 3.0.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [azurerm](#provider\_azurerm) | >= 3.0.0 |
+
+## Modules
+
+No modules.
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [azurerm_kubernetes_cluster.this](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster) | resource |
+| [azurerm_kubernetes_cluster_node_pool.this](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster_node_pool) | resource |
+| [azurerm_role_assignment.app_gateway](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource |
+| [azurerm_role_assignment.rg_level](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource |
+| [azurerm_role_assignment.this](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource |
+| [azurerm_role_assignment.vnet_level](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource |
+| [azurerm_user_assigned_identity.this](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/user_assigned_identity) | resource |
+| [azurerm_subscription.this](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/subscription) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [aci\_connector\_linux](#input\_aci\_connector\_linux) | (Optional) An aci\_connector\_linux block as defined below. For more details, please visit Create and configure an AKS cluster to use virtual nodes. | `any` | `[]` | no |
+| [additional\_node\_pools](#input\_additional\_node\_pools) | (Optional) Additional node pools | `any` | `{}` | no |
+| [api\_server\_access\_profile](#input\_api\_server\_access\_profile) | (Optional) An api\_server\_access\_profile block | `any` | `[]` | no |
+| [auto\_scaler\_profile](#input\_auto\_scaler\_profile) | (Optional) An auto\_scaler\_profile block | `any` | `[]` | no |
+| [automatic\_channel\_upgrade](#input\_automatic\_channel\_upgrade) | (Optional) The upgrade channel for this Kubernetes Cluster. Possible values are patch, rapid, node-image and stable. Omitting this field sets this value to none. | `string` | `null` | no |
+| [azure\_active\_directory\_role\_based\_access\_control](#input\_azure\_active\_directory\_role\_based\_access\_control) | (Optional) An azure\_active\_directory\_role\_based\_access\_control block | `any` | `[]` | no |
+| [azure\_policy\_enabled](#input\_azure\_policy\_enabled) | (Optional) Should the Azure Policy Add-On be enabled? For more details please visit Understand Azure Policy for Azure Kubernetes Service | `bool` | `true` | no |
+| [confidential\_computing](#input\_confidential\_computing) | (Optional) A confidential\_computing block | `any` | `[]` | no |
+| [create\_aks](#input\_create\_aks) | Whether to create the AKS cluster | `bool` | `true` | no |
+| [default\_node\_pool](#input\_default\_node\_pool) | (Required) A default\_node\_pool block | `any` | n/a | yes |
+| [disk\_encryption\_set\_id](#input\_disk\_encryption\_set\_id) | (Optional) The ID of the Disk Encryption Set which should be used for the Nodes and Volumes. More information can be found in the documentation. Changing this forces a new resource to be created. | `string` | `null` | no |
+| [dns\_prefix](#input\_dns\_prefix) | (Optional) DNS prefix specified when creating the managed cluster. Possible values must begin and end with a letter or number, contain only letters, numbers, and hyphens and be between 1 and 54 characters in length. Changing this forces a new resource to be created. | `string` | `null` | no |
+| [dns\_prefix\_private\_cluster](#input\_dns\_prefix\_private\_cluster) | (Optional) Specifies the DNS prefix to use with private clusters. Changing this forces a new resource to be created. | `string` | `null` | no |
+| [edge\_zone](#input\_edge\_zone) | (Optional) Specifies the Edge Zone within the Azure Region where this Managed Kubernetes Cluster should exist. Changing this forces a new resource to be created. | `string` | `null` | no |
+| [http\_application\_routing\_enabled](#input\_http\_application\_routing\_enabled) | (Optional) Should HTTP Application Routing be enabled? | `bool` | `false` | no |
+| [http\_proxy\_config](#input\_http\_proxy\_config) | (Optional) An http\_proxy\_config block | `any` | `[]` | no |
+| [identity](#input\_identity) | (Optional) An identity block as defined below. One of either identity or service\_principal must be specified. | `any` | `[]` | no |
+| [image\_cleaner\_enabled](#input\_image\_cleaner\_enabled) | (Optional) Specifies whether Image Cleaner is enabled | `bool` | `false` | no |
+| [image\_cleaner\_interval\_hours](#input\_image\_cleaner\_interval\_hours) | (Optional) Specifies the interval in hours when images should be cleaned up. Defaults to 48. | `number` | `48` | no |
+| [ingress\_application\_gateway](#input\_ingress\_application\_gateway) | (Optional) An ingress\_application\_gateway block | `any` | `[]` | no |
+| [key\_management\_service](#input\_key\_management\_service) | (Optional) A key\_management\_service block as defined below. For more details, please visit Key Management Service (KMS) etcd encryption to an AKS cluster. | `any` | `[]` | no |
+| [key\_vault\_secrets\_provider](#input\_key\_vault\_secrets\_provider) | (Optional) A key\_vault\_secrets\_provider block as defined below. For more details, please visit Azure Keyvault Secrets Provider for AKS. | `any` | `[]` | no |
+| [kubelet\_identity](#input\_kubelet\_identity) | (Optional) A kubelet\_identity block | `any` | `[]` | no |
+| [kubernetes\_version](#input\_kubernetes\_version) | (Optional) Version of Kubernetes specified when creating the AKS managed cluster. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as 1.22 are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in the documentation. | `string` | `null` | no |
+| [linux\_profile](#input\_linux\_profile) | (Optional) A linux\_profile block | `any` | `[]` | no |
+| [local\_account\_disabled](#input\_local\_account\_disabled) | (Optional) If true local accounts will be disabled. See the documentation for more information. | `bool` | `false` | no |
+| [location](#input\_location) | (Required) The location where the Managed Kubernetes Cluster should be created. Changing this forces a new resource to be created. | `string` | n/a | yes |
+| [maintenance\_window](#input\_maintenance\_window) | (Optional) A maintenance\_window block | `any` | `[]` | no |
+| [microsoft\_defender](#input\_microsoft\_defender) | (Optional) A microsoft\_defender block | `any` | `[]` | no |
+| [monitor\_metrics](#input\_monitor\_metrics) | (Optional) Specifies a Prometheus add-on profile for the Kubernetes Cluster. A monitor\_metrics block as defined below. | `any` | `[]` | no |
+| [name](#input\_name) | (Required) The name of the Managed Kubernetes Cluster to create. Changing this forces a new resource to be created. | `string` | n/a | yes |
+| [network\_profile](#input\_network\_profile) | (Optional) A network\_profile block as defined below. Changing this forces a new resource to be created. | `any` | <pre>[<br>  {<br>    "load_balancer_sku": "standard",<br>    "network_plugin": "azure",<br>    "network_policy": "azure"<br>  }<br>]</pre> | no |
+| [node\_resource\_group](#input\_node\_resource\_group) | (Optional) The name of the Resource Group where the Kubernetes Nodes should exist. Changing this forces a new resource to be created. | `string` | `null` | no |
+| [oidc\_issuer\_enabled](#input\_oidc\_issuer\_enabled) | (Optional) Enable or Disable the OIDC issuer URL | `bool` | `false` | no |
+| [oms\_agent](#input\_oms\_agent) | (Optional) An oms\_agent block | `any` | `[]` | no |
+| [open\_service\_mesh\_enabled](#input\_open\_service\_mesh\_enabled) | (Optional) Is Open Service Mesh enabled? For more details, please visit Open Service Mesh for AKS. | `bool` | `false` | no |
+| [private\_cluster\_enabled](#input\_private\_cluster\_enabled) | (Optional) Should this Kubernetes Cluster have its API server only exposed on internal IP addresses? This provides a Private IP Address for the Kubernetes API on the Virtual Network where the Kubernetes Cluster is located. Defaults to false. Changing this forces a new resource to be created. | `bool` | `false` | no |
+| [private\_cluster\_public\_fqdn\_enabled](#input\_private\_cluster\_public\_fqdn\_enabled) | (Optional) Specifies whether a Public FQDN for this Private Cluster should be added. Defaults to false. | `bool` | `false` | no |
+| [private\_dns\_zone\_id](#input\_private\_dns\_zone\_id) | (Optional) Either the ID of Private DNS Zone which should be delegated to this Cluster, System to have AKS manage this or None. In case of None you will need to bring your own DNS server and set up resolving, otherwise, the cluster will have issues after provisioning. Changing this forces a new resource to be created. | `string` | `null` | no |
+| [public\_network\_access\_enabled](#input\_public\_network\_access\_enabled) | (Optional) Whether public network access is allowed for this Kubernetes Cluster. Defaults to true. Changing this forces a new resource to be created. | `bool` | `false` | no |
+| [resource\_group\_name](#input\_resource\_group\_name) | (Required) Specifies the Resource Group where the Managed Kubernetes Cluster should exist. Changing this forces a new resource to be created. | `string` | n/a | yes |
+| [role\_based\_access\_control\_enabled](#input\_role\_based\_access\_control\_enabled) | (Optional) Whether Role Based Access Control for the Kubernetes Cluster should be enabled. Defaults to true. Changing this forces a new resource to be created. | `bool` | `true` | no |
+| [run\_command\_enabled](#input\_run\_command\_enabled) | (Optional) Whether to enable run command for the cluster or not. Defaults to true. | `bool` | `true` | no |
+| [service\_mesh\_profile](#input\_service\_mesh\_profile) | (Optional) A service\_mesh\_profile block | `any` | `[]` | no |
+| [service\_principal](#input\_service\_principal) | (Optional) A service\_principal block as documented below. One of either identity or service\_principal must be specified. | `any` | `[]` | no |
+| [sku\_tier](#input\_sku\_tier) | (Optional) The SKU Tier that should be used for this Kubernetes Cluster. Possible values are Free, and Standard (which includes the Uptime SLA). Defaults to Free. | `string` | `"Standard"` | no |
+| [tags](#input\_tags) | (Optional) A mapping of tags to assign to the resource. | `map(string)` | `{}` | no |
+| [vnet\_id](#input\_vnet\_id) | (Required) The ID of the VNet on which the AKS managed identity should be granted the Network Contributor role in a private cluster | `string` | n/a | yes |
+| [workload\_autoscaler\_profile](#input\_workload\_autoscaler\_profile) | (Optional) A workload\_autoscaler\_profile block | `any` | `[]` | no |
+| [workload\_identity\_enabled](#input\_workload\_identity\_enabled) | (Optional) Specifies whether Azure AD Workload Identity should be enabled for the Cluster. Defaults to false. | `bool` | `false` | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [fqdn](#output\_fqdn) | The FQDN of the Azure Kubernetes Managed Cluster. |
+| [id](#output\_id) | The Kubernetes Managed Cluster ID. |
+| [ingress\_application\_gateway](#output\_ingress\_application\_gateway) | An ingress\_application\_gateway block describing the Application Gateway ingress controller add-on. |
+| [portal\_fqdn](#output\_portal\_fqdn) | The FQDN for the Azure Portal resources when private link has been enabled. |
+| [private\_fqdn](#output\_private\_fqdn) | The FQDN for the Kubernetes Cluster when private link has been enabled. |
+
diff --git a/data.tf b/data.tf
new file mode 100644
index 0000000..47de958
--- /dev/null
+++ b/data.tf
@@ -0,0 +1,2 @@
+data "azurerm_subscription" "this" {
+}
diff --git a/examples/simple/README.md b/examples/simple/README.md
new file mode 100644
index 0000000..6ede37d
--- /dev/null
+++ b/examples/simple/README.md
@@ -0,0 +1,61 @@
+# Example for using AKS Terraform module
+
+
+## Requirements
+
+No requirements.
+
+## Providers
+
+No providers.
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [aks](#module\_aks) | ../../ | n/a |
+
+## Resources
+
+No resources.
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [additional\_node\_pools](#input\_additional\_node\_pools) | (Optional) Additional node pools | `any` | `{}` | no |
+| [environment](#input\_environment) | Environment name used in resource naming, such as dev. | `string` | `"dev"` | no |
+| [location](#input\_location) | (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. | `string` | `"East US"` | no |
+| [log\_analytics\_workspace\_id](#input\_log\_analytics\_workspace\_id) | (Optional) The ID of the Log Analytics Workspace used by the OMS agent. | `string` | `""` | no |
+| [name](#input\_name) | (Required) Base name used for the AKS cluster and related resources. Changing this forces a new resource to be created. | `string` | `"test"` | no |
+| [node\_count](#input\_node\_count) | (Optional) The initial number of nodes which should exist in this Node Pool. If specified this must be between 1 and 1000 | `number` | `1` | no |
+| [os\_disk\_size\_gb](#input\_os\_disk\_size\_gb) | (Optional) The size of the OS Disk which should be used for each agent in the Node Pool. Changing this forces a new resource to be created. | `number` | `30` | no |
+| [public\_subnets\_id](#input\_public\_subnets\_id) | (Optional) IDs of subnets for the Application Gateway ingress controller; the first entry is used. | `list(string)` | `[]` | no |
+| [resource\_group\_name](#input\_resource\_group\_name) | (Required) The name of the resource group in which to create the AKS cluster. Changing this forces a new resource to be created. | `string` | `"terraform-test"` | no |
+| [retention\_policy](#input\_retention\_policy) | (Optional) A retention\_policy block | `any` | `[]` | no |
+| [tags](#input\_tags) | (Optional) A mapping of tags to assign to the resource. | `map(string)` | `{}` | no |
+| [vm\_size](#input\_vm\_size) | (Optional) The size of the Virtual Machine, such as Standard\_DS2\_v2 | `string` | `"Standard_DS2_v2"` | no |
+| [vnet\_id](#input\_vnet\_id) | (Optional) The ID of the VNet used for cluster role assignments. | `string` | `""` | no |
+| [vnet\_subnet\_id](#input\_vnet\_subnet\_id) | (Optional) The ID of the subnet where the default node pool should exist. | `string` | `""` | no |
+
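+The `additional_node_pools` input is a map keyed by pool name; each value follows the same schema as the module's node-pool arguments. A sketch (the subnet ID is a placeholder):
+
+```hcl
+additional_node_pools = {
+  extra = {
+    vm_size             = "Standard_DS2_v2"
+    enable_auto_scaling = true
+    min_count           = 1
+    max_count           = 5
+    vnet_subnet_id      = "<subnet-id>"
+  }
+}
+```
+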
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [aks\_id](#output\_aks\_id) | The Kubernetes Managed Cluster ID. |
+| [aks\_ingress\_application\_gateway](#output\_aks\_ingress\_application\_gateway) | An ingress\_application\_gateway block |
+
diff --git a/examples/simple/main.tf b/examples/simple/main.tf
new file mode 100644
index 0000000..09fe8fe
--- /dev/null
+++ b/examples/simple/main.tf
@@ -0,0 +1,85 @@
+module "aks" {
+  source = "../../"
+
+  kubernetes_version  = "1.25.6"
+  create_aks          = true
+  name                = format("%s-%s-mobile", var.name, var.environment)
+  location            = var.location
+  resource_group_name = var.resource_group_name
+  dns_prefix          = replace(format("%s-%s", var.name, var.environment), "/[\\W_]/", "-")
+  default_node_pool = [
+    {
+      name                   = "default"
+      node_count             = var.node_count
+      vm_size                = var.vm_size # "Standard_DS2_v2"
+      enable_host_encryption = false
+      vnet_subnet_id         = var.vnet_subnet_id
+      os_disk_size_gb        = var.os_disk_size_gb #30
+      enable_auto_scaling    = true
+      min_count              = var.node_count
+      max_count              = 5
+      upgrade_settings = [
+        {
+          max_surge = 25
+        }
+      ]
+    }
+  ]
+
+  ingress_application_gateway = [
+    {
+      gateway_name = format("%s-%s-ingress", var.name, var.environment)
+      subnet_id    = var.public_subnets_id[0]
+    }
+  ]
+  key_vault_secrets_provider = [
+    {
+      secret_rotation_enabled = true
+    }
+  ]
+
+  network_profile = [
+    {
+      network_plugin    = "azure"
+      load_balancer_sku = "standard"
+    }
+  ]
+
+  vnet_id = var.vnet_id
+
+  identity = [{
+    type = "SystemAssigned"
+  }]
+
+  auto_scaler_profile = [
+    { max_unready_nodes = 1 }
+  ]
+  /* workload_autoscaler_profile = [
+    { keda_enabled = false }
+  ] */
+
+  oms_agent = [
+    { log_analytics_workspace_id = var.log_analytics_workspace_id }
+  ]
+
+  tags = var.tags
+
+  additional_node_pools = {
+    "addon" = {
+      node_count             = var.node_count
+      vm_size                = var.vm_size
+      enable_host_encryption = false
+      vnet_subnet_id         = var.vnet_subnet_id
+      os_disk_size_gb        = var.os_disk_size_gb
+      enable_auto_scaling    = true
+      min_count              = var.node_count
+      max_count              = 5
+      upgrade_settings = [
+        {
+          max_surge = 25
+        }
+      ]
+
+    }
+  }
+}
diff --git a/examples/simple/outputs.tf b/examples/simple/outputs.tf
new file mode 100644
index 0000000..4bce904
--- /dev/null
+++ b/examples/simple/outputs.tf
@@ -0,0 +1,9 @@
+output "aks_id" {
+  description = "The Kubernetes Managed Cluster ID."
+  value       = module.aks.id
+}
+
+output "aks_ingress_application_gateway" {
+  description = "An ingress_application_gateway block"
+  value       = module.aks.ingress_application_gateway
+}
diff --git a/examples/simple/provider.tf b/examples/simple/provider.tf
new file mode 100644
index 0000000..73809ee
--- /dev/null
+++ b/examples/simple/provider.tf
@@ -0,0 +1,7 @@
+provider "azurerm" {
+  features {
+    resource_group {
+      prevent_deletion_if_contains_resources = false
+    }
+  }
+}
diff --git a/examples/simple/variables.tf b/examples/simple/variables.tf
new file mode 100644
index 0000000..5ba8c69
--- /dev/null
+++ b/examples/simple/variables.tf
@@ -0,0 +1,82 @@
+variable "location" {
+  type        = string
+  description = "(Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created."
+  default     = "East US"
+}
+
+variable "name" {
+  type        = string
+  description = "(Required) Base name used for the AKS cluster and related resources. Changing this forces a new resource to be created."
+  default     = "test"
+}
+variable "environment" {
+  type        = string
+  description = "Environment name used in resource naming, such as dev."
+  default     = "dev"
+}
+
+variable "resource_group_name" {
+  type        = string
+  description = "(Required) The name of the resource group in which to create the AKS cluster. Changing this forces a new resource to be created."
+  default     = "terraform-test"
+}
+
+variable "node_count" {
+  type        = number
+  description = "(Optional) The initial number of nodes which should exist in this Node Pool. If specified this must be between 1 and 1000"
+  default     = 1
+}
+
+variable "vm_size" {
+  type        = string
+  description = "(Optional) The size of the Virtual Machine, such as Standard_DS2_v2"
+  default     = "Standard_DS2_v2"
+}
+
+variable "os_disk_size_gb" {
+  type        = number
+  description = "(Optional) The size of the OS Disk which should be used for each agent in the Node Pool. Changing this forces a new resource to be created."
+  default     = 30
+}
+
+variable "additional_node_pools" {
+  type        = any
+  description = "(Optional) Additional node pools"
+  default     = {}
+}
+
+variable "retention_policy" {
+  type        = any
+  description = "(Optional) A retention_policy block"
+  default     = []
+}
+
+variable "vnet_subnet_id" {
+  type        = string
+  description = "(Optional) The ID of the subnet where the default node pool should exist."
+  default     = ""
+}
+
+variable "tags" {
+  type        = map(string)
+  description = "(Optional) A mapping of tags to assign to the resource."
+  default     = {}
+}
+
+variable "log_analytics_workspace_id" {
+  type        = string
+  description = "(Optional) The ID of the Log Analytics Workspace used by the OMS agent."
+  default     = ""
+}
+
+variable "vnet_id" {
+  type        = string
+  description = "(Optional) The ID of the VNet used for cluster role assignments."
+  default     = ""
+}
+
+variable "public_subnets_id" {
+  type        = list(string)
+  description = "(Optional) IDs of subnets for the Application Gateway ingress controller; the first entry is used."
+  default     = []
+}
diff --git a/locals.tf b/locals.tf
new file mode 100644
index 0000000..e69de29
diff --git a/main.tf b/main.tf
new file mode 100644
index 0000000..471c2eb
--- /dev/null
+++ b/main.tf
@@ -0,0 +1,603 @@
+resource "azurerm_user_assigned_identity" "this" {
+  count = var.create_aks ? 1 : 0
+
+  name                = format("%s-aks-identity", var.name)
+  resource_group_name = var.resource_group_name
+  location            = var.location
+  tags                = var.tags
+}
+
+resource "azurerm_role_assignment" "this" {
+  count = var.create_aks ? 1 : 0
+
+  scope                = var.vnet_id
+  role_definition_name = "Network Contributor"
+  principal_id         = azurerm_user_assigned_identity.this[0].principal_id
+}
+
+resource "azurerm_kubernetes_cluster" "this" {
+  count = var.create_aks ?
1 : 0 + + name = var.name + location = var.location + resource_group_name = var.resource_group_name + + + dynamic "default_node_pool" { + for_each = var.default_node_pool + + content { + name = try(default_node_pool.value.name) + vm_size = try(default_node_pool.value.vm_size) + capacity_reservation_group_id = try(default_node_pool.value.capacity_reservation_group_id, null) + custom_ca_trust_enabled = try(default_node_pool.value.custom_ca_trust_enabled, null) + enable_auto_scaling = try(default_node_pool.value.enable_auto_scaling, false) + enable_host_encryption = try(default_node_pool.value.enable_host_encryption, true) + enable_node_public_ip = try(default_node_pool.value.enable_node_public_ip, false) + host_group_id = try(default_node_pool.value.host_group_id, null) + + dynamic "kubelet_config" { + for_each = try(default_node_pool.value.kubelet_config, []) + + content { + allowed_unsafe_sysctls = try(kubelet_config.value.allowed_unsafe_sysctls, null) + container_log_max_line = try(kubelet_config.value.container_log_max_line, null) + container_log_max_size_mb = try(kubelet_config.value.container_log_max_size_mb, null) + cpu_cfs_quota_enabled = try(kubelet_config.value.cpu_cfs_quota_enabled, null) + cpu_cfs_quota_period = try(kubelet_config.value.cpu_cfs_quota_period, null) + cpu_manager_policy = try(kubelet_config.value.cpu_manager_policy, null) + image_gc_high_threshold = try(kubelet_config.value.image_gc_high_threshold, null) + image_gc_low_threshold = try(kubelet_config.value.image_gc_low_threshold, null) + pod_max_pid = try(kubelet_config.value.pod_max_pid, null) + topology_manager_policy = try(kubelet_config.value.topology_manager_policy, null) + } + + } + + dynamic "linux_os_config" { + for_each = try(default_node_pool.value.linux_os_config, []) + + content { + swap_file_size_mb = try(linux_os_config.value.swap_file_size_mb, null) + transparent_huge_page_defrag = try(linux_os_config.value.transparent_huge_page_defrag, null) + transparent_huge_page_enabled = try(linux_os_config.value.transparent_huge_page_enabled, null) + + dynamic "sysctl_config" { + for_each = try(linux_os_config.value.sysctl_config, []) + content { + fs_aio_max_nr = try(sysctl_config.value.fs_aio_max_nr, null) + fs_file_max = try(sysctl_config.value.fs_file_max, null) + fs_inotify_max_user_watches = try(sysctl_config.value.fs_inotify_max_user_watches, null) + fs_nr_open = try(sysctl_config.value.fs_nr_open, null) + kernel_threads_max = try(sysctl_config.value.kernel_threads_max, null) + net_core_netdev_max_backlog = try(sysctl_config.value.net_core_netdev_max_backlog, null) + net_core_optmem_max = try(sysctl_config.value.net_core_optmem_max, null) + net_core_rmem_default = try(sysctl_config.value.net_core_rmem_default, null) + net_core_rmem_max = try(sysctl_config.value.net_core_rmem_max, null) + net_core_somaxconn = try(sysctl_config.value.net_core_somaxconn, null) + net_core_wmem_default = try(sysctl_config.value.net_core_wmem_default, null) + net_core_wmem_max = try(sysctl_config.value.net_core_wmem_max, null) + net_ipv4_ip_local_port_range_max = try(sysctl_config.value.net_ipv4_ip_local_port_range_max, null) + net_ipv4_ip_local_port_range_min = try(sysctl_config.value.net_ipv4_ip_local_port_range_min, null) + net_ipv4_neigh_default_gc_thresh1 = try(sysctl_config.value.net_ipv4_neigh_default_gc_thresh1, null) + net_ipv4_neigh_default_gc_thresh2 = try(sysctl_config.value.net_ipv4_neigh_default_gc_thresh2, null) + net_ipv4_neigh_default_gc_thresh3 = try(sysctl_config.value.net_ipv4_neigh_default_gc_thresh3, null) 
+ net_ipv4_tcp_fin_timeout = try(sysctl_config.value.net_ipv4_tcp_fin_timeout, null) + net_ipv4_tcp_keepalive_intvl = try(sysctl_config.value.net_ipv4_tcp_keepalive_intvl, null) + net_ipv4_tcp_keepalive_probes = try(sysctl_config.value.net_ipv4_tcp_keepalive_probes, null) + net_ipv4_tcp_keepalive_time = try(sysctl_config.value.net_ipv4_tcp_keepalive_time, null) + net_ipv4_tcp_max_syn_backlog = try(sysctl_config.value.net_ipv4_tcp_max_syn_backlog, null) + net_ipv4_tcp_max_tw_buckets = try(sysctl_config.value.net_ipv4_tcp_max_tw_buckets, null) + net_ipv4_tcp_tw_reuse = try(sysctl_config.value.net_ipv4_tcp_tw_reuse, null) + net_netfilter_nf_conntrack_buckets = try(sysctl_config.value.net_netfilter_nf_conntrack_buckets, null) + net_netfilter_nf_conntrack_max = try(sysctl_config.value.net_netfilter_nf_conntrack_max, null) + vm_max_map_count = try(sysctl_config.value.vm_max_map_count, null) + vm_swappiness = try(sysctl_config.value.vm_swappiness, null) + vm_vfs_cache_pressure = try(sysctl_config.value.vm_vfs_cache_pressure, null) + } + } + } + } + fips_enabled = try(default_node_pool.value.fips_enabled, null) + kubelet_disk_type = try(default_node_pool.value.kubelet_disk_type, "OS") + + #checkov:skip=CKV_AZURE_168: MAX_PODS has been set to 110 + max_pods = try(default_node_pool.value.max_pods, 110) + message_of_the_day = try(default_node_pool.value.message_of_the_day, null) + dynamic "node_network_profile" { + for_each = try(default_node_pool.value.node_network_profile, []) + + content { + node_public_ip_tags = try(node_network_profile.value.node_public_ip_tags, {}) + } + } + node_public_ip_prefix_id = try(default_node_pool.value.node_public_ip_prefix_id, null) + node_labels = try(default_node_pool.value.node_labels, {}) + node_taints = try(default_node_pool.value.node_taints, []) + only_critical_addons_enabled = try(default_node_pool.value.only_critical_addons_enabled, null) + orchestrator_version = try(default_node_pool.value.orchestrator_version, null) + os_disk_size_gb = try(default_node_pool.value.os_disk_size_gb, 100) + os_disk_type = try(default_node_pool.value.os_disk_type, "Managed") + os_sku = try(default_node_pool.value.os_sku, "Ubuntu") + pod_subnet_id = try(default_node_pool.value.pod_subnet_id, null) + proximity_placement_group_id = try(default_node_pool.value.proximity_placement_group_id, null) + scale_down_mode = try(default_node_pool.value.scale_down_mode, "Delete") + temporary_name_for_rotation = try(default_node_pool.value.temporary_name_for_rotation, null) + type = try(default_node_pool.value.type, "VirtualMachineScaleSets") + tags = var.tags + ultra_ssd_enabled = try(default_node_pool.value.ultra_ssd_enabled, false) + + dynamic "upgrade_settings" { + for_each = try(default_node_pool.value.upgrade_settings, []) + content { + max_surge = try(upgrade_settings.value.max_surge) + } + } + vnet_subnet_id = try(default_node_pool.value.vnet_subnet_id, null) + workload_runtime = try(default_node_pool.value.workload_runtime, "OCIContainer") + zones = try(default_node_pool.value.zones, []) + max_count = try(default_node_pool.value.max_count, null) + min_count = try(default_node_pool.value.min_count, null) + node_count = default_node_pool.value.enable_auto_scaling ? 
null : try(default_node_pool.value.node_count, 3)
+
+    }
+  }
+
+  dns_prefix                 = var.dns_prefix
+  dns_prefix_private_cluster = var.dns_prefix_private_cluster
+
+  dynamic "aci_connector_linux" {
+    for_each = var.aci_connector_linux
+
+    content {
+      subnet_name = try(aci_connector_linux.value.subnet_name)
+    }
+
+  }
+
+
+  automatic_channel_upgrade = var.automatic_channel_upgrade
+
+  dynamic "api_server_access_profile" {
+    for_each = var.api_server_access_profile
+
+    content {
+      authorized_ip_ranges     = try(api_server_access_profile.value.authorized_ip_ranges, ["0.0.0.0/32"])
+      subnet_id                = try(api_server_access_profile.value.subnet_id, null)
+      vnet_integration_enabled = try(api_server_access_profile.value.vnet_integration_enabled, false)
+    }
+
+  }
+
+  dynamic "auto_scaler_profile" {
+    for_each = var.auto_scaler_profile
+
+    content {
+      balance_similar_node_groups      = try(auto_scaler_profile.value.balance_similar_node_groups, false)
+      expander                         = try(auto_scaler_profile.value.expander, "random")
+      max_graceful_termination_sec     = try(auto_scaler_profile.value.max_graceful_termination_sec, 600)
+      max_node_provisioning_time       = try(auto_scaler_profile.value.max_node_provisioning_time, "15m")
+      max_unready_nodes                = try(auto_scaler_profile.value.max_unready_nodes, 1)
+      max_unready_percentage           = try(auto_scaler_profile.value.max_unready_percentage, 45)
+      new_pod_scale_up_delay           = try(auto_scaler_profile.value.new_pod_scale_up_delay, "10s")
+      scale_down_delay_after_add       = try(auto_scaler_profile.value.scale_down_delay_after_add, "10m")
+      scale_down_delay_after_delete    = try(auto_scaler_profile.value.scale_down_delay_after_delete, "10s")
+      scale_down_delay_after_failure   = try(auto_scaler_profile.value.scale_down_delay_after_failure, "3m")
+      scan_interval                    = try(auto_scaler_profile.value.scan_interval, "10s")
+      scale_down_unneeded              = try(auto_scaler_profile.value.scale_down_unneeded, "10m")
+      scale_down_unready               = try(auto_scaler_profile.value.scale_down_unready, "20m")
+      scale_down_utilization_threshold = try(auto_scaler_profile.value.scale_down_utilization_threshold, "0.5")
+      empty_bulk_delete_max            = try(auto_scaler_profile.value.empty_bulk_delete_max, "10")
+      skip_nodes_with_local_storage    = try(auto_scaler_profile.value.skip_nodes_with_local_storage, true)
+      skip_nodes_with_system_pods      = try(auto_scaler_profile.value.skip_nodes_with_system_pods, true)
+
+    }
+
+  }
+
+  dynamic "azure_active_directory_role_based_access_control" {
+    for_each = var.azure_active_directory_role_based_access_control
+
+    content {
+      managed   = try(azure_active_directory_role_based_access_control.value.managed, null)
+      tenant_id = try(azure_active_directory_role_based_access_control.value.tenant_id, null)
+    }
+
+  }
+
+  azure_policy_enabled = var.azure_policy_enabled
+
+  dynamic "confidential_computing" {
+    for_each = var.confidential_computing
+
+    content {
+      sgx_quote_helper_enabled = try(confidential_computing.value.sgx_quote_helper_enabled)
+    }
+  }
+
+  disk_encryption_set_id           = var.disk_encryption_set_id
+  edge_zone                        = var.edge_zone
+  http_application_routing_enabled = var.http_application_routing_enabled
+
+  dynamic "http_proxy_config" {
+    for_each = var.http_proxy_config
+    content {
+      http_proxy  = try(http_proxy_config.value.http_proxy, null)
+      https_proxy = try(http_proxy_config.value.https_proxy, null)
+      no_proxy    = try(http_proxy_config.value.no_proxy, null)
+    }
+  }
+
+  dynamic "identity" {
+    for_each = var.identity
+
+    content {
+      type         = try(identity.value.type)
+      identity_ids = try(identity.value.identity_ids, [])
+    }
+  }
+
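+  # Optional blocks are modelled as lists: an empty list omits the block, and
+  # try() supplies the documented provider default for any unset attribute.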
image_cleaner_enabled = var.image_cleaner_enabled + image_cleaner_interval_hours = var.image_cleaner_interval_hours + + dynamic "ingress_application_gateway" { + for_each = var.ingress_application_gateway + + content { + gateway_id = try(ingress_application_gateway.value.gateway_id, null) + gateway_name = try(ingress_application_gateway.value.gateway_name, null) + subnet_cidr = try(ingress_application_gateway.value.subnet_cidr, null) + subnet_id = try(ingress_application_gateway.value.subnet_id, null) + } + } + + dynamic "key_management_service" { + for_each = var.key_management_service + content { + key_vault_key_id = try(key_management_service.value.key_vault_key_id) + key_vault_network_access = try(key_management_service.value.key_vault_network_access, "Public") + } + } + + dynamic "key_vault_secrets_provider" { + for_each = var.key_vault_secrets_provider + content { + secret_rotation_enabled = try(key_vault_secrets_provider.value.secret_rotation_enabled, true) + secret_rotation_interval = try(key_vault_secrets_provider.value.secret_rotation_interval, "2m") + } + } + + dynamic "kubelet_identity" { + for_each = var.kubelet_identity + content { + client_id = try(kubelet_identity.value.client_id, null) + object_id = try(kubelet_identity.value.object_id, null) + user_assigned_identity_id = try(kubelet_identity.value.user_assigned_identity_id, null) + } + + } + + kubernetes_version = var.kubernetes_version + + dynamic "linux_profile" { + for_each = var.linux_profile + content { + admin_username = try(linux_profile.value.admin_username) + dynamic "ssh_key" { + for_each = try(linux_profile.value.ssh_key) + content { + key_data = try(ssh_key.value.key_data) + } + + } + } + } + + #checkov:skip=CKV_AZURE_141 + local_account_disabled = var.local_account_disabled + + dynamic "maintenance_window" { + for_each = var.maintenance_window + + content { + dynamic "allowed" { + for_each = try(maintenance_window.value.allowed, []) + content { + day = try(allowed.value.day) + hours = try(allowed.value.hours) + } + } + dynamic "not_allowed" { + for_each = try(maintenance_window.value.not_allowed, []) + content { + end = try(not_allowed.value.end) + start = try(not_allowed.value.start) + } + } + } + } + dynamic "microsoft_defender" { + for_each = var.microsoft_defender + content { + log_analytics_workspace_id = try(microsoft_defender.value.log_analytics_workspace_id) + } + } + + dynamic "monitor_metrics" { + for_each = var.monitor_metrics + content { + annotations_allowed = try(monitor_metrics.value.annotations_allowed, null) + labels_allowed = try(monitor_metrics.value.labels_allowed, null) + } + } + + dynamic "network_profile" { + for_each = var.network_profile + content { + network_plugin = try(network_profile.value.network_plugin, "azure") + network_mode = try(network_profile.value.network_mode, "transparent") + #checkov:skip=CKV_AZURE_7: network_policy already been set to Azure + network_policy = try(network_profile.value.network_policy, "azure") + dns_service_ip = try(network_profile.value.dns_service_ip, null) + docker_bridge_cidr = try(network_profile.value.docker_bridge_cidr, null) + ebpf_data_plane = try(network_profile.value.ebpf_data_plane, null) + network_plugin_mode = try(network_profile.value.network_plugin_mode, null) + outbound_type = try(network_profile.value.outbound_type, null) + pod_cidr = try(network_profile.value.pod_cidr, null) + pod_cidrs = try(network_profile.value.pod_cidrs, []) + service_cidr = try(network_profile.value.service_cidr, null) + service_cidrs = 
try(network_profile.value.service_cidrs, [])
+      ip_versions       = try(network_profile.value.ip_versions, null)
+      load_balancer_sku = try(network_profile.value.load_balancer_sku, "standard")
+
+      dynamic "load_balancer_profile" {
+        for_each = try(network_profile.value.load_balancer_profile, [])
+
+        content {
+          idle_timeout_in_minutes     = try(load_balancer_profile.value.idle_timeout_in_minutes, 30)
+          managed_outbound_ip_count   = try(load_balancer_profile.value.managed_outbound_ip_count, null)
+          managed_outbound_ipv6_count = try(load_balancer_profile.value.managed_outbound_ipv6_count, null)
+          outbound_ip_address_ids     = try(load_balancer_profile.value.outbound_ip_address_ids, null)
+          outbound_ip_prefix_ids      = try(load_balancer_profile.value.outbound_ip_prefix_ids, null)
+          outbound_ports_allocated    = try(load_balancer_profile.value.outbound_ports_allocated, 0)
+
+        }
+
+      }
+
+      dynamic "nat_gateway_profile" {
+        for_each = try(network_profile.value.nat_gateway_profile, [])
+
+        content {
+          idle_timeout_in_minutes   = try(nat_gateway_profile.value.idle_timeout_in_minutes, 4)
+          managed_outbound_ip_count = try(nat_gateway_profile.value.managed_outbound_ip_count, null)
+        }
+
+      }
+    }
+  }
+
+  node_resource_group = var.node_resource_group
+  oidc_issuer_enabled = var.oidc_issuer_enabled
+
+  dynamic "oms_agent" {
+    for_each = var.oms_agent
+    content {
+      log_analytics_workspace_id      = try(oms_agent.value.log_analytics_workspace_id)
+      msi_auth_for_monitoring_enabled = try(oms_agent.value.msi_auth_for_monitoring_enabled, false)
+    }
+  }
+
+  open_service_mesh_enabled           = var.open_service_mesh_enabled
+  private_cluster_enabled             = var.private_cluster_enabled
+  private_dns_zone_id                 = var.private_dns_zone_id
+  private_cluster_public_fqdn_enabled = var.private_cluster_public_fqdn_enabled
+
+  dynamic "service_mesh_profile" {
+    for_each = var.service_mesh_profile
+
+    content {
+      mode = try(service_mesh_profile.value.mode, "Istio")
+    }
+  }
+
+  dynamic "workload_autoscaler_profile" {
+    for_each = var.workload_autoscaler_profile
+
+    content {
+      keda_enabled                    = try(workload_autoscaler_profile.value.keda_enabled, null)
+      vertical_pod_autoscaler_enabled = try(workload_autoscaler_profile.value.vertical_pod_autoscaler_enabled, null)
+    }
+  }
+
+  workload_identity_enabled         = var.workload_identity_enabled
+  public_network_access_enabled     = var.public_network_access_enabled
+  role_based_access_control_enabled = var.role_based_access_control_enabled
+  run_command_enabled               = var.run_command_enabled
+
+  dynamic "service_principal" {
+    for_each = var.service_principal
+
+    content {
+      client_id     = try(service_principal.value.client_id)
+      client_secret = try(service_principal.value.client_secret)
+    }
+  }
+
+  #checkov:skip=CKV_AZURE_170: we are using Standard (which includes the Uptime SLA)
+  sku_tier = var.sku_tier
+
+  tags = var.tags
+
+  lifecycle {
+    ignore_changes = [
+      # Ignore drift on attributes that AKS itself mutates after creation
+      # (AGIC wiring in network_profile, the managed kubelet identity).
+      network_profile,
+      kubelet_identity,
+    ]
+  }
+
+}
+
+# Allow user assigned identity to manage AKS items in RG
+resource "azurerm_role_assignment" "rg_level" {
+  count                = var.create_aks ? 1 : 0
+  principal_id         = azurerm_kubernetes_cluster.this[0].kubelet_identity[0].object_id
+  scope                = format("/subscriptions/%s/resourceGroups/%s", data.azurerm_subscription.this.subscription_id, var.resource_group_name)
+  role_definition_name = "Contributor"
+}
+
+resource "azurerm_role_assignment" "vnet_level" {
+  count = var.create_aks ? 1 : 0
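+  # The kubelet (node pool) identity also needs Network Contributor on the
+  # VNet so that nodes can join their subnets.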
+
+  scope                = var.vnet_id
+  role_definition_name = "Network Contributor"
+  principal_id         = azurerm_kubernetes_cluster.this[0].kubelet_identity[0].object_id
+}
+
+resource "azurerm_kubernetes_cluster_node_pool" "this" {
+  for_each = var.create_aks ? var.additional_node_pools : {}
+
+  name                          = each.key
+  kubernetes_cluster_id         = azurerm_kubernetes_cluster.this[0].id
+  vm_size                       = try(each.value.vm_size)
+  capacity_reservation_group_id = try(each.value.capacity_reservation_group_id, null)
+  custom_ca_trust_enabled       = try(each.value.custom_ca_trust_enabled, false)
+  enable_auto_scaling           = try(each.value.enable_auto_scaling, true)
+  enable_host_encryption        = try(each.value.enable_host_encryption, false)
+  enable_node_public_ip         = try(each.value.enable_node_public_ip, false)
+  eviction_policy               = try(each.value.eviction_policy, null)
+  host_group_id                 = try(each.value.host_group_id, null)
+
+  dynamic "kubelet_config" {
+    for_each = try(each.value.kubelet_config, [])
+
+    content {
+      allowed_unsafe_sysctls    = try(kubelet_config.value.allowed_unsafe_sysctls, null)
+      container_log_max_line    = try(kubelet_config.value.container_log_max_line, null)
+      container_log_max_size_mb = try(kubelet_config.value.container_log_max_size_mb, null)
+      cpu_cfs_quota_enabled     = try(kubelet_config.value.cpu_cfs_quota_enabled, null)
+      cpu_cfs_quota_period      = try(kubelet_config.value.cpu_cfs_quota_period, null)
+      cpu_manager_policy        = try(kubelet_config.value.cpu_manager_policy, null)
+      image_gc_high_threshold   = try(kubelet_config.value.image_gc_high_threshold, null)
+      image_gc_low_threshold    = try(kubelet_config.value.image_gc_low_threshold, null)
+      pod_max_pid               = try(kubelet_config.value.pod_max_pid, null)
+      topology_manager_policy   = try(kubelet_config.value.topology_manager_policy, null)
+    }
+
+  }
+
+  dynamic "linux_os_config" {
+    for_each = try(each.value.linux_os_config, [])
+
+    content {
+      swap_file_size_mb             = try(linux_os_config.value.swap_file_size_mb, null)
+      transparent_huge_page_defrag  = try(linux_os_config.value.transparent_huge_page_defrag, null)
+      transparent_huge_page_enabled = try(linux_os_config.value.transparent_huge_page_enabled, null)
+
+      dynamic "sysctl_config" {
+        for_each = try(linux_os_config.value.sysctl_config, [])
+        content {
+          fs_aio_max_nr                      = try(sysctl_config.value.fs_aio_max_nr, null)
+          fs_file_max                        = try(sysctl_config.value.fs_file_max, null)
+          fs_inotify_max_user_watches        = try(sysctl_config.value.fs_inotify_max_user_watches, null)
+          fs_nr_open                         = try(sysctl_config.value.fs_nr_open, null)
+          kernel_threads_max                 = try(sysctl_config.value.kernel_threads_max, null)
+          net_core_netdev_max_backlog        = try(sysctl_config.value.net_core_netdev_max_backlog, null)
+          net_core_optmem_max                = try(sysctl_config.value.net_core_optmem_max, null)
+          net_core_rmem_default              = try(sysctl_config.value.net_core_rmem_default, null)
+          net_core_rmem_max                  = try(sysctl_config.value.net_core_rmem_max, null)
+          net_core_somaxconn                 = try(sysctl_config.value.net_core_somaxconn, null)
+          net_core_wmem_default              = try(sysctl_config.value.net_core_wmem_default, null)
+          net_core_wmem_max                  = try(sysctl_config.value.net_core_wmem_max, null)
+          net_ipv4_ip_local_port_range_max   = try(sysctl_config.value.net_ipv4_ip_local_port_range_max, null)
+          net_ipv4_ip_local_port_range_min   = try(sysctl_config.value.net_ipv4_ip_local_port_range_min, null)
+          net_ipv4_neigh_default_gc_thresh1  = try(sysctl_config.value.net_ipv4_neigh_default_gc_thresh1, null)
+          net_ipv4_neigh_default_gc_thresh2  = try(sysctl_config.value.net_ipv4_neigh_default_gc_thresh2, null)
+          net_ipv4_neigh_default_gc_thresh3  = try(sysctl_config.value.net_ipv4_neigh_default_gc_thresh3, null)
+          net_ipv4_tcp_fin_timeout           = try(sysctl_config.value.net_ipv4_tcp_fin_timeout, null)
+          net_ipv4_tcp_keepalive_intvl       = try(sysctl_config.value.net_ipv4_tcp_keepalive_intvl, null)
+          net_ipv4_tcp_keepalive_probes      = try(sysctl_config.value.net_ipv4_tcp_keepalive_probes, null)
+          net_ipv4_tcp_keepalive_time        = try(sysctl_config.value.net_ipv4_tcp_keepalive_time, null)
+          net_ipv4_tcp_max_syn_backlog       = try(sysctl_config.value.net_ipv4_tcp_max_syn_backlog, null)
+          net_ipv4_tcp_max_tw_buckets        = try(sysctl_config.value.net_ipv4_tcp_max_tw_buckets, null)
+          net_ipv4_tcp_tw_reuse              = try(sysctl_config.value.net_ipv4_tcp_tw_reuse, null)
+          net_netfilter_nf_conntrack_buckets = try(sysctl_config.value.net_netfilter_nf_conntrack_buckets, null)
+          net_netfilter_nf_conntrack_max     = try(sysctl_config.value.net_netfilter_nf_conntrack_max, null)
+          vm_max_map_count                   = try(sysctl_config.value.vm_max_map_count, null)
+          vm_swappiness                      = try(sysctl_config.value.vm_swappiness, null)
+          vm_vfs_cache_pressure              = try(sysctl_config.value.vm_vfs_cache_pressure, null)
+        }
+      }
+    }
+  }
+  fips_enabled      = try(each.value.fips_enabled, null)
+  kubelet_disk_type = try(each.value.kubelet_disk_type, "OS")
+  #checkov:skip=CKV_AZURE_168: MAX_PODS has been set to 50
+  max_pods           = try(each.value.max_pods, 50)
+  message_of_the_day = try(each.value.message_of_the_day, null)
+  mode               = try(each.value.mode, "System")
+
+  dynamic "node_network_profile" {
+    for_each = try(each.value.node_network_profile, [])
+
+    content {
+      node_public_ip_tags = try(node_network_profile.value.node_public_ip_tags, {})
+    }
+
+  }
+
+  node_labels                  = try(each.value.node_labels, {})
+  node_public_ip_prefix_id     = try(each.value.node_public_ip_prefix_id, null)
+  node_taints                  = try(each.value.node_taints, [])
+  orchestrator_version         = try(each.value.orchestrator_version, null)
+  os_disk_size_gb              = try(each.value.os_disk_size_gb, 30)
+  os_disk_type                 = try(each.value.os_disk_type, "Managed")
+  pod_subnet_id                = try(each.value.pod_subnet_id, null)
+  os_sku                       = try(each.value.os_sku, "Ubuntu")
+  os_type                      = try(each.value.os_type, "Linux")
+  priority                     = try(each.value.priority, "Regular")
+  proximity_placement_group_id = try(each.value.proximity_placement_group_id, null)
+  spot_max_price               = try(each.value.spot_max_price, null)
+  /* snapshot_id = try(each.value.snapshot_id, null) */
+  scale_down_mode   = try(each.value.scale_down_mode, "Delete")
+  ultra_ssd_enabled = try(each.value.ultra_ssd_enabled, false)
+
+  dynamic "upgrade_settings" {
+    for_each = try(each.value.upgrade_settings, [])
+    content {
+      max_surge = try(upgrade_settings.value.max_surge)
+    }
+  }
+
+  vnet_subnet_id = try(each.value.vnet_subnet_id, null)
+
+  dynamic "windows_profile" {
+    for_each = try(each.value.windows_profile, [])
+
+    content {
+      outbound_nat_enabled = try(windows_profile.value.outbound_nat_enabled, false)
+    }
+  }
+
+  workload_runtime = try(each.value.workload_runtime, "OCIContainer")
+  zones            = try(each.value.zones, [])
+  max_count        = try(each.value.enable_auto_scaling, true) ? try(each.value.max_count, 4) : null
+  min_count        = try(each.value.enable_auto_scaling, true) ? try(each.value.min_count, 1) : null
+  node_count       = try(each.value.enable_auto_scaling, true) ? null : try(each.value.node_count, 1)
+  tags             = try(each.value.tags, {})
+}
+
+resource "azurerm_role_assignment" "app_gateway" {
+  count = var.create_aks && length(var.ingress_application_gateway) > 0 ? 1 : 0
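+  # Grants the AGIC add-on identity Network Contributor on the VNet so it can
+  # program the Application Gateway; created only when the add-on is configured.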
+  scope                = var.vnet_id
+  role_definition_name = "Network Contributor"
+  principal_id         = azurerm_kubernetes_cluster.this[0].ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id
+}
\ No newline at end of file
diff --git a/outputs.tf b/outputs.tf
new file mode 100644
index 0000000..908e77c
--- /dev/null
+++ b/outputs.tf
@@ -0,0 +1,24 @@
+output "id" {
+  description = "The Kubernetes Managed Cluster ID."
+  value       = try(azurerm_kubernetes_cluster.this[0].id, "")
+}
+
+output "fqdn" {
+  description = "The FQDN of the Azure Kubernetes Managed Cluster."
+  value       = try(azurerm_kubernetes_cluster.this[0].fqdn, "")
+}
+
+output "private_fqdn" {
+  description = "The FQDN for the Kubernetes Cluster when private link has been enabled."
+  value       = try(azurerm_kubernetes_cluster.this[0].private_fqdn, "")
+}
+
+output "portal_fqdn" {
+  description = "The FQDN for the Azure Portal resources when private link has been enabled."
+  value       = try(azurerm_kubernetes_cluster.this[0].portal_fqdn, "")
+}
+
+output "ingress_application_gateway" {
+  description = "An ingress_application_gateway block describing the Application Gateway ingress controller add-on."
+  value       = try(azurerm_kubernetes_cluster.this[0].ingress_application_gateway, [])
+}
diff --git a/variables.tf b/variables.tf
new file mode 100644
index 0000000..de1c043
--- /dev/null
+++ b/variables.tf
@@ -0,0 +1,304 @@
+### AKS
+variable "create_aks" {
+  type        = bool
+  description = "Whether to create the AKS cluster"
+  default     = true
+}
+
+variable "name" {
+  type        = string
+  description = "(Required) The name of the Managed Kubernetes Cluster to create. Changing this forces a new resource to be created."
+}
+
+variable "location" {
+  type        = string
+  description = "(Required) The location where the Managed Kubernetes Cluster should be created. Changing this forces a new resource to be created."
+}
+
+variable "resource_group_name" {
+  type        = string
+  description = "(Required) Specifies the Resource Group where the Managed Kubernetes Cluster should exist. Changing this forces a new resource to be created."
+}
+
+variable "dns_prefix" {
+  type        = string
+  description = "(Optional) DNS prefix specified when creating the managed cluster. Possible values must begin and end with a letter or number, contain only letters, numbers, and hyphens and be between 1 and 54 characters in length. Changing this forces a new resource to be created."
+  default     = null
+}
+
+variable "dns_prefix_private_cluster" {
+  type        = string
+  description = "(Optional) Specifies the DNS prefix to use with private clusters. Changing this forces a new resource to be created."
+  default     = null
+}
+
+variable "automatic_channel_upgrade" {
+  type        = string
+  description = "(Optional) The upgrade channel for this Kubernetes Cluster. Possible values are patch, rapid, node-image and stable. Omitting this field sets this value to none."
+  default     = null
+}
+
+variable "azure_policy_enabled" {
+  type        = bool
+  description = "(Optional) Should the Azure Policy Add-On be enabled? For more details please visit Understand Azure Policy for Azure Kubernetes Service"
+  default     = true
+}
+
+
+variable "disk_encryption_set_id" {
+  type        = string
+  description = "(Optional) The ID of the Disk Encryption Set which should be used for the Nodes and Volumes. More information can be found in the documentation. Changing this forces a new resource to be created."
+  default     = null
+}
+
+variable "edge_zone" {
+  type        = string
+  description = "(Optional) Specifies the Edge Zone within the Azure Region where this Managed Kubernetes Cluster should exist. Changing this forces a new resource to be created."
+ default = null +} + +variable "http_application_routing_enabled" { + type = bool + description = "(Optional) Should HTTP Application Routing be enabled?" + default = false +} + +variable "image_cleaner_enabled" { + type = bool + description = "(Optional) Specifies whether Image Cleaner is enabled" + default = false +} + +variable "image_cleaner_interval_hours" { + type = number + description = " (Optional) Specifies the interval in hours when images should be cleaned up. Defaults to 48." + default = 48 +} + +variable "kubernetes_version" { + type = string + description = "(Optional) Version of Kubernetes specified when creating the AKS managed cluster. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as 1.22 are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in the documentation." + default = null +} + +variable "local_account_disabled" { + type = bool + description = "(Optional) If true local accounts will be disabled. See the documentation for more information." + default = false +} + +variable "node_resource_group" { + type = string + description = "(Optional) The name of the Resource Group where the Kubernetes Nodes should exist. Changing this forces a new resource to be created." + default = null +} + +variable "oidc_issuer_enabled" { + type = bool + description = "(Optional) Enable or Disable the OIDC issuer URL" + default = false +} + +variable "open_service_mesh_enabled" { + type = bool + description = "(Optional) Is Open Service Mesh enabled? For more details, please visit Open Service Mesh for AKS." + default = false +} + +variable "private_cluster_enabled" { + type = bool + description = "(Optional) Should this Kubernetes Cluster have its API server only exposed on internal IP addresses? This provides a Private IP Address for the Kubernetes API on the Virtual Network where the Kubernetes Cluster is located. Defaults to false. Changing this forces a new resource to be created." + default = false +} + +variable "private_dns_zone_id" { + type = string + description = "(Optional) Either the ID of Private DNS Zone which should be delegated to this Cluster, System to have AKS manage this or None. In case of None you will need to bring your own DNS server and set up resolving, otherwise, the cluster will have issues after provisioning. Changing this forces a new resource to be created." + default = null +} + +variable "private_cluster_public_fqdn_enabled" { + type = bool + description = "(Optional) Specifies whether a Public FQDN for this Private Cluster should be added. Defaults to false." + default = false +} + +variable "workload_identity_enabled" { + type = bool + description = "(Optional) Specifies whether Azure AD Workload Identity should be enabled for the Cluster. Defaults to false." + default = false +} + +variable "public_network_access_enabled" { + type = bool + description = "(Optional) Whether public network access is allowed for this Kubernetes Cluster. Defaults to true. Changing this forces a new resource to be created." + default = false +} + +variable "role_based_access_control_enabled" { + type = bool + description = "(Optional) Whether Role Based Access Control for the Kubernetes Cluster should be enabled. Defaults to true. Changing this forces a new resource to be created." 
+  default     = true
+}
+
+variable "run_command_enabled" {
+  type        = bool
+  description = "(Optional) Whether to enable run command for the cluster or not. Defaults to true."
+  default     = true
+}
+
+variable "sku_tier" {
+  type        = string
+  description = "(Optional) The SKU Tier that should be used for this Kubernetes Cluster. Possible values are Free, and Standard (which includes the Uptime SLA). Defaults to Free."
+  default     = "Standard"
+}
+
+variable "default_node_pool" {
+  type        = any
+  description = "(Required) A default_node_pool block"
+}
+
+variable "api_server_access_profile" {
+  type        = any
+  description = "(Optional) An api_server_access_profile block"
+  default     = []
+}
+
+variable "auto_scaler_profile" {
+  type        = any
+  description = "(Optional) An auto_scaler_profile block"
+  default     = []
+}
+
+variable "azure_active_directory_role_based_access_control" {
+  type        = any
+  description = "(Optional) An azure_active_directory_role_based_access_control block"
+  default     = []
+}
+
+variable "confidential_computing" {
+  type        = any
+  description = "(Optional) A confidential_computing block"
+  default     = []
+}
+
+variable "http_proxy_config" {
+  type        = any
+  description = "(Optional) An http_proxy_config block"
+  default     = []
+}
+
+variable "identity" {
+  type        = any
+  description = "(Optional) An identity block as defined below. One of either identity or service_principal must be specified."
+  default     = []
+}
+
+variable "ingress_application_gateway" {
+  type        = any
+  description = "(Optional) An ingress_application_gateway block"
+  default     = []
+}
+
+variable "key_management_service" {
+  type        = any
+  description = "(Optional) A key_management_service block as defined below. For more details, please visit Key Management Service (KMS) etcd encryption to an AKS cluster."
+  default     = []
+}
+
+variable "key_vault_secrets_provider" {
+  type        = any
+  description = "(Optional) A key_vault_secrets_provider block as defined below. For more details, please visit Azure Keyvault Secrets Provider for AKS."
+  default     = []
+}
+
+variable "kubelet_identity" {
+  type        = any
+  description = "(Optional) A kubelet_identity block"
+  default     = []
+}
+
+variable "linux_profile" {
+  type        = any
+  description = "(Optional) A linux_profile block"
+  default     = []
+}
+
+variable "maintenance_window" {
+  type        = any
+  description = "(Optional) A maintenance_window block"
+  default     = []
+}
+
+variable "microsoft_defender" {
+  type        = any
+  description = "(Optional) A microsoft_defender block"
+  default     = []
+}
+
+variable "monitor_metrics" {
+  type        = any
+  description = "(Optional) Specifies a Prometheus add-on profile for the Kubernetes Cluster. A monitor_metrics block as defined below."
+  default     = []
+}
+
+variable "network_profile" {
+  type        = any
+  description = "(Optional) A network_profile block as defined below. Changing this forces a new resource to be created."
+  default = [
+    {
+      network_plugin    = "azure"
+      load_balancer_sku = "standard"
+      network_policy    = "azure"
+    }
+  ]
+}
+
+variable "oms_agent" {
+  type        = any
+  description = "(Optional) An oms_agent block"
+  default     = []
+}
+
+variable "service_mesh_profile" {
+  type        = any
+  description = "(Optional) A service_mesh_profile block"
+  default     = []
+}
+
+variable "workload_autoscaler_profile" {
+  type        = any
+  description = "(Optional) A workload_autoscaler_profile block"
+  default     = []
+}
+
+variable "service_principal" {
+  type        = any
+  description = "(Optional) A service_principal block as documented below. One of either identity or service_principal must be specified."
+  default     = []
+}
+
+variable "aci_connector_linux" {
+  type        = any
+  description = "(Optional) An aci_connector_linux block as defined below. For more details, please visit Create and configure an AKS cluster to use virtual nodes."
+  default     = []
+}
+
+
+variable "tags" {
+  type        = map(string)
+  description = "(Optional) A mapping of tags to assign to the resource."
+  default     = {}
+}
+
+variable "vnet_id" {
+  type        = string
+  description = "(Required) The ID of the VNet on which the AKS managed identity should be granted the Network Contributor role in a private cluster"
+
+}
+
+variable "additional_node_pools" {
+  type        = any
+  description = "(Optional) Additional node pools"
+  default     = {}
+}
diff --git a/versions.tf b/versions.tf
new file mode 100644
index 0000000..732006a
--- /dev/null
+++ b/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+  required_version = ">= 0.13.1"
+
+  required_providers {
+    azurerm = {
+      source  = "hashicorp/azurerm"
+      version = ">= 3.0.0"
+    }
+  }
+}
\ No newline at end of file