forked from tibordp/terraform-hcloud-dualstack-k8s
-
Notifications
You must be signed in to change notification settings - Fork 0
/
worker.tf
46 lines (37 loc) · 1.28 KB
/
worker.tf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
# One Hetzner Cloud VM per worker node, provisioned through the local
# kubernetes-node module.
module "worker" {
  source = "./modules/kubernetes-node"
  count  = var.worker_count

  # Machine identity and placement.
  name        = "${var.name}-worker-${count.index}"
  image       = var.image
  location    = var.location
  server_type = var.worker_server_type

  # Access and networking.
  hcloud_ssh_key       = var.hcloud_ssh_key
  ssh_private_key_path = var.ssh_private_key_path
  firewall_ids         = var.firewall_ids
  tailscale_auth_key   = var.tailscale_auth_key

  # User-supplied labels, plus cluster name and node role for this cluster.
  labels = merge(var.labels, { cluster = var.name, role = "worker" })
}
# Joins each worker to the cluster once the control plane is bootstrapped.
# A short-lived (60m) join token is generated on the kubeadm host and the
# resulting `kubeadm join ...` command is piped directly into the worker
# over SSH, so the token never lands in local state or on disk.
resource "null_resource" "worker_join" {
  count = var.worker_count

  depends_on = [
    null_resource.cluster_bootstrap
  ]

  # Re-run the join whenever the underlying server is replaced.
  triggers = {
    instance_id = module.worker[count.index].id
  }

  # NOTE(review): the original nested `connection` block was removed — it is
  # only honored by remote-exec/file provisioners, and this resource uses
  # local-exec, which ignores it. It also forced a plan-time read of the
  # private key via file().
  provisioner "local-exec" {
    # Host-key checks are disabled because the servers are freshly created
    # and their keys are not yet known.
    command = <<EOT
ssh -i ${var.ssh_private_key_path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
root@${local.kubeadm_host} 'kubeadm token create --print-join-command --ttl=60m' | \
ssh -i ${var.ssh_private_key_path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
root@${module.worker[count.index].ipv4_address}
EOT
  }
}