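// Typical usage (a sketch, assuming the standard Terraform workflow for this
// directory): copy this file to terraform.tfvars next to the *.tf files, fill
// in the values below, then run:
//   terraform init
//   terraform apply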
// ID identifying the cluster to create. Use your username so that resources created can be tracked back to you.
cluster_id = "example-cluster"
// Domain of the cluster. This should be "${cluster_id}.${base_domain}".
cluster_domain = "example-cluster.devcluster.openshift.com"
// Base domain from which the cluster domain is a subdomain.
base_domain = "devcluster.openshift.com"
// Name of the vSphere server. The dev cluster is on "vcsa.vmware.devcluster.openshift.com".
vsphere_server = "vcsa.vmware.devcluster.openshift.com"
// User on the vSphere server.
vsphere_user = "YOUR_USER"
// Password of the user on the vSphere server.
vsphere_password = "YOUR_PASSWORD"
// Name of the VM template to clone to create VMs for the cluster. The dev cluster has a template named "rhcos-latest".
vm_template = "rhcos-latest"
// The machine_cidr from which IP addresses will be assigned to cluster nodes.
// Additionally, the IPAM server will assign IPs based on this network ID.
machine_cidr = "10.0.0.0/24"
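// For the example above, 10.0.0.0/24, the network ID used by IPAM is 10.0.0.0
// and node addresses are drawn from 10.0.0.1 through 10.0.0.254.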
// The number of control plane VMs to create. Default is 3.
control_plane_count = 3
// The number of compute VMs to create. Default is 3.
compute_count = 3
// Ignition config path for the control plane machines.
control_plane_ignition_path = "./master.ign"
// Ignition config path for the compute machines.
compute_ignition_path = "./worker.ign"
// Set ipam and ipam_token if you want to use the IPAM server to reserve IP
// addresses for the VMs.
// Address or hostname of the IPAM server from which to reserve IP addresses for the cluster machines.
ipam = "139.178.89.254"
// Token to use to authenticate with the IPAM server.
ipam_token = "TOKEN_FOR_THE_IPAM_SERVER"
// Set bootstrap_ip, control_plane_ips, and compute_ips if you want to use static
// IPs reserved by some other means, rather than the IPAM server.
// The IP address to assign to the bootstrap VM.
//bootstrap_ip = "10.0.0.10"
// The IP addresses to assign to the control plane VMs. The length of this list
// must match the value of control_plane_count.
//control_plane_ips = ["10.0.0.20", "10.0.0.21", "10.0.0.22"]
// The IP addresses to assign to the compute VMs. The length of this list must
// match the value of compute_count.
//compute_ips = ["10.0.0.30", "10.0.0.31", "10.0.0.32"]
// A list of maps where each map defines a specific failure domain. At least one failure domain must be
// specified. When multiple failure domains are specified, control plane and compute nodes are distributed
// among the defined failure domains.
failure_domains = [
  {
    // Name of the vSphere data center.
    datacenter = "dc1"
    // Name of the vSphere cluster.
    cluster = "devel"
    // Name of the vSphere data store to use for the VMs.
    datastore = "nvme-ds1"
    // Name of the vSphere network to use for the VMs.
    network = "ci-segment-151"
    // UUID of the distributed switch which is hosting the portgroup. This can be
    // derived from the MOB (see the note after this list).
    distributed_virtual_switch_uuid = "50 05 1b 07 19 2b 0b 0a-eb 90 98 54 1d c5 b5 19"
  },
  {
    // Name of the vSphere data center.
    datacenter = "dc2"
    // Name of the vSphere cluster.
    cluster = "devel2"
    // Name of the vSphere data store to use for the VMs.
    datastore = "nvme-ds2"
    // Name of the vSphere network to use for the VMs.
    network = "ci-segment-151"
    // UUID of the distributed switch which is hosting the portgroup. This can be
    // derived from the MOB.
    distributed_virtual_switch_uuid = "50 05 1b 07 19 2b 0b 0a-eb 90 98 54 1d c5 b5 19"
  }
]
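// Note on distributed_virtual_switch_uuid: in the vSphere MOB, browse to the
// portgroup, follow config.distributedVirtualSwitch, and read that switch's
// "uuid" property. As a rough sketch, the govc CLI can fetch the same value
// (assuming govc is configured for the vCenter above; the inventory path is
// hypothetical and must match your environment):
//   govc object.collect -s /dc1/network/your-dvs-name uuid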