diff --git a/.gitignore b/.gitignore
index 112b0a83..c215dfc8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,7 @@
# hidden files/dirs
.*
!deploy/infrastructure/vagrant/.env
+!deploy/infrastructure/terraform/.env
!deploy/stack/compose/.env
!.gitignore
!.github/
diff --git a/contrib/tag-release.sh b/contrib/tag-release.sh
index 34a51902..e689cdab 100755
--- a/contrib/tag-release.sh
+++ b/contrib/tag-release.sh
@@ -13,7 +13,7 @@ new_tag=${1-}
exit 1
)
-if [[ $(git symbolic-ref HEAD) != refs/heads/main ]] && [[ -z ${ALLOW_NON_MAIN:-} ]]; then
+if [[ $(git symbolic-ref HEAD) != refs/heads/main ]] && [[ -z ${ALLOW_NON_MAIN-} ]]; then
echo "Must be on main branch" >&2
exit 1
fi
diff --git a/deploy/infrastructure/terraform/.env b/deploy/infrastructure/terraform/.env
new file mode 100644
index 00000000..4b40c976
--- /dev/null
+++ b/deploy/infrastructure/terraform/.env
@@ -0,0 +1,35 @@
+# Can be set to your own hook builds
+vOSIE=v0.8.0
+OSIE_DOWNLOAD_URLS=https://github.com/tinkerbell/hook/releases/download/${vOSIE}/hook_x86_64.tar.gz,https://github.com/tinkerbell/hook/releases/download/${vOSIE}/hook_aarch64.tar.gz
+
+# This is the IP and MAC of the machine to be provisioned
+# The IP should normally be in the same network as the IP used for the provisioner
+TINKERBELL_CLIENT_IP=192.168.56.43
+TINKERBELL_CLIENT_MAC=08:00:27:9e:f5:3a
+
+# These are the Gateway and DNS addresses the client should use, required for tink-worker to pull action images
+TINKERBELL_CLIENT_GW=192.168.56.4
+TINKERBELL_CLIENT_NAMESERVER_1=1.1.1.1
+TINKERBELL_CLIENT_NAMESERVER_2=8.8.8.8
+
+# This should be an IP that's on an interface where you will be provisioning machines
+TINKERBELL_HOST_IP=192.168.56.4
+
+# Images used by docker compose natively or in terraform/vagrant, update if necessary
+BOOTS_IMAGE=quay.io/tinkerbell/boots:v0.7.0
+HEGEL_IMAGE=quay.io/tinkerbell/hegel:v0.8.0
+TINK_VERSION=v0.8.0
+TINK_SERVER_IMAGE=quay.io/tinkerbell/tink:${TINK_VERSION}
+TINK_CONTROLLER_IMAGE=quay.io/tinkerbell/tink-controller:${TINK_VERSION}
+TINK_WORKER_IMAGE=quay.io/tinkerbell/tink-worker:${TINK_VERSION}
+RUFIO_VERSION=v0.1.0
+RUFIO_IMAGE=quay.io/tinkerbell/rufio:${RUFIO_VERSION}
+K3S_IMAGE=rancher/k3s:v1.24.4-k3s1
+
+# This is the boot/primary disk device and the device for its first partition
+# for the machine to be provisioned (as it would appear with lsblk)
+DISK_DEVICE=/dev/sda
+DISK_DEVICE_PARTITION_1=/dev/sda1
+# Example for a device with an NVME SSD
+#DISK_DEVICE=/dev/nvme0n1
+#DISK_DEVICE_PARTITION_1=/dev/nvme0n1p1
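
Because the values in this new .env interpolate each other (${vOSIE}, ${TINK_VERSION}, ${RUFIO_VERSION}), a quick way to sanity-check them before running terraform is to source the file in a shell. A minimal sketch, assuming the file stays valid shell syntax (no spaces around `=`):

    set -a; . deploy/infrastructure/terraform/.env; set +a
    echo "$OSIE_DOWNLOAD_URLS"   # both URLs should point at the v0.8.0 hook release
    echo "$TINK_SERVER_IMAGE"    # should resolve to quay.io/tinkerbell/tink:v0.8.0
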
diff --git a/deploy/infrastructure/terraform/cloud-config.cfg b/deploy/infrastructure/terraform/cloud-config.cfg
index c9562c9c..519ef3b9 100644
--- a/deploy/infrastructure/terraform/cloud-config.cfg
+++ b/deploy/infrastructure/terraform/cloud-config.cfg
@@ -12,5 +12,11 @@ write_files:
owner: root:root
permissions: "0755"
+- encoding: b64
+ content: ${ENVFILE}
+ path: /root/.env
+ owner: root:root
+ permissions: "0755"
+
runcmd:
- /root/setup.sh ${WORKER_MAC}
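
The new write_files entry expects ${ENVFILE} to arrive base64-encoded (main.tf below passes it through filebase64), and cloud-init decodes `encoding: b64` content before writing /root/.env. A minimal local round-trip check, assuming GNU coreutils base64:

    base64 -w0 deploy/infrastructure/terraform/.env >/tmp/envfile.b64
    base64 -d /tmp/envfile.b64 | head -n 3   # the first lines of the .env should come back intact
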
diff --git a/deploy/infrastructure/terraform/main.tf b/deploy/infrastructure/terraform/main.tf
index 7643df7b..85539cb4 100644
--- a/deploy/infrastructure/terraform/main.tf
+++ b/deploy/infrastructure/terraform/main.tf
@@ -79,7 +79,7 @@ resource "metal_port" "eth1" {
data "archive_file" "compose" {
type = "zip"
- source_dir = "${path.module}/../compose"
+ source_dir = "${path.module}/../../stack/compose"
output_path = "${path.module}/compose.zip"
}
@@ -100,6 +100,7 @@ data "cloudinit_config" "setup" {
content = templatefile("${path.module}/cloud-config.cfg", {
COMPOSE_ZIP = local.compose_zip
SETUPSH = filebase64("${path.module}/setup.sh")
+ ENVFILE = filebase64("${path.module}/.env")
WORKER_MAC = local.worker_macs[0]
})
}
diff --git a/deploy/infrastructure/terraform/setup.sh b/deploy/infrastructure/terraform/setup.sh
index a52c148f..8a399b60 100755
--- a/deploy/infrastructure/terraform/setup.sh
+++ b/deploy/infrastructure/terraform/setup.sh
@@ -9,7 +9,16 @@ install_docker() {
install_docker_compose() {
apt-get install --no-install-recommends python3-pip
- pip install docker-compose
+ # Prevents the python X509_V_FLAG_CB_ISSUER_CHECK issue ref: https://stackoverflow.com/questions/73830524/attributeerror-module-lib-has-no-attribute-x509-v-flag-cb-issuer-check
+ rm -rf /usr/lib/python3/dist-packages/OpenSSL
+	pip3 install pyopenssl && pip3 install --upgrade pyopenssl
+ pip3 install docker-compose
+}
+
+install_kubectl() {
+ curl -LO https://dl.k8s.io/v1.25.2/bin/linux/amd64/kubectl
+ chmod +x ./kubectl
+ mv ./kubectl /usr/local/bin/kubectl
}
install_iptables_persistent() {
@@ -40,7 +49,7 @@ get_second_interface_from_bond0() {
# if the ip is in a file in interfaces.d then lets assume this is a re-run and we can just
# return the basename of the file (which should be named same as the interface)
f=$(grep -lr "${addr}" /etc/network/interfaces.d)
- [[ -n ${f:-} ]] && basename "$f" && return
+ [[ -n ${f-} ]] && basename "$f" && return
# sometimes the interfaces aren't sorted as expected in the /slaves file
#
@@ -56,6 +65,11 @@ setup_layer2_network() {
local interface=$1
local addr=$2
+ if ! [ "$(grep -c "$interface" /proc/net/bonding/bond0)" -eq 1 ]; then
+ echo "Interface already removed from bond0"
+ return
+ fi
+
# I tried getting rid of the following "manual" commands in favor of
# persisting the network config and then restarting the network but that
# didn't always work and was hard to recover from without a reboot so we're
@@ -110,29 +124,13 @@ make_host_gw_server() {
extract_compose_files() {
mkdir -p /sandbox
- unzip /root/compose.zip -d /sandbox/compose
+ unzip -o /root/compose.zip -d /sandbox/compose
}
setup_compose_env_overrides() {
local worker_mac=$1
- readarray -t lines <<-EOF
- TINKERBELL_CLIENT_MAC=$worker_mac
- TINKERBELL_TEMPLATE_MANIFEST=/manifests/template/ubuntu-equinix-metal.yaml
- TINKERBELL_HARDWARE_MANIFEST=/manifests/hardware/hardware-equinix-metal.json
- EOF
- for line in "${lines[@]}"; do
- grep -q "$line" /sandbox/compose/.env && continue
- echo "$line" >>/sandbox/compose/.env
- done
-}
-
-create_tink_helper_script() {
- cat >/usr/local/bin/tink <<-'EOF'
- #!/usr/bin/env bash
-
- exec docker-compose -f /sandbox/compose/docker-compose.yml exec tink-cli tink "$@"
- EOF
- chmod +x /usr/local/bin/tink
+ sed -i "s/TINKERBELL_CLIENT_MAC=.*/TINKERBELL_CLIENT_MAC=$worker_mac/" /root/.env
+ cp /root/.env /sandbox/compose/.env
}
tweak_bash_interactive_settings() {
@@ -152,6 +150,7 @@ main() {
update_apt
install_docker
install_docker_compose
+ install_kubectl
install_iptables_persistent
local layer2_interface
@@ -163,7 +162,6 @@ main() {
setup_compose_env_overrides "$worker_mac"
docker-compose -f /sandbox/compose/docker-compose.yml up -d
- create_tink_helper_script
tweak_bash_interactive_settings
}
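
The net effect of the setup.sh changes: the worker MAC is templated into /root/.env, that file is copied to /sandbox/compose/.env, and kubectl v1.25.2 is installed alongside docker-compose. A hedged post-provision check on the tink-provisioner host (paths and names come from the script above):

    grep '^TINKERBELL_CLIENT_MAC=' /sandbox/compose/.env   # should show the tink_worker MAC
    kubectl version --client                               # pinned binary from install_kubectl
    docker-compose -f /sandbox/compose/docker-compose.yml ps
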
diff --git a/deploy/infrastructure/vagrant/Vagrantfile b/deploy/infrastructure/vagrant/Vagrantfile
index ed61b332..847f4ff7 100644
--- a/deploy/infrastructure/vagrant/Vagrantfile
+++ b/deploy/infrastructure/vagrant/Vagrantfile
@@ -22,6 +22,7 @@ STACK_BASE_DIR = "../../stack/"
STACK_DIR = STACK_BASE_DIR + STACK_OPT
DEST_DIR_BASE = "/sandbox/stack/"
DEST_DIR = DEST_DIR_BASE + STACK_OPT
+ENV_FILE = ".env_compose"
Vagrant.configure("2") do |config|
config.vm.provider :libvirt do |libvirt|
@@ -53,7 +54,12 @@ Vagrant.configure("2") do |config|
override.vm.synced_folder STACK_BASE_DIR, DEST_DIR_BASE, type: "rsync"
end
- provisioner.vm.provision :shell, path: STACK_DIR + "/setup.sh", args: [PROVISIONER_IP, MACHINE1_IP, MACHINE1_MAC, DEST_DIR, LOADBALANCER_IP, HELM_CHART_VERSION, HELM_LOADBALANCER_INTERFACE]
+ if USE_HELM == "true"
+ provisioner.vm.provision :shell, path: STACK_DIR + "/setup.sh", args: [PROVISIONER_IP, MACHINE1_IP, MACHINE1_MAC, DEST_DIR, LOADBALANCER_IP, HELM_CHART_VERSION, HELM_LOADBALANCER_INTERFACE]
+ else
+ provisioner.vm.provision "file", source: ENV_FILE, destination: DEST_DIR + ".env"
+ provisioner.vm.provision :shell, path: STACK_DIR + "/setup.sh"
+ end
end
config.vm.define :machine1, autostart: false do |machine1|
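
In the non-Helm path the Vagrantfile now pushes a local ENV_FILE (".env_compose") to DEST_DIR as the compose .env instead of passing arguments to setup.sh. A usage sketch; seeding .env_compose from the stack defaults is an assumption about how it is meant to be created, and USE_HELM is assumed to be settable from the environment like the other Vagrantfile options:

    cd deploy/infrastructure/vagrant
    cp ../../stack/compose/.env .env_compose   # assumption: start from the stack defaults, then edit as needed
    USE_HELM=false vagrant up provisioner
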
diff --git a/deploy/stack/compose/.env b/deploy/stack/compose/.env
index 5f156d8f..4b40c976 100644
--- a/deploy/stack/compose/.env
+++ b/deploy/stack/compose/.env
@@ -1,12 +1,18 @@
# Can be set to your own hook builds
-vOSIE=v0.7.0
+vOSIE=v0.8.0
OSIE_DOWNLOAD_URLS=https://github.com/tinkerbell/hook/releases/download/${vOSIE}/hook_x86_64.tar.gz,https://github.com/tinkerbell/hook/releases/download/${vOSIE}/hook_aarch64.tar.gz
-TINKERBELL_HARDWARE_MANIFEST=/manifests/hardware/hardware.json
-TINKERBELL_TEMPLATE_MANIFEST=/manifests/template/ubuntu.yaml
-
+# This is the IP and MAC of the machine to be provisioned
+# The IP should normally be in the same network as the IP used for the provisioner
TINKERBELL_CLIENT_IP=192.168.56.43
TINKERBELL_CLIENT_MAC=08:00:27:9e:f5:3a
+
+# These are the Gateway and DNS addresses the client should use, required for tink-worker to pull action images
+TINKERBELL_CLIENT_GW=192.168.56.4
+TINKERBELL_CLIENT_NAMESERVER_1=1.1.1.1
+TINKERBELL_CLIENT_NAMESERVER_2=8.8.8.8
+
+# This should be an IP that's on an interface where you will be provisioning machines
TINKERBELL_HOST_IP=192.168.56.4
# Images used by docker compose natively or in terraform/vagrant, update if necessary
@@ -20,5 +26,10 @@ RUFIO_VERSION=v0.1.0
RUFIO_IMAGE=quay.io/tinkerbell/rufio:${RUFIO_VERSION}
K3S_IMAGE=rancher/k3s:v1.24.4-k3s1
-# DISK_DEVICE is used in the manifests/hardware.yaml
+# This is the boot/primary disk device and the device for its first partition
+# for the machine to be provisioned (as it would appear with lsblk)
DISK_DEVICE=/dev/sda
+DISK_DEVICE_PARTITION_1=/dev/sda1
+# Example for a device with an NVME SSD
+#DISK_DEVICE=/dev/nvme0n1
+#DISK_DEVICE_PARTITION_1=/dev/nvme0n1p1
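
docker compose reads this .env from the project directory and interpolates it into docker-compose.yml, so the pinned images and addresses can be previewed before starting anything. A minimal sketch, assuming docker-compose.yml references these variables:

    cd deploy/stack/compose
    docker compose config | grep -E 'quay.io/tinkerbell|rancher/k3s'   # images resolved from the .env above
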
diff --git a/deploy/stack/compose/manifests/hardware.yaml b/deploy/stack/compose/manifests/hardware.yaml
index 9c21058c..f2fd3f99 100644
--- a/deploy/stack/compose/manifests/hardware.yaml
+++ b/deploy/stack/compose/manifests/hardware.yaml
@@ -26,8 +26,8 @@ spec:
lease_time: 86400
mac: $TINKERBELL_CLIENT_MAC
name_servers:
- - 1.1.1.1
- - 8.8.8.8
+ - $TINKERBELL_CLIENT_NAMESERVER_1
+ - $TINKERBELL_CLIENT_NAMESERVER_2
uefi: false
netboot:
allowPXE: true
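
hardware.yaml (and template.yaml below) now use $-style placeholders rather than hard-coded values, which implies an environment-substitution step before the manifests are applied; the stack's actual rendering mechanism is not shown in this diff, so the following is only a hypothetical envsubst sketch:

    set -a; . deploy/stack/compose/.env; set +a
    envsubst < deploy/stack/compose/manifests/hardware.yaml | kubectl apply --dry-run=client -f -
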
diff --git a/deploy/stack/compose/manifests/template.yaml b/deploy/stack/compose/manifests/template.yaml
index 65e6d35a..45655eae 100644
--- a/deploy/stack/compose/manifests/template.yaml
+++ b/deploy/stack/compose/manifests/template.yaml
@@ -19,23 +19,23 @@ spec:
image: quay.io/tinkerbell-actions/image2disk:v1.0.0
timeout: 600
environment:
- DEST_DISK: {{ index .Hardware.Disks 0 }}
+ DEST_DISK: $DISK_DEVICE
IMG_URL: "http://$TINKERBELL_HOST_IP:8080/focal-server-cloudimg-amd64.raw.gz"
COMPRESSED: true
- name: "grow-partition"
image: quay.io/tinkerbell-actions/cexec:v1.0.0
timeout: 90
environment:
- BLOCK_DEVICE: {{ index .Hardware.Disks 0 }}1
+ BLOCK_DEVICE: $DISK_DEVICE_PARTITION_1
FS_TYPE: ext4
CHROOT: y
DEFAULT_INTERPRETER: "/bin/sh -c"
- CMD_LINE: "growpart {{ index .Hardware.Disks 0 }} 1 && resize2fs {{ index .Hardware.Disks 0 }}1"
+ CMD_LINE: "growpart $DISK_DEVICE 1 && resize2fs $DISK_DEVICE_PARTITION_1"
- name: "install-openssl"
image: quay.io/tinkerbell-actions/cexec:v1.0.0
timeout: 90
environment:
- BLOCK_DEVICE: {{ index .Hardware.Disks 0 }}1
+ BLOCK_DEVICE: $DISK_DEVICE_PARTITION_1
FS_TYPE: ext4
CHROOT: y
DEFAULT_INTERPRETER: "/bin/sh -c"
@@ -44,7 +44,7 @@ spec:
image: quay.io/tinkerbell-actions/cexec:v1.0.0
timeout: 90
environment:
- BLOCK_DEVICE: {{ index .Hardware.Disks 0 }}1
+ BLOCK_DEVICE: $DISK_DEVICE_PARTITION_1
FS_TYPE: ext4
CHROOT: y
DEFAULT_INTERPRETER: "/bin/sh -c"
@@ -53,7 +53,7 @@ spec:
image: quay.io/tinkerbell-actions/cexec:v1.0.0
timeout: 90
environment:
- BLOCK_DEVICE: {{ index .Hardware.Disks 0 }}1
+ BLOCK_DEVICE: $DISK_DEVICE_PARTITION_1
FS_TYPE: ext4
CHROOT: y
DEFAULT_INTERPRETER: "/bin/sh -c"
@@ -62,7 +62,7 @@ spec:
image: quay.io/tinkerbell-actions/cexec:v1.0.0
timeout: 90
environment:
- BLOCK_DEVICE: {{ index .Hardware.Disks 0 }}1
+ BLOCK_DEVICE: $DISK_DEVICE_PARTITION_1
FS_TYPE: ext4
CHROOT: y
DEFAULT_INTERPRETER: "/bin/sh -c"
@@ -71,7 +71,7 @@ spec:
image: quay.io/tinkerbell-actions/writefile:v1.0.0
timeout: 90
environment:
- DEST_DISK: {{ index .Hardware.Disks 0 }}1
+ DEST_DISK: $DISK_DEVICE_PARTITION_1
FS_TYPE: ext4
DEST_PATH: /etc/netplan/config.yaml
CONTENTS: |
diff --git a/deploy/stack/compose/setup.sh b/deploy/stack/compose/setup.sh
index 81617dd3..f651dc28 100755
--- a/deploy/stack/compose/setup.sh
+++ b/deploy/stack/compose/setup.sh
@@ -2,7 +2,7 @@
install_docker() {
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
- add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+ add-apt-repository --yes "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
update_apt
apt-get install --no-install-recommends containerd.io docker-ce docker-ce-cli docker-compose-plugin
gpasswd -a vagrant docker
@@ -30,37 +30,6 @@ update_apt() {
apt-get update
}
-setup_layer2_network() {
- local host_ip=$1
- ip addr show dev eth1 | grep -q "$host_ip" && return 0
- ip addr add "$host_ip/24" dev eth1
- ip link set dev eth1 up
-}
-
-setup_compose_env_overrides() {
- local host_ip=$1
- local worker_ip=$2
- local worker_mac=$3
- local compose_dir=$4
- local disk_device
-
- disk_device="/dev/sda"
- if lsblk | grep -q vda; then
- disk_device="/dev/vda"
- fi
-
- readarray -t lines <<-EOF
- TINKERBELL_HOST_IP="$host_ip"
- TINKERBELL_CLIENT_IP="$worker_ip"
- TINKERBELL_CLIENT_MAC="$worker_mac"
- DISK_DEVICE="$disk_device"
- EOF
- for line in "${lines[@]}"; do
- grep -q "$line" "$compose_dir"/.env && continue
- echo "$line" >>"$compose_dir"/.env
- done
-}
-
create_tink_helper_script() {
local compose_dir=$1
@@ -87,19 +56,12 @@ tweak_bash_interactive_settings() {
}
main() {
- local host_ip=$1
- local worker_ip=$2
- local worker_mac=$3
- local compose_dir=$4
-
update_apt
install_docker
install_kubectl
- # setup_layer2_network "$host_ip"
-
- setup_compose_env_overrides "$host_ip" "$worker_ip" "$worker_mac" "$compose_dir"
- docker compose --env-file "$compose_dir"/.env -f "$compose_dir"/docker-compose.yml up -d
+ local compose_dir="/sandbox/stack/compose"
+ docker compose -f "$compose_dir"/docker-compose.yml up -d
create_tink_helper_script "$compose_dir"
tweak_bash_interactive_settings "$compose_dir"
diff --git a/deploy/stack/compose/tink b/deploy/stack/compose/tink
deleted file mode 100755
index 52698b10..00000000
--- a/deploy/stack/compose/tink
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/env bash
-
-exec docker compose -f /vagrant/compose/docker-compose.yml exec tink-cli tink "$@"
diff --git a/docs/quickstarts/COMPOSE.md b/docs/quickstarts/COMPOSE.md
index 219a8970..841d262d 100644
--- a/docs/quickstarts/COMPOSE.md
+++ b/docs/quickstarts/COMPOSE.md
@@ -9,37 +9,52 @@ You will need to bring your own machines to provision.
- [Docker](https://docs.docker.com/get-docker/) is installed (version >= 19.03)
- [Docker Compose](https://docs.docker.com/compose/install/) is installed (version >= 2.10.2)
+Both the Tinkerbell host and client require internet access to pull images; this sandbox is not designed to work in an isolated network.
+
## Steps
1. Clone this repository
```bash
git clone https://github.com/tinkerbell/sandbox.git
- cd sandbox
+ cd sandbox/deploy/stack/compose
```
-2. Set the public IP address for the provisioner
+2. Modify the [.env file](https://github.com/tinkerbell/sandbox/blob/47cfd6d0a0b659f1e364a78a4e63e08cdf168ca8/deploy/stack/compose/.env)
```bash
-   # This should be an IP that's on an interface where you will be provisioning machines
- export TINKERBELL_HOST_IP=192.168.2.111
+ # This is the IP and MAC of the machine to be provisioned
+ # The IP should normally be in the same network as the IP used for the provisioner
+ TINKERBELL_CLIENT_IP=192.168.56.43
+ TINKERBELL_CLIENT_MAC=08:00:27:9e:f5:3a
+
+ # These are the Gateway and DNS addresses the client should use, required for tink-worker to pull action images
+ TINKERBELL_CLIENT_GW=192.168.56.1
+ TINKERBELL_CLIENT_NAMESERVER_1=1.1.1.1
+ TINKERBELL_CLIENT_NAMESERVER_2=8.8.8.8
+
+ # This should be an IP that's on an interface where you will be provisioning machines
+ TINKERBELL_HOST_IP=192.168.56.4
```
-3. Set the IP and MAC address of the machine you want to provision (if you want Tink hardware, template, and workflow records auto-generated)
+   If you are provisioning bare metal machines with NVMe SSDs, use NVMe device paths instead:
```bash
- # This IP and MAC of the machine to be provisioned
- # The IP should normally be in the same network as the IP used for the provisioner
- export TINKERBELL_CLIENT_IP=192.168.2.211
- export TINKERBELL_CLIENT_MAC=08:00:27:9E:F5:3A
+ # This is the boot/primary disk device and the device for its first partition
+ # for the machine to be provisioned (as it would appear with lsblk)
+ #DISK_DEVICE=/dev/sda
+ #DISK_DEVICE_PARTITION_1=/dev/sda1
+ # Example for a device with an NVME SSD
+ DISK_DEVICE=/dev/nvme0n1
+ DISK_DEVICE_PARTITION_1=/dev/nvme0n1p1
```
- > Modify the [hardware.yaml](../../deploy/stack/compose/manifests/hardware.yaml), as needed, for your machine.
+   > If needed, modify the [hardware.yaml](../../deploy/stack/compose/manifests/hardware.yaml) for your machine.
-4. Start the provisioner
+3. Start the provisioner
```bash
- cd deploy/stack/compose
docker compose up -d
# This process will take about 5-10 minutes depending on your internet connection.
# Hook (OSIE) is about 400MB in size and the Ubuntu Focal image is about 500MB
@@ -69,14 +84,14 @@ You will need to bring your own machines to provision.
-5. Power up the machine to be provisioned
+4. Power up the machine to be provisioned
-6. Watch for the provisioner to complete
+5. Watch for the provisioner to complete
```bash
# watch for the workflow to completion
# once the workflow is complete (see the expected output below for completion), move on to the next step
- KUBECONFIG=./state/kube/kubeconfig.yaml kubectl get -n tink-system workflow sandbox-workflow --watch
+ KUBECONFIG=./state/kube/kubeconfig.yaml kubectl get -n default workflow sandbox-workflow --watch
```
@@ -103,9 +118,9 @@ You will need to bring your own machines to provision.
-7. Reboot the machine
+6. Reboot the machine
-8. Login to the machine
+7. Login to the machine
The machine has been provisioned with Ubuntu Focal.
You can now SSH into the machine.
diff --git a/docs/quickstarts/TERRAFORMEM.md b/docs/quickstarts/TERRAFORMEM.md
index bdfc8c5d..eb05c610 100644
--- a/docs/quickstarts/TERRAFORMEM.md
+++ b/docs/quickstarts/TERRAFORMEM.md
@@ -36,7 +36,17 @@ This option will also show you how to create a machine to provision.
# OSIE is about 2GB in size and the Ubuntu Focal image is about 500MB
```
-4. Reboot the machine
+4. Confirm the setup.sh script has finished
+
+ ```bash
+ # log in to the provisioner
+ ssh root@$(terraform output -raw provisioner_ssh)
+
+ # verify that the /root/setup.sh script has finished running
+   ps aux | grep setup.sh
+ ```
+
+5. Reboot the machine
In the [Equinix Metal Web UI](https://console.equinix.com), find the `tink_worker` and reboot it.
Or if you have the [Equinix Metal CLI](https://github.com/equinix/metal-cli) installed run the following:
@@ -45,52 +55,58 @@ This option will also show you how to create a machine to provision.
metal device reboot -i $(terraform output -raw worker_id)
```
-5. Watch the provision complete
+6. Watch the provision complete
+
+ Follow the docker-compose logs:
+
+ ```bash
+ # log in to the provisioner
+ ssh root@$(terraform output -raw provisioner_ssh)
+
+ # watch the docker-compose logs
+ # you should see Boots offer tink-worker an IP address and see tink-worker downloading files from the web server
+ docker-compose -f /sandbox/compose/docker-compose.yml logs -f
+
+ ```
+
+   Some steps can take a while to complete. In particular, tink-worker may appear to hang and stop interacting with the provisioner after pulling the LinuxKit image; it can take a few minutes before any workflow actions start.
+
+ In a separate SSH session, watch the status of workflow tasks:
```bash
# log in to the provisioner
ssh root@$(terraform output -raw provisioner_ssh)
- # watch the workflow events and status for workflow completion
+   # watch the workflow until it completes
# once the workflow is complete (see the expected output below for completion), move on to the next step
- wid=$(tink workflow get --no-headers | awk '/^\|/ {print $2}'); watch -n1 "tink workflow events ${wid}; tink workflow state ${wid}"
+ KUBECONFIG=/sandbox/compose/state/kube/kubeconfig.yaml kubectl get -n default workflow sandbox-workflow --watch
```
expected output
```bash
- +--------------------------------------+-----------------+---------------------+----------------+---------------------------------+---------------+
- | WORKER ID | TASK NAME | ACTION NAME | EXECUTION TIME | MESSAGE | ACTION STATUS |
- +--------------------------------------+-----------------+---------------------+----------------+---------------------------------+---------------+
- | 0eba0bf8-3772-4b4a-ab9f-6ebe93b90a94 | os-installation | stream-ubuntu-image | 0 | Started execution | STATE_RUNNING |
- | 0eba0bf8-3772-4b4a-ab9f-6ebe93b90a94 | os-installation | stream-ubuntu-image | 15 | finished execution successfully | STATE_SUCCESS |
- | 0eba0bf8-3772-4b4a-ab9f-6ebe93b90a94 | os-installation | install-openssl | 0 | Started execution | STATE_RUNNING |
- | 0eba0bf8-3772-4b4a-ab9f-6ebe93b90a94 | os-installation | install-openssl | 1 | finished execution successfully | STATE_SUCCESS |
- | 0eba0bf8-3772-4b4a-ab9f-6ebe93b90a94 | os-installation | create-user | 0 | Started execution | STATE_RUNNING |
- | 0eba0bf8-3772-4b4a-ab9f-6ebe93b90a94 | os-installation | create-user | 0 | finished execution successfully | STATE_SUCCESS |
- | 0eba0bf8-3772-4b4a-ab9f-6ebe93b90a94 | os-installation | enable-ssh | 0 | Started execution | STATE_RUNNING |
- | 0eba0bf8-3772-4b4a-ab9f-6ebe93b90a94 | os-installation | enable-ssh | 0 | finished execution successfully | STATE_SUCCESS |
- | 0eba0bf8-3772-4b4a-ab9f-6ebe93b90a94 | os-installation | disable-apparmor | 0 | Started execution | STATE_RUNNING |
- | 0eba0bf8-3772-4b4a-ab9f-6ebe93b90a94 | os-installation | disable-apparmor | 0 | finished execution successfully | STATE_SUCCESS |
- | 0eba0bf8-3772-4b4a-ab9f-6ebe93b90a94 | os-installation | write-netplan | 0 | Started execution | STATE_RUNNING |
- | 0eba0bf8-3772-4b4a-ab9f-6ebe93b90a94 | os-installation | write-netplan | 0 | finished execution successfully | STATE_SUCCESS |
- +--------------------------------------+-----------------+---------------------+----------------+---------------------------------+---------------+
- +----------------------+--------------------------------------+
- | FIELD NAME | VALUES |
- +----------------------+--------------------------------------+
- | Workflow ID | 3107919b-e59d-11eb-bf99-0242ac120005 |
- | Workflow Progress | 100% |
- | Current Task | os-installation |
- | Current Action | write-netplan |
- | Current Worker | 0eba0bf8-3772-4b4a-ab9f-6ebe93b90a94 |
- | Current Action State | STATE_SUCCESS |
- +----------------------+--------------------------------------+
+ NAME TEMPLATE STATE
+ sandbox-workflow ubuntu-focal STATE_PENDING
+ sandbox-workflow ubuntu-focal STATE_RUNNING
+ sandbox-workflow ubuntu-focal STATE_RUNNING
+ sandbox-workflow ubuntu-focal STATE_RUNNING
+ sandbox-workflow ubuntu-focal STATE_RUNNING
+ sandbox-workflow ubuntu-focal STATE_RUNNING
+ sandbox-workflow ubuntu-focal STATE_RUNNING
+ sandbox-workflow ubuntu-focal STATE_RUNNING
+ sandbox-workflow ubuntu-focal STATE_RUNNING
+ sandbox-workflow ubuntu-focal STATE_RUNNING
+ sandbox-workflow ubuntu-focal STATE_RUNNING
+ sandbox-workflow ubuntu-focal STATE_RUNNING
+ sandbox-workflow ubuntu-focal STATE_RUNNING
+ sandbox-workflow ubuntu-focal STATE_RUNNING
+ sandbox-workflow ubuntu-focal STATE_SUCCESS
```
-6. Reboot the machine
+7. Reboot the machine
Now reboot the `tink-worker` via the [Equinix Metal Web UI](https://console.equinix.com), or if you have the [Equinix Metal CLI](https://github.com/equinix/metal-cli) installed run the following:
@@ -98,12 +114,14 @@ This option will also show you how to create a machine to provision.
metal device reboot -i $(terraform output -raw worker_id)
```
-7. Login to the machine
+8. Login to the machine
- The machine has been provisioned with Ubuntu Focal.
- Wait for the reboot to complete and then you can SSH into it.
+ The `tink-worker` machine has been provisioned with Ubuntu Focal.
+ Wait for the reboot to complete and then you can SSH into it from the `tink-provisioner` machine.
+ It may take some time for the worker to become available via SSH.
```bash
+ # Continuing on the tink-provisioner machine
    # ctrl-c to exit the watch
ssh tink@192.168.56.43 # user/pass => tink/tink
```