diff --git a/pkg/embeddedcluster/helmvm_node.go b/pkg/embeddedcluster/helmvm_node.go
index 2b7fff9dd7..4443e3cdf8 100644
--- a/pkg/embeddedcluster/helmvm_node.go
+++ b/pkg/embeddedcluster/helmvm_node.go
@@ -160,6 +160,7 @@ func nodeRolesFromLabels(labels map[string]string) []string {
 		roleLabel, ok := labels[fmt.Sprintf("%s-%d", types.EMBEDDED_CLUSTER_ROLE_LABEL, i)]
 		if !ok {
 			fmt.Printf("failed to find role label %d", i)
+			continue
 		}
 		toReturn = append(toReturn, roleLabel)
 	}
diff --git a/pkg/embeddedcluster/node_join.go b/pkg/embeddedcluster/node_join.go
index b0835b3761..b7e85e9b35 100644
--- a/pkg/embeddedcluster/node_join.go
+++ b/pkg/embeddedcluster/node_join.go
@@ -3,12 +3,12 @@ package embeddedcluster
 import (
 	"context"
 	"fmt"
-	"os"
 	"strings"
 	"sync"
 	"time"
 
 	"github.com/replicatedhq/kots/pkg/embeddedcluster/types"
+	"github.com/replicatedhq/kots/pkg/util"
 	corev1 "k8s.io/api/core/v1"
 	kuberneteserrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -70,6 +70,12 @@ func runAddNodeCommandPod(ctx context.Context, client kubernetes.Interface, node
 		}
 	}
 
+	// get the kotsadm image, as we know that will always exist
+	kotsadmImage, err := util.ThisImage(ctx, client)
+	if err != nil {
+		return "", fmt.Errorf("failed to get kotsadm image: %w", err)
+	}
+
 	hostPathFile := corev1.HostPathFile
 	hostPathDir := corev1.HostPathDirectory
 	_, err = client.CoreV1().Pods("kube-system").Create(ctx, &corev1.Pod{
@@ -143,7 +149,7 @@
 			Containers: []corev1.Container{
 				{
 					Name: "k0s-token-generator",
-					Image: "ubuntu:latest", // TODO use the kotsadm image here as we'll know it exists
+					Image: kotsadmImage,
 					Command: []string{"/mnt/k0s"},
 					Args: []string{
 						"token",
@@ -264,7 +270,7 @@ func GenerateK0sJoinCommand(ctx context.Context, client kubernetes.Interface, ro
 
 // gets the port of the 'admin-console' service
 func getAdminConsolePort(ctx context.Context, client kubernetes.Interface) (int32, error) {
-	svc, err := client.CoreV1().Services(os.Getenv("POD_NAMESPACE")).Get(ctx, "admin-console", metav1.GetOptions{})
+	svc, err := client.CoreV1().Services(util.PodNamespace).Get(ctx, "admin-console", metav1.GetOptions{})
 	if err != nil {
 		return -1, fmt.Errorf("failed to get admin-console service: %w", err)
 	}
diff --git a/pkg/util/image.go b/pkg/util/image.go
new file mode 100644
index 0000000000..d1bd374831
--- /dev/null
+++ b/pkg/util/image.go
@@ -0,0 +1,26 @@
+package util
+
+import (
+	"context"
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+)
+
+// ThisImage looks for either a deployment 'kotsadm' or a statefulset 'kotsadm' in the current namespace
+// it returns the image of the first container in the pod template
+func ThisImage(ctx context.Context, client kubernetes.Interface) (string, error) {
+	deploy, err := client.AppsV1().Deployments(PodNamespace).Get(ctx, "kotsadm", metav1.GetOptions{})
+	if err == nil {
+		return deploy.Spec.Template.Spec.Containers[0].Image, nil
+	}
+
+	statefulset, err := client.AppsV1().StatefulSets(PodNamespace).Get(ctx, "kotsadm", metav1.GetOptions{})
+	if err == nil {
+		return statefulset.Spec.Template.Spec.Containers[0].Image, nil
+	}
+
+	return "", fmt.Errorf("failed to find deployment or statefulset")
+
+}
diff --git a/web/src/components/apps/EmbeddedClusterManagement.tsx b/web/src/components/apps/EmbeddedClusterManagement.tsx
index 1f946217fc..55834092c9 100644
--- a/web/src/components/apps/EmbeddedClusterManagement.tsx
+++ b/web/src/components/apps/EmbeddedClusterManagement.tsx
@@ -17,34 +17,6 @@ import "@src/scss/components/apps/EmbeddedClusterManagement.scss";
 const testData = {
   nodes: undefined,
 };
-// const testData = {
-//   nodes: [
-//     {
-//       name: "laverya-embeddedcluster",
-//       isConnected: true,
-//       isReady: true,
-//       isPrimaryNode: true,
-//       canDelete: false,
-//       kubeletVersion: "v1.28.2+k0s",
-//       kubeProxyVersion: "v1.28.2+k0s",
-//       operatingSystem: "linux",
-//       kernelVersion: "5.10.0-26-cloud-amd64",
-//       cpu: { capacity: 4, used: 1.9364847660000002 },
-//       memory: { capacity: 15.633056640625, used: 3.088226318359375 },
-//       pods: { capacity: 110, used: 27 },
-//       labels: ["controller"],
-//       conditions: {
-//         memoryPressure: false,
-//         diskPressure: false,
-//         pidPressure: false,
-//         ready: true,
-//       },
-//       podList: [],
-//     },
-//   ],
-//   ha: true,
-//   isEmbeddedClusterEnabled: true,
-// };
 
 type State = {
   displayAddNode: boolean;
diff --git a/web/src/components/apps/EmbeddedClusterViewNode.jsx b/web/src/components/apps/EmbeddedClusterViewNode.jsx
index a581009c3c..c138aa79c5 100644
--- a/web/src/components/apps/EmbeddedClusterViewNode.jsx
+++ b/web/src/components/apps/EmbeddedClusterViewNode.jsx
@@ -5,36 +5,6 @@ import { Link, useParams } from "react-router-dom";
 import Loader from "@components/shared/Loader";
 
 const testData = undefined;
-// const testData = {
-//   name: "laverya-embeddedcluster",
-//   isConnected: true,
-//   isReady: true,
-//   isPrimaryNode: true,
-//   canDelete: false,
-//   kubeletVersion: "v1.28.2+k0s",
-//   kubeProxyVersion: "v1.28.2+k0s",
-//   operatingSystem: "linux",
-//   kernelVersion: "5.10.0-26-cloud-amd64",
-//   cpu: { capacity: 4, used: 1.9364847660000002 },
-//   memory: { capacity: 15.633056640625, used: 3.088226318359375 },
-//   pods: { capacity: 110, used: 27 },
-//   labels: ["controller"],
-//   conditions: {
-//     memoryPressure: false,
-//     diskPressure: false,
-//     pidPressure: false,
-//     ready: true,
-//   },
-//   podList: [
-//     {
-//       name: "example-es-85fc9df74-8x8l6",
-//       status: "Running",
-//       namespace: "embeddedcluster",
-//       cpu: "0.0345789345 GB",
-//       memory: 0,
-//     },
-//   ],
-// };
 
 const EmbeddedClusterViewNode = () => {
   const { slug, nodeName } = useParams();
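
Note (not part of the patch): the new util.ThisImage helper resolves which image to reuse for the k0s token-generator pod by reading the first container of the "kotsadm" Deployment, falling back to the "kotsadm" StatefulSet. Below is a minimal, self-contained sketch of that lookup exercised against a fake clientset; the function name imageForWorkload, the namespace, and the image tag are illustrative only and assume nothing beyond the standard client-go API.

package main

import (
	"context"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// imageForWorkload mirrors the lookup ThisImage performs: try a Deployment named
// "kotsadm" in the given namespace first, then a StatefulSet, and return the image
// of the first container in the pod template.
func imageForWorkload(ctx context.Context, client kubernetes.Interface, namespace string) (string, error) {
	if deploy, err := client.AppsV1().Deployments(namespace).Get(ctx, "kotsadm", metav1.GetOptions{}); err == nil {
		return deploy.Spec.Template.Spec.Containers[0].Image, nil
	}
	if sts, err := client.AppsV1().StatefulSets(namespace).Get(ctx, "kotsadm", metav1.GetOptions{}); err == nil {
		return sts.Spec.Template.Spec.Containers[0].Image, nil
	}
	return "", fmt.Errorf("failed to find deployment or statefulset")
}

func main() {
	// Seed a fake clientset with a kotsadm StatefulSet; kotsadm may run as either
	// a Deployment or a StatefulSet, which is why the helper checks both.
	// The namespace and image tag here are made up for the example.
	client := fake.NewSimpleClientset(&appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{Name: "kotsadm", Namespace: "kotsadm"},
		Spec: appsv1.StatefulSetSpec{
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{Name: "kotsadm", Image: "kotsadm/kotsadm:v1.0.0"}},
				},
			},
		},
	})

	image, err := imageForWorkload(context.Background(), client, "kotsadm")
	fmt.Println(image, err) // prints: kotsadm/kotsadm:v1.0.0 <nil>
}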