Skip to content

Commit

Permalink
address review comments
Browse files Browse the repository at this point in the history
  • Loading branch information
laverya committed Oct 23, 2023
1 parent 7258c81 commit cd4c012
Show file tree
Hide file tree
Showing 5 changed files with 36 additions and 61 deletions.
1 change: 1 addition & 0 deletions pkg/embeddedcluster/helmvm_node.go
Original file line number Diff line number Diff line change
Expand Up @@ -160,6 +160,7 @@ func nodeRolesFromLabels(labels map[string]string) []string {
roleLabel, ok := labels[fmt.Sprintf("%s-%d", types.EMBEDDED_CLUSTER_ROLE_LABEL, i)]
if !ok {
fmt.Printf("failed to find role label %d", i)
continue
}
toReturn = append(toReturn, roleLabel)
}
Expand Down
12 changes: 9 additions & 3 deletions pkg/embeddedcluster/node_join.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,12 @@ package embeddedcluster
import (
"context"
"fmt"
"os"
"strings"
"sync"
"time"

"github.com/replicatedhq/kots/pkg/embeddedcluster/types"
"github.com/replicatedhq/kots/pkg/util"
corev1 "k8s.io/api/core/v1"
kuberneteserrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
Expand Down Expand Up @@ -70,6 +70,12 @@ func runAddNodeCommandPod(ctx context.Context, client kubernetes.Interface, node
}
}

// get the kotsadm image, as we know that will always exist
kotsadmImage, err := util.ThisImage(ctx, client)
if err != nil {
return "", fmt.Errorf("failed to get kotsadm image: %w", err)
}

hostPathFile := corev1.HostPathFile
hostPathDir := corev1.HostPathDirectory
_, err = client.CoreV1().Pods("kube-system").Create(ctx, &corev1.Pod{
Expand Down Expand Up @@ -143,7 +149,7 @@ func runAddNodeCommandPod(ctx context.Context, client kubernetes.Interface, node
Containers: []corev1.Container{
{
Name: "k0s-token-generator",
Image: "ubuntu:latest", // TODO use the kotsadm image here as we'll know it exists
Image: kotsadmImage,
Command: []string{"/mnt/k0s"},
Args: []string{
"token",
Expand Down Expand Up @@ -264,7 +270,7 @@ func GenerateK0sJoinCommand(ctx context.Context, client kubernetes.Interface, ro

// gets the port of the 'admin-console' service
func getAdminConsolePort(ctx context.Context, client kubernetes.Interface) (int32, error) {
svc, err := client.CoreV1().Services(os.Getenv("POD_NAMESPACE")).Get(ctx, "admin-console", metav1.GetOptions{})
svc, err := client.CoreV1().Services(util.PodNamespace).Get(ctx, "admin-console", metav1.GetOptions{})
if err != nil {
return -1, fmt.Errorf("failed to get admin-console service: %w", err)
}
Expand Down
26 changes: 26 additions & 0 deletions pkg/util/image.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
package util

import (
"context"
"fmt"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)

// ThisImage looks for either a deployment 'kotsadm' or a statefulset 'kotsadm' in the current namespace
// it returns the image of the first container in the pod template
// ThisImage looks for either a deployment 'kotsadm' or a statefulset 'kotsadm' in the current namespace
// and returns the image of the first container in its pod template. The deployment is checked first;
// the statefulset is used as a fallback. An error is returned if neither workload can be fetched or
// if the found workload has no containers.
func ThisImage(ctx context.Context, client kubernetes.Interface) (string, error) {
	deploy, err := client.AppsV1().Deployments(PodNamespace).Get(ctx, "kotsadm", metav1.GetOptions{})
	if err == nil {
		// guard against an empty container list before indexing to avoid a panic
		if len(deploy.Spec.Template.Spec.Containers) == 0 {
			return "", fmt.Errorf("kotsadm deployment has no containers")
		}
		return deploy.Spec.Template.Spec.Containers[0].Image, nil
	}

	statefulset, err := client.AppsV1().StatefulSets(PodNamespace).Get(ctx, "kotsadm", metav1.GetOptions{})
	if err == nil {
		if len(statefulset.Spec.Template.Spec.Containers) == 0 {
			return "", fmt.Errorf("kotsadm statefulset has no containers")
		}
		return statefulset.Spec.Template.Spec.Containers[0].Image, nil
	}

	// wrap the last lookup error so callers can see why the fallback failed
	// (the original discarded it, hiding transient API errors behind "not found")
	return "", fmt.Errorf("failed to find kotsadm deployment or statefulset: %w", err)
}
28 changes: 0 additions & 28 deletions web/src/components/apps/EmbeddedClusterManagement.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -17,34 +17,6 @@ import "@src/scss/components/apps/EmbeddedClusterManagement.scss";
const testData = {
nodes: undefined,
};
// const testData = {
// nodes: [
// {
// name: "laverya-embeddedcluster",
// isConnected: true,
// isReady: true,
// isPrimaryNode: true,
// canDelete: false,
// kubeletVersion: "v1.28.2+k0s",
// kubeProxyVersion: "v1.28.2+k0s",
// operatingSystem: "linux",
// kernelVersion: "5.10.0-26-cloud-amd64",
// cpu: { capacity: 4, used: 1.9364847660000002 },
// memory: { capacity: 15.633056640625, used: 3.088226318359375 },
// pods: { capacity: 110, used: 27 },
// labels: ["controller"],
// conditions: {
// memoryPressure: false,
// diskPressure: false,
// pidPressure: false,
// ready: true,
// },
// podList: [],
// },
// ],
// ha: true,
// isEmbeddedClusterEnabled: true,
// };

type State = {
displayAddNode: boolean;
Expand Down
30 changes: 0 additions & 30 deletions web/src/components/apps/EmbeddedClusterViewNode.jsx
Original file line number Diff line number Diff line change
Expand Up @@ -5,36 +5,6 @@ import { Link, useParams } from "react-router-dom";
import Loader from "@components/shared/Loader";

const testData = undefined;
// const testData = {
// name: "laverya-embeddedcluster",
// isConnected: true,
// isReady: true,
// isPrimaryNode: true,
// canDelete: false,
// kubeletVersion: "v1.28.2+k0s",
// kubeProxyVersion: "v1.28.2+k0s",
// operatingSystem: "linux",
// kernelVersion: "5.10.0-26-cloud-amd64",
// cpu: { capacity: 4, used: 1.9364847660000002 },
// memory: { capacity: 15.633056640625, used: 3.088226318359375 },
// pods: { capacity: 110, used: 27 },
// labels: ["controller"],
// conditions: {
// memoryPressure: false,
// diskPressure: false,
// pidPressure: false,
// ready: true,
// },
// podList: [
// {
// name: "example-es-85fc9df74-8x8l6",
// status: "Running",
// namespace: "embeddedcluster",
// cpu: "0.0345789345 GB",
// memory: 0,
// },
// ],
// };

const EmbeddedClusterViewNode = () => {
const { slug, nodeName } = useParams();
Expand Down

0 comments on commit cd4c012

Please sign in to comment.