diff --git a/migrations/tables/k0s_tokens.yaml b/migrations/tables/k0s_tokens.yaml
new file mode 100644
index 0000000000..1ae6760972
--- /dev/null
+++ b/migrations/tables/k0s_tokens.yaml
@@ -0,0 +1,21 @@
+apiVersion: schemas.schemahero.io/v1alpha4
+kind: Table
+metadata:
+ name: k0s-tokens
+spec:
+ name: k0s_tokens
+ requires: []
+ schema:
+ rqlite:
+ strict: true
+ primaryKey:
+ - token
+ columns:
+ - name: token
+ type: text
+ constraints:
+ notNull: true
+ - name: roles
+ type: text
+ constraints:
+ notNull: true
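+# one row per generated join token; roles holds a JSON-encoded list of the roles
+# granted to that token (written and read by pkg/store/kotsstore/k0s_store.go)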
diff --git a/pkg/handlers/handlers.go b/pkg/handlers/handlers.go
index aa711b7b89..8ee55ea376 100644
--- a/pkg/handlers/handlers.go
+++ b/pkg/handlers/handlers.go
@@ -277,16 +277,16 @@ func RegisterSessionAuthRoutes(r *mux.Router, kotsStore store.Store, handler KOT
// HelmVM
r.Name("HelmVM").Path("/api/v1/helmvm").HandlerFunc(NotImplemented)
- r.Name("GenerateHelmVMNodeJoinCommandSecondary").Path("/api/v1/helmvm/generate-node-join-command-secondary").Methods("POST").
- HandlerFunc(middleware.EnforceAccess(policy.ClusterWrite, handler.GenerateHelmVMNodeJoinCommandSecondary))
- r.Name("GenerateHelmVMNodeJoinCommandPrimary").Path("/api/v1/helmvm/generate-node-join-command-primary").Methods("POST").
- HandlerFunc(middleware.EnforceAccess(policy.ClusterWrite, handler.GenerateHelmVMNodeJoinCommandPrimary))
+ r.Name("GenerateK0sNodeJoinCommand").Path("/api/v1/helmvm/generate-node-join-command").Methods("POST").
+ HandlerFunc(middleware.EnforceAccess(policy.ClusterWrite, handler.GenerateK0sNodeJoinCommand))
r.Name("DrainHelmVMNode").Path("/api/v1/helmvm/nodes/{nodeName}/drain").Methods("POST").
HandlerFunc(middleware.EnforceAccess(policy.ClusterWrite, handler.DrainHelmVMNode))
r.Name("DeleteHelmVMNode").Path("/api/v1/helmvm/nodes/{nodeName}").Methods("DELETE").
HandlerFunc(middleware.EnforceAccess(policy.ClusterWrite, handler.DeleteHelmVMNode))
r.Name("GetHelmVMNodes").Path("/api/v1/helmvm/nodes").Methods("GET").
HandlerFunc(middleware.EnforceAccess(policy.ClusterRead, handler.GetHelmVMNodes))
+ r.Name("GetHelmVMNode").Path("/api/v1/helmvm/node/{nodeName}").Methods("GET").
+ HandlerFunc(middleware.EnforceAccess(policy.ClusterRead, handler.GetHelmVMNode))
// Prometheus
r.Name("SetPrometheusAddress").Path("/api/v1/prometheus").Methods("POST").
@@ -355,6 +355,9 @@ func RegisterUnauthenticatedRoutes(handler *Handler, kotsStore store.Store, debu
// These handlers should be called by the application only.
loggingRouter.Path("/license/v1/license").Methods("GET").HandlerFunc(handler.GetPlatformLicenseCompatibility)
loggingRouter.Path("/api/v1/app/custom-metrics").Methods("POST").HandlerFunc(handler.GetSendCustomAppMetricsHandler(kotsStore))
+
+ // This handler requires a valid token in the query
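+ // e.g. GET /api/v1/embedded-cluster/join?token=<token>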
+ loggingRouter.Path("/api/v1/embedded-cluster/join").Methods("GET").HandlerFunc(handler.GetK0sNodeJoinCommand)
}
func RegisterLicenseIDAuthRoutes(r *mux.Router, kotsStore store.Store, handler KOTSHandler) {
diff --git a/pkg/handlers/handlers_test.go b/pkg/handlers/handlers_test.go
index 91bd7c0731..9d4ee77a74 100644
--- a/pkg/handlers/handlers_test.go
+++ b/pkg/handlers/handlers_test.go
@@ -1210,54 +1210,65 @@ var HandlerPolicyTests = map[string][]HandlerPolicyTest{
},
"HelmVM": {}, // Not implemented
- "GenerateHelmVMNodeJoinCommandSecondary": {
+ "GenerateK0sNodeJoinCommand": {
{
Roles: []rbactypes.Role{rbac.ClusterAdminRole},
SessionRoles: []string{rbac.ClusterAdminRoleID},
Calls: func(storeRecorder *mock_store.MockStoreMockRecorder, handlerRecorder *mock_handlers.MockKOTSHandlerMockRecorder) {
- handlerRecorder.GenerateHelmVMNodeJoinCommandSecondary(gomock.Any(), gomock.Any())
+ handlerRecorder.GenerateK0sNodeJoinCommand(gomock.Any(), gomock.Any())
},
ExpectStatus: http.StatusOK,
},
},
- "GenerateHelmVMNodeJoinCommandPrimary": {
+ "DrainHelmVMNode": {
{
+ Vars: map[string]string{"nodeName": "node-name"},
Roles: []rbactypes.Role{rbac.ClusterAdminRole},
SessionRoles: []string{rbac.ClusterAdminRoleID},
Calls: func(storeRecorder *mock_store.MockStoreMockRecorder, handlerRecorder *mock_handlers.MockKOTSHandlerMockRecorder) {
- handlerRecorder.GenerateHelmVMNodeJoinCommandPrimary(gomock.Any(), gomock.Any())
+ handlerRecorder.DrainHelmVMNode(gomock.Any(), gomock.Any())
},
ExpectStatus: http.StatusOK,
},
},
- "DrainHelmVMNode": {
+ "DeleteHelmVMNode": {
{
Vars: map[string]string{"nodeName": "node-name"},
Roles: []rbactypes.Role{rbac.ClusterAdminRole},
SessionRoles: []string{rbac.ClusterAdminRoleID},
Calls: func(storeRecorder *mock_store.MockStoreMockRecorder, handlerRecorder *mock_handlers.MockKOTSHandlerMockRecorder) {
- handlerRecorder.DrainHelmVMNode(gomock.Any(), gomock.Any())
+ handlerRecorder.DeleteHelmVMNode(gomock.Any(), gomock.Any())
},
ExpectStatus: http.StatusOK,
},
},
- "DeleteHelmVMNode": {
+ "GetHelmVMNodes": {
+ {
+ Roles: []rbactypes.Role{rbac.ClusterAdminRole},
+ SessionRoles: []string{rbac.ClusterAdminRoleID},
+ Calls: func(storeRecorder *mock_store.MockStoreMockRecorder, handlerRecorder *mock_handlers.MockKOTSHandlerMockRecorder) {
+ handlerRecorder.GetHelmVMNodes(gomock.Any(), gomock.Any())
+ },
+ ExpectStatus: http.StatusOK,
+ },
+ },
+ "GetHelmVMNode": {
{
Vars: map[string]string{"nodeName": "node-name"},
Roles: []rbactypes.Role{rbac.ClusterAdminRole},
SessionRoles: []string{rbac.ClusterAdminRoleID},
Calls: func(storeRecorder *mock_store.MockStoreMockRecorder, handlerRecorder *mock_handlers.MockKOTSHandlerMockRecorder) {
- handlerRecorder.DeleteHelmVMNode(gomock.Any(), gomock.Any())
+ handlerRecorder.GetHelmVMNode(gomock.Any(), gomock.Any())
},
ExpectStatus: http.StatusOK,
},
},
- "GetHelmVMNodes": {
+ "GetK0sNodeJoinCommand": {
{
Roles: []rbactypes.Role{rbac.ClusterAdminRole},
SessionRoles: []string{rbac.ClusterAdminRoleID},
Calls: func(storeRecorder *mock_store.MockStoreMockRecorder, handlerRecorder *mock_handlers.MockKOTSHandlerMockRecorder) {
- handlerRecorder.GetHelmVMNodes(gomock.Any(), gomock.Any())
+ handlerRecorder.GetK0sNodeJoinCommand(gomock.Any(), gomock.Any())
},
ExpectStatus: http.StatusOK,
},
diff --git a/pkg/handlers/helmvm_get.go b/pkg/handlers/helmvm_get.go
index cd440d116f..4f736996ed 100644
--- a/pkg/handlers/helmvm_get.go
+++ b/pkg/handlers/helmvm_get.go
@@ -3,6 +3,7 @@ package handlers
import (
"net/http"
+ "github.com/gorilla/mux"
"github.com/replicatedhq/kots/pkg/helmvm"
"github.com/replicatedhq/kots/pkg/k8sutil"
"github.com/replicatedhq/kots/pkg/logger"
@@ -16,7 +17,7 @@ func (h *Handler) GetHelmVMNodes(w http.ResponseWriter, r *http.Request) {
return
}
- nodes, err := helmvm.GetNodes(client)
+ nodes, err := helmvm.GetNodes(r.Context(), client)
if err != nil {
logger.Error(err)
w.WriteHeader(http.StatusInternalServerError)
@@ -24,3 +25,21 @@ func (h *Handler) GetHelmVMNodes(w http.ResponseWriter, r *http.Request) {
}
JSON(w, http.StatusOK, nodes)
}
+
+func (h *Handler) GetHelmVMNode(w http.ResponseWriter, r *http.Request) {
+ client, err := k8sutil.GetClientset()
+ if err != nil {
+ logger.Error(err)
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+
+ nodeName := mux.Vars(r)["nodeName"]
+ node, err := helmvm.GetNode(r.Context(), client, nodeName)
+ if err != nil {
+ logger.Error(err)
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ JSON(w, http.StatusOK, node)
+}
diff --git a/pkg/handlers/helmvm_node_join_command.go b/pkg/handlers/helmvm_node_join_command.go
index 6604b659d9..b4d6a0da4f 100644
--- a/pkg/handlers/helmvm_node_join_command.go
+++ b/pkg/handlers/helmvm_node_join_command.go
@@ -1,55 +1,116 @@
package handlers
import (
+ "encoding/json"
+ "fmt"
"net/http"
- "time"
"github.com/replicatedhq/kots/pkg/helmvm"
"github.com/replicatedhq/kots/pkg/k8sutil"
"github.com/replicatedhq/kots/pkg/logger"
+ "github.com/replicatedhq/kots/pkg/store/kotsstore"
)
-type GenerateHelmVMNodeJoinCommandResponse struct {
+type GenerateK0sNodeJoinCommandResponse struct {
Command []string `json:"command"`
- Expiry string `json:"expiry"`
}
-func (h *Handler) GenerateHelmVMNodeJoinCommandSecondary(w http.ResponseWriter, r *http.Request) {
- client, err := k8sutil.GetClientset()
+type GetK0sNodeJoinCommandResponse struct {
+ ClusterID string `json:"clusterID"`
+ K0sJoinCommand string `json:"k0sJoinCommand"`
+ K0sToken string `json:"k0sToken"`
+}
+
+type GenerateHelmVMNodeJoinCommandRequest struct {
+ Roles []string `json:"roles"`
+}
+
+func (h *Handler) GenerateK0sNodeJoinCommand(w http.ResponseWriter, r *http.Request) {
+ generateHelmVMNodeJoinCommandRequest := GenerateHelmVMNodeJoinCommandRequest{}
+ if err := json.NewDecoder(r.Body).Decode(&generateHelmVMNodeJoinCommandRequest); err != nil {
+ logger.Error(fmt.Errorf("failed to decode request body: %w", err))
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+
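+ // persist the requested roles under a new random token; the generated node join
+ // command embeds this token, which is later exchanged for k0s join details via the
+ // unauthenticated /api/v1/embedded-cluster/join endpoint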
+ store := kotsstore.StoreFromEnv()
+ token, err := store.SetK0sInstallCommandRoles(generateHelmVMNodeJoinCommandRequest.Roles)
if err != nil {
- logger.Error(err)
+ logger.Error(fmt.Errorf("failed to set k0s install command roles: %w", err))
w.WriteHeader(http.StatusInternalServerError)
return
}
- command, expiry, err := helmvm.GenerateAddNodeCommand(client, false)
+ client, err := k8sutil.GetClientset()
+ if err != nil {
+ logger.Error(fmt.Errorf("failed to get clientset: %w", err))
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ nodeJoinCommand, err := helmvm.GenerateAddNodeCommand(r.Context(), client, token)
if err != nil {
- logger.Error(err)
+ logger.Error(fmt.Errorf("failed to generate add node command: %w", err))
w.WriteHeader(http.StatusInternalServerError)
return
}
- JSON(w, http.StatusOK, GenerateHelmVMNodeJoinCommandResponse{
- Command: command,
- Expiry: expiry.Format(time.RFC3339),
+
+ JSON(w, http.StatusOK, GenerateK0sNodeJoinCommandResponse{
+ Command: []string{nodeJoinCommand},
})
}
-func (h *Handler) GenerateHelmVMNodeJoinCommandPrimary(w http.ResponseWriter, r *http.Request) {
+// GetK0sNodeJoinCommand returns the k0s join token, join command, and cluster ID for the
+// roles associated with the provided token. The token in the query string is the only
+// authentication, which is why this handler is registered on the unauthenticated router.
+func (h *Handler) GetK0sNodeJoinCommand(w http.ResponseWriter, r *http.Request) {
+ // read query string, ensure that the token is valid
+ token := r.URL.Query().Get("token")
+ store := kotsstore.StoreFromEnv()
+ roles, err := store.GetK0sInstallCommandRoles(token)
+ if err != nil {
+ logger.Error(fmt.Errorf("failed to get k0s install command roles: %w", err))
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+
+ // use roles to generate join token etc
client, err := k8sutil.GetClientset()
if err != nil {
- logger.Error(err)
+ logger.Error(fmt.Errorf("failed to get clientset: %w", err))
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+
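+ // k0s only distinguishes controller and worker nodes; if any requested role is
+ // "controller" the node joins as a controller, otherwise it joins as a worker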
+ k0sRole := "worker"
+ for _, role := range roles {
+ if role == "controller" {
+ k0sRole = "controller"
+ break
+ }
+ }
+
+ k0sToken, err := helmvm.GenerateAddNodeToken(r.Context(), client, k0sRole)
+ if err != nil {
+ logger.Error(fmt.Errorf("failed to generate add node token: %w", err))
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+
+ k0sJoinCommand, err := helmvm.GenerateK0sJoinCommand(r.Context(), client, roles)
+ if err != nil {
+ logger.Error(fmt.Errorf("failed to generate k0s join command: %w", err))
w.WriteHeader(http.StatusInternalServerError)
return
}
- command, expiry, err := helmvm.GenerateAddNodeCommand(client, true)
+ clusterID, err := helmvm.ClusterID(client)
if err != nil {
- logger.Error(err)
+ logger.Error(fmt.Errorf("failed to get cluster id: %w", err))
w.WriteHeader(http.StatusInternalServerError)
return
}
- JSON(w, http.StatusOK, GenerateHelmVMNodeJoinCommandResponse{
- Command: command,
- Expiry: expiry.Format(time.RFC3339),
+
+ JSON(w, http.StatusOK, GetK0sNodeJoinCommandResponse{
+ ClusterID: clusterID,
+ K0sJoinCommand: k0sJoinCommand,
+ K0sToken: k0sToken,
})
}
diff --git a/pkg/handlers/interface.go b/pkg/handlers/interface.go
index c6cb2a00db..81945a41ac 100644
--- a/pkg/handlers/interface.go
+++ b/pkg/handlers/interface.go
@@ -139,11 +139,12 @@ type KOTSHandler interface {
GetKurlNodes(w http.ResponseWriter, r *http.Request)
// HelmVM
- GenerateHelmVMNodeJoinCommandSecondary(w http.ResponseWriter, r *http.Request)
- GenerateHelmVMNodeJoinCommandPrimary(w http.ResponseWriter, r *http.Request)
+ GenerateK0sNodeJoinCommand(w http.ResponseWriter, r *http.Request)
DrainHelmVMNode(w http.ResponseWriter, r *http.Request)
DeleteHelmVMNode(w http.ResponseWriter, r *http.Request)
GetHelmVMNodes(w http.ResponseWriter, r *http.Request)
+ GetHelmVMNode(w http.ResponseWriter, r *http.Request)
+ GetK0sNodeJoinCommand(w http.ResponseWriter, r *http.Request)
// Prometheus
SetPrometheusAddress(w http.ResponseWriter, r *http.Request)
diff --git a/pkg/handlers/mock/mock.go b/pkg/handlers/mock/mock.go
index cf9fe09ede..2d47a3af6f 100644
--- a/pkg/handlers/mock/mock.go
+++ b/pkg/handlers/mock/mock.go
@@ -442,28 +442,16 @@ func (mr *MockKOTSHandlerMockRecorder) GarbageCollectImages(w, r interface{}) *g
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GarbageCollectImages", reflect.TypeOf((*MockKOTSHandler)(nil).GarbageCollectImages), w, r)
}
-// GenerateHelmVMNodeJoinCommandPrimary mocks base method.
-func (m *MockKOTSHandler) GenerateHelmVMNodeJoinCommandPrimary(w http.ResponseWriter, r *http.Request) {
+// GenerateK0sNodeJoinCommand mocks base method.
+func (m *MockKOTSHandler) GenerateK0sNodeJoinCommand(w http.ResponseWriter, r *http.Request) {
m.ctrl.T.Helper()
- m.ctrl.Call(m, "GenerateHelmVMNodeJoinCommandPrimary", w, r)
+ m.ctrl.Call(m, "GenerateK0sNodeJoinCommand", w, r)
}
-// GenerateHelmVMNodeJoinCommandPrimary indicates an expected call of GenerateHelmVMNodeJoinCommandPrimary.
-func (mr *MockKOTSHandlerMockRecorder) GenerateHelmVMNodeJoinCommandPrimary(w, r interface{}) *gomock.Call {
+// GenerateK0sNodeJoinCommand indicates an expected call of GenerateK0sNodeJoinCommand.
+func (mr *MockKOTSHandlerMockRecorder) GenerateK0sNodeJoinCommand(w, r interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateHelmVMNodeJoinCommandPrimary", reflect.TypeOf((*MockKOTSHandler)(nil).GenerateHelmVMNodeJoinCommandPrimary), w, r)
-}
-
-// GenerateHelmVMNodeJoinCommandSecondary mocks base method.
-func (m *MockKOTSHandler) GenerateHelmVMNodeJoinCommandSecondary(w http.ResponseWriter, r *http.Request) {
- m.ctrl.T.Helper()
- m.ctrl.Call(m, "GenerateHelmVMNodeJoinCommandSecondary", w, r)
-}
-
-// GenerateHelmVMNodeJoinCommandSecondary indicates an expected call of GenerateHelmVMNodeJoinCommandSecondary.
-func (mr *MockKOTSHandlerMockRecorder) GenerateHelmVMNodeJoinCommandSecondary(w, r interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateHelmVMNodeJoinCommandSecondary", reflect.TypeOf((*MockKOTSHandler)(nil).GenerateHelmVMNodeJoinCommandSecondary), w, r)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateK0sNodeJoinCommand", reflect.TypeOf((*MockKOTSHandler)(nil).GenerateK0sNodeJoinCommand), w, r)
}
// GenerateKurlNodeJoinCommandMaster mocks base method.
@@ -766,6 +754,18 @@ func (mr *MockKOTSHandlerMockRecorder) GetGlobalSnapshotSettings(w, r interface{
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGlobalSnapshotSettings", reflect.TypeOf((*MockKOTSHandler)(nil).GetGlobalSnapshotSettings), w, r)
}
+// GetHelmVMNode mocks base method.
+func (m *MockKOTSHandler) GetHelmVMNode(w http.ResponseWriter, r *http.Request) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "GetHelmVMNode", w, r)
+}
+
+// GetHelmVMNode indicates an expected call of GetHelmVMNode.
+func (mr *MockKOTSHandlerMockRecorder) GetHelmVMNode(w, r interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHelmVMNode", reflect.TypeOf((*MockKOTSHandler)(nil).GetHelmVMNode), w, r)
+}
+
// GetHelmVMNodes mocks base method.
func (m *MockKOTSHandler) GetHelmVMNodes(w http.ResponseWriter, r *http.Request) {
m.ctrl.T.Helper()
@@ -814,6 +814,18 @@ func (mr *MockKOTSHandlerMockRecorder) GetInstanceSnapshotConfig(w, r interface{
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceSnapshotConfig", reflect.TypeOf((*MockKOTSHandler)(nil).GetInstanceSnapshotConfig), w, r)
}
+// GetK0sNodeJoinCommand mocks base method.
+func (m *MockKOTSHandler) GetK0sNodeJoinCommand(w http.ResponseWriter, r *http.Request) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "GetK0sNodeJoinCommand", w, r)
+}
+
+// GetK0sNodeJoinCommand indicates an expected call of GetK0sNodeJoinCommand.
+func (mr *MockKOTSHandlerMockRecorder) GetK0sNodeJoinCommand(w, r interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetK0sNodeJoinCommand", reflect.TypeOf((*MockKOTSHandler)(nil).GetK0sNodeJoinCommand), w, r)
+}
+
// GetKotsadmRegistry mocks base method.
func (m *MockKOTSHandler) GetKotsadmRegistry(w http.ResponseWriter, r *http.Request) {
m.ctrl.T.Helper()
diff --git a/pkg/helmvm/helmvm_node.go b/pkg/helmvm/helmvm_node.go
new file mode 100644
index 0000000000..a9b17f497e
--- /dev/null
+++ b/pkg/helmvm/helmvm_node.go
@@ -0,0 +1,168 @@
+package helmvm
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/replicatedhq/kots/pkg/helmvm/types"
+ "github.com/replicatedhq/kots/pkg/k8sutil"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
+ metricsv "k8s.io/metrics/pkg/client/clientset/versioned"
+)
+
+// GetNode will get a node with stats and a pod list
+func GetNode(ctx context.Context, client kubernetes.Interface, nodeName string) (*types.Node, error) {
+ node, err := client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
+ if err != nil {
+ return nil, fmt.Errorf("get node %s: %w", nodeName, err)
+ }
+
+ clientConfig, err := k8sutil.GetClusterConfig()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get cluster config: %w", err)
+ }
+
+ metricsClient, err := metricsv.NewForConfig(clientConfig)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create metrics client: %w", err)
+ }
+
+ return nodeMetrics(ctx, client, metricsClient, *node)
+}
+
+func podsOnNode(ctx context.Context, client kubernetes.Interface, nodeName string) ([]corev1.Pod, error) {
+ namespaces, err := client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
+ if err != nil {
+ return nil, fmt.Errorf("list namespaces: %w", err)
+ }
+
+ toReturn := []corev1.Pod{}
+
+ for _, ns := range namespaces.Items {
+ nsPods, err := client.CoreV1().Pods(ns.Name).List(ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("spec.nodeName=%s", nodeName)})
+ if err != nil {
+ return nil, fmt.Errorf("list pods on %s in namespace %s: %w", nodeName, ns.Name, err)
+ }
+
+ toReturn = append(toReturn, nsPods.Items...)
+ }
+ return toReturn, nil
+}
+
+// nodeMetrics takes a corev1.Node and gets metrics + status for that node
+func nodeMetrics(ctx context.Context, client kubernetes.Interface, metricsClient *metricsv.Clientset, node corev1.Node) (*types.Node, error) {
+ nodePods, err := podsOnNode(ctx, client, node.Name)
+ if err != nil {
+ return nil, fmt.Errorf("pods per node: %w", err)
+ }
+
+ cpuCapacity := types.CapacityUsed{}
+ memoryCapacity := types.CapacityUsed{}
+ podCapacity := types.CapacityUsed{}
+
+ memoryCapacity.Capacity = float64(node.Status.Capacity.Memory().Value()) / math.Pow(2, 30) // capacity in GB
+
+ cpuCapacity.Capacity, err = strconv.ParseFloat(node.Status.Capacity.Cpu().String(), 64)
+ if err != nil {
+ return nil, fmt.Errorf("parse CPU capacity %q for node %s: %w", node.Status.Capacity.Cpu().String(), node.Name, err)
+ }
+
+ podCapacity.Capacity = float64(node.Status.Capacity.Pods().Value())
+
+ nodeUsageMetrics, err := metricsClient.MetricsV1beta1().NodeMetricses().Get(ctx, node.Name, metav1.GetOptions{})
+ if err == nil {
+ if nodeUsageMetrics.Usage.Memory() != nil {
+ memoryCapacity.Used = float64(nodeUsageMetrics.Usage.Memory().Value()) / math.Pow(2, 30)
+ }
+
+ if nodeUsageMetrics.Usage.Cpu() != nil {
+ cpuCapacity.Used = nodeUsageMetrics.Usage.Cpu().AsApproximateFloat64()
+ }
+ } else {
+ // if we can't get metrics, we'll do nothing for now
+ // in the future we may decide to retry or log a warning
+ }
+
+ podCapacity.Used = float64(len(nodePods))
+
+ podInfo := []types.PodInfo{}
+
+ for _, pod := range nodePods {
+ newInfo := types.PodInfo{
+ Name: pod.Name,
+ Namespace: pod.Namespace,
+ Status: string(pod.Status.Phase),
+ }
+
+ podMetrics, err := metricsClient.MetricsV1beta1().PodMetricses(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
+ if err == nil {
+ podTotalMemory := 0.0
+ podTotalCPU := 0.0
+ for _, container := range podMetrics.Containers {
+ if container.Usage.Memory() != nil {
+ podTotalMemory += float64(container.Usage.Memory().Value()) / math.Pow(2, 20)
+ }
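+ // CPU usage is reported in cores; multiply by 1000 to display millicores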
+ if container.Usage.Cpu() != nil {
+ podTotalCPU += container.Usage.Cpu().AsApproximateFloat64() * 1000
+ }
+ }
+ newInfo.Memory = fmt.Sprintf("%.1f MB", podTotalMemory)
+ newInfo.CPU = fmt.Sprintf("%.1f m", podTotalCPU)
+ }
+
+ podInfo = append(podInfo, newInfo)
+ }
+
+ return &types.Node{
+ Name: node.Name,
+ IsConnected: isConnected(node),
+ IsReady: isReady(node),
+ IsPrimaryNode: isPrimary(node),
+ CanDelete: node.Spec.Unschedulable && !isConnected(node),
+ KubeletVersion: node.Status.NodeInfo.KubeletVersion,
+ KubeProxyVersion: node.Status.NodeInfo.KubeProxyVersion,
+ OperatingSystem: node.Status.NodeInfo.OperatingSystem,
+ KernelVersion: node.Status.NodeInfo.KernelVersion,
+ CPU: cpuCapacity,
+ Memory: memoryCapacity,
+ Pods: podCapacity,
+ Labels: nodeRolesFromLabels(node.Labels),
+ Conditions: findNodeConditions(node.Status.Conditions),
+ PodList: podInfo,
+ }, nil
+}
+
+// nodeRolesFromLabels parses a map of k8s node labels, and returns the roles of the node
+func nodeRolesFromLabels(labels map[string]string) []string {
+ toReturn := []string{}
+
+ numRolesStr, ok := labels[types.EMBEDDED_CLUSTER_ROLE_LABEL]
+ if !ok {
+ // the first node will not initially have a role label, but is a 'controller'
+ if val, ok := labels["node-role.kubernetes.io/control-plane"]; ok && val == "true" {
+ return []string{"controller"}
+ }
+ return nil
+ }
+ numRoles, err := strconv.Atoi(strings.TrimPrefix(numRolesStr, "total-"))
+ if err != nil {
+ fmt.Printf("failed to parse role label %q: %s", numRolesStr, err.Error())
+
+ return nil
+ }
+
+ for i := 0; i < numRoles; i++ {
+ roleLabel, ok := labels[fmt.Sprintf("%s-%d", types.EMBEDDED_CLUSTER_ROLE_LABEL, i)]
+ if !ok {
+ fmt.Printf("failed to find role label %d\n", i)
+ continue // skip missing role labels rather than appending an empty role
+ }
+ toReturn = append(toReturn, roleLabel)
+ }
+
+ return toReturn
+}
diff --git a/pkg/helmvm/helmvm_nodes.go b/pkg/helmvm/helmvm_nodes.go
index e00dca2108..9396e6508c 100644
--- a/pkg/helmvm/helmvm_nodes.go
+++ b/pkg/helmvm/helmvm_nodes.go
@@ -2,92 +2,41 @@ package helmvm
import (
"context"
- "crypto/tls"
- "encoding/json"
- "fmt"
- "io"
- "math"
- "net/http"
- "os"
- "strconv"
- "time"
-
"github.com/pkg/errors"
"github.com/replicatedhq/kots/pkg/helmvm/types"
- "github.com/replicatedhq/kots/pkg/logger"
+ "github.com/replicatedhq/kots/pkg/k8sutil"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
- statsv1alpha1 "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
+ metricsv "k8s.io/metrics/pkg/client/clientset/versioned"
)
// GetNodes will get a list of nodes with stats
-func GetNodes(client kubernetes.Interface) (*types.HelmVMNodes, error) {
- nodes, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+func GetNodes(ctx context.Context, client kubernetes.Interface) (*types.HelmVMNodes, error) {
+ nodes, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
return nil, errors.Wrap(err, "list nodes")
}
- toReturn := types.HelmVMNodes{}
+ clientConfig, err := k8sutil.GetClusterConfig()
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to get cluster config")
+ }
- for _, node := range nodes.Items {
- cpuCapacity := types.CapacityAvailable{}
- memoryCapacity := types.CapacityAvailable{}
- podCapacity := types.CapacityAvailable{}
+ metricsClient, err := metricsv.NewForConfig(clientConfig)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to create metrics client")
+ }
- memoryCapacity.Capacity = float64(node.Status.Capacity.Memory().Value()) / math.Pow(2, 30) // capacity in GB
+ toReturn := types.HelmVMNodes{}
- cpuCapacity.Capacity, err = strconv.ParseFloat(node.Status.Capacity.Cpu().String(), 64)
+ for _, node := range nodes.Items {
+ nodeMet, err := nodeMetrics(ctx, client, metricsClient, node)
if err != nil {
- return nil, errors.Wrapf(err, "parse CPU capacity %q for node %s", node.Status.Capacity.Cpu().String(), node.Name)
- }
-
- podCapacity.Capacity = float64(node.Status.Capacity.Pods().Value())
-
- nodeIP := ""
- for _, address := range node.Status.Addresses {
- if address.Type == corev1.NodeInternalIP {
- nodeIP = address.Address
- }
- }
-
- if nodeIP == "" {
- logger.Infof("Did not find address for node %s, %+v", node.Name, node.Status.Addresses)
- } else {
- nodeMetrics, err := getNodeMetrics(nodeIP)
- if err != nil {
- logger.Infof("Got error retrieving stats for node %q: %v", node.Name, err)
- } else {
- if nodeMetrics.Node.Memory != nil && nodeMetrics.Node.Memory.AvailableBytes != nil {
- memoryCapacity.Available = float64(*nodeMetrics.Node.Memory.AvailableBytes) / math.Pow(2, 30)
- }
-
- if nodeMetrics.Node.CPU != nil && nodeMetrics.Node.CPU.UsageNanoCores != nil {
- cpuCapacity.Available = cpuCapacity.Capacity - (float64(*nodeMetrics.Node.CPU.UsageNanoCores) / math.Pow(10, 9))
- }
-
- podCapacity.Available = podCapacity.Capacity - float64(len(nodeMetrics.Pods))
- }
- }
-
- nodeLabelArray := []string{}
- for k, v := range node.Labels {
- nodeLabelArray = append(nodeLabelArray, fmt.Sprintf("%s:%s", k, v))
+ return nil, errors.Wrap(err, "node metrics")
}
- toReturn.Nodes = append(toReturn.Nodes, types.Node{
- Name: node.Name,
- IsConnected: isConnected(node),
- IsReady: isReady(node),
- IsPrimaryNode: isPrimary(node),
- CanDelete: node.Spec.Unschedulable && !isConnected(node),
- KubeletVersion: node.Status.NodeInfo.KubeletVersion,
- CPU: cpuCapacity,
- Memory: memoryCapacity,
- Pods: podCapacity,
- Labels: nodeLabelArray,
- Conditions: findNodeConditions(node.Status.Conditions),
- })
+ toReturn.Nodes = append(toReturn.Nodes, *nodeMet)
}
isHelmVM, err := IsHelmVM(client)
@@ -124,51 +73,6 @@ func findNodeConditions(conditions []corev1.NodeCondition) types.NodeConditions
return discoveredConditions
}
-// get kubelet PKI info from /etc/kubernetes/pki/kubelet, use it to hit metrics server at `http://${nodeIP}:10255/stats/summary`
-func getNodeMetrics(nodeIP string) (*statsv1alpha1.Summary, error) {
- client := http.Client{
- Timeout: time.Second,
- }
- port := 10255
-
- // only use mutual TLS if client cert exists
- _, err := os.ReadFile("/etc/kubernetes/pki/kubelet/client.crt")
- if err == nil {
- cert, err := tls.LoadX509KeyPair("/etc/kubernetes/pki/kubelet/client.crt", "/etc/kubernetes/pki/kubelet/client.key")
- if err != nil {
- return nil, errors.Wrap(err, "get client keypair")
- }
-
- // this will leak memory
- client.Transport = &http.Transport{
- TLSClientConfig: &tls.Config{
- Certificates: []tls.Certificate{cert},
- InsecureSkipVerify: true,
- },
- }
- port = 10250
- }
-
- r, err := client.Get(fmt.Sprintf("https://%s:%d/stats/summary", nodeIP, port))
- if err != nil {
- return nil, errors.Wrapf(err, "get node %s stats", nodeIP)
- }
- defer r.Body.Close()
-
- body, err := io.ReadAll(r.Body)
- if err != nil {
- return nil, errors.Wrapf(err, "read node %s stats response", nodeIP)
- }
-
- summary := statsv1alpha1.Summary{}
- err = json.Unmarshal(body, &summary)
- if err != nil {
- return nil, errors.Wrapf(err, "parse node %s stats response", nodeIP)
- }
-
- return &summary, nil
-}
-
func isConnected(node corev1.Node) bool {
for _, taint := range node.Spec.Taints {
if taint.Key == "node.kubernetes.io/unreachable" {
@@ -201,12 +105,3 @@ func isPrimary(node corev1.Node) bool {
return false
}
-
-func internalIP(node corev1.Node) string {
- for _, address := range node.Status.Addresses {
- if address.Type == corev1.NodeInternalIP {
- return address.Address
- }
- }
- return ""
-}
diff --git a/pkg/helmvm/node_join.go b/pkg/helmvm/node_join.go
index 6aad6255a9..4a4bb6c068 100644
--- a/pkg/helmvm/node_join.go
+++ b/pkg/helmvm/node_join.go
@@ -1,12 +1,339 @@
package helmvm
import (
+ "context"
+ "fmt"
+ "os"
+ "strings"
+ "sync"
"time"
+ "github.com/replicatedhq/kots/pkg/helmvm/types"
+ corev1 "k8s.io/api/core/v1"
+ kuberneteserrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
-// GenerateAddNodeCommand will generate the HelmVM node add command for a primary or secondary node
-func GenerateAddNodeCommand(client kubernetes.Interface, primary bool) ([]string, *time.Time, error) {
- return nil, nil, nil
+type joinTokenEntry struct {
+ Token string
+ Creation *time.Time
+ Mut sync.Mutex
+}
+
+var joinTokenMapMut = sync.Mutex{}
+var joinTokenMap = map[string]*joinTokenEntry{}
+
+// GenerateAddNodeToken will generate a k0s join token for a node with the given role.
+// Tokens are created with a 12 hour expiry and cached for 1 hour after first generation.
+func GenerateAddNodeToken(ctx context.Context, client kubernetes.Interface, nodeRole string) (string, error) {
+ // get the joinToken struct entry for this node role
+ joinTokenMapMut.Lock()
+ if _, ok := joinTokenMap[nodeRole]; !ok {
+ joinTokenMap[nodeRole] = &joinTokenEntry{}
+ }
+ joinToken := joinTokenMap[nodeRole]
+ joinTokenMapMut.Unlock()
+
+ // lock the joinToken struct entry
+ joinToken.Mut.Lock()
+ defer joinToken.Mut.Unlock()
+
+ // if the joinToken has been generated in the past hour, return it
+ if joinToken.Creation != nil && time.Now().Before(joinToken.Creation.Add(time.Hour)) {
+ return joinToken.Token, nil
+ }
+
+ newToken, err := runAddNodeCommandPod(ctx, client, nodeRole)
+ if err != nil {
+ return "", fmt.Errorf("failed to run add node command pod: %w", err)
+ }
+
+ now := time.Now()
+ joinToken.Token = newToken
+ joinToken.Creation = &now
+
+ return newToken, nil
+}
+
+// run a pod that will generate the add node token
+func runAddNodeCommandPod(ctx context.Context, client kubernetes.Interface, nodeRole string) (string, error) {
+ podName := "k0s-token-generator-"
+ suffix := strings.Replace(nodeRole, "+", "-", -1)
+ podName += suffix
+
+ // cleanup the pod if it already exists
+ err := client.CoreV1().Pods("kube-system").Delete(ctx, podName, metav1.DeleteOptions{})
+ if err != nil {
+ if !kuberneteserrors.IsNotFound(err) {
+ return "", fmt.Errorf("failed to delete pod: %w", err)
+ }
+ }
+
+ hostPathFile := corev1.HostPathFile
+ hostPathDir := corev1.HostPathDirectory
+ _, err = client.CoreV1().Pods("kube-system").Create(ctx, &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: podName,
+ Namespace: "kube-system",
+ Labels: map[string]string{
+ "replicated.app/embedded-cluster": "true",
+ },
+ },
+ Spec: corev1.PodSpec{
+ RestartPolicy: corev1.RestartPolicyOnFailure,
+ HostNetwork: true,
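+ // mount the host's k0s binary and data directories so the pod can run
+ // 'k0s token create' against the local controller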
+ Volumes: []corev1.Volume{
+ {
+ Name: "bin",
+ VolumeSource: corev1.VolumeSource{
+ HostPath: &corev1.HostPathVolumeSource{
+ Path: "/usr/local/bin/k0s",
+ Type: &hostPathFile,
+ },
+ },
+ },
+ {
+ Name: "lib",
+ VolumeSource: corev1.VolumeSource{
+ HostPath: &corev1.HostPathVolumeSource{
+ Path: "/var/lib/k0s",
+ Type: &hostPathDir,
+ },
+ },
+ },
+ {
+ Name: "etc",
+ VolumeSource: corev1.VolumeSource{
+ HostPath: &corev1.HostPathVolumeSource{
+ Path: "/etc/k0s",
+ Type: &hostPathDir,
+ },
+ },
+ },
+ {
+ Name: "run",
+ VolumeSource: corev1.VolumeSource{
+ HostPath: &corev1.HostPathVolumeSource{
+ Path: "/run/k0s",
+ Type: &hostPathDir,
+ },
+ },
+ },
+ },
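+ // pin the pod to a controller node, since join tokens are created on controllers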
+ Affinity: &corev1.Affinity{
+ NodeAffinity: &corev1.NodeAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
+ NodeSelectorTerms: []corev1.NodeSelectorTerm{
+ {
+ MatchExpressions: []corev1.NodeSelectorRequirement{
+ {
+ Key: "node.k0sproject.io/role",
+ Operator: corev1.NodeSelectorOpIn,
+ Values: []string{
+ "control-plane",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ Containers: []corev1.Container{
+ {
+ Name: "k0s-token-generator",
+ Image: "ubuntu:latest", // TODO use the kotsadm image here as we'll know it exists
+ Command: []string{"/mnt/k0s"},
+ Args: []string{
+ "token",
+ "create",
+ "--expiry",
+ "12h",
+ "--role",
+ nodeRole,
+ },
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "bin",
+ MountPath: "/mnt/k0s",
+ },
+ {
+ Name: "lib",
+ MountPath: "/var/lib/k0s",
+ },
+ {
+ Name: "etc",
+ MountPath: "/etc/k0s",
+ },
+ {
+ Name: "run",
+ MountPath: "/run/k0s",
+ },
+ },
+ },
+ },
+ },
+ }, metav1.CreateOptions{})
+ if err != nil {
+ return "", fmt.Errorf("failed to create pod: %w", err)
+ }
+
+ // wait for the pod to complete
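+ // poll once per second; a cancelled request context will fail the Get call and end the loop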
+ for {
+ pod, err := client.CoreV1().Pods("kube-system").Get(ctx, podName, metav1.GetOptions{})
+ if err != nil {
+ return "", fmt.Errorf("failed to get pod: %w", err)
+ }
+
+ if pod.Status.Phase == corev1.PodSucceeded {
+ break
+ }
+
+ if pod.Status.Phase == corev1.PodFailed {
+ return "", fmt.Errorf("pod failed")
+ }
+
+ time.Sleep(time.Second)
+ }
+
+ // get the logs from the completed pod
+ podLogs, err := client.CoreV1().Pods("kube-system").GetLogs(podName, &corev1.PodLogOptions{}).DoRaw(ctx)
+ if err != nil {
+ return "", fmt.Errorf("failed to get pod logs: %w", err)
+ }
+
+ // delete the completed pod
+ err = client.CoreV1().Pods("kube-system").Delete(ctx, podName, metav1.DeleteOptions{})
+ if err != nil {
+ return "", fmt.Errorf("failed to delete pod: %w", err)
+ }
+
+ // the logs are just a join token, which needs to be added to other things to get a join command
+ return string(podLogs), nil
+}
+
+// GenerateAddNodeCommand returns the command a user should run to add a node with the provided token
+// the command will be of the form 'sudo ./<binary-name> node join <ip>:<port> <token>'
+func GenerateAddNodeCommand(ctx context.Context, client kubernetes.Interface, token string) (string, error) {
+ cm, err := ReadConfigMap(client)
+ if err != nil {
+ return "", fmt.Errorf("failed to read configmap: %w", err)
+ }
+
+ binaryName := cm.Data["embedded-binary-name"]
+
+ // get the IP of a controller node
+ nodeIP, err := getControllerNodeIP(ctx, client)
+ if err != nil {
+ return "", fmt.Errorf("failed to get controller node IP: %w", err)
+ }
+
+ // get the port of the 'admin-console' service
+ port, err := getAdminConsolePort(ctx, client)
+ if err != nil {
+ return "", fmt.Errorf("failed to get admin console port: %w", err)
+ }
+
+ return fmt.Sprintf("sudo ./%s node join %s:%d %s", binaryName, nodeIP, port, token), nil
+}
+
+// GenerateK0sJoinCommand returns the k0s node join command, without the token but with all other required flags
+// (including node labels generated from the roles etc)
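+// e.g. for roles ["controller"]: "/usr/local/bin/k0s install controller --enable-worker --labels kots.io/embedded-cluster-role=total-1,kots.io/embedded-cluster-role-0=controller"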
+func GenerateK0sJoinCommand(ctx context.Context, client kubernetes.Interface, roles []string) (string, error) {
+ k0sRole := "worker"
+ for _, role := range roles {
+ if role == "controller" {
+ k0sRole = "controller"
+ }
+ }
+
+ cmd := []string{"/usr/local/bin/k0s", "install", k0sRole}
+ if k0sRole == "controller" {
+ cmd = append(cmd, "--enable-worker")
+ }
+
+ labels, err := getRolesNodeLabels(ctx, client, roles)
+ if err != nil {
+ return "", fmt.Errorf("failed to get role labels: %w", err)
+ }
+ cmd = append(cmd, "--labels", labels)
+
+ return strings.Join(cmd, " "), nil
+}
+
+// getAdminConsolePort returns the NodePort of the 'admin-console' service's 'http' port
+func getAdminConsolePort(ctx context.Context, client kubernetes.Interface) (int32, error) {
+ svc, err := client.CoreV1().Services(os.Getenv("POD_NAMESPACE")).Get(ctx, "admin-console", metav1.GetOptions{})
+ if err != nil {
+ return -1, fmt.Errorf("failed to get admin-console service: %w", err)
+ }
+
+ for _, port := range svc.Spec.Ports {
+ if port.Name == "http" {
+ return port.NodePort, nil
+ }
+ }
+ return -1, fmt.Errorf("did not find port 'http' in service 'admin-console'")
+}
+
+// getControllerNodeIP gets the IP of a healthy controller node
+func getControllerNodeIP(ctx context.Context, client kubernetes.Interface) (string, error) {
+ nodes, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
+ if err != nil {
+ return "", fmt.Errorf("failed to list nodes: %w", err)
+ }
+
+ for _, node := range nodes.Items {
+ if cp, ok := node.Labels["node-role.kubernetes.io/control-plane"]; !ok || cp != "true" {
+ continue
+ }
+
+ for _, condition := range node.Status.Conditions {
+ if condition.Type == "Ready" && condition.Status == "True" {
+ for _, address := range node.Status.Addresses {
+ if address.Type == "InternalIP" {
+ return address.Address, nil
+ }
+ }
+ }
+ }
+
+ }
+
+ return "", fmt.Errorf("failed to find healthy controller node")
+}
+
+func getRolesNodeLabels(ctx context.Context, client kubernetes.Interface, roles []string) (string, error) {
+ roleLabels := getRoleListLabels(roles)
+
+ for _, role := range roles {
+ labels, err := getRoleNodeLabels(ctx, client, role)
+ if err != nil {
+ return "", fmt.Errorf("failed to get node labels for role %s: %w", role, err)
+ }
+ roleLabels = append(roleLabels, labels...)
+ }
+
+ return strings.Join(roleLabels, ","), nil
+}
+
+// TODO: look up role in cluster config, apply additional labels based on role
+func getRoleNodeLabels(ctx context.Context, client kubernetes.Interface, role string) ([]string, error) {
+ toReturn := []string{}
+
+ return toReturn, nil
+}
+
+// getRoleListLabels returns the labels needed to identify the roles of this node in the future
+// one label will be the number of roles, and then deterministic label names will be used to store the role names
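+// e.g. for roles ["controller", "worker"]: ["kots.io/embedded-cluster-role=total-2", "kots.io/embedded-cluster-role-0=controller", "kots.io/embedded-cluster-role-1=worker"]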
+func getRoleListLabels(roles []string) []string {
+ toReturn := []string{}
+ toReturn = append(toReturn, fmt.Sprintf("%s=total-%d", types.EMBEDDED_CLUSTER_ROLE_LABEL, len(roles)))
+
+ for idx, role := range roles {
+ toReturn = append(toReturn, fmt.Sprintf("%s-%d=%s", types.EMBEDDED_CLUSTER_ROLE_LABEL, idx, role))
+ }
+
+ return toReturn
}
diff --git a/pkg/helmvm/types/types.go b/pkg/helmvm/types/types.go
index c298dfbd93..10bf390368 100644
--- a/pkg/helmvm/types/types.go
+++ b/pkg/helmvm/types/types.go
@@ -1,5 +1,8 @@
package types
+const EMBEDDED_CLUSTER_LABEL = "kots.io/embedded-cluster"
+const EMBEDDED_CLUSTER_ROLE_LABEL = EMBEDDED_CLUSTER_LABEL + "-role"
+
type HelmVMNodes struct {
Nodes []Node `json:"nodes"`
HA bool `json:"ha"`
@@ -7,22 +10,26 @@ type HelmVMNodes struct {
}
type Node struct {
- Name string `json:"name"`
- IsConnected bool `json:"isConnected"`
- IsReady bool `json:"isReady"`
- IsPrimaryNode bool `json:"isPrimaryNode"`
- CanDelete bool `json:"canDelete"`
- KubeletVersion string `json:"kubeletVersion"`
- CPU CapacityAvailable `json:"cpu"`
- Memory CapacityAvailable `json:"memory"`
- Pods CapacityAvailable `json:"pods"`
- Labels []string `json:"labels"`
- Conditions NodeConditions `json:"conditions"`
+ Name string `json:"name"`
+ IsConnected bool `json:"isConnected"`
+ IsReady bool `json:"isReady"`
+ IsPrimaryNode bool `json:"isPrimaryNode"`
+ CanDelete bool `json:"canDelete"`
+ KubeletVersion string `json:"kubeletVersion"`
+ KubeProxyVersion string `json:"kubeProxyVersion"`
+ OperatingSystem string `json:"operatingSystem"`
+ KernelVersion string `json:"kernelVersion"`
+ CPU CapacityUsed `json:"cpu"`
+ Memory CapacityUsed `json:"memory"`
+ Pods CapacityUsed `json:"pods"`
+ Labels []string `json:"labels"`
+ Conditions NodeConditions `json:"conditions"`
+ PodList []PodInfo `json:"podList"`
}
-type CapacityAvailable struct {
- Capacity float64 `json:"capacity"`
- Available float64 `json:"available"`
+type CapacityUsed struct {
+ Capacity float64 `json:"capacity"`
+ Used float64 `json:"used"`
}
type NodeConditions struct {
@@ -31,3 +38,11 @@ type NodeConditions struct {
PidPressure bool `json:"pidPressure"`
Ready bool `json:"ready"`
}
+
+type PodInfo struct {
+ Name string `json:"name"`
+ Status string `json:"status"`
+ Namespace string `json:"namespace"`
+ CPU string `json:"cpu"`
+ Memory string `json:"memory"`
+}
diff --git a/pkg/helmvm/util.go b/pkg/helmvm/util.go
index 7d2817f93e..87b6bfe285 100644
--- a/pkg/helmvm/util.go
+++ b/pkg/helmvm/util.go
@@ -1,13 +1,54 @@
package helmvm
import (
+ "context"
+ "fmt"
+
+ corev1 "k8s.io/api/core/v1"
+ kuberneteserrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
+const configMapName = "embedded-cluster-config"
+const configMapNamespace = "embedded-cluster"
+
+// ReadConfigMap will read the embedded cluster config from a configmap
+func ReadConfigMap(client kubernetes.Interface) (*corev1.ConfigMap, error) {
+ return client.CoreV1().ConfigMaps(configMapNamespace).Get(context.TODO(), configMapName, metav1.GetOptions{})
+}
+
func IsHelmVM(clientset kubernetes.Interface) (bool, error) {
- return false, nil
+ if clientset == nil {
+ return false, fmt.Errorf("clientset is nil")
+ }
+
+ configMapExists := false
+ _, err := ReadConfigMap(clientset)
+ if err == nil {
+ configMapExists = true
+ } else if kuberneteserrors.IsNotFound(err) {
+ configMapExists = false
+ } else if kuberneteserrors.IsUnauthorized(err) {
+ configMapExists = false
+ } else if kuberneteserrors.IsForbidden(err) {
+ configMapExists = false
+ } else if err != nil {
+ return false, fmt.Errorf("failed to get embedded cluster configmap: %w", err)
+ }
+
+ return configMapExists, nil
}
func IsHA(clientset kubernetes.Interface) (bool, error) {
- return false, nil
+ return true, nil
+}
+
+func ClusterID(client kubernetes.Interface) (string, error) {
+ configMap, err := ReadConfigMap(client)
+ if err != nil {
+ return "", fmt.Errorf("failed to read configmap: %w", err)
+ }
+
+ return configMap.Data["embedded-cluster-id"], nil
}
diff --git a/pkg/store/kotsstore/k0s_store.go b/pkg/store/kotsstore/k0s_store.go
new file mode 100644
index 0000000000..4eb7a9bd7b
--- /dev/null
+++ b/pkg/store/kotsstore/k0s_store.go
@@ -0,0 +1,69 @@
+package kotsstore
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/replicatedhq/kots/pkg/persistence"
+ "github.com/replicatedhq/kots/pkg/rand"
+ "github.com/rqlite/gorqlite"
+)
+
+func (s *KOTSStore) SetK0sInstallCommandRoles(roles []string) (string, error) {
+ db := persistence.MustGetDBSession()
+
+ installID := rand.StringWithCharset(24, rand.LOWER_CASE+rand.UPPER_CASE)
+
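+ // clear any existing row for this token before inserting; with a 24-character random
+ // ID a collision is effectively impossible, but this keeps the write idempotent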
+ query := `delete from k0s_tokens where token = ?`
+ wr, err := db.WriteOneParameterized(gorqlite.ParameterizedStatement{
+ Query: query,
+ Arguments: []interface{}{installID},
+ })
+ if err != nil {
+ return "", fmt.Errorf("delete k0s join token: %v: %v", err, wr.Err)
+ }
+
+ jsonRoles, err := json.Marshal(roles)
+ if err != nil {
+ return "", fmt.Errorf("failed to marshal roles: %w", err)
+ }
+
+ query = `insert into k0s_tokens (token, roles) values (?, ?)`
+ wr, err = db.WriteOneParameterized(gorqlite.ParameterizedStatement{
+ Query: query,
+ Arguments: []interface{}{installID, string(jsonRoles)},
+ })
+ if err != nil {
+ return "", fmt.Errorf("insert k0s join token: %v: %v", err, wr.Err)
+ }
+
+ return installID, nil
+}
+
+func (s *KOTSStore) GetK0sInstallCommandRoles(token string) ([]string, error) {
+ db := persistence.MustGetDBSession()
+ query := `select roles from k0s_tokens where token = ?`
+ rows, err := db.QueryOneParameterized(gorqlite.ParameterizedStatement{
+ Query: query,
+ Arguments: []interface{}{token},
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to query: %v: %v", err, rows.Err)
+ }
+ if !rows.Next() {
+ return nil, ErrNotFound
+ }
+
+ rolesStr := ""
+ if err = rows.Scan(&rolesStr); err != nil {
+ return nil, fmt.Errorf("failed to scan roles: %w", err)
+ }
+
+ rolesArr := []string{}
+ err = json.Unmarshal([]byte(rolesStr), &rolesArr)
+ if err != nil {
+ return nil, fmt.Errorf("failed to unmarshal roles: %w", err)
+ }
+
+ return rolesArr, nil
+}
diff --git a/pkg/store/types/constants.go b/pkg/store/types/constants.go
index a449968b28..1ce8b655d7 100644
--- a/pkg/store/types/constants.go
+++ b/pkg/store/types/constants.go
@@ -3,12 +3,12 @@ package types
type DownstreamVersionStatus string
const (
- VersionUnknown DownstreamVersionStatus = "unknown"
- VersionPendingConfig DownstreamVersionStatus = "pending_config"
- VersionPending DownstreamVersionStatus = "pending"
- VersionPendingPreflight DownstreamVersionStatus = "pending_preflight"
- VersionPendingDownload DownstreamVersionStatus = "pending_download"
- VersionDeploying DownstreamVersionStatus = "deploying"
- VersionDeployed DownstreamVersionStatus = "deployed"
- VersionFailed DownstreamVersionStatus = "failed"
+ VersionUnknown DownstreamVersionStatus = "unknown" // we don't know
+ VersionPendingConfig DownstreamVersionStatus = "pending_config" // needs required configuration
+ VersionPendingDownload DownstreamVersionStatus = "pending_download" // needs to be downloaded from the upstream source
+ VersionPendingPreflight DownstreamVersionStatus = "pending_preflight" // waiting for preflights to finish
+ VersionPending DownstreamVersionStatus = "pending" // can be deployed, but is not yet
+ VersionDeploying DownstreamVersionStatus = "deploying" // is being deployed
+ VersionDeployed DownstreamVersionStatus = "deployed" // did deploy successfully
+ VersionFailed DownstreamVersionStatus = "failed" // did not deploy successfully
)
diff --git a/web/package.json b/web/package.json
index 21caa76d0c..f58cc7c981 100644
--- a/web/package.json
+++ b/web/package.json
@@ -121,9 +121,13 @@
"webpack-merge": "5.8.0"
},
"dependencies": {
+ "@emotion/react": "^11.11.1",
+ "@emotion/styled": "^11.11.0",
"@grafana/data": "^8.5.16",
"@maji/react-prism": "^1.0.1",
"@monaco-editor/react": "^4.4.5",
+ "@mui/icons-material": "^5.14.14",
+ "@mui/material": "^5.14.14",
"@storybook/addon-storysource": "^6.5.16",
"@tanstack/react-query": "^4.36.1",
"@tanstack/react-query-devtools": "^4.36.1",
@@ -144,6 +148,7 @@
"js-yaml": "3.14.0",
"lodash": "4.17.21",
"markdown-it": "^12.3.2",
+ "material-react-table": "^1.15.1",
"monaco-editor": "^0.33.0",
"monaco-editor-webpack-plugin": "^7.0.1",
"node-polyfill-webpack-plugin": "^1.1.4",
diff --git a/web/src/Root.tsx b/web/src/Root.tsx
index 1260d96b8c..a78e3b46bc 100644
--- a/web/src/Root.tsx
+++ b/web/src/Root.tsx
@@ -58,6 +58,7 @@ import SnapshotDetails from "@components/snapshots/SnapshotDetails";
import SnapshotRestore from "@components/snapshots/SnapshotRestore";
import AppSnapshots from "@components/snapshots/AppSnapshots";
import AppSnapshotRestore from "@components/snapshots/AppSnapshotRestore";
+import HelmVMViewNode from "@components/apps/HelmVMViewNode";
// react-query client
const queryClient = new QueryClient();
@@ -531,6 +532,7 @@ const Root = () => {
appSlugFromMetadata={state.appSlugFromMetadata || ""}
fetchingMetadata={state.fetchingMetadata}
onUploadSuccess={getAppsList}
+ isHelmVM={Boolean(state.adminConsoleMetadata?.isHelmVM)}
/>
}
/>
@@ -573,16 +575,34 @@ const Root = () => {
}
/>
} />
-
- ) : (
-
- )
- }
- />
+ {state.adminConsoleMetadata?.isHelmVM && (
+ <>
+ }
+ />
+ }
+ />
+ >
+ )}
+ {(state.adminConsoleMetadata?.isKurl ||
+ state.adminConsoleMetadata?.isHelmVM) && (
+
+ ) : (
+
+ )
+ }
+ />
+ )}
+ {state.adminConsoleMetadata?.isHelmVM && (
+ } />
+ )}
}
@@ -672,6 +692,7 @@ const Root = () => {
snapshotInProgressApps={state.snapshotInProgressApps}
ping={ping}
isHelmManaged={state.isHelmManaged}
+ isHelmVM={Boolean(state.adminConsoleMetadata?.isHelmVM)}
/>
}
/>
@@ -687,6 +708,7 @@ const Root = () => {
snapshotInProgressApps={state.snapshotInProgressApps}
ping={ping}
isHelmManaged={state.isHelmManaged}
+ isHelmVM={Boolean(state.adminConsoleMetadata?.isHelmVM)}
/>
}
>
@@ -761,12 +783,7 @@ const Root = () => {
} />
- }
+ element={}
/>
{/* WHERE IS SELECTEDAPP */}
{state.app?.isAppIdentityServiceSupported && (
diff --git a/web/src/components/UploadLicenseFile.tsx b/web/src/components/UploadLicenseFile.tsx
index 19bc1cf95b..e08cf5b829 100644
--- a/web/src/components/UploadLicenseFile.tsx
+++ b/web/src/components/UploadLicenseFile.tsx
@@ -1,23 +1,23 @@
import React, { useEffect, useReducer } from "react";
-import { useNavigate } from "react-router-dom";
-import { Link } from "react-router-dom";
-import { KotsPageTitle } from "@components/Head";
-// TODO: upgrade this dependency
-// @ts-ignore
-import Dropzone from "react-dropzone";
+import { Link, useNavigate } from "react-router-dom";
import yaml from "js-yaml";
-import size from "lodash/size";
import isEmpty from "lodash/isEmpty";
import keyBy from "lodash/keyBy";
+import size from "lodash/size";
+// TODO: upgrade this dependency
+// @ts-ignore
+import Dropzone from "react-dropzone";
import Modal from "react-modal";
import Select from "react-select";
+
+import { KotsPageTitle } from "@components/Head";
import { getFileContent } from "../utilities/utilities";
-import CodeSnippet from "./shared/CodeSnippet";
+import Icon from "./Icon";
import LicenseUploadProgress from "./LicenseUploadProgress";
+import CodeSnippet from "./shared/CodeSnippet";
import "../scss/components/troubleshoot/UploadSupportBundleModal.scss";
import "../scss/components/UploadLicenseFile.scss";
-import Icon from "./Icon";
type LicenseYaml = {
spec: {
@@ -26,17 +26,6 @@ type LicenseYaml = {
};
};
-type Props = {
- appsListLength: number;
- appName: string;
- appSlugFromMetadata: string;
- fetchingMetadata: boolean;
- isBackupRestore?: boolean;
- onUploadSuccess: () => Promise;
- logo: string | null;
- snapshot?: { name: string };
-};
-
type SelectedAppToInstall = {
label: string;
value: string;
@@ -68,6 +57,19 @@ type UploadLicenseResponse = {
slug: string;
success?: boolean;
};
+
+type Props = {
+ appsListLength: number;
+ appName: string;
+ appSlugFromMetadata: string;
+ fetchingMetadata: boolean;
+ isBackupRestore?: boolean;
+ onUploadSuccess: () => Promise;
+ logo: string | null;
+ snapshot?: { name: string };
+ isHelmVM: boolean;
+};
+
const UploadLicenseFile = (props: Props) => {
const [state, setState] = useReducer(
+ (currentState: State, newState: Partial<State>) => ({
@@ -259,6 +261,11 @@ const UploadLicenseFile = (props: Props) => {
return;
}
+ if (props.isHelmVM) {
+ navigate(`/${data.slug}/cluster/manage`, { replace: true });
+ return;
+ }
+
if (data.isConfigurable) {
navigate(`/${data.slug}/config`, { replace: true });
return;
diff --git a/web/src/components/apps/AddNodeModal.tsx b/web/src/components/apps/AddNodeModal.tsx
new file mode 100644
index 0000000000..b5d22af568
--- /dev/null
+++ b/web/src/components/apps/AddNodeModal.tsx
@@ -0,0 +1,181 @@
+import { useQuery } from "@tanstack/react-query";
+import cx from "classnames";
+import React, { ChangeEvent, useState } from "react";
+import Modal from "react-modal";
+
+import Icon from "@components/Icon";
+import CodeSnippet from "@components/shared/CodeSnippet";
+import { Utilities } from "@src/utilities/utilities";
+
+const AddNodeModal = ({
+ showModal,
+ handleCloseModal,
+}: {
+ showModal: boolean;
+ handleCloseModal: () => void;
+}) => {
+ const [selectedNodeTypes, setSelectedNodeTypes] = useState<string[]>([]);
+
+ type AddNodeCommandResponse = {
+ command: string[];
+ };
+
+ const {
+ data: generateAddNodeCommand,
+ isLoading: generateAddNodeCommandLoading,
+ error: generateAddNodeCommandError,
+ } = useQuery({
+ queryKey: ["generateAddNodeCommand", selectedNodeTypes],
+ queryFn: async ({ queryKey }) => {
+ const [, nodeTypes] = queryKey;
+ const res = await fetch(
+ `${process.env.API_ENDPOINT}/helmvm/generate-node-join-command`,
+ {
+ headers: {
+ "Content-Type": "application/json",
+ Accept: "application/json",
+ },
+ credentials: "include",
+ method: "POST",
+ body: JSON.stringify({
+ roles: nodeTypes,
+ }),
+ }
+ );
+ if (!res.ok) {
+ if (res.status === 401) {
+ Utilities.logoutUser();
+ }
+ console.log(
+ "failed to get generate node command, unexpected status code",
+ res.status
+ );
+ try {
+ const error = await res.json();
+ throw new Error(
+ error?.error?.message || error?.error || error?.message
+ );
+ } catch (err) {
+ throw new Error(
+ "Unable to generate node join command, please try again later."
+ );
+ }
+ }
+ return res.json();
+ },
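+ // only issue the request once at least one node type has been selected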
+ enabled: selectedNodeTypes.length > 0,
+ });
+ // #region node type logic
+ const NODE_TYPES = ["controller"];
+
+ const determineDisabledState = () => {
+ return false;
+ };
+
+ const handleSelectNodeType = (e: ChangeEvent<HTMLInputElement>) => {
+ let nodeType = e.currentTarget.value;
+ let types = selectedNodeTypes;
+
+ if (selectedNodeTypes.includes(nodeType)) {
+ setSelectedNodeTypes(types.filter((type) => type !== nodeType));
+ } else {
+ setSelectedNodeTypes([...types, nodeType]);
+ }
+ };
+ // #endregion
+ return (
+
+
+
+
+ Add a Node
+
+
+
+
+ To add a node to this cluster, select the type of node you'd like to
+ add. Once you've selected a node type, we will generate a node join
+ command for you to use in the CLI. When the node successfully joins
+ the cluster, you will see it appear in the list of nodes on this page.
+
- Are you sure you want to drain {this.state.nodeNameToDrain}?
-
-
- Draining this node may cause data loss. If you want to delete{" "}
- {this.state.nodeNameToDrain} you must disconnect it after it has
- been drained.
-