Remove usage of embedded-cluster-config configmap (#4662)
* Remove usage of embedded-cluster-config configmap
sgalsaleh authored Jun 5, 2024
1 parent 5884615 commit 22a41b5
Showing 14 changed files with 180 additions and 152 deletions.
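At a glance, every consumer that used to read the embedded-cluster-config ConfigMap now reads the Installation custom resource through a controller-runtime client. The sketch below contrasts the two access patterns under stated assumptions: the helper names binaryNameFromConfigMap and binaryNameFromInstallation are illustrative, InstallationList is assumed to be the standard kubebuilder-generated list type for the embedded-cluster-kinds Installation, and picking the lexically largest name as "latest" only approximates what GetCurrentInstallation does (installations are named with a timestamp, as the test changes below show).

```go
package example

import (
	"context"
	"fmt"

	embeddedclusterv1beta1 "github.com/replicatedhq/embedded-cluster-kinds/apis/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	kbclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// binaryNameFromConfigMap shows the pattern this commit removes: values such as
// the binary name were read from the embedded-cluster-config ConfigMap.
func binaryNameFromConfigMap(ctx context.Context, clientset kubernetes.Interface) (string, error) {
	cm, err := clientset.CoreV1().ConfigMaps("embedded-cluster").Get(ctx, "embedded-cluster-config", metav1.GetOptions{})
	if err != nil {
		return "", fmt.Errorf("failed to read configmap: %w", err)
	}
	return cm.Data["embedded-binary-name"], nil
}

// binaryNameFromInstallation shows the replacement pattern: the same value now
// comes from the most recent Installation object, listed via controller-runtime.
func binaryNameFromInstallation(ctx context.Context, kbClient kbclient.Client) (string, error) {
	var installations embeddedclusterv1beta1.InstallationList // assumed kubebuilder-generated list type
	if err := kbClient.List(ctx, &installations); err != nil {
		return "", fmt.Errorf("failed to list installations: %w", err)
	}
	if len(installations.Items) == 0 {
		return "", fmt.Errorf("no installation found")
	}
	latest := installations.Items[0]
	for _, in := range installations.Items[1:] {
		if in.Name > latest.Name { // names are timestamps, so lexical order approximates recency
			latest = in
		}
	}
	return latest.Spec.BinaryName, nil
}
```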
go.mod (3 changes: 2 additions & 1 deletion)
@@ -264,7 +264,7 @@ require (
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mattn/go-shellwords v1.0.12 // indirect
github.com/mattn/go-sqlite3 v2.0.3+incompatible // indirect
github.com/mattn/go-sqlite3 v2.0.3+incompatible
github.com/microsoft/go-mssqldb v1.7.1 // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
@@ -401,6 +401,7 @@ require (
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/c9s/goprocinfo v0.0.0-20190309065803-0b2ad9ac246b // indirect
github.com/containers/storage v1.53.0 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/kopia/kopia v0.10.7 // indirect
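The dependency shuffle above tracks the move to a controller-runtime client for reading embedded-cluster objects (go-logr/zapr is likely pulled in transitively as the zap adapter used by controller-runtime logging). The hunks below call k8sutil.GetKubeClient for that client; its implementation is not shown in this diff, so the following is only a sketch of what such a helper can look like, with the function name newKubeClient and the scheme choices as assumptions.

```go
package example

import (
	"context"
	"fmt"

	embeddedclusterv1beta1 "github.com/replicatedhq/embedded-cluster-kinds/apis/v1beta1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

// newKubeClient sketches a GetKubeClient-style helper: a controller-runtime
// client whose scheme registers both core types and the embedded-cluster
// types, so kbClient.Get/List work for Nodes, Services, and Installations alike.
func newKubeClient(_ context.Context) (client.Client, error) {
	scheme := runtime.NewScheme()
	if err := corev1.AddToScheme(scheme); err != nil {
		return nil, fmt.Errorf("failed to add corev1 to scheme: %w", err)
	}
	if err := embeddedclusterv1beta1.AddToScheme(scheme); err != nil {
		return nil, fmt.Errorf("failed to add embeddedcluster types to scheme: %w", err)
	}
	cfg, err := config.GetConfig() // in-cluster or kubeconfig, resolved by controller-runtime
	if err != nil {
		return nil, fmt.Errorf("failed to load kube config: %w", err)
	}
	return client.New(cfg, client.Options{Scheme: scheme})
}
```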
pkg/embeddedcluster/monitor.go (14 changes: 12 additions & 2 deletions)
@@ -9,6 +9,7 @@ import (

embeddedclusterv1beta1 "github.com/replicatedhq/embedded-cluster-kinds/apis/v1beta1"
appstatetypes "github.com/replicatedhq/kots/pkg/appstate/types"
"github.com/replicatedhq/kots/pkg/k8sutil"
"github.com/replicatedhq/kots/pkg/kotsutil"
"github.com/replicatedhq/kots/pkg/logger"
"github.com/replicatedhq/kots/pkg/store"
@@ -32,8 +33,13 @@ func MaybeStartClusterUpgrade(ctx context.Context, store store.Store, kotsKinds
return nil
}

kbClient, err := k8sutil.GetKubeClient(ctx)
if err != nil {
return fmt.Errorf("failed to get kubeclient: %w", err)
}

spec := kotsKinds.EmbeddedClusterConfig.Spec
if upgrade, err := RequiresUpgrade(ctx, spec); err != nil {
if upgrade, err := RequiresUpgrade(ctx, kbClient, spec); err != nil {
// if there is no installation object we can't start an upgrade. this is a valid
// scenario specially during cluster bootstrap. as we do not need to upgrade the
// cluster just after its installation we can return nil here.
@@ -123,7 +129,11 @@ func watchClusterState(ctx context.Context, store store.Store) {
// by reading the latest embedded cluster installation CRD.
// If the lastState is the same as the current state, it will not update the database.
func updateClusterState(ctx context.Context, store store.Store, lastState string) (string, error) {
installation, err := GetCurrentInstallation(ctx)
kbClient, err := k8sutil.GetKubeClient(ctx)
if err != nil {
return "", fmt.Errorf("failed to get kubeclient: %w", err)
}
installation, err := GetCurrentInstallation(ctx, kbClient)
if err != nil {
return "", fmt.Errorf("failed to get current installation: %w", err)
}
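The monitor now funnels every read through the Installation object: both RequiresUpgrade and GetCurrentInstallation take the controller-runtime client built by k8sutil.GetKubeClient. The RequiresUpgrade body is not part of the hunks above; the sketch below is a simplified, assumed version that only compares the embedded-cluster version, and it further assumes GetCurrentInstallation returns *embeddedclusterv1beta1.Installation and that the applied config is recorded under installation.Spec.Config.

```go
package example

import (
	"context"
	"fmt"

	embeddedclusterv1beta1 "github.com/replicatedhq/embedded-cluster-kinds/apis/v1beta1"
	"github.com/replicatedhq/kots/pkg/embeddedcluster"
	kbclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// requiresUpgradeSketch reports whether the desired config differs from the
// config recorded on the current Installation. Simplified: only the version
// field is compared; the real RequiresUpgrade may look at more.
func requiresUpgradeSketch(ctx context.Context, kbClient kbclient.Client, desired embeddedclusterv1beta1.ConfigSpec) (bool, error) {
	installation, err := embeddedcluster.GetCurrentInstallation(ctx, kbClient)
	if err != nil {
		return false, fmt.Errorf("failed to get current installation: %w", err)
	}
	if installation.Spec.Config == nil {
		return false, fmt.Errorf("installation has no config")
	}
	return installation.Spec.Config.Version != desired.Version, nil
}
```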
pkg/embeddedcluster/node_join.go (57 changes: 28 additions & 29 deletions)
@@ -11,9 +11,10 @@ import (
"github.com/replicatedhq/kots/pkg/embeddedcluster/types"
"github.com/replicatedhq/kots/pkg/k8sutil"
"github.com/replicatedhq/kots/pkg/util"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
k8stypes "k8s.io/apimachinery/pkg/types"
kbclient "sigs.k8s.io/controller-runtime/pkg/client"
)

type joinTokenEntry struct {
@@ -46,7 +47,7 @@ users:

// GenerateAddNodeToken will generate the embedded cluster node add command for a node with the specified roles
// join commands will last for 24 hours, and will be cached for 1 hour after first generation
func GenerateAddNodeToken(ctx context.Context, client kubernetes.Interface, nodeRole string) (string, error) {
func GenerateAddNodeToken(ctx context.Context, client kbclient.Client, nodeRole string) (string, error) {
// get the joinToken struct entry for this node role
joinTokenMapMut.Lock()
if _, ok := joinTokenMap[nodeRole]; !ok {
@@ -76,7 +77,7 @@ func GenerateAddNodeToken(ctx context.Context, client kubernetes.Interface, node
return newToken, nil
}

func makeK0sToken(ctx context.Context, client kubernetes.Interface, nodeRole string) (string, error) {
func makeK0sToken(ctx context.Context, client kbclient.Client, nodeRole string) (string, error) {
rawToken, err := k8sutil.GenerateK0sBootstrapToken(client, time.Hour, nodeRole)
if err != nil {
return "", fmt.Errorf("failed to generate bootstrap token: %w", err)
@@ -110,9 +111,9 @@ func makeK0sToken(ctx context.Context, client kubernetes.Interface, nodeRole str
return b64Token, nil
}

func firstPrimaryIpAddress(ctx context.Context, client kubernetes.Interface) (string, error) {
nodes, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
func firstPrimaryIpAddress(ctx context.Context, client kbclient.Client) (string, error) {
var nodes corev1.NodeList
if err := client.List(ctx, &nodes); err != nil {
return "", fmt.Errorf("failed to list nodes: %w", err)
}

@@ -126,30 +127,29 @@ func firstPrimaryIpAddress(ctx context.Context, client kubernetes.Interface) (st
return address.Address, nil
}
}

}

return "", fmt.Errorf("failed to find controller node")
}

// GenerateAddNodeCommand returns the command a user should run to add a node with the provided token
// the command will be of the form 'embeddedcluster node join ip:port UUID'
func GenerateAddNodeCommand(ctx context.Context, client kubernetes.Interface, token string, isAirgap bool) (string, error) {
cm, err := ReadConfigMap(client)
func GenerateAddNodeCommand(ctx context.Context, kbClient kbclient.Client, token string, isAirgap bool) (string, error) {
installation, err := GetCurrentInstallation(ctx, kbClient)
if err != nil {
return "", fmt.Errorf("failed to read configmap: %w", err)
return "", fmt.Errorf("failed to get current installation: %w", err)
}

binaryName := cm.Data["embedded-binary-name"]
binaryName := installation.Spec.BinaryName

// get the IP of a controller node
nodeIP, err := getControllerNodeIP(ctx, client)
nodeIP, err := getControllerNodeIP(ctx, kbClient)
if err != nil {
return "", fmt.Errorf("failed to get controller node IP: %w", err)
}

// get the port of the 'admin-console' service
port, err := getAdminConsolePort(ctx, client)
port, err := getAdminConsolePort(ctx, kbClient)
if err != nil {
return "", fmt.Errorf("failed to get admin console port: %w", err)
}
@@ -165,8 +165,8 @@ func GenerateAddNodeCommand(ctx context.Context, client kubernetes.Interface, to

// GenerateK0sJoinCommand returns the k0s node join command, without the token but with all other required flags
// (including node labels generated from the roles etc)
func GenerateK0sJoinCommand(ctx context.Context, client kubernetes.Interface, roles []string) (string, error) {
controllerRoleName, err := ControllerRoleName(ctx)
func GenerateK0sJoinCommand(ctx context.Context, kbClient kbclient.Client, roles []string) (string, error) {
controllerRoleName, err := ControllerRoleName(ctx, kbClient)
if err != nil {
return "", fmt.Errorf("failed to get controller role name: %w", err)
}
@@ -183,7 +183,7 @@ func GenerateK0sJoinCommand(ctx context.Context, client kubernetes.Interface, ro
cmd = append(cmd, "--enable-worker", "--no-taints")
}

labels, err := getRolesNodeLabels(ctx, roles)
labels, err := getRolesNodeLabels(ctx, kbClient, roles)
if err != nil {
return "", fmt.Errorf("failed to get role labels: %w", err)
}
@@ -193,11 +193,11 @@ }
}

// gets the port of the 'admin-console' or 'kurl-proxy-kotsadm' service
func getAdminConsolePort(ctx context.Context, client kubernetes.Interface) (int32, error) {
kurlProxyPort, err := getAdminConsolePortImpl(ctx, client, "kurl-proxy-kotsadm")
func getAdminConsolePort(ctx context.Context, kbClient kbclient.Client) (int32, error) {
kurlProxyPort, err := getAdminConsolePortImpl(ctx, kbClient, "kurl-proxy-kotsadm")
if err != nil {
if errors.IsNotFound(err) {
adminConsolePort, err := getAdminConsolePortImpl(ctx, client, "admin-console")
adminConsolePort, err := getAdminConsolePortImpl(ctx, kbClient, "admin-console")
if err != nil {
return -1, fmt.Errorf("failed to get admin-console port: %w", err)
}
@@ -208,9 +208,9 @@ func getAdminConsolePort(ctx context.Context, kbClient kbclient.Client) (int3
return kurlProxyPort, nil
}

func getAdminConsolePortImpl(ctx context.Context, client kubernetes.Interface, svcName string) (int32, error) {
svc, err := client.CoreV1().Services(util.PodNamespace).Get(ctx, svcName, metav1.GetOptions{})
if err != nil {
func getAdminConsolePortImpl(ctx context.Context, kbClient kbclient.Client, svcName string) (int32, error) {
var svc corev1.Service
if err := kbClient.Get(ctx, k8stypes.NamespacedName{Name: svcName, Namespace: util.PodNamespace}, &svc); err != nil {
return -1, fmt.Errorf("failed to get %s service: %w", svcName, err)
}

@@ -227,9 +227,9 @@ }
}

// getControllerNodeIP gets the IP of a healthy controller node
func getControllerNodeIP(ctx context.Context, client kubernetes.Interface) (string, error) {
nodes, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
func getControllerNodeIP(ctx context.Context, kbClient kbclient.Client) (string, error) {
var nodes corev1.NodeList
if err := kbClient.List(ctx, &nodes); err != nil {
return "", fmt.Errorf("failed to list nodes: %w", err)
}

@@ -247,16 +247,15 @@ func getControllerNodeIP(ctx context.Context, client kubernetes.Interface) (stri
}
}
}

}

return "", fmt.Errorf("failed to find healthy controller node")
}

func getRolesNodeLabels(ctx context.Context, roles []string) (string, error) {
func getRolesNodeLabels(ctx context.Context, kbClient kbclient.Client, roles []string) (string, error) {
roleListLabels := getRoleListLabels(roles)

labels, err := getRoleNodeLabels(ctx, roles)
labels, err := getRoleNodeLabels(ctx, kbClient, roles)
if err != nil {
return "", fmt.Errorf("failed to get node labels for roles %v: %w", roles, err)
}
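Taken together, the node-join helpers now share one kbclient.Client instead of a typed clientset. The caller below is illustrative (printJoinCommand is not a function in this commit) and simply wires the refactored signatures together the way the API handlers elsewhere in this changeset presumably do.

```go
package example

import (
	"context"
	"fmt"

	"github.com/replicatedhq/kots/pkg/embeddedcluster"
	"github.com/replicatedhq/kots/pkg/k8sutil"
)

// printJoinCommand builds a join token for the requested role and prints the
// full node join command, using the refactored helpers that take a
// controller-runtime client.
func printJoinCommand(ctx context.Context, nodeRole string, isAirgap bool) error {
	kbClient, err := k8sutil.GetKubeClient(ctx)
	if err != nil {
		return fmt.Errorf("failed to get kubeclient: %w", err)
	}

	token, err := embeddedcluster.GenerateAddNodeToken(ctx, kbClient, nodeRole)
	if err != nil {
		return fmt.Errorf("failed to generate add node token: %w", err)
	}

	cmd, err := embeddedcluster.GenerateAddNodeCommand(ctx, kbClient, token, isAirgap)
	if err != nil {
		return fmt.Errorf("failed to generate add node command: %w", err)
	}

	fmt.Println(cmd)
	return nil
}
```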
pkg/embeddedcluster/node_join_test.go (29 changes: 19 additions & 10 deletions)
@@ -3,12 +3,15 @@ package embeddedcluster
import (
"context"
"testing"
"time"

embeddedclusterv1beta1 "github.com/replicatedhq/embedded-cluster-kinds/apis/v1beta1"
"github.com/replicatedhq/kots/pkg/util"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func TestGenerateAddNodeCommand(t *testing.T) {
@@ -17,15 +20,21 @@ func TestGenerateAddNodeCommand(t *testing.T) {
util.PodNamespace = ""
}()

scheme := runtime.NewScheme()
corev1.AddToScheme(scheme)
embeddedclusterv1beta1.AddToScheme(scheme)

// Create a fake clientset
clientset := fake.NewSimpleClientset(
&corev1.ConfigMap{
kbClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(
&embeddedclusterv1beta1.Installation{
ObjectMeta: metav1.ObjectMeta{
Name: "embedded-cluster-config",
Namespace: "embedded-cluster",
Name: time.Now().Format("20060102150405"),
Labels: map[string]string{
"replicated.com/disaster-recovery": "ec-install",
},
},
Data: map[string]string{
"embedded-binary-name": "my-app",
Spec: embeddedclusterv1beta1.InstallationSpec{
BinaryName: "my-app",
},
},
&corev1.Node{
@@ -66,12 +75,12 @@ func TestGenerateAddNodeCommand(t *testing.T) {
},
},
},
)
).Build()

req := require.New(t)

// Generate the add node command for online
gotCommand, err := GenerateAddNodeCommand(context.Background(), clientset, "token", false)
gotCommand, err := GenerateAddNodeCommand(context.Background(), kbClient, "token", false)
if err != nil {
t.Fatalf("Failed to generate add node command: %v", err)
}
@@ -81,7 +90,7 @@ func TestGenerateAddNodeCommand(t *testing.T) {
req.Equal(wantCommand, gotCommand)

// Generate the add node command for airgap
gotCommand, err = GenerateAddNodeCommand(context.Background(), clientset, "token", true)
gotCommand, err = GenerateAddNodeCommand(context.Background(), kbClient, "token", true)
if err != nil {
t.Fatalf("Failed to generate add node command: %v", err)
}
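The test now seeds a controller-runtime fake client with an Installation object instead of the old ConfigMap. A small helper in the same spirit keeps that setup reusable; the name newFakeKubeClient, the objs parameter, and the error-checked scheme registration are additions for illustration, not part of the commit.

```go
package example

import (
	"testing"

	embeddedclusterv1beta1 "github.com/replicatedhq/embedded-cluster-kinds/apis/v1beta1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	kbclient "sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// newFakeKubeClient mirrors the scheme setup in the test above, failing the
// test if scheme registration errors instead of ignoring the returned error.
func newFakeKubeClient(t *testing.T, objs ...kbclient.Object) kbclient.Client {
	t.Helper()
	scheme := runtime.NewScheme()
	if err := corev1.AddToScheme(scheme); err != nil {
		t.Fatalf("failed to add corev1 to scheme: %v", err)
	}
	if err := embeddedclusterv1beta1.AddToScheme(scheme); err != nil {
		t.Fatalf("failed to add embeddedcluster types to scheme: %v", err)
	}
	return fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build()
}
```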
pkg/embeddedcluster/roles.go (13 changes: 7 additions & 6 deletions)
@@ -8,15 +8,16 @@ import (
"strings"

embeddedclusterv1beta1 "github.com/replicatedhq/embedded-cluster-kinds/apis/v1beta1"
kbclient "sigs.k8s.io/controller-runtime/pkg/client"
)

const DEFAULT_CONTROLLER_ROLE_NAME = "controller"

var labelValueRegex = regexp.MustCompile(`[^a-zA-Z0-9-_.]+`)

// GetRoles will get a list of role names
func GetRoles(ctx context.Context) ([]string, error) {
config, err := ClusterConfig(ctx)
func GetRoles(ctx context.Context, kbClient kbclient.Client) ([]string, error) {
config, err := ClusterConfig(ctx, kbClient)
if err != nil {
return nil, fmt.Errorf("failed to get cluster config: %w", err)
}
@@ -45,8 +46,8 @@

// ControllerRoleName determines the name for the 'controller' role
// this might be part of the config, or it might be the default
func ControllerRoleName(ctx context.Context) (string, error) {
conf, err := ClusterConfig(ctx)
func ControllerRoleName(ctx context.Context, kbClient kbclient.Client) (string, error) {
conf, err := ClusterConfig(ctx, kbClient)
if err != nil {
return "", fmt.Errorf("failed to get cluster config: %w", err)
}
@@ -89,8 +90,8 @@ func SortRoles(controllerRole string, inputRoles []string) []string {
}

// getRoleNodeLabels looks up roles in the cluster config and determines the additional labels to be applied from that
func getRoleNodeLabels(ctx context.Context, roles []string) ([]string, error) {
config, err := ClusterConfig(ctx)
func getRoleNodeLabels(ctx context.Context, kbClient kbclient.Client, roles []string) ([]string, error) {
config, err := ClusterConfig(ctx, kbClient)
if err != nil {
return nil, fmt.Errorf("failed to get cluster config: %w", err)
}
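With ClusterConfig now sourced from the Installation object, every role helper takes the controller-runtime client explicitly. The caller below is illustrative (sortedRoles is not a function in this commit) and shows how the refactored GetRoles, ControllerRoleName, and SortRoles compose.

```go
package example

import (
	"context"
	"fmt"

	"github.com/replicatedhq/kots/pkg/embeddedcluster"
	"github.com/replicatedhq/kots/pkg/k8sutil"
)

// sortedRoles resolves the configured role names and returns them with the
// controller role first, using the refactored helpers that take a client.
func sortedRoles(ctx context.Context) ([]string, error) {
	kbClient, err := k8sutil.GetKubeClient(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get kubeclient: %w", err)
	}

	roles, err := embeddedcluster.GetRoles(ctx, kbClient)
	if err != nil {
		return nil, fmt.Errorf("failed to get roles: %w", err)
	}

	controllerRole, err := embeddedcluster.ControllerRoleName(ctx, kbClient)
	if err != nil {
		return nil, fmt.Errorf("failed to get controller role name: %w", err)
	}

	return embeddedcluster.SortRoles(controllerRole, roles), nil
}
```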