feat(ec_join): update handler to return tcp connections required (#5004)
* feat(ec_join): add method to return all ready node ip addresses

* feat(ec_join): update handler to return node ips

* chore(test): create a struct and interface to allow kube client mocks in handlers

* chore: use newly created struct

* chore: tests for join handler

* chore: moaaar tests

* chore: tests for the worker and controller node IPs

* chore: refactor endpoint to return full endpoint list vs node ips
JGAntunes authored Nov 26, 2024
1 parent e87a657 commit 7eacbb7
Showing 10 changed files with 689 additions and 16 deletions.
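
The substance of the change: the join handler now returns the full list of host:port endpoints a joining node must be able to reach, rather than bare node IPs, so the joining side can verify connectivity up front with plain TCP dials. A minimal sketch of such a probe, assuming only that the endpoint list has the host:port shape produced by GetEndpointsToCheck below (the helper name and timeout are invented for illustration):

    package main

    import (
        "fmt"
        "net"
        "time"
    )

    // checkEndpoints dials each host:port with a short timeout and collects
    // the endpoints that could not be reached. Hypothetical helper; not part
    // of this commit.
    func checkEndpoints(endpoints []string) []string {
        var unreachable []string
        for _, ep := range endpoints {
            conn, err := net.DialTimeout("tcp", ep, 3*time.Second)
            if err != nil {
                unreachable = append(unreachable, ep)
                continue
            }
            conn.Close()
        }
        return unreachable
    }

    func main() {
        // Endpoints in the shape GetEndpointsToCheck returns.
        endpoints := []string{"192.168.0.100:6443", "192.168.0.100:9443"}
        for _, ep := range checkEndpoints(endpoints) {
            fmt.Printf("cannot reach %s\n", ep)
        }
    }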
2 changes: 1 addition & 1 deletion pkg/apiserver/server.go
@@ -157,7 +157,7 @@ func Start(params *APIServerParams) {
loggingRouter := r.NewRoute().Subrouter()
loggingRouter.Use(handlers.LoggingMiddleware)

- handler := &handlers.Handler{}
+ handler := handlers.NewHandler()

/**********************************************************************
* Unauthenticated routes
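Per the commit messages, NewHandler replaces direct struct construction so that tests can inject a mock Kubernetes client. The actual struct and interface live in pkg/handlers and are not shown in this excerpt; the sketch below is an assumption about their shape (the KubeClientBuilder name and method signature are invented for illustration):

    package handlers

    import (
        "context"

        kbclient "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // KubeClientBuilder is an assumed interface -- the commit messages only
    // say a struct and interface were added to allow kube client mocks.
    type KubeClientBuilder interface {
        GetKubeClient(ctx context.Context) (kbclient.Client, error)
    }

    // Handler embeds the builder so request handlers can fetch a client;
    // tests substitute a mock implementation.
    type Handler struct {
        KubeClientBuilder
    }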
88 changes: 82 additions & 6 deletions pkg/embeddedcluster/node_join.go
@@ -77,6 +77,75 @@ func GenerateAddNodeToken(ctx context.Context, client kbclient.Client, nodeRole
return newToken, nil
}

// GetEndpointsToCheck returns the list of endpoints that should be checked by a node joining the cluster,
// based on the roles the node will have
func GetEndpointsToCheck(ctx context.Context, client kbclient.Client, roles []string) ([]string, error) {
controllerRoleName, err := ControllerRoleName(ctx, client)
if err != nil {
return nil, fmt.Errorf("failed to get controller role name: %w", err)
}

isController := false
for _, role := range roles {
if role == controllerRoleName {
isController = true
break
}
}
controllerAddr, workerAddr, err := getAllNodeIPAddresses(ctx, client)
if err != nil {
return nil, fmt.Errorf("failed to get all node IP addresses: %w", err)
}

endpoints := []string{}
for _, addr := range controllerAddr {
// any joining node should be able to reach the kube-api port and k0s-api port on all the controllers
endpoints = append(endpoints, fmt.Sprintf("%s:6443", addr), fmt.Sprintf("%s:9443", addr))
if isController {
// controllers should be able to reach the etcd and kubelet ports on the controllers
endpoints = append(endpoints, fmt.Sprintf("%s:2380", addr), fmt.Sprintf("%s:10250", addr))
}
}
if isController {
for _, addr := range workerAddr {
// controllers should be able to reach the kubelet port on the workers
endpoints = append(endpoints, fmt.Sprintf("%s:10250", addr))
}
}
return endpoints, nil
}
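
For illustration, suppose a controller-role node joins a cluster with one ready controller at 10.0.0.1 and one ready worker at 10.0.0.2 (addresses invented). The returned list would be:

    10.0.0.1:6443   (kube-api, checked by every joining node)
    10.0.0.1:9443   (k0s-api, checked by every joining node)
    10.0.0.1:2380   (etcd, controllers only)
    10.0.0.1:10250  (kubelet on controllers, controllers only)
    10.0.0.2:10250  (kubelet on workers, controllers only)

A worker-role join against the same cluster would get only 10.0.0.1:6443 and 10.0.0.1:9443.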

// getAllNodeIPAddresses returns the internal IP addresses of all the ready nodes in the cluster grouped by
// controller and worker nodes respectively
func getAllNodeIPAddresses(ctx context.Context, client kbclient.Client) ([]string, []string, error) {
var nodes corev1.NodeList
if err := client.List(ctx, &nodes); err != nil {
return nil, nil, fmt.Errorf("failed to list nodes: %w", err)
}

controllerAddr := []string{}
workerAddr := []string{}
for _, node := range nodes.Items {
// Only consider nodes that are ready
if !isReady(node) {
continue
}

// Filter nodes by control-plane and worker roles
if cp, ok := node.Labels["node-role.kubernetes.io/control-plane"]; ok && cp == "true" {
if addr := findInternalIPAddress(node.Status.Addresses); addr != nil {
controllerAddr = append(controllerAddr, addr.Address)
}
} else {
if addr := findInternalIPAddress(node.Status.Addresses); addr != nil {
workerAddr = append(workerAddr, addr.Address)
}
}
}

return controllerAddr, workerAddr, nil
}
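
getAllNodeIPAddresses relies on an isReady helper that is referenced here but not part of this diff. Judging from the test fixtures below, which toggle the NodeReady condition to include or exclude nodes, it presumably checks that condition; a sketch under that assumption:

    package embeddedcluster

    import corev1 "k8s.io/api/core/v1"

    // isReady reports whether the node's NodeReady condition is true.
    // Assumed reconstruction; the real helper is defined elsewhere in
    // the package.
    func isReady(node corev1.Node) bool {
        for _, condition := range node.Status.Conditions {
            if condition.Type == corev1.NodeReady {
                return condition.Status == corev1.ConditionTrue
            }
        }
        return false
    }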

func makeK0sToken(ctx context.Context, client kbclient.Client, nodeRole string) (string, error) {
rawToken, err := k8sutil.GenerateK0sBootstrapToken(client, time.Hour, nodeRole)
if err != nil {
@@ -89,7 +158,7 @@ func makeK0sToken(ctx context.Context, client kbclient.Client, nodeRole string)
}
cert = base64.StdEncoding.EncodeToString([]byte(cert))

- firstPrimary, err := firstPrimaryIpAddress(ctx, client)
+ firstPrimary, err := firstPrimaryIPAddress(ctx, client)
if err != nil {
return "", fmt.Errorf("failed to get first primary ip address: %w", err)
}
@@ -111,7 +180,7 @@ func makeK0sToken(ctx context.Context, client kbclient.Client, nodeRole string)
return b64Token, nil
}

- func firstPrimaryIpAddress(ctx context.Context, client kbclient.Client) (string, error) {
+ func firstPrimaryIPAddress(ctx context.Context, client kbclient.Client) (string, error) {
var nodes corev1.NodeList
if err := client.List(ctx, &nodes); err != nil {
return "", fmt.Errorf("failed to list nodes: %w", err)
@@ -122,16 +191,23 @@ func firstPrimaryIpAddress(ctx context.Context, client kbclient.Client) (string,
continue
}

- for _, address := range node.Status.Addresses {
- if address.Type == "InternalIP" {
- return address.Address, nil
- }
- }
+ if addr := findInternalIPAddress(node.Status.Addresses); addr != nil {
+ return addr.Address, nil
+ }
}

return "", fmt.Errorf("failed to find controller node")
}

func findInternalIPAddress(addresses []corev1.NodeAddress) *corev1.NodeAddress {
for _, address := range addresses {
if address.Type == "InternalIP" {
return &address
}
}
return nil
}

// GenerateAddNodeCommand returns the command a user should run to add a node with the provided token
// the command will be of the form 'embeddedcluster node join ip:port UUID'
func GenerateAddNodeCommand(ctx context.Context, kbClient kbclient.Client, token string, isAirgap bool) (string, error) {
255 changes: 255 additions & 0 deletions pkg/embeddedcluster/node_join_test.go
@@ -11,6 +11,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
kbclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

@@ -99,3 +100,257 @@ func TestGenerateAddNodeCommand(t *testing.T) {
wantCommand = "sudo ./my-app join --airgap-bundle my-app.airgap 192.168.0.100:30000 token"
req.Equal(wantCommand, gotCommand)
}

func TestGetAllNodeIPAddresses(t *testing.T) {
scheme := runtime.NewScheme()
corev1.AddToScheme(scheme)
embeddedclusterv1beta1.AddToScheme(scheme)

tests := []struct {
name string
roles []string
kbClient kbclient.Client
expectedEndpoints []string
}{
{
name: "no nodes",
roles: []string{"some-role"},
kbClient: fake.NewClientBuilder().WithScheme(scheme).WithObjects(
&embeddedclusterv1beta1.Installation{
ObjectMeta: metav1.ObjectMeta{
Name: time.Now().Format("20060102150405"),
},
Spec: embeddedclusterv1beta1.InstallationSpec{
BinaryName: "my-app",
Config: &embeddedclusterv1beta1.ConfigSpec{
Version: "v1.100.0",
Roles: embeddedclusterv1beta1.Roles{
Controller: embeddedclusterv1beta1.NodeRole{
Name: "controller-role",
},
},
},
},
},
).Build(),
expectedEndpoints: []string{},
},
{
name: "worker node joining cluster with 1 controller and 1 worker",
roles: []string{"some-role"},
kbClient: fake.NewClientBuilder().WithScheme(scheme).WithObjects(
&embeddedclusterv1beta1.Installation{
ObjectMeta: metav1.ObjectMeta{
Name: time.Now().Format("20060102150405"),
},
Spec: embeddedclusterv1beta1.InstallationSpec{
BinaryName: "my-app",
Config: &embeddedclusterv1beta1.ConfigSpec{
Version: "v1.100.0",
Roles: embeddedclusterv1beta1.Roles{
Controller: embeddedclusterv1beta1.NodeRole{
Name: "controller-role",
},
},
},
},
},
&corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "controller",
Labels: map[string]string{
"node-role.kubernetes.io/control-plane": "true",
},
},
Status: corev1.NodeStatus{
Conditions: []corev1.NodeCondition{
{
Type: corev1.NodeReady,
Status: corev1.ConditionTrue,
},
},
Addresses: []corev1.NodeAddress{
{
Type: corev1.NodeInternalIP,
Address: "192.168.0.100",
},
},
},
},
&corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "worker",
Labels: map[string]string{
"node-role.kubernetes.io/control-plane": "false",
},
},
Status: corev1.NodeStatus{
Conditions: []corev1.NodeCondition{
{
Type: corev1.NodeReady,
Status: corev1.ConditionTrue,
},
},
Addresses: []corev1.NodeAddress{
{
Type: corev1.NodeInternalIP,
Address: "192.168.0.101",
},
},
},
},
).Build(),
expectedEndpoints: []string{"192.168.0.100:6443", "192.168.0.100:9443"},
},
{
name: "controller node joining cluster with 2 controller ready, 1 controller not ready, 1 worker ready, 1 worker not ready",
roles: []string{"controller-role"},
kbClient: fake.NewClientBuilder().WithScheme(scheme).WithObjects(
&embeddedclusterv1beta1.Installation{
ObjectMeta: metav1.ObjectMeta{
Name: time.Now().Format("20060102150405"),
},
Spec: embeddedclusterv1beta1.InstallationSpec{
BinaryName: "my-app",
Config: &embeddedclusterv1beta1.ConfigSpec{
Version: "v1.100.0",
Roles: embeddedclusterv1beta1.Roles{
Controller: embeddedclusterv1beta1.NodeRole{
Name: "controller-role",
},
},
},
},
},
&corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "controller 1",
Labels: map[string]string{
"node-role.kubernetes.io/control-plane": "true",
},
},
Status: corev1.NodeStatus{
Conditions: []corev1.NodeCondition{
{
Type: corev1.NodeReady,
Status: corev1.ConditionTrue,
},
},
Addresses: []corev1.NodeAddress{
{
Type: corev1.NodeInternalIP,
Address: "192.168.0.100",
},
},
},
},
&corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "controller 2",
Labels: map[string]string{
"node-role.kubernetes.io/control-plane": "true",
},
},
Status: corev1.NodeStatus{
Conditions: []corev1.NodeCondition{
{
Type: corev1.NodeReady,
Status: corev1.ConditionFalse,
},
},
Addresses: []corev1.NodeAddress{
{
Type: corev1.NodeInternalIP,
Address: "192.168.0.101",
},
},
},
},
&corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "controller 3",
Labels: map[string]string{
"node-role.kubernetes.io/control-plane": "true",
},
},
Status: corev1.NodeStatus{
Conditions: []corev1.NodeCondition{
{
Type: corev1.NodeReady,
Status: corev1.ConditionTrue,
},
},
Addresses: []corev1.NodeAddress{
{
Type: corev1.NodeInternalIP,
Address: "192.168.0.102",
},
},
},
},
&corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "worker 1",
Labels: map[string]string{},
},
Status: corev1.NodeStatus{
Conditions: []corev1.NodeCondition{
{
Type: corev1.NodeReady,
Status: corev1.ConditionTrue,
},
},
Addresses: []corev1.NodeAddress{
{
Type: corev1.NodeInternalIP,
Address: "192.168.0.103",
},
},
},
},
&corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "worker 2",
Labels: map[string]string{
"node-role.kubernetes.io/control-plane": "false",
},
},
Status: corev1.NodeStatus{
Conditions: []corev1.NodeCondition{
{
Type: corev1.NodeReady,
Status: corev1.ConditionFalse,
},
},
Addresses: []corev1.NodeAddress{
{
Type: corev1.NodeInternalIP,
Address: "192.168.0.104",
},
},
},
},
).Build(),
expectedEndpoints: []string{
"192.168.0.100:6443",
"192.168.0.100:9443",
"192.168.0.100:2380",
"192.168.0.100:10250",
"192.168.0.102:6443",
"192.168.0.102:9443",
"192.168.0.102:2380",
"192.168.0.102:10250",
"192.168.0.103:10250",
},
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
req := require.New(t)
endpoints, err := GetEndpointsToCheck(context.Background(), test.kbClient, test.roles)
req.NoError(err)
req.Equal(test.expectedEndpoints, endpoints)
})
}
}
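
The new table-driven tests can be run in isolation with the standard Go test runner (package path taken from the file listing above):

    go test ./pkg/embeddedcluster -run 'TestGetAllNodeIPAddresses' -v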