From b399c50606da7e572e8852e088bdfd66f4139baa Mon Sep 17 00:00:00 2001 From: Mitali Paygude Date: Fri, 21 Jun 2024 16:52:56 -0700 Subject: [PATCH] Kubelet config e2e tests for vsphere and Docker (#8362) --- internal/pkg/api/cluster.go | 18 ++++ .../cloudstackisolatednetwork_types.go | 3 +- test/e2e/docker_test.go | 21 ++++ test/e2e/kubeletconfig.go | 16 +++ test/e2e/upgrade_from_latest.go | 20 ++-- test/e2e/vsphere_test.go | 21 ++++ test/framework/kubeletconfig.go | 99 +++++++++++++++++++ test/framework/network.go | 2 +- test/framework/vsphere.go | 6 ++ 9 files changed, 194 insertions(+), 12 deletions(-) create mode 100644 test/e2e/kubeletconfig.go create mode 100644 test/framework/kubeletconfig.go diff --git a/internal/pkg/api/cluster.go b/internal/pkg/api/cluster.go index a2fd6f03b58a..3a7793d56a61 100644 --- a/internal/pkg/api/cluster.go +++ b/internal/pkg/api/cluster.go @@ -5,6 +5,7 @@ import ( "strings" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" @@ -137,6 +138,13 @@ func WithControlPlaneAPIServerExtraArgs() ClusterFiller { } } +// WithControlPlaneKubeletConfig adds the Kubelet config to the control plane in cluster spec. +func WithControlPlaneKubeletConfig(kc *unstructured.Unstructured) ClusterFiller { + return func(c *anywherev1.Cluster) { + c.Spec.ControlPlaneConfiguration.KubeletConfiguration = kc + } +} + // RemoveAllAPIServerExtraArgs removes all the API server flags from the cluster spec. func RemoveAllAPIServerExtraArgs() ClusterFiller { return func(c *anywherev1.Cluster) { @@ -186,6 +194,16 @@ func workerNodeWithKubernetesVersion(name string, version *anywherev1.Kubernetes } } +// WithWorkerNodeKubeletConfig adds the Kubelet config to the worker node groups in cluster spec. 
+func WithWorkerNodeKubeletConfig(kc *unstructured.Unstructured) ClusterFiller { + return func(c *anywherev1.Cluster) { + if len(c.Spec.WorkerNodeGroupConfigurations) == 0 { + c.Spec.WorkerNodeGroupConfigurations = []anywherev1.WorkerNodeGroupConfiguration{{}} + } + c.Spec.WorkerNodeGroupConfigurations[0].KubeletConfiguration = kc + } +} + func WithWorkerNodeCount(r int) ClusterFiller { return func(c *anywherev1.Cluster) { if len(c.Spec.WorkerNodeGroupConfigurations) == 0 { diff --git a/internal/thirdparty/capc/api/v1beta3/cloudstackisolatednetwork_types.go b/internal/thirdparty/capc/api/v1beta3/cloudstackisolatednetwork_types.go index 76c15995bbbf..db2630c1be65 100644 --- a/internal/thirdparty/capc/api/v1beta3/cloudstackisolatednetwork_types.go +++ b/internal/thirdparty/capc/api/v1beta3/cloudstackisolatednetwork_types.go @@ -57,7 +57,8 @@ func (n *CloudStackIsolatedNetwork) Network() *Network { return &Network{ Name: n.Spec.Name, Type: "IsolatedNetwork", - ID: n.Spec.ID} + ID: n.Spec.ID, + } } //+kubebuilder:object:root=true diff --git a/test/e2e/docker_test.go b/test/e2e/docker_test.go index fe9c915d4659..1557f73b5d57 100644 --- a/test/e2e/docker_test.go +++ b/test/e2e/docker_test.go @@ -1459,3 +1459,24 @@ func TestDockerKubernetes129to130EtcdScaleDown(t *testing.T) { ), ) } + +// Kubelet Configuration e2e tests +func TestDockerKubernetes129KubeletConfigurationSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewDocker(t), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithKubeletConfig(), + ) + runKubeletConfigurationFlow(test) +} + +func TestDockerKubernetes130KubeletConfigurationSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewDocker(t), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithKubeletConfig(), + ) + runKubeletConfigurationFlow(test) +} diff --git a/test/e2e/kubeletconfig.go 
b/test/e2e/kubeletconfig.go new file mode 100644 index 000000000000..d10630abcede --- /dev/null +++ b/test/e2e/kubeletconfig.go @@ -0,0 +1,16 @@ +//go:build e2e +// +build e2e + +package e2e + +import ( + "github.com/aws/eks-anywhere/test/framework" +) + +func runKubeletConfigurationFlow(test *framework.ClusterE2ETest) { + test.GenerateClusterConfig() + test.CreateCluster() + test.ValidateKubeletConfig() + test.StopIfFailed() + test.DeleteCluster() +} diff --git a/test/e2e/upgrade_from_latest.go b/test/e2e/upgrade_from_latest.go index f3235fcd7219..0ba4407cf5dd 100644 --- a/test/e2e/upgrade_from_latest.go +++ b/test/e2e/upgrade_from_latest.go @@ -68,21 +68,21 @@ func runInPlaceUpgradeFromReleaseFlow(test *framework.ClusterE2ETest, latestRele // runMulticlusterUpgradeFromReleaseFlowAPI tests the ability to create workload clusters with an old Bundle in a management cluster // that has been updated to a new Bundle. It follows the following steps: -// 1. Create a management cluster with the old Bundle. -// 2. Create workload clusters with the old Bundle. -// 3. Upgrade the management cluster to the new Bundle and new Kubernetes version (newVersion). -// 4. Upgrade the workload clusters to the new Bundle and new Kubernetes version (newVersion). -// 5. Delete the workload clusters. -// 6. Re-create the workload clusters with the old Bundle and previous Kubernetes version (oldVersion). It's necessary to sometimes -// use a different kube version because the old Bundle might not support the new kubernetes version. -// 7. Delete the workload clusters. -// 8. Delete the management cluster. +// 1. Create a management cluster with the old Bundle. +// 2. Create workload clusters with the old Bundle. +// 3. Upgrade the management cluster to the new Bundle and new Kubernetes version (newVersion). +// 4. Upgrade the workload clusters to the new Bundle and new Kubernetes version (newVersion). +// 5. Delete the workload clusters. +// 6. 
Re-create the workload clusters with the old Bundle and previous Kubernetes version (oldVersion). It's necessary to sometimes +// use a different kube version because the old Bundle might not support the new kubernetes version. +// 7. Delete the workload clusters. +// 8. Delete the management cluster. func runMulticlusterUpgradeFromReleaseFlowAPI(test *framework.MulticlusterE2ETest, release *releasev1.EksARelease, oldVersion, newVersion anywherev1.KubernetesVersion, os framework.OS) { provider := test.ManagementCluster.Provider // 1. Create management cluster test.CreateManagementCluster(framework.ExecuteWithEksaRelease(release)) - // 2. Create workload clusters with the old Bundle + // 2. Create workload clusters with the old Bundle test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) { wc.CreateCluster(framework.ExecuteWithEksaRelease(release)) wc.ValidateCluster(wc.ClusterConfig.Cluster.Spec.KubernetesVersion) diff --git a/test/e2e/vsphere_test.go b/test/e2e/vsphere_test.go index 89fab09673af..8eb3b0399792 100644 --- a/test/e2e/vsphere_test.go +++ b/test/e2e/vsphere_test.go @@ -5993,3 +5993,24 @@ func TestVSphereKubernetes129to130UbuntuEtcdScaleDown(t *testing.T) { provider.WithProviderUpgrade(provider.Ubuntu130Template()), ) } + +// Kubelet Configuration e2e tests +func TestVSphereKubernetes129KubeletConfigurationSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu129()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithKubeletConfig(), + ) + runKubeletConfigurationFlow(test) +} + +func TestVSphereKubernetes130KubeletConfigurationSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithKubeletConfig(), + ) + runKubeletConfigurationFlow(test) +} diff --git 
a/test/framework/kubeletconfig.go b/test/framework/kubeletconfig.go new file mode 100644 index 000000000000..c851743629c5 --- /dev/null +++ b/test/framework/kubeletconfig.go @@ -0,0 +1,99 @@ +package framework + +import ( + "context" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/aws/eks-anywhere/internal/pkg/api" +) + +const ( + maxPod50 = 50 + maxPod60 = 60 +) + +// WithKubeletConfig returns the default kubelet config set for e2e test. +func WithKubeletConfig() ClusterE2ETestOpt { + return func(e *ClusterE2ETest) { + e.addClusterConfigFillers(WithKubeletClusterConfig()) + } +} + +// WithKubeletClusterConfig returns a ClusterConfigFiller that adds the default +// KubeletConfig for E2E tests to the cluster Config. +func WithKubeletClusterConfig() api.ClusterConfigFiller { + cpKubeletConfiguration := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPods": maxPod50, + "kind": "KubeletConfiguration", + }, + } + + wnKubeletConfiguration := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPods": maxPod60, + "kind": "KubeletConfiguration", + }, + } + + return api.JoinClusterConfigFillers( + api.ClusterToConfigFiller(api.WithControlPlaneKubeletConfig(cpKubeletConfiguration)), + api.ClusterToConfigFiller(api.WithWorkerNodeKubeletConfig(wnKubeletConfiguration)), + ) +} + +// ValidateKubeletConfig validates the kubelet config specified in the cluster spec has been applied to the nodes. 
+func (e *ClusterE2ETest) ValidateKubeletConfig() {
+	ctx := context.Background()
+	kubectlClient := buildLocalKubectl()
+
+	e.T.Log("Getting control plane nodes for kubelet max pod verification")
+	nodes, err := kubectlClient.GetControlPlaneNodes(ctx,
+		e.KubeconfigFilePath(),
+	)
+	if err != nil {
+		e.T.Fatalf("Error getting nodes: %v", err)
+	}
+	if len(nodes) == 0 {
+		e.T.Fatalf("no control plane nodes found")
+	}
+
+	got, _ := nodes[0].Status.Capacity.Pods().AsInt64()
+	if got != int64(maxPod50) {
+		e.T.Fatalf("Node capacity for control plane pods not equal to %v", maxPod50)
+	}
+
+	e.T.Log("Successfully verified Kubelet Configuration for control plane nodes")
+
+	e.T.Log("Getting all nodes for kubelet max pod verification")
+	allNodes, err := kubectlClient.GetNodes(ctx,
+		e.KubeconfigFilePath(),
+	)
+	if err != nil {
+		e.T.Fatalf("Error getting nodes: %v", err)
+	}
+	if len(allNodes) == 0 {
+		e.T.Fatalf("no nodes found")
+	}
+
+	e.T.Log("Getting worker nodes for kubelet max pod verification")
+	var workerNode corev1.Node
+	for i := range allNodes {
+		if strings.Contains(allNodes[i].Name, "-md-") {
+			workerNode = allNodes[i]
+		}
+	}
+	if workerNode.Name == "" {
+		e.T.Fatalf("no worker nodes found")
+	}
+
+	got, _ = workerNode.Status.Capacity.Pods().AsInt64()
+	if got != int64(maxPod60) {
+		e.T.Fatalf("Node capacity for worker node pods not equal to %v", maxPod60)
+	}
+
+	e.T.Log("Successfully verified Kubelet Configuration for worker nodes")
+}
diff --git a/test/framework/network.go b/test/framework/network.go
index 6fd0d20b9584..b28fa2add74a 100644
--- a/test/framework/network.go
+++ b/test/framework/network.go
@@ -54,7 +54,7 @@ func GetIP(cidr, ipEnvVar string) (string, error) {
 	} else {
 		ip, err = GenerateUniqueIp(cidr)
 		if err != nil {
-			return "", fmt.Errorf("failed to generate ip for cidr %s: %v", cidr, err)
+			return "", fmt.Errorf("GenerateUniqueIp() failed to generate ip for cidr %s: %v", cidr, err)
 		}
 	}
 	return ip, nil
diff --git
a/test/framework/vsphere.go b/test/framework/vsphere.go index 1b124d7abcb7..3328c5a0d378 100644 --- a/test/framework/vsphere.go +++ b/test/framework/vsphere.go @@ -413,6 +413,12 @@ func (v *VSphere) WithUbuntu128() api.ClusterConfigFiller { return v.WithKubeVersionAndOS(anywherev1.Kube128, Ubuntu2004, nil) } +// WithUbuntu129 returns a cluster config filler that sets the kubernetes version of the cluster to 1.29 +// as well as the right ubuntu template and osFamily for all VSphereMachineConfigs. +func (v *VSphere) WithUbuntu129() api.ClusterConfigFiller { + return v.WithKubeVersionAndOS(anywherev1.Kube129, Ubuntu2004, nil) +} + // WithUbuntu130 returns a cluster config filler that sets the kubernetes version of the cluster to 1.30 // as well as the right ubuntu template and osFamily for all VSphereMachineConfigs. func (v *VSphere) WithUbuntu130() api.ClusterConfigFiller {