From b2bf264f521e104431d7f7c6fa2c72e45d6850e4 Mon Sep 17 00:00:00 2001
From: Raymond Zhang <67798267+raymond-zhang00@users.noreply.github.com>
Date: Tue, 23 Jan 2024 19:00:53 -0600
Subject: [PATCH] updating taint validation to management cluster only (#6294)

---
 pkg/api/v1alpha1/cluster.go                   |  4 +-
 pkg/api/v1alpha1/cluster_test.go              | 75 +++++++++++++++
 ...cluster_valid_taints_workload_cluster.yaml | 94 +++++++++++++++++++
 test/e2e/docker_test.go                       | 35 +++++++
 test/e2e/multicluster.go                      | 10 ++
 test/e2e/vsphere_test.go                      | 35 +++++++
 6 files changed, 252 insertions(+), 1 deletion(-)
 create mode 100644 pkg/api/v1alpha1/testdata/cluster_valid_taints_workload_cluster.yaml

diff --git a/pkg/api/v1alpha1/cluster.go b/pkg/api/v1alpha1/cluster.go
index b88d275d6269..c25b7d1653a8 100644
--- a/pkg/api/v1alpha1/cluster.go
+++ b/pkg/api/v1alpha1/cluster.go
@@ -507,7 +507,9 @@ func validateWorkerNodeGroups(clusterConfig *Cluster) error {
 	}
 
 	if len(workerNodeGroupConfigs) > 0 && len(noExecuteNoScheduleTaintedNodeGroups) == len(workerNodeGroupConfigs) {
-		return errors.New("at least one WorkerNodeGroupConfiguration must not have NoExecute and/or NoSchedule taints")
+		if clusterConfig.IsSelfManaged() {
+			return errors.New("at least one WorkerNodeGroupConfiguration must not have NoExecute and/or NoSchedule taints")
+		}
 	}
 
 	if len(workerNodeGroupConfigs) == 0 && len(clusterConfig.Spec.ControlPlaneConfiguration.Taints) != 0 {
diff --git a/pkg/api/v1alpha1/cluster_test.go b/pkg/api/v1alpha1/cluster_test.go
index 29bb9de20b82..0ac74c5015a4 100644
--- a/pkg/api/v1alpha1/cluster_test.go
+++ b/pkg/api/v1alpha1/cluster_test.go
@@ -741,6 +741,81 @@ func TestGetAndValidateClusterConfig(t *testing.T) {
 			},
 			wantErr: false,
 		},
+		{
+			testName: "valid tainted workload cluster machine configs",
+			fileName: "testdata/cluster_valid_taints_workload_cluster.yaml",
+			wantCluster: &Cluster{
+				TypeMeta: metav1.TypeMeta{
+					Kind:       ClusterKind,
+					APIVersion: SchemeBuilder.GroupVersion.String(),
+				},
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "eksa-unit-test",
+				},
+				Spec: ClusterSpec{
+					KubernetesVersion: Kube119,
+					ManagementCluster: ManagementCluster{
+						Name: "mgmt",
+					},
+					ControlPlaneConfiguration: ControlPlaneConfiguration{
+						Count: 3,
+						Endpoint: &Endpoint{
+							Host: "test-ip",
+						},
+						MachineGroupRef: &Ref{
+							Kind: VSphereMachineConfigKind,
+							Name: "eksa-unit-test",
+						},
+					},
+					WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{
+						{
+							Name:  "md-0",
+							Count: ptr.Int(3),
+							MachineGroupRef: &Ref{
+								Kind: VSphereMachineConfigKind,
+								Name: "eksa-unit-test-2",
+							},
+							Taints: []v1.Taint{
+								{
+									Key:    "key1",
+									Value:  "val1",
+									Effect: v1.TaintEffectNoSchedule,
+								},
+							},
+						},
+						{
+							Name:  "md-1",
+							Count: ptr.Int(3),
+							MachineGroupRef: &Ref{
+								Kind: VSphereMachineConfigKind,
+								Name: "eksa-unit-test-2",
+							},
+							Taints: []v1.Taint{
+								{
+									Key:    "key1",
+									Value:  "val1",
+									Effect: v1.TaintEffectNoExecute,
+								},
+							},
+						},
+					},
+					DatacenterRef: Ref{
+						Kind: VSphereDatacenterKind,
+						Name: "eksa-unit-test",
+					},
+					ClusterNetwork: ClusterNetwork{
+						CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
+						Pods: Pods{
+							CidrBlocks: []string{"192.168.0.0/16"},
+						},
+						Services: Services{
+							CidrBlocks: []string{"10.96.0.0/12"},
+						},
+					},
+				},
+			},
+			wantErr: false,
+		},
 		{
 			testName: "with no worker node groups",
 			fileName: "testdata/cluster_invalid_no_worker_node_groups.yaml",
diff --git a/pkg/api/v1alpha1/testdata/cluster_valid_taints_workload_cluster.yaml b/pkg/api/v1alpha1/testdata/cluster_valid_taints_workload_cluster.yaml
new file mode 100644
index 000000000000..189031e9840a
--- /dev/null
+++ b/pkg/api/v1alpha1/testdata/cluster_valid_taints_workload_cluster.yaml
@@ -0,0 +1,94 @@
+apiVersion: anywhere.eks.amazonaws.com/v1alpha1
+kind: Cluster
+metadata:
+  name: eksa-unit-test
+spec:
+  controlPlaneConfiguration:
+    count: 3
+    endpoint:
+      host: test-ip
+    machineGroupRef:
+      name: eksa-unit-test
+      kind: VSphereMachineConfig
+  kubernetesVersion: "1.19"
+  managementCluster:
+    name: mgmt
+  workerNodeGroupConfigurations:
+  - count: 3
+    machineGroupRef:
+      name: eksa-unit-test-2
+      kind: VSphereMachineConfig
+    name: "md-0"
+    taints:
+    - key: key1
+      value: val1
+      effect: NoSchedule
+  - count: 3
+    machineGroupRef:
+      name: eksa-unit-test-2
+      kind: VSphereMachineConfig
+    name: "md-1"
+    taints:
+    - key: key1
+      value: val1
+      effect: NoExecute
+  datacenterRef:
+    kind: VSphereDatacenterConfig
+    name: eksa-unit-test
+  clusterNetwork:
+    cni: "cilium"
+    pods:
+      cidrBlocks:
+      - 192.168.0.0/16
+    services:
+      cidrBlocks:
+      - 10.96.0.0/12
+---
+apiVersion: anywhere.eks.amazonaws.com/v1alpha1
+kind: VSphereMachineConfig
+metadata:
+  name: eksa-unit-test
+spec:
+  diskGiB: 25
+  datastore: "myDatastore"
+  folder: "myFolder"
+  memoryMiB: 8192
+  numCPUs: 2
+  osFamily: "ubuntu"
+  resourcePool: "myResourcePool"
+  storagePolicyName: "myStoragePolicyName"
+  template: "myTemplate"
+  users:
+  - name: "mySshUsername"
+    sshAuthorizedKeys:
+    - "mySshAuthorizedKey"
+---
+apiVersion: anywhere.eks.amazonaws.com/v1alpha1
+kind: VSphereMachineConfig
+metadata:
+  name: eksa-unit-test-2
+spec:
+  diskGiB: 20
+  datastore: "myDatastore2"
+  folder: "myFolder2"
+  memoryMiB: 2048
+  numCPUs: 4
+  osFamily: "bottlerocket"
+  resourcePool: "myResourcePool2"
+  storagePolicyName: "myStoragePolicyName2"
+  template: "myTemplate2"
+  users:
+  - name: "mySshUsername2"
+    sshAuthorizedKeys:
+    - "mySshAuthorizedKey2"
+---
+apiVersion: anywhere.eks.amazonaws.com/v1alpha1
+kind: VSphereDatacenterConfig
+metadata:
+  name: eksa-unit-test
+spec:
+  datacenter: "myDatacenter"
+  network: "myNetwork"
+  server: "myServer"
+  thumbprint: "myTlsThumbprint"
+  insecure: false
diff --git a/test/e2e/docker_test.go b/test/e2e/docker_test.go
index 901b5054b8d4..fa649ff50334 100644
--- a/test/e2e/docker_test.go
+++ b/test/e2e/docker_test.go
@@ -604,6 +604,41 @@ func TestDockerKubernetes128Taints(t *testing.T) {
 	)
 }
 
+func TestDockerKubernetes127WorkloadClusterTaints(t *testing.T) {
+	provider := framework.NewDocker(t)
+
+	managementCluster := framework.NewClusterE2ETest(
+		t, provider,
+	).WithClusterConfig(
+		api.ClusterToConfigFiller(
+			api.WithKubernetesVersion(v1alpha1.Kube127),
+			api.WithControlPlaneCount(1),
+			api.WithWorkerNodeCount(1),
+			api.WithExternalEtcdTopology(1),
+		),
+	)
+
+	test := framework.NewMulticlusterE2ETest(t, managementCluster)
+
+	test.WithWorkloadClusters(
+		framework.NewClusterE2ETest(
+			t, provider, framework.WithClusterName(test.NewWorkloadClusterName()),
+		).WithClusterConfig(
+			api.ClusterToConfigFiller(
+				api.WithKubernetesVersion(v1alpha1.Kube127),
+				api.WithManagementCluster(managementCluster.ClusterName),
+				api.WithControlPlaneCount(1),
+				api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
+				api.WithWorkerNodeGroup("worker-0", api.WithCount(1), api.WithTaint(framework.NoScheduleTaint())),
+				api.WithWorkerNodeGroup("worker-1", api.WithCount(1), api.WithTaint(framework.NoExecuteTaint())),
+				api.WithStackedEtcdTopology(),
+			),
+		),
+	)
+
+	runWorkloadClusterExistingConfigFlow(test)
+}
+
 // Upgrade
 func TestDockerKubernetes127To128StackedEtcdUpgrade(t *testing.T) {
 	provider := framework.NewDocker(t)
diff --git a/test/e2e/multicluster.go b/test/e2e/multicluster.go
index 842ec69b6bde..a14f883ff6d6 100644
--- a/test/e2e/multicluster.go
+++ b/test/e2e/multicluster.go
@@ -23,6 +23,16 @@ func runWorkloadClusterFlow(test *framework.MulticlusterE2ETest) {
 	test.DeleteManagementCluster()
 }
 
+func runWorkloadClusterExistingConfigFlow(test *framework.MulticlusterE2ETest) {
+	test.CreateManagementClusterWithConfig()
+	test.RunInWorkloadClusters(func(w *framework.WorkloadCluster) {
+		w.CreateCluster()
+		w.DeleteCluster()
+	})
+	time.Sleep(5 * time.Minute)
+	test.DeleteManagementCluster()
+}
+
 func runWorkloadClusterPrevVersionCreateFlow(test *framework.MulticlusterE2ETest, latestMinorRelease *releasev1.EksARelease) {
 	test.CreateManagementClusterWithConfig()
 	test.RunInWorkloadClusters(func(w *framework.WorkloadCluster) {
diff --git a/test/e2e/vsphere_test.go b/test/e2e/vsphere_test.go
index 48967d402e7f..19ca4deedc40 100644
--- a/test/e2e/vsphere_test.go
+++ b/test/e2e/vsphere_test.go
@@ -1751,6 +1751,41 @@ func TestVSphereKubernetes128BottlerocketTaintsUpgradeFlow(t *testing.T) {
 	)
 }
 
+func TestVSphereKubernetes127UbuntuWorkloadClusterTaintsFlow(t *testing.T) {
+	provider := framework.NewVSphere(t, framework.WithUbuntu127())
+
+	managementCluster := framework.NewClusterE2ETest(
+		t, provider,
+	).WithClusterConfig(
+		api.ClusterToConfigFiller(
+			api.WithKubernetesVersion(v1alpha1.Kube127),
+			api.WithControlPlaneCount(1),
+			api.WithWorkerNodeCount(1),
+			api.WithExternalEtcdTopology(1),
+		),
+	)
+
+	test := framework.NewMulticlusterE2ETest(t, managementCluster)
+
+	test.WithWorkloadClusters(
+		framework.NewClusterE2ETest(
+			t, provider, framework.WithClusterName(test.NewWorkloadClusterName()),
+		).WithClusterConfig(
+			api.ClusterToConfigFiller(
+				api.WithKubernetesVersion(v1alpha1.Kube127),
+				api.WithManagementCluster(managementCluster.ClusterName),
+				api.WithControlPlaneCount(1),
+				api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
+				api.WithStackedEtcdTopology(),
+			),
+			provider.WithNewWorkerNodeGroup("worker-0", framework.WithWorkerNodeGroup("worker-0", api.WithCount(1), api.WithLabel("key1", "val2"), api.WithTaint(framework.NoScheduleTaint()))),
+			provider.WithNewWorkerNodeGroup("worker-1", framework.WithWorkerNodeGroup("worker-1", api.WithCount(1), api.WithLabel("key1", "val2"), api.WithTaint(framework.NoExecuteTaint()))),
+		),
+	)
+
+	runWorkloadClusterExistingConfigFlow(test)
+}
+
 // Upgrade
 func TestVSphereKubernetes125UbuntuTo126Upgrade(t *testing.T) {
 	provider := framework.NewVSphere(t, framework.WithUbuntu125())
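
The net effect of the cluster.go hunk, as a minimal standalone Go sketch. The
cluster type and its fields below are hypothetical stand-ins, not the
eks-anywhere v1alpha1 API; only the self-managed gate mirrors the patched
check in validateWorkerNodeGroups, assuming (as in the real API's convention)
that a cluster is self-managed when it names no management cluster or names
itself.

package main

import (
	"errors"
	"fmt"
)

// cluster is a hypothetical stand-in for v1alpha1.Cluster, reduced to the
// fields the taint validation cares about.
type cluster struct {
	name           string
	managementName string
	taintedGroups  int // worker node groups configured with NoExecute/NoSchedule taints
	totalGroups    int // all worker node groups
}

// isSelfManaged assumes a cluster is self-managed when it is its own
// management cluster, or when no management cluster is named.
func (c cluster) isSelfManaged() bool {
	return c.managementName == "" || c.managementName == c.name
}

// validateWorkerNodeGroups applies the rule this patch relaxes: only a
// self-managed (management) cluster must keep at least one worker node
// group free of NoExecute/NoSchedule taints.
func validateWorkerNodeGroups(c cluster) error {
	if c.totalGroups > 0 && c.taintedGroups == c.totalGroups {
		if c.isSelfManaged() {
			return errors.New("at least one WorkerNodeGroupConfiguration must not have NoExecute and/or NoSchedule taints")
		}
	}
	return nil
}

func main() {
	mgmt := cluster{name: "mgmt", taintedGroups: 2, totalGroups: 2}
	workload := cluster{name: "w01", managementName: "mgmt", taintedGroups: 2, totalGroups: 2}
	fmt.Println(validateWorkerNodeGroups(mgmt))     // still rejected: a management cluster needs an untainted group
	fmt.Println(validateWorkerNodeGroups(workload)) // <nil>: fully tainted worker groups are now allowed
}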