Skip to content

Commit

Permalink
updating taint validation to management cluster only (#6294)
Browse files Browse the repository at this point in the history
  • Loading branch information
raymond-zhang00 authored Jan 24, 2024
1 parent 39bfc02 commit b2bf264
Show file tree
Hide file tree
Showing 6 changed files with 252 additions and 1 deletion.
4 changes: 3 additions & 1 deletion pkg/api/v1alpha1/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -507,7 +507,9 @@ func validateWorkerNodeGroups(clusterConfig *Cluster) error {
}

if len(workerNodeGroupConfigs) > 0 && len(noExecuteNoScheduleTaintedNodeGroups) == len(workerNodeGroupConfigs) {
return errors.New("at least one WorkerNodeGroupConfiguration must not have NoExecute and/or NoSchedule taints")
if clusterConfig.IsSelfManaged() {
return errors.New("at least one WorkerNodeGroupConfiguration must not have NoExecute and/or NoSchedule taints")
}
}

if len(workerNodeGroupConfigs) == 0 && len(clusterConfig.Spec.ControlPlaneConfiguration.Taints) != 0 {
Expand Down
75 changes: 75 additions & 0 deletions pkg/api/v1alpha1/cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -741,6 +741,81 @@ func TestGetAndValidateClusterConfig(t *testing.T) {
},
wantErr: false,
},
{
testName: "valid tainted workload cluster machine configs",
fileName: "testdata/cluster_valid_taints_workload_cluster.yaml",
wantCluster: &Cluster{
TypeMeta: metav1.TypeMeta{
Kind: ClusterKind,
APIVersion: SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: ClusterSpec{
KubernetesVersion: Kube119,
ManagementCluster: ManagementCluster{
Name: "mgmt",
},
ControlPlaneConfiguration: ControlPlaneConfiguration{
Count: 3,
Endpoint: &Endpoint{
Host: "test-ip",
},
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{
{
Name: "md-0",
Count: ptr.Int(3),
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test-2",
},
Taints: []v1.Taint{
{
Key: "key1",
Value: "val1",
Effect: v1.TaintEffectNoSchedule,
},
},
},
{
Name: "md-1",
Count: ptr.Int(3),
MachineGroupRef: &Ref{
Kind: VSphereMachineConfigKind,
Name: "eksa-unit-test-2",
},
Taints: []v1.Taint{
{
Key: "key1",
Value: "val1",
Effect: v1.TaintEffectNoExecute,
},
},
},
},
DatacenterRef: Ref{
Kind: VSphereDatacenterKind,
Name: "eksa-unit-test",
},
ClusterNetwork: ClusterNetwork{
CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}},
Pods: Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
},
},
wantErr: false,
},
{
testName: "with no worker node groups",
fileName: "testdata/cluster_invalid_no_worker_node_groups.yaml",
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,94 @@
# Testdata: a *workload* cluster (managementCluster.name "mgmt" differs from
# metadata.name) in which every worker node group carries a NoSchedule or
# NoExecute taint. This must validate successfully, since the "at least one
# untainted worker group" rule applies only to self-managed (management)
# clusters.
apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: Cluster
metadata:
  name: eksa-unit-test
spec:
  controlPlaneConfiguration:
    count: 3
    endpoint:
      host: test-ip
    machineGroupRef:
      name: eksa-unit-test
      kind: VSphereMachineConfig
  kubernetesVersion: "1.19"
  managementCluster:
    name: mgmt
  workerNodeGroupConfigurations:
  # Both groups are tainted; no untainted group exists on purpose.
  - count: 3
    machineGroupRef:
      name: eksa-unit-test-2
      kind: VSphereMachineConfig
    name: "md-0"
    taints:
    - key: key1
      value: val1
      effect: NoSchedule
  - count: 3
    machineGroupRef:
      name: eksa-unit-test-2
      kind: VSphereMachineConfig
    name: "md-1"
    taints:
    - key: key1
      value: val1
      effect: NoExecute
  datacenterRef:
    kind: VSphereDatacenterConfig
    name: eksa-unit-test
  clusterNetwork:
    cni: "cilium"
    pods:
      cidrBlocks:
      - 192.168.0.0/16
    services:
      cidrBlocks:
      - 10.96.0.0/12
---
# Machine config referenced by the control plane.
apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: VSphereMachineConfig
metadata:
  name: eksa-unit-test
spec:
  diskGiB: 25
  datastore: "myDatastore"
  folder: "myFolder"
  memoryMiB: 8192
  numCPUs: 2
  osFamily: "ubuntu"
  resourcePool: "myResourcePool"
  storagePolicyName: "myStoragePolicyName"
  template: "myTemplate"
  users:
  - name: "mySshUsername"
    sshAuthorizedKeys:
    - "mySshAuthorizedKey"
---
# Machine config shared by both worker node groups (md-0 and md-1).
apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: VSphereMachineConfig
metadata:
  name: eksa-unit-test-2
spec:
  diskGiB: 20
  datastore: "myDatastore2"
  folder: "myFolder2"
  memoryMiB: 2048
  numCPUs: 4
  osFamily: "bottlerocket"
  resourcePool: "myResourcePool2"
  storagePolicyName: "myStoragePolicyName2"
  template: "myTemplate2"
  users:
  - name: "mySshUsername2"
    sshAuthorizedKeys:
    - "mySshAuthorizedKey2"
---
# Datacenter config referenced by the cluster's datacenterRef.
apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: VSphereDatacenterConfig
metadata:
  name: eksa-unit-test
spec:
  datacenter: "myDatacenter"
  network: "myNetwork"
  server: "myServer"
  thumbprint: "myTlsThumbprint"
  insecure: false
35 changes: 35 additions & 0 deletions test/e2e/docker_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -604,6 +604,41 @@ func TestDockerKubernetes128Taints(t *testing.T) {
)
}

// TestDockerKubernetes127WorkloadClusterTaints creates a workload cluster in
// which every worker node group carries a NoSchedule or NoExecute taint.
// This is valid for workload clusters (only self-managed clusters must keep
// at least one untainted group).
func TestDockerKubernetes127WorkloadClusterTaints(t *testing.T) {
	provider := framework.NewDocker(t)

	mgmtFiller := api.ClusterToConfigFiller(
		api.WithKubernetesVersion(v1alpha1.Kube127),
		api.WithControlPlaneCount(1),
		api.WithWorkerNodeCount(1),
		api.WithExternalEtcdTopology(1),
	)
	managementCluster := framework.NewClusterE2ETest(t, provider).WithClusterConfig(mgmtFiller)

	test := framework.NewMulticlusterE2ETest(t, managementCluster)

	workloadFiller := api.ClusterToConfigFiller(
		api.WithKubernetesVersion(v1alpha1.Kube127),
		api.WithManagementCluster(managementCluster.ClusterName),
		api.WithControlPlaneCount(1),
		api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
		api.WithWorkerNodeGroup("worker-0", api.WithCount(1), api.WithTaint(framework.NoScheduleTaint())),
		api.WithWorkerNodeGroup("worker-1", api.WithCount(1), api.WithTaint(framework.NoExecuteTaint())),
		api.WithStackedEtcdTopology(),
	)
	workloadCluster := framework.NewClusterE2ETest(
		t, provider, framework.WithClusterName(test.NewWorkloadClusterName()),
	).WithClusterConfig(workloadFiller)

	test.WithWorkloadClusters(workloadCluster)

	runWorkloadClusterExistingConfigFlow(test)
}

// Upgrade
func TestDockerKubernetes127To128StackedEtcdUpgrade(t *testing.T) {
provider := framework.NewDocker(t)
Expand Down
10 changes: 10 additions & 0 deletions test/e2e/multicluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,16 @@ func runWorkloadClusterFlow(test *framework.MulticlusterE2ETest) {
test.DeleteManagementCluster()
}

// runWorkloadClusterExistingConfigFlow stands up the management cluster from
// its pre-built config, creates and deletes every registered workload
// cluster, then tears down the management cluster.
func runWorkloadClusterExistingConfigFlow(test *framework.MulticlusterE2ETest) {
	test.CreateManagementClusterWithConfig()
	test.RunInWorkloadClusters(func(wc *framework.WorkloadCluster) {
		wc.CreateCluster()
		wc.DeleteCluster()
	})
	// NOTE(review): presumably waits for workload cluster machines to finish
	// deleting before removing the management cluster — confirm against the
	// other flows in this file.
	time.Sleep(5 * time.Minute)
	test.DeleteManagementCluster()
}

func runWorkloadClusterPrevVersionCreateFlow(test *framework.MulticlusterE2ETest, latestMinorRelease *releasev1.EksARelease) {
test.CreateManagementClusterWithConfig()
test.RunInWorkloadClusters(func(w *framework.WorkloadCluster) {
Expand Down
35 changes: 35 additions & 0 deletions test/e2e/vsphere_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1751,6 +1751,41 @@ func TestVSphereKubernetes128BottlerocketTaintsUpgradeFlow(t *testing.T) {
)
}

// TestVSphereKubernetes127UbuntuWorkloadClusterTaintsFlow creates a vSphere
// workload cluster whose worker node groups are all tainted (NoSchedule and
// NoExecute) — valid for workload clusters, which are exempt from the
// "at least one untainted worker group" rule.
func TestVSphereKubernetes127UbuntuWorkloadClusterTaintsFlow(t *testing.T) {
	provider := framework.NewVSphere(t, framework.WithUbuntu127())

	managementCluster := framework.NewClusterE2ETest(t, provider).WithClusterConfig(
		api.ClusterToConfigFiller(
			api.WithKubernetesVersion(v1alpha1.Kube127),
			api.WithControlPlaneCount(1),
			api.WithWorkerNodeCount(1),
			api.WithExternalEtcdTopology(1),
		),
	)

	test := framework.NewMulticlusterE2ETest(t, managementCluster)

	clusterFiller := api.ClusterToConfigFiller(
		api.WithKubernetesVersion(v1alpha1.Kube127),
		api.WithManagementCluster(managementCluster.ClusterName),
		api.WithControlPlaneCount(1),
		api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
		api.WithStackedEtcdTopology(),
	)
	worker0 := provider.WithNewWorkerNodeGroup("worker-0", framework.WithWorkerNodeGroup("worker-0", api.WithCount(1), api.WithLabel("key1", "val2"), api.WithTaint(framework.NoScheduleTaint())))
	worker1 := provider.WithNewWorkerNodeGroup("worker-1", framework.WithWorkerNodeGroup("worker-1", api.WithCount(1), api.WithLabel("key1", "val2"), api.WithTaint(framework.NoExecuteTaint())))

	workloadCluster := framework.NewClusterE2ETest(
		t, provider, framework.WithClusterName(test.NewWorkloadClusterName()),
	).WithClusterConfig(clusterFiller, worker0, worker1)

	test.WithWorkloadClusters(workloadCluster)

	runWorkloadClusterExistingConfigFlow(test)
}

// Upgrade
func TestVSphereKubernetes125UbuntuTo126Upgrade(t *testing.T) {
provider := framework.NewVSphere(t, framework.WithUbuntu125())
Expand Down

0 comments on commit b2bf264

Please sign in to comment.