Commit
Add template name and uuid name check for Nutanix kindless management cluster upgrade feature
panktishah26 committed Oct 23, 2023
1 parent bce1d5d commit e57bbb0
Showing 9 changed files with 262 additions and 9 deletions.
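The changes below rename the testdata images from "prism-image" to "prism-image-1-19" and add a validator check that requires every NutanixMachineConfig image/template name to embed the kubernetes version it will run. A minimal standalone sketch of that matching rule, distilled from the validator.go change further down (the helper name and example inputs are illustrative, not part of the commit):

package main

import (
	"fmt"
	"strings"
)

// templateMatchesKubernetesVersion mirrors the rule in validateTemplateMatchesKubernetesVersion:
// strip "-", ".", and "_" from the template name, strip "." from the kubernetes version,
// then require the normalized version to appear somewhere in the normalized name.
func templateMatchesKubernetesVersion(templateName, kubernetesVersion string) bool {
	name := strings.NewReplacer("-", "", ".", "", "_", "").Replace(templateName)
	version := strings.NewReplacer(".", "").Replace(kubernetesVersion)
	return strings.Contains(name, version)
}

func main() {
	fmt.Println(templateMatchesKubernetesVersion("kubernetes-1-23-eks", "1.23")) // true
	fmt.Println(templateMatchesKubernetesVersion("prism-image-1-19", "1.19"))    // true
	fmt.Println(templateMatchesKubernetesVersion("prism-image", "1.19"))         // false
}

Separators are ignored, so 1.23, 1-23, 1_23, and 123 are all accepted forms of the version inside a template name.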
@@ -82,7 +82,7 @@ spec:
memorySize: 8Gi
image:
type: "name"
name: "prism-image"
name: "prism-image-1-19"
cluster:
type: "name"
name: "prism-cluster"
@@ -79,7 +79,7 @@ spec:
memorySize: 8Gi
image:
type: "name"
name: "prism-image"
name: "prism-image-1-19"
cluster:
type: "name"
name: "prism-cluster"
@@ -79,7 +79,7 @@ spec:
memorySize: 8Gi
image:
type: "name"
name: "prism-image"
name: "prism-image-1-19"
cluster:
type: "name"
name: "prism-cluster"
@@ -60,7 +60,7 @@ spec:
memorySize: 8Gi
image:
type: "name"
name: "prism-image"
name: "prism-image-1-19"
cluster:
type: "name"
name: "prism-central"
@@ -60,7 +60,7 @@ spec:
memorySize: 8Gi
image:
type: "name"
name: "prism-image"
name: "prism-image-1-19"
cluster:
type: "name"
name: "non-existent-cluster"
@@ -65,7 +65,7 @@ spec:
memorySize: 8Gi
image:
type: "name"
name: "prism-image"
name: "prism-image-1-19"
cluster:
type: "name"
name: "prism-cluster"
@@ -90,7 +90,7 @@ spec:
memorySize: 8Gi
image:
type: "name"
name: "prism-image"
name: "prism-image-1-19"
cluster:
type: "name"
name: "prism-cluster"
@@ -115,7 +115,7 @@ spec:
memorySize: 8Gi
image:
type: "name"
name: "prism-image"
name: "prism-image-1-19"
cluster:
type: "name"
name: "prism-cluster"
@@ -128,3 +128,29 @@ spec:
- name: "mySshUsername"
sshAuthorizedKeys:
- "mySshAuthorizedKey"
+---
+apiVersion: anywhere.eks.amazonaws.com/v1alpha1
+kind: NutanixMachineConfig
+metadata:
+  name: eksa-unit-test
+  namespace: default
+spec:
+  vcpusPerSocket: 1
+  vcpuSockets: 4
+  memorySize: 8Gi
+  image:
+    type: "name"
+    name: "prism-image-1-19"
+  cluster:
+    type: "name"
+    name: "prism-cluster"
+  subnet:
+    type: "name"
+    name: "prism-subnet"
+  systemDiskSize: 40Gi
+  osFamily: "ubuntu"
+  users:
+    - name: "mySshUsername"
+      sshAuthorizedKeys:
+        - "mySshAuthorizedKey"
+---
pkg/providers/nutanix/testdata/eksa-cluster.yaml (2 changes: 1 addition & 1 deletion)
@@ -60,7 +60,7 @@ spec:
memorySize: 8Gi
image:
type: "name"
name: "prism-image"
name: "prism-image-1-19"
cluster:
type: "name"
name: "prism-cluster"
pkg/providers/nutanix/validator.go (63 changes: 63 additions & 0 deletions)
@@ -66,6 +66,41 @@ func (v *Validator) ValidateClusterSpec(ctx context.Context, spec *cluster.Spec,
}
}

return v.checkImageNameMatchesKubernetesVersion(ctx, spec, client)
}

func (v *Validator) checkImageNameMatchesKubernetesVersion(ctx context.Context, spec *cluster.Spec, client Client) error {
controlPlaneMachineConfig := spec.NutanixMachineConfigs[spec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name]
if controlPlaneMachineConfig == nil {
return fmt.Errorf("cannot find NutanixMachineConfig %v for control plane", spec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name)
}
// validate template field name contains cluster kubernetes version for the control plane machine.
if err := v.validateTemplateMatchesKubernetesVersion(ctx, controlPlaneMachineConfig.Spec.Image, client, string(spec.Cluster.Spec.KubernetesVersion)); err != nil {
return fmt.Errorf("machine config %s validation failed: %v", controlPlaneMachineConfig.Name, err)
}

if spec.Cluster.Spec.ExternalEtcdConfiguration != nil {
etcdMachineConfig := spec.NutanixMachineConfigs[spec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name]
if etcdMachineConfig == nil {
return fmt.Errorf("cannot find NutanixMachineConfig %v for etcd machines", spec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name)
}

// validate template field name contains cluster kubernetes version for the external etcd machine.
if err := v.validateTemplateMatchesKubernetesVersion(ctx, etcdMachineConfig.Spec.Image, client, string(spec.Cluster.Spec.KubernetesVersion)); err != nil {
return fmt.Errorf("machine config %s validation failed: %v", etcdMachineConfig.Name, err)
}
}

for _, workerNodeGroupConfiguration := range spec.Cluster.Spec.WorkerNodeGroupConfigurations {
kubernetesVersion := string(spec.Cluster.Spec.KubernetesVersion)
if workerNodeGroupConfiguration.KubernetesVersion != nil {
kubernetesVersion = string(*workerNodeGroupConfiguration.KubernetesVersion)
}
// validate template field name contains the worker node group kubernetes version for each worker machine.
workerNodeGroupMachineConfig := spec.NutanixMachineConfigs[workerNodeGroupConfiguration.MachineGroupRef.Name]
if err := v.validateTemplateMatchesKubernetesVersion(ctx, workerNodeGroupMachineConfig.Spec.Image, client, kubernetesVersion); err != nil {
return fmt.Errorf("machine config %s validation failed: %v", workerNodeGroupMachineConfig.Name, err)
}
}
return nil
}

@@ -264,6 +264,34 @@ func (v *Validator) validateImageConfig(ctx context.Context, client Client, iden
return nil
}

func (v *Validator) validateTemplateMatchesKubernetesVersion(ctx context.Context, identifier anywherev1.NutanixResourceIdentifier, client Client, kubernetesVersionName string) error {
var templateName string
if identifier.Type == anywherev1.NutanixIdentifierUUID {
imageUUID := *identifier.UUID
imageDetails, err := client.GetImage(ctx, imageUUID)
if err != nil {
return fmt.Errorf("failed to find image with uuid %s: %v", imageUUID, err)
}
templateName = *imageDetails.Spec.Name
} else {
templateName = *identifier.Name
}

// Normalize the template name: strip "-", ".", and "_" so that 1.23, 1-23, and 1_23 all become 123.
templateReplacer := strings.NewReplacer("-", "", ".", "", "_", "")
template := templateReplacer.Replace(templateName)
// Normalize the kubernetes version string: strip "." so that 1.23 becomes 123.
replacer := strings.NewReplacer(".", "")
kubernetesVersion := replacer.Replace(string(kubernetesVersionName))
// This returns an error if the template name does not contain the specified kubernetes version.
// For example, if the kubernetes version is 1.23, the template name should include
// 1.23, 1-23, 1_23, or 123 somewhere in the string (e.g. kubernetes-1-23-eks).
if !strings.Contains(template, kubernetesVersion) {
return fmt.Errorf("missing kube version from the machine config template name: template=%s, version=%s", templateName, string(kubernetesVersionName))
}
return nil
}

func (v *Validator) validateSubnetConfig(ctx context.Context, client Client, identifier anywherev1.NutanixResourceIdentifier) error {
switch identifier.Type {
case anywherev1.NutanixIdentifierName:
pkg/providers/nutanix/validator_test.go (164 changes: 164 additions & 0 deletions)
@@ -16,6 +16,8 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
"sigs.k8s.io/yaml"

"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
mockCrypto "github.com/aws/eks-anywhere/pkg/crypto/mocks"
mocknutanix "github.com/aws/eks-anywhere/pkg/providers/nutanix/mocks"
@@ -670,3 +672,165 @@ func TestNutanixValidatorValidateDatacenterConfigWithInvalidCreds(t *testing.T)
})
}
}

func TestValidateClusterMachineConfigsError(t *testing.T) {
ctx := context.Background()
clusterConfigFile := "testdata/eksa-cluster-multiple-machineconfigs.yaml"
clusterSpec := test.NewFullClusterSpec(t, clusterConfigFile)
clusterSpec.Cluster.Spec.KubernetesVersion = "1.22"

ctrl := gomock.NewController(t)
mockClient := mocknutanix.NewMockClient(ctrl)
mockClient.EXPECT().GetCurrentLoggedInUser(gomock.Any()).Return(&v3.UserIntentResponse{}, nil).AnyTimes()

mockTLSValidator := mockCrypto.NewMockTlsValidator(ctrl)
mockTLSValidator.EXPECT().ValidateCert(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()

mockTransport := mocknutanix.NewMockRoundTripper(ctrl)
mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes()

mockHTTPClient := &http.Client{Transport: mockTransport}
clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}}
validator := NewValidator(clientCache, mockTLSValidator, mockHTTPClient)

err := validator.checkImageNameMatchesKubernetesVersion(ctx, clusterSpec, clientCache.clients["test"])
if err == nil {
t.Fatalf("validation should not pass: %v", err)
}
}

func TestValidateClusterMachineConfigsCPNotFoundError(t *testing.T) {
ctx := context.Background()
clusterConfigFile := "testdata/eksa-cluster-multiple-machineconfigs.yaml"
clusterSpec := test.NewFullClusterSpec(t, clusterConfigFile)
clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name = "invalid-cp-name"

ctrl := gomock.NewController(t)
mockClient := mocknutanix.NewMockClient(ctrl)
mockClient.EXPECT().GetCurrentLoggedInUser(gomock.Any()).Return(&v3.UserIntentResponse{}, nil).AnyTimes()

mockTLSValidator := mockCrypto.NewMockTlsValidator(ctrl)
mockTLSValidator.EXPECT().ValidateCert(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()

mockTransport := mocknutanix.NewMockRoundTripper(ctrl)
mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes()

mockHTTPClient := &http.Client{Transport: mockTransport}
clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}}
validator := NewValidator(clientCache, mockTLSValidator, mockHTTPClient)

err := validator.checkImageNameMatchesKubernetesVersion(ctx, clusterSpec, clientCache.clients["test"])
if err == nil {
t.Fatalf("validation should not pass: %v", err)
}
}

func TestValidateClusterMachineConfigsCPError(t *testing.T) {
ctx := context.Background()
clusterConfigFile := "testdata/eksa-cluster-multiple-machineconfigs.yaml"
clusterSpec := test.NewFullClusterSpec(t, clusterConfigFile)
clusterSpec.NutanixMachineConfigs["eksa-unit-test-cp"].Spec.Image.Name = utils.StringPtr("kubernetes_1_22")

ctrl := gomock.NewController(t)
mockClient := mocknutanix.NewMockClient(ctrl)
mockClient.EXPECT().GetCurrentLoggedInUser(gomock.Any()).Return(&v3.UserIntentResponse{}, nil).AnyTimes()

mockTLSValidator := mockCrypto.NewMockTlsValidator(ctrl)
mockTLSValidator.EXPECT().ValidateCert(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()

mockTransport := mocknutanix.NewMockRoundTripper(ctrl)
mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes()

mockHTTPClient := &http.Client{Transport: mockTransport}
clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}}
validator := NewValidator(clientCache, mockTLSValidator, mockHTTPClient)

err := validator.checkImageNameMatchesKubernetesVersion(ctx, clusterSpec, clientCache.clients["test"])
if err == nil {
t.Fatalf("validation should not pass: %v", err)
}
}

func TestValidateClusterMachineConfigsEtcdError(t *testing.T) {
ctx := context.Background()
clusterConfigFile := "testdata/eksa-cluster-multiple-machineconfigs.yaml"
clusterSpec := test.NewFullClusterSpec(t, clusterConfigFile)
clusterSpec.NutanixMachineConfigs["eksa-unit-test"].Spec.Image.Name = utils.StringPtr("kubernetes_1_22")

ctrl := gomock.NewController(t)
mockClient := mocknutanix.NewMockClient(ctrl)
mockClient.EXPECT().GetCurrentLoggedInUser(gomock.Any()).Return(&v3.UserIntentResponse{}, nil).AnyTimes()

mockTLSValidator := mockCrypto.NewMockTlsValidator(ctrl)
mockTLSValidator.EXPECT().ValidateCert(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()

mockTransport := mocknutanix.NewMockRoundTripper(ctrl)
mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes()

mockHTTPClient := &http.Client{Transport: mockTransport}
clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}}
validator := NewValidator(clientCache, mockTLSValidator, mockHTTPClient)

err := validator.checkImageNameMatchesKubernetesVersion(ctx, clusterSpec, clientCache.clients["test"])
if err == nil {
t.Fatalf("validation should not pass: %v", err)
}
}

func TestValidateClusterMachineConfigsModularUpgradeError(t *testing.T) {
ctx := context.Background()
clusterConfigFile := "testdata/eksa-cluster-multiple-machineconfigs.yaml"
clusterSpec := test.NewFullClusterSpec(t, clusterConfigFile)
kube122 := v1alpha1.KubernetesVersion("1.22")
clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].KubernetesVersion = &kube122

ctrl := gomock.NewController(t)
mockClient := mocknutanix.NewMockClient(ctrl)
mockClient.EXPECT().GetCurrentLoggedInUser(gomock.Any()).Return(&v3.UserIntentResponse{}, nil).AnyTimes()

mockTLSValidator := mockCrypto.NewMockTlsValidator(ctrl)
mockTLSValidator.EXPECT().ValidateCert(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()

mockTransport := mocknutanix.NewMockRoundTripper(ctrl)
mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes()

mockHTTPClient := &http.Client{Transport: mockTransport}
clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}}
validator := NewValidator(clientCache, mockTLSValidator, mockHTTPClient)

err := validator.checkImageNameMatchesKubernetesVersion(ctx, clusterSpec, clientCache.clients["test"])
if err == nil {
t.Fatalf("validation should not pass: %v", err)
}
}

func TestValidateClusterMachineConfigsSuccess(t *testing.T) {
ctx := context.Background()
clusterConfigFile := "testdata/eksa-cluster-multiple-machineconfigs.yaml"
clusterSpec := test.NewFullClusterSpec(t, clusterConfigFile)

clusterSpec.Cluster.Spec.KubernetesVersion = "1.22"
clusterSpec.NutanixMachineConfigs["eksa-unit-test-cp"].Spec.Image.Name = utils.StringPtr("kubernetes_1_22")
clusterSpec.NutanixMachineConfigs["eksa-unit-test"].Spec.Image.Name = utils.StringPtr("kubernetes_1_22")
clusterSpec.NutanixMachineConfigs["eksa-unit-test-md-1"].Spec.Image.Name = utils.StringPtr("kubernetes_1_22")
clusterSpec.NutanixMachineConfigs["eksa-unit-test-md-2"].Spec.Image.Name = utils.StringPtr("kubernetes_1_22")

ctrl := gomock.NewController(t)
mockClient := mocknutanix.NewMockClient(ctrl)
mockClient.EXPECT().GetCurrentLoggedInUser(gomock.Any()).Return(&v3.UserIntentResponse{}, nil).AnyTimes()

mockTLSValidator := mockCrypto.NewMockTlsValidator(ctrl)
mockTLSValidator.EXPECT().ValidateCert(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()

mockTransport := mocknutanix.NewMockRoundTripper(ctrl)
mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes()

mockHTTPClient := &http.Client{Transport: mockTransport}
clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}}
validator := NewValidator(clientCache, mockTLSValidator, mockHTTPClient)

err := validator.checkImageNameMatchesKubernetesVersion(ctx, clusterSpec, clientCache.clients["test"])
if err != nil {
t.Fatalf("validation should pass: %v", err)
}
}
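
The tests above exercise the name branch of validateTemplateMatchesKubernetesVersion. A hedged sketch of a test that would also exercise the UUID branch, where the image name is first resolved through client.GetImage; the prism v3 response field shapes and the UUID/image name values here are assumptions, not part of the commit:

func TestValidateClusterMachineConfigsUUIDImageError(t *testing.T) {
	ctx := context.Background()
	clusterConfigFile := "testdata/eksa-cluster-multiple-machineconfigs.yaml"
	clusterSpec := test.NewFullClusterSpec(t, clusterConfigFile)

	// Reference the control plane image by UUID instead of by name (illustrative UUID).
	cpName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
	clusterSpec.NutanixMachineConfigs[cpName].Spec.Image = anywherev1.NutanixResourceIdentifier{
		Type: anywherev1.NutanixIdentifierUUID,
		UUID: utils.StringPtr("00000000-0000-0000-0000-000000000000"),
	}

	ctrl := gomock.NewController(t)
	mockClient := mocknutanix.NewMockClient(ctrl)
	// The resolved image name carries no kubernetes version, so the check is expected to fail.
	mockClient.EXPECT().GetImage(gomock.Any(), gomock.Any()).Return(&v3.ImageIntentResponse{
		Spec: &v3.Image{Name: utils.StringPtr("ubuntu-generic-image")},
	}, nil).AnyTimes()

	mockTLSValidator := mockCrypto.NewMockTlsValidator(ctrl)
	mockHTTPClient := &http.Client{Transport: mocknutanix.NewMockRoundTripper(ctrl)}
	clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}}
	validator := NewValidator(clientCache, mockTLSValidator, mockHTTPClient)

	err := validator.checkImageNameMatchesKubernetesVersion(ctx, clusterSpec, clientCache.clients["test"])
	if err == nil {
		t.Fatalf("validation should not pass: %v", err)
	}
}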
