diff --git a/pkg/providers/nutanix/testdata/cluster_nutanix_with_trust_bundle.yaml b/pkg/providers/nutanix/testdata/cluster_nutanix_with_trust_bundle.yaml
index d895d942192a..5ef4bc2835dc 100644
--- a/pkg/providers/nutanix/testdata/cluster_nutanix_with_trust_bundle.yaml
+++ b/pkg/providers/nutanix/testdata/cluster_nutanix_with_trust_bundle.yaml
@@ -82,7 +82,7 @@ spec:
   memorySize: 8Gi
   image:
     type: "name"
-    name: "prism-image"
+    name: "prism-image-1-19"
   cluster:
     type: "name"
     name: "prism-cluster"
diff --git a/pkg/providers/nutanix/testdata/cluster_nutanix_with_upgrade_strategy_cp.yaml b/pkg/providers/nutanix/testdata/cluster_nutanix_with_upgrade_strategy_cp.yaml
index df82897d67d4..6ee3145e2d0f 100644
--- a/pkg/providers/nutanix/testdata/cluster_nutanix_with_upgrade_strategy_cp.yaml
+++ b/pkg/providers/nutanix/testdata/cluster_nutanix_with_upgrade_strategy_cp.yaml
@@ -79,7 +79,7 @@ spec:
   memorySize: 8Gi
   image:
     type: "name"
-    name: "prism-image"
+    name: "prism-image-1-19"
   cluster:
     type: "name"
     name: "prism-cluster"
diff --git a/pkg/providers/nutanix/testdata/cluster_nutanix_with_upgrade_strategy_md.yaml b/pkg/providers/nutanix/testdata/cluster_nutanix_with_upgrade_strategy_md.yaml
index 1691a717f6f4..9f6520bc4b1d 100644
--- a/pkg/providers/nutanix/testdata/cluster_nutanix_with_upgrade_strategy_md.yaml
+++ b/pkg/providers/nutanix/testdata/cluster_nutanix_with_upgrade_strategy_md.yaml
@@ -79,7 +79,7 @@ spec:
   memorySize: 8Gi
   image:
     type: "name"
-    name: "prism-image"
+    name: "prism-image-1-19"
   cluster:
     type: "name"
     name: "prism-cluster"
diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-invalid-pe-cluster-pc.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-invalid-pe-cluster-pc.yaml
index 68430e0154f0..1e1da75037b2 100644
--- a/pkg/providers/nutanix/testdata/eksa-cluster-invalid-pe-cluster-pc.yaml
+++ b/pkg/providers/nutanix/testdata/eksa-cluster-invalid-pe-cluster-pc.yaml
@@ -60,7 +60,7 @@ spec:
   memorySize: 8Gi
   image:
     type: "name"
-    name: "prism-image"
+    name: "prism-image-1-19"
   cluster:
     type: "name"
     name: "prism-central"
diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-invalid-pe-cluster-random-name.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-invalid-pe-cluster-random-name.yaml
index dcd32f6f8b09..64053d55bfc2 100644
--- a/pkg/providers/nutanix/testdata/eksa-cluster-invalid-pe-cluster-random-name.yaml
+++ b/pkg/providers/nutanix/testdata/eksa-cluster-invalid-pe-cluster-random-name.yaml
@@ -60,7 +60,7 @@ spec:
   memorySize: 8Gi
   image:
     type: "name"
-    name: "prism-image"
+    name: "prism-image-1-19"
   cluster:
     type: "name"
     name: "non-existent-cluster"
diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-multiple-machineconfigs.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-multiple-machineconfigs.yaml
index ab759ddfd182..ac9452e8854c 100644
--- a/pkg/providers/nutanix/testdata/eksa-cluster-multiple-machineconfigs.yaml
+++ b/pkg/providers/nutanix/testdata/eksa-cluster-multiple-machineconfigs.yaml
@@ -65,7 +65,7 @@ spec:
   memorySize: 8Gi
   image:
     type: "name"
-    name: "prism-image"
+    name: "prism-image-1-19"
   cluster:
     type: "name"
     name: "prism-cluster"
@@ -90,7 +90,7 @@ spec:
   memorySize: 8Gi
   image:
     type: "name"
-    name: "prism-image"
+    name: "prism-image-1-19"
   cluster:
     type: "name"
     name: "prism-cluster"
@@ -115,7 +115,7 @@ spec:
   memorySize: 8Gi
   image:
     type: "name"
-    name: "prism-image"
+    name: "prism-image-1-19"
   cluster:
     type: "name"
     name: "prism-cluster"
@@ -128,3 +128,29 @@ spec:
   - name: "mySshUsername"
     sshAuthorizedKeys:
     - "mySshAuthorizedKey"
+---
+apiVersion: anywhere.eks.amazonaws.com/v1alpha1
+kind: NutanixMachineConfig
+metadata:
+  name: eksa-unit-test
+  namespace: default
+spec:
+  vcpusPerSocket: 1
+  vcpuSockets: 4
+  memorySize: 8Gi
+  image:
+    type: "name"
+    name: "prism-image-1-19"
+  cluster:
+    type: "name"
+    name: "prism-cluster"
+  subnet:
+    type: "name"
+    name: "prism-subnet"
+  systemDiskSize: 40Gi
+  osFamily: "ubuntu"
+  users:
+  - name: "mySshUsername"
+    sshAuthorizedKeys:
+    - "mySshAuthorizedKey"
+---
\ No newline at end of file
diff --git a/pkg/providers/nutanix/testdata/eksa-cluster.yaml b/pkg/providers/nutanix/testdata/eksa-cluster.yaml
index 259f4960d8b8..54c9ca80955b 100644
--- a/pkg/providers/nutanix/testdata/eksa-cluster.yaml
+++ b/pkg/providers/nutanix/testdata/eksa-cluster.yaml
@@ -60,7 +60,7 @@ spec:
   memorySize: 8Gi
   image:
     type: "name"
-    name: "prism-image"
+    name: "prism-image-1-19"
   cluster:
     type: "name"
     name: "prism-cluster"
diff --git a/pkg/providers/nutanix/validator.go b/pkg/providers/nutanix/validator.go
index 72e462247c14..3679828e054b 100644
--- a/pkg/providers/nutanix/validator.go
+++ b/pkg/providers/nutanix/validator.go
@@ -66,6 +66,41 @@ func (v *Validator) ValidateClusterSpec(ctx context.Context, spec *cluster.Spec,
         }
     }
 
+    return v.checkImageNameMatchesKubernetesVersion(ctx, spec, client)
+}
+
+func (v *Validator) checkImageNameMatchesKubernetesVersion(ctx context.Context, spec *cluster.Spec, client Client) error {
+    controlPlaneMachineConfig := spec.NutanixMachineConfigs[spec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name]
+    if controlPlaneMachineConfig == nil {
+        return fmt.Errorf("cannot find NutanixMachineConfig %v for control plane", spec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name)
+    }
+    // Validate that the template name contains the cluster kubernetes version for the control plane machines.
+    if err := v.validateTemplateMatchesKubernetesVersion(ctx, controlPlaneMachineConfig.Spec.Image, client, string(spec.Cluster.Spec.KubernetesVersion)); err != nil {
+        return fmt.Errorf("machine config %s validation failed: %v", controlPlaneMachineConfig.Name, err)
+    }
+
+    if spec.Cluster.Spec.ExternalEtcdConfiguration != nil {
+        etcdMachineConfig := spec.NutanixMachineConfigs[spec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name]
+        if etcdMachineConfig == nil {
+            return fmt.Errorf("cannot find NutanixMachineConfig %v for etcd machines", spec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name)
+        }
+        // Validate that the template name contains the cluster kubernetes version for the external etcd machines.
+        if err := v.validateTemplateMatchesKubernetesVersion(ctx, etcdMachineConfig.Spec.Image, client, string(spec.Cluster.Spec.KubernetesVersion)); err != nil {
+            return fmt.Errorf("machine config %s validation failed: %v", etcdMachineConfig.Name, err)
+        }
+    }
+
+    for _, workerNodeGroupConfiguration := range spec.Cluster.Spec.WorkerNodeGroupConfigurations {
+        kubernetesVersion := string(spec.Cluster.Spec.KubernetesVersion)
+        if workerNodeGroupConfiguration.KubernetesVersion != nil {
+            kubernetesVersion = string(*workerNodeGroupConfiguration.KubernetesVersion)
+        }
+        // Validate that the template name contains the kubernetes version for this worker node group's machines.
+        imageIdentifier := spec.NutanixMachineConfigs[workerNodeGroupConfiguration.MachineGroupRef.Name].Spec.Image
+        if err := v.validateTemplateMatchesKubernetesVersion(ctx, imageIdentifier, client, kubernetesVersion); err != nil {
+            return fmt.Errorf("machine config %s validation failed: %v", workerNodeGroupConfiguration.MachineGroupRef.Name, err)
+        }
+    }
     return nil
 }
 
@@ -264,6 +299,37 @@ func (v *Validator) validateImageConfig(ctx context.Context, client Client, iden
     return nil
 }
 
+func (v *Validator) validateTemplateMatchesKubernetesVersion(ctx context.Context, identifier anywherev1.NutanixResourceIdentifier, client Client, kubernetesVersionName string) error {
+    var templateName string
+    if identifier.Type == anywherev1.NutanixIdentifierUUID {
+        imageUUID := *identifier.UUID
+        imageDetails, err := client.GetImage(ctx, imageUUID)
+        if err != nil {
+            return fmt.Errorf("failed to find image with uuid %s: %v", imageUUID, err)
+        }
+        if imageDetails.Spec == nil || imageDetails.Spec.Name == nil {
+            return fmt.Errorf("failed to find image details with uuid %s", imageUUID)
+        }
+        templateName = *imageDetails.Spec.Name
+    } else {
+        templateName = *identifier.Name
+    }
+
+    // Normalize the template name: "1.23", "1-23" and "1_23" all become "123".
+    templateReplacer := strings.NewReplacer("-", "", ".", "", "_", "")
+    template := templateReplacer.Replace(templateName)
+    // Normalize the kubernetes version string: "1.23" becomes "123".
+    replacer := strings.NewReplacer(".", "")
+    kubernetesVersion := replacer.Replace(string(kubernetesVersionName))
+    // Return an error if the template name does not contain the specified kubernetes version.
+    // For example, if the kubernetes version is 1.23, the template name must include
+    // 1.23, 1-23, 1_23 or 123, e.g. kubernetes-1-23-eks.
+    if !strings.Contains(template, kubernetesVersion) {
+        return fmt.Errorf("missing kube version from the machine config template name: template=%s, version=%s", templateName, string(kubernetesVersionName))
+    }
+    return nil
+}
+
 func (v *Validator) validateSubnetConfig(ctx context.Context, client Client, identifier anywherev1.NutanixResourceIdentifier) error {
     switch identifier.Type {
     case anywherev1.NutanixIdentifierName:
diff --git a/pkg/providers/nutanix/validator_test.go b/pkg/providers/nutanix/validator_test.go
index d99a7f346dc9..fd2016b456c5 100644
--- a/pkg/providers/nutanix/validator_test.go
+++ b/pkg/providers/nutanix/validator_test.go
@@ -16,6 +16,8 @@ import (
     "k8s.io/apimachinery/pkg/api/resource"
     "sigs.k8s.io/yaml"
 
+    "github.com/aws/eks-anywhere/internal/test"
+    "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
     anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
     mockCrypto "github.com/aws/eks-anywhere/pkg/crypto/mocks"
     mocknutanix "github.com/aws/eks-anywhere/pkg/providers/nutanix/mocks"
@@ -670,3 +672,191 @@ func TestNutanixValidatorValidateDatacenterConfigWithInvalidCreds(t *testing.T)
         })
     }
 }
+
+func TestValidateClusterMachineConfigsError(t *testing.T) {
+    ctx := context.Background()
+    clusterConfigFile := "testdata/eksa-cluster-multiple-machineconfigs.yaml"
+    clusterSpec := test.NewFullClusterSpec(t, clusterConfigFile)
+    clusterSpec.Cluster.Spec.KubernetesVersion = "1.22"
+
+    ctrl := gomock.NewController(t)
+    mockClient := mocknutanix.NewMockClient(ctrl)
+    mockClient.EXPECT().GetCurrentLoggedInUser(gomock.Any()).Return(&v3.UserIntentResponse{}, nil).AnyTimes()
+
+    mockTLSValidator := mockCrypto.NewMockTlsValidator(ctrl)
+    mockTLSValidator.EXPECT().ValidateCert(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
+
+    mockTransport := mocknutanix.NewMockRoundTripper(ctrl)
+    mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes()
+
+    mockHTTPClient := &http.Client{Transport: mockTransport}
+    clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}}
+    validator := NewValidator(clientCache, mockTLSValidator, mockHTTPClient)
+
+    err := validator.checkImageNameMatchesKubernetesVersion(ctx, clusterSpec, clientCache.clients["test"])
+    if err == nil {
+        t.Fatalf("validation should not pass: %v", err)
+    }
+}
+
+func TestValidateClusterMachineConfigsCPNotFoundError(t *testing.T) {
+    ctx := context.Background()
+    clusterConfigFile := "testdata/eksa-cluster-multiple-machineconfigs.yaml"
+    clusterSpec := test.NewFullClusterSpec(t, clusterConfigFile)
+    clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name = "invalid-cp-name"
+
+    ctrl := gomock.NewController(t)
+    mockClient := mocknutanix.NewMockClient(ctrl)
+    mockClient.EXPECT().GetCurrentLoggedInUser(gomock.Any()).Return(&v3.UserIntentResponse{}, nil).AnyTimes()
+
+    mockTLSValidator := mockCrypto.NewMockTlsValidator(ctrl)
+    mockTLSValidator.EXPECT().ValidateCert(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
+
+    mockTransport := mocknutanix.NewMockRoundTripper(ctrl)
+    mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes()
+
+    mockHTTPClient := &http.Client{Transport: mockTransport}
+    clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}}
+    validator := NewValidator(clientCache, mockTLSValidator, mockHTTPClient)
+
+    err := validator.checkImageNameMatchesKubernetesVersion(ctx, clusterSpec, clientCache.clients["test"])
+    if err == nil {
+        t.Fatalf("validation should not pass: %v", err)
+    }
+}
+
+func TestValidateClusterMachineConfigsEtcdNotFoundError(t *testing.T) {
+    ctx := context.Background()
+    clusterConfigFile := "testdata/eksa-cluster-multiple-machineconfigs.yaml"
+    clusterSpec := test.NewFullClusterSpec(t, clusterConfigFile)
+    clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name = "invalid-etcd-name"
+
+    ctrl := gomock.NewController(t)
+    mockClient := mocknutanix.NewMockClient(ctrl)
+    mockClient.EXPECT().GetCurrentLoggedInUser(gomock.Any()).Return(&v3.UserIntentResponse{}, nil).AnyTimes()
+
+    mockTLSValidator := mockCrypto.NewMockTlsValidator(ctrl)
+    mockTLSValidator.EXPECT().ValidateCert(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
+
+    mockTransport := mocknutanix.NewMockRoundTripper(ctrl)
+    mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes()
+
+    mockHTTPClient := &http.Client{Transport: mockTransport}
+    clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}}
+    validator := NewValidator(clientCache, mockTLSValidator, mockHTTPClient)
+
+    err := validator.checkImageNameMatchesKubernetesVersion(ctx, clusterSpec, clientCache.clients["test"])
+    if err == nil {
+        t.Fatalf("validation should not pass: %v", err)
+    }
+}
+
+func TestValidateClusterMachineConfigsCPError(t *testing.T) {
+    ctx := context.Background()
+    clusterConfigFile := "testdata/eksa-cluster-multiple-machineconfigs.yaml"
+    clusterSpec := test.NewFullClusterSpec(t, clusterConfigFile)
+    clusterSpec.NutanixMachineConfigs["eksa-unit-test-cp"].Spec.Image.Name = utils.StringPtr("kubernetes_1_22")
+
+    ctrl := gomock.NewController(t)
+    mockClient := mocknutanix.NewMockClient(ctrl)
+    mockClient.EXPECT().GetCurrentLoggedInUser(gomock.Any()).Return(&v3.UserIntentResponse{}, nil).AnyTimes()
+
+    mockTLSValidator := mockCrypto.NewMockTlsValidator(ctrl)
+    mockTLSValidator.EXPECT().ValidateCert(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
+
+    mockTransport := mocknutanix.NewMockRoundTripper(ctrl)
+    mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes()
+
+    mockHTTPClient := &http.Client{Transport: mockTransport}
+    clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}}
+    validator := NewValidator(clientCache, mockTLSValidator, mockHTTPClient)
+
+    err := validator.checkImageNameMatchesKubernetesVersion(ctx, clusterSpec, clientCache.clients["test"])
+    if err == nil {
+        t.Fatalf("validation should not pass: %v", err)
+    }
+}
+
+func TestValidateClusterMachineConfigsEtcdError(t *testing.T) {
+    ctx := context.Background()
+    clusterConfigFile := "testdata/eksa-cluster-multiple-machineconfigs.yaml"
+    clusterSpec := test.NewFullClusterSpec(t, clusterConfigFile)
+    clusterSpec.NutanixMachineConfigs["eksa-unit-test"].Spec.Image.Name = utils.StringPtr("kubernetes_1_22")
+
+    ctrl := gomock.NewController(t)
+    mockClient := mocknutanix.NewMockClient(ctrl)
+    mockClient.EXPECT().GetCurrentLoggedInUser(gomock.Any()).Return(&v3.UserIntentResponse{}, nil).AnyTimes()
+
+    mockTLSValidator := mockCrypto.NewMockTlsValidator(ctrl)
+    mockTLSValidator.EXPECT().ValidateCert(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
+
+    mockTransport := mocknutanix.NewMockRoundTripper(ctrl)
+    mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes()
+
+    mockHTTPClient := &http.Client{Transport: mockTransport}
+    clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}}
+    validator := NewValidator(clientCache, mockTLSValidator, mockHTTPClient)
+
+    err := validator.checkImageNameMatchesKubernetesVersion(ctx, clusterSpec, clientCache.clients["test"])
+    if err == nil {
+        t.Fatalf("validation should not pass: %v", err)
+    }
+}
+
+func TestValidateClusterMachineConfigsModularUpgradeError(t *testing.T) {
+    ctx := context.Background()
+    clusterConfigFile := "testdata/eksa-cluster-multiple-machineconfigs.yaml"
+    clusterSpec := test.NewFullClusterSpec(t, clusterConfigFile)
+    kube122 := v1alpha1.KubernetesVersion("1.22")
+    clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].KubernetesVersion = &kube122
+
+    ctrl := gomock.NewController(t)
+    mockClient := mocknutanix.NewMockClient(ctrl)
+    mockClient.EXPECT().GetCurrentLoggedInUser(gomock.Any()).Return(&v3.UserIntentResponse{}, nil).AnyTimes()
+
+    mockTLSValidator := mockCrypto.NewMockTlsValidator(ctrl)
+    mockTLSValidator.EXPECT().ValidateCert(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
+
+    mockTransport := mocknutanix.NewMockRoundTripper(ctrl)
+    mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes()
+
+    mockHTTPClient := &http.Client{Transport: mockTransport}
+    clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}}
+    validator := NewValidator(clientCache, mockTLSValidator, mockHTTPClient)
+
+    err := validator.checkImageNameMatchesKubernetesVersion(ctx, clusterSpec, clientCache.clients["test"])
+    if err == nil {
+        t.Fatalf("validation should not pass: %v", err)
+    }
+}
+
+func TestValidateClusterMachineConfigsSuccess(t *testing.T) {
+    ctx := context.Background()
+    clusterConfigFile := "testdata/eksa-cluster-multiple-machineconfigs.yaml"
+    clusterSpec := test.NewFullClusterSpec(t, clusterConfigFile)
+
+    clusterSpec.Cluster.Spec.KubernetesVersion = "1.22"
+    clusterSpec.NutanixMachineConfigs["eksa-unit-test-cp"].Spec.Image.Name = utils.StringPtr("kubernetes_1_22")
+    clusterSpec.NutanixMachineConfigs["eksa-unit-test"].Spec.Image.Name = utils.StringPtr("kubernetes_1_22")
+    clusterSpec.NutanixMachineConfigs["eksa-unit-test-md-1"].Spec.Image.Name = utils.StringPtr("kubernetes_1_22")
+    clusterSpec.NutanixMachineConfigs["eksa-unit-test-md-2"].Spec.Image.Name = utils.StringPtr("kubernetes_1_22")
+
+    ctrl := gomock.NewController(t)
+    mockClient := mocknutanix.NewMockClient(ctrl)
+    mockClient.EXPECT().GetCurrentLoggedInUser(gomock.Any()).Return(&v3.UserIntentResponse{}, nil).AnyTimes()
+
+    mockTLSValidator := mockCrypto.NewMockTlsValidator(ctrl)
+    mockTLSValidator.EXPECT().ValidateCert(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
+
+    mockTransport := mocknutanix.NewMockRoundTripper(ctrl)
+    mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes()
+
+    mockHTTPClient := &http.Client{Transport: mockTransport}
+    clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}}
+    validator := NewValidator(clientCache, mockTLSValidator, mockHTTPClient)
+
+    err := validator.checkImageNameMatchesKubernetesVersion(ctx, clusterSpec, clientCache.clients["test"])
+    if err != nil {
+        t.Fatalf("validation should pass: %v", err)
+    }
+}
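
Note: the matching rule added in `validateTemplateMatchesKubernetesVersion` strips `-`, `.` and `_` from the template name, strips `.` from the Kubernetes version, and then requires a substring match. A minimal, standalone Go sketch of that comparison (illustrative only, not part of this diff; the helper name `templateMatchesKubernetesVersion` is hypothetical):

```go
package main

import (
	"fmt"
	"strings"
)

// templateMatchesKubernetesVersion mirrors the substring check used by the
// validator: separator characters are stripped from both sides, so
// "kubernetes-1-22", "kubernetes_1_22" and "kubernetes122" all match "1.22".
func templateMatchesKubernetesVersion(templateName, kubernetesVersion string) bool {
	template := strings.NewReplacer("-", "", ".", "", "_", "").Replace(templateName)
	version := strings.NewReplacer(".", "").Replace(kubernetesVersion)
	return strings.Contains(template, version)
}

func main() {
	fmt.Println(templateMatchesKubernetesVersion("prism-image-1-19", "1.19")) // true
	fmt.Println(templateMatchesKubernetesVersion("kubernetes_1_22", "1.22"))  // true
	fmt.Println(templateMatchesKubernetesVersion("prism-image", "1.19"))      // false
}
```

Under this rule the old `prism-image` test image name contains no version digits and would now fail validation, which is presumably why the testdata images above were renamed to `prism-image-1-19`.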