From f47a7ae6c69973a422dd3ab39b052b872af08313 Mon Sep 17 00:00:00 2001 From: Melissa Lee Date: Wed, 21 Jun 2023 09:51:46 -0400 Subject: [PATCH] Unit test enhancements --- Makefile | 37 +- utils/reconciler_test.go | 21 +- utils/utils_test.go | 737 ++++++++++++++++++++++++++++++++++----- 3 files changed, 691 insertions(+), 104 deletions(-) diff --git a/Makefile b/Makefile index 83937128..fdf546a6 100644 --- a/Makefile +++ b/Makefile @@ -93,6 +93,9 @@ endif # Produce CRDs that work back to Kubernetes 1.11 (no version conversion) CRD_OPTIONS ?= "crd:crdVersions=v1,generateEmbeddedObjectMeta=true" +# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. +ENVTEST_K8S_VERSION = 1.24 + # Produce files under internal/deploy/kustomize/daily with runtime-component namespace KUSTOMIZE_NAMESPACE = runtime-component KUSTOMIZE_IMG = icr.io/appcafe/runtime-component-operator:daily @@ -141,22 +144,31 @@ LOCALBIN ?= $(shell pwd)/bin $(LOCALBIN): mkdir -p $(LOCALBIN) +## Tool Binaries +KUSTOMIZE ?= $(LOCALBIN)/kustomize CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen -# find or download controller-gen -# download controller-gen if necessary -.PHONY: controller-gen -controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. -$(CONTROLLER_GEN): $(LOCALBIN) - test -s $(LOCALBIN)/controller-gen || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.9.2 +ENVTEST ?= $(LOCALBIN)/setup-envtest -KUSTOMIZE ?= $(LOCALBIN)/kustomize +## Tool Versions KUSTOMIZE_VERSION ?= 4.5.5 +CONTROLLER_TOOLS_VERSION ?= 0.9.2 + KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/kustomize/v${KUSTOMIZE_VERSION}/hack/install_kustomize.sh" .PHONY: kustomize kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. $(KUSTOMIZE): $(LOCALBIN) test -s $(LOCALBIN)/kustomize || curl -s $(KUSTOMIZE_INSTALL_SCRIPT) | bash -s $(KUSTOMIZE_VERSION) $(LOCALBIN) +.PHONY: controller-gen +controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. +$(CONTROLLER_GEN): $(LOCALBIN) + test -s $(LOCALBIN)/controller-gen || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@v$(CONTROLLER_TOOLS_VERSION) + +.PHONY: envtest +envtest: $(ENVTEST) ## Download envtest-setup locally if necessary. +$(ENVTEST): $(LOCALBIN) + test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest + .PHONY: setup setup: ## Ensure Operator SDK is installed. ./scripts/installers/install-operator-sdk.sh ${OPERATOR_SDK_RELEASE_VERSION} @@ -219,12 +231,13 @@ fmt: ## Run go fmt against code. vet: ## Run go vet against code. go vet ./... -ENVTEST_ASSETS_DIR=$(shell pwd)/testbin .PHONY: test -test: manifests generate fmt vet ## Run tests. - mkdir -p ${ENVTEST_ASSETS_DIR} - test -f ${ENVTEST_ASSETS_DIR}/setup-envtest.sh || curl -sSLo ${ENVTEST_ASSETS_DIR}/setup-envtest.sh https://raw.githubusercontent.com/kubernetes-sigs/controller-runtime/v0.7.2/hack/setup-envtest.sh - source ${ENVTEST_ASSETS_DIR}/setup-envtest.sh; fetch_envtest_tools $(ENVTEST_ASSETS_DIR); setup_envtest_env $(ENVTEST_ASSETS_DIR); go test ./... -coverprofile cover.out +test: manifests generate fmt vet envtest ## Run tests. + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... 
-coverprofile cover.out + +.PHONY: test-cover +test-cover: test + go tool cover -html=cover.out .PHONY: unit-test unit-test: ## Run unit tests diff --git a/utils/reconciler_test.go b/utils/reconciler_test.go index 5237ac22..65f8bb4f 100644 --- a/utils/reconciler_test.go +++ b/utils/reconciler_test.go @@ -3,11 +3,12 @@ package utils import ( "context" "fmt" - appstacksv1 "github.com/application-stacks/runtime-component-operator/api/v1" "reflect" "strings" "testing" + appstacksv1 "github.com/application-stacks/runtime-component-operator/api/v1" + "github.com/application-stacks/runtime-component-operator/common" routev1 "github.com/openshift/api/route/v1" @@ -166,7 +167,7 @@ func TestGetDiscoveryClient(t *testing.T) { logger := zap.New() logf.SetLogger(logger) - runtimecomponent := createRuntimeComponent(name, namespace, spec) + runtimecomponent := createRuntimeComponent(objMeta, spec) objs, s := []runtime.Object{runtimecomponent}, scheme.Scheme s.AddKnownTypes(appstacksv1.GroupVersion, runtimecomponent) cl := fakeclient.NewFakeClient(objs...) @@ -186,7 +187,7 @@ func TestCreateOrUpdate(t *testing.T) { logf.SetLogger(logger) serviceAccount := &corev1.ServiceAccount{ObjectMeta: defaultMeta} - runtimecomponent := createRuntimeComponent(name, namespace, spec) + runtimecomponent := createRuntimeComponent(objMeta, spec) objs, s := []runtime.Object{runtimecomponent}, scheme.Scheme s.AddKnownTypes(appstacksv1.GroupVersion, runtimecomponent) cl := fakeclient.NewFakeClient(objs...) @@ -207,7 +208,7 @@ func TestDeleteResources(t *testing.T) { logger := zap.New() logf.SetLogger(logger) - runtimecomponent := createRuntimeComponent(name, namespace, spec) + runtimecomponent := createRuntimeComponent(objMeta, spec) objs, s := []runtime.Object{runtimecomponent}, scheme.Scheme s.AddKnownTypes(appstacksv1.GroupVersion, runtimecomponent) cl := fakeclient.NewFakeClient(objs...) @@ -260,7 +261,7 @@ func TestGetOpConfigMap(t *testing.T) { }, } - runtimecomponent := createRuntimeComponent(name, namespace, spec) + runtimecomponent := createRuntimeComponent(objMeta, spec) objs, s := []runtime.Object{runtimecomponent}, scheme.Scheme s.AddKnownTypes(appstacksv1.GroupVersion, runtimecomponent) cl := fakeclient.NewFakeClient(objs...) @@ -286,7 +287,7 @@ func TestManageError(t *testing.T) { logf.SetLogger(logger) err := fmt.Errorf("test-error") - runtimecomponent := createRuntimeComponent(name, namespace, spec) + runtimecomponent := createRuntimeComponent(objMeta, spec) objs, s := []runtime.Object{runtimecomponent}, scheme.Scheme s.AddKnownTypes(appstacksv1.GroupVersion, runtimecomponent) cl := fakeclient.NewFakeClient(objs...) @@ -307,7 +308,7 @@ func TestManageSuccess(t *testing.T) { logger := zap.New() logf.SetLogger(logger) - runtimecomponent := createRuntimeComponent(name, namespace, spec) + runtimecomponent := createRuntimeComponent(objMeta, spec) objs, s := []runtime.Object{runtimecomponent}, scheme.Scheme s.AddKnownTypes(appstacksv1.GroupVersion, runtimecomponent) cl := fakeclient.NewFakeClient(objs...) @@ -326,7 +327,7 @@ func TestIsGroupVersionSupported(t *testing.T) { logger := zap.New() logf.SetLogger(logger) - runtimecomponent := createRuntimeComponent(name, namespace, spec) + runtimecomponent := createRuntimeComponent(objMeta, spec) objs, s := []runtime.Object{runtimecomponent}, scheme.Scheme s.AddKnownTypes(appstacksv1.GroupVersion, runtimecomponent) cl := fakeclient.NewFakeClient(objs...) 
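Note on the updated test target: `$(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path` prints the directory holding the kube-apiserver and etcd binaries that setup-envtest downloaded, and exporting that path as KUBEBUILDER_ASSETS is how envtest-based suites locate a local control plane. A minimal sketch of a suite bootstrap that would consume it, assuming the suite adds a TestMain like the one below (illustrative only, not part of this patch):

    package utils

    import (
        "os"
        "testing"

        "sigs.k8s.io/controller-runtime/pkg/envtest"
    )

    // Illustrative bootstrap: envtest.Environment reads KUBEBUILDER_ASSETS to
    // find the control-plane binaries that `make test` downloads.
    func TestMain(m *testing.M) {
        testEnv := &envtest.Environment{}
        if _, err := testEnv.Start(); err != nil {
            os.Exit(1)
        }
        code := m.Run()
        _ = testEnv.Stop()
        os.Exit(code)
    }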
@@ -363,7 +364,7 @@ func TestIsGroupVersionSupported(t *testing.T) { func testGetSvcTLSValues(t *testing.T) { // Configure the runtime component - runtimecomponent := createRuntimeComponent(name, namespace, spec) + runtimecomponent := createRuntimeComponent(objMeta, spec) expose := true runtimecomponent.Spec.Expose = &expose runtimecomponent.Spec.Service = &appstacksv1.RuntimeComponentService{ @@ -404,7 +405,7 @@ func testGetSvcTLSValues(t *testing.T) { // testGetRouteTLSValues tests the function GetRouteTLSValues in reconciler.go. func testGetRouteTLSValues(t *testing.T) { // Configure the runtime component - runtimecomponent := createRuntimeComponent(name, namespace, spec) + runtimecomponent := createRuntimeComponent(objMeta, spec) terminationPolicy := routev1.TLSTerminationReencrypt secretRefName := "my-app-route-tls" runtimecomponent.Spec.Expose = &expose diff --git a/utils/utils_test.go b/utils/utils_test.go index 88ba92dd..34d58d78 100644 --- a/utils/utils_test.go +++ b/utils/utils_test.go @@ -1,59 +1,89 @@ package utils import ( - appstacksv1 "github.com/application-stacks/runtime-component-operator/api/v1" + "errors" + "fmt" "os" "reflect" "strconv" "testing" + appstacksv1 "github.com/application-stacks/runtime-component-operator/api/v1" + "github.com/application-stacks/runtime-component-operator/common" + routev1 "github.com/openshift/api/route/v1" prometheusv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" appsv1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - cruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" servingv1 "knative.dev/serving/pkg/apis/serving/v1" + "sigs.k8s.io/controller-runtime/pkg/client" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" ) var ( - name = "my-app" - namespace = "runtime" - stack = "java-microprofile" - appImage = "my-image" - replicas int32 = 2 - expose = true - createKNS = true + name = "my-app" + namespace = "runtime" + appImage = "my-image" + objMeta = metav1.ObjectMeta{Name: name, Namespace: namespace} + labels = map[string]string{"key1": "value1"} + annotations = map[string]string{"key2": "value2"} + stack = "java-microprofile" + replicas int32 = 2 + createKNS = true + manageTLS = true + envFrom = []corev1.EnvFromSource{{Prefix: namespace}} + env = []corev1.EnvVar{{Name: namespace}} + pullPolicy = corev1.PullAlways + pullSecret = "mysecret" + serviceAccountName = "service-account" + + // Autoscaling & Resource targetCPUPer int32 = 30 - targetPort int32 = 3333 - nodePort int32 = 3011 autoscaling = &appstacksv1.RuntimeComponentAutoScaling{ TargetCPUUtilizationPercentage: &targetCPUPer, MinReplicas: &replicas, MaxReplicas: 3, } - envFrom = []corev1.EnvFromSource{{Prefix: namespace}} - env = []corev1.EnvVar{{Name: namespace}} - pullPolicy = corev1.PullAlways - pullSecret = "mysecret" - serviceAccountName = "service-account" - serviceType = corev1.ServiceTypeClusterIP - service = &appstacksv1.RuntimeComponentService{Type: &serviceType, Port: 8443} - deploymentAnnos = map[string]string{"depAnno": "depAnno"} - deployment = &appstacksv1.RuntimeComponentDeployment{Annotations: deploymentAnnos} - ssAnnos = map[string]string{"setAnno": "setAnno"} - statefulSet = &appstacksv1.RuntimeComponentStatefulSet{Annotations: ssAnnos} - 
volumeCT = &corev1.PersistentVolumeClaim{ + resLimits = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: {}, + } + resourceContraints = &corev1.ResourceRequirements{Limits: resLimits} + + // Service + targetPort int32 = 3333 + additionalTargetPort int32 = 9000 + nodePort int32 = 3011 + serviceClusterIPType = corev1.ServiceTypeClusterIP + serviceNodePortType = corev1.ServiceTypeNodePort + svcPortName = "myservice" + service = &appstacksv1.RuntimeComponentService{Type: &serviceClusterIPType, Port: 8443} + ports = []corev1.ServicePort{{Name: "https", Port: 9080, TargetPort: intstr.FromInt(9000)}, {Port: targetPort}} + + // Deployment + deploymentAnnos = map[string]string{"depAnno": "depAnno"} + deployment = &appstacksv1.RuntimeComponentDeployment{Annotations: deploymentAnnos} + + // StatefulSet + ssAnnos = map[string]string{"setAnno": "setAnno"} + statefulSet = &appstacksv1.RuntimeComponentStatefulSet{Annotations: ssAnnos} + + // Storage & Volume + volumeCT = &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{Name: "pvc", Namespace: namespace}, TypeMeta: metav1.TypeMeta{Kind: "StatefulSet"}} - storage = appstacksv1.RuntimeComponentStorage{Size: "10Mi", MountPath: "/mnt/data", VolumeClaimTemplate: volumeCT} - arch = []string{"ppc64le"} + storage = appstacksv1.RuntimeComponentStorage{Size: "10Mi", MountPath: "/mnt/data", VolumeClaimTemplate: volumeCT} + arch = []string{"ppc64le"} + volume = corev1.Volume{Name: "runtime-volume"} + volumeMount = corev1.VolumeMount{Name: volumeCT.Name, MountPath: storage.MountPath} + + // Probe readinessProbe = &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ HTTPGet: &corev1.HTTPGetAction{}, @@ -76,13 +106,17 @@ var ( Readiness: readinessProbe, Liveness: livenessProbe, Startup: startupProbe} - volume = corev1.Volume{Name: "runtime-volume"} - volumeMount = corev1.VolumeMount{Name: volumeCT.Name, MountPath: storage.MountPath} - resLimits = map[corev1.ResourceName]resource.Quantity{ - corev1.ResourceCPU: {}, - } - resourceContraints = &corev1.ResourceRequirements{Limits: resLimits} - secret = &corev1.Secret{ + + // Route & Ingress + expose = true + notExposed = false + key = "key" + crt = "crt" + ca = "ca" + destCACert = "destCACert" + + // Fake client with Secrets + secret = &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "mysecret", Namespace: namespace, @@ -98,8 +132,23 @@ var ( Type: "Opaque", Data: map[string][]byte{"key": []byte("value")}, } - objs = []cruntime.Object{secret, secret2} - fcl = fakeclient.NewFakeClient(objs...) 
+ secretObjs = []client.Object{secret, secret2} + fclSecret = fakeclient.NewClientBuilder().WithObjects(secretObjs...).Build() + + // Fake client with ServiceAccount + imagePullSecret = corev1.LocalObjectReference{Name: "mysecret"} + serviceAccount = &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceAccountName, + Namespace: namespace, + ResourceVersion: "9999", + }, + ImagePullSecrets: []corev1.LocalObjectReference{imagePullSecret}, + } + SAObjs = []client.Object{serviceAccount, secret} + fclSA = fakeclient.NewClientBuilder().WithObjects(SAObjs...).Build() + + fclEmpty = fakeclient.NewClientBuilder().WithObjects().Build() ) type Test struct { @@ -108,15 +157,91 @@ type Test struct { actual interface{} } -func TestCustomizeRoute(t *testing.T) { +func TestCustomizeDeployment(t *testing.T) { logger := zap.New() logf.SetLogger(logger) + + // Test Recreate update strategy configuration + deploymentConfig := &appstacksv1.RuntimeComponentDeployment{ + UpdateStrategy: &appsv1.DeploymentStrategy{ + Type: appsv1.RecreateDeploymentStrategyType}, + } + var replicas int32 = 1 + spec := appstacksv1.RuntimeComponentSpec{Service: service, Deployment: deploymentConfig, Replicas: &replicas} + dp, runtime := &appsv1.Deployment{}, createRuntimeComponent(objMeta, spec) + CustomizeDeployment(dp, runtime) + updateStrategy1 := dp.Spec.Strategy.Type + + // Test Rolling update strategy (default) configuration + spec.Deployment = &appstacksv1.RuntimeComponentDeployment{Annotations: deploymentAnnos} + dp, runtime = &appsv1.Deployment{}, createRuntimeComponent(objMeta, spec) + CustomizeDeployment(dp, runtime) + updateStrategy2 := dp.Spec.Strategy.Type + + testCD := []Test{ + {"Deployment replicas", replicas, *dp.Spec.Replicas}, + {"Deployment labels", name, dp.Labels["app.kubernetes.io/instance"]}, + {"Deployment annotations", deploymentAnnos, dp.Annotations}, + {"Deployment recreate update strategy", appsv1.RecreateDeploymentStrategyType, updateStrategy1}, + {"Deployment rolling update strategy", appsv1.RollingUpdateDeploymentStrategyType, updateStrategy2}, + } + verifyTests(testCD, t) +} +func TestCustomizeStatefulSet(t *testing.T) { + logger := zap.New() + logf.SetLogger(logger) + + // Test OnDelete update strategy configuration + statefulsetConfig := &appstacksv1.RuntimeComponentStatefulSet{ + UpdateStrategy: &appsv1.StatefulSetUpdateStrategy{ + Type: appsv1.OnDeleteStatefulSetStrategyType}, + } + var replicas int32 = 1 + spec := appstacksv1.RuntimeComponentSpec{Service: service, StatefulSet: statefulsetConfig, Replicas: &replicas} + ss, runtime := &appsv1.StatefulSet{}, createRuntimeComponent(objMeta, spec) + CustomizeStatefulSet(ss, runtime) + updateStrategy1 := ss.Spec.UpdateStrategy.Type + + // Test rolling update strategy (default) + spec.StatefulSet = &appstacksv1.RuntimeComponentStatefulSet{Annotations: ssAnnos} + ss, runtime = &appsv1.StatefulSet{}, createRuntimeComponent(objMeta, spec) + CustomizeStatefulSet(ss, runtime) + updateStrategy2 := ss.Spec.UpdateStrategy.Type + + testCS := []Test{ + {"StatefulSet replicas", replicas, *ss.Spec.Replicas}, + {"StatefulSet service name", name + "-headless", ss.Spec.ServiceName}, + {"StatefulSet labels", name, ss.Labels["app.kubernetes.io/instance"]}, + {"StatefulSet annotations", ssAnnos, ss.Annotations}, + {"StatefulSet ondelete update strategy", appsv1.OnDeleteStatefulSetStrategyType, updateStrategy1}, + {"StatefulSet rolling update strategy", appsv1.RollingUpdateStatefulSetStrategyType, updateStrategy2}, + } + verifyTests(testCS, t) +} + 
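The fixture changes above also migrate off the deprecated fakeclient.NewFakeClient, which took untyped runtime.Object values; newer controller-runtime releases expose a builder that accepts typed client.Object values instead. A minimal sketch of the pattern, using the same imports as this file (the helper name is hypothetical):

    // newFakeClient preloads a fake client with initial objects. Without
    // WithScheme, the builder falls back to the client-go default scheme.
    func newFakeClient(objs ...client.Object) client.Client {
        return fakeclient.NewClientBuilder().WithObjects(objs...).Build()
    }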
+func routeTerminationTypeTestHelper(termination routev1.TLSTerminationType, insecureEdgeTerminationPolicy routev1.InsecureEdgeTerminationPolicyType) *routev1.Route { + // Set various Termination and InsecureEdgeTerminationPolicy for Route spec := appstacksv1.RuntimeComponentSpec{Service: service} - route, runtime := &routev1.Route{}, createRuntimeComponent(name, namespace, spec) + common.Config[common.OpConfigDefaultHostname] = "defaultHostName" + routeConfig := &appstacksv1.RuntimeComponentRoute{Termination: &termination, InsecureEdgeTerminationPolicy: &insecureEdgeTerminationPolicy} + spec.Route = routeConfig + spec.Service.PortName = svcPortName + + route, runtime := &routev1.Route{}, createRuntimeComponent(objMeta, spec) + CustomizeRoute(route, runtime, key, crt, ca, destCACert) + + return route +} + +func TestCustomizeRoute(t *testing.T) { + logger := zap.New() + logf.SetLogger(logger) + // Test default/empty Route configurations + spec := appstacksv1.RuntimeComponentSpec{Service: service} + route, runtime := &routev1.Route{}, createRuntimeComponent(objMeta, spec) CustomizeRoute(route, runtime, "", "", "", "") - //TestGetLabels testCR := []Test{ {"Route labels", name, route.Labels["app.kubernetes.io/instance"]}, {"Route target kind", "Service", route.Spec.To.Kind}, @@ -124,38 +249,68 @@ func TestCustomizeRoute(t *testing.T) { {"Route target weight", int32(100), *route.Spec.To.Weight}, {"Route service target port", intstr.FromString(strconv.Itoa(int(runtime.Spec.Service.Port)) + "-tcp"), route.Spec.Port.TargetPort}, } + verifyTests(testCR, t) + // Test Route configurations with Reencrypt termination and Allow termination policy + insecureEdgeTerminationPolicy := routev1.InsecureEdgeTerminationPolicyAllow + termination := routev1.TLSTerminationReencrypt + reencryptRoute := routeTerminationTypeTestHelper(termination, insecureEdgeTerminationPolicy) + + // Test Route configurations with Passthrough termination and None termination policy + termination = routev1.TLSTerminationPassthrough + insecureEdgeTerminationPolicy = routev1.InsecureEdgeTerminationPolicyNone + passthroughRoute := routeTerminationTypeTestHelper(termination, insecureEdgeTerminationPolicy) + + // Test Route configurations with Edge termination and Redirect termination policy + insecureEdgeTerminationPolicy = routev1.InsecureEdgeTerminationPolicyRedirect + termination = routev1.TLSTerminationEdge + edgeRoute := routeTerminationTypeTestHelper(termination, insecureEdgeTerminationPolicy) + + testCR = []Test{ + {"Route host", name + "-" + namespace + "." 
+ "defaultHostName", reencryptRoute.Spec.Host}, + {"Route target port", intstr.FromString(svcPortName), reencryptRoute.Spec.Port.TargetPort}, + {"Allow termination policy", routev1.InsecureEdgeTerminationPolicyAllow, reencryptRoute.Spec.TLS.InsecureEdgeTerminationPolicy}, + {"Route encryption termination", routev1.TLSTerminationReencrypt, reencryptRoute.Spec.TLS.Termination}, + {"Route encryption termination cert", crt, reencryptRoute.Spec.TLS.Certificate}, + {"Route encryption termination CAcert", ca, reencryptRoute.Spec.TLS.CACertificate}, + {"Route encryption termination key", key, reencryptRoute.Spec.TLS.Key}, + {"Route encryption termination destCACert", destCACert, reencryptRoute.Spec.TLS.DestinationCACertificate}, + + {"None termination policy", routev1.InsecureEdgeTerminationPolicyNone, passthroughRoute.Spec.TLS.InsecureEdgeTerminationPolicy}, + {"Route passthrough termination", routev1.TLSTerminationPassthrough, passthroughRoute.Spec.TLS.Termination}, + {"Route passthrough termination cert", "", passthroughRoute.Spec.TLS.Certificate}, + {"Route passthrough termination CAcert", "", passthroughRoute.Spec.TLS.CACertificate}, + {"Route passthrough termination key", "", passthroughRoute.Spec.TLS.Key}, + {"Route passthrough termination destCACert", "", passthroughRoute.Spec.TLS.DestinationCACertificate}, + + {"Redirect termination policy", routev1.InsecureEdgeTerminationPolicyRedirect, edgeRoute.Spec.TLS.InsecureEdgeTerminationPolicy}, + {"Route edge termination", routev1.TLSTerminationEdge, edgeRoute.Spec.TLS.Termination}, + {"Route edge termination cert", crt, edgeRoute.Spec.TLS.Certificate}, + {"Route edge termination CAcert", ca, edgeRoute.Spec.TLS.CACertificate}, + {"Route edge termination key", key, edgeRoute.Spec.TLS.Key}, + {"Route edge termination destCACert", "", edgeRoute.Spec.TLS.DestinationCACertificate}, + } verifyTests(testCR, t) } -func TestCustomizeService(t *testing.T) { +func TestErrorIsNoMatchesForKind(t *testing.T) { logger := zap.New() logf.SetLogger(logger) - spec := appstacksv1.RuntimeComponentSpec{Service: service} - svc, runtime := &corev1.Service{}, createRuntimeComponent(name, namespace, spec) + newError := errors.New("test error") + errorValue := ErrorIsNoMatchesForKind(newError, "kind", "version") - CustomizeService(svc, runtime) - testCS := []Test{ - {"Service number of exposed ports", 1, len(svc.Spec.Ports)}, - {"Sercice first exposed port", runtime.Spec.Service.Port, svc.Spec.Ports[0].Port}, - {"Service first exposed target port", intstr.FromInt(int(runtime.Spec.Service.Port)), svc.Spec.Ports[0].TargetPort}, - {"Service type", *runtime.Spec.Service.Type, svc.Spec.Type}, - {"Service selector", name, svc.Spec.Selector["app.kubernetes.io/instance"]}, + testCR := []Test{ + {"Error", false, errorValue}, } - verifyTests(testCS, t) - - // Verify behaviour of optional target port functionality - verifyTests(optionalTargetPortFunctionalityTests(), t) - - // verify optional nodePort functionality in NodePort service - verifyTests(optionalNodePortFunctionalityTests(), t) + verifyTests(testCR, t) } func optionalTargetPortFunctionalityTests() []Test { + // Test Service with target port spec := appstacksv1.RuntimeComponentSpec{Service: service} spec.Service.TargetPort = &targetPort - svc, runtime := &corev1.Service{}, createRuntimeComponent(name, namespace, spec) + svc, runtime := &corev1.Service{}, createRuntimeComponent(objMeta, spec) CustomizeService(svc, runtime) testCS := []Test{ @@ -169,15 +324,16 @@ func optionalTargetPortFunctionalityTests() []Test { } func 
optionalNodePortFunctionalityTests() []Test { + // Test Service with nodeport serviceType := corev1.ServiceTypeNodePort service := &appstacksv1.RuntimeComponentService{Type: &serviceType, Port: 8443, NodePort: &nodePort} spec := appstacksv1.RuntimeComponentSpec{Service: service} - svc, runtime := &corev1.Service{}, createRuntimeComponent(name, namespace, spec) + svc, runtime := &corev1.Service{}, createRuntimeComponent(objMeta, spec) CustomizeService(svc, runtime) testCS := []Test{ {"Service number of exposed ports", 1, len(svc.Spec.Ports)}, - {"Sercice first exposed port", runtime.Spec.Service.Port, svc.Spec.Ports[0].Port}, + {"Service first exposed port", runtime.Spec.Service.Port, svc.Spec.Ports[0].Port}, {"Service first exposed target port", intstr.FromInt(int(runtime.Spec.Service.Port)), svc.Spec.Ports[0].TargetPort}, {"Service type", *runtime.Spec.Service.Type, svc.Spec.Type}, {"Service selector", name, svc.Spec.Selector["app.kubernetes.io/instance"]}, @@ -186,6 +342,176 @@ func optionalNodePortFunctionalityTests() []Test { return testCS } +func additionalPortFunctionalityTests(t *testing.T) { + // Test Service with additional ports + spec := appstacksv1.RuntimeComponentSpec{Service: service} + svc, runtime := &corev1.Service{}, createRuntimeComponent(objMeta, spec) + runtime.Spec.Service.Ports = ports + CustomizeService(svc, runtime) + + testCS := []Test{ + {"Service number of exposed ports", 3, len(svc.Spec.Ports)}, + {"Second exposed port", ports[0].Port, svc.Spec.Ports[1].Port}, + {"Second exposed target port", additionalTargetPort, svc.Spec.Ports[1].TargetPort.IntVal}, + {"Second exposed port name", ports[0].Name, svc.Spec.Ports[1].Name}, + {"Second nodeport", ports[0].NodePort, svc.Spec.Ports[1].NodePort}, + {"Third exposed port", ports[1].Port, svc.Spec.Ports[2].Port}, + {"Third exposed port name", fmt.Sprint(ports[1].Port) + "-tcp", svc.Spec.Ports[2].Name}, + {"Third nodeport", ports[1].NodePort, svc.Spec.Ports[2].NodePort}, + } + verifyTests(testCS, t) + + // Test Service with additional nodeport + runtime.Spec.Service.Ports = runtime.Spec.Service.Ports[:len(runtime.Spec.Service.Ports)-1] + runtime.Spec.Service.Ports[0].NodePort = 3000 + runtime.Spec.Service.Type = &serviceNodePortType + CustomizeService(svc, runtime) + + testCS = []Test{ + {"Service number of exposed ports", 2, len(svc.Spec.Ports)}, + {"First nodeport", runtime.Spec.Service.Ports[0].NodePort, svc.Spec.Ports[1].NodePort}, + {"Port type", serviceNodePortType, svc.Spec.Type}, + } + verifyTests(testCS, t) + + // Test Service with no more additional ports + runtime.Spec.Service.Ports = nil + runtime.Spec.Service.PortName = svcPortName + CustomizeService(svc, runtime) + + testCS = []Test{ + {"Service number of exposed ports", 1, len(svc.Spec.Ports)}, + {"Service port name", svcPortName, svc.Spec.Ports[0].Name}, + } + verifyTests(testCS, t) +} + +func TestCustomizeService(t *testing.T) { + logger := zap.New() + logf.SetLogger(logger) + + spec := appstacksv1.RuntimeComponentSpec{Service: service} + svc, runtime := &corev1.Service{}, createRuntimeComponent(objMeta, spec) + + CustomizeService(svc, runtime) + testCS := []Test{ + {"Service number of exposed ports", 1, len(svc.Spec.Ports)}, + {"Service first exposed port", runtime.Spec.Service.Port, svc.Spec.Ports[0].Port}, + {"Service first exposed target port", intstr.FromInt(int(runtime.Spec.Service.Port)), svc.Spec.Ports[0].TargetPort}, + {"Service type", *runtime.Spec.Service.Type, svc.Spec.Type}, + {"Service selector", name, 
svc.Spec.Selector["app.kubernetes.io/instance"]}, + } + verifyTests(testCS, t) + + // Verify behaviour of optional target port functionality + verifyTests(optionalTargetPortFunctionalityTests(), t) + + // verify optional nodePort functionality in NodePort service + verifyTests(optionalNodePortFunctionalityTests(), t) + + // Verify behaviour of additional ports functionality + additionalPortFunctionalityTests(t) +} + +func TestCustomizeProbes(t *testing.T) { + logger := zap.New() + logf.SetLogger(logger) + + // Test nil Probes + var nilProbe *corev1.Probe + spec := appstacksv1.RuntimeComponentSpec{ + ApplicationImage: appImage, + Service: service, + } + pts, runtime := &corev1.PodTemplateSpec{}, createRuntimeComponent(objMeta, spec) + CustomizePodSpec(pts, runtime) + nilLivenessProbe := pts.Spec.Containers[0].LivenessProbe + nilReadinessProbe := pts.Spec.Containers[0].ReadinessProbe + nilStartupProbe := pts.Spec.Containers[0].StartupProbe + + // Test LivenessProbe without probe handler (default probe handler) + livenessProbeConfig := &corev1.Probe{ + InitialDelaySeconds: 60, + TimeoutSeconds: 2, + PeriodSeconds: 10, + SuccessThreshold: 10, + FailureThreshold: 3} + + // Test empty ReadinessProbe (default probe config) + defaultLivenessProbeConfig := common.GetDefaultMicroProfileLivenessProbe(runtime) + defaultReadinessProbeConfig := common.GetDefaultMicroProfileReadinessProbe(runtime) + runtime.Spec.Probes = &appstacksv1.RuntimeComponentProbes{ + Readiness: &corev1.Probe{}, + Liveness: livenessProbeConfig} + CustomizePodSpec(pts, runtime) + + defaultReadinessProbe := pts.Spec.Containers[0].ReadinessProbe + configuredLivenessProbe := pts.Spec.Containers[0].LivenessProbe + livenessProbeConfig.ProbeHandler = defaultLivenessProbeConfig.ProbeHandler + emptyStartupProbe := pts.Spec.Containers[0].StartupProbe + + testCP := []Test{ + {"Nil LivenessProbe", nilProbe, nilLivenessProbe}, + {"Nil ReadinessProbe", nilProbe, nilReadinessProbe}, + {"Nil StartupProbe", nilProbe, nilStartupProbe}, + + {"Configured LivenessProbe", livenessProbeConfig, configuredLivenessProbe}, + {"Default ReadinessProbe", defaultReadinessProbeConfig, defaultReadinessProbe}, + {"Empty StartupProbe", nilProbe, emptyStartupProbe}, + } + verifyTests(testCP, t) +} + +func TestCustomizeNetworkPolicy(t *testing.T) { + logger := zap.New() + logf.SetLogger(logger) + + isOpenshift := true + npConfig := &appstacksv1.RuntimeComponentNetworkPolicy{} + + spec := appstacksv1.RuntimeComponentSpec{ + Expose: &expose, + Service: service, + NetworkPolicy: npConfig, + } + networkPolicy, runtime := &networkingv1.NetworkPolicy{}, createRuntimeComponent(objMeta, spec) + + // NetworkPolicy for OpenShift when exposed + CustomizeNetworkPolicy(networkPolicy, isOpenshift, runtime) + openshiftNP := networkPolicy + + // NetworkPolicy for Non-OpenShift when exposed + runtime.Spec.Service.Ports = ports + CustomizeNetworkPolicy(networkPolicy, !isOpenshift, runtime) + nonOpenshiftNP := networkPolicy + + // NetworkPolicy for Non-OpenShift when not exposed + runtime.Spec.NetworkPolicy = &appstacksv1.RuntimeComponentNetworkPolicy{ + NamespaceLabels: &map[string]string{"namespace": "test"}, + FromLabels: &map[string]string{"foo": "bar"}, + } + runtime.Spec.Service.Ports = nil + runtime.Spec.Expose = ¬Exposed + CustomizeNetworkPolicy(networkPolicy, !isOpenshift, runtime) + notExposedNP := networkPolicy + + runtime.Spec.NetworkPolicy = &appstacksv1.RuntimeComponentNetworkPolicy{ + NamespaceLabels: &map[string]string{}, + FromLabels: &map[string]string{}, + } + + 
CustomizeNetworkPolicy(networkPolicy, isOpenshift, runtime) + allowAllNP := networkPolicy + + testCNP := []Test{ + {"OpenShift NetworkPolicy", name, openshiftNP.Labels["app.kubernetes.io/instance"]}, + {"Non OpenShift NetworkPolicy", "runtime-component-operator", nonOpenshiftNP.Labels["app.kubernetes.io/managed-by"]}, + {"Non OpenShift - not exposed NetworkPolicy", "runtime-component-operator", notExposedNP.Labels["app.kubernetes.io/managed-by"]}, + {"Allow All NetworkPolicy", &metav1.LabelSelector{}, allowAllNP.Spec.Ingress[0].From[0].NamespaceSelector}, + } + verifyTests(testCNP, t) +} + // Partial test for unit test TestCustomizeAffinity below func partialTestCustomizeNodeAffinity(t *testing.T) { // required during scheduling ignored during execution @@ -231,7 +557,7 @@ func partialTestCustomizeNodeAffinity(t *testing.T) { ApplicationImage: appImage, Affinity: &affinityConfig, } - affinity, runtime := &corev1.Affinity{}, createRuntimeComponent(name, namespace, spec) + affinity, runtime := &corev1.Affinity{}, createRuntimeComponent(objMeta, spec) CustomizeAffinity(affinity, runtime) expectedMatchExpressions := []corev1.NodeSelectorRequirement{ @@ -250,6 +576,17 @@ func partialTestCustomizeNodeAffinity(t *testing.T) { affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Preference.MatchExpressions}, } verifyTests(testCNA, t) + + // Test nil Affinity configuration + runtime.Spec.Affinity = &appstacksv1.RuntimeComponentAffinity{ + NodeAffinityLabels: labels, + } + CustomizeAffinity(affinity, runtime) + + testCNA = []Test{ + {"Nil Node Affinity", expectedMatchExpressions[1], affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0]}, + } + verifyTests(testCNA, t) } // Partial test for unit test TestCustomizeAffinity below @@ -281,7 +618,7 @@ func partialTestCustomizePodAffinity(t *testing.T) { ApplicationImage: appImage, Affinity: &affinityConfig, } - affinity, runtime := &corev1.Affinity{}, createRuntimeComponent(name, namespace, spec) + affinity, runtime := &corev1.Affinity{}, createRuntimeComponent(objMeta, spec) CustomizeAffinity(affinity, runtime) testCPA := []Test{ @@ -317,7 +654,7 @@ func TestCustomizePodSpecAnnotations(t *testing.T) { } // No dep or set, annotation should be empty - pts1, runtime1 := &corev1.PodTemplateSpec{}, createRuntimeComponent(name, namespace, spec) + pts1, runtime1 := &corev1.PodTemplateSpec{}, createRuntimeComponent(objMeta, spec) CustomizePodSpec(pts1, runtime1) annolen1 := len(pts1.Annotations) testAnnotations1 := []Test{ @@ -327,7 +664,7 @@ func TestCustomizePodSpecAnnotations(t *testing.T) { // dep but not set, annotation should be dep annotations spec.Deployment = deployment - pts2, runtime2 := &corev1.PodTemplateSpec{}, createRuntimeComponent(name, namespace, spec) + pts2, runtime2 := &corev1.PodTemplateSpec{}, createRuntimeComponent(objMeta, spec) CustomizePodSpec(pts2, runtime2) annolen2 := len(pts2.Annotations) anno2 := pts2.Annotations["depAnno"] @@ -340,7 +677,7 @@ func TestCustomizePodSpecAnnotations(t *testing.T) { // set but not dep, annotation should be set annotations spec.Deployment = nil spec.StatefulSet = statefulSet - pts3, runtime3 := &corev1.PodTemplateSpec{}, createRuntimeComponent(name, namespace, spec) + pts3, runtime3 := &corev1.PodTemplateSpec{}, createRuntimeComponent(objMeta, spec) CustomizePodSpec(pts3, runtime3) annolen3 := len(pts3.Annotations) anno3 := pts3.Annotations["setAnno"] @@ -352,7 +689,7 @@ func TestCustomizePodSpecAnnotations(t *testing.T) { // 
dep and set, annotation should be set annotations spec.Deployment = deployment - pts4, runtime4 := &corev1.PodTemplateSpec{}, createRuntimeComponent(name, namespace, spec) + pts4, runtime4 := &corev1.PodTemplateSpec{}, createRuntimeComponent(objMeta, spec) CustomizePodSpec(pts4, runtime4) annolen4 := len(pts4.Annotations) anno4 := pts4.Annotations["setAnno"] @@ -379,7 +716,7 @@ func TestCustomizePodSpec(t *testing.T) { EnvFrom: envFrom, Volumes: []corev1.Volume{volume}, } - pts, runtime := &corev1.PodTemplateSpec{}, createRuntimeComponent(name, namespace, spec) + pts, runtime := &corev1.PodTemplateSpec{}, createRuntimeComponent(objMeta, spec) // else cond CustomizePodSpec(pts, runtime) noCont := len(pts.Spec.Containers) @@ -392,35 +729,41 @@ func TestCustomizePodSpec(t *testing.T) { spec = appstacksv1.RuntimeComponentSpec{ ApplicationImage: appImage, Service: &appstacksv1.RuntimeComponentService{ - Type: &serviceType, + Type: &serviceClusterIPType, Port: 8443, TargetPort: &targetPort, + PortName: svcPortName, }, Resources: resourceContraints, Probes: probes, VolumeMounts: []corev1.VolumeMount{volumeMount}, PullPolicy: &pullPolicy, + PullSecret: &pullSecret, Env: env, EnvFrom: envFrom, Volumes: []corev1.Volume{volume}, ServiceAccountName: &serviceAccountName, Affinity: &affinityConfig, + SecurityContext: &corev1.SecurityContext{}, } - runtime = createRuntimeComponent(name, namespace, spec) + runtime = createRuntimeComponent(objMeta, spec) + testServiceAccountPullSecretExists(t, runtime) CustomizePodSpec(pts, runtime) ptsCSAN := pts.Spec.ServiceAccountName - // affinity tests + // Affinity tests affArchs := pts.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Values[0] weight := pts.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Weight prefAffArchs := pts.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Preference.MatchExpressions[0].Values[0] assignedTPort := pts.Spec.Containers[0].Ports[0].ContainerPort + portName := pts.Spec.Containers[0].Ports[0].Name testCPS := []Test{ {"No containers", 1, noCont}, {"No port", 1, noPorts}, {"No ServiceAccountName", name, ptsSAN}, {"ServiceAccountName available", serviceAccountName, ptsCSAN}, + {"Service port name", svcPortName, portName}, } verifyTests(testCPS, t) @@ -433,13 +776,22 @@ func TestCustomizePodSpec(t *testing.T) { verifyTests(testCA, t) } +func testServiceAccountPullSecretExists(t *testing.T, runtime *appstacksv1.RuntimeComponent) { + ServiceAccountPullSecretExists(runtime, fclSA) + + testCSA := []Test{ + {"ServiceAccount Resource Version", serviceAccount.ResourceVersion, runtime.Status.References[common.StatusReferenceSAResourceVersion]}, + } + verifyTests(testCSA, t) +} + func TestCustomizePersistence(t *testing.T) { logger := zap.New() logf.SetLogger(logger) runtimeStatefulSet := &appstacksv1.RuntimeComponentStatefulSet{Storage: &storage} spec := appstacksv1.RuntimeComponentSpec{StatefulSet: runtimeStatefulSet} - statefulSet, runtime := &appsv1.StatefulSet{}, createRuntimeComponent(name, namespace, spec) + statefulSet, runtime := &appsv1.StatefulSet{}, createRuntimeComponent(objMeta, spec) statefulSet.Spec.Template.Spec.Containers = []corev1.Container{{}} statefulSet.Spec.Template.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{} // if vct == 0, runtimeVCT != nil, not found @@ -448,10 +800,10 @@ func TestCustomizePersistence(t *testing.T) { ssMountPath := 
statefulSet.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath //reset - storageNilVCT := appstacksv1.RuntimeComponentStorage{Size: "10Mi", MountPath: "/mnt/data", VolumeClaimTemplate: nil} + storageNilVCT := appstacksv1.RuntimeComponentStorage{Size: "10Mi", MountPath: "/mnt/data", ClassName: "storageClassName", VolumeClaimTemplate: nil} runtimeStatefulSet = &appstacksv1.RuntimeComponentStatefulSet{Storage: &storageNilVCT} spec = appstacksv1.RuntimeComponentSpec{StatefulSet: runtimeStatefulSet} - statefulSet, runtime = &appsv1.StatefulSet{}, createRuntimeComponent(name, namespace, spec) + statefulSet, runtime = &appsv1.StatefulSet{}, createRuntimeComponent(objMeta, spec) statefulSet.Spec.Template.Spec.Containers = []corev1.Container{{}} statefulSet.Spec.Template.Spec.Containers[0].VolumeMounts = append(statefulSet.Spec.Template.Spec.Containers[0].VolumeMounts, volumeMount) @@ -464,6 +816,7 @@ func TestCustomizePersistence(t *testing.T) { {"PVC size", storage.Size, size.String()}, {"Mount path", storage.MountPath, ssMountPath}, {"Volume Mount Name", volumeCT.Name, ssVolumeMountName}, + {"Storage Class Name", "storageClassName", *statefulSet.Spec.VolumeClaimTemplates[0].Spec.StorageClassName}, } verifyTests(testCP, t) } @@ -473,20 +826,30 @@ func TestCustomizeServiceAccount(t *testing.T) { logf.SetLogger(logger) spec := appstacksv1.RuntimeComponentSpec{PullSecret: &pullSecret} - sa, runtime := &corev1.ServiceAccount{}, createRuntimeComponent(name, namespace, spec) - CustomizeServiceAccount(sa, runtime, fcl) + sa, runtime := &corev1.ServiceAccount{}, createRuntimeComponent(objMeta, spec) + CustomizeServiceAccount(sa, runtime, fclSecret) emptySAIPS := sa.ImagePullSecrets[0].Name newSecret := "my-new-secret" spec = appstacksv1.RuntimeComponentSpec{PullSecret: &newSecret} - runtime = createRuntimeComponent(name, namespace, spec) - CustomizeServiceAccount(sa, runtime, fcl) + runtime = createRuntimeComponent(objMeta, spec) + CustomizeServiceAccount(sa, runtime, fclSecret) testCSA := []Test{ {"ServiceAccount image pull secrets is empty", pullSecret, emptySAIPS}, {"ServiceAccount image pull secrets", newSecret, sa.ImagePullSecrets[1].Name}, } verifyTests(testCSA, t) + + wrongPullSecret := "wrong-pull-secret" + runtime.Spec.PullSecret = &wrongPullSecret + CustomizeServiceAccount(sa, runtime, fclSecret) + + testCSA = []Test{ + {"ServiceAccount image pull secret is deleted", 1, len(sa.ImagePullSecrets)}, + } + verifyTests(testCSA, t) + } func TestCustomizeKnativeService(t *testing.T) { @@ -502,7 +865,7 @@ func TestCustomizeKnativeService(t *testing.T) { EnvFrom: envFrom, Volumes: []corev1.Volume{volume}, } - ksvc, runtime := &servingv1.Service{}, createRuntimeComponent(name, namespace, spec) + ksvc, runtime := &servingv1.Service{}, createRuntimeComponent(objMeta, spec) CustomizeKnativeService(ksvc, runtime) ksvcNumPorts := len(ksvc.Spec.Template.Spec.Containers[0].Ports) @@ -527,7 +890,7 @@ func TestCustomizeKnativeService(t *testing.T) { Probes: probes, Expose: &expose, } - runtime = createRuntimeComponent(name, namespace, spec) + runtime = createRuntimeComponent(objMeta, spec) CustomizeKnativeService(ksvc, runtime) ksvcLabelTrueExpose := ksvc.Labels["serving.knative.dev/visibility"] @@ -558,13 +921,13 @@ func TestCustomizeHPA(t *testing.T) { logf.SetLogger(logger) spec := appstacksv1.RuntimeComponentSpec{Autoscaling: autoscaling} - hpa, runtime := &autoscalingv1.HorizontalPodAutoscaler{}, createRuntimeComponent(name, namespace, spec) + hpa, runtime := &autoscalingv1.HorizontalPodAutoscaler{}, 
createRuntimeComponent(objMeta, spec) CustomizeHPA(hpa, runtime) nilSTRKind := hpa.Spec.ScaleTargetRef.Kind runtimeStatefulSet := &appstacksv1.RuntimeComponentStatefulSet{Storage: &storage} spec = appstacksv1.RuntimeComponentSpec{Autoscaling: autoscaling, StatefulSet: runtimeStatefulSet} - runtime = createRuntimeComponent(name, namespace, spec) + runtime = createRuntimeComponent(objMeta, spec) CustomizeHPA(hpa, runtime) STRKind := hpa.Spec.ScaleTargetRef.Kind @@ -580,6 +943,40 @@ func TestCustomizeHPA(t *testing.T) { verifyTests(testCHPA, t) } +func TestValidate(t *testing.T) { + logger := zap.New() + logf.SetLogger(logger) + + spec := appstacksv1.RuntimeComponentSpec{ + StatefulSet: &appstacksv1.RuntimeComponentStatefulSet{ + Storage: &appstacksv1.RuntimeComponentStorage{}, + }, + } + runtime := createRuntimeComponent(objMeta, spec) + valid1, _ := Validate(runtime) + + spec = appstacksv1.RuntimeComponentSpec{ + StatefulSet: &appstacksv1.RuntimeComponentStatefulSet{ + Storage: &appstacksv1.RuntimeComponentStorage{ + Size: "size", + }, + }, + } + runtime = createRuntimeComponent(objMeta, spec) + valid2, _ := Validate(runtime) + + spec = appstacksv1.RuntimeComponentSpec{StatefulSet: &appstacksv1.RuntimeComponentStatefulSet{Storage: &storage}} + runtime = createRuntimeComponent(objMeta, spec) + valid3, _ := Validate(runtime) + + testValidate := []Test{ + {"StatefulSet storage validation fail from empty size", false, valid1}, + {"StatefulSet storage validation fail from size parsing error", false, valid2}, + {"StatefulSet storage validation passed", true, valid3}, + } + verifyTests(testValidate, t) +} + func TestCustomizeServiceMonitor(t *testing.T) { logger := zap.New() @@ -589,10 +986,12 @@ func TestCustomizeServiceMonitor(t *testing.T) { params := map[string][]string{ "params": {"param1", "param2"}, } + targetPortConfig := intstr.FromInt(9000) // Endpoint for runtime endpointApp := &prometheusv1.Endpoint{ Port: "web", + TargetPort: &targetPortConfig, Scheme: "myScheme", Interval: "myInterval", Path: "myPath", @@ -612,7 +1011,7 @@ func TestCustomizeServiceMonitor(t *testing.T) { selector := &metav1.LabelSelector{MatchLabels: labelMap} smspec := &prometheusv1.ServiceMonitorSpec{Endpoints: endpointsSM, Selector: *selector} - sm, runtime := &prometheusv1.ServiceMonitor{Spec: *smspec}, createRuntimeComponent(name, namespace, spec) + sm, runtime := &prometheusv1.ServiceMonitor{Spec: *smspec}, createRuntimeComponent(objMeta, spec) runtime.Spec.Monitoring = &appstacksv1.RuntimeComponentMonitoring{Labels: labelMap, Endpoints: endpointsApp} CustomizeServiceMonitor(sm, runtime) @@ -654,6 +1053,28 @@ func TestCustomizeServiceMonitor(t *testing.T) { } verifyTests(testSM, t) + + var nilTargetPortConfig *intstr.IntOrString + runtime.Spec.Monitoring.Endpoints[0].Port = "" + runtime.Spec.Monitoring.Endpoints[0].TargetPort = nilTargetPortConfig + CustomizeServiceMonitor(sm, runtime) + smPort := sm.Spec.Endpoints[0].Port + + runtime.Spec.Service.PortName = "" + CustomizeServiceMonitor(sm, runtime) + smPortTCP := sm.Spec.Endpoints[0].Port + + runtime.Spec.Monitoring = &appstacksv1.RuntimeComponentMonitoring{Labels: labelMap} + CustomizeServiceMonitor(sm, runtime) + serverName := name + "." 
+ namespace + ".svc" + + testSM = []Test{ + {"Service Monitor endpoints port", svcPortName, smPort}, + {"Service Monitor endpoints port without port name", strconv.Itoa(int(runtime.Spec.Service.Port)) + "-tcp", smPortTCP}, + {"Service Monitor server name", serverName, sm.Spec.Endpoints[0].TLSConfig.ServerName}, + } + + verifyTests(testSM, t) } func TestGetCondition(t *testing.T) { @@ -671,6 +1092,11 @@ func TestGetCondition(t *testing.T) { cond := GetCondition(conditionType, status) testGC := []Test{{"Set status condition", status.Conditions[0].Status, cond.Status}} verifyTests(testGC, t) + + status = &appstacksv1.RuntimeComponentStatus{} + cond = GetCondition(conditionType, status) + testGC = []Test{{"Set status condition", 0, len(status.Conditions)}} + verifyTests(testGC, t) } func TestSetCondition(t *testing.T) { @@ -728,6 +1154,153 @@ func TestGetWatchNamespaces(t *testing.T) { verifyTests(configMapConstTests, t) } +func TestBuildServiceBindingSecretName(t *testing.T) { + // Set the logger to development mode for verbose logs + logger := zap.New() + logf.SetLogger(logger) + + sbSecretName := BuildServiceBindingSecretName(name, namespace) + sbSecretNameTests := []Test{ + {"Service binding secret name", namespace + "-" + name, sbSecretName}, + } + verifyTests(sbSecretNameTests, t) +} + +func TestAppendIfNotSubstring(t *testing.T) { + // Set the logger to development mode for verbose logs + logger := zap.New() + logf.SetLogger(logger) + + subStr := "c" + str := "a,b" + + result1 := AppendIfNotSubstring(subStr, "") + result2 := AppendIfNotSubstring(subStr, str) + result3 := AppendIfNotSubstring(subStr, result2) + subStrTest := []Test{ + {"Substring check when string is empty", subStr, result1}, + {"Substring check when not substring", str + "," + subStr, result2}, + {"Substring check when substring", str + "," + subStr, result3}, + } + verifyTests(subStrTest, t) +} + +func TestEnsureOwnerRef(t *testing.T) { + logger := zap.New() + logf.SetLogger(logger) + + spec := appstacksv1.RuntimeComponentSpec{Service: service} + runtime := createRuntimeComponent(objMeta, spec) + + newOwnerRef := metav1.OwnerReference{APIVersion: "test", Kind: "test", Name: "testRef"} + EnsureOwnerRef(runtime, newOwnerRef) + ownerRef := runtime.GetOwnerReferences()[0] + + newOwnerRef2 := metav1.OwnerReference{APIVersion: "test", Kind: "test", Name: "testRef", UID: "test"} + EnsureOwnerRef(runtime, newOwnerRef2) + ownerRef2 := runtime.GetOwnerReferences()[0] + + testOR := []Test{ + {"Test owner reference", ownerRef, newOwnerRef}, + {"Test owner reference 2", ownerRef2, newOwnerRef2}, + } + verifyTests(testOR, t) +} + +func TestGetOpenShiftAnnotations(t *testing.T) { + logger := zap.New() + logf.SetLogger(logger) + + spec := appstacksv1.RuntimeComponentSpec{Service: service} + runtime := createRuntimeComponent(objMeta, spec) + + annos := map[string]string{ + "image.opencontainers.org/source": "source", + } + runtime.Annotations = annos + + result := GetOpenShiftAnnotations(runtime) + + annos = map[string]string{ + "app.openshift.io/vcs-uri": "source", + } + testOSA := []Test{ + {"OpenShiftAnnotations", annos["app.openshift.io/vcs-uri"], result["app.openshift.io/vcs-uri"]}, + } + verifyTests(testOSA, t) +} + +func TestIsClusterWide(t *testing.T) { + logger := zap.New() + logf.SetLogger(logger) + + namespaces := []string{"namespace"} + result := IsClusterWide(namespaces) + + testCW := []Test{ + {"One namespace", false, result}, + } + verifyTests(testCW, t) + + namespaces = []string{""} + result = IsClusterWide(namespaces) 
+ + testCW = []Test{ + {"All namespaces", true, result}, + } + verifyTests(testCW, t) + + namespaces = []string{"namespace1", "namespace2"} + result = IsClusterWide(namespaces) + + testCW = []Test{ + {"Two namespaces", false, result}, + } + verifyTests(testCW, t) +} + +func TestCustomizeIngress(t *testing.T) { + logger := zap.New() + logf.SetLogger(logger) + + var ISPathType networkingv1.PathType = networkingv1.PathType("ImplementationSpecific") + var prefixPathType networkingv1.PathType = networkingv1.PathType("Prefix") + ing := networkingv1.Ingress{} + + route := appstacksv1.RuntimeComponentRoute{} + spec := appstacksv1.RuntimeComponentSpec{Service: service, Route: &route} + runtime := createRuntimeComponent(objMeta, spec) + CustomizeIngress(&ing, runtime) + defaultPathType := *ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths[0].PathType + + route = appstacksv1.RuntimeComponentRoute{Host: "routeHost", Path: "myPath", PathType: prefixPathType, Annotations: annotations} + spec = appstacksv1.RuntimeComponentSpec{Service: service, Route: &route} + runtime = createRuntimeComponent(objMeta, spec) + CustomizeIngress(&ing, runtime) + + testIng := []Test{ + {"Ingress Annotations", annotations, ing.Annotations}, + {"Ingress Route Host", "routeHost", ing.Spec.Rules[0].Host}, + {"Ingress Route Path", "myPath", ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths[0].Path}, + {"Ingress Route PathType", prefixPathType, *ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths[0].PathType}, + {"Ingress Route Default PathType", ISPathType, defaultPathType}, + {"Ingress Route ServiceName", name, ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths[0].Backend.Service.Name}, + {"Ingress Route Service Port Name", strconv.Itoa(int(runtime.Spec.Service.Port)) + "-tcp", ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths[0].Backend.Service.Port.Name}, + {"Ingress TLS", 0, len(ing.Spec.TLS)}, + } + verifyTests(testIng, t) + + certSecretRef := "my-ref" + route = appstacksv1.RuntimeComponentRoute{Host: "routeHost", Path: "myPath", CertificateSecretRef: &certSecretRef} + + CustomizeIngress(&ing, runtime) + + testIng = []Test{ + {"Ingress TLS SecretName", certSecretRef, ing.Spec.TLS[0].SecretName}, + } + verifyTests(testIng, t) +} + // Helper Functions // Unconditionally set the proper tags for an enabled runtime component func createAppDefinitionTags(app *appstacksv1.RuntimeComponent) (map[string]string, map[string]string) { @@ -748,9 +1321,9 @@ func createAppDefinitionTags(app *appstacksv1.RuntimeComponent) (map[string]stri } return label, annotations } -func createRuntimeComponent(n, ns string, spec appstacksv1.RuntimeComponentSpec) *appstacksv1.RuntimeComponent { +func createRuntimeComponent(objMeta metav1.ObjectMeta, spec appstacksv1.RuntimeComponentSpec) *appstacksv1.RuntimeComponent { app := &appstacksv1.RuntimeComponent{ - ObjectMeta: metav1.ObjectMeta{Name: n, Namespace: ns}, + ObjectMeta: metav1.ObjectMeta{Name: objMeta.GetName(), Namespace: objMeta.GetNamespace()}, Spec: spec, } return app @@ -771,7 +1344,7 @@ func makeInLabelSelector(key string, values []string) metav1.LabelSelector { func verifyTests(tests []Test, t *testing.T) { for _, tt := range tests { - if !reflect.DeepEqual(tt.actual, tt.expected) { + if !reflect.DeepEqual(tt.expected, tt.actual) { t.Errorf("%s test expected: (%v) actual: (%v)", tt.test, tt.expected, tt.actual) } }
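The last hunk swaps the reflect.DeepEqual arguments; DeepEqual is symmetric, so behavior is unchanged and the call now simply reads in the same expected-then-actual order as the error message. For reference, a minimal sketch of how a new check slots into this table-driven pattern, reusing the helpers defined above (the test function itself is hypothetical):

    // Each Test row is {description, expected, actual}; verifyTests reports
    // any row where the two values differ.
    func TestCreateRuntimeComponentMeta(t *testing.T) {
        spec := appstacksv1.RuntimeComponentSpec{Service: service}
        runtime := createRuntimeComponent(objMeta, spec)

        tests := []Test{
            {"RuntimeComponent name", name, runtime.GetName()},
            {"RuntimeComponent namespace", namespace, runtime.GetNamespace()},
        }
        verifyTests(tests, t)
    }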