diff --git a/cmd/agent.go b/cmd/agent.go index 54e69742..deb3f5db 100644 --- a/cmd/agent.go +++ b/cmd/agent.go @@ -4,6 +4,9 @@ import ( "os" "time" + "github.com/pluralsh/deployment-operator/internal/utils" + "github.com/pluralsh/deployment-operator/pkg/controller/stacks" + "github.com/pluralsh/deployment-operator/pkg/controller" "github.com/pluralsh/deployment-operator/pkg/controller/namespaces" "github.com/pluralsh/deployment-operator/pkg/controller/pipelinegates" @@ -69,6 +72,18 @@ func runAgent(opt *options, config *rest.Config, ctx context.Context, k8sClient Queue: ns.NamespaceQueue, }) + namespace, err := utils.GetOperatorNamespace() + if err != nil { + setupLog.Error(err, "unable to get operator namespace") + os.Exit(1) + } + + s := stacks.NewStackReconciler(mgr.GetClient(), k8sClient, r, namespace, opt.consoleUrl, opt.deployToken) + mgr.AddController(&controller.Controller{ + Name: "Stack Controller", + Do: s, + Queue: s.StackQueue, + }) if err := mgr.Start(); err != nil { setupLog.Error(err, "unable to start controller manager") os.Exit(1) diff --git a/cmd/main.go b/cmd/main.go index e5370c49..e9f28261 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -104,6 +104,14 @@ func main() { }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "HealthConvert") } + if err = (&controller.StackRunJobReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + ConsoleClient: ctrlMgr.GetClient(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "StackRun") + } + //+kubebuilder:scaffold:builder if err = (&controller.PipelineGateReconciler{ diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 0bf0d699..3627b27d 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -5,19 +5,5 @@ resources: - bases/deployments.plural.sh_luascripts.yaml #+kubebuilder:scaffold:crdkustomizeresource -patches: -# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. -# patches here are for enabling the conversion webhook for each CRD -#- path: patches/webhook_in_luascripts.yaml -#+kubebuilder:scaffold:crdkustomizewebhookpatch - -# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. -# patches here are for enabling the CA injection for each CRD -#- path: patches/cainjection_in_luascripts.yaml -#+kubebuilder:scaffold:crdkustomizecainjectionpatch - -# [WEBHOOK] To enable webhook, uncomment the following section -# the following config is for teaching kustomize how to do kustomization for CRDs. 
- -#configurations: -#- kustomizeconfig.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization diff --git a/go.mod b/go.mod index 11f91ece..d3901482 100644 --- a/go.mod +++ b/go.mod @@ -19,13 +19,14 @@ require ( github.com/open-policy-agent/gatekeeper/v3 v3.15.1 github.com/orcaman/concurrent-map/v2 v2.0.1 github.com/pkg/errors v0.9.1 - github.com/pluralsh/console-client-go v0.5.6 + github.com/pluralsh/console-client-go v0.5.8 github.com/pluralsh/controller-reconcile-helper v0.0.4 github.com/pluralsh/gophoenix v0.1.3-0.20231201014135-dff1b4309e34 github.com/pluralsh/polly v0.1.10 github.com/samber/lo v1.39.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 + github.com/vektah/gqlparser/v2 v2.5.11 github.com/vektra/mockery/v2 v2.39.0 github.com/vmware-tanzu/velero v1.13.0 github.com/yuin/gopher-lua v1.1.1 @@ -192,7 +193,6 @@ require ( github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.4.2 // indirect github.com/ugorji/go/codec v1.1.7 // indirect - github.com/vektah/gqlparser/v2 v2.5.11 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect diff --git a/go.sum b/go.sum index 50bf5040..33ddc0e7 100644 --- a/go.sum +++ b/go.sum @@ -526,8 +526,8 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pluralsh/console-client-go v0.5.6 h1:8CUQco0vJehtKabVVNHAkFE4V9UI9MaMKvYNgQRrJdo= -github.com/pluralsh/console-client-go v0.5.6/go.mod h1:eyCiLA44YbXiYyJh8303jk5JdPkt9McgCo5kBjk4lKo= +github.com/pluralsh/console-client-go v0.5.8 h1:Qm7vS+gCbmWqy5i4saLPc5/SUZaW6RCzxWF+uxyPA+Y= +github.com/pluralsh/console-client-go v0.5.8/go.mod h1:eyCiLA44YbXiYyJh8303jk5JdPkt9McgCo5kBjk4lKo= github.com/pluralsh/controller-reconcile-helper v0.0.4 h1:1o+7qYSyoeqKFjx+WgQTxDz4Q2VMpzprJIIKShxqG0E= github.com/pluralsh/controller-reconcile-helper v0.0.4/go.mod h1:AfY0gtteD6veBjmB6jiRx/aR4yevEf6K0M13/pGan/s= github.com/pluralsh/gophoenix v0.1.3-0.20231201014135-dff1b4309e34 h1:ab2PN+6if/Aq3/sJM0AVdy1SYuMAnq4g20VaKhTm/Bw= diff --git a/internal/controller/backup_controller.go b/internal/controller/backup_controller.go index 91a17275..626dcccb 100644 --- a/internal/controller/backup_controller.go +++ b/internal/controller/backup_controller.go @@ -43,12 +43,12 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr // Read resource from Kubernetes cluster. backup := &velerov1.Backup{} if err := r.Get(ctx, req.NamespacedName, backup); err != nil { - logger.Error(err, "Unable to fetch backup") + logger.Error(err, "unable to fetch backup") return ctrl.Result{}, k8sClient.IgnoreNotFound(err) } // Upsert backup data to the Console. 
- logger.Info("Cluster backup saved", "name", backup.Name, "namespace", backup.Namespace) + logger.Info("cluster backup saved", "name", backup.Name, "namespace", backup.Namespace) _, err := r.ConsoleClient.SaveClusterBackup(console.BackupAttributes{ Name: backup.Name, Namespace: backup.Namespace, diff --git a/internal/controller/pipelinegate_controller_test.go b/internal/controller/pipelinegate_controller_test.go new file mode 100644 index 00000000..2163250a --- /dev/null +++ b/internal/controller/pipelinegate_controller_test.go @@ -0,0 +1,160 @@ +package controller + +import ( + "context" + "time" + + "github.com/go-logr/logr" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + console "github.com/pluralsh/console-client-go" + "github.com/pluralsh/deployment-operator/api/v1alpha1" + "github.com/pluralsh/deployment-operator/pkg/client" + "github.com/pluralsh/deployment-operator/pkg/test/common" + "github.com/pluralsh/deployment-operator/pkg/test/mocks" + "github.com/samber/lo" + "github.com/stretchr/testify/mock" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var _ = Describe("PipelineGate Controller", Ordered, func() { + Context("When reconciling a resource", func() { + const ( + gateName = "gate-test" + namespace = "default" + id = "123" + raw = `{"backoffLimit":4,"template":{"metadata":{"namespace":"default","creationTimestamp":null},"spec":{"containers":[{"name":"pi","image":"perl:5.34.0","command":["perl","-Mbignum=bpi","-wle","print bpi(2000)"],"resources":{}}],"restartPolicy":"Never"}}}` + ) + + gateCache := client.NewCache[console.PipelineGateFragment](time.Second, func(id string) (*console.PipelineGateFragment, error) { + return &console.PipelineGateFragment{ + ID: id, + Name: "test", + Spec: &console.GateSpecFragment{ + Job: &console.JobSpecFragment{ + Namespace: namespace, + Raw: lo.ToPtr(raw), + }, + }, + Status: nil, + }, nil + }) + + ctx := context.Background() + gateNamespacedName := types.NamespacedName{Name: gateName, Namespace: namespace} + pipelineGate := &v1alpha1.PipelineGate{} + + BeforeAll(func() { + By("Creating pipeline gate") + err := kClient.Get(ctx, gateNamespacedName, pipelineGate) + if err != nil && errors.IsNotFound(err) { + resource := &v1alpha1.PipelineGate{ + ObjectMeta: metav1.ObjectMeta{ + Name: gateName, + Namespace: namespace, + }, + Spec: v1alpha1.PipelineGateSpec{ + ID: id, + Name: "test", + Type: v1alpha1.GateType(console.GateTypeJob), + GateSpec: &v1alpha1.GateSpec{ + JobSpec: &batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "image1", + }, + }, + }, + }, + }, + }, + }, + } + Expect(kClient.Create(ctx, resource)).To(Succeed()) + } + + }) + + It("should set state pending", func() { + fakeConsoleClient := mocks.NewClientMock(mocks.TestingT) + fakeConsoleClient.On("UpdateGate", mock.Anything, mock.Anything).Return(nil) + reconciler := &PipelineGateReconciler{ + Client: kClient, + ConsoleClient: fakeConsoleClient, + GateCache: gateCache, + Scheme: kClient.Scheme(), + Log: logr.Logger{}, + } + _, err := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: gateNamespacedName}) + Expect(err).NotTo(HaveOccurred()) + + existingGate := &v1alpha1.PipelineGate{} + Expect(kClient.Get(ctx, gateNamespacedName, existingGate)).NotTo(HaveOccurred()) + 
Expect(*existingGate.Status.State).Should(Equal(v1alpha1.GateState(console.GateStatePending))) + + }) + + It("should reconcile Pending Gate", func() { + fakeConsoleClient := mocks.NewClientMock(mocks.TestingT) + fakeConsoleClient.On("UpdateGate", mock.Anything, mock.Anything).Return(nil) + reconciler := &PipelineGateReconciler{ + Client: kClient, + ConsoleClient: fakeConsoleClient, + GateCache: gateCache, + Scheme: kClient.Scheme(), + Log: logr.Logger{}, + } + _, err := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: gateNamespacedName}) + Expect(err).NotTo(HaveOccurred()) + + existingGate := &v1alpha1.PipelineGate{} + Expect(kClient.Get(ctx, gateNamespacedName, existingGate)).NotTo(HaveOccurred()) + Expect(*existingGate.Status.State).Should(Equal(v1alpha1.GateState(console.GateStateRunning))) + existingJob := &batchv1.Job{} + Expect(kClient.Get(ctx, gateNamespacedName, existingJob)).NotTo(HaveOccurred()) + }) + + It("should open Gate", func() { + fakeConsoleClient := mocks.NewClientMock(mocks.TestingT) + fakeConsoleClient.On("UpdateGate", mock.Anything, mock.Anything).Return(nil) + reconciler := &PipelineGateReconciler{ + Client: kClient, + ConsoleClient: fakeConsoleClient, + GateCache: gateCache, + Scheme: kClient.Scheme(), + Log: logr.Logger{}, + } + + existingJob := &batchv1.Job{} + Expect(kClient.Get(ctx, gateNamespacedName, existingJob)).NotTo(HaveOccurred()) + + Expect(common.MaybePatch(kClient, existingJob, + func(p *batchv1.Job) { + p.Status.Conditions = []batchv1.JobCondition{ + { + Type: batchv1.JobComplete, + Status: corev1.ConditionTrue, + }, + } + })).To(Succeed()) + + _, err := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: gateNamespacedName}) + Expect(err).NotTo(HaveOccurred()) + + existingGate := &v1alpha1.PipelineGate{} + Expect(kClient.Get(ctx, gateNamespacedName, existingGate)).NotTo(HaveOccurred()) + Expect(*existingGate.Status.State).Should(Equal(v1alpha1.GateState(console.GateStateOpen))) + + Expect(kClient.Delete(ctx, existingGate)).To(Succeed()) + Expect(kClient.Delete(ctx, existingJob)).To(Succeed()) + }) + }) +}) diff --git a/internal/controller/stackrunjob_controller.go b/internal/controller/stackrunjob_controller.go new file mode 100644 index 00000000..20e35671 --- /dev/null +++ b/internal/controller/stackrunjob_controller.go @@ -0,0 +1,195 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package controller
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	clienterrors "github.com/pluralsh/deployment-operator/internal/errors"
+	"github.com/pluralsh/deployment-operator/pkg/controller/stacks"
+	"github.com/pluralsh/polly/algorithms"
+	"github.com/samber/lo"
+	"k8s.io/apimachinery/pkg/labels"
+
+	console "github.com/pluralsh/console-client-go"
+	batchv1 "k8s.io/api/batch/v1"
+	corev1 "k8s.io/api/core/v1"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+
+	"github.com/pluralsh/deployment-operator/pkg/client"
+	"k8s.io/apimachinery/pkg/runtime"
+	ctrl "sigs.k8s.io/controller-runtime"
+	k8sClient "sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+const jobSelector = "stackrun.deployments.plural.sh"
+
+// StackRunJobReconciler reconciles a Job resource.
+type StackRunJobReconciler struct {
+	k8sClient.Client
+	Scheme        *runtime.Scheme
+	ConsoleClient client.Client
+}
+
+// Reconcile ensures that the stack run status in the Console stays in sync with its job in the Kubernetes cluster.
+func (r *StackRunJobReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	logger := log.FromContext(ctx)
+
+	// Read resource from Kubernetes cluster.
+	job := &batchv1.Job{}
+	if err := r.Get(ctx, req.NamespacedName, job); err != nil {
+		logger.Error(err, "unable to fetch job")
+		return ctrl.Result{}, k8sClient.IgnoreNotFound(err)
+	}
+
+	stackRunID := getStackRunID(job)
+	stackRun, err := r.ConsoleClient.GetStackRun(stackRunID)
+	if err != nil {
+		if clienterrors.IsNotFound(err) {
+			return ctrl.Result{}, nil
+		}
+		return ctrl.Result{}, err
+	}
+
+	// Update pending or running step statuses once the stack run itself has finished.
+	for _, step := range stackRun.Steps {
+		if update := r.getStepStatusUpdate(stackRun.Status, step.Status); update != nil {
+			_, err := r.ConsoleClient.UpdateStackRunStep(step.ID, console.RunStepAttributes{Status: *update})
+			return ctrl.Result{}, err
+		}
+	}
+
+	// Exit if stack run is not in running state (run status already updated),
+	// or if the job is still running (harness controls run status).
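+	// Note: job.Status.CompletionTime is only set for jobs that finish
+	// successfully, so completion is detected via the success/failure
+	// helpers below rather than via the completion timestamp alone.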
+	if stackRun.Status != console.StackStatusRunning || (!hasSucceeded(job) && !hasFailed(job)) {
+		return ctrl.Result{}, nil
+	}
+
+	if hasSucceeded(job) {
+		logger.V(2).Info("stack run job succeeded", "name", job.Name, "namespace", job.Namespace)
+		_, err := r.ConsoleClient.UpdateStackRun(stackRunID, console.StackRunAttributes{
+			Status: console.StackStatusSuccessful,
+		})
+		return ctrl.Result{}, err
+	}
+
+	if hasFailed(job) {
+		logger.V(2).Info("stack run job failed", "name", job.Name, "namespace", job.Namespace)
+		status, err := r.getJobPodStatus(ctx, job.Spec.Selector.MatchLabels)
+		if err != nil {
+			logger.Error(err, "unable to get job pod status")
+		}
+		_, err = r.ConsoleClient.UpdateStackRun(stackRunID, console.StackRunAttributes{
+			Status: status,
+		})
+		return ctrl.Result{}, err
+	}
+
+	return ctrl.Result{}, nil
+}
+
+func (r *StackRunJobReconciler) getStepStatusUpdate(stackStatus console.StackStatus, stepStatus console.StepStatus) *console.StepStatus {
+	if stepStatus != console.StepStatusPending && stepStatus != console.StepStatusRunning {
+		return nil
+	}
+
+	if stackStatus == console.StackStatusSuccessful {
+		return lo.ToPtr(console.StepStatusSuccessful)
+	}
+
+	if stackStatus == console.StackStatusFailed || stackStatus == console.StackStatusCancelled {
+		return lo.ToPtr(console.StepStatusFailed)
+	}
+
+	return nil
+}
+
+func (r *StackRunJobReconciler) getJobPodStatus(ctx context.Context, selector map[string]string) (console.StackStatus, error) {
+	pod, err := r.getJobPod(ctx, selector)
+	if err != nil {
+		return console.StackStatusFailed, err
+	}
+
+	return r.getPodStatus(pod)
+}
+
+func (r *StackRunJobReconciler) getJobPod(ctx context.Context, selector map[string]string) (*corev1.Pod, error) {
+	podList := &corev1.PodList{}
+	if err := r.List(ctx, podList, &k8sClient.ListOptions{LabelSelector: labels.SelectorFromSet(selector)}); err != nil {
+		return nil, err
+	}
+
+	if len(podList.Items) == 0 {
+		return nil, fmt.Errorf("no pods found")
+	}
+
+	return &podList.Items[0], nil
+}
+
+func (r *StackRunJobReconciler) getPodStatus(pod *corev1.Pod) (console.StackStatus, error) {
+	statusIndex := algorithms.Index(pod.Status.ContainerStatuses, func(status corev1.ContainerStatus) bool {
+		return status.Name == stacks.DefaultJobContainer
+	})
+	if statusIndex == -1 {
+		return console.StackStatusFailed, fmt.Errorf("no job container with name %s found", stacks.DefaultJobContainer)
+	}
+
+	containerStatus := pod.Status.ContainerStatuses[statusIndex]
+	if containerStatus.State.Terminated == nil {
+		return console.StackStatusFailed, fmt.Errorf("job container is not in terminated state")
+	}
+
+	return getExitCodeStatus(containerStatus.State.Terminated.ExitCode), nil
+}
+
+func getExitCodeStatus(exitCode int32) console.StackStatus {
+	switch exitCode {
+	case 64, 66:
+		return console.StackStatusCancelled
+	case 65:
+		return console.StackStatusFailed
+	}
+
+	return console.StackStatusFailed
+}
+
+func getStackRunID(job *batchv1.Job) string {
+	return strings.TrimPrefix(job.Name, "stack-")
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *StackRunJobReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	byAnnotation := predicate.NewPredicateFuncs(func(object k8sClient.Object) bool {
+		annotations := object.GetAnnotations()
+		if annotations == nil {
+			return false
+		}
+
+		_, ok := annotations[jobSelector]
+		return ok
+	})
+
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&batchv1.Job{}).
+		WithEventFilter(byAnnotation).
+ Complete(r) +} diff --git a/internal/controller/stackrunjob_controller_test.go b/internal/controller/stackrunjob_controller_test.go new file mode 100644 index 00000000..606d920e --- /dev/null +++ b/internal/controller/stackrunjob_controller_test.go @@ -0,0 +1,179 @@ +package controller + +import ( + "context" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + console "github.com/pluralsh/console-client-go" + "github.com/pluralsh/deployment-operator/pkg/controller/stacks" + "github.com/pluralsh/deployment-operator/pkg/test/mocks" + "github.com/samber/lo" + "github.com/stretchr/testify/mock" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var _ = Describe("Stack Run Job Controller", Ordered, func() { + Context("When reconciling a resource", func() { + const ( + completedName = "stack-1" + runningName = "stack-2" + namespace = "default" + ) + + ctx := context.Background() + + completedJobNamespacedName := types.NamespacedName{Name: completedName, Namespace: namespace} + runningJobNamespacedName := types.NamespacedName{Name: runningName, Namespace: namespace} + + completedJob := &batchv1.Job{} + runningJob := &batchv1.Job{} + + BeforeAll(func() { + By("Creating stack run completed job") + err := kClient.Get(ctx, completedJobNamespacedName, completedJob) + if err != nil && errors.IsNotFound(err) { + resource := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: completedName, + Namespace: namespace, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: stacks.DefaultJobContainer, + Image: "image:v1.0.0", + Args: []string{}, + }}, + RestartPolicy: corev1.RestartPolicyNever, + }, + }, + }, + Status: batchv1.JobStatus{ + CompletionTime: lo.ToPtr(metav1.NewTime(time.Now())), + Conditions: []batchv1.JobCondition{{ + Type: batchv1.JobComplete, + Status: corev1.ConditionTrue, + }}, + }, + } + Expect(kClient.Create(ctx, resource)).To(Succeed()) + } + + By("Creating stack run running job") + err = kClient.Get(ctx, runningJobNamespacedName, runningJob) + if err != nil && errors.IsNotFound(err) { + resource := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: runningName, + Namespace: namespace, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: stacks.DefaultJobContainer, + Image: "image:v1.0.0", + Args: []string{}, + }}, + RestartPolicy: corev1.RestartPolicyNever, + }, + }, + }, + } + Expect(kClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterAll(func() { + By("Cleanup stack run running job") + runningJob := &batchv1.Job{} + Expect(kClient.Get(ctx, runningJobNamespacedName, runningJob)).NotTo(HaveOccurred()) + Expect(kClient.Delete(ctx, runningJob)).To(Succeed()) + + By("Cleanup stack run completed job") + completedJob := &batchv1.Job{} + Expect(kClient.Get(ctx, completedJobNamespacedName, completedJob)).NotTo(HaveOccurred()) + Expect(kClient.Delete(ctx, completedJob)).To(Succeed()) + }) + + It("should exit without errors and try to update stack run status", func() { + runId := strings.TrimPrefix(completedName, "stack-") + + fakeConsoleClient := mocks.NewClientMock(mocks.TestingT) + fakeConsoleClient.On("GetStackRun", mock.Anything).Return(&console.StackRunFragment{ + ID: runId, + Status: 
console.StackStatusSuccessful, + }, nil) + fakeConsoleClient.On("UpdateStackRun", runId, mock.Anything).Return(&console.StackRunFragment{}, nil) + + reconciler := &StackRunJobReconciler{ + Client: kClient, + Scheme: kClient.Scheme(), + ConsoleClient: fakeConsoleClient, + } + _, err := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: completedJobNamespacedName}) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should exit without errors as stack run was already updated", func() { + runId := strings.TrimPrefix(completedName, "stack-") + + fakeConsoleClient := mocks.NewClientMock(mocks.TestingT) + fakeConsoleClient.On("GetStackRun", mock.Anything).Return(&console.StackRunFragment{ + ID: runId, + Status: console.StackStatusSuccessful, + }, nil) + + reconciler := &StackRunJobReconciler{ + Client: kClient, + Scheme: kClient.Scheme(), + ConsoleClient: fakeConsoleClient, + } + _, err := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: completedJobNamespacedName}) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should exit without errors as stack run status was already updated", func() { + fakeConsoleClient := mocks.NewClientMock(mocks.TestingT) + fakeConsoleClient.On("GetStackRun", mock.Anything).Return(&console.StackRunFragment{ + ID: "2", + Status: console.StackStatusFailed, + }, nil) + + reconciler := &StackRunJobReconciler{ + Client: kClient, + Scheme: kClient.Scheme(), + ConsoleClient: fakeConsoleClient, + } + _, err := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: runningJobNamespacedName}) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should exit without errors as stack run job is still running", func() { + runId := strings.TrimPrefix(runningName, "stack-") + + fakeConsoleClient := mocks.NewClientMock(mocks.TestingT) + fakeConsoleClient.On("GetStackRun", mock.Anything).Return(&console.StackRunFragment{ + ID: runId, + Status: console.StackStatusRunning, + }, nil) + + reconciler := &StackRunJobReconciler{ + Client: kClient, + Scheme: kClient.Scheme(), + ConsoleClient: fakeConsoleClient, + } + _, err := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: runningJobNamespacedName}) + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 6fb97210..e00be180 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -26,7 +26,6 @@ import ( . "github.com/onsi/gomega" deploymentsv1alpha1 "github.com/pluralsh/deployment-operator/api/v1alpha1" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -35,13 +34,11 @@ import ( // These tests use Ginkgo (BDD-style Go testing framework). Refer to // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. -var cfg *rest.Config var testEnv *envtest.Environment var kClient client.Client func TestControllers(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Controller Suite") } @@ -52,21 +49,11 @@ var _ = BeforeSuite(func() { testEnv = &envtest.Environment{ CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, ErrorIfCRDPathMissing: true, - // The BinaryAssetsDirectory is only required if you want to run the tests directly - // without call the makefile target test. If not informed it will look for the - // default path defined in controller-runtime which is /usr/local/kubebuilder/. 
- // Note that you must have the required binaries setup under the bin directory to perform - // the tests directly. When we run make test it will be setup and used automatically. BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s", fmt.Sprintf("1.28.3-%s-%s", runtime.GOOS, runtime.GOARCH)), } - var err error - err = deploymentsv1alpha1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - // cfg is defined in this file globally. - cfg, err = testEnv.Start() + cfg, err := testEnv.Start() Expect(err).NotTo(HaveOccurred()) Expect(cfg).NotTo(BeNil()) @@ -76,12 +63,10 @@ var _ = BeforeSuite(func() { kClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) Expect(err).NotTo(HaveOccurred()) Expect(kClient).NotTo(BeNil()) - }) var _ = AfterSuite(func() { By("tearing down the test environment") - err := testEnv.Stop() Expect(err).NotTo(HaveOccurred()) }) diff --git a/pkg/client/console.go b/pkg/client/console.go index 445b2139..bc3cc28b 100644 --- a/pkg/client/console.go +++ b/pkg/client/console.go @@ -79,4 +79,8 @@ type Client interface { UpsertConstraints(constraints []*console.PolicyConstraintAttributes) (*console.UpsertPolicyConstraints, error) GetNamespace(id string) (*console.ManagedNamespaceFragment, error) ListNamespaces(after *string, first *int64) (*console.ListClusterNamespaces_ClusterManagedNamespaces, error) + GetStackRun(id string) (*console.StackRunFragment, error) + ListClusterStackRuns(after *string, first *int64) (*console.ListClusterStacks_ClusterStackRuns, error) + UpdateStackRun(id string, attr console.StackRunAttributes) (*console.StackRunBaseFragment, error) + UpdateStackRunStep(stepID string, attr console.RunStepAttributes) (*console.RunStepFragment, error) } diff --git a/pkg/client/pipelines.go b/pkg/client/pipelines.go index 41535068..4b9853fa 100644 --- a/pkg/client/pipelines.go +++ b/pkg/client/pipelines.go @@ -3,6 +3,8 @@ package client import ( "fmt" + "sigs.k8s.io/yaml" + console "github.com/pluralsh/console-client-go" "github.com/pluralsh/deployment-operator/api/v1alpha1" "github.com/pluralsh/deployment-operator/internal/errors" @@ -11,7 +13,6 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/scheme" ) const twentyFourHours = int32(86400) @@ -73,22 +74,10 @@ func (c *client) ParsePipelineGateCR(pgFragment *console.PipelineGateFragment, o return pipelineGate, nil } -func jobFromYaml(yamlString string) (*batchv1.Job, error) { - job := &batchv1.Job{} - - // unmarshal the YAML string into the Job rep - decoder := scheme.Codecs.UniversalDeserializer() - obj, _, err := decoder.Decode([]byte(yamlString), nil, job) - if err != nil { - return nil, err - } - - // ensure decoded object is actually of type Job after using universal deserializer - if obj, ok := obj.(*batchv1.Job); ok { - return obj, nil - } - - return nil, fmt.Errorf("parsed object is not of type Job") +func JobSpecFromYaml(yamlString string) (*batchv1.JobSpec, error) { + jobSpec := &batchv1.JobSpec{} + err := yaml.Unmarshal([]byte(yamlString), jobSpec) + return jobSpec, err } func gateSpecFromGateSpecFragment(gateName string, gsFragment *console.GateSpecFragment) *v1alpha1.GateSpec { @@ -105,12 +94,12 @@ func JobSpecFromJobSpecFragment(gateName string, jsFragment *console.JobSpecFrag return nil } var jobSpec *batchv1.JobSpec + var err error if jsFragment.Raw != nil && *jsFragment.Raw != "null" { - job, err := jobFromYaml(*jsFragment.Raw) + jobSpec, err = 
JobSpecFromYaml(*jsFragment.Raw)
 		if err != nil {
 			return nil
 		}
-		jobSpec = &job.Spec
 	} else {
 		name := utils.AsName(gateName)
 		jobSpec = &batchv1.JobSpec{
@@ -119,11 +108,11 @@ func JobSpecFromJobSpecFragment(gateName string, jsFragment *console.JobSpecFrag
 					Name:      name,
 					Namespace: jsFragment.Namespace,
 					// convert map[string]interface{} to map[string]string
-					Labels:      stringMapFromInterfaceMap(jsFragment.Labels),
-					Annotations: stringMapFromInterfaceMap(jsFragment.Annotations),
+					Labels:      StringMapFromInterfaceMap(jsFragment.Labels),
+					Annotations: StringMapFromInterfaceMap(jsFragment.Annotations),
 				},
 				Spec: corev1.PodSpec{
-					Containers:    containersFromContainerSpecFragments(name, jsFragment.Containers),
+					Containers:    ContainersFromContainerSpecFragments(name, jsFragment.Containers),
 					RestartPolicy: corev1.RestartPolicyOnFailure,
 				},
 			},
@@ -140,7 +129,7 @@ func JobSpecFromJobSpecFragment(gateName string, jsFragment *console.JobSpecFrag
 	return jobSpec
 }
 
-func containersFromContainerSpecFragments(gateName string, containerSpecFragments []*console.ContainerSpecFragment) []corev1.Container {
+func ContainersFromContainerSpecFragments(gateName string, containerSpecFragments []*console.ContainerSpecFragment) []corev1.Container {
 	var containers []corev1.Container
 
 	for i, csFragment := range containerSpecFragments {
@@ -188,7 +177,7 @@ func containersFromContainerSpecFragments(gateName string, containerSpecFragment
 	return containers
 }
 
-func stringMapFromInterfaceMap(labels map[string]interface{}) map[string]string {
+func StringMapFromInterfaceMap(labels map[string]interface{}) map[string]string {
 	result := make(map[string]string)
 
 	for key, value := range labels {
diff --git a/pkg/client/stack.go b/pkg/client/stack.go
new file mode 100644
index 00000000..e8d929d2
--- /dev/null
+++ b/pkg/client/stack.go
@@ -0,0 +1,45 @@
+package client
+
+import (
+	"fmt"
+
+	console "github.com/pluralsh/console-client-go"
+)
+
+func (c *client) UpdateStackRunStep(stepID string, attr console.RunStepAttributes) (*console.RunStepFragment, error) {
+	update, err := c.consoleClient.UpdateStackRunStep(c.ctx, stepID, attr)
+	if err != nil {
+		return nil, err
+	}
+
+	return update.UpdateRunStep, nil
+}
+
+func (c *client) GetStackRun(id string) (*console.StackRunFragment, error) {
+	resp, err := c.consoleClient.GetStackRun(c.ctx, id)
+	if err != nil {
+		return nil, err
+	}
+
+	return resp.StackRun, nil
+}
+
+func (c *client) UpdateStackRun(id string, attr console.StackRunAttributes) (*console.StackRunBaseFragment, error) {
+	resp, err := c.consoleClient.UpdateStackRun(c.ctx, id, attr)
+	if err != nil {
+		return nil, err
+	}
+
+	return resp.UpdateStackRun, nil
+}
+
+func (c *client) ListClusterStackRuns(after *string, first *int64) (*console.ListClusterStacks_ClusterStackRuns, error) {
+	resp, err := c.consoleClient.ListClusterStacks(c.ctx, after, first, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	if resp.ClusterStackRuns == nil {
+		return nil, fmt.Errorf("the response from ListClusterStacks is nil")
+	}
+	return resp.ClusterStackRuns, nil
+}
diff --git a/pkg/controller/stacks/job.go b/pkg/controller/stacks/job.go
new file mode 100644
index 00000000..2fb6b344
--- /dev/null
+++ b/pkg/controller/stacks/job.go
@@ -0,0 +1,240 @@
+package stacks
+
+import (
+	"context"
+	"fmt"
+
+	console "github.com/pluralsh/console-client-go"
+	consoleclient "github.com/pluralsh/deployment-operator/pkg/client"
+	"github.com/pluralsh/polly/algorithms"
+	"github.com/samber/lo"
+	batchv1 "k8s.io/api/batch/v1"
+	corev1 "k8s.io/api/core/v1"
"k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +const ( + podDefaultContainerAnnotation = "kubectl.kubernetes.io/default-container" + jobSelector = "stackrun.deployments.plural.sh" + DefaultJobContainer = "default" + defaultJobVolume = "default" + defaultJobVolumePath = "/plural" +) + +var ( + defaultContainerImages = map[console.StackType]string{ + console.StackTypeTerraform: "ghcr.io/pluralsh/stackrun-harness", + console.StackTypeAnsible: "ghcr.io/pluralsh/stackrun-harness", + } + + defaultContainerVersions = map[console.StackType]string{ + console.StackTypeTerraform: "latest", + console.StackTypeAnsible: "latest", + } +) + +func (r *StackReconciler) reconcileRunJob(ctx context.Context, run *console.StackRunFragment) (*batchv1.Job, error) { + logger := log.FromContext(ctx) + jobName := GetRunJobName(run) + foundJob := &batchv1.Job{} + if err := r.K8sClient.Get(ctx, types.NamespacedName{Name: jobName, Namespace: r.Namespace}, foundJob); err != nil { + if !apierrs.IsNotFound(err) { + return nil, err + } + + logger.V(2).Info("generating job", "namespace", r.Namespace, "name", jobName) + job := r.GenerateRunJob(run, jobName) + + logger.V(2).Info("creating job", "namespace", job.Namespace, "name", job.Name) + if err := r.K8sClient.Create(ctx, job); err != nil { + logger.Error(err, "unable to create job") + return nil, err + } + return job, nil + } + return foundJob, nil + +} + +func GetRunJobName(run *console.StackRunFragment) string { + return fmt.Sprintf("stack-%s", run.ID) +} + +func (r *StackReconciler) GenerateRunJob(run *console.StackRunFragment, name string) *batchv1.Job { + var jobSpec *batchv1.JobSpec + + // Use job spec defined in run as base if it is available. + if run.JobSpec != nil { + jobSpec = getRunJobSpec(name, run.JobSpec) + } + + // If user-defined job spec was not available initialize it here. + if jobSpec == nil { + jobSpec = &batchv1.JobSpec{} + } + + // Set requirements like name, namespace, container and volume. 
+ jobSpec.Template.ObjectMeta.Name = name + + if jobSpec.Template.Annotations == nil { + jobSpec.Template.Annotations = map[string]string{} + } + jobSpec.Template.Annotations[podDefaultContainerAnnotation] = DefaultJobContainer + + if jobSpec.Template.ObjectMeta.Namespace == "" { + jobSpec.Template.ObjectMeta.Namespace = r.Namespace + } + + jobSpec.Template.Spec.RestartPolicy = corev1.RestartPolicyNever + + jobSpec.BackoffLimit = lo.ToPtr(int32(0)) + + jobSpec.Template.Spec.Containers = r.ensureDefaultContainer(jobSpec.Template.Spec.Containers, run) + + jobSpec.Template.Spec.Volumes = ensureDefaultVolume(jobSpec.Template.Spec.Volumes) + + return &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: r.Namespace, + Annotations: map[string]string{jobSelector: name}, + Labels: map[string]string{jobSelector: name}, + }, + Spec: *jobSpec, + } +} + +func getRunJobSpec(name string, jobSpecFragment *console.JobSpecFragment) *batchv1.JobSpec { + if jobSpecFragment == nil { + return nil + } + var jobSpec *batchv1.JobSpec + var err error + if jobSpecFragment.Raw != nil && *jobSpecFragment.Raw != "null" { + jobSpec, err = consoleclient.JobSpecFromYaml(*jobSpecFragment.Raw) + if err != nil { + return nil + } + } else { + jobSpec = &batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: jobSpecFragment.Namespace, + Labels: consoleclient.StringMapFromInterfaceMap(jobSpecFragment.Labels), + Annotations: consoleclient.StringMapFromInterfaceMap(jobSpecFragment.Annotations), + }, + Spec: corev1.PodSpec{ + Containers: consoleclient.ContainersFromContainerSpecFragments(name, jobSpecFragment.Containers), + }, + }, + } + + if jobSpecFragment.ServiceAccount != nil { + jobSpec.Template.Spec.ServiceAccountName = *jobSpecFragment.ServiceAccount + } + } + + return jobSpec +} + +func (r *StackReconciler) ensureDefaultContainer(containers []corev1.Container, run *console.StackRunFragment) []corev1.Container { + if index := algorithms.Index(containers, func(container corev1.Container) bool { + return container.Name == DefaultJobContainer + }); index == -1 { + containers = append(containers, r.getDefaultContainer(run)) + } else { + if containers[index].Image == "" { + containers[index].Image = r.getDefaultContainerImage(run) + } + + containers[index].Args = r.getDefaultContainerArgs(run.ID) + + containers[index].VolumeMounts = ensureDefaultVolumeMount(containers[index].VolumeMounts) + } + return containers +} + +func (r *StackReconciler) getDefaultContainer(run *console.StackRunFragment) corev1.Container { + dc := corev1.Container{ + Name: DefaultJobContainer, + Image: r.getDefaultContainerImage(run), + Args: r.getDefaultContainerArgs(run.ID), + VolumeMounts: []corev1.VolumeMount{getDefaultContainerVolumeMount()}, + } + + if run.Environment != nil { + dc.Env = make([]corev1.EnvVar, 0) + } + for _, stackEnv := range run.Environment { + dc.Env = append(dc.Env, corev1.EnvVar{ + Name: stackEnv.Name, + Value: stackEnv.Value, + }) + } + + return dc +} + +func (r *StackReconciler) getDefaultContainerImage(run *console.StackRunFragment) string { + image := defaultContainerImages[run.Type] + if run.Configuration != nil && run.Configuration.Image != nil && *run.Configuration.Image != "" { + image = *run.Configuration.Image + } + + version := defaultContainerVersions[run.Type] + if run.Configuration != nil && run.Configuration.Version != "" { + version = run.Configuration.Version + } + + return fmt.Sprintf("%s:%s", image, version) +} + +func (r 
*StackReconciler) getDefaultContainerArgs(runID string) []string { + return []string{ + fmt.Sprintf("--console-url=%s", r.ConsoleURL), + fmt.Sprintf("--console-token=%s", r.DeployToken), + fmt.Sprintf("--stack-run-id=%s", runID), + } +} + +func getDefaultContainerVolumeMount() corev1.VolumeMount { + return corev1.VolumeMount{ + Name: defaultJobVolume, + MountPath: defaultJobVolumePath, + } +} + +func ensureDefaultVolumeMount(mounts []corev1.VolumeMount) []corev1.VolumeMount { + if index := algorithms.Index(mounts, func(mount corev1.VolumeMount) bool { + return mount.Name == defaultJobVolume + }); index == -1 { + mounts = append(mounts, getDefaultContainerVolumeMount()) + } else { + mounts[index] = getDefaultContainerVolumeMount() + } + return mounts +} + +func ensureDefaultVolume(volumes []corev1.Volume) []corev1.Volume { + if index := algorithms.Index(volumes, func(volume corev1.Volume) bool { + return volume.Name == defaultJobVolume + }); index == -1 { + volumes = append(volumes, getDefaultVolume()) + } else { + volumes[index] = getDefaultVolume() + } + return volumes +} + +func getDefaultVolume() corev1.Volume { + return corev1.Volume{ + Name: defaultJobVolume, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + } +} diff --git a/pkg/controller/stacks/reconciler.go b/pkg/controller/stacks/reconciler.go new file mode 100644 index 00000000..d8c13529 --- /dev/null +++ b/pkg/controller/stacks/reconciler.go @@ -0,0 +1,118 @@ +package stacks + +import ( + "context" + "fmt" + "time" + + console "github.com/pluralsh/console-client-go" + clienterrors "github.com/pluralsh/deployment-operator/internal/errors" + "github.com/pluralsh/deployment-operator/pkg/client" + "github.com/pluralsh/deployment-operator/pkg/controller" + "github.com/pluralsh/deployment-operator/pkg/websocket" + "github.com/pluralsh/polly/algorithms" + "k8s.io/client-go/util/workqueue" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +type StackReconciler struct { + ConsoleClient client.Client + K8sClient ctrlclient.Client + StackQueue workqueue.RateLimitingInterface + StackCache *client.Cache[console.StackRunFragment] + Namespace string + ConsoleURL string + DeployToken string +} + +func NewStackReconciler(consoleClient client.Client, k8sClient ctrlclient.Client, refresh time.Duration, namespace, consoleURL, deployToken string) *StackReconciler { + return &StackReconciler{ + ConsoleClient: consoleClient, + K8sClient: k8sClient, + StackQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), + StackCache: client.NewCache[console.StackRunFragment](refresh, func(id string) (*console.StackRunFragment, error) { + return consoleClient.GetStackRun(id) + }), + Namespace: namespace, + ConsoleURL: consoleURL, + DeployToken: deployToken, + } +} + +func (r *StackReconciler) GetPublisher() (string, websocket.Publisher) { + return "stack.event", &socketPublisher{ + stackRunQueue: r.StackQueue, + stackRunCache: r.StackCache, + } +} + +func (r *StackReconciler) WipeCache() { + r.StackCache.Wipe() +} + +func (r *StackReconciler) ShutdownQueue() { + r.StackQueue.ShutDown() +} + +func (r *StackReconciler) ListStacks(ctx context.Context) *algorithms.Pager[*console.StackRunEdgeFragment] { + logger := log.FromContext(ctx) + logger.Info("create stack run pager") + fetch := func(page *string, size int64) ([]*console.StackRunEdgeFragment, *algorithms.PageInfo, error) { + resp, err := 
r.ConsoleClient.ListClusterStackRuns(page, &size)
+		if err != nil {
+			logger.Error(err, "failed to fetch stack runs")
+			return nil, nil, err
+		}
+		pageInfo := &algorithms.PageInfo{
+			HasNext:  resp.PageInfo.HasNextPage,
+			After:    resp.PageInfo.EndCursor,
+			PageSize: size,
+		}
+		return resp.Edges, pageInfo, nil
+	}
+	return algorithms.NewPager[*console.StackRunEdgeFragment](controller.DefaultPageSize, fetch)
+}
+
+func (r *StackReconciler) Poll(ctx context.Context) (done bool, err error) {
+	logger := log.FromContext(ctx)
+	logger.Info("fetching stacks")
+	pager := r.ListStacks(ctx)
+
+	for pager.HasNext() {
+		stacks, err := pager.NextPage()
+		if err != nil {
+			logger.Error(err, "failed to fetch stack run list")
+			return false, nil
+		}
+		for _, stack := range stacks {
+			logger.Info("sending update", "stack run", stack.Node.ID)
+			r.StackQueue.Add(stack.Node.ID)
+		}
+	}
+
+	return false, nil
+}
+
+func (r *StackReconciler) Reconcile(ctx context.Context, id string) (reconcile.Result, error) {
+	logger := log.FromContext(ctx)
+	logger.Info("attempting to sync stack run", "id", id)
+	stackRun, err := r.StackCache.Get(id)
+	if err != nil {
+		if clienterrors.IsNotFound(err) {
+			logger.Info("stack run already deleted", "id", id)
+			return reconcile.Result{}, nil
+		}
+		logger.Error(err, "failed to fetch stack run", "id", id)
+		return reconcile.Result{}, err
+	}
+
+	// If approval is required but has not been given yet, exit.
+	if stackRun.Approval != nil && *stackRun.Approval && stackRun.ApprovedAt == nil {
+		return reconcile.Result{}, nil
+	}
+
+	_, err = r.reconcileRunJob(ctx, stackRun)
+	return reconcile.Result{}, err
+}
diff --git a/pkg/controller/stacks/reconciler_test.go b/pkg/controller/stacks/reconciler_test.go
new file mode 100644
index 00000000..581e8782
--- /dev/null
+++ b/pkg/controller/stacks/reconciler_test.go
@@ -0,0 +1,236 @@
+package stacks_test
+
+import (
+	"context"
+	"time"
+
+	"github.com/Yamashou/gqlgenc/clientv2"
+	. "github.com/onsi/ginkgo/v2"
+	. 
"github.com/onsi/gomega" + console "github.com/pluralsh/console-client-go" + errors2 "github.com/pluralsh/deployment-operator/internal/errors" + "github.com/pluralsh/deployment-operator/pkg/controller/stacks" + "github.com/pluralsh/deployment-operator/pkg/test/mocks" + "github.com/samber/lo" + "github.com/stretchr/testify/mock" + "github.com/vektah/gqlparser/v2/gqlerror" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/yaml" +) + +var _ = Describe("Reconciler", Ordered, func() { + Context("When reconciling a resource", func() { + const ( + namespace = "default" + stackRunId = "1" + stackRunJobName = "stack-1" + ) + + ctx := context.Background() + + BeforeAll(func() { + By("creating stack run job") + job := &batchv1.Job{} + err := kClient.Get(ctx, types.NamespacedName{Name: stackRunJobName, Namespace: namespace}, job) + if err != nil && errors.IsNotFound(err) { + resource := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: stackRunJobName, + Namespace: namespace, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: stacks.DefaultJobContainer, + Image: "image:v1.0.0", + Args: []string{}, + }}, + RestartPolicy: corev1.RestartPolicyNever, + }, + }, + }, + } + Expect(kClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterAll(func() { + By("cleanup stack run job") + job := &batchv1.Job{} + Expect(kClient.Get(ctx, types.NamespacedName{Name: stackRunJobName, Namespace: namespace}, job)).NotTo(HaveOccurred()) + Expect(kClient.Delete(ctx, job)).To(Succeed()) + }) + + It("should exit without errors as stack run is already deleted", func() { + fakeConsoleClient := mocks.NewClientMock(mocks.TestingT) + fakeConsoleClient.On("GetStackRun", mock.Anything).Return(nil, &clientv2.ErrorResponse{ + GqlErrors: &gqlerror.List{gqlerror.Errorf(errors2.ErrorNotFound.String())}, + }) + + reconciler := stacks.NewStackReconciler(fakeConsoleClient, kClient, time.Minute, namespace, "", "") + + _, err := reconciler.Reconcile(ctx, stackRunId) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should exit with error as unknown error occurred while getting stack run", func() { + fakeConsoleClient := mocks.NewClientMock(mocks.TestingT) + fakeConsoleClient.On("GetStackRun", mock.Anything).Return(nil, &clientv2.ErrorResponse{ + GqlErrors: &gqlerror.List{gqlerror.Errorf("unknown error")}, + }) + + reconciler := stacks.NewStackReconciler(fakeConsoleClient, kClient, time.Minute, namespace, "", "") + + _, err := reconciler.Reconcile(ctx, stackRunId) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("unknown error")) + }) + + It("should exit without errors as approval is required to be able to reconcile job", func() { + fakeConsoleClient := mocks.NewClientMock(mocks.TestingT) + fakeConsoleClient.On("GetStackRun", mock.Anything).Return(&console.StackRunFragment{ + ID: stackRunId, + Approval: lo.ToPtr(true), + }, nil) + + reconciler := stacks.NewStackReconciler(fakeConsoleClient, kClient, time.Minute, namespace, "", "") + + _, err := reconciler.Reconcile(ctx, stackRunId) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should exit without errors as job is already created", func() { + fakeConsoleClient := mocks.NewClientMock(mocks.TestingT) + fakeConsoleClient.On("GetStackRun", mock.Anything).Return(&console.StackRunFragment{ + ID: stackRunId, + Approval: lo.ToPtr(false), + 
}, nil) + + reconciler := stacks.NewStackReconciler(fakeConsoleClient, kClient, time.Minute, namespace, "", "") + + _, err := reconciler.Reconcile(ctx, stackRunId) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should create new job with default values", func() { + stackRunId := "default-values" + stackRun := &console.StackRunFragment{ + ID: stackRunId, + Approval: lo.ToPtr(false), + } + + fakeConsoleClient := mocks.NewClientMock(mocks.TestingT) + fakeConsoleClient.On("GetStackRun", mock.Anything).Return(stackRun, nil) + + reconciler := stacks.NewStackReconciler(fakeConsoleClient, kClient, time.Minute, namespace, "", "") + + _, err := reconciler.Reconcile(ctx, stackRunId) + Expect(err).NotTo(HaveOccurred()) + + job := &batchv1.Job{} + Expect(kClient.Get(ctx, types.NamespacedName{Name: stacks.GetRunJobName(stackRun), Namespace: namespace}, job)).NotTo(HaveOccurred()) + Expect(*job.Spec.BackoffLimit).To(Equal(int32(0))) + Expect(job.Spec.Template.Spec.Containers).To(HaveLen(1)) + Expect(job.Spec.Template.Spec.Volumes).To(HaveLen(1)) + Expect(kClient.Delete(ctx, job)).To(Succeed()) + }) + + It("should create new job based on user-defined spec", func() { + labelsValue := "labels-123" + annotationsValue := "annotations-123" + stackRunId := "user-defined-spec" + stackRun := &console.StackRunFragment{ + ID: stackRunId, + JobSpec: &console.JobSpecFragment{ + Namespace: namespace, + Containers: []*console.ContainerSpecFragment{{ + Image: "test", + Args: []*string{lo.ToPtr("arg1"), lo.ToPtr("arg2")}, + }, { + Image: "test2", + Args: []*string{lo.ToPtr("arg1")}, + }}, + Labels: map[string]any{"test": labelsValue}, + Annotations: map[string]any{"test": annotationsValue}, + ServiceAccount: lo.ToPtr("test-sa"), + }, + } + + fakeConsoleClient := mocks.NewClientMock(mocks.TestingT) + fakeConsoleClient.On("GetStackRun", mock.Anything).Return(stackRun, nil) + + reconciler := stacks.NewStackReconciler(fakeConsoleClient, kClient, time.Minute, namespace, "", "") + + _, err := reconciler.Reconcile(ctx, stackRunId) + Expect(err).NotTo(HaveOccurred()) + + job := &batchv1.Job{} + Expect(kClient.Get(ctx, types.NamespacedName{Name: stacks.GetRunJobName(stackRun), Namespace: namespace}, job)).NotTo(HaveOccurred()) + Expect(*job.Spec.BackoffLimit).To(Equal(int32(0))) + Expect(job.Spec.Template.Spec.Containers).To(HaveLen(3)) + Expect(job.Spec.Template.ObjectMeta.Labels).To(ContainElement(labelsValue)) + Expect(job.Spec.Template.ObjectMeta.Annotations).To(ContainElement(annotationsValue)) + Expect(job.Spec.Template.Spec.ServiceAccountName).To(Equal(*stackRun.JobSpec.ServiceAccount)) + Expect(job.Spec.Template.Spec.Volumes).To(HaveLen(1)) + Expect(kClient.Delete(ctx, job)).To(Succeed()) + }) + + It("should create new job based on user-defined raw spec", func() { + jobSpec := batchv1.JobSpec{ + ActiveDeadlineSeconds: lo.ToPtr(int64(60)), + BackoffLimit: lo.ToPtr(int32(3)), + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{}, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "test", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + Containers: []corev1.Container{{ + Name: stacks.DefaultJobContainer, + Image: "image:v1.0.0", + }}, + ServiceAccountName: "test-sa", + }, + }, + } + marshalledJobSpec, err := yaml.Marshal(jobSpec) + Expect(err).NotTo(HaveOccurred()) + + stackRunId := "user-defined-raw-spec" + stackRun := &console.StackRunFragment{ + ID: stackRunId, + JobSpec: &console.JobSpecFragment{ + Namespace: "", + Raw: 
lo.ToPtr(string(marshalledJobSpec)), + }, + } + + fakeConsoleClient := mocks.NewClientMock(mocks.TestingT) + fakeConsoleClient.On("GetStackRun", mock.Anything).Return(stackRun, nil) + + reconciler := stacks.NewStackReconciler(fakeConsoleClient, kClient, time.Minute, namespace, "", "") + + _, err = reconciler.Reconcile(ctx, stackRunId) + Expect(err).NotTo(HaveOccurred()) + + job := &batchv1.Job{} + Expect(kClient.Get(ctx, types.NamespacedName{Name: stacks.GetRunJobName(stackRun), Namespace: namespace}, job)).NotTo(HaveOccurred()) + Expect(*job.Spec.ActiveDeadlineSeconds).To(Equal(*jobSpec.ActiveDeadlineSeconds)) + Expect(*job.Spec.BackoffLimit).To(Equal(int32(0))) // Overridden by controller. + Expect(job.Spec.Template.Spec.ServiceAccountName).To(Equal(jobSpec.Template.Spec.ServiceAccountName)) + Expect(job.Spec.Template.Spec.Containers).To(HaveLen(1)) // Merged by controller as default container was specified. + Expect(job.Spec.Template.Spec.Volumes).To(HaveLen(2)) + Expect(kClient.Delete(ctx, job)).To(Succeed()) + }) + }) +}) diff --git a/pkg/controller/stacks/socket_publisher.go b/pkg/controller/stacks/socket_publisher.go new file mode 100644 index 00000000..14cbfad1 --- /dev/null +++ b/pkg/controller/stacks/socket_publisher.go @@ -0,0 +1,17 @@ +package stacks + +import ( + console "github.com/pluralsh/console-client-go" + "github.com/pluralsh/deployment-operator/pkg/client" + "k8s.io/client-go/util/workqueue" +) + +type socketPublisher struct { + stackRunQueue workqueue.RateLimitingInterface + stackRunCache *client.Cache[console.StackRunFragment] +} + +func (sp *socketPublisher) Publish(id string) { + sp.stackRunCache.Expire(id) + sp.stackRunQueue.Add(id) +} diff --git a/pkg/controller/stacks/suite_test.go b/pkg/controller/stacks/suite_test.go new file mode 100644 index 00000000..64c78689 --- /dev/null +++ b/pkg/controller/stacks/suite_test.go @@ -0,0 +1,65 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package stacks_test + +import ( + "fmt" + "path/filepath" + "runtime" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
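+
+// Note: BinaryAssetsDirectory below assumes the envtest binaries were already
+// installed under bin/k8s (e.g. by the `make test` target via setup-envtest);
+// adjust the path when running this suite directly.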
+var testEnv *envtest.Environment +var kClient client.Client + +func TestStacks(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Stack Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s", fmt.Sprintf("1.28.3-%s-%s", runtime.GOOS, runtime.GOARCH)), + } + + cfg, err := testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + kClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(kClient).NotTo(BeNil()) +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/pkg/lua/suite_test.go b/pkg/lua/suite_test.go index 939e04e4..20b0bf46 100644 --- a/pkg/lua/suite_test.go +++ b/pkg/lua/suite_test.go @@ -24,7 +24,6 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -32,14 +31,11 @@ import ( // These tests use Ginkgo (BDD-style Go testing framework). Refer to // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. - -var cfg *rest.Config var testEnv *envtest.Environment func TestControllers(t *testing.T) { RegisterFailHandler(Fail) - - RunSpecs(t, "Controller Suite") + RunSpecs(t, "Lua Suite") } var _ = BeforeSuite(func() { @@ -47,26 +43,17 @@ var _ = BeforeSuite(func() { By("bootstrapping test environment") testEnv = &envtest.Environment{ - - // The BinaryAssetsDirectory is only required if you want to run the tests directly - // without call the makefile target test. If not informed it will look for the - // default path defined in controller-runtime which is /usr/local/kubebuilder/. - // Note that you must have the required binaries setup under the bin directory to perform - // the tests directly. When we run make test it will be setup and used automatically. BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s", fmt.Sprintf("1.28.3-%s-%s", runtime.GOOS, runtime.GOARCH)), } - var err error - // cfg is defined in this file globally. - cfg, err = testEnv.Start() + cfg, err := testEnv.Start() Expect(err).NotTo(HaveOccurred()) Expect(cfg).NotTo(BeNil()) }) var _ = AfterSuite(func() { By("tearing down the test environment") - err := testEnv.Stop() Expect(err).NotTo(HaveOccurred()) }) diff --git a/pkg/manifests/template/suite_test.go b/pkg/manifests/template/suite_test.go index 43a9f4a3..b3e979da 100644 --- a/pkg/manifests/template/suite_test.go +++ b/pkg/manifests/template/suite_test.go @@ -27,7 +27,6 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" "k8s.io/kubectl/pkg/cmd/util" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" @@ -37,16 +36,13 @@ import ( // These tests use Ginkgo (BDD-style Go testing framework). Refer to // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
- -var cfg *rest.Config var k8sClient client.Client var utilFactory util.Factory var testEnv *envtest.Environment func TestControllers(t *testing.T) { RegisterFailHandler(Fail) - - RunSpecs(t, "Controller Suite") + RunSpecs(t, "Template Suite") } var _ = BeforeSuite(func() { @@ -54,19 +50,11 @@ var _ = BeforeSuite(func() { By("bootstrapping test environment") testEnv = &envtest.Environment{ - - // The BinaryAssetsDirectory is only required if you want to run the tests directly - // without call the makefile target test. If not informed it will look for the - // default path defined in controller-runtime which is /usr/local/kubebuilder/. - // Note that you must have the required binaries setup under the bin directory to perform - // the tests directly. When we run make test it will be setup and used automatically. BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s", fmt.Sprintf("1.28.3-%s-%s", runtime.GOOS, runtime.GOARCH)), } - var err error - // cfg is defined in this file globally. - cfg, err = testEnv.Start() + cfg, err := testEnv.Start() Expect(err).NotTo(HaveOccurred()) Expect(cfg).NotTo(BeNil()) @@ -78,7 +66,6 @@ var _ = BeforeSuite(func() { var _ = AfterSuite(func() { By("tearing down the test environment") - err := testEnv.Stop() Expect(err).NotTo(HaveOccurred()) }) diff --git a/pkg/test/common/kubernetes.go b/pkg/test/common/kubernetes.go new file mode 100644 index 00000000..efa67e4a --- /dev/null +++ b/pkg/test/common/kubernetes.go @@ -0,0 +1,74 @@ +package common + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Patcher[PatchObject client.Object] func(object PatchObject) + +func MaybeCreate[O client.Object](c client.Client, object O, patch Patcher[O]) error { + ctx := context.Background() + original := object.DeepCopyObject().(O) + + err := c.Get(ctx, client.ObjectKey{Name: object.GetName(), Namespace: object.GetNamespace()}, object) + if err != nil && !errors.IsNotFound(err) { + return err + } + + err = c.Create(ctx, object) + if err != nil { + return err + } + + if patch == nil { + return nil + } + + err = c.Get(ctx, client.ObjectKey{Name: object.GetName(), Namespace: object.GetNamespace()}, object) + if err != nil { + return err + } + + patch(object) + + return c.Status().Patch(ctx, object, client.MergeFrom(original)) +} + +func MaybePatch[O client.Object](c client.Client, object O, patch Patcher[O]) error { + ctx := context.Background() + original := object.DeepCopyObject().(O) + + err := c.Get(ctx, client.ObjectKey{Name: object.GetName(), Namespace: object.GetNamespace()}, object) + if err != nil { + return err + } + + if patch == nil { + return nil + } + + patch(object) + + return c.Status().Patch(ctx, object, client.MergeFrom(original)) +} + +func MaybePatchObject[O client.Object](c client.Client, object O, patch Patcher[O]) error { + ctx := context.Background() + original := object.DeepCopyObject().(O) + + err := c.Get(ctx, client.ObjectKey{Name: object.GetName(), Namespace: object.GetNamespace()}, object) + if err != nil { + return err + } + + if patch == nil { + return nil + } + + patch(object) + + return c.Patch(ctx, object, client.MergeFrom(original)) +} diff --git a/pkg/test/mocks/Client_mock.go b/pkg/test/mocks/Client_mock.go index 1257b44a..14b3d69e 100644 --- a/pkg/test/mocks/Client_mock.go +++ b/pkg/test/mocks/Client_mock.go @@ -405,6 +405,64 @@ func (_c *ClientMock_GetCredentials_Call) RunAndReturn(run func() (string, strin return _c } +// GetNamespace provides a mock function 
with given fields: id +func (_m *ClientMock) GetNamespace(id string) (*gqlclient.ManagedNamespaceFragment, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for GetNamespace") + } + + var r0 *gqlclient.ManagedNamespaceFragment + var r1 error + if rf, ok := ret.Get(0).(func(string) (*gqlclient.ManagedNamespaceFragment, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) *gqlclient.ManagedNamespaceFragment); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*gqlclient.ManagedNamespaceFragment) + } + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClientMock_GetNamespace_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNamespace' +type ClientMock_GetNamespace_Call struct { + *mock.Call +} + +// GetNamespace is a helper method to define mock.On call +// - id string +func (_e *ClientMock_Expecter) GetNamespace(id interface{}) *ClientMock_GetNamespace_Call { + return &ClientMock_GetNamespace_Call{Call: _e.mock.On("GetNamespace", id)} +} + +func (_c *ClientMock_GetNamespace_Call) Run(run func(id string)) *ClientMock_GetNamespace_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *ClientMock_GetNamespace_Call) Return(_a0 *gqlclient.ManagedNamespaceFragment, _a1 error) *ClientMock_GetNamespace_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClientMock_GetNamespace_Call) RunAndReturn(run func(string) (*gqlclient.ManagedNamespaceFragment, error)) *ClientMock_GetNamespace_Call { + _c.Call.Return(run) + return _c +} + // GetService provides a mock function with given fields: id func (_m *ClientMock) GetService(id string) (*gqlclient.GetServiceDeploymentForAgent_ServiceDeployment, error) { ret := _m.Called(id) @@ -522,6 +580,182 @@ func (_c *ClientMock_GetServices_Call) RunAndReturn(run func(*string, *int64) (* return _c } +// GetStackRun provides a mock function with given fields: id +func (_m *ClientMock) GetStackRun(id string) (*gqlclient.StackRunFragment, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for GetStackRun") + } + + var r0 *gqlclient.StackRunFragment + var r1 error + if rf, ok := ret.Get(0).(func(string) (*gqlclient.StackRunFragment, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) *gqlclient.StackRunFragment); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*gqlclient.StackRunFragment) + } + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClientMock_GetStackRun_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStackRun' +type ClientMock_GetStackRun_Call struct { + *mock.Call +} + +// GetStackRun is a helper method to define mock.On call +// - id string +func (_e *ClientMock_Expecter) GetStackRun(id interface{}) *ClientMock_GetStackRun_Call { + return &ClientMock_GetStackRun_Call{Call: _e.mock.On("GetStackRun", id)} +} + +func (_c *ClientMock_GetStackRun_Call) Run(run func(id string)) *ClientMock_GetStackRun_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *ClientMock_GetStackRun_Call) Return(_a0 *gqlclient.StackRunFragment, _a1 error) *ClientMock_GetStackRun_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c 
*ClientMock_GetStackRun_Call) RunAndReturn(run func(string) (*gqlclient.StackRunFragment, error)) *ClientMock_GetStackRun_Call { + _c.Call.Return(run) + return _c +} + +// ListClusterStackRuns provides a mock function with given fields: after, first +func (_m *ClientMock) ListClusterStackRuns(after *string, first *int64) (*gqlclient.ListClusterStacks_ClusterStackRuns, error) { + ret := _m.Called(after, first) + + if len(ret) == 0 { + panic("no return value specified for ListClusterStackRuns") + } + + var r0 *gqlclient.ListClusterStacks_ClusterStackRuns + var r1 error + if rf, ok := ret.Get(0).(func(*string, *int64) (*gqlclient.ListClusterStacks_ClusterStackRuns, error)); ok { + return rf(after, first) + } + if rf, ok := ret.Get(0).(func(*string, *int64) *gqlclient.ListClusterStacks_ClusterStackRuns); ok { + r0 = rf(after, first) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*gqlclient.ListClusterStacks_ClusterStackRuns) + } + } + + if rf, ok := ret.Get(1).(func(*string, *int64) error); ok { + r1 = rf(after, first) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClientMock_ListClusterStackRuns_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListClusterStackRuns' +type ClientMock_ListClusterStackRuns_Call struct { + *mock.Call +} + +// ListClusterStackRuns is a helper method to define mock.On call +// - after *string +// - first *int64 +func (_e *ClientMock_Expecter) ListClusterStackRuns(after interface{}, first interface{}) *ClientMock_ListClusterStackRuns_Call { + return &ClientMock_ListClusterStackRuns_Call{Call: _e.mock.On("ListClusterStackRuns", after, first)} +} + +func (_c *ClientMock_ListClusterStackRuns_Call) Run(run func(after *string, first *int64)) *ClientMock_ListClusterStackRuns_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*string), args[1].(*int64)) + }) + return _c +} + +func (_c *ClientMock_ListClusterStackRuns_Call) Return(_a0 *gqlclient.ListClusterStacks_ClusterStackRuns, _a1 error) *ClientMock_ListClusterStackRuns_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClientMock_ListClusterStackRuns_Call) RunAndReturn(run func(*string, *int64) (*gqlclient.ListClusterStacks_ClusterStackRuns, error)) *ClientMock_ListClusterStackRuns_Call { + _c.Call.Return(run) + return _c +} + +// ListNamespaces provides a mock function with given fields: after, first +func (_m *ClientMock) ListNamespaces(after *string, first *int64) (*gqlclient.ListClusterNamespaces_ClusterManagedNamespaces, error) { + ret := _m.Called(after, first) + + if len(ret) == 0 { + panic("no return value specified for ListNamespaces") + } + + var r0 *gqlclient.ListClusterNamespaces_ClusterManagedNamespaces + var r1 error + if rf, ok := ret.Get(0).(func(*string, *int64) (*gqlclient.ListClusterNamespaces_ClusterManagedNamespaces, error)); ok { + return rf(after, first) + } + if rf, ok := ret.Get(0).(func(*string, *int64) *gqlclient.ListClusterNamespaces_ClusterManagedNamespaces); ok { + r0 = rf(after, first) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*gqlclient.ListClusterNamespaces_ClusterManagedNamespaces) + } + } + + if rf, ok := ret.Get(1).(func(*string, *int64) error); ok { + r1 = rf(after, first) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClientMock_ListNamespaces_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListNamespaces' +type ClientMock_ListNamespaces_Call struct { + *mock.Call +} + +// ListNamespaces is a helper method to define mock.On 
call +// - after *string +// - first *int64 +func (_e *ClientMock_Expecter) ListNamespaces(after interface{}, first interface{}) *ClientMock_ListNamespaces_Call { + return &ClientMock_ListNamespaces_Call{Call: _e.mock.On("ListNamespaces", after, first)} +} + +func (_c *ClientMock_ListNamespaces_Call) Run(run func(after *string, first *int64)) *ClientMock_ListNamespaces_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*string), args[1].(*int64)) + }) + return _c +} + +func (_c *ClientMock_ListNamespaces_Call) Return(_a0 *gqlclient.ListClusterNamespaces_ClusterManagedNamespaces, _a1 error) *ClientMock_ListNamespaces_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClientMock_ListNamespaces_Call) RunAndReturn(run func(*string, *int64) (*gqlclient.ListClusterNamespaces_ClusterManagedNamespaces, error)) *ClientMock_ListNamespaces_Call { + _c.Call.Return(run) + return _c +} + // MyCluster provides a mock function with given fields: func (_m *ClientMock) MyCluster() (*gqlclient.MyCluster, error) { ret := _m.Called() @@ -989,6 +1223,124 @@ func (_c *ClientMock_UpdateGate_Call) RunAndReturn(run func(string, gqlclient.Ga return _c } +// UpdateStackRun provides a mock function with given fields: id, attr +func (_m *ClientMock) UpdateStackRun(id string, attr gqlclient.StackRunAttributes) (*gqlclient.StackRunBaseFragment, error) { + ret := _m.Called(id, attr) + + if len(ret) == 0 { + panic("no return value specified for UpdateStackRun") + } + + var r0 *gqlclient.StackRunBaseFragment + var r1 error + if rf, ok := ret.Get(0).(func(string, gqlclient.StackRunAttributes) (*gqlclient.StackRunBaseFragment, error)); ok { + return rf(id, attr) + } + if rf, ok := ret.Get(0).(func(string, gqlclient.StackRunAttributes) *gqlclient.StackRunBaseFragment); ok { + r0 = rf(id, attr) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*gqlclient.StackRunBaseFragment) + } + } + + if rf, ok := ret.Get(1).(func(string, gqlclient.StackRunAttributes) error); ok { + r1 = rf(id, attr) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClientMock_UpdateStackRun_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateStackRun' +type ClientMock_UpdateStackRun_Call struct { + *mock.Call +} + +// UpdateStackRun is a helper method to define mock.On call +// - id string +// - attr gqlclient.StackRunAttributes +func (_e *ClientMock_Expecter) UpdateStackRun(id interface{}, attr interface{}) *ClientMock_UpdateStackRun_Call { + return &ClientMock_UpdateStackRun_Call{Call: _e.mock.On("UpdateStackRun", id, attr)} +} + +func (_c *ClientMock_UpdateStackRun_Call) Run(run func(id string, attr gqlclient.StackRunAttributes)) *ClientMock_UpdateStackRun_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(gqlclient.StackRunAttributes)) + }) + return _c +} + +func (_c *ClientMock_UpdateStackRun_Call) Return(_a0 *gqlclient.StackRunBaseFragment, _a1 error) *ClientMock_UpdateStackRun_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClientMock_UpdateStackRun_Call) RunAndReturn(run func(string, gqlclient.StackRunAttributes) (*gqlclient.StackRunBaseFragment, error)) *ClientMock_UpdateStackRun_Call { + _c.Call.Return(run) + return _c +} + +// UpdateStackRunStep provides a mock function with given fields: stepID, attr +func (_m *ClientMock) UpdateStackRunStep(stepID string, attr gqlclient.RunStepAttributes) (*gqlclient.RunStepFragment, error) { + ret := _m.Called(stepID, attr) + + if len(ret) == 0 { + panic("no return value specified 
for UpdateStackRunStep") + } + + var r0 *gqlclient.RunStepFragment + var r1 error + if rf, ok := ret.Get(0).(func(string, gqlclient.RunStepAttributes) (*gqlclient.RunStepFragment, error)); ok { + return rf(stepID, attr) + } + if rf, ok := ret.Get(0).(func(string, gqlclient.RunStepAttributes) *gqlclient.RunStepFragment); ok { + r0 = rf(stepID, attr) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*gqlclient.RunStepFragment) + } + } + + if rf, ok := ret.Get(1).(func(string, gqlclient.RunStepAttributes) error); ok { + r1 = rf(stepID, attr) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClientMock_UpdateStackRunStep_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateStackRunStep' +type ClientMock_UpdateStackRunStep_Call struct { + *mock.Call +} + +// UpdateStackRunStep is a helper method to define mock.On call +// - stepID string +// - attr gqlclient.RunStepAttributes +func (_e *ClientMock_Expecter) UpdateStackRunStep(stepID interface{}, attr interface{}) *ClientMock_UpdateStackRunStep_Call { + return &ClientMock_UpdateStackRunStep_Call{Call: _e.mock.On("UpdateStackRunStep", stepID, attr)} +} + +func (_c *ClientMock_UpdateStackRunStep_Call) Run(run func(stepID string, attr gqlclient.RunStepAttributes)) *ClientMock_UpdateStackRunStep_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(gqlclient.RunStepAttributes)) + }) + return _c +} + +func (_c *ClientMock_UpdateStackRunStep_Call) Return(_a0 *gqlclient.RunStepFragment, _a1 error) *ClientMock_UpdateStackRunStep_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClientMock_UpdateStackRunStep_Call) RunAndReturn(run func(string, gqlclient.RunStepAttributes) (*gqlclient.RunStepFragment, error)) *ClientMock_UpdateStackRunStep_Call { + _c.Call.Return(run) + return _c +} + // UpsertConstraints provides a mock function with given fields: constraints func (_m *ClientMock) UpsertConstraints(constraints []*gqlclient.PolicyConstraintAttributes) (*gqlclient.UpsertPolicyConstraints, error) { ret := _m.Called(constraints) diff --git a/pkg/test/mocks/testing_mock.go b/pkg/test/mocks/testing_mock.go new file mode 100644 index 00000000..84419570 --- /dev/null +++ b/pkg/test/mocks/testing_mock.go @@ -0,0 +1,35 @@ +package mocks + +import ( + "fmt" +) + +var TestingT = &MockTestingT{} + +// MockTestingT mocks a test struct +type MockTestingT struct{} + +const mockTestingTFailNowCalled = "FailNow was called" + +func (m *MockTestingT) Logf(msg string, opts ...interface{}) { + fmt.Printf(msg, opts...) +} + +func (m *MockTestingT) Errorf(msg string, opts ...interface{}) { + fmt.Printf(msg, opts...) +} + +// FailNow mocks the FailNow call. +// It panics in order to mimic the FailNow behavior in the sense that +// the execution stops. 
+// When expecting this method, the call that invokes it should use the following code: +// +// assert.PanicsWithValue(t, mockTestingTFailNowCalled, func() {...}) +func (m *MockTestingT) FailNow() { + // this function should panic now to stop the execution as expected + panic(mockTestingTFailNowCalled) +} + +func (m *MockTestingT) Cleanup(f func()) { + f() +} diff --git a/test/kustomize/base/kustomization.yaml b/test/kustomize/base/kustomization.yaml index f8849596..0c00d6fb 100644 --- a/test/kustomize/base/kustomization.yaml +++ b/test/kustomize/base/kustomization.yaml @@ -2,4 +2,4 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - - deployment.yaml \ No newline at end of file +- deployment.yaml diff --git a/test/kustomize/overlays/dev/kustomization.yaml b/test/kustomize/overlays/dev/kustomization.yaml index 01ca70be..0a0e66a5 100644 --- a/test/kustomize/overlays/dev/kustomization.yaml +++ b/test/kustomize/overlays/dev/kustomization.yaml @@ -1,24 +1,22 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -bases: +resources: - ../../base namespace: my-app-dev nameSuffix: -dev -patchesStrategicMerge: -- deployment_env.yaml - - configMapGenerator: - - name: app-config - literals: - - username=demo-user +- literals: + - username=demo-user + name: app-config secretGenerator: - - name: credentials - type: Opaque - literals: - - password=demo \ No newline at end of file +- literals: + - password=demo + name: credentials + type: Opaque +patches: +- path: deployment_env.yaml
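For reference, a minimal sketch of how the generated ClientMock expecter above could be wired into a test. NewClientMock and EXPECT follow mockery's expecter conventions, and the StackRunFragment.ID field is assumed from console-client-go; none of these names are introduced by this patch:

package mocks_test

import (
	"testing"

	gqlclient "github.com/pluralsh/console-client-go"
	"github.com/stretchr/testify/require"

	"github.com/pluralsh/deployment-operator/pkg/test/mocks"
)

func TestGetStackRunExpecter(t *testing.T) {
	// NewClientMock registers cleanup and call assertions on t (mockery convention, assumed).
	client := mocks.NewClientMock(t)

	// Program the expected call and its canned response.
	client.EXPECT().GetStackRun("run-1").Return(&gqlclient.StackRunFragment{ID: "run-1"}, nil)

	run, err := client.GetStackRun("run-1")
	require.NoError(t, err)
	require.Equal(t, "run-1", run.ID)
}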