diff --git a/Makefile b/Makefile index a0a8a6da1b75..58f29c4dac08 100644 --- a/Makefile +++ b/Makefile @@ -575,7 +575,8 @@ mocks: ## Generate mocks ${MOCKGEN} -destination=pkg/curatedpackages/mocks/bundlemanager.go -package=mocks -source "pkg/curatedpackages/bundlemanager.go" Manager ${MOCKGEN} -destination=pkg/clients/kubernetes/mocks/kubectl.go -package=mocks -source "pkg/clients/kubernetes/unauth.go" ${MOCKGEN} -destination=pkg/clients/kubernetes/mocks/kubeconfig.go -package=mocks -source "pkg/clients/kubernetes/kubeconfig.go" - ${MOCKGEN} -destination=pkg/curatedpackages/mocks/installer.go -package=mocks -source "pkg/curatedpackages/packagecontrollerclient.go" ChartInstaller + ${MOCKGEN} -destination=pkg/curatedpackages/mocks/installer.go -package=mocks -source "pkg/curatedpackages/packagecontrollerclient.go" ChartInstaller ChartUninstaller ClientBuilder + ${MOCKGEN} -destination=pkg/curatedpackages/mocks/kube_client.go -package=mocks -mock_names Client=MockKubeClient sigs.k8s.io/controller-runtime/pkg/client Client ${MOCKGEN} -destination=pkg/cluster/mocks/client_builder.go -package=mocks -source "pkg/cluster/client_builder.go" ${MOCKGEN} -destination=controllers/mocks/factory.go -package=mocks "github.com/aws/eks-anywhere/controllers" Manager ${MOCKGEN} -destination=pkg/networking/cilium/reconciler/mocks/templater.go -package=mocks -source "pkg/networking/cilium/reconciler/reconciler.go" @@ -585,7 +586,7 @@ mocks: ## Generate mocks ${MOCKGEN} -destination=pkg/providers/docker/reconciler/mocks/reconciler.go -package=mocks -source "pkg/providers/docker/reconciler/reconciler.go" ${MOCKGEN} -destination=pkg/providers/tinkerbell/reconciler/mocks/reconciler.go -package=mocks -source "pkg/providers/tinkerbell/reconciler/reconciler.go" ${MOCKGEN} -destination=pkg/awsiamauth/reconciler/mocks/reconciler.go -package=mocks -source "pkg/awsiamauth/reconciler/reconciler.go" - ${MOCKGEN} -destination=controllers/mocks/cluster_controller.go -package=mocks -source "controllers/cluster_controller.go" AWSIamConfigReconciler ClusterValidator + ${MOCKGEN} -destination=controllers/mocks/cluster_controller.go -package=mocks -source "controllers/cluster_controller.go" AWSIamConfigReconciler ClusterValidator PackageControllerClient ${MOCKGEN} -destination=pkg/workflow/task_mock_test.go -package=workflow_test -source "pkg/workflow/task.go" ${MOCKGEN} -destination=pkg/validations/createcluster/mocks/createcluster.go -package=mocks -source "pkg/validations/createcluster/createcluster.go" ${MOCKGEN} -destination=pkg/awsiamauth/mock_test.go -package=awsiamauth_test -source "pkg/awsiamauth/installer.go" diff --git a/cmd/eksctl-anywhere/cmd/installpackagecontroller.go b/cmd/eksctl-anywhere/cmd/installpackagecontroller.go index b7f97fc51469..769503a3b671 100644 --- a/cmd/eksctl-anywhere/cmd/installpackagecontroller.go +++ b/cmd/eksctl-anywhere/cmd/installpackagecontroller.go @@ -72,7 +72,7 @@ func installPackageController(ctx context.Context) error { } curatedpackages.PrintLicense() - err = ctrlClient.EnableCuratedPackages(ctx) + err = ctrlClient.Enable(ctx) if err != nil { return err } diff --git a/config/manifest/eksa-components.yaml b/config/manifest/eksa-components.yaml index f5722bbc54a7..3628eb6f12b8 100644 --- a/config/manifest/eksa-components.yaml +++ b/config/manifest/eksa-components.yaml @@ -6024,6 +6024,18 @@ rules: - patch - update - watch +- apiGroups: + - packages.eks.amazonaws.com + resources: + - packagebundlecontrollers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - 
apiGroups: - tinkerbell.org resources: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 4c81fa1423dd..841ba435400b 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -210,6 +210,18 @@ rules: - patch - update - watch +- apiGroups: + - packages.eks.amazonaws.com + resources: + - packagebundlecontrollers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - tinkerbell.org resources: diff --git a/controllers/cluster_controller.go b/controllers/cluster_controller.go index d4c9d90f481c..55f5a67ac110 100644 --- a/controllers/cluster_controller.go +++ b/controllers/cluster_controller.go @@ -27,7 +27,10 @@ import ( "github.com/aws/eks-anywhere/pkg/controller/clientutil" "github.com/aws/eks-anywhere/pkg/controller/clusters" "github.com/aws/eks-anywhere/pkg/controller/handlers" + "github.com/aws/eks-anywhere/pkg/curatedpackages" + "github.com/aws/eks-anywhere/pkg/registrymirror" "github.com/aws/eks-anywhere/pkg/utils/ptr" + "github.com/aws/eks-anywhere/release/api/v1alpha1" ) const ( @@ -42,6 +45,15 @@ type ClusterReconciler struct { providerReconcilerRegistry ProviderClusterReconcilerRegistry awsIamAuth AWSIamConfigReconciler clusterValidator ClusterValidator + packagesClient PackagesClient +} + +// PackagesClient handles curated packages operations from within the cluster +// controller. +type PackagesClient interface { + EnableFullLifecycle(ctx context.Context, log logr.Logger, clusterName string, kubeConfig string, chart *v1alpha1.Image, registry *registrymirror.RegistryMirror, options ...curatedpackages.PackageControllerClientOpt) error + ReconcileDelete(context.Context, logr.Logger, curatedpackages.KubeDeleter, *anywherev1.Cluster) error + Reconcile(context.Context, logr.Logger, client.Client, *anywherev1.Cluster) error } type ProviderClusterReconcilerRegistry interface { @@ -61,12 +73,13 @@ type ClusterValidator interface { } // NewClusterReconciler constructs a new ClusterReconciler. -func NewClusterReconciler(client client.Client, registry ProviderClusterReconcilerRegistry, awsIamAuth AWSIamConfigReconciler, clusterValidator ClusterValidator) *ClusterReconciler { +func NewClusterReconciler(client client.Client, registry ProviderClusterReconcilerRegistry, awsIamAuth AWSIamConfigReconciler, clusterValidator ClusterValidator, pkgs PackagesClient) *ClusterReconciler { return &ClusterReconciler{ client: client, providerReconcilerRegistry: registry, awsIamAuth: awsIamAuth, clusterValidator: clusterValidator, + packagesClient: pkgs, } } @@ -266,6 +279,14 @@ func (r *ClusterReconciler) postClusterProviderReconcile(ctx context.Context, lo } } + // Self-managed clusters can support curated packages, but that support + // comes from the CLI at this time. 
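The new PackagesClient interface keeps the cluster controller decoupled from the concrete PackageControllerClient. As an illustration only (not part of this change), a hand-written no-op stand-in that satisfies the interface could look like the sketch below; the generated MockPackagesClient added elsewhere in this diff is what the tests actually use, and the compile-time assertion is the only piece not taken directly from the signatures shown here.

```go
package controllers_test

import (
	"context"

	"github.com/go-logr/logr"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/aws/eks-anywhere/controllers"
	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/curatedpackages"
	"github.com/aws/eks-anywhere/pkg/registrymirror"
	releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)

// noopPackagesClient satisfies controllers.PackagesClient without doing any
// work. Illustrative only; the tests in this change use the generated
// MockPackagesClient instead.
type noopPackagesClient struct{}

var _ controllers.PackagesClient = noopPackagesClient{}

func (noopPackagesClient) EnableFullLifecycle(context.Context, logr.Logger, string, string, *releasev1.Image, *registrymirror.RegistryMirror, ...curatedpackages.PackageControllerClientOpt) error {
	return nil
}

func (noopPackagesClient) Reconcile(context.Context, logr.Logger, client.Client, *anywherev1.Cluster) error {
	return nil
}

func (noopPackagesClient) ReconcileDelete(context.Context, logr.Logger, curatedpackages.KubeDeleter, *anywherev1.Cluster) error {
	return nil
}
```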
+ if cluster.IsManaged() && cluster.IsPackagesEnabled() { + if err := r.packagesClient.Reconcile(ctx, log, r.client, cluster); err != nil { + return controller.Result{}, err + } + } + return controller.Result{}, nil } @@ -314,6 +335,12 @@ func (r *ClusterReconciler) reconcileDelete(ctx context.Context, log logr.Logger } } + if cluster.IsManaged() { + if err := r.packagesClient.ReconcileDelete(ctx, log, r.client, cluster); err != nil { + return ctrl.Result{}, fmt.Errorf("deleting packages for cluster %q: %w", cluster.Name, err) + } + } + return ctrl.Result{}, nil } diff --git a/controllers/cluster_controller_legacy.go b/controllers/cluster_controller_legacy.go index 40715fdf7f38..82737e0b742b 100644 --- a/controllers/cluster_controller_legacy.go +++ b/controllers/cluster_controller_legacy.go @@ -60,6 +60,13 @@ func NewClusterReconcilerLegacy(client client.Client, log logr.Logger, scheme *r // +kubebuilder:rbac:groups="",namespace=eksa-system,resources=secrets,verbs=delete; // +kubebuilder:rbac:groups=tinkerbell.org,resources=hardware;hardware/status,verbs=get;list;watch;update;patch // +kubebuilder:rbac:groups=bmc.tinkerbell.org,resources=machines;machines/status,verbs=get;list;watch +// +// For the full cluster lifecycle to support Curated Packages, the controller +// must be able to create, delete, update, and patch package bundle controller +// resources, which will trigger the curated packages controller to do the +// rest. +// +// +kubebuilder:rbac:groups=packages.eks.amazonaws.com,resources=packagebundlecontrollers,verbs=create;delete;get;list;patch;update;watch; // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. diff --git a/controllers/cluster_controller_test.go b/controllers/cluster_controller_test.go index 954d89b34dac..67c4d9ce442a 100644 --- a/controllers/cluster_controller_test.go +++ b/controllers/cluster_controller_test.go @@ -3,10 +3,12 @@ package controllers_test import ( "context" "fmt" + "strings" "testing" "time" "github.com/go-logr/logr" + "github.com/go-logr/logr/testr" "github.com/golang/mock/gomock" . "github.com/onsi/gomega" apiv1 "k8s.io/api/core/v1" @@ -24,6 +26,7 @@ import ( "github.com/aws/eks-anywhere/controllers/mocks" "github.com/aws/eks-anywhere/internal/test/envtest" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" + "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/controller/clusters" "github.com/aws/eks-anywhere/pkg/govmomi" "github.com/aws/eks-anywhere/pkg/providers/vsphere" @@ -68,7 +71,12 @@ func newVsphereClusterReconcilerTest(t *testing.T, objs ...runtime.Object) *vsph Add(anywherev1.VSphereDatacenterKind, reconciler). Build() - r := controllers.NewClusterReconciler(cl, ®istry, iam, clusterValidator) + mockPkgs := mocks.NewMockPackagesClient(ctrl) + mockPkgs.EXPECT(). + ReconcileDelete(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). 
+ Return(nil).AnyTimes() + + r := controllers.NewClusterReconciler(cl, ®istry, iam, clusterValidator, mockPkgs) return &vsphereClusterReconcilerTest{ govcClient: govcClient, @@ -98,10 +106,10 @@ func TestClusterReconcilerReconcileSelfManagedCluster(t *testing.T) { clusterValidator := mocks.NewMockClusterValidator(controller) registry := newRegistryMock(providerReconciler) c := fake.NewClientBuilder().WithRuntimeObjects(selfManagedCluster).Build() - + mockPkgs := mocks.NewMockPackagesClient(controller) providerReconciler.EXPECT().ReconcileWorkerNodes(ctx, gomock.AssignableToTypeOf(logr.Logger{}), sameName(selfManagedCluster)) - r := controllers.NewClusterReconciler(c, registry, iam, clusterValidator) + r := controllers.NewClusterReconciler(c, registry, iam, clusterValidator, mockPkgs) result, err := r.Reconcile(ctx, clusterRequest(selfManagedCluster)) g.Expect(err).ToNot(HaveOccurred()) g.Expect(result).To(Equal(ctrl.Result{})) @@ -128,7 +136,7 @@ func TestClusterReconcilerReconcilePausedCluster(t *testing.T) { iam := mocks.NewMockAWSIamConfigReconciler(ctrl) clusterValidator := mocks.NewMockClusterValidator(ctrl) registry := newRegistryMock(providerReconciler) - r := controllers.NewClusterReconciler(c, registry, iam, clusterValidator) + r := controllers.NewClusterReconciler(c, registry, iam, clusterValidator, nil) g.Expect(r.Reconcile(ctx, clusterRequest(cluster))).To(Equal(reconcile.Result{})) api := envtest.NewAPIExpecter(t, c) @@ -164,7 +172,7 @@ func TestClusterReconcilerReconcileDeletedSelfManagedCluster(t *testing.T) { registry := newRegistryMock(providerReconciler) c := fake.NewClientBuilder().WithRuntimeObjects(selfManagedCluster).Build() - r := controllers.NewClusterReconciler(c, registry, iam, clusterValidator) + r := controllers.NewClusterReconciler(c, registry, iam, clusterValidator, nil) _, err := r.Reconcile(ctx, clusterRequest(selfManagedCluster)) g.Expect(err).To(MatchError(ContainSubstring("deleting self-managed clusters is not supported"))) } @@ -233,7 +241,7 @@ func TestClusterReconcilerReconcileDeletePausedCluster(t *testing.T) { managementCluster, cluster, capiCluster, ).Build() - r := controllers.NewClusterReconciler(c, newRegistryForDummyProviderReconciler(), iam, clusterValidator) + r := controllers.NewClusterReconciler(c, newRegistryForDummyProviderReconciler(), iam, clusterValidator, nil) g.Expect(r.Reconcile(ctx, clusterRequest(cluster))).To(Equal(reconcile.Result{})) api := envtest.NewAPIExpecter(t, c) @@ -276,7 +284,7 @@ func TestClusterReconcilerReconcileDeleteClusterManagedByCLI(t *testing.T) { iam := mocks.NewMockAWSIamConfigReconciler(controller) clusterValidator := mocks.NewMockClusterValidator(controller) - r := controllers.NewClusterReconciler(c, newRegistryForDummyProviderReconciler(), iam, clusterValidator) + r := controllers.NewClusterReconciler(c, newRegistryForDummyProviderReconciler(), iam, clusterValidator, nil) g.Expect(r.Reconcile(ctx, clusterRequest(cluster))).To(Equal(reconcile.Result{})) api := envtest.NewAPIExpecter(t, c) @@ -335,6 +343,178 @@ func TestClusterReconcilerDeleteNoCAPIClusterSuccess(t *testing.T) { } } +func TestClusterReconcilerSkipDontInstallPackagesOnSelfManaged(t *testing.T) { + ctx := context.Background() + cluster := &anywherev1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "my-namespace", + }, + Spec: anywherev1.ClusterSpec{ + KubernetesVersion: "v1.25", + BundlesRef: &anywherev1.BundlesRef{ + Name: "my-bundles-ref", + Namespace: "my-namespace", + }, + ManagementCluster: 
anywherev1.ManagementCluster{ + Name: "", + }, + }, + } + objs := []runtime.Object{cluster} + cb := fake.NewClientBuilder() + mockClient := cb.WithRuntimeObjects(objs...).Build() + nullRegistry := newRegistryForDummyProviderReconciler() + + ctrl := gomock.NewController(t) + mockPkgs := mocks.NewMockPackagesClient(ctrl) + mockPkgs.EXPECT().ReconcileDelete(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0) + r := controllers.NewClusterReconciler(mockClient, nullRegistry, nil, nil, mockPkgs) + _, err := r.Reconcile(ctx, clusterRequest(cluster)) + if err != nil { + t.Fatalf("expected err to be nil, got %s", err) + } +} + +func TestClusterReconcilerDontDeletePackagesOnSelfManaged(t *testing.T) { + ctx := context.Background() + deleteTime := metav1.NewTime(time.Now().Add(-1 * time.Second)) + cluster := &anywherev1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "my-namespace", + DeletionTimestamp: &deleteTime, + }, + Spec: anywherev1.ClusterSpec{ + KubernetesVersion: "v1.25", + BundlesRef: &anywherev1.BundlesRef{ + Name: "my-bundles-ref", + Namespace: "my-namespace", + }, + ManagementCluster: anywherev1.ManagementCluster{ + Name: "", + }, + }, + } + objs := []runtime.Object{cluster} + cb := fake.NewClientBuilder() + mockClient := cb.WithRuntimeObjects(objs...).Build() + nullRegistry := newRegistryForDummyProviderReconciler() + + ctrl := gomock.NewController(t) + // At the moment, Reconcile won't get this far, but if the time comes when + // deleting self-managed clusters via full cluster lifecycle happens, we + // need to be aware and adapt appropriately. + mockPkgs := mocks.NewMockPackagesClient(ctrl) + mockPkgs.EXPECT().ReconcileDelete(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0) + r := controllers.NewClusterReconciler(mockClient, nullRegistry, nil, nil, mockPkgs) + _, err := r.Reconcile(ctx, clusterRequest(cluster)) + if err == nil || !strings.Contains(err.Error(), "deleting self-managed clusters is not supported") { + t.Fatalf("unexpected error %s", err) + } +} + +func TestClusterReconcilerPackagesDeletion(s *testing.T) { + newTestCluster := func() *anywherev1.Cluster { + deleteTime := metav1.NewTime(time.Now().Add(-1 * time.Second)) + return &anywherev1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-workload-cluster", + Namespace: "my-namespace", + DeletionTimestamp: &deleteTime, + }, + Spec: anywherev1.ClusterSpec{ + KubernetesVersion: "v1.25", + BundlesRef: &anywherev1.BundlesRef{ + Name: "my-bundles-ref", + Namespace: "my-namespace", + }, + ManagementCluster: anywherev1.ManagementCluster{ + Name: "my-management-cluster", + }, + }, + } + } + + s.Run("errors when packages client errors", func(t *testing.T) { + ctx := context.Background() + log := testr.New(t) + logCtx := ctrl.LoggerInto(ctx, log) + cluster := newTestCluster() + cluster.Spec.BundlesRef.Name = "non-existent" + ctrl := gomock.NewController(t) + objs := []runtime.Object{cluster} + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() + nullRegistry := newRegistryForDummyProviderReconciler() + mockPkgs := mocks.NewMockPackagesClient(ctrl) + mockPkgs.EXPECT().ReconcileDelete(logCtx, log, gomock.Any(), gomock.Any()).Return(fmt.Errorf("test error")) + mockIAM := mocks.NewMockAWSIamConfigReconciler(ctrl) + mockValid := mocks.NewMockClusterValidator(ctrl) + + r := controllers.NewClusterReconciler(fakeClient, nullRegistry, mockIAM, mockValid, mockPkgs) + _, err := r.Reconcile(logCtx, clusterRequest(cluster)) + if err == nil || 
!strings.Contains(err.Error(), "test error") { + t.Errorf("expected packages client deletion error, got %s", err) + } + }) +} + +func TestClusterReconcilerPackagesInstall(s *testing.T) { + newTestCluster := func() *anywherev1.Cluster { + return &anywherev1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-workload-cluster", + Namespace: "my-namespace", + }, + Spec: anywherev1.ClusterSpec{ + KubernetesVersion: "v1.25", + BundlesRef: &anywherev1.BundlesRef{ + Name: "my-bundles-ref", + Namespace: "my-namespace", + }, + ManagementCluster: anywherev1.ManagementCluster{ + Name: "my-management-cluster", + }, + }, + } + } + + s.Run("skips installation when disabled via cluster spec", func(t *testing.T) { + ctx := context.Background() + log := testr.New(t) + logCtx := ctrl.LoggerInto(ctx, log) + cluster := newTestCluster() + cluster.Spec.Packages = &anywherev1.PackageConfiguration{Disable: true} + ctrl := gomock.NewController(t) + bundles := createBundle(cluster) + bundles.Spec.VersionsBundles[0].KubeVersion = string(cluster.Spec.KubernetesVersion) + bundles.ObjectMeta.Name = cluster.Spec.BundlesRef.Name + bundles.ObjectMeta.Namespace = cluster.Spec.BundlesRef.Namespace + secret := &apiv1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: constants.EksaSystemNamespace, + Name: cluster.Name + "-kubeconfig", + }, + } + objs := []runtime.Object{cluster, bundles, secret} + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() + nullRegistry := newRegistryForDummyProviderReconciler() + mockIAM := mocks.NewMockAWSIamConfigReconciler(ctrl) + mockValid := mocks.NewMockClusterValidator(ctrl) + mockValid.EXPECT().ValidateManagementClusterName(logCtx, log, gomock.Any()).Return(nil) + mockPkgs := mocks.NewMockPackagesClient(ctrl) + mockPkgs.EXPECT(). + EnableFullLifecycle(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + Times(0) + + r := controllers.NewClusterReconciler(fakeClient, nullRegistry, mockIAM, mockValid, mockPkgs) + _, err := r.Reconcile(logCtx, clusterRequest(cluster)) + if err != nil { + t.Errorf("expected nil error, got %s", err) + } + }) +} + func createWNMachineConfig() *anywherev1.VSphereMachineConfig { return &anywherev1.VSphereMachineConfig{ TypeMeta: metav1.TypeMeta{ diff --git a/controllers/cluster_controller_test_test.go b/controllers/cluster_controller_test_test.go index 2bb91c6a0726..d0a1b0ba47d6 100644 --- a/controllers/cluster_controller_test_test.go +++ b/controllers/cluster_controller_test_test.go @@ -8,6 +8,7 @@ import ( "github.com/go-logr/logr" "github.com/golang/mock/gomock" . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -21,8 +22,10 @@ import ( "github.com/aws/eks-anywhere/internal/test/envtest" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" + "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/controller" "github.com/aws/eks-anywhere/pkg/controller/clusters" + "github.com/aws/eks-anywhere/release/api/v1alpha1" ) func TestClusterReconcilerEnsureOwnerReferences(t *testing.T) { @@ -34,6 +37,11 @@ func TestClusterReconcilerEnsureOwnerReferences(t *testing.T) { Name: "my-management-cluster", Namespace: "my-namespace", }, + Spec: anywherev1.ClusterSpec{ + BundlesRef: &anywherev1.BundlesRef{ + Name: "my-bundles-ref", + }, + }, } cluster := &anywherev1.Cluster{ @@ -41,6 +49,13 @@ func TestClusterReconcilerEnsureOwnerReferences(t *testing.T) { Name: "my-cluster", Namespace: "my-namespace", }, + Spec: anywherev1.ClusterSpec{ + KubernetesVersion: "v1.25", + BundlesRef: &anywherev1.BundlesRef{ + Name: "my-bundles-ref", + Namespace: "my-namespace", + }, + }, } cluster.Spec.IdentityProviderRefs = []anywherev1.Ref{ { @@ -73,7 +88,29 @@ func TestClusterReconcilerEnsureOwnerReferences(t *testing.T) { }, }, } - objs := []runtime.Object{cluster, managementCluster, oidc, awsIAM} + bundles := &v1alpha1.Bundles{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-bundles-ref", + Namespace: cluster.Namespace, + }, + Spec: v1alpha1.BundlesSpec{ + VersionsBundles: []v1alpha1.VersionsBundle{ + { + KubeVersion: "v1.25", + PackageController: v1alpha1.PackageBundle{ + HelmChart: v1alpha1.Image{}, + }, + }, + }, + }, + } + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster-kubeconfig", + Namespace: constants.EksaSystemNamespace, + }, + } + objs := []runtime.Object{cluster, managementCluster, oidc, awsIAM, bundles, secret} cb := fake.NewClientBuilder() cl := cb.WithRuntimeObjects(objs...).Build() @@ -84,8 +121,15 @@ func TestClusterReconcilerEnsureOwnerReferences(t *testing.T) { validator := newMockClusterValidator(t) validator.EXPECT().ValidateManagementClusterName(ctx, gomock.AssignableToTypeOf(logr.Logger{}), gomock.AssignableToTypeOf(cluster)).Return(nil) - r := controllers.NewClusterReconciler(cl, newRegistryForDummyProviderReconciler(), iam, validator) + pcc := newMockPackagesClient(t) + pcc.EXPECT().Reconcile(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + + r := controllers.NewClusterReconciler(cl, newRegistryForDummyProviderReconciler(), iam, validator, pcc) _, err := r.Reconcile(ctx, clusterRequest(cluster)) + + g.Expect(cl.Get(ctx, client.ObjectKey{Namespace: cluster.Spec.BundlesRef.Namespace, Name: cluster.Spec.BundlesRef.Name}, bundles)).To(Succeed()) + g.Expect(cl.Get(ctx, client.ObjectKey{Namespace: constants.EksaSystemNamespace, Name: cluster.Name + "-kubeconfig"}, secret)).To(Succeed()) + g.Expect(err).NotTo(HaveOccurred()) newOidc := &anywherev1.OIDCConfig{} @@ -133,7 +177,7 @@ func TestClusterReconcilerReconcileChildObjectNotFound(t *testing.T) { cl := cb.WithRuntimeObjects(objs...).Build() api := envtest.NewAPIExpecter(t, cl) - r := controllers.NewClusterReconciler(cl, newRegistryForDummyProviderReconciler(), newMockAWSIamConfigReconciler(t), newMockClusterValidator(t)) + r := controllers.NewClusterReconciler(cl, newRegistryForDummyProviderReconciler(), newMockAWSIamConfigReconciler(t), newMockClusterValidator(t), nil) 
g.Expect(r.Reconcile(ctx, clusterRequest(cluster))).Error().To(MatchError(ContainSubstring("not found"))) c := envtest.CloneNameNamespace(cluster) api.ShouldEventuallyMatch(ctx, c, func(g Gomega) { @@ -145,7 +189,7 @@ func TestClusterReconcilerReconcileChildObjectNotFound(t *testing.T) { func TestClusterReconcilerSetupWithManager(t *testing.T) { client := env.Client() - r := controllers.NewClusterReconciler(client, newRegistryForDummyProviderReconciler(), newMockAWSIamConfigReconciler(t), newMockClusterValidator(t)) + r := controllers.NewClusterReconciler(client, newRegistryForDummyProviderReconciler(), newMockAWSIamConfigReconciler(t), newMockClusterValidator(t), nil) g := NewWithT(t) g.Expect(r.SetupWithManager(env.Manager(), env.Manager().GetLogger())).To(Succeed()) @@ -174,7 +218,7 @@ func TestClusterReconcilerManagementClusterNotFound(t *testing.T) { cl := cb.WithRuntimeObjects(objs...).Build() api := envtest.NewAPIExpecter(t, cl) - r := controllers.NewClusterReconciler(cl, newRegistryForDummyProviderReconciler(), newMockAWSIamConfigReconciler(t), newMockClusterValidator(t)) + r := controllers.NewClusterReconciler(cl, newRegistryForDummyProviderReconciler(), newMockAWSIamConfigReconciler(t), newMockClusterValidator(t), nil) g.Expect(r.Reconcile(ctx, clusterRequest(cluster))).Error().To(MatchError(ContainSubstring("\"my-management-cluster\" not found"))) c := envtest.CloneNameNamespace(cluster) api.ShouldEventuallyMatch(ctx, c, func(g Gomega) { @@ -192,7 +236,8 @@ func TestClusterReconcilerSetBundlesRef(t *testing.T) { }, Spec: anywherev1.ClusterSpec{ BundlesRef: &anywherev1.BundlesRef{ - Name: "my-bundles-ref", + Name: "my-bundles-ref", + Namespace: "my-namespace", }, }, } @@ -201,20 +246,53 @@ func TestClusterReconcilerSetBundlesRef(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", }, + Spec: anywherev1.ClusterSpec{ + KubernetesVersion: "v1.25", + BundlesRef: &anywherev1.BundlesRef{ + Name: "my-bundles-ref", + Namespace: "my-namespace", + }, + }, } cluster.SetManagedBy("my-management-cluster") + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster-kubeconfig", + Namespace: constants.EksaSystemNamespace, + }, + } + bundles := &v1alpha1.Bundles{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-bundles-ref", + Namespace: cluster.Spec.BundlesRef.Namespace, + }, + Spec: v1alpha1.BundlesSpec{ + VersionsBundles: []v1alpha1.VersionsBundle{ + { + KubeVersion: "v1.25", + PackageController: v1alpha1.PackageBundle{ + HelmChart: v1alpha1.Image{}, + }, + }, + }, + }, + } - objs := []runtime.Object{cluster, managementCluster} + objs := []runtime.Object{cluster, managementCluster, secret, bundles} cb := fake.NewClientBuilder() cl := cb.WithRuntimeObjects(objs...).Build() mgmtCluster := &anywherev1.Cluster{} g.Expect(cl.Get(ctx, client.ObjectKey{Namespace: cluster.Namespace, Name: managementCluster.Name}, mgmtCluster)).To(Succeed()) + g.Expect(cl.Get(ctx, client.ObjectKey{Namespace: cluster.Spec.BundlesRef.Namespace, Name: cluster.Spec.BundlesRef.Name}, bundles)).To(Succeed()) + g.Expect(cl.Get(ctx, client.ObjectKey{Namespace: constants.EksaSystemNamespace, Name: cluster.Name + "-kubeconfig"}, secret)).To(Succeed()) + pcc := newMockPackagesClient(t) + pcc.EXPECT().Reconcile(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) validator := newMockClusterValidator(t) validator.EXPECT().ValidateManagementClusterName(ctx, gomock.AssignableToTypeOf(logr.Logger{}), gomock.AssignableToTypeOf(cluster)).Return(nil) - r := controllers.NewClusterReconciler(cl, 
newRegistryForDummyProviderReconciler(), newMockAWSIamConfigReconciler(t), validator) + r := controllers.NewClusterReconciler(cl, newRegistryForDummyProviderReconciler(), newMockAWSIamConfigReconciler(t), validator, pcc) _, err := r.Reconcile(ctx, clusterRequest(cluster)) g.Expect(err).ToNot(HaveOccurred()) @@ -250,7 +328,7 @@ func TestClusterReconcilerWorkloadClusterMgmtClusterNameFail(t *testing.T) { validator.EXPECT().ValidateManagementClusterName(ctx, gomock.AssignableToTypeOf(logr.Logger{}), gomock.AssignableToTypeOf(cluster)). Return(errors.New("test error")) - r := controllers.NewClusterReconciler(cl, newRegistryForDummyProviderReconciler(), newMockAWSIamConfigReconciler(t), validator) + r := controllers.NewClusterReconciler(cl, newRegistryForDummyProviderReconciler(), newMockAWSIamConfigReconciler(t), validator, nil) _, err := r.Reconcile(ctx, clusterRequest(cluster)) g.Expect(err).To(HaveOccurred()) } @@ -309,3 +387,8 @@ func newMockClusterValidator(t *testing.T) *mocks.MockClusterValidator { ctrl := gomock.NewController(t) return mocks.NewMockClusterValidator(ctrl) } + +func newMockPackagesClient(t *testing.T) *mocks.MockPackagesClient { + ctrl := gomock.NewController(t) + return mocks.NewMockPackagesClient(ctrl) +} diff --git a/controllers/factory.go b/controllers/factory.go index 6cb3a6459e8f..dc7b9603ee0d 100644 --- a/controllers/factory.go +++ b/controllers/factory.go @@ -13,6 +13,7 @@ import ( awsiamconfigreconciler "github.com/aws/eks-anywhere/pkg/awsiamauth/reconciler" "github.com/aws/eks-anywhere/pkg/controller/clusters" "github.com/aws/eks-anywhere/pkg/crypto" + "github.com/aws/eks-anywhere/pkg/curatedpackages" "github.com/aws/eks-anywhere/pkg/dependencies" ciliumreconciler "github.com/aws/eks-anywhere/pkg/networking/cilium/reconciler" cnireconciler "github.com/aws/eks-anywhere/pkg/networking/reconciler" @@ -42,6 +43,7 @@ type Factory struct { awsIamConfigReconciler *awsiamconfigreconciler.Reconciler logger logr.Logger deps *dependencies.Dependencies + packageControllerClient *curatedpackages.PackageControllerClient } type Reconcilers struct { @@ -91,7 +93,10 @@ func (f *Factory) Close(ctx context.Context) error { func (f *Factory) WithClusterReconciler(capiProviders []clusterctlv1.Provider) *Factory { f.dependencyFactory.WithGovc() - f.withTracker().WithProviderClusterReconcilerRegistry(capiProviders).withAWSIamConfigReconciler() + f.withTracker(). + WithProviderClusterReconcilerRegistry(capiProviders). + withAWSIamConfigReconciler(). 
+ withPackageControllerClient() f.buildSteps = append(f.buildSteps, func(ctx context.Context) error { if f.reconcilers.ClusterReconciler != nil { @@ -103,6 +108,7 @@ func (f *Factory) WithClusterReconciler(capiProviders []clusterctlv1.Provider) * f.registry, f.awsIamConfigReconciler, clusters.NewClusterValidator(f.manager.GetClient()), + f.packageControllerClient, ) return nil @@ -422,3 +428,17 @@ func (f *Factory) withAWSIamConfigReconciler() *Factory { return f } + +func (f *Factory) withPackageControllerClient() *Factory { + f.dependencyFactory.WithHelm().WithKubectl() + + f.buildSteps = append(f.buildSteps, func(ctx context.Context) error { + if f.packageControllerClient != nil { + return nil + } + f.packageControllerClient = curatedpackages.NewPackageControllerClientFullLifecycle(f.logger, f.deps.Helm, f.deps.Helm, f.deps.Kubectl, f.tracker) + return nil + }) + + return f +} diff --git a/controllers/mocks/cluster_controller.go b/controllers/mocks/cluster_controller.go index 64d1e5f75d53..2ad400f1260f 100644 --- a/controllers/mocks/cluster_controller.go +++ b/controllers/mocks/cluster_controller.go @@ -11,10 +11,84 @@ import ( v1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" controller "github.com/aws/eks-anywhere/pkg/controller" clusters "github.com/aws/eks-anywhere/pkg/controller/clusters" + curatedpackages "github.com/aws/eks-anywhere/pkg/curatedpackages" + registrymirror "github.com/aws/eks-anywhere/pkg/registrymirror" + v1alpha10 "github.com/aws/eks-anywhere/release/api/v1alpha1" logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" + client "sigs.k8s.io/controller-runtime/pkg/client" ) +// MockPackagesClient is a mock of PackagesClient interface. +type MockPackagesClient struct { + ctrl *gomock.Controller + recorder *MockPackagesClientMockRecorder +} + +// MockPackagesClientMockRecorder is the mock recorder for MockPackagesClient. +type MockPackagesClientMockRecorder struct { + mock *MockPackagesClient +} + +// NewMockPackagesClient creates a new mock instance. +func NewMockPackagesClient(ctrl *gomock.Controller) *MockPackagesClient { + mock := &MockPackagesClient{ctrl: ctrl} + mock.recorder = &MockPackagesClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPackagesClient) EXPECT() *MockPackagesClientMockRecorder { + return m.recorder +} + +// EnableFullLifecycle mocks base method. +func (m *MockPackagesClient) EnableFullLifecycle(ctx context.Context, log logr.Logger, clusterName, kubeConfig string, chart *v1alpha10.Image, registry *registrymirror.RegistryMirror, options ...curatedpackages.PackageControllerClientOpt) error { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, log, clusterName, kubeConfig, chart, registry} + for _, a := range options { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "EnableFullLifecycle", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// EnableFullLifecycle indicates an expected call of EnableFullLifecycle. +func (mr *MockPackagesClientMockRecorder) EnableFullLifecycle(ctx, log, clusterName, kubeConfig, chart, registry interface{}, options ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, log, clusterName, kubeConfig, chart, registry}, options...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableFullLifecycle", reflect.TypeOf((*MockPackagesClient)(nil).EnableFullLifecycle), varargs...) +} + +// Reconcile mocks base method. 
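In withPackageControllerClient above, the same Helm dependency is handed to the constructor as both the chart installer and the chart uninstaller. A rough compile-time sketch of why that works, assuming f.deps.Helm is the *executables.Helm wrapper (the exact type is not shown in this section of the diff):

```go
package curatedpackages_test

import (
	"github.com/aws/eks-anywhere/pkg/curatedpackages"
	"github.com/aws/eks-anywhere/pkg/executables"
)

// One Helm wrapper is expected to satisfy both single-method interfaces,
// which is why the factory passes f.deps.Helm to the constructor twice.
var (
	_ curatedpackages.ChartInstaller   = (*executables.Helm)(nil)
	_ curatedpackages.ChartUninstaller = (*executables.Helm)(nil)
)
```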
+func (m *MockPackagesClient) Reconcile(arg0 context.Context, arg1 logr.Logger, arg2 client.Client, arg3 *v1alpha1.Cluster) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Reconcile", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// Reconcile indicates an expected call of Reconcile. +func (mr *MockPackagesClientMockRecorder) Reconcile(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reconcile", reflect.TypeOf((*MockPackagesClient)(nil).Reconcile), arg0, arg1, arg2, arg3) +} + +// ReconcileDelete mocks base method. +func (m *MockPackagesClient) ReconcileDelete(arg0 context.Context, arg1 logr.Logger, arg2 curatedpackages.KubeDeleter, arg3 *v1alpha1.Cluster) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReconcileDelete", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReconcileDelete indicates an expected call of ReconcileDelete. +func (mr *MockPackagesClientMockRecorder) ReconcileDelete(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReconcileDelete", reflect.TypeOf((*MockPackagesClient)(nil).ReconcileDelete), arg0, arg1, arg2, arg3) +} + // MockProviderClusterReconcilerRegistry is a mock of ProviderClusterReconcilerRegistry interface. type MockProviderClusterReconcilerRegistry struct { ctrl *gomock.Controller diff --git a/pkg/api/v1alpha1/cluster_types.go b/pkg/api/v1alpha1/cluster_types.go index 2cd1983c7b15..b71300b93c70 100644 --- a/pkg/api/v1alpha1/cluster_types.go +++ b/pkg/api/v1alpha1/cluster_types.go @@ -70,6 +70,12 @@ func (c *Cluster) HasAWSIamConfig() bool { return false } +// IsPackagesEnabled checks if the user has opted out of curated packages +// installation. +func (c *Cluster) IsPackagesEnabled() bool { + return c.Spec.Packages == nil || !c.Spec.Packages.Disable +} + func (n *Cluster) Equal(o *Cluster) bool { if n == o { return true diff --git a/pkg/curatedpackages/mocks/installer.go b/pkg/curatedpackages/mocks/installer.go index efcf494f3fc4..3ef93795b8a1 100644 --- a/pkg/curatedpackages/mocks/installer.go +++ b/pkg/curatedpackages/mocks/installer.go @@ -9,8 +9,48 @@ import ( reflect "reflect" gomock "github.com/golang/mock/gomock" + types "k8s.io/apimachinery/pkg/types" + client "sigs.k8s.io/controller-runtime/pkg/client" ) +// MockClientBuilder is a mock of ClientBuilder interface. +type MockClientBuilder struct { + ctrl *gomock.Controller + recorder *MockClientBuilderMockRecorder +} + +// MockClientBuilderMockRecorder is the mock recorder for MockClientBuilder. +type MockClientBuilderMockRecorder struct { + mock *MockClientBuilder +} + +// NewMockClientBuilder creates a new mock instance. +func NewMockClientBuilder(ctrl *gomock.Controller) *MockClientBuilder { + mock := &MockClientBuilder{ctrl: ctrl} + mock.recorder = &MockClientBuilderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockClientBuilder) EXPECT() *MockClientBuilderMockRecorder { + return m.recorder +} + +// GetClient mocks base method. +func (m *MockClientBuilder) GetClient(arg0 context.Context, arg1 types.NamespacedName) (client.Client, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetClient", arg0, arg1) + ret0, _ := ret[0].(client.Client) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetClient indicates an expected call of GetClient. 
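IsPackagesEnabled treats a missing Packages block as opted in, so the controller only skips installation on an explicit Disable. A small illustrative test (not part of this change) spelling out the three states:

```go
package v1alpha1_test

import (
	"testing"

	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)

// Illustration only: the states IsPackagesEnabled distinguishes.
func TestIsPackagesEnabledStates(t *testing.T) {
	c := &anywherev1.Cluster{}
	if !c.IsPackagesEnabled() { // no Packages block at all: enabled by default
		t.Error("expected packages to default to enabled")
	}

	c.Spec.Packages = &anywherev1.PackageConfiguration{Disable: false}
	if !c.IsPackagesEnabled() { // block present but not disabled: still enabled
		t.Error("expected packages to remain enabled")
	}

	c.Spec.Packages = &anywherev1.PackageConfiguration{Disable: true}
	if c.IsPackagesEnabled() { // explicit opt-out
		t.Error("expected packages to be disabled")
	}
}
```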
+func (mr *MockClientBuilderMockRecorder) GetClient(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClient", reflect.TypeOf((*MockClientBuilder)(nil).GetClient), arg0, arg1) +} + // MockChartInstaller is a mock of ChartInstaller interface. type MockChartInstaller struct { ctrl *gomock.Controller @@ -35,15 +75,94 @@ func (m *MockChartInstaller) EXPECT() *MockChartInstallerMockRecorder { } // InstallChart mocks base method. -func (m *MockChartInstaller) InstallChart(ctx context.Context, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath string, values []string) error { +func (m *MockChartInstaller) InstallChart(ctx context.Context, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath string, skipCRDs bool, values []string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "InstallChart", ctx, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath, values) + ret := m.ctrl.Call(m, "InstallChart", ctx, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath, skipCRDs, values) ret0, _ := ret[0].(error) return ret0 } // InstallChart indicates an expected call of InstallChart. -func (mr *MockChartInstallerMockRecorder) InstallChart(ctx, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath, values interface{}) *gomock.Call { +func (mr *MockChartInstallerMockRecorder) InstallChart(ctx, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath, skipCRDs, values interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallChart", reflect.TypeOf((*MockChartInstaller)(nil).InstallChart), ctx, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath, skipCRDs, values) +} + +// MockChartUninstaller is a mock of ChartUninstaller interface. +type MockChartUninstaller struct { + ctrl *gomock.Controller + recorder *MockChartUninstallerMockRecorder +} + +// MockChartUninstallerMockRecorder is the mock recorder for MockChartUninstaller. +type MockChartUninstallerMockRecorder struct { + mock *MockChartUninstaller +} + +// NewMockChartUninstaller creates a new mock instance. +func NewMockChartUninstaller(ctrl *gomock.Controller) *MockChartUninstaller { + mock := &MockChartUninstaller{ctrl: ctrl} + mock.recorder = &MockChartUninstallerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockChartUninstaller) EXPECT() *MockChartUninstallerMockRecorder { + return m.recorder +} + +// Delete mocks base method. +func (m *MockChartUninstaller) Delete(ctx context.Context, kubeconfigFilePath, installName, namespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", ctx, kubeconfigFilePath, installName, namespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockChartUninstallerMockRecorder) Delete(ctx, kubeconfigFilePath, installName, namespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockChartUninstaller)(nil).Delete), ctx, kubeconfigFilePath, installName, namespace) +} + +// MockKubeDeleter is a mock of KubeDeleter interface. +type MockKubeDeleter struct { + ctrl *gomock.Controller + recorder *MockKubeDeleterMockRecorder +} + +// MockKubeDeleterMockRecorder is the mock recorder for MockKubeDeleter. 
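The new MockChartUninstaller lets tests pin down the helm uninstall that ReconcileDelete issues. A sketch of the call shape is below; the kubeconfig path and cluster name are placeholders, not values taken from this change.

```go
package curatedpackages_test

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"

	"github.com/aws/eks-anywhere/pkg/curatedpackages/mocks"
)

// Illustrative only: the uninstall call made when a workload cluster's
// packages installation is removed.
func TestChartUninstallerCallShape(t *testing.T) {
	ctrl := gomock.NewController(t)
	uninstaller := mocks.NewMockChartUninstaller(ctrl)
	uninstaller.EXPECT().
		Delete(gomock.Any(), "kubeconfig.kubeconfig", "eks-anywhere-packages-my-workload-cluster", "eksa-packages").
		Return(nil)

	// Release name is "eks-anywhere-packages-<cluster>", deleted from the
	// shared eksa-packages namespace.
	if err := uninstaller.Delete(context.Background(), "kubeconfig.kubeconfig",
		"eks-anywhere-packages-my-workload-cluster", "eksa-packages"); err != nil {
		t.Fatal(err)
	}
}
```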
+type MockKubeDeleterMockRecorder struct { + mock *MockKubeDeleter +} + +// NewMockKubeDeleter creates a new mock instance. +func NewMockKubeDeleter(ctrl *gomock.Controller) *MockKubeDeleter { + mock := &MockKubeDeleter{ctrl: ctrl} + mock.recorder = &MockKubeDeleterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockKubeDeleter) EXPECT() *MockKubeDeleterMockRecorder { + return m.recorder +} + +// Delete mocks base method. +func (m *MockKubeDeleter) Delete(arg0 context.Context, arg1 client.Object, arg2 ...client.DeleteOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Delete", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockKubeDeleterMockRecorder) Delete(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallChart", reflect.TypeOf((*MockChartInstaller)(nil).InstallChart), ctx, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath, values) + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockKubeDeleter)(nil).Delete), varargs...) } diff --git a/pkg/curatedpackages/mocks/kube_client.go b/pkg/curatedpackages/mocks/kube_client.go new file mode 100644 index 000000000000..6bae74146c97 --- /dev/null +++ b/pkg/curatedpackages/mocks/kube_client.go @@ -0,0 +1,214 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: sigs.k8s.io/controller-runtime/pkg/client (interfaces: Client) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + meta "k8s.io/apimachinery/pkg/api/meta" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// MockKubeClient is a mock of Client interface. +type MockKubeClient struct { + ctrl *gomock.Controller + recorder *MockKubeClientMockRecorder +} + +// MockKubeClientMockRecorder is the mock recorder for MockKubeClient. +type MockKubeClientMockRecorder struct { + mock *MockKubeClient +} + +// NewMockKubeClient creates a new mock instance. +func NewMockKubeClient(ctrl *gomock.Controller) *MockKubeClient { + mock := &MockKubeClient{ctrl: ctrl} + mock.recorder = &MockKubeClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockKubeClient) EXPECT() *MockKubeClientMockRecorder { + return m.recorder +} + +// Create mocks base method. +func (m *MockKubeClient) Create(arg0 context.Context, arg1 client.Object, arg2 ...client.CreateOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Create", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Create indicates an expected call of Create. +func (mr *MockKubeClientMockRecorder) Create(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockKubeClient)(nil).Create), varargs...) +} + +// Delete mocks base method. 
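ReconcileDelete asks for the narrow KubeDeleter rather than a full client.Client, so the reconciler can pass its cached client while tests only need the small MockKubeDeleter above. A one-line illustrative compile check (not part of the change):

```go
package curatedpackages_test

import (
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/aws/eks-anywhere/pkg/curatedpackages"
)

// Any controller-runtime client already satisfies the narrower interface.
var _ curatedpackages.KubeDeleter = client.Client(nil)
```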
+func (m *MockKubeClient) Delete(arg0 context.Context, arg1 client.Object, arg2 ...client.DeleteOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Delete", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockKubeClientMockRecorder) Delete(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockKubeClient)(nil).Delete), varargs...) +} + +// DeleteAllOf mocks base method. +func (m *MockKubeClient) DeleteAllOf(arg0 context.Context, arg1 client.Object, arg2 ...client.DeleteAllOfOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteAllOf", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteAllOf indicates an expected call of DeleteAllOf. +func (mr *MockKubeClientMockRecorder) DeleteAllOf(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAllOf", reflect.TypeOf((*MockKubeClient)(nil).DeleteAllOf), varargs...) +} + +// Get mocks base method. +func (m *MockKubeClient) Get(arg0 context.Context, arg1 types.NamespacedName, arg2 client.Object, arg3 ...client.GetOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Get", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Get indicates an expected call of Get. +func (mr *MockKubeClientMockRecorder) Get(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockKubeClient)(nil).Get), varargs...) +} + +// List mocks base method. +func (m *MockKubeClient) List(arg0 context.Context, arg1 client.ObjectList, arg2 ...client.ListOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "List", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// List indicates an expected call of List. +func (mr *MockKubeClientMockRecorder) List(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockKubeClient)(nil).List), varargs...) +} + +// Patch mocks base method. +func (m *MockKubeClient) Patch(arg0 context.Context, arg1 client.Object, arg2 client.Patch, arg3 ...client.PatchOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Patch", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Patch indicates an expected call of Patch. +func (mr *MockKubeClientMockRecorder) Patch(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Patch", reflect.TypeOf((*MockKubeClient)(nil).Patch), varargs...) +} + +// RESTMapper mocks base method. +func (m *MockKubeClient) RESTMapper() meta.RESTMapper { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RESTMapper") + ret0, _ := ret[0].(meta.RESTMapper) + return ret0 +} + +// RESTMapper indicates an expected call of RESTMapper. +func (mr *MockKubeClientMockRecorder) RESTMapper() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RESTMapper", reflect.TypeOf((*MockKubeClient)(nil).RESTMapper)) +} + +// Scheme mocks base method. +func (m *MockKubeClient) Scheme() *runtime.Scheme { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Scheme") + ret0, _ := ret[0].(*runtime.Scheme) + return ret0 +} + +// Scheme indicates an expected call of Scheme. +func (mr *MockKubeClientMockRecorder) Scheme() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Scheme", reflect.TypeOf((*MockKubeClient)(nil).Scheme)) +} + +// Status mocks base method. +func (m *MockKubeClient) Status() client.StatusWriter { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Status") + ret0, _ := ret[0].(client.StatusWriter) + return ret0 +} + +// Status indicates an expected call of Status. +func (mr *MockKubeClientMockRecorder) Status() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Status", reflect.TypeOf((*MockKubeClient)(nil).Status)) +} + +// Update mocks base method. +func (m *MockKubeClient) Update(arg0 context.Context, arg1 client.Object, arg2 ...client.UpdateOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Update", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Update indicates an expected call of Update. +func (mr *MockKubeClientMockRecorder) Update(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockKubeClient)(nil).Update), varargs...) +} diff --git a/pkg/curatedpackages/mocks/packageinstaller.go b/pkg/curatedpackages/mocks/packageinstaller.go index a36ef0ec3c24..50609050885b 100644 --- a/pkg/curatedpackages/mocks/packageinstaller.go +++ b/pkg/curatedpackages/mocks/packageinstaller.go @@ -34,18 +34,18 @@ func (m *MockPackageController) EXPECT() *MockPackageControllerMockRecorder { return m.recorder } -// EnableCuratedPackages mocks base method. -func (m *MockPackageController) EnableCuratedPackages(ctx context.Context) error { +// Enable mocks base method. +func (m *MockPackageController) Enable(ctx context.Context) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "EnableCuratedPackages", ctx) + ret := m.ctrl.Call(m, "Enable", ctx) ret0, _ := ret[0].(error) return ret0 } -// EnableCuratedPackages indicates an expected call of EnableCuratedPackages. -func (mr *MockPackageControllerMockRecorder) EnableCuratedPackages(ctx interface{}) *gomock.Call { +// Enable indicates an expected call of Enable. 
+func (mr *MockPackageControllerMockRecorder) Enable(ctx interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableCuratedPackages", reflect.TypeOf((*MockPackageController)(nil).EnableCuratedPackages), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enable", reflect.TypeOf((*MockPackageController)(nil).Enable), ctx) } // IsInstalled mocks base method. diff --git a/pkg/curatedpackages/packagecontrollerclient.go b/pkg/curatedpackages/packagecontrollerclient.go index 18c6f4f4cc96..fe375bd753e3 100644 --- a/pkg/curatedpackages/packagecontrollerclient.go +++ b/pkg/curatedpackages/packagecontrollerclient.go @@ -7,10 +7,19 @@ import ( "fmt" "strconv" "strings" + "sync" "time" + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + packagesv1 "github.com/aws/eks-anywhere-packages/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" + anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/config" "github.com/aws/eks-anywhere/pkg/constants" @@ -32,9 +41,11 @@ const ( type PackageControllerClientOpt func(client *PackageControllerClient) type PackageControllerClient struct { - kubeConfig string - chart *releasev1.Image - chartInstaller ChartInstaller + kubeConfig string + chart *releasev1.Image + chartInstaller ChartInstaller + // uninstaller of helm charts. + uninstaller ChartUninstaller clusterName string clusterSpec *v1alpha1.ClusterSpec managementClusterName string @@ -49,19 +60,104 @@ type PackageControllerClient struct { // activeBundleTimeout is the timeout to activate a bundle on installation. activeBundleTimeout time.Duration valuesFileWriter filewriter.FileWriter + // skipWaitForPackageBundle indicates whether the installer should wait + // until a package bundle is activated. + // + // Skipping the wait is desirable for full cluster lifecycle use cases, + // where resource creation and error reporting are asynchronous in nature. + skipWaitForPackageBundle bool + // tracker creates k8s clients for workload clusters managed via full + // cluster lifecycle API. + clientBuilder ClientBuilder + + // mu provides some thread-safety. + mu sync.Mutex +} + +// ClientBuilder returns a k8s client for the specified cluster. +type ClientBuilder interface { + GetClient(context.Context, types.NamespacedName) (client.Client, error) } type ChartInstaller interface { - InstallChart(ctx context.Context, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath string, values []string) error + InstallChart(ctx context.Context, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath string, skipCRDs bool, values []string) error +} + +// ChartUninstaller handles deleting chart installations. +type ChartUninstaller interface { + Delete(ctx context.Context, kubeconfigFilePath, installName, namespace string) error +} + +// NewPackageControllerClientFullLifecycle creates a PackageControllerClient +// for the Full Cluster Lifecycle controller. +// +// It differs because the CLI use case has far more information available at +// instantiation, while the FCL use case has less information at +// instantiation, and the rest when cluster creation is triggered. 
+func NewPackageControllerClientFullLifecycle(logger logr.Logger, chartInstaller ChartInstaller, uninstaller ChartUninstaller, kubectl KubectlRunner, clientBuilder ClientBuilder) *PackageControllerClient { + return &PackageControllerClient{ + chartInstaller: chartInstaller, + uninstaller: uninstaller, + kubectl: kubectl, + skipWaitForPackageBundle: true, + eksaRegion: eksaDefaultRegion, + clientBuilder: clientBuilder, + } +} + +// EnableFullLifecycle wraps Enable to handle run-time arguments. +// +// This method fills in the gaps between the original CLI use case, where all +// information is known at PackageControllerClient initialization, and the +// Full Cluster Lifecycle use case, where there's limited information at +// initialization. Basically any parameter here isn't known at instantiation +// of the PackageControllerClient during full cluster lifecycle usage, hence +// why this method exists. +func (pc *PackageControllerClient) EnableFullLifecycle(ctx context.Context, log logr.Logger, clusterName, kubeConfig string, chart *releasev1.Image, registryMirror *registrymirror.RegistryMirror, options ...PackageControllerClientOpt) (err error) { + log.V(6).Info("enabling curated packages full lifecycle") + defer func(err *error) { + if err != nil && *err != nil { + log.Error(*err, "Enabling curated packages full lifecycle", "clusterName", clusterName) + } else { + log.Info("Successfully enabled curated packages full lifecycle") + } + }(&err) + pc.mu.Lock() + // This anonymous function ensures that the pc.mu is unlocked before + // Enable is called, preventing deadlocks in the event that Enable tries + // to acquire pc.mu. + err = func() error { + defer pc.mu.Unlock() + pc.skipWaitForPackageBundle = true + pc.clusterName = clusterName + pc.kubeConfig = kubeConfig + pc.chart = chart + pc.registryMirror = registryMirror + writer, err := filewriter.NewWriter(clusterName) + if err != nil { + return fmt.Errorf("creating file writer for helm values: %w", err) + } + options = append(options, WithValuesFileWriter(writer)) + for _, o := range options { + o(pc) + } + return nil + }() + if err != nil { + return err + } + + return pc.Enable(ctx) } // NewPackageControllerClient instantiates a new instance of PackageControllerClient. 
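Pulling those two entry points together, here is a rough usage sketch of the full-lifecycle path; the helm, uninstaller, kubectl, clientBuilder, chart, and cluster parameters are hypothetical placeholders standing in for the values the factory and reconciler supply.

```go
package sketch

import (
	"context"

	"github.com/go-logr/logr"

	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/curatedpackages"
	"github.com/aws/eks-anywhere/pkg/registrymirror"
	releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)

// enablePackagesSketch mirrors the controller path: construct once with the
// long-lived dependencies, then supply the per-cluster details at reconcile
// time. All parameters here are placeholders.
func enablePackagesSketch(ctx context.Context, log logr.Logger,
	helm curatedpackages.ChartInstaller, uninstaller curatedpackages.ChartUninstaller,
	kubectl curatedpackages.KubectlRunner, clientBuilder curatedpackages.ClientBuilder,
	chart *releasev1.Image, cluster *anywherev1.Cluster,
) error {
	pcc := curatedpackages.NewPackageControllerClientFullLifecycle(log, helm, uninstaller, kubectl, clientBuilder)

	// An empty kubeconfig path is intentional in the controller case: helm
	// reads its configuration from the environment (see Reconcile below).
	return pcc.EnableFullLifecycle(ctx, log, cluster.Name, "", chart, registrymirror.FromCluster(cluster))
}
```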
-func NewPackageControllerClient(chartInstaller ChartInstaller, kubectl KubectlRunner, clusterName string, kubeConfig string, chart *releasev1.Image, registryMirror *registrymirror.RegistryMirror, options ...PackageControllerClientOpt) *PackageControllerClient { +func NewPackageControllerClient(chartInstaller ChartInstaller, uninstaller ChartUninstaller, kubectl KubectlRunner, clusterName, kubeConfig string, chart *releasev1.Image, registryMirror *registrymirror.RegistryMirror, options ...PackageControllerClientOpt) *PackageControllerClient { pcc := &PackageControllerClient{ kubeConfig: kubeConfig, clusterName: clusterName, chart: chart, chartInstaller: chartInstaller, + uninstaller: uninstaller, kubectl: kubectl, registryMirror: registryMirror, eksaRegion: eksaDefaultRegion, @@ -73,7 +169,8 @@ func NewPackageControllerClient(chartInstaller ChartInstaller, kubectl KubectlRu return pcc } -// EnableCuratedPackages enables curated packages in a cluster +// Enable curated packages in a cluster +// // In case the cluster is management cluster, it performs the following actions: // - Installation of Package Controller through helm chart installation // - Creation of secret credentials @@ -82,7 +179,7 @@ func NewPackageControllerClient(chartInstaller ChartInstaller, kubectl KubectlRu // // In case the cluster is a workload cluster, it performs the following actions: // - Creation of package bundle controller (PBC) custom resource in management cluster -func (pc *PackageControllerClient) EnableCuratedPackages(ctx context.Context) error { +func (pc *PackageControllerClient) Enable(ctx context.Context) error { ociURI := fmt.Sprintf("%s%s", "oci://", pc.registryMirror.ReplaceRegistry(pc.chart.Image())) clusterName := fmt.Sprintf("clusterName=%s", pc.clusterName) sourceRegistry, defaultRegistry, defaultImageRegistry := pc.GetCuratedPackagesRegistries() @@ -110,17 +207,23 @@ func (pc *PackageControllerClient) EnableCuratedPackages(ctx context.Context) er return err } + skipCRDs := false chartName := pc.chart.Name if pc.managementClusterName != pc.clusterName { values = append(values, "workloadOnly=true") chartName = chartName + "-" + pc.clusterName + skipCRDs = true } - if err := pc.chartInstaller.InstallChart(ctx, chartName, ociURI, pc.chart.Tag(), pc.kubeConfig, "eksa-packages", valueFilePath, values); err != nil { + if err := pc.chartInstaller.InstallChart(ctx, chartName, ociURI, pc.chart.Tag(), pc.kubeConfig, constants.EksaPackagesName, valueFilePath, skipCRDs, values); err != nil { return err } - return pc.waitForActiveBundle(ctx) + if !pc.skipWaitForPackageBundle { + return pc.waitForActiveBundle(ctx) + } + + return nil } // GetCuratedPackagesRegistries gets value for configurable registries from PBC. @@ -352,6 +455,73 @@ func (pc *PackageControllerClient) GetPackageControllerConfiguration() (result s return result, err } +// Reconcile installs resources when a full cluster lifecycle cluster is created. +func (pc *PackageControllerClient) Reconcile(ctx context.Context, logger logr.Logger, client client.Client, cluster *anywherev1.Cluster) error { + image, err := pc.getBundleFromCluster(ctx, client, cluster) + if err != nil { + return err + } + + registry := registrymirror.FromCluster(cluster) + + // No Kubeconfig is passed. This is intentional. The helm executable will + // get that configuration from its environment. 
+	if err := pc.EnableFullLifecycle(ctx, logger, cluster.Name, "", image, registry); err != nil {
+		return fmt.Errorf("packages client error: %w", err)
+	}
+
+	return nil
+}
+
+// getBundleFromCluster returns the package controller chart from the
+// cluster's Bundles resource, selected by the cluster's k8s version.
+func (pc *PackageControllerClient) getBundleFromCluster(ctx context.Context, client client.Client, clusterObj *anywherev1.Cluster) (*releasev1.Image, error) {
+	bundles := &releasev1.Bundles{}
+	nn := types.NamespacedName{
+		Name:      clusterObj.Spec.BundlesRef.Name,
+		Namespace: clusterObj.Spec.BundlesRef.Namespace,
+	}
+	if err := client.Get(ctx, nn, bundles); err != nil {
+		return nil, fmt.Errorf("retrieving bundle: %w", err)
+	}
+
+	verBundle, err := cluster.GetVersionsBundle(clusterObj, bundles)
+	if err != nil {
+		return nil, err
+	}
+
+	return &verBundle.PackageController.HelmChart, nil
+}
+
+// KubeDeleter abstracts client.Client so mocks can be substituted in tests.
+type KubeDeleter interface {
+	Delete(context.Context, client.Object, ...client.DeleteOption) error
+}
+
+// ReconcileDelete removes resources after a full cluster lifecycle cluster is
+// deleted.
+func (pc *PackageControllerClient) ReconcileDelete(ctx context.Context, logger logr.Logger, client KubeDeleter, cluster *anywherev1.Cluster) error {
+	namespace := "eksa-packages-" + cluster.Name
+	ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
+	if err := client.Delete(ctx, ns); err != nil {
+		if !apierrors.IsNotFound(err) {
+			return fmt.Errorf("deleting workload cluster curated packages namespace %q: %w", namespace, err)
+		}
+		logger.V(6).Info("not found", "namespace", namespace)
+	}
+
+	name := "eks-anywhere-packages-" + pc.clusterName
+	if err := pc.uninstaller.Delete(ctx, pc.kubeConfig, name, constants.EksaPackagesName); err != nil {
+		if !strings.Contains(err.Error(), "release: not found") {
+			return err
+		}
+		logger.V(6).Info("not found", "release", name)
+	}
+
+	logger.Info("Removed curated packages installation", "clusterName", cluster.Name)
+
+	return nil
+}
+
 func WithEksaAccessKeyId(eksaAccessKeyId string) func(client *PackageControllerClient) {
 	return func(config *PackageControllerClient) {
 		config.eksaAccessKeyID = eksaAccessKeyId
diff --git a/pkg/curatedpackages/packagecontrollerclient_test.go b/pkg/curatedpackages/packagecontrollerclient_test.go
index 08d51eb6f6a7..55909177a3ab 100644
--- a/pkg/curatedpackages/packagecontrollerclient_test.go
+++ b/pkg/curatedpackages/packagecontrollerclient_test.go
@@ -11,12 +11,20 @@ import (
 	"testing"
 	"time"
 
+	"github.com/go-logr/logr/testr"
 	"github.com/golang/mock/gomock"
 	.
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client/fake" packagesv1 "github.com/aws/eks-anywhere-packages/api/v1alpha1" "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" + anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/curatedpackages" @@ -37,6 +45,7 @@ type packageControllerTest struct { ctx context.Context kubectl *mocks.MockKubectlRunner chartInstaller *mocks.MockChartInstaller + uninstaller *mocks.MockChartUninstaller command *curatedpackages.PackageControllerClient clusterName string kubeConfig string @@ -56,6 +65,7 @@ func newPackageControllerTests(t *testing.T) []*packageControllerTest { ctrl := gomock.NewController(t) k := mocks.NewMockKubectlRunner(ctrl) ci := mocks.NewMockChartInstaller(ctrl) + del := mocks.NewMockChartUninstaller(ctrl) kubeConfig := "kubeconfig.kubeconfig" chart := &artifactsv1.Image{ Name: "test_controller", @@ -108,7 +118,7 @@ func newPackageControllerTests(t *testing.T) []*packageControllerTest { kubectl: k, chartInstaller: ci, command: curatedpackages.NewPackageControllerClient( - ci, k, clusterName, kubeConfig, chart, registryMirror, + ci, del, k, clusterName, kubeConfig, chart, registryMirror, curatedpackages.WithEksaSecretAccessKey(eksaAccessKey), curatedpackages.WithEksaRegion(eksaRegion), curatedpackages.WithEksaAccessKeyId(eksaAccessId), @@ -135,7 +145,7 @@ func newPackageControllerTests(t *testing.T) []*packageControllerTest { kubectl: k, chartInstaller: ci, command: curatedpackages.NewPackageControllerClient( - ci, k, clusterName, kubeConfig, chart, + ci, del, k, clusterName, kubeConfig, chart, nil, curatedpackages.WithEksaSecretAccessKey(eksaAccessKey), curatedpackages.WithEksaRegion(eksaRegion), @@ -162,7 +172,7 @@ func newPackageControllerTests(t *testing.T) []*packageControllerTest { kubectl: k, chartInstaller: ci, command: curatedpackages.NewPackageControllerClient( - ci, k, clusterName, kubeConfig, chartDev, + ci, del, k, clusterName, kubeConfig, chartDev, nil, curatedpackages.WithEksaSecretAccessKey(eksaAccessKey), curatedpackages.WithEksaRegion(eksaRegion), @@ -189,7 +199,7 @@ func newPackageControllerTests(t *testing.T) []*packageControllerTest { kubectl: k, chartInstaller: ci, command: curatedpackages.NewPackageControllerClient( - ci, k, clusterName, kubeConfig, chartStaging, + ci, del, k, clusterName, kubeConfig, chartStaging, nil, curatedpackages.WithEksaSecretAccessKey(eksaAccessKey), curatedpackages.WithEksaRegion(eksaRegion), @@ -216,7 +226,7 @@ func newPackageControllerTests(t *testing.T) []*packageControllerTest { kubectl: k, chartInstaller: ci, command: curatedpackages.NewPackageControllerClient( - ci, k, clusterName, kubeConfig, chart, registryMirrorInsecure, + ci, del, k, clusterName, kubeConfig, chart, registryMirrorInsecure, curatedpackages.WithManagementClusterName(clusterName), curatedpackages.WithValuesFileWriter(writer), ), @@ -239,7 +249,7 @@ func newPackageControllerTests(t *testing.T) []*packageControllerTest { kubectl: k, chartInstaller: ci, command: curatedpackages.NewPackageControllerClient( - ci, k, clusterName, kubeConfig, chart, nil, + ci, del, k, clusterName, kubeConfig, chart, nil, 
curatedpackages.WithManagementClusterName(clusterName), curatedpackages.WithValuesFileWriter(writer), ), @@ -259,7 +269,7 @@ func newPackageControllerTests(t *testing.T) []*packageControllerTest { } } -func TestEnableCuratedPackagesSuccess(t *testing.T) { +func TestEnableSuccess(t *testing.T) { for _, tt := range newPackageControllerTests(t) { clusterName := fmt.Sprintf("clusterName=%s", "billy") valueFilePath := filepath.Join("billy", filewriter.DefaultTmpFolder, valueFileName) @@ -276,7 +286,7 @@ func TestEnableCuratedPackagesSuccess(t *testing.T) { if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil { values = append(values, "cronjob.suspend=true") } - tt.chartInstaller.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, "eksa-packages", valueFilePath, values).Return(nil) + tt.chartInstaller.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, false, values).Return(nil) tt.kubectl.EXPECT(). GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). DoAndReturn(getPBCSuccess(t)). @@ -286,17 +296,17 @@ func TestEnableCuratedPackagesSuccess(t *testing.T) { DoAndReturn(func(_, _, _, _, _ interface{}) (bool, error) { return true, nil }). AnyTimes() - err := tt.command.EnableCuratedPackages(tt.ctx) + err := tt.command.Enable(tt.ctx) if err != nil { t.Errorf("Install Controller Should succeed when installation passes") } } } -func TestEnableCuratedPackagesSucceedInWorkloadCluster(t *testing.T) { +func TestEnableSucceedInWorkloadCluster(t *testing.T) { for _, tt := range newPackageControllerTests(t) { tt.command = curatedpackages.NewPackageControllerClient( - tt.chartInstaller, tt.kubectl, tt.clusterName, tt.kubeConfig, tt.chart, + tt.chartInstaller, tt.uninstaller, tt.kubectl, tt.clusterName, tt.kubeConfig, tt.chart, tt.registryMirror, curatedpackages.WithEksaSecretAccessKey(tt.eksaAccessKey), curatedpackages.WithEksaRegion("us-west-2"), @@ -320,7 +330,7 @@ func TestEnableCuratedPackagesSucceedInWorkloadCluster(t *testing.T) { values = append(values, "cronjob.suspend=true") } values = append(values, "workloadOnly=true") - tt.chartInstaller.EXPECT().InstallChart(tt.ctx, tt.chart.Name+"-billy", ociURI, tt.chart.Tag(), tt.kubeConfig, "eksa-packages", valueFilePath, values).Return(nil) + tt.chartInstaller.EXPECT().InstallChart(tt.ctx, tt.chart.Name+"-billy", ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, true, values).Return(nil) tt.kubectl.EXPECT(). GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). DoAndReturn(getPBCSuccess(t)). @@ -330,7 +340,7 @@ func TestEnableCuratedPackagesSucceedInWorkloadCluster(t *testing.T) { DoAndReturn(func(_, _, _, _, _ interface{}) (bool, error) { return true, nil }). 
AnyTimes() - err := tt.command.EnableCuratedPackages(tt.ctx) + err := tt.command.Enable(tt.ctx) tt.Expect(err).To(BeNil()) } } @@ -353,10 +363,10 @@ func getPBCFail(t *testing.T) func(context.Context, string, string, string, stri } } -func TestEnableCuratedPackagesWithProxy(t *testing.T) { +func TestEnableWithProxy(t *testing.T) { for _, tt := range newPackageControllerTests(t) { tt.command = curatedpackages.NewPackageControllerClient( - tt.chartInstaller, tt.kubectl, "billy", tt.kubeConfig, tt.chart, + tt.chartInstaller, tt.uninstaller, tt.kubectl, "billy", tt.kubeConfig, tt.chart, tt.registryMirror, curatedpackages.WithEksaSecretAccessKey(tt.eksaAccessKey), curatedpackages.WithEksaRegion(tt.eksaRegion), @@ -390,7 +400,7 @@ func TestEnableCuratedPackagesWithProxy(t *testing.T) { if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil { values = append(values, "cronjob.suspend=true") } - tt.chartInstaller.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, "eksa-packages", valueFilePath, values).Return(nil) + tt.chartInstaller.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, false, values).Return(nil) tt.kubectl.EXPECT(). GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). DoAndReturn(getPBCSuccess(t)). @@ -400,17 +410,17 @@ func TestEnableCuratedPackagesWithProxy(t *testing.T) { DoAndReturn(func(_, _, _, _, _ interface{}) (bool, error) { return true, nil }). AnyTimes() - err := tt.command.EnableCuratedPackages(tt.ctx) + err := tt.command.Enable(tt.ctx) if err != nil { t.Errorf("Install Controller Should succeed when installation passes") } } } -func TestEnableCuratedPackagesWithEmptyProxy(t *testing.T) { +func TestEnableWithEmptyProxy(t *testing.T) { for _, tt := range newPackageControllerTests(t) { tt.command = curatedpackages.NewPackageControllerClient( - tt.chartInstaller, tt.kubectl, "billy", tt.kubeConfig, tt.chart, + tt.chartInstaller, tt.uninstaller, tt.kubectl, "billy", tt.kubeConfig, tt.chart, tt.registryMirror, curatedpackages.WithEksaSecretAccessKey(tt.eksaAccessKey), curatedpackages.WithEksaRegion(tt.eksaRegion), @@ -441,7 +451,7 @@ func TestEnableCuratedPackagesWithEmptyProxy(t *testing.T) { if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil { values = append(values, "cronjob.suspend=true") } - tt.chartInstaller.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, "eksa-packages", valueFilePath, values).Return(nil) + tt.chartInstaller.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, false, values).Return(nil) tt.kubectl.EXPECT(). GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). DoAndReturn(getPBCSuccess(t)). @@ -451,14 +461,14 @@ func TestEnableCuratedPackagesWithEmptyProxy(t *testing.T) { DoAndReturn(func(_, _, _, _, _ interface{}) (bool, error) { return true, nil }). 
AnyTimes() - err := tt.command.EnableCuratedPackages(tt.ctx) + err := tt.command.Enable(tt.ctx) if err != nil { t.Errorf("Install Controller Should succeed when installation passes") } } } -func TestEnableCuratedPackagesFail(t *testing.T) { +func TestEnableFail(t *testing.T) { for _, tt := range newPackageControllerTests(t) { clusterName := fmt.Sprintf("clusterName=%s", "billy") valueFilePath := filepath.Join("billy", filewriter.DefaultTmpFolder, valueFileName) @@ -475,20 +485,20 @@ func TestEnableCuratedPackagesFail(t *testing.T) { if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil { values = append(values, "cronjob.suspend=true") } - tt.chartInstaller.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, "eksa-packages", valueFilePath, values).Return(errors.New("login failed")) + tt.chartInstaller.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, false, values).Return(errors.New("login failed")) tt.kubectl.EXPECT(). GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). DoAndReturn(getPBCSuccess(t)). AnyTimes() - err := tt.command.EnableCuratedPackages(tt.ctx) + err := tt.command.Enable(tt.ctx) if err == nil { t.Errorf("Install Controller Should fail when installation fails") } } } -func TestEnableCuratedPackagesFailNoActiveBundle(t *testing.T) { +func TestEnableFailNoActiveBundle(t *testing.T) { for _, tt := range newPackageControllerTests(t) { clusterName := fmt.Sprintf("clusterName=%s", "billy") valueFilePath := filepath.Join("billy", filewriter.DefaultTmpFolder, valueFileName) @@ -505,20 +515,20 @@ func TestEnableCuratedPackagesFailNoActiveBundle(t *testing.T) { if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil { values = append(values, "cronjob.suspend=true") } - tt.chartInstaller.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, "eksa-packages", valueFilePath, values).Return(nil) + tt.chartInstaller.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, false, values).Return(nil) tt.kubectl.EXPECT(). GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). DoAndReturn(getPBCFail(t)). AnyTimes() - err := tt.command.EnableCuratedPackages(tt.ctx) + err := tt.command.Enable(tt.ctx) if err == nil { t.Errorf("expected error, got nil") } } } -func TestEnableCuratedPackagesSuccessWhenCronJobFails(t *testing.T) { +func TestEnableSuccessWhenCronJobFails(t *testing.T) { for _, tt := range newPackageControllerTests(t) { clusterName := fmt.Sprintf("clusterName=%s", "billy") valueFilePath := filepath.Join("billy", filewriter.DefaultTmpFolder, valueFileName) @@ -535,7 +545,7 @@ func TestEnableCuratedPackagesSuccessWhenCronJobFails(t *testing.T) { if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil { values = append(values, "cronjob.suspend=true") } - tt.chartInstaller.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, "eksa-packages", valueFilePath, values).Return(nil) + tt.chartInstaller.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, false, values).Return(nil) tt.kubectl.EXPECT(). GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). DoAndReturn(getPBCSuccess(t)). 
@@ -545,7 +555,7 @@ func TestEnableCuratedPackagesSuccessWhenCronJobFails(t *testing.T) { DoAndReturn(func(_, _, _, _, _ interface{}) (bool, error) { return true, nil }). AnyTimes() - err := tt.command.EnableCuratedPackages(tt.ctx) + err := tt.command.Enable(tt.ctx) if err != nil { t.Errorf("Install Controller Should succeed when cron job fails") } @@ -576,10 +586,10 @@ func TestIsInstalledFalse(t *testing.T) { } } -func TestEnableCuratedPackagesActiveBundleCustomTimeout(t *testing.T) { +func TestEnableActiveBundleCustomTimeout(t *testing.T) { for _, tt := range newPackageControllerTests(t) { tt.command = curatedpackages.NewPackageControllerClient( - tt.chartInstaller, tt.kubectl, "billy", tt.kubeConfig, tt.chart, + tt.chartInstaller, tt.uninstaller, tt.kubectl, "billy", tt.kubeConfig, tt.chart, tt.registryMirror, curatedpackages.WithEksaSecretAccessKey(tt.eksaAccessKey), curatedpackages.WithEksaRegion(tt.eksaRegion), @@ -608,7 +618,7 @@ func TestEnableCuratedPackagesActiveBundleCustomTimeout(t *testing.T) { if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil { values = append(values, "cronjob.suspend=true") } - tt.chartInstaller.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, "eksa-packages", valueFilePath, values).Return(nil) + tt.chartInstaller.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, false, values).Return(nil) tt.kubectl.EXPECT(). GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). DoAndReturn(getPBCSuccess(t)). @@ -618,14 +628,14 @@ func TestEnableCuratedPackagesActiveBundleCustomTimeout(t *testing.T) { DoAndReturn(func(_, _, _, _, _ interface{}) (bool, error) { return true, nil }). AnyTimes() - err := tt.command.EnableCuratedPackages(tt.ctx) + err := tt.command.Enable(tt.ctx) if err != nil { t.Errorf("Install Controller Should succeed when installation passes") } } } -func TestEnableCuratedPackagesActiveBundleWaitLoops(t *testing.T) { +func TestEnableActiveBundleWaitLoops(t *testing.T) { for _, tt := range newPackageControllerTests(t) { clusterName := fmt.Sprintf("clusterName=%s", "billy") valueFilePath := filepath.Join("billy", filewriter.DefaultTmpFolder, valueFileName) @@ -642,7 +652,7 @@ func TestEnableCuratedPackagesActiveBundleWaitLoops(t *testing.T) { if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil { values = append(values, "cronjob.suspend=true") } - tt.chartInstaller.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, "eksa-packages", valueFilePath, values).Return(nil) + tt.chartInstaller.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, false, values).Return(nil) tt.kubectl.EXPECT(). GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). DoAndReturn(getPBCLoops(t, 3)). @@ -652,7 +662,7 @@ func TestEnableCuratedPackagesActiveBundleWaitLoops(t *testing.T) { DoAndReturn(func(_, _, _, _, _ interface{}) (bool, error) { return true, nil }). 
AnyTimes() - err := tt.command.EnableCuratedPackages(tt.ctx) + err := tt.command.Enable(tt.ctx) if err != nil { t.Errorf("expected no error, got %v", err) } @@ -675,10 +685,10 @@ func getPBCLoops(t *testing.T, loops int) func(context.Context, string, string, } } -func TestEnableCuratedPackagesActiveBundleTimesOut(t *testing.T) { +func TestEnableActiveBundleTimesOut(t *testing.T) { for _, tt := range newPackageControllerTests(t) { tt.command = curatedpackages.NewPackageControllerClient( - tt.chartInstaller, tt.kubectl, "billy", tt.kubeConfig, tt.chart, + tt.chartInstaller, tt.uninstaller, tt.kubectl, "billy", tt.kubeConfig, tt.chart, tt.registryMirror, curatedpackages.WithEksaSecretAccessKey(tt.eksaAccessKey), curatedpackages.WithEksaRegion(tt.eksaRegion), @@ -707,13 +717,13 @@ func TestEnableCuratedPackagesActiveBundleTimesOut(t *testing.T) { if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil { values = append(values, "cronjob.suspend=true") } - tt.chartInstaller.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, "eksa-packages", valueFilePath, values).Return(nil) + tt.chartInstaller.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, false, values).Return(nil) tt.kubectl.EXPECT(). GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). DoAndReturn(getPBCDelay(t, time.Second)). AnyTimes() - err := tt.command.EnableCuratedPackages(tt.ctx) + err := tt.command.Enable(tt.ctx) expectedErr := fmt.Errorf("timed out finding an active package bundle / eksa-packages-billy namespace for the current cluster: %v", context.DeadlineExceeded) if err.Error() != expectedErr.Error() { t.Errorf("expected %v, got %v", expectedErr, err) @@ -721,10 +731,10 @@ func TestEnableCuratedPackagesActiveBundleTimesOut(t *testing.T) { } } -func TestEnableCuratedPackagesActiveBundleNamespaceTimesOut(t *testing.T) { +func TestEnableActiveBundleNamespaceTimesOut(t *testing.T) { for _, tt := range newPackageControllerTests(t) { tt.command = curatedpackages.NewPackageControllerClient( - tt.chartInstaller, tt.kubectl, "billy", tt.kubeConfig, tt.chart, + tt.chartInstaller, tt.uninstaller, tt.kubectl, "billy", tt.kubeConfig, tt.chart, tt.registryMirror, curatedpackages.WithEksaSecretAccessKey(tt.eksaAccessKey), curatedpackages.WithEksaRegion(tt.eksaRegion), @@ -753,7 +763,7 @@ func TestEnableCuratedPackagesActiveBundleNamespaceTimesOut(t *testing.T) { if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil { values = append(values, "cronjob.suspend=true") } - tt.chartInstaller.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, "eksa-packages", valueFilePath, values).Return(nil) + tt.chartInstaller.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, false, values).Return(nil) tt.kubectl.EXPECT(). GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). DoAndReturn(getPBCSuccess(t)). @@ -763,7 +773,7 @@ func TestEnableCuratedPackagesActiveBundleNamespaceTimesOut(t *testing.T) { DoAndReturn(func(_, _, _, _, _ interface{}) (bool, error) { return false, nil }). 
AnyTimes() - err := tt.command.EnableCuratedPackages(tt.ctx) + err := tt.command.Enable(tt.ctx) expectedErr := fmt.Errorf("timed out finding an active package bundle / eksa-packages-billy namespace for the current cluster: %v", context.DeadlineExceeded) if err.Error() != expectedErr.Error() { t.Errorf("expected %v, got %v", expectedErr, err) @@ -810,7 +820,7 @@ func TestCreateHelmOverrideValuesYamlFail(t *testing.T) { func TestCreateHelmOverrideValuesYamlFailWithNoWriter(t *testing.T) { for _, tt := range newPackageControllerTests(t) { tt.command = curatedpackages.NewPackageControllerClient( - tt.chartInstaller, tt.kubectl, "billy", tt.kubeConfig, tt.chart, + tt.chartInstaller, tt.uninstaller, tt.kubectl, "billy", tt.kubeConfig, tt.chart, tt.registryMirror, curatedpackages.WithEksaSecretAccessKey(tt.eksaAccessKey), curatedpackages.WithEksaRegion(tt.eksaRegion), @@ -823,7 +833,7 @@ func TestCreateHelmOverrideValuesYamlFailWithNoWriter(t *testing.T) { t.Setenv("REGISTRY_PASSWORD", "password") } - err := tt.command.EnableCuratedPackages(tt.ctx) + err := tt.command.Enable(tt.ctx) expectedErr := fmt.Errorf("valuesFileWriter is nil") if err.Error() != expectedErr.Error() { t.Errorf("expected %v, got %v", expectedErr, err) @@ -836,7 +846,7 @@ func TestCreateHelmOverrideValuesYamlFailWithWriteError(t *testing.T) { writer := writermocks.NewMockFileWriter(ctrl) for _, tt := range newPackageControllerTests(t) { tt.command = curatedpackages.NewPackageControllerClient( - tt.chartInstaller, tt.kubectl, "billy", tt.kubeConfig, tt.chart, + tt.chartInstaller, tt.uninstaller, tt.kubectl, "billy", tt.kubeConfig, tt.chart, tt.registryMirror, curatedpackages.WithValuesFileWriter(writer), ) @@ -855,7 +865,7 @@ func TestCreateHelmOverrideValuesYamlFailWithWriteError(t *testing.T) { func TestGetPackageControllerConfigurationNil(t *testing.T) { g := NewWithT(t) - sut := curatedpackages.NewPackageControllerClient(nil, nil, "billy", "", nil, nil) + sut := curatedpackages.NewPackageControllerClient(nil, nil, nil, "billy", "", nil, nil) result, err := sut.GetPackageControllerConfiguration() g.Expect(result).To(Equal("")) g.Expect(err).To(BeNil()) @@ -892,7 +902,7 @@ func TestGetPackageControllerConfigurationAll(t *testing.T) { } cluster := cluster.Spec{Config: &cluster.Config{Cluster: &v1alpha1.Cluster{Spec: clusterSpec}}} g := NewWithT(t) - sut := curatedpackages.NewPackageControllerClient(nil, nil, "billy", "", nil, nil, curatedpackages.WithClusterSpec(&cluster)) + sut := curatedpackages.NewPackageControllerClient(nil, nil, nil, "billy", "", nil, nil, curatedpackages.WithClusterSpec(&cluster)) result, err := sut.GetPackageControllerConfiguration() g.Expect(result).To(Equal(expectedAllValues)) g.Expect(err).To(BeNil()) @@ -906,7 +916,7 @@ func TestGetPackageControllerConfigurationNothing(t *testing.T) { } g := NewWithT(t) cluster := cluster.Spec{Config: &cluster.Config{Cluster: &v1alpha1.Cluster{Spec: clusterSpec}}} - sut := curatedpackages.NewPackageControllerClient(nil, nil, "billy", "", nil, nil, curatedpackages.WithClusterSpec(&cluster)) + sut := curatedpackages.NewPackageControllerClient(nil, nil, nil, "billy", "", nil, nil, curatedpackages.WithClusterSpec(&cluster)) result, err := sut.GetPackageControllerConfiguration() g.Expect(result).To(Equal("")) g.Expect(err).To(BeNil()) @@ -924,7 +934,7 @@ func TestGetCuratedPackagesRegistriesDefaultRegion(t *testing.T) { } g := NewWithT(t) cluster := cluster.Spec{Config: &cluster.Config{Cluster: &v1alpha1.Cluster{Spec: clusterSpec}}} - sut := 
curatedpackages.NewPackageControllerClient(nil, nil, "billy", "", chart, nil, curatedpackages.WithClusterSpec(&cluster)) + sut := curatedpackages.NewPackageControllerClient(nil, nil, nil, "billy", "", chart, nil, curatedpackages.WithClusterSpec(&cluster)) _, _, img := sut.GetCuratedPackagesRegistries() g.Expect(img).To(Equal("783794618700.dkr.ecr.us-west-2.amazonaws.com")) } @@ -941,7 +951,7 @@ func TestGetCuratedPackagesRegistriesCustomRegion(t *testing.T) { } g := NewWithT(t) cluster := cluster.Spec{Config: &cluster.Config{Cluster: &v1alpha1.Cluster{Spec: clusterSpec}}} - sut := curatedpackages.NewPackageControllerClient(nil, nil, "billy", "", chart, nil, curatedpackages.WithClusterSpec(&cluster), curatedpackages.WithEksaRegion("test")) + sut := curatedpackages.NewPackageControllerClient(nil, nil, nil, "billy", "", chart, nil, curatedpackages.WithClusterSpec(&cluster), curatedpackages.WithEksaRegion("test")) _, _, img := sut.GetCuratedPackagesRegistries() g.Expect(img).To(Equal("783794618700.dkr.ecr.test.amazonaws.com")) } @@ -957,8 +967,338 @@ func TestGetPackageControllerConfigurationError(t *testing.T) { } g := NewWithT(t) cluster := cluster.Spec{Config: &cluster.Config{Cluster: &v1alpha1.Cluster{Spec: clusterSpec}}} - sut := curatedpackages.NewPackageControllerClient(nil, nil, "billy", "", nil, nil, curatedpackages.WithClusterSpec(&cluster)) + sut := curatedpackages.NewPackageControllerClient(nil, nil, nil, "billy", "", nil, nil, curatedpackages.WithClusterSpec(&cluster)) _, err := sut.GetPackageControllerConfiguration() g.Expect(err).NotTo(BeNil()) g.Expect(err.Error()).To(Equal("invalid environment in specification ")) } + +func TestReconcileDeleteGoldenPath(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + log := testr.New(t) + + cluster := &v1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "billy"}} + kubeconfig := "test.kubeconfig" + nsName := constants.EksaPackagesName + "-" + cluster.Name + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: nsName}} + client := fake.NewClientBuilder().WithRuntimeObjects(ns).Build() + ctrl := gomock.NewController(t) + deleter := mocks.NewMockChartUninstaller(ctrl) + deleter.EXPECT().Delete(ctx, kubeconfig, "eks-anywhere-packages-"+cluster.Name, constants.EksaPackagesName) + + sut := curatedpackages.NewPackageControllerClient(nil, deleter, nil, "billy", kubeconfig, nil, nil) + + err := sut.ReconcileDelete(ctx, log, client, cluster) + g.Expect(err).To(BeNil()) +} + +func TestReconcileDeleteNamespaceErrorHandling(s *testing.T) { + s.Run("ignores not found errors", func(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + log := testr.New(t) + cluster := &v1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "billy"}} + kubeconfig := "test.kubeconfig" + ctrl := gomock.NewController(t) + client := mocks.NewMockKubeDeleter(ctrl) + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "eksa-packages-" + cluster.Name}} + notFoundErr := apierrors.NewNotFound(schema.GroupResource{}, "NOT FOUND: test error") + client.EXPECT().Delete(ctx, ns).Return(notFoundErr) + helmDeleter := mocks.NewMockChartUninstaller(ctrl) + helmDeleter.EXPECT().Delete(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + + sut := curatedpackages.NewPackageControllerClient(nil, helmDeleter, nil, "billy", kubeconfig, nil, nil) + + err := sut.ReconcileDelete(ctx, log, client, cluster) + g.Expect(err).ShouldNot(HaveOccurred()) + }) + + s.Run("aborts on errors other than not found", func(t *testing.T) { + g := NewWithT(t) + ctx := 
context.Background() + log := testr.New(t) + cluster := &v1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "billy"}} + kubeconfig := "test.kubeconfig" + testErr := fmt.Errorf("test error") + ctrl := gomock.NewController(t) + client := mocks.NewMockKubeDeleter(ctrl) + client.EXPECT().Delete(ctx, gomock.Any()).Return(testErr) + helmDeleter := mocks.NewMockChartUninstaller(ctrl) + + sut := curatedpackages.NewPackageControllerClient(nil, helmDeleter, nil, "billy", kubeconfig, nil, nil) + + err := sut.ReconcileDelete(ctx, log, client, cluster) + g.Expect(err).Should(HaveOccurred()) + }) +} + +func TestReconcileDeleteHelmErrorsHandling(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + log := testr.New(t) + + cluster := &v1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "billy"}} + kubeconfig := "test.kubeconfig" + nsName := constants.EksaPackagesName + "-" + cluster.Name + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: nsName}} + client := fake.NewClientBuilder().WithRuntimeObjects(ns).Build() + ctrl := gomock.NewController(t) + deleter := mocks.NewMockChartUninstaller(ctrl) + // TODO this namespace should no longer be empty, following PR 5081 + testErr := fmt.Errorf("test error") + deleter.EXPECT(). + Delete(ctx, kubeconfig, "eks-anywhere-packages-"+cluster.Name, constants.EksaPackagesName). + Return(testErr) + + sut := curatedpackages.NewPackageControllerClient(nil, deleter, nil, "billy", kubeconfig, nil, nil) + + err := sut.ReconcileDelete(ctx, log, client, cluster) + g.Expect(err).Should(HaveOccurred()) + g.Expect(err.Error()).Should(Equal("test error")) +} + +func TestEnableFullLifecyclePath(t *testing.T) { + log := testr.New(t) + ctrl := gomock.NewController(t) + k := mocks.NewMockKubectlRunner(ctrl) + ci := mocks.NewMockChartInstaller(ctrl) + del := mocks.NewMockChartUninstaller(ctrl) + kubeConfig := "kubeconfig.kubeconfig" + chart := &artifactsv1.Image{ + Name: "test_controller", + URI: "test_registry/eks-anywhere/eks-anywhere-packages:v1", + } + clusterName := "billy" + writer, _ := filewriter.NewWriter(clusterName) + + tt := packageControllerTest{ + WithT: NewWithT(t), + ctx: context.Background(), + kubectl: k, + chartInstaller: ci, + command: curatedpackages.NewPackageControllerClientFullLifecycle(log, ci, del, k, nil), + clusterName: clusterName, + kubeConfig: kubeConfig, + chart: chart, + registryMirror: nil, + writer: writer, + wantValueFile: "testdata/values_empty.yaml", + } + + valueFilePath := filepath.Join("billy", filewriter.DefaultTmpFolder, valueFileName) + ociURI := fmt.Sprintf("%s%s", "oci://", tt.registryMirror.ReplaceRegistry(tt.chart.Image())) + // GetCuratedPackagesRegistries can't be used here, as when initialized + // via full cluster lifecycle the package controller client hasn't yet + // determined its chart. + values := []string{ + "clusterName=" + clusterName, + "workloadOnly=true", + "sourceRegistry=public.ecr.aws/eks-anywhere", + "defaultRegistry=public.ecr.aws/eks-anywhere", + "defaultImageRegistry=783794618700.dkr.ecr.us-west-2.amazonaws.com", + "cronjob.suspend=true", + } + + tt.chartInstaller.EXPECT().InstallChart(tt.ctx, tt.chart.Name+"-"+clusterName, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, true, gomock.InAnyOrder(values)).Return(nil) + tt.kubectl.EXPECT(). + GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(getPBCSuccess(t)). + AnyTimes() + tt.kubectl.EXPECT(). 
+ HasResource(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_, _, _, _, _ interface{}) (bool, error) { return true, nil }). + AnyTimes() + chartImage := &artifactsv1.Image{ + Name: "test_controller", + URI: "test_registry/eks-anywhere/eks-anywhere-packages:v1", + } + + err := tt.command.EnableFullLifecycle(tt.ctx, log, clusterName, kubeConfig, chartImage, tt.registryMirror, curatedpackages.WithEksaRegion("us-west-2")) + if err != nil { + t.Errorf("Install Controller Should succeed when installation passes") + } +} + +func TestGetCuratedPackagesRegistries(s *testing.T) { + s.Run("substitutes a region if set", func(t *testing.T) { + ctrl := gomock.NewController(t) + k := mocks.NewMockKubectlRunner(ctrl) + ci := mocks.NewMockChartInstaller(ctrl) + del := mocks.NewMockChartUninstaller(ctrl) + kubeConfig := "kubeconfig.kubeconfig" + chart := &artifactsv1.Image{ + Name: "test_controller", + URI: "test_registry/eks-anywhere/eks-anywhere-packages:v1", + } + // eksaRegion := "test-region" + clusterName := "billy" + writer, _ := filewriter.NewWriter(clusterName) + client := curatedpackages.NewPackageControllerClient( + ci, del, k, clusterName, kubeConfig, chart, nil, + curatedpackages.WithManagementClusterName(clusterName), + curatedpackages.WithValuesFileWriter(writer), + curatedpackages.WithEksaRegion("testing"), + ) + + expected := "783794618700.dkr.ecr.testing.amazonaws.com" + _, _, got := client.GetCuratedPackagesRegistries() + + if got != expected { + t.Errorf("expected %q, got %q", expected, got) + } + }) + + s.Run("won't substitute a blank region", func(t *testing.T) { + ctrl := gomock.NewController(t) + k := mocks.NewMockKubectlRunner(ctrl) + ci := mocks.NewMockChartInstaller(ctrl) + del := mocks.NewMockChartUninstaller(ctrl) + kubeConfig := "kubeconfig.kubeconfig" + chart := &artifactsv1.Image{ + Name: "test_controller", + URI: "test_registry/eks-anywhere/eks-anywhere-packages:v1", + } + // eksaRegion := "test-region" + clusterName := "billy" + writer, _ := filewriter.NewWriter(clusterName) + client := curatedpackages.NewPackageControllerClient( + ci, del, k, clusterName, kubeConfig, chart, nil, + curatedpackages.WithManagementClusterName(clusterName), + curatedpackages.WithValuesFileWriter(writer), + ) + + expected := "783794618700.dkr.ecr.us-west-2.amazonaws.com" + _, _, got := client.GetCuratedPackagesRegistries() + + if got != expected { + t.Errorf("expected %q, got %q", expected, got) + } + }) +} + +func TestReconcile(s *testing.T) { + s.Run("errors when bundles aren't found", func(t *testing.T) { + ctx := context.Background() + log := testr.New(t) + cluster := newReconcileTestCluster() + ctrl := gomock.NewController(t) + k := mocks.NewMockKubectlRunner(ctrl) + ci := mocks.NewMockChartInstaller(ctrl) + del := mocks.NewMockChartUninstaller(ctrl) + + objs := []runtime.Object{cluster} + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() + + pcc := curatedpackages.NewPackageControllerClientFullLifecycle(log, ci, del, k, nil) + err := pcc.Reconcile(ctx, log, fakeClient, cluster) + if err == nil || !apierrors.IsNotFound(err) { + t.Errorf("expected not found err getting cluster resource, got %s", err) + } + }) + + s.Run("errors when a matching k8s bundle version isn't found", func(t *testing.T) { + ctx := context.Background() + log := testr.New(t) + cluster := newReconcileTestCluster() + cluster.Spec.KubernetesVersion = "non-existent" + ctrl := gomock.NewController(t) + k := mocks.NewMockKubectlRunner(ctrl) + ci := 
mocks.NewMockChartInstaller(ctrl) + del := mocks.NewMockChartUninstaller(ctrl) + bundles := createBundle(cluster) + bundles.ObjectMeta.Name = cluster.Spec.BundlesRef.Name + bundles.ObjectMeta.Namespace = cluster.Spec.BundlesRef.Namespace + objs := []runtime.Object{cluster, bundles} + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() + + pcc := curatedpackages.NewPackageControllerClientFullLifecycle(log, ci, del, k, nil) + err := pcc.Reconcile(ctx, log, fakeClient, cluster) + if err == nil || !strings.Contains(err.Error(), "kubernetes version non-existent") { + t.Errorf("expected \"kubernetes version non-existent\" error, got %s", err) + } + }) + + s.Run("errors when helm fails", func(t *testing.T) { + ctx := context.Background() + log := testr.New(t) + cluster := newReconcileTestCluster() + ctrl := gomock.NewController(t) + k := mocks.NewMockKubectlRunner(ctrl) + ci := mocks.NewMockChartInstaller(ctrl) + del := mocks.NewMockChartUninstaller(ctrl) + bundles := createBundle(cluster) + bundles.Spec.VersionsBundles[0].KubeVersion = string(cluster.Spec.KubernetesVersion) + bundles.ObjectMeta.Name = cluster.Spec.BundlesRef.Name + bundles.ObjectMeta.Namespace = cluster.Spec.BundlesRef.Namespace + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: constants.EksaSystemNamespace, + Name: cluster.Name + "-kubeconfig", + }, + } + objs := []runtime.Object{cluster, bundles, secret} + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() + ci.EXPECT().InstallChart(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("test error")) + + pcc := curatedpackages.NewPackageControllerClientFullLifecycle(log, ci, del, k, nil) + err := pcc.Reconcile(ctx, log, fakeClient, cluster) + if err == nil || !strings.Contains(err.Error(), "packages client error: test error") { + t.Errorf("expected packages client error, got %s", err) + } + }) +} + +func newReconcileTestCluster() *anywherev1.Cluster { + return &anywherev1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-workload-cluster", + Namespace: "my-namespace", + }, + Spec: anywherev1.ClusterSpec{ + KubernetesVersion: "v1.25", + BundlesRef: &anywherev1.BundlesRef{ + Name: "my-bundles-ref", + Namespace: "my-namespace", + }, + ManagementCluster: anywherev1.ManagementCluster{ + Name: "my-management-cluster", + }, + }, + } +} + +func createBundle(cluster *anywherev1.Cluster) *artifactsv1.Bundles { + return &artifactsv1.Bundles{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster.Name, + Namespace: "default", + }, + Spec: artifactsv1.BundlesSpec{ + VersionsBundles: []artifactsv1.VersionsBundle{ + { + KubeVersion: "1.20", + EksD: artifactsv1.EksDRelease{ + Name: "test", + EksDReleaseUrl: "testdata/release.yaml", + KubeVersion: "1.20", + }, + CertManager: artifactsv1.CertManagerBundle{}, + ClusterAPI: artifactsv1.CoreClusterAPI{}, + Bootstrap: artifactsv1.KubeadmBootstrapBundle{}, + ControlPlane: artifactsv1.KubeadmControlPlaneBundle{}, + VSphere: artifactsv1.VSphereBundle{}, + Docker: artifactsv1.DockerBundle{}, + Eksa: artifactsv1.EksaBundle{}, + Cilium: artifactsv1.CiliumBundle{}, + Kindnetd: artifactsv1.KindnetdBundle{}, + Flux: artifactsv1.FluxBundle{}, + BottleRocketHostContainers: artifactsv1.BottlerocketHostContainersBundle{}, + ExternalEtcdBootstrap: artifactsv1.EtcdadmBootstrapBundle{}, + ExternalEtcdController: artifactsv1.EtcdadmControllerBundle{}, + Tinkerbell: artifactsv1.TinkerbellBundle{}, + }, + }, + }, + } +} 
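As a point of reference for the tests above, here is a minimal, hypothetical sketch (not part of this change) of how a cluster controller might drive the full lifecycle client. The installer, uninstaller, kubectl runner, and client builder are assumed to be wired up elsewhere, for example by the dependency factory; only the constructor and method signatures come from this diff.

package example

import (
	"context"

	"github.com/go-logr/logr"
	"sigs.k8s.io/controller-runtime/pkg/client"

	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/curatedpackages"
)

// reconcilePackages is a hypothetical helper showing the intended call
// pattern: build the client once with its collaborators, then call Reconcile
// on create/update and ReconcileDelete on delete.
func reconcilePackages(ctx context.Context, log logr.Logger, ctrlClient client.Client, cluster *anywherev1.Cluster,
	installer curatedpackages.ChartInstaller, uninstaller curatedpackages.ChartUninstaller,
	kubectl curatedpackages.KubectlRunner, builder curatedpackages.ClientBuilder,
) error {
	pcc := curatedpackages.NewPackageControllerClientFullLifecycle(log, installer, uninstaller, kubectl, builder)

	if cluster.DeletionTimestamp != nil {
		// Removes the per-cluster eksa-packages-<name> namespace and the
		// per-cluster helm release.
		return pcc.ReconcileDelete(ctx, log, ctrlClient, cluster)
	}

	// Resolves the package controller chart from the cluster's bundle and
	// installs (or upgrades) it for this cluster.
	return pcc.Reconcile(ctx, log, ctrlClient, cluster)
}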
diff --git a/pkg/curatedpackages/packageinstaller.go b/pkg/curatedpackages/packageinstaller.go
index 669eaee661d5..affcffbf9e62 100644
--- a/pkg/curatedpackages/packageinstaller.go
+++ b/pkg/curatedpackages/packageinstaller.go
@@ -9,7 +9,8 @@ import (
 )
 
 type PackageController interface {
-	EnableCuratedPackages(ctx context.Context) error
+	// Enable curated packages support.
+	Enable(ctx context.Context) error
 	IsInstalled(ctx context.Context) bool
 }
 
@@ -68,7 +69,7 @@ func (pi *Installer) installPackagesController(ctx context.Context) error {
 		logger.Info(" Package controller disabled")
 		return nil
 	}
-	err := pi.packageController.EnableCuratedPackages(ctx)
+	err := pi.packageController.Enable(ctx)
 	if err != nil {
 		return err
 	}
diff --git a/pkg/curatedpackages/packageinstaller_test.go b/pkg/curatedpackages/packageinstaller_test.go
index 8db6140759c7..c2a70180e372 100644
--- a/pkg/curatedpackages/packageinstaller_test.go
+++ b/pkg/curatedpackages/packageinstaller_test.go
@@ -72,7 +72,7 @@ func TestPackageInstallerSuccess(t *testing.T) {
 	tt := newPackageInstallerTest(t)
 
 	tt.packageClient.EXPECT().CreatePackages(tt.ctx, tt.packagePath, tt.kubeConfigPath).Return(nil)
-	tt.packageControllerClient.EXPECT().EnableCuratedPackages(tt.ctx).Return(nil)
+	tt.packageControllerClient.EXPECT().Enable(tt.ctx).Return(nil)
 
 	tt.command.InstallCuratedPackages(tt.ctx)
 }
@@ -80,7 +80,7 @@ func TestPackageInstallerSuccess(t *testing.T) {
 func TestPackageInstallerFailWhenControllerFails(t *testing.T) {
 	tt := newPackageInstallerTest(t)
 
-	tt.packageControllerClient.EXPECT().EnableCuratedPackages(tt.ctx).Return(errors.New("controller installation failed"))
+	tt.packageControllerClient.EXPECT().Enable(tt.ctx).Return(errors.New("controller installation failed"))
 
 	tt.command.InstallCuratedPackages(tt.ctx)
 }
@@ -89,7 +89,7 @@ func TestPackageInstallerFailWhenPackageFails(t *testing.T) {
 	tt := newPackageInstallerTest(t)
 
 	tt.packageClient.EXPECT().CreatePackages(tt.ctx, tt.packagePath, tt.kubeConfigPath).Return(errors.New("path doesn't exist"))
-	tt.packageControllerClient.EXPECT().EnableCuratedPackages(tt.ctx).Return(nil)
+	tt.packageControllerClient.EXPECT().Enable(tt.ctx).Return(nil)
 
 	tt.command.InstallCuratedPackages(tt.ctx)
 }
diff --git a/pkg/dependencies/factory.go b/pkg/dependencies/factory.go
index 7f18e40ea4d9..6a6488666646 100644
--- a/pkg/dependencies/factory.go
+++ b/pkg/dependencies/factory.go
@@ -1049,7 +1049,8 @@ func (f *Factory) WithPackageControllerClient(spec *cluster.Spec, kubeConfig str
 		return err
 	}
 	f.dependencies.PackageControllerClient = curatedpackages.NewPackageControllerClient(
-		f.dependencies.Helm,
+		f.dependencies.Helm, // installer
+		f.dependencies.Helm, // uninstaller (kept separate from the installer so implementations can differ)
 		f.dependencies.Kubectl,
 		spec.Cluster.Name,
 		mgmtKubeConfig,
diff --git a/pkg/executables/helm.go b/pkg/executables/helm.go
index 5d51a467dcfc..e2b4024cd6ad 100644
--- a/pkg/executables/helm.go
+++ b/pkg/executables/helm.go
@@ -32,6 +32,8 @@ func WithRegistryMirror(mirror *registrymirror.RegistryMirror) HelmOpt {
 	}
 }
 
+// WithInsecure configures helm to skip TLS certificate verification when
+// communicating with the OCI chart registry.
 func WithInsecure() HelmOpt {
 	return func(h *Helm) {
 		h.insecure = true
@@ -116,7 +118,11 @@ func (h *Helm) SaveChart(ctx context.Context, ociURI, version, folder string) er
 }
 
 func (h *Helm) InstallChartFromName(ctx context.Context, ociURI, kubeConfig, name, version string) error {
-	params := []string{"install", name, ociURI, "--version", version, "--kubeconfig", kubeConfig}
+	// "upgrade --install" installs the chart when it isn't present and
+	// upgrades it otherwise. That makes this call idempotent, unlike plain
+	// "install", which errors out when the release already exists and has
+	// no flag to upgrade it in place.
+	params := []string{"upgrade", "--install", name, ociURI, "--version", version, "--kubeconfig", kubeConfig}
 	params = h.addInsecureFlagIfProvided(params)
 	_, err := h.executable.Command(ctx, params...).
 		WithEnvVars(h.env).Run()
@@ -124,11 +130,18 @@ func (h *Helm) InstallChartFromName(ctx context.Context, ociURI, kubeConfig, nam
 	return err
 }
 
 // InstallChart installs a helm chart to the target cluster.
-func (h *Helm) InstallChart(ctx context.Context, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath string, values []string) error {
+//
+// If kubeconfigFilePath is the empty string, it won't be passed at all.
+func (h *Helm) InstallChart(ctx context.Context, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath string, skipCRDs bool, values []string) error {
 	valueArgs := GetHelmValueArgs(values)
-	params := []string{"install", chart, ociURI, "--version", version}
+	params := []string{"upgrade", "--install", chart, ociURI, "--version", version}
+	if skipCRDs {
+		params = append(params, "--skip-crds")
+	}
 	params = append(params, valueArgs...)
-	params = append(params, "--kubeconfig", kubeconfigFilePath)
+	if kubeconfigFilePath != "" {
+		params = append(params, "--kubeconfig", kubeconfigFilePath)
+	}
 	if len(namespace) > 0 {
 		params = append(params, "--create-namespace", "--namespace", namespace)
 	}
@@ -145,12 +158,31 @@ func (h *Helm) InstallChart(ctx context.Context, chart, ociURI, version, kubecon
 // InstallChartWithValuesFile installs a helm chart with the provided values file and waits for the chart deployment to be ready
 // The default timeout for the chart to reach ready state is 5m.
 func (h *Helm) InstallChartWithValuesFile(ctx context.Context, chart, ociURI, version, kubeconfigFilePath, valuesFilePath string) error {
-	params := []string{"install", chart, ociURI, "--version", version, "--values", valuesFilePath, "--kubeconfig", kubeconfigFilePath, "--wait"}
+	params := []string{"upgrade", "--install", chart, ociURI, "--version", version, "--values", valuesFilePath, "--kubeconfig", kubeconfigFilePath, "--wait"}
 	params = h.addInsecureFlagIfProvided(params)
 	_, err := h.executable.Command(ctx, params...).WithEnvVars(h.env).Run()
 	return err
 }
 
+// Delete removes a helm installation (release) from the cluster.
+func (h *Helm) Delete(ctx context.Context, kubeconfigFilePath, installName, namespace string) error {
+	params := []string{
+		"delete", installName,
+		"--kubeconfig", kubeconfigFilePath,
+	}
+	if namespace != "" {
+		params = append(params, "--namespace", namespace)
+	}
+
+	params = h.addInsecureFlagIfProvided(params)
+	if _, err := h.executable.Command(ctx, params...).WithEnvVars(h.env).Run(); err != nil {
+		return fmt.Errorf("deleting helm installation %w", err)
+	}
+	logger.V(6).Info("Deleted helm installation", "name", installName, "namespace", namespace)
+
+	return nil
+}
+
 func (h *Helm) ListCharts(ctx context.Context, kubeconfigFilePath string) ([]string, error) {
 	params := []string{"list", "-q", "--kubeconfig", kubeconfigFilePath}
 	out, err := h.executable.Command(ctx, params...).WithEnvVars(h.env).Run()
diff --git a/pkg/executables/helm_test.go b/pkg/executables/helm_test.go
index 4a811928e2fa..9181d2bba0e4 100644
--- a/pkg/executables/helm_test.go
+++ b/pkg/executables/helm_test.go
@@ -4,11 +4,13 @@ import (
 	"bytes"
 	"context"
 	"errors"
+	"fmt"
 	"testing"
 
 	"github.com/golang/mock/gomock"
 	.
"github.com/onsi/gomega" + "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/executables" "github.com/aws/eks-anywhere/pkg/executables/mocks" "github.com/aws/eks-anywhere/pkg/registrymirror" @@ -140,6 +142,19 @@ func TestHelmSaveChartSuccessWithInsecure(t *testing.T) { tt.Expect(tt.h.SaveChart(tt.ctx, url, version, destinationFolder)).To(Succeed()) } +func TestHelmSkipCRDs(t *testing.T) { + tt := newHelmTest(t) + url := "url" + version := "1.1" + kubeconfig := "kubeconfig" + chart := "chart" + expectCommand( + tt.e, tt.ctx, "upgrade", "--install", chart, url, "--version", version, "--skip-crds", "--kubeconfig", kubeconfig, "--create-namespace", "--namespace", constants.EksaPackagesName, + ).withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, nil) + + tt.Expect(tt.h.InstallChart(tt.ctx, chart, url, version, kubeconfig, constants.EksaPackagesName, "", true, nil)).To(Succeed()) +} + func TestHelmInstallChartSuccess(t *testing.T) { tt := newHelmTest(t) chart := "chart" @@ -148,10 +163,10 @@ func TestHelmInstallChartSuccess(t *testing.T) { kubeconfig := "/root/.kube/config" values := []string{"key1=value1"} expectCommand( - tt.e, tt.ctx, "install", chart, url, "--version", version, "--set", "key1=value1", "--kubeconfig", kubeconfig, "--create-namespace", "--namespace", "eksa-packages", + tt.e, tt.ctx, "upgrade", "--install", chart, url, "--version", version, "--set", "key1=value1", "--kubeconfig", kubeconfig, "--create-namespace", "--namespace", "eksa-packages", ).withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, nil) - tt.Expect(tt.h.InstallChart(tt.ctx, chart, url, version, kubeconfig, "eksa-packages", "", values)).To(Succeed()) + tt.Expect(tt.h.InstallChart(tt.ctx, chart, url, version, kubeconfig, "eksa-packages", "", false, values)).To(Succeed()) } func TestHelmInstallChartSuccessWithValuesFile(t *testing.T) { @@ -163,10 +178,10 @@ func TestHelmInstallChartSuccessWithValuesFile(t *testing.T) { values := []string{"key1=value1"} valuesFileName := "values.yaml" expectCommand( - tt.e, tt.ctx, "install", chart, url, "--version", version, "--set", "key1=value1", "--kubeconfig", kubeconfig, "--create-namespace", "--namespace", "eksa-packages", "-f", valuesFileName, + tt.e, tt.ctx, "upgrade", "--install", chart, url, "--version", version, "--set", "key1=value1", "--kubeconfig", kubeconfig, "--create-namespace", "--namespace", "eksa-packages", "-f", valuesFileName, ).withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, nil) - tt.Expect(tt.h.InstallChart(tt.ctx, chart, url, version, kubeconfig, "eksa-packages", valuesFileName, values)).To(Succeed()) + tt.Expect(tt.h.InstallChart(tt.ctx, chart, url, version, kubeconfig, "eksa-packages", valuesFileName, false, values)).To(Succeed()) } func TestHelmInstallChartSuccessWithInsecure(t *testing.T) { @@ -177,10 +192,10 @@ func TestHelmInstallChartSuccessWithInsecure(t *testing.T) { kubeconfig := "/root/.kube/config" values := []string{"key1=value1"} expectCommand( - tt.e, tt.ctx, "install", chart, url, "--version", version, "--set", "key1=value1", "--kubeconfig", kubeconfig, "--create-namespace", "--namespace", "eksa-packages", "--insecure-skip-tls-verify", + tt.e, tt.ctx, "upgrade", "--install", chart, url, "--version", version, "--set", "key1=value1", "--kubeconfig", kubeconfig, "--create-namespace", "--namespace", "eksa-packages", "--insecure-skip-tls-verify", ).withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, nil) - tt.Expect(tt.h.InstallChart(tt.ctx, chart, url, version, kubeconfig, "eksa-packages", "", 
values)).To(Succeed()) + tt.Expect(tt.h.InstallChart(tt.ctx, chart, url, version, kubeconfig, "eksa-packages", "", false, values)).To(Succeed()) } func TestHelmInstallChartSuccessWithInsecureAndValuesFile(t *testing.T) { @@ -192,10 +207,10 @@ func TestHelmInstallChartSuccessWithInsecureAndValuesFile(t *testing.T) { values := []string{"key1=value1"} valuesFileName := "values.yaml" expectCommand( - tt.e, tt.ctx, "install", chart, url, "--version", version, "--set", "key1=value1", "--kubeconfig", kubeconfig, "--create-namespace", "--namespace", "eksa-packages", "-f", valuesFileName, "--insecure-skip-tls-verify", + tt.e, tt.ctx, "upgrade", "--install", chart, url, "--version", version, "--set", "key1=value1", "--kubeconfig", kubeconfig, "--create-namespace", "--namespace", "eksa-packages", "-f", valuesFileName, "--insecure-skip-tls-verify", ).withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, nil) - tt.Expect(tt.h.InstallChart(tt.ctx, chart, url, version, kubeconfig, "eksa-packages", valuesFileName, values)).To(Succeed()) + tt.Expect(tt.h.InstallChart(tt.ctx, chart, url, version, kubeconfig, "eksa-packages", valuesFileName, false, values)).To(Succeed()) } func TestHelmGetValueArgs(t *testing.T) { @@ -232,7 +247,7 @@ func TestHelmInstallChartWithValuesFileSuccess(t *testing.T) { kubeconfig := "/root/.kube/config" valuesFileName := "values.yaml" expectCommand( - tt.e, tt.ctx, "install", chart, url, "--version", version, "--values", valuesFileName, "--kubeconfig", kubeconfig, "--wait", + tt.e, tt.ctx, "upgrade", "--install", chart, url, "--version", version, "--values", valuesFileName, "--kubeconfig", kubeconfig, "--wait", ).withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, nil) tt.Expect(tt.h.InstallChartWithValuesFile(tt.ctx, chart, url, version, kubeconfig, valuesFileName)).To(Succeed()) @@ -246,7 +261,7 @@ func TestHelmInstallChartWithValuesFileSuccessWithInsecure(t *testing.T) { kubeconfig := "/root/.kube/config" valuesFileName := "values.yaml" expectCommand( - tt.e, tt.ctx, "install", chart, url, "--version", version, "--values", valuesFileName, "--kubeconfig", kubeconfig, "--wait", "--insecure-skip-tls-verify", + tt.e, tt.ctx, "upgrade", "--install", chart, url, "--version", version, "--values", valuesFileName, "--kubeconfig", kubeconfig, "--wait", "--insecure-skip-tls-verify", ).withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, nil) tt.Expect(tt.h.InstallChartWithValuesFile(tt.ctx, chart, url, version, kubeconfig, valuesFileName)).To(Succeed()) @@ -277,3 +292,40 @@ func TestHelmListCharts(t *testing.T) { tt.Expect(result).To(Equal(expected)) }) } + +func TestHelmDelete(s *testing.T) { + kubeconfig := "/root/.kube/config" + + s.Run("Success", func(t *testing.T) { + tt := newHelmTest(s) + installName := "test-install" + expectCommand(tt.e, tt.ctx, "delete", installName, "--kubeconfig", kubeconfig).withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, nil) + err := tt.h.Delete(tt.ctx, kubeconfig, installName, "") + tt.Expect(err).NotTo(HaveOccurred()) + }) + + s.Run("passes the namespace, if present", func(t *testing.T) { + tt := newHelmTest(s) + testNamespace := "testing" + installName := "test-install" + expectCommand(tt.e, tt.ctx, "delete", installName, "--kubeconfig", kubeconfig, "--namespace", testNamespace).withEnvVars(tt.envVars).to().Return(bytes.Buffer{}, nil) + err := tt.h.Delete(tt.ctx, kubeconfig, installName, testNamespace) + tt.Expect(err).NotTo(HaveOccurred()) + }) + + s.Run("passes the insecure skip flag", func(t *testing.T) { + tt := newHelmTest(t, 
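Not part of the diff: as the updated tests indicate, InstallChart now takes a skipCRDs boolean between the values-file path and the --set values. A minimal sketch of the new call shape follows; the chart name, OCI URI, version, and value override are placeholders, not values from this change.

package helmexample // hypothetical package, for illustration only

import (
	"context"

	"github.com/aws/eks-anywhere/pkg/constants"
	"github.com/aws/eks-anywhere/pkg/executables"
)

// installSkippingCRDs is a hypothetical helper showing the new InstallChart
// signature; passing true for skipCRDs makes the wrapper add --skip-crds to
// the underlying `helm upgrade --install` invocation.
func installSkippingCRDs(ctx context.Context, h *executables.Helm, kubeconfig string) error {
	values := []string{"key1=value1"} // placeholder --set override
	return h.InstallChart(
		ctx,
		"example-chart", // chart/release name (placeholder)
		"oci://example.invalid/charts/example-chart", // chart OCI URI (placeholder)
		"0.1.0", // chart version (placeholder)
		kubeconfig,
		constants.EksaPackagesName, // target namespace, as in TestHelmSkipCRDs
		"",   // no values file
		true, // skipCRDs
		values,
	)
}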
diff --git a/test/framework/cluster.go b/test/framework/cluster.go
index 73fbe17a2136..deaae904c568 100644
--- a/test/framework/cluster.go
+++ b/test/framework/cluster.go
@@ -1092,7 +1092,7 @@ func (e *ClusterE2ETest) InstallHelmChart() {
 	kubeconfig := e.kubeconfigFilePath()
 	ctx := context.Background()
 
-	err := e.HelmInstallConfig.HelmClient.InstallChart(ctx, e.HelmInstallConfig.chartName, e.HelmInstallConfig.chartURI, e.HelmInstallConfig.chartVersion, kubeconfig, "", "", e.HelmInstallConfig.chartValues)
+	err := e.HelmInstallConfig.HelmClient.InstallChart(ctx, e.HelmInstallConfig.chartName, e.HelmInstallConfig.chartURI, e.HelmInstallConfig.chartVersion, kubeconfig, "", "", false, e.HelmInstallConfig.chartValues)
 	if err != nil {
 		e.T.Fatalf("Error installing %s helm chart on the cluster: %v", e.HelmInstallConfig.chartName, err)
 	}
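Not part of the diff: a sketch of how the new UpgradeChartWithValuesFile wrapper might be driven. It runs `helm upgrade ... --values <file> --wait`, so the call blocks until the release is ready; all arguments below are placeholders.

package helmexample // hypothetical package, for illustration only

import (
	"context"

	"github.com/aws/eks-anywhere/pkg/executables"
)

// upgradeFromValuesFile is a hypothetical helper: it upgrades an existing
// release from a values file and waits for the chart deployment to be ready.
func upgradeFromValuesFile(ctx context.Context, h *executables.Helm, kubeconfig string) error {
	return h.UpgradeChartWithValuesFile(
		ctx,
		"example-chart", // release/chart name (placeholder)
		"oci://example.invalid/charts/example-chart", // chart OCI URI (placeholder)
		"0.2.0", // target chart version (placeholder)
		kubeconfig,
		"values.yaml", // values file path (placeholder)
	)
}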