From 8e3fa9308d3889f2e1a33c0db70932737fb136f3 Mon Sep 17 00:00:00 2001
From: Clément Doucy <111791574+c3-clement@users.noreply.github.com>
Date: Tue, 11 Jun 2024 15:35:50 +0200
Subject: [PATCH 1/4] Clean up MedusaBackupJobs and propagate MedusaTask labels (#1339)

---
 CHANGELOG/CHANGELOG-1.14.md                   |  5 +++
 .../medusa/medusabackupjob_controller_test.go |  8 ++++-
 controllers/medusa/medusatask_controller.go   | 15 +++++++++
 .../medusa/medusatask_controller_test.go      | 32 +++++++++++++++++++
 4 files changed, 59 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG/CHANGELOG-1.14.md b/CHANGELOG/CHANGELOG-1.14.md
index 888ee13d6..3fb7092aa 100644
--- a/CHANGELOG/CHANGELOG-1.14.md
+++ b/CHANGELOG/CHANGELOG-1.14.md
@@ -13,6 +13,11 @@ Changelog for the K8ssandra Operator, new PRs should update the `unreleased` sec
 
 When cutting a new release, update the `unreleased` heading to the tag being generated and date, like `## vX.Y.Z - YYYY-MM-DD` and create a new placeholder section for `unreleased` entries.
 
+## unreleased
+
+* [BUGFIX] [#1334](https://github.com/k8ssandra/k8ssandra-operator/issues/1334) Delete `MedusaBackupJobs` during purge
+* [BUGFIX] [#1336](https://github.com/k8ssandra/k8ssandra-operator/issues/1336) Propagate labels to the sync `MedusaTask` created after purge
+
 ## v1.14.0 - 2024-04-02
 
 * [FEATURE] [#1242](https://github.com/k8ssandra/k8ssandra-operator/issues/1242) Allow for creation of replicated secrets with a prefix, so that we can distinguish between multiple secrets with the same origin but targeting different clusters.
diff --git a/controllers/medusa/medusabackupjob_controller_test.go b/controllers/medusa/medusabackupjob_controller_test.go
index 6c1704073..257e39807 100644
--- a/controllers/medusa/medusabackupjob_controller_test.go
+++ b/controllers/medusa/medusabackupjob_controller_test.go
@@ -36,6 +36,7 @@ const (
     fakeBackupFileCount = int64(13)
     fakeBackupByteSize  = int64(42)
     fakeBackupHumanSize = "42.00 B"
+    fakeMaxBackupCount  = 1
 )
 
 func testMedusaBackupDatacenter(t *testing.T, ctx context.Context, f *framework.Framework, namespace string) {
@@ -395,8 +396,13 @@ func (c *fakeMedusaClient) BackupStatus(ctx context.Context, name string) (*medu
 }
 
 func (c *fakeMedusaClient) PurgeBackups(ctx context.Context) (*medusa.PurgeBackupsResponse, error) {
+    size := len(c.RequestedBackups)
+    if size > fakeMaxBackupCount {
+        c.RequestedBackups = c.RequestedBackups[size-fakeMaxBackupCount:]
+    }
+
     response := &medusa.PurgeBackupsResponse{
-        NbBackupsPurged:           2,
+        NbBackupsPurged:           int32(size - len(c.RequestedBackups)),
         NbObjectsPurged:           10,
         TotalObjectsWithinGcGrace: 0,
         TotalPurgedSize:           1000,
diff --git a/controllers/medusa/medusatask_controller.go b/controllers/medusa/medusatask_controller.go
index ec9d0419f..5f3303cfb 100644
--- a/controllers/medusa/medusatask_controller.go
+++ b/controllers/medusa/medusatask_controller.go
@@ -301,6 +301,20 @@ func (r *MedusaTaskReconciler) syncOperation(ctx context.Context, task *medusav1
                 return ctrl.Result{}, err
             } else {
                 logger.Info("Deleted Cassandra Backup", "Backup", backup.ObjectMeta.Name)
+
+                backupJob := medusav1alpha1.MedusaBackupJob{
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name:      backup.GetName(),
+                        Namespace: backup.GetNamespace(),
+                    },
+                }
+                logger.Info("Deleting MedusaBackupJob", "MedusaBackupJob", backupJob.GetName())
+
+                if err := r.Delete(ctx, &backupJob); err != nil && !errors.IsNotFound(err) {
+                    logger.Error(err, "failed to delete MedusaBackupJob", "MedusaBackupJob", backupJob.GetName())
+                } else {
+                    logger.Info("Deleted MedusaBackupJob", "MedusaBackupJob", backupJob.GetName())
+                }
             }
         }
     }
@@ -377,6 +391,7 @@ func (r *MedusaTaskReconciler) scheduleSyncForPurge(task *medusav1alpha1.MedusaT
         ObjectMeta: metav1.ObjectMeta{
             Name:      task.GetObjectMeta().GetName() + "-sync",
             Namespace: task.Namespace,
+            Labels:    task.GetLabels(),
         },
         Spec: medusav1alpha1.MedusaTaskSpec{
             Operation: medusav1alpha1.OperationTypeSync,
diff --git a/controllers/medusa/medusatask_controller_test.go b/controllers/medusa/medusatask_controller_test.go
index fb592f3f6..a3ba65a56 100644
--- a/controllers/medusa/medusatask_controller_test.go
+++ b/controllers/medusa/medusatask_controller_test.go
@@ -154,6 +154,9 @@ func testMedusaTasks(t *testing.T, ctx context.Context, f *framework.Framework,
     backup4Created := createAndVerifyMedusaBackup(dc2Key, dc2, f, ctx, require, t, namespace, backup4)
     require.True(backup4Created, "failed to create backup4")
 
+    // Ensure that 4 backups and backup jobs were created
+    checkBackupsAndJobs(require, ctx, 4, namespace, f, []string{})
+
     // Purge backups and verify that only one out of three remains
     t.Log("purge backups")
 
@@ -161,6 +164,9 @@ func testMedusaTasks(t *testing.T, ctx context.Context, f *framework.Framework,
         ObjectMeta: metav1.ObjectMeta{
             Namespace: namespace,
             Name:      "purge-backups",
+            Labels: map[string]string{
+                "app": "medusa",
+            },
         },
         Spec: api.MedusaTaskSpec{
             CassandraDatacenter: "dc1",
@@ -194,9 +200,18 @@ func testMedusaTasks(t *testing.T, ctx context.Context, f *framework.Framework,
             return false
         }
 
+        v, ok := updated.Labels["app"]
+        if !ok || v != "medusa" {
+            return false
+        }
+
         return !updated.Status.FinishTime.IsZero()
     }, timeout, interval)
 
+    // Ensure that 2 backups and backup jobs were deleted
+    deletedBackups := []string{backup1, backup2}
+    checkBackupsAndJobs(require, ctx, 2, namespace, f, deletedBackups)
+
     medusaBackup4Key := framework.NewClusterKey(f.DataPlaneContexts[0], namespace, backup4)
     medusaBackup4 := &api.MedusaBackup{}
     err = f.Get(ctx, medusaBackup4Key, medusaBackup4)
@@ -207,3 +222,20 @@ func testMedusaTasks(t *testing.T, ctx context.Context, f *framework.Framework,
     verifyObjectDoesNotExist(ctx, t, f, dc1Key, &cassdcapi.CassandraDatacenter{})
     verifyObjectDoesNotExist(ctx, t, f, dc2Key, &cassdcapi.CassandraDatacenter{})
 }
+
+func checkBackupsAndJobs(require *require.Assertions, ctx context.Context, expectedLen int, namespace string, f *framework.Framework, deleted []string) {
+    var backups api.MedusaBackupList
+    err := f.List(ctx, framework.NewClusterKey(f.DataPlaneContexts[0], namespace, "list-backups"), &backups)
+    require.NoError(err, "failed to list medusabackup")
+    require.Len(backups.Items, expectedLen, "expected %d backups, got %d", expectedLen, len(backups.Items))
+
+    var jobs api.MedusaBackupJobList
+    err = f.List(ctx, framework.NewClusterKey(f.DataPlaneContexts[0], namespace, "list-backup-jobs"), &jobs)
+    require.NoError(err, "failed to list medusabackupjobs")
+    require.Len(jobs.Items, expectedLen, "expected %d jobs, got %d", expectedLen, len(jobs.Items))
+
+    for _, d := range deleted {
+        require.NotContains(backups.Items, d, "MedusaBackup %s to have been deleted", d)
+        require.NotContains(jobs.Items, d, "MedusaBackupJob %s to have been deleted", d)
+    }
+}

From 3d55c914624af063b222304c2c3b2e97095becf3 Mon Sep 17 00:00:00 2001
From: Olivier Michallat
Date: Tue, 11 Jun 2024 09:52:35 -0700
Subject: [PATCH 2/4] Update changelog for v1.14.1

---
 CHANGELOG/CHANGELOG-1.14.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGELOG/CHANGELOG-1.14.md b/CHANGELOG/CHANGELOG-1.14.md
index 3fb7092aa..5df02af82 100644
--- a/CHANGELOG/CHANGELOG-1.14.md
+++ b/CHANGELOG/CHANGELOG-1.14.md
@@ -13,7 +13,7 @@ Changelog for the K8ssandra Operator, new PRs should update the `unreleased` sec
 
 When cutting a new release, update the `unreleased` heading to the tag being generated and date, like `## vX.Y.Z - YYYY-MM-DD` and create a new placeholder section for `unreleased` entries.
 
-## unreleased
+## v1.14.1 - 2024-06-11
 
 * [BUGFIX] [#1334](https://github.com/k8ssandra/k8ssandra-operator/issues/1334) Delete `MedusaBackupJobs` during purge
 * [BUGFIX] [#1336](https://github.com/k8ssandra/k8ssandra-operator/issues/1336) Propagate labels to the sync `MedusaTask` created after purge

From 94d6d50eba7f598878c395457d02afa492cce07e Mon Sep 17 00:00:00 2001
From: Olivier Michallat
Date: Tue, 11 Jun 2024 10:04:18 -0700
Subject: [PATCH 3/4] Release v1.14.1

---
 charts/k8ssandra-operator/Chart.yaml          | 2 +-
 config/deployments/default/kustomization.yaml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/charts/k8ssandra-operator/Chart.yaml b/charts/k8ssandra-operator/Chart.yaml
index 512a7e7d6..5e51c9ba8 100644
--- a/charts/k8ssandra-operator/Chart.yaml
+++ b/charts/k8ssandra-operator/Chart.yaml
@@ -3,7 +3,7 @@ name: k8ssandra-operator
 description: |
   Kubernetes operator which handles the provisioning and management of K8ssandra clusters.
 type: application
-version: 1.14.1-SNAPSHOT
+version: 1.14.1
 dependencies:
   - name: k8ssandra-common
     version: 0.29.0
diff --git a/config/deployments/default/kustomization.yaml b/config/deployments/default/kustomization.yaml
index bc7083db1..638e525b9 100644
--- a/config/deployments/default/kustomization.yaml
+++ b/config/deployments/default/kustomization.yaml
@@ -5,4 +5,4 @@ resources:
   - ../../default
 images:
   - name: cr.k8ssandra.io/k8ssandra/k8ssandra-operator
-    newTag: 1.14-latest
+    newTag: v1.14.1

From d11580e50c6ebd1bfb00d8bab865d54d224ba881 Mon Sep 17 00:00:00 2001
From: Olivier Michallat
Date: Tue, 11 Jun 2024 15:21:23 -0700
Subject: [PATCH 4/4] Prepare next patch release

---
 charts/k8ssandra-operator/Chart.yaml          | 2 +-
 config/deployments/default/kustomization.yaml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/charts/k8ssandra-operator/Chart.yaml b/charts/k8ssandra-operator/Chart.yaml
index 5e51c9ba8..9007ffb08 100644
--- a/charts/k8ssandra-operator/Chart.yaml
+++ b/charts/k8ssandra-operator/Chart.yaml
@@ -3,7 +3,7 @@ name: k8ssandra-operator
 description: |
   Kubernetes operator which handles the provisioning and management of K8ssandra clusters.
 type: application
-version: 1.14.1
+version: 1.14.2-SNAPSHOT
 dependencies:
   - name: k8ssandra-common
     version: 0.29.0
diff --git a/config/deployments/default/kustomization.yaml b/config/deployments/default/kustomization.yaml
index 638e525b9..bc7083db1 100644
--- a/config/deployments/default/kustomization.yaml
+++ b/config/deployments/default/kustomization.yaml
@@ -5,4 +5,4 @@ resources:
   - ../../default
 images:
   - name: cr.k8ssandra.io/k8ssandra/k8ssandra-operator
-    newTag: v1.14.1
+    newTag: 1.14-latest