diff --git a/CHANGELOG/CHANGELOG-1.18.md b/CHANGELOG/CHANGELOG-1.18.md index eadb85000..d19e07dde 100644 --- a/CHANGELOG/CHANGELOG-1.18.md +++ b/CHANGELOG/CHANGELOG-1.18.md @@ -18,5 +18,6 @@ When cutting a new release, update the `unreleased` heading to the tag being gen * [CHANGE] Update cassandra-medusa to 0.22.0 * [CHANGE] Update cass-operator to v1.22.0 * [FEATURE] [#1310](https://github.com/k8ssandra/k8ssandra-operator/issues/1310) Enhance the MedusaBackupSchedule API to allow scheduling purge tasks +* [ENHANCEMENT] [#1274](https://github.com/k8ssandra/k8ssandra-operator/issues/1274) On upgrade, do not modify the CassandraDatacenter object unless instructed with an annotation `k8ssandra.io/autoupdate-spec` with value `once` or `always` * [BUGFIX] [#1222](https://github.com/k8ssandra/k8ssandra-operator/issues/1222) Consider DC-level config when validating numToken updates in webhook * [BUGFIX] [#1366](https://github.com/k8ssandra/k8ssandra-operator/issues/1366) Reaper deployment can't be created on OpenShift due to missing RBAC rule diff --git a/apis/k8ssandra/v1alpha1/constants.go b/apis/k8ssandra/v1alpha1/constants.go index 128e714a5..606fe0554 100644 --- a/apis/k8ssandra/v1alpha1/constants.go +++ b/apis/k8ssandra/v1alpha1/constants.go @@ -64,8 +64,17 @@ const ( // Annotation to indicate the purpose of a given resource. PurposeAnnotation = "k8ssandra.io/purpose" + + // AutomatedUpdateAnnotation is an annotation that allows the Datacenters to be updated even if no changes were done to the K8ssandraCluster spec + AutomatedUpdateAnnotation = "k8ssandra.io/autoupdate-spec" + + AllowUpdateAlways AllowUpdateType = "always" + AllowUpdateOnce AllowUpdateType = "once" ) +// TODO Use the accepted values from cass-operator's api instead to prevent drift, once Kubernetes dependencies are updated in k8ssandra-operator +type AllowUpdateType string + var ( SystemKeyspaces = []string{"system_traces", "system_distributed", "system_auth"} DseKeyspaces = []string{"dse_leases", "dse_perf", "dse_security"} diff --git a/apis/k8ssandra/v1alpha1/k8ssandracluster_types.go b/apis/k8ssandra/v1alpha1/k8ssandracluster_types.go index 3fd6670ab..7e44c5cdf 100644 --- a/apis/k8ssandra/v1alpha1/k8ssandracluster_types.go +++ b/apis/k8ssandra/v1alpha1/k8ssandracluster_types.go @@ -106,10 +106,17 @@ type K8ssandraClusterStatus struct { // +kubebuilder:default=None Error string `json:"error,omitempty"` + + // ObservedGeneration is the last observed generation of the K8ssandraCluster. 
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"` } type K8ssandraClusterConditionType string +const ( + ClusterRequiresUpdate K8ssandraClusterConditionType = "RequiresUpdate" +) + type DecommissionProgress string const ( @@ -511,6 +518,15 @@ func (s *K8ssandraClusterStatus) GetConditionStatus(conditionType K8ssandraClust return corev1.ConditionUnknown } +func (s *K8ssandraClusterStatus) SetConditionStatus(conditionType K8ssandraClusterConditionType, status corev1.ConditionStatus) { + now := metav1.Now() + s.SetCondition(K8ssandraClusterCondition{ + Type: conditionType, + Status: status, + LastTransitionTime: &now, + }) +} + func (s *K8ssandraClusterStatus) SetCondition(condition K8ssandraClusterCondition) { for i, c := range s.Conditions { if c.Type == condition.Type { @@ -548,3 +564,7 @@ func (sd *ServerDistribution) IsDse() bool { func (kc *K8ssandraCluster) GetClusterIdHash() string { return utils.HashNameNamespace(kc.Name, kc.Namespace) } + +func (k *K8ssandraCluster) GenerationChanged() bool { + return k.Status.ObservedGeneration < k.Generation +} diff --git a/apis/k8ssandra/v1alpha1/k8ssandracluster_types_test.go b/apis/k8ssandra/v1alpha1/k8ssandracluster_types_test.go index 3ab6bafd7..154620021 100644 --- a/apis/k8ssandra/v1alpha1/k8ssandracluster_types_test.go +++ b/apis/k8ssandra/v1alpha1/k8ssandracluster_types_test.go @@ -3,6 +3,7 @@ package v1alpha1 import ( "testing" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" "sigs.k8s.io/yaml" @@ -194,3 +195,23 @@ metadata: assert.Equal(t, "nodePortSvcLabelValue1", dc.Meta.ServiceConfig.NodePortService.Labels["nodePortSvcLabel1"]) assert.Equal(t, "nodePortSvcAnnotationValue1", dc.Meta.ServiceConfig.NodePortService.Annotations["nodePortSvcAnnotation1"]) } + +func TestGenerationChanged(t *testing.T) { + assert := assert.New(t) + kc := &K8ssandraCluster{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 2, + }, + Spec: K8ssandraClusterSpec{}, + } + + kc.Status = K8ssandraClusterStatus{ + ObservedGeneration: 0, + } + + assert.True(kc.GenerationChanged()) + kc.Status.ObservedGeneration = 2 + assert.False(kc.GenerationChanged()) + kc.ObjectMeta.Generation = 3 + assert.True(kc.GenerationChanged()) +} diff --git a/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook.go b/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook.go index 4b30f3bf0..53e996ead 100644 --- a/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook.go +++ b/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook.go @@ -21,6 +21,7 @@ import ( "github.com/Masterminds/semver/v3" "strings" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation" "github.com/k8ssandra/k8ssandra-operator/pkg/clientcache" @@ -100,6 +101,14 @@ func (r *K8ssandraCluster) validateK8ssandraCluster() error { } } + if metav1.HasAnnotation(r.ObjectMeta, AutomatedUpdateAnnotation) { + // Allow only always and once in the annotation + annotationValue := r.ObjectMeta.GetAnnotations()[AutomatedUpdateAnnotation] + if annotationValue != string(AllowUpdateAlways) && annotationValue != string(AllowUpdateOnce) { + return fmt.Errorf("invalid value for %s annotation: %s", AutomatedUpdateAnnotation, annotationValue) + } + } + if err := r.ValidateMedusa(); err != nil { return err } diff --git a/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook_test.go b/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook_test.go index d7e62821f..be713bd10 100644 --- a/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook_test.go +++ b/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook_test.go @@ -159,6 
+159,7 @@ func TestWebhook(t *testing.T) { t.Run("MedusaPrefixMissing", testMedusaPrefixMissing) t.Run("InvalidDcName", testInvalidDcName) t.Run("MedusaConfigNonLocalNamespace", testMedusaNonLocalNamespace) + t.Run("AutomatedUpdateAnnotation", testAutomatedUpdateAnnotation) } func testContextValidation(t *testing.T) { @@ -577,3 +578,20 @@ func TestValidateUpdateNumTokens(t *testing.T) { } } } + +func testAutomatedUpdateAnnotation(t *testing.T) { + require := require.New(t) + createNamespace(require, "automated-update-namespace") + cluster := createMinimalClusterObj("automated-update-test", "automated-update-namespace") + require.NoError(cluster.validateK8ssandraCluster()) + + // Test should accept values once and always + metav1.SetMetaDataAnnotation(&cluster.ObjectMeta, AutomatedUpdateAnnotation, string(AllowUpdateOnce)) + require.NoError(cluster.validateK8ssandraCluster()) + + metav1.SetMetaDataAnnotation(&cluster.ObjectMeta, AutomatedUpdateAnnotation, string(AllowUpdateAlways)) + require.NoError(cluster.validateK8ssandraCluster()) + + cluster.Annotations[AutomatedUpdateAnnotation] = string("true") + require.Error(cluster.validateK8ssandraCluster()) +} diff --git a/charts/k8ssandra-operator/crds/k8ssandra-operator-crds.yaml b/charts/k8ssandra-operator/crds/k8ssandra-operator-crds.yaml index 7a404f056..9f52314df 100644 --- a/charts/k8ssandra-operator/crds/k8ssandra-operator-crds.yaml +++ b/charts/k8ssandra-operator/crds/k8ssandra-operator-crds.yaml @@ -31025,6 +31025,11 @@ spec: error: default: None type: string + observedGeneration: + description: ObservedGeneration is the last observed generation of + the K8ssandraCluster. + format: int64 + type: integer type: object type: object served: true diff --git a/config/crd/bases/k8ssandra.io_k8ssandraclusters.yaml b/config/crd/bases/k8ssandra.io_k8ssandraclusters.yaml index 83ec43242..c3977e758 100644 --- a/config/crd/bases/k8ssandra.io_k8ssandraclusters.yaml +++ b/config/crd/bases/k8ssandra.io_k8ssandraclusters.yaml @@ -30963,6 +30963,11 @@ spec: error: default: None type: string + observedGeneration: + description: ObservedGeneration is the last observed generation of + the K8ssandraCluster. 
+ format: int64 + type: integer type: object type: object served: true diff --git a/controllers/k8ssandra/add_dc_test.go b/controllers/k8ssandra/add_dc_test.go index 71daec2b0..6c30a6855 100644 --- a/controllers/k8ssandra/add_dc_test.go +++ b/controllers/k8ssandra/add_dc_test.go @@ -50,8 +50,9 @@ func addDcSetupForSingleDc(ctx context.Context, t *testing.T, f *framework.Frame require := require.New(t) kc := &api.K8ssandraCluster{ ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "add-dc-test", + Namespace: namespace, + Name: "add-dc-test", + Annotations: map[string]string{api.AutomatedUpdateAnnotation: string(api.AllowUpdateAlways)}, }, Spec: api.K8ssandraClusterSpec{ Cassandra: &api.CassandraClusterTemplate{ @@ -119,8 +120,9 @@ func addDcSetupForMultiDc(ctx context.Context, t *testing.T, f *framework.Framew require := require.New(t) kc := &api.K8ssandraCluster{ ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "add-dc-test", + Namespace: namespace, + Name: "add-dc-test", + Annotations: map[string]string{api.AutomatedUpdateAnnotation: string(api.AllowUpdateAlways)}, }, Spec: api.K8ssandraClusterSpec{ Cassandra: &api.CassandraClusterTemplate{ diff --git a/controllers/k8ssandra/auth_test.go b/controllers/k8ssandra/auth_test.go index f3a299934..3d33ce79d 100644 --- a/controllers/k8ssandra/auth_test.go +++ b/controllers/k8ssandra/auth_test.go @@ -55,12 +55,11 @@ func createSingleDcClusterNoAuth(t *testing.T, ctx context.Context, f *framework err := f.Client.Create(ctx, kc) require.NoError(t, err, "failed to create K8ssandraCluster") - kcKey := framework.ClusterKey{K8sContext: f.ControlPlaneContext, NamespacedName: types.NamespacedName{Namespace: namespace, Name: kc.Name}} dcKey := framework.ClusterKey{K8sContext: f.DataPlaneContexts[1], NamespacedName: types.NamespacedName{Namespace: namespace, Name: "dc1"}} reaperKey := framework.ClusterKey{K8sContext: f.DataPlaneContexts[1], NamespacedName: types.NamespacedName{Namespace: namespace, Name: "cluster1-dc1-reaper"}} stargateKey := framework.ClusterKey{K8sContext: f.DataPlaneContexts[1], NamespacedName: types.NamespacedName{Namespace: namespace, Name: "cluster1-dc1-stargate"}} - verifyFinalizerAdded(ctx, t, f, kcKey.NamespacedName) + verifyFinalizerAdded(ctx, t, f, kc) verifySuperuserSecretCreated(ctx, t, f, kc) verifySecretNotCreated(ctx, t, f, kc.Namespace, reaper.DefaultUserSecretName(kc.SanitizedName())) verifyReplicatedSecretReconciled(ctx, t, f, kc) @@ -165,12 +164,11 @@ func createSingleDcClusterAuth(t *testing.T, ctx context.Context, f *framework.F err := f.Client.Create(ctx, kc) require.NoError(t, err, "failed to create K8ssandraCluster") - kcKey := framework.ClusterKey{K8sContext: f.ControlPlaneContext, NamespacedName: types.NamespacedName{Namespace: namespace, Name: kc.Name}} dcKey := framework.ClusterKey{K8sContext: f.DataPlaneContexts[1], NamespacedName: types.NamespacedName{Namespace: namespace, Name: "dc1"}} reaperKey := framework.ClusterKey{K8sContext: f.DataPlaneContexts[1], NamespacedName: types.NamespacedName{Namespace: namespace, Name: "cluster1-dc1-reaper"}} stargateKey := framework.ClusterKey{K8sContext: f.DataPlaneContexts[1], NamespacedName: types.NamespacedName{Namespace: namespace, Name: "cluster1-dc1-stargate"}} - verifyFinalizerAdded(ctx, t, f, kcKey.NamespacedName) + verifyFinalizerAdded(ctx, t, f, kc) verifySuperuserSecretCreated(ctx, t, f, kc) verifySecretCreated(ctx, t, f, kc.Namespace, reaper.DefaultUserSecretName(kc.Name)) verifyReplicatedSecretReconciled(ctx, t, f, kc) @@ -285,12 
+283,11 @@ func createSingleDcClusterAuthExternalSecrets(t *testing.T, ctx context.Context, err := f.Client.Create(ctx, kc) require.NoError(t, err, "failed to create K8ssandraCluster") - kcKey := framework.ClusterKey{K8sContext: f.ControlPlaneContext, NamespacedName: types.NamespacedName{Namespace: namespace, Name: kc.Name}} dcKey := framework.ClusterKey{K8sContext: f.DataPlaneContexts[1], NamespacedName: types.NamespacedName{Namespace: namespace, Name: "dc1"}} reaperKey := framework.ClusterKey{K8sContext: f.DataPlaneContexts[1], NamespacedName: types.NamespacedName{Namespace: namespace, Name: "cluster1-dc1-reaper"}} stargateKey := framework.ClusterKey{K8sContext: f.DataPlaneContexts[1], NamespacedName: types.NamespacedName{Namespace: namespace, Name: "cluster1-dc1-stargate"}} - verifyFinalizerAdded(ctx, t, f, kcKey.NamespacedName) + verifyFinalizerAdded(ctx, t, f, kc) verifySuperuserSecretNotCreated(ctx, t, f, kc) // verify not created @@ -416,10 +413,9 @@ func createSingleDcClusterExternalInternode(t *testing.T, ctx context.Context, f err := f.Client.Create(ctx, kc) require.NoError(err, "failed to create K8ssandraCluster") - kcKey := framework.ClusterKey{K8sContext: f.ControlPlaneContext, NamespacedName: types.NamespacedName{Namespace: namespace, Name: kc.Name}} dcKey := framework.ClusterKey{K8sContext: f.DataPlaneContexts[1], NamespacedName: types.NamespacedName{Namespace: namespace, Name: "dc1"}} - verifyFinalizerAdded(ctx, t, f, kcKey.NamespacedName) + verifyFinalizerAdded(ctx, t, f, kc) verifySuperuserSecretCreated(ctx, t, f, kc) verifyReplicatedSecretReconciled(ctx, t, f, kc) diff --git a/controllers/k8ssandra/cassandra_metrics_agent_test.go b/controllers/k8ssandra/cassandra_metrics_agent_test.go index c56d017d9..e5defbfdc 100644 --- a/controllers/k8ssandra/cassandra_metrics_agent_test.go +++ b/controllers/k8ssandra/cassandra_metrics_agent_test.go @@ -67,7 +67,7 @@ func createSingleDcClusterWithMetricsAgent(t *testing.T, ctx context.Context, f err := f.Client.Create(ctx, kc) require.NoError(err, "failed to create K8ssandraCluster") - verifyFinalizerAdded(ctx, t, f, client.ObjectKey{Namespace: kc.Namespace, Name: kc.Name}) + verifyFinalizerAdded(ctx, t, f, kc) verifySuperuserSecretCreated(ctx, t, f, kc) diff --git a/controllers/k8ssandra/datacenters.go b/controllers/k8ssandra/datacenters.go index 066679b4f..b96d4fa07 100644 --- a/controllers/k8ssandra/datacenters.go +++ b/controllers/k8ssandra/datacenters.go @@ -11,14 +11,17 @@ import ( "github.com/go-logr/logr" cassdcapi "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1" cassctlapi "github.com/k8ssandra/cass-operator/apis/control/v1alpha1" + ktaskapi "github.com/k8ssandra/k8ssandra-operator/apis/control/v1alpha1" api "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" telemetryapi "github.com/k8ssandra/k8ssandra-operator/apis/telemetry/v1alpha1" "github.com/k8ssandra/k8ssandra-operator/pkg/annotations" "github.com/k8ssandra/k8ssandra-operator/pkg/cassandra" + "github.com/k8ssandra/k8ssandra-operator/pkg/labels" "github.com/k8ssandra/k8ssandra-operator/pkg/result" "github.com/k8ssandra/k8ssandra-operator/pkg/secret" agent "github.com/k8ssandra/k8ssandra-operator/pkg/telemetry/cassandra_agent" "github.com/k8ssandra/k8ssandra-operator/pkg/utils" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -30,6 +33,10 @@ const ( rebuildNodesLabel = "k8ssandra.io/rebuild-nodes" ) +func AllowUpdate(kc *api.K8ssandraCluster) bool { 
+ return kc.GenerationChanged() || metav1.HasAnnotation(kc.ObjectMeta, api.AutomatedUpdateAnnotation) +} + func (r *K8ssandraClusterReconciler) reconcileDatacenters(ctx context.Context, kc *api.K8ssandraCluster, logger logr.Logger) (result.ReconcileResult, []*cassdcapi.CassandraDatacenter) { kcKey := utils.GetKey(kc) @@ -143,9 +150,15 @@ func (r *K8ssandraClusterReconciler) reconcileDatacenters(ctx context.Context, k r.setStatusForDatacenter(kc, actualDc) - if !annotations.CompareHashAnnotations(actualDc, desiredDc) { - dcLogger.Info("Updating datacenter") - + if !annotations.CompareHashAnnotations(actualDc, desiredDc) && !AllowUpdate(kc) { + logger.Info("Datacenter requires an update, but we're not allowed to do it", "CassandraDatacenter", dcKey) + // We're not allowed to update, but need to + patch := client.MergeFrom(kc.DeepCopy()) + kc.Status.SetConditionStatus(api.ClusterRequiresUpdate, corev1.ConditionTrue) + if err := r.Client.Status().Patch(ctx, kc, patch); err != nil { + return result.Error(fmt.Errorf("failed to set %s condition: %v", api.ClusterRequiresUpdate, err)), actualDcs + } + } else if !annotations.CompareHashAnnotations(actualDc, desiredDc) { if actualDc.Spec.SuperuserSecretName != desiredDc.Spec.SuperuserSecretName { // If actualDc is created with SuperuserSecretName, it can't be changed anymore. We should reject all changes coming from K8ssandraCluster desiredDc.Spec.SuperuserSecretName = actualDc.Spec.SuperuserSecretName @@ -178,6 +191,8 @@ func (r *K8ssandraClusterReconciler) reconcileDatacenters(ctx context.Context, k dcLogger.Error(err, "Failed to update datacenter") return result.Error(err), actualDcs } + + return result.RequeueSoon(r.DefaultDelay), actualDcs } if actualDc.Spec.Stopped { @@ -233,17 +248,60 @@ func (r *K8ssandraClusterReconciler) reconcileDatacenters(ctx context.Context, k } } + if AllowUpdate(kc) { + dcsRequiringUpdate := make([]string, 0, len(actualDcs)) + for _, dc := range actualDcs { + if dc.Status.GetConditionStatus(cassdcapi.DatacenterRequiresUpdate) == corev1.ConditionTrue { + dcsRequiringUpdate = append(dcsRequiringUpdate, dc.ObjectMeta.Name) + } + } + + if len(dcsRequiringUpdate) > 0 { + generatedName := fmt.Sprintf("refresh-%d-%d", kc.Generation, kc.Status.ObservedGeneration) + internalTask := &ktaskapi.K8ssandraTask{} + err := r.Get(ctx, types.NamespacedName{Namespace: kc.Namespace, Name: generatedName}, internalTask) + // If task wasn't found, create it and if task is still running, requeue + if errors.IsNotFound(err) { + // Delegate work to the task controller for Datacenter operations + task := &ktaskapi.K8ssandraTask{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: kc.Namespace, + Name: generatedName, + Labels: labels.WatchedByK8ssandraClusterLabels(kcKey), + }, + Spec: ktaskapi.K8ssandraTaskSpec{ + Cluster: corev1.ObjectReference{ + Name: kc.Name, + }, + Datacenters: dcsRequiringUpdate, + Template: cassctlapi.CassandraTaskTemplate{ + Jobs: []cassctlapi.CassandraJob{{ + Name: fmt.Sprintf("refresh-%s", kc.Name), + Command: "refresh", + }}, + }, + DcConcurrencyPolicy: batchv1.ForbidConcurrent, + }, + } + + if err := r.Create(ctx, task); err != nil { + return result.Error(err), actualDcs + } + + return result.RequeueSoon(r.DefaultDelay), actualDcs + + } else if internalTask.Status.CompletionTime.IsZero() { + return result.RequeueSoon(r.DefaultDelay), actualDcs + } + } + } + // If we reach this point all CassandraDatacenters are ready. We only set the // CassandraInitialized condition if it is unset, i.e., only once. 
This allows us to // distinguish whether we are deploying a CassandraDatacenter as part of a new cluster // or as part of an existing cluster. if kc.Status.GetConditionStatus(api.CassandraInitialized) == corev1.ConditionUnknown { - now := metav1.Now() - kc.Status.SetCondition(api.K8ssandraClusterCondition{ - Type: api.CassandraInitialized, - Status: corev1.ConditionTrue, - LastTransitionTime: &now, - }) + kc.Status.SetConditionStatus(api.CassandraInitialized, corev1.ConditionTrue) } return result.Continue(), actualDcs diff --git a/controllers/k8ssandra/datacenters_test.go b/controllers/k8ssandra/datacenters_test.go index 7f13a92c2..dd6e35b2b 100644 --- a/controllers/k8ssandra/datacenters_test.go +++ b/controllers/k8ssandra/datacenters_test.go @@ -1,6 +1,8 @@ package k8ssandra import ( + "testing" + "github.com/Masterminds/semver/v3" cassdcapi "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1" api "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" @@ -8,7 +10,6 @@ import ( "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "testing" ) var ( @@ -263,3 +264,27 @@ func TestGetSourceDatacenterName_Conflict(t *testing.T) { } } + +func TestAllowUpdate(t *testing.T) { + assert := assert.New(t) + kc := &api.K8ssandraCluster{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 2, + }, + Spec: api.K8ssandraClusterSpec{}, + } + + kc.Status = api.K8ssandraClusterStatus{ + ObservedGeneration: 0, + } + + assert.True(AllowUpdate(kc)) + kc.Status.ObservedGeneration = 1 + assert.True(AllowUpdate(kc)) + kc.Status.ObservedGeneration = 2 + assert.False(AllowUpdate(kc)) + metav1.SetMetaDataAnnotation(&kc.ObjectMeta, api.AutomatedUpdateAnnotation, string(api.AllowUpdateOnce)) + assert.True(AllowUpdate(kc)) + metav1.SetMetaDataAnnotation(&kc.ObjectMeta, api.AutomatedUpdateAnnotation, string(api.AllowUpdateAlways)) + assert.True(AllowUpdate(kc)) +} diff --git a/controllers/k8ssandra/k8ssandracluster_controller.go b/controllers/k8ssandra/k8ssandracluster_controller.go index e7d2d77d5..dd3372d6d 100644 --- a/controllers/k8ssandra/k8ssandracluster_controller.go +++ b/controllers/k8ssandra/k8ssandracluster_controller.go @@ -19,6 +19,7 @@ package k8ssandra import ( "context" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" "github.com/go-logr/logr" @@ -33,6 +34,7 @@ import ( "github.com/k8ssandra/k8ssandra-operator/pkg/result" "github.com/k8ssandra/k8ssandra-operator/pkg/utils" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" @@ -94,7 +96,7 @@ func (r *K8ssandraClusterReconciler) Reconcile(ctx context.Context, req ctrl.Req if kc.GetDeletionTimestamp() == nil { if err != nil { kc.Status.Error = err.Error() - r.Recorder.Event(kc, v1.EventTypeWarning, "Reconcile Error", err.Error()) + r.Recorder.Event(kc, corev1.EventTypeWarning, "Reconcile Error", err.Error()) } else { kc.Status.Error = "None" } @@ -154,6 +156,10 @@ func (r *K8ssandraClusterReconciler) reconcile(ctx context.Context, kc *api.K8ss return recResult.Output() } + if res := updateStatus(ctx, r.Client, kc); res.Completed() { + return res.Output() + } + kcLogger.Info("Finished reconciling the k8ssandracluster") return result.Done().Output() @@ -179,10 +185,31 @@ func (r *K8ssandraClusterReconciler) afterCassandraReconciled(ctx context.Contex return result.Continue() } +func updateStatus(ctx context.Context, r client.Client, kc 
*api.K8ssandraCluster) result.ReconcileResult { + if AllowUpdate(kc) { + if metav1.HasAnnotation(kc.ObjectMeta, api.AutomatedUpdateAnnotation) { + if kc.Annotations[api.AutomatedUpdateAnnotation] == string(api.AllowUpdateOnce) { + delete(kc.ObjectMeta.Annotations, api.AutomatedUpdateAnnotation) + if err := r.Update(ctx, kc); err != nil { + return result.Error(err) + } + } + } + kc.Status.SetConditionStatus(api.ClusterRequiresUpdate, corev1.ConditionFalse) + } + + kc.Status.ObservedGeneration = kc.Generation + if err := r.Status().Update(ctx, kc); err != nil { + return result.Error(err) + } + + return result.Continue() +} + // SetupWithManager sets up the controller with the Manager. func (r *K8ssandraClusterReconciler) SetupWithManager(mgr ctrl.Manager, clusters []cluster.Cluster) error { cb := ctrl.NewControllerManagedBy(mgr). - For(&api.K8ssandraCluster{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})) // No generation changed predicate here? + For(&api.K8ssandraCluster{}, builder.WithPredicates(predicate.Or(predicate.GenerationChangedPredicate{}, predicate.AnnotationChangedPredicate{}))) clusterLabelFilter := func(ctx context.Context, mapObj client.Object) []reconcile.Request { requests := make([]reconcile.Request, 0) diff --git a/controllers/k8ssandra/k8ssandracluster_controller_test.go b/controllers/k8ssandra/k8ssandracluster_controller_test.go index 6a30e9b77..7fd3c30c5 100644 --- a/controllers/k8ssandra/k8ssandracluster_controller_test.go +++ b/controllers/k8ssandra/k8ssandracluster_controller_test.go @@ -121,6 +121,7 @@ func TestK8ssandraCluster(t *testing.T) { t.Run("PerNodeConfiguration", testEnv.ControllerTest(ctx, perNodeConfiguration)) t.Run("CreateSingleDcClusterWithVector", testEnv.ControllerTest(ctx, createSingleDcClusterWithVector)) t.Run("createSingleDcClusterWithMetricsAgent", testEnv.ControllerTest(ctx, createSingleDcClusterWithMetricsAgent)) + t.Run("GenerationCheck", testEnv.ControllerTest(ctx, testGenerationCheck)) } // createSingleDcCluster verifies that the CassandraDatacenter is created and that the @@ -166,7 +167,7 @@ func createSingleDcCluster(t *testing.T, ctx context.Context, f *framework.Frame err := f.Client.Create(ctx, kc) require.NoError(err, "failed to create K8ssandraCluster") - verifyFinalizerAdded(ctx, t, f, client.ObjectKey{Namespace: kc.Namespace, Name: kc.Name}) + verifyFinalizerAdded(ctx, t, f, kc) verifySuperuserSecretCreated(ctx, t, f, kc) @@ -393,7 +394,7 @@ func applyClusterTemplateConfigs(t *testing.T, ctx context.Context, f *framework err := f.Client.Create(ctx, kc) require.NoError(err, "failed to create K8sandraCluster") - verifyFinalizerAdded(ctx, t, f, client.ObjectKey{Namespace: kc.Namespace, Name: kc.Name}) + verifyFinalizerAdded(ctx, t, f, kc) verifySuperuserSecretCreated(ctx, t, f, kc) @@ -558,7 +559,7 @@ func applyDatacenterTemplateConfigs(t *testing.T, ctx context.Context, f *framew err := f.Client.Create(ctx, kc) require.NoError(err, "failed to create K8sandraCluster") - verifyFinalizerAdded(ctx, t, f, client.ObjectKey{Namespace: kc.Namespace, Name: kc.Name}) + verifyFinalizerAdded(ctx, t, f, kc) verifySuperuserSecretCreated(ctx, t, f, kc) @@ -708,7 +709,7 @@ func applyClusterTemplateAndDatacenterTemplateConfigs(t *testing.T, ctx context. 
err := f.Client.Create(ctx, kc) require.NoError(err, "failed to create K8sandraCluster") - verifyFinalizerAdded(ctx, t, f, client.ObjectKey{Namespace: kc.Namespace, Name: kc.Name}) + verifyFinalizerAdded(ctx, t, f, kc) verifySuperuserSecretCreated(ctx, t, f, kc) @@ -810,7 +811,7 @@ func createMultiDcCluster(t *testing.T, ctx context.Context, f *framework.Framew err := f.Client.Create(ctx, kc) require.NoError(err, "failed to create K8ssandraCluster") - verifyFinalizerAdded(ctx, t, f, client.ObjectKey{Namespace: kc.Namespace, Name: kc.Name}) + verifyFinalizerAdded(ctx, t, f, kc) verifySuperuserSecretCreated(ctx, t, f, kc) @@ -1049,7 +1050,7 @@ func createSingleDcCassandra4ClusterWithStargate(t *testing.T, ctx context.Conte err := f.Client.Create(ctx, kc) require.NoError(err, "failed to create K8ssandraCluster") - verifyFinalizerAdded(ctx, t, f, client.ObjectKey{Namespace: kc.Namespace, Name: kc.Name}) + verifyFinalizerAdded(ctx, t, f, kc) verifySuperuserSecretCreated(ctx, t, f, kc) verifyReplicatedSecretReconciled(ctx, t, f, kc) verifySystemReplicationAnnotationSet(ctx, t, f, kc) @@ -1208,7 +1209,7 @@ func createMultiDcClusterWithStargate(t *testing.T, ctx context.Context, f *fram err := f.Client.Create(ctx, kc) require.NoError(err, "failed to create K8ssandraCluster") - verifyFinalizerAdded(ctx, t, f, client.ObjectKey{Namespace: kc.Namespace, Name: kc.Name}) + verifyFinalizerAdded(ctx, t, f, kc) verifySuperuserSecretCreated(ctx, t, f, kc) @@ -1573,7 +1574,7 @@ func applyClusterWithEncryptionOptions(t *testing.T, ctx context.Context, f *fra err := f.Client.Create(ctx, kc) require.NoError(err, "failed to create K8ssandraCluster") - verifyFinalizerAdded(ctx, t, f, client.ObjectKey{Namespace: kc.Namespace, Name: kc.Name}) + verifyFinalizerAdded(ctx, t, f, kc) verifySuperuserSecretCreated(ctx, t, f, kc) @@ -1798,7 +1799,7 @@ func applyClusterWithEncryptionOptionsFail(t *testing.T, ctx context.Context, f err := f.Client.Create(ctx, kc) require.NoError(err, "failed to create K8ssandraCluster") - verifyFinalizerAdded(ctx, t, f, client.ObjectKey{Namespace: kc.Namespace, Name: kc.Name}) + verifyFinalizerAdded(ctx, t, f, kc) verifySuperuserSecretCreated(ctx, t, f, kc) @@ -1952,7 +1953,7 @@ func applyClusterWithEncryptionOptionsExternalSecrets(t *testing.T, ctx context. 
err := f.Client.Create(ctx, kc) require.NoError(err, "failed to create K8ssandraCluster") - verifyFinalizerAdded(ctx, t, f, client.ObjectKey{Namespace: kc.Namespace, Name: kc.Name}) + verifyFinalizerAdded(ctx, t, f, kc) t.Log("check that dc1 was created") dc1Key := framework.ClusterKey{NamespacedName: types.NamespacedName{Namespace: namespace, Name: "dc1"}, K8sContext: f.DataPlaneContexts[0]} require.Eventually(f.DatacenterExists(ctx, dc1Key), timeout, interval) @@ -2143,8 +2144,9 @@ func systemReplicationAnnotationIsSet(t *testing.T, f *framework.Framework, ctx } } -func verifyFinalizerAdded(ctx context.Context, t *testing.T, f *framework.Framework, key client.ObjectKey) { +func verifyFinalizerAdded(ctx context.Context, t *testing.T, f *framework.Framework, kc *api.K8ssandraCluster) { t.Log("check finalizer added to K8ssandraCluster") + key := client.ObjectKey{Namespace: kc.Namespace, Name: kc.Name} assert.Eventually(t, func() bool { kc := &api.K8ssandraCluster{} @@ -2156,6 +2158,33 @@ func verifyFinalizerAdded(ctx context.Context, t *testing.T, f *framework.Framew }, timeout, interval, "failed to verify that finalizer was added") } +func verifyClusterReconcileFinished(ctx context.Context, t *testing.T, f *framework.Framework, kc *api.K8ssandraCluster) { + t.Log("check K8ssandraCluster reconciliation finished") + key := client.ObjectKey{Namespace: kc.Namespace, Name: kc.Name} + + assert.Eventually(t, func() bool { + kc := &api.K8ssandraCluster{} + if err := f.Client.Get(ctx, key, kc); err != nil { + t.Logf("failed to get K8ssandraCluster: %v", err) + return false + } + return kc.ObjectMeta.Generation == kc.Status.ObservedGeneration + }, timeout, interval, "cluster hasn't finished reconciliation") +} + +func waitForConditionStatus(ctx context.Context, t *testing.T, f *framework.Framework, conditionType api.K8ssandraClusterConditionType, status corev1.ConditionStatus, kc *api.K8ssandraCluster) { + key := client.ObjectKey{Namespace: kc.Namespace, Name: kc.Name} + assert.Eventually(t, func() bool { + kc := &api.K8ssandraCluster{} + if err := f.Client.Get(ctx, key, kc); err != nil { + t.Logf("failed to get K8ssandraCluster: %v", err) + return false + } + kcCondition := kc.Status.GetConditionStatus(conditionType) + return kcCondition == status + }, timeout, interval, "cluster didn't reach the expected condition status") +} + func verifyReplicatedSecretReconciled(ctx context.Context, t *testing.T, f *framework.Framework, kc *api.K8ssandraCluster) { t.Log("check ReplicatedSecret reconciled") @@ -2257,7 +2286,7 @@ func convertSystemReplicationAnnotation(t *testing.T, ctx context.Context, f *fr err := f.Client.Create(ctx, kc) require.NoError(err, "failed to create K8ssandraCluster") - verifyFinalizerAdded(ctx, t, f, client.ObjectKey{Namespace: kc.Namespace, Name: kc.Name}) + verifyFinalizerAdded(ctx, t, f, kc) verifySuperuserSecretCreated(ctx, t, f, kc) @@ -2370,7 +2399,7 @@ func changeClusterNameFails(t *testing.T, ctx context.Context, f *framework.Fram err := f.Client.Create(ctx, kc) require.NoError(err, "failed to create K8ssandraCluster") - verifyFinalizerAdded(ctx, t, f, client.ObjectKey{Namespace: kc.Namespace, Name: kc.Name}) + verifyFinalizerAdded(ctx, t, f, kc) verifySuperuserSecretCreated(ctx, t, f, kc) @@ -2487,7 +2516,7 @@ func injectContainersAndVolumes(t *testing.T, ctx context.Context, f *framework. 
err := f.Client.Create(ctx, kc) require.NoError(err, "failed to create K8ssandraCluster") - verifyFinalizerAdded(ctx, t, f, client.ObjectKey{Namespace: kc.Namespace, Name: kc.Name}) + verifyFinalizerAdded(ctx, t, f, kc) verifySuperuserSecretCreated(ctx, t, f, kc) @@ -2598,7 +2627,7 @@ func createMultiDcDseCluster(t *testing.T, ctx context.Context, f *framework.Fra err := f.Client.Create(ctx, kc) require.NoError(err, "failed to create K8ssandraCluster") - verifyFinalizerAdded(ctx, t, f, client.ObjectKey{Namespace: kc.Namespace, Name: kc.Name}) + verifyFinalizerAdded(ctx, t, f, kc) verifySuperuserSecretCreated(ctx, t, f, kc) @@ -2633,3 +2662,87 @@ func createMultiDcDseCluster(t *testing.T, ctx context.Context, f *framework.Fra f.AssertObjectDoesNotExist(ctx, t, dc1Key, &cassdcapi.CassandraDatacenter{}, timeout, interval) f.AssertObjectDoesNotExist(ctx, t, dc2Key, &cassdcapi.CassandraDatacenter{}, timeout, interval) } + +func testGenerationCheck(t *testing.T, ctx context.Context, f *framework.Framework, namespace string) { + require := require.New(t) + + kc := &api.K8ssandraCluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "test", + }, + Spec: api.K8ssandraClusterSpec{ + Cassandra: &api.CassandraClusterTemplate{ + ClusterName: "Not K8s_Compliant", + Datacenters: []api.CassandraDatacenterTemplate{ + { + Meta: api.EmbeddedObjectMeta{ + Name: "dc1", + }, + K8sContext: f.DataPlaneContexts[1], + Size: 1, + DatacenterOptions: api.DatacenterOptions{ + ServerVersion: "3.11.14", + StorageConfig: &cassdcapi.StorageConfig{ + CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{ + StorageClassName: &defaultStorageClass, + }, + }, + PodSecurityContext: &corev1.PodSecurityContext{ + RunAsUser: ptr.To(int64(999)), + }, + ManagementApiAuth: &cassdcapi.ManagementApiAuthConfig{ + Insecure: &cassdcapi.ManagementApiAuthInsecureConfig{}, + }, + }, + }, + }, + }, + }, + } + + err := f.Client.Create(ctx, kc) + require.NoError(err, "failed to create K8ssandraCluster") + + verifyFinalizerAdded(ctx, t, f, kc) + + verifySuperuserSecretCreated(ctx, t, f, kc) + + verifyReplicatedSecretReconciled(ctx, t, f, kc) + + verifySystemReplicationAnnotationSet(ctx, t, f, kc) + + t.Log("check that the datacenter was created") + dcKey := framework.ClusterKey{NamespacedName: types.NamespacedName{Namespace: namespace, Name: "dc1"}, K8sContext: f.DataPlaneContexts[1]} + require.Eventually(f.DatacenterExists(ctx, dcKey), timeout, interval) + + t.Log("update datacenter status to ready") + err = f.SetDatacenterStatusReady(ctx, dcKey) + require.NoError(err, "failed to set datacenter status ready") + + verifyClusterReconcileFinished(ctx, t, f, kc) + + // Modify the CassandraDatacenter hash to be some gibberish + dc := &cassdcapi.CassandraDatacenter{} + require.NoError(f.Get(ctx, dcKey, dc), "failed to get CassandraDatacenter dc1") + metav1.SetMetaDataAnnotation(&dc.ObjectMeta, api.ResourceHashAnnotation, "gibberish") + require.NoError(f.Update(ctx, dcKey, dc), "failed to update CassandraDatacenter dc1") + + waitForConditionStatus(ctx, t, f, api.ClusterRequiresUpdate, corev1.ConditionTrue, kc) + verifyClusterReconcileFinished(ctx, t, f, kc) + + require.NoError(f.Get(ctx, dcKey, dc), "failed to get CassandraDatacenter dc1") + require.Equal("gibberish", dc.Annotations[api.ResourceHashAnnotation]) + + t.Log("Modifying K8ssandraCluster to allow upgrade") + // Modify K8ssandraCluster to allow upgrade + kcKey := client.ObjectKey{Namespace: namespace, Name: kc.Name} + require.NoError(f.Client.Get(ctx, kcKey, kc), 
"failed to get K8ssandraCluster") + metav1.SetMetaDataAnnotation(&kc.ObjectMeta, api.AutomatedUpdateAnnotation, "once") + require.NoError(f.Client.Update(ctx, kc), "failed to update K8ssandraCluster") + // Wait for process to start.. + waitForConditionStatus(ctx, t, f, api.ClusterRequiresUpdate, corev1.ConditionFalse, kc) + + require.NoError(f.Get(ctx, dcKey, dc), "failed to get CassandraDatacenter dc1") + require.NotEqual("gibberish", dc.Annotations[api.ResourceHashAnnotation]) +} diff --git a/controllers/k8ssandra/per_node_config_test.go b/controllers/k8ssandra/per_node_config_test.go index 68fd5d8c4..c737acb2c 100644 --- a/controllers/k8ssandra/per_node_config_test.go +++ b/controllers/k8ssandra/per_node_config_test.go @@ -2,6 +2,8 @@ package k8ssandra import ( "context" + "testing" + "github.com/go-logr/logr/testr" cassdcapi "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1" api "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" @@ -19,7 +21,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" - "testing" ) func TestK8ssandraClusterReconciler_reconcilePerNodeConfiguration(t *testing.T) { @@ -217,7 +218,7 @@ func defaultPerNodeConfiguration(t *testing.T, ctx context.Context, f *framework } }() - verifyFinalizerAdded(ctx, t, f, client.ObjectKey{Namespace: kc.Namespace, Name: kc.Name}) + verifyFinalizerAdded(ctx, t, f, kc) verifySuperuserSecretCreated(ctx, t, f, kc) verifyReplicatedSecretReconciled(ctx, t, f, kc) verifySystemReplicationAnnotationSet(ctx, t, f, kc) @@ -332,7 +333,7 @@ func userDefinedPerNodeConfiguration(t *testing.T, ctx context.Context, f *frame } }() - verifyFinalizerAdded(ctx, t, f, client.ObjectKey{Namespace: kc.Namespace, Name: kc.Name}) + verifyFinalizerAdded(ctx, t, f, kc) verifySuperuserSecretCreated(ctx, t, f, kc) verifyReplicatedSecretReconciled(ctx, t, f, kc) verifySystemReplicationAnnotationSet(ctx, t, f, kc) diff --git a/controllers/k8ssandra/stop_dc_test.go b/controllers/k8ssandra/stop_dc_test.go index f25dd1784..812a46c3b 100644 --- a/controllers/k8ssandra/stop_dc_test.go +++ b/controllers/k8ssandra/stop_dc_test.go @@ -49,8 +49,9 @@ func stopDcTestSetup(t *testing.T, f *framework.Framework, ctx context.Context, kc := &api.K8ssandraCluster{ ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "stop-dc-test", + Namespace: namespace, + Name: "stop-dc-test", + Annotations: map[string]string{api.AutomatedUpdateAnnotation: string(api.AllowUpdateAlways)}, }, Spec: api.K8ssandraClusterSpec{ Cassandra: &api.CassandraClusterTemplate{ diff --git a/controllers/k8ssandra/vector_test.go b/controllers/k8ssandra/vector_test.go index 9f2a77874..343c68d1e 100644 --- a/controllers/k8ssandra/vector_test.go +++ b/controllers/k8ssandra/vector_test.go @@ -67,7 +67,7 @@ func createSingleDcClusterWithVector(t *testing.T, ctx context.Context, f *frame err := f.Client.Create(ctx, kc) require.NoError(err, "failed to create K8ssandraCluster") - verifyFinalizerAdded(ctx, t, f, client.ObjectKey{Namespace: kc.Namespace, Name: kc.Name}) + verifyFinalizerAdded(ctx, t, f, kc) verifySuperuserSecretCreated(ctx, t, f, kc) diff --git a/docs/content/en/install/_index.md b/docs/content/en/install/_index.md index b541226ef..c57530484 100644 --- a/docs/content/en/install/_index.md +++ b/docs/content/en/install/_index.md @@ -25,4 +25,8 @@ If you are using a cloud provider, explore the following topics for cloud-specif * [DigitalOcean Kubernetes]({{< relref "install/doks/" >}}) 
(DOKS)
 * [Google Kubernetes Engine]({{< relref "install/gke/" >}}) (GKE)
+## Upgrade
+
+* [Upgrade notes]({{< relref "install/upgrade/" >}})
+
 **Tip:** For an architectural overview of K8ssandra Operator and its new `K8ssandraCluster` custom resource, see the [K8ssandra Operator]({{< relref "components/k8ssandra-operator/" >}}) component page.
diff --git a/docs/content/en/install/upgrade/_index.md b/docs/content/en/install/upgrade/_index.md
new file mode 100644
index 000000000..f42e2df55
--- /dev/null
+++ b/docs/content/en/install/upgrade/_index.md
@@ -0,0 +1,40 @@
+---
+title: "Upgrade notes"
+linkTitle: "Upgrade notes"
+no_list: true
+weight: 2
+description: "Notes on upgrading an existing installation"
+---
+
+Upgrading the operators is usually a straightforward operation that follows the standard upgrade procedure of your installation method. In certain cases, however, an upgrade requires manual steps to avoid disruptions to running clusters.
+
+## Updates to running Cassandra clusters after an operator upgrade
+
+Operator updates sometimes bring new features or improvements to existing, running Cassandra clusters. However, starting with release 1.18, these changes are no longer applied automatically when the operators are upgraded, to avoid triggering a rolling restart at an inconvenient time. If changes are pending after an upgrade, the affected ``K8ssandraCluster`` instances are marked with the status condition ``RequiresUpdate`` set to ``True``.
+
+The pending updates are applied automatically whenever the ``K8ssandraCluster`` spec is modified. If no spec change is needed, you can instead trigger them by placing an annotation on the ``K8ssandraCluster`` (in ``metadata.annotations``). The ``k8ssandra.io/autoupdate-spec`` annotation accepts two values, ``once`` and ``always``. With ``once``, the clusters are upgraded with a rolling restart (if needed) and the annotation is then removed. With ``always``, the clusters are upgraded but the annotation is kept, restoring the old behavior of updating the clusters as soon as the operators are upgraded.
+
+Example of setting the annotation:
+
+```yaml
+apiVersion: k8ssandra.io/v1alpha1
+kind: K8ssandraCluster
+metadata:
+  name: test-cluster
+  annotations:
+    k8ssandra.io/autoupdate-spec: "always"
+spec:
+```
+
+We recommend updating the clusters after upgrading the operators, as this also rolls out newer images that may contain fixes for CVEs and bugs.
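+
+The condition and the annotation can also be inspected and set with ``kubectl`` instead of editing the manifest. The following is a minimal example assuming the ``test-cluster`` name from the example above; adjust the cluster name and namespace to your environment:
+
+```bash
+# Check whether the operator has flagged pending changes for this cluster.
+kubectl get k8ssandraclusters test-cluster \
+  -o jsonpath='{.status.conditions[?(@.type=="RequiresUpdate")].status}'
+
+# Apply the pending changes once; the operator removes the annotation after processing it.
+kubectl annotate k8ssandraclusters test-cluster k8ssandra.io/autoupdate-spec=once
+```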
diff --git a/pkg/cassandra/datacenter.go b/pkg/cassandra/datacenter.go index 1162a96e3..660b6e644 100644 --- a/pkg/cassandra/datacenter.go +++ b/pkg/cassandra/datacenter.go @@ -158,9 +158,6 @@ func NewDatacenter(klusterKey types.NamespacedName, template *DatacenterConfig) ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, Name: template.Meta.Name, - Annotations: map[string]string{ - cassdcapi.UpdateAllowedAnnotation: string(cassdcapi.AllowUpdateAlways), - }, Labels: utils.MergeMap(map[string]string{ api.NameLabel: api.NameLabelValue, api.PartOfLabel: api.PartOfLabelValue, diff --git a/scripts/prepare-helm-release.sh b/scripts/prepare-helm-release.sh index efc3e04cf..17d94ce34 100755 --- a/scripts/prepare-helm-release.sh +++ b/scripts/prepare-helm-release.sh @@ -13,15 +13,15 @@ mkdir -p build/helm kustomize build config/crd > charts/k8ssandra-operator/crds/k8ssandra-operator-crds.yaml # Generate the role.yaml and clusterrole.yaml files using the RBAC generated manifests kustomize build config/rbac > build/helm/k8ssandra-operator-rbac.yaml -cat charts/templates/role.tmpl.yaml | tee build/helm/role.yaml -cat build/helm/k8ssandra-operator-rbac.yaml | yq 'select(di == 1).rules' | tee -a build/helm/role.yaml +cat charts/templates/role.tmpl.yaml | tee build/helm/role.yaml > /dev/null +cat build/helm/k8ssandra-operator-rbac.yaml | yq 'select(di == 1).rules' | tee -a build/helm/role.yaml > /dev/null echo "{{- end }}" >> build/helm/role.yaml -cat charts/templates/clusterrole.tmpl.yaml | tee build/helm/clusterrole.yaml -cat build/helm/k8ssandra-operator-rbac.yaml | yq 'select(di == 1).rules' | tee -a build/helm/clusterrole.yaml +cat charts/templates/clusterrole.tmpl.yaml | tee build/helm/clusterrole.yaml > /dev/null +cat build/helm/k8ssandra-operator-rbac.yaml | yq 'select(di == 1).rules' | tee -a build/helm/clusterrole.yaml > /dev/null echo "{{- end }}" >> build/helm/clusterrole.yaml cp build/helm/role.yaml charts/k8ssandra-operator/templates/role.yaml cp build/helm/clusterrole.yaml charts/k8ssandra-operator/templates/clusterrole.yaml # Generate the leader election role from the RBAC generated manifests -cat charts/templates/leader-role.tmpl.yaml | tee build/helm/leader-role.yaml -cat build/helm/k8ssandra-operator-rbac.yaml | yq 'select(di == 2).rules' | tee -a build/helm/leader-role.yaml +cat charts/templates/leader-role.tmpl.yaml | tee build/helm/leader-role.yaml > /dev/null +cat build/helm/k8ssandra-operator-rbac.yaml | yq 'select(di == 2).rules' | tee -a build/helm/leader-role.yaml > /dev/null cp build/helm/leader-role.yaml charts/k8ssandra-operator/templates/leader-role.yaml \ No newline at end of file