diff --git a/CHANGELOG/CHANGELOG-1.19.md b/CHANGELOG/CHANGELOG-1.19.md index 58ab84827..e9a365091 100644 --- a/CHANGELOG/CHANGELOG-1.19.md +++ b/CHANGELOG/CHANGELOG-1.19.md @@ -14,3 +14,5 @@ Changelog for the K8ssandra Operator, new PRs should update the `unreleased` sec When cutting a new release, update the `unreleased` heading to the tag being generated and date, like `## vX.Y.Z - YYYY-MM-DD` and create a new placeholder section for `unreleased` entries. ## unreleased + +* [FEATURE] [#1277](https://github.com/k8ssandra/k8ssandra-operator/issues/1277) Introduce Reaper's Control Plane deployment mode diff --git a/apis/k8ssandra/v1alpha1/k8ssandracluster_types.go b/apis/k8ssandra/v1alpha1/k8ssandracluster_types.go index 7e44c5cdf..f0a874420 100644 --- a/apis/k8ssandra/v1alpha1/k8ssandracluster_types.go +++ b/apis/k8ssandra/v1alpha1/k8ssandracluster_types.go @@ -55,7 +55,8 @@ type K8ssandraClusterSpec struct { Stargate *stargateapi.StargateClusterTemplate `json:"stargate,omitempty"` // Reaper defines the desired deployment characteristics for Reaper in this K8ssandraCluster. - // If this is non-nil, Reaper will be deployed on every Cassandra datacenter in this K8ssandraCluster. + // If this is non-nil, Reaper might be deployed on every Cassandra datacenter in this K8ssandraCluster, unless + // there is a Control Plane Reaper present. In that case, the K8ssandraCluster will get registered to it. 
// +optional Reaper *reaperapi.ReaperClusterTemplate `json:"reaper,omitempty"` diff --git a/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook.go b/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook.go index 64c17db86..4501ebae2 100644 --- a/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook.go +++ b/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook.go @@ -46,6 +46,7 @@ var ( ErrNoReaperAccessMode = fmt.Errorf("reaper StorageConfig.AccessModes not set") ErrNoReaperResourceRequests = fmt.Errorf("reaper StorageConfig.Resources.Requests not set") ErrNoReaperStorageRequest = fmt.Errorf("reaper StorageConfig.Resources.Requests.Storage not set") + ErrNoReaperPerDcWithLocal = fmt.Errorf("reaper DeploymentModePerDc is only supported when using cassandra storage") ) // log is for logging in this package. @@ -294,21 +295,25 @@ func (r *K8ssandraCluster) validateReaper() error { if r.Spec.Reaper == nil { return nil } - if r.Spec.Reaper.StorageType != reaperapi.StorageTypeLocal { - return nil - } - if r.Spec.Reaper.StorageConfig == nil { + if r.Spec.Reaper.StorageType == reaperapi.StorageTypeLocal && r.Spec.Reaper.StorageConfig == nil { return ErrNoReaperStorageConfig } - // not checking StorageClassName because Kubernetes will use a default one if it's not set - if r.Spec.Reaper.StorageConfig.AccessModes == nil { - return ErrNoReaperAccessMode - } - if r.Spec.Reaper.StorageConfig.Resources.Requests == nil { - return ErrNoReaperResourceRequests + if r.Spec.Reaper.StorageType == reaperapi.StorageTypeLocal { + // we're checking for validity of the storage config in Reaper's webhook too, so this is a duplicate of that + // for now, I don't see a better way of reusing code to validate the storage config + // not checking StorageClassName because Kubernetes will use a default one if it's not set + if r.Spec.Reaper.StorageConfig.AccessModes == nil { + return ErrNoReaperAccessMode + } + if r.Spec.Reaper.StorageConfig.Resources.Requests == nil { + return ErrNoReaperResourceRequests + } + if 
r.Spec.Reaper.StorageConfig.Resources.Requests.Storage().IsZero() { + return ErrNoReaperStorageRequest + } } - if r.Spec.Reaper.StorageConfig.Resources.Requests.Storage().IsZero() { - return ErrNoReaperStorageRequest + if r.Spec.Reaper.StorageType == reaperapi.StorageTypeLocal && r.Spec.Reaper.DeploymentMode == reaperapi.DeploymentModePerDc { + return ErrNoReaperPerDcWithLocal } return nil } diff --git a/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook_test.go b/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook_test.go index 4b81edb81..f39ad758f 100644 --- a/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook_test.go +++ b/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook_test.go @@ -543,6 +543,12 @@ func testReaperStorage(t *testing.T) { reaperWithoutStorageSize.Spec.Reaper.StorageConfig.Resources.Requests = corev1.ResourceList{} err = reaperWithoutStorageSize.validateK8ssandraCluster() required.Error(err) + + kc := createClusterObjWithCassandraConfig("kc-with-per-dc-reaper-and-local-storage", "ns") + kc.Spec.Reaper = minimalInMemoryReaperConfig.DeepCopy() + kc.Spec.Reaper.DeploymentMode = reaperapi.DeploymentModePerDc + err = kc.validateK8ssandraCluster() + required.Equal(ErrNoReaperPerDcWithLocal, err) } // TestValidateUpdateNumTokens is a unit test for numTokens updates. diff --git a/apis/reaper/v1alpha1/reaper_types.go b/apis/reaper/v1alpha1/reaper_types.go index d32ee5077..21a330a10 100644 --- a/apis/reaper/v1alpha1/reaper_types.go +++ b/apis/reaper/v1alpha1/reaper_types.go @@ -30,12 +30,13 @@ import ( // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
const ( - DeploymentModeSingle = "SINGLE" - DeploymentModePerDc = "PER_DC" - ReaperLabel = "k8ssandra.io/reaper" - DefaultKeyspace = "reaper_db" - StorageTypeCassandra = "cassandra" - StorageTypeLocal = "local" + DeploymentModeSingle = "SINGLE" + DeploymentModePerDc = "PER_DC" + DeploymentModeControlPlane = "CONTROL_PLANE" + ReaperLabel = "k8ssandra.io/reaper" + DefaultKeyspace = "reaper_db" + StorageTypeCassandra = "cassandra" + StorageTypeLocal = "local" ) type ReaperTemplate struct { @@ -235,16 +236,30 @@ type ReaperClusterTemplate struct { // +optional // +kubebuilder:default="PER_DC" - // +kubebuilder:validation:Enum:=PER_DC;SINGLE + // +kubebuilder:validation:Enum:=PER_DC;SINGLE;CONTROL_PLANE DeploymentMode string `json:"deploymentMode,omitempty"` + + // When there is a CONTROL_PLANE Reaper out there, this field allows registering a K8ssandra cluster to it. + // Populating this field disables some operator behaviour related to setting Reaper up. + // +optional + ReaperRef corev1.ObjectReference `json:"reaperRef,omitempty"` +} + +func (rct *ReaperClusterTemplate) HasReaperRef() bool { + return rct != nil && rct.ReaperRef.Name != "" } -// EnsureDeploymentMode ensures that a deployment mode is SINGLE if we use the local storage type. This is to prevent -// several instances of Reapers with local storage that would interfere with each other. +func (rct *ReaperClusterTemplate) IsControlPlane() bool { + return rct != nil && rct.DeploymentMode == DeploymentModeControlPlane +} + +// EnsureDeploymentMode ensures that a deployment mode is no PER_DC if we use the local storage type. This is to prevent +// several instances of Reapers with local storage that would interfere with each other. Deployment mode CONTROL_PLANE +// is allowed. 
func (t *ReaperClusterTemplate) EnsureDeploymentMode() bool { if t != nil { if t.StorageType == StorageTypeLocal { - if t.DeploymentMode != DeploymentModeSingle { + if t.DeploymentMode == DeploymentModePerDc { t.DeploymentMode = DeploymentModeSingle return true } @@ -258,8 +273,8 @@ func (t *ReaperClusterTemplate) EnsureDeploymentMode() bool { type CassandraDatacenterRef struct { // The datacenter name. - // +kubebuilder:validation:Required - Name string `json:"name"` + // +optional + Name string `json:"name,omitempty"` // The datacenter namespace. If empty, the datacenter will be assumed to reside in the same namespace as the Reaper // instance. @@ -274,8 +289,8 @@ type ReaperSpec struct { // DatacenterRef is the reference of a CassandraDatacenter resource that this Reaper instance should manage. It will // also be used as the backend for persisting Reaper's state. Reaper must be able to access the JMX port (7199 by // default) and the CQL port (9042 by default) on this DC. - // +kubebuilder:validation:Required - DatacenterRef CassandraDatacenterRef `json:"datacenterRef"` + // +optional + DatacenterRef CassandraDatacenterRef `json:"datacenterRef,omitempty"` // DatacenterAvailability indicates to Reaper its deployment in relation to the target datacenter's network. // For single-DC clusters, the default (ALL) is fine. 
For multi-DC clusters, it is recommended to use EACH, diff --git a/apis/reaper/v1alpha1/reaper_types_test.go b/apis/reaper/v1alpha1/reaper_types_test.go index 0905353f4..1c78aed05 100644 --- a/apis/reaper/v1alpha1/reaper_types_test.go +++ b/apis/reaper/v1alpha1/reaper_types_test.go @@ -41,4 +41,10 @@ func TestEnsureDeploymentMode(t *testing.T) { changed = rct.EnsureDeploymentMode() assert.False(t, changed) assert.Equal(t, DeploymentModePerDc, rct.DeploymentMode) + + cpReaper := rct.DeepCopy() + cpReaper.DeploymentMode = DeploymentModeControlPlane + changed = cpReaper.EnsureDeploymentMode() + assert.False(t, changed) + assert.Equal(t, DeploymentModeControlPlane, cpReaper.DeploymentMode) } diff --git a/apis/reaper/v1alpha1/zz_generated.deepcopy.go b/apis/reaper/v1alpha1/zz_generated.deepcopy.go index 57b67b752..84af7ab5e 100644 --- a/apis/reaper/v1alpha1/zz_generated.deepcopy.go +++ b/apis/reaper/v1alpha1/zz_generated.deepcopy.go @@ -120,6 +120,7 @@ func (in *Reaper) DeepCopyObject() runtime.Object { func (in *ReaperClusterTemplate) DeepCopyInto(out *ReaperClusterTemplate) { *out = *in in.ReaperTemplate.DeepCopyInto(&out.ReaperTemplate) + out.ReaperRef = in.ReaperRef } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReaperClusterTemplate. diff --git a/charts/k8ssandra-operator/crds/k8ssandra-operator-crds.yaml b/charts/k8ssandra-operator/crds/k8ssandra-operator-crds.yaml index 069cfd76b..6295111fe 100644 --- a/charts/k8ssandra-operator/crds/k8ssandra-operator-crds.yaml +++ b/charts/k8ssandra-operator/crds/k8ssandra-operator-crds.yaml @@ -26559,7 +26559,8 @@ spec: reaper: description: |- Reaper defines the desired deployment characteristics for Reaper in this K8ssandraCluster. - If this is non-nil, Reaper will be deployed on every Cassandra datacenter in this K8ssandraCluster. + If this is non-nil, Reaper might be deployed on every Cassandra datacenter in this K8ssandraCluster, unless + there is a Control Plane Reaper present. 
In that case, the K8ssandraCluster will get registered to it. properties: ServiceAccountName: default: default @@ -27600,6 +27601,7 @@ spec: enum: - PER_DC - SINGLE + - CONTROL_PLANE type: string heapSize: anyOf: @@ -28439,6 +28441,52 @@ spec: format: int32 type: integer type: object + reaperRef: + description: |- + When there is a CONTROL_PLANE Reaper out there, this field allows registering a K8ssandra cluster to it. + Populating this field disables some operator behaviour related to setting Reaper up. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic resources: description: Main Container resources. properties: @@ -33695,8 +33743,6 @@ spec: The datacenter namespace. If empty, the datacenter will be assumed to reside in the same namespace as the Reaper instance. type: string - required: - - name type: object heapSize: anyOf: @@ -35282,8 +35328,6 @@ spec: type: string type: object x-kubernetes-map-type: atomic - required: - - datacenterRef type: object status: description: ReaperStatus defines the observed state of Reaper diff --git a/config/crd/bases/k8ssandra.io_k8ssandraclusters.yaml b/config/crd/bases/k8ssandra.io_k8ssandraclusters.yaml index 943e5e3c2..74c93c120 100644 --- a/config/crd/bases/k8ssandra.io_k8ssandraclusters.yaml +++ b/config/crd/bases/k8ssandra.io_k8ssandraclusters.yaml @@ -26497,7 +26497,8 @@ spec: reaper: description: |- Reaper defines the desired deployment characteristics for Reaper in this K8ssandraCluster. - If this is non-nil, Reaper will be deployed on every Cassandra datacenter in this K8ssandraCluster. + If this is non-nil, Reaper might be deployed on every Cassandra datacenter in this K8ssandraCluster, unless + there is a Control Plane Reaper present. In that case, the K8ssandraCluster will get registered to it. properties: ServiceAccountName: default: default @@ -27538,6 +27539,7 @@ spec: enum: - PER_DC - SINGLE + - CONTROL_PLANE type: string heapSize: anyOf: @@ -28377,6 +28379,52 @@ spec: format: int32 type: integer type: object + reaperRef: + description: |- + When there is a CONTROL_PLANE Reaper out there, this field allows registering a K8ssandra cluster to it. 
+ Populating this field disables some operator behaviour related to setting Reaper up. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic resources: description: Main Container resources. 
properties: diff --git a/config/crd/bases/reaper.k8ssandra.io_reapers.yaml b/config/crd/bases/reaper.k8ssandra.io_reapers.yaml index b93edba11..f32c358ae 100644 --- a/config/crd/bases/reaper.k8ssandra.io_reapers.yaml +++ b/config/crd/bases/reaper.k8ssandra.io_reapers.yaml @@ -1181,8 +1181,6 @@ spec: The datacenter namespace. If empty, the datacenter will be assumed to reside in the same namespace as the Reaper instance. type: string - required: - - name type: object heapSize: anyOf: @@ -2768,8 +2766,6 @@ spec: type: string type: object x-kubernetes-map-type: atomic - required: - - datacenterRef type: object status: description: ReaperStatus defines the observed state of Reaper diff --git a/controllers/k8ssandra/dcconfigs.go b/controllers/k8ssandra/dcconfigs.go index 8709edac0..51a88d7cd 100644 --- a/controllers/k8ssandra/dcconfigs.go +++ b/controllers/k8ssandra/dcconfigs.go @@ -71,8 +71,8 @@ func (r *K8ssandraClusterReconciler) createDatacenterConfigs( cassandra.AllowAlterRfDuringRangeMovement(dcConfig) } - // Inject Reaper settings - if kc.Spec.Reaper != nil { + // Inject Reaper settings, unless we just reference an existing Reaper + if kc.Spec.Reaper != nil && kc.Spec.Reaper.ReaperRef.Name == "" { reaper.AddReaperSettingsToDcConfig(kc.Spec.Reaper.DeepCopy(), dcConfig, kc.Spec.IsAuthEnabled()) } diff --git a/controllers/k8ssandra/k8ssandracluster_controller_test.go b/controllers/k8ssandra/k8ssandracluster_controller_test.go index 7fd3c30c5..767642ac0 100644 --- a/controllers/k8ssandra/k8ssandracluster_controller_test.go +++ b/controllers/k8ssandra/k8ssandracluster_controller_test.go @@ -99,6 +99,7 @@ func TestK8ssandraCluster(t *testing.T) { t.Run("CreateSingleDcCassandra4ClusterWithStargate", testEnv.ControllerTest(ctx, createSingleDcCassandra4ClusterWithStargate)) t.Run("CreateMultiDcClusterWithStargate", testEnv.ControllerTest(ctx, createMultiDcClusterWithStargate)) t.Run("CreateMultiDcClusterWithReaper", testEnv.ControllerTest(ctx, 
createMultiDcClusterWithReaper)) + t.Run("createMultiDcClusterWithControlPlaneReaper", testEnv.ControllerTest(ctx, createMultiDcClusterWithControlPlaneReaper)) t.Run("CreateMultiDcClusterWithMedusa", testEnv.ControllerTest(ctx, createMultiDcClusterWithMedusa)) t.Run("CreateSingleDcClusterWithMedusaConfigRef", testEnv.ControllerTest(ctx, createSingleDcClusterWithMedusaConfigRef)) t.Run("CreateSingleDcClusterWithManagementApiSecured", testEnv.ControllerTest(ctx, createSingleDcClusterWithManagementApiSecured)) @@ -771,40 +772,7 @@ func createMultiDcCluster(t *testing.T, ctx context.Context, f *framework.Framew Name: clusterName, }, Spec: api.K8ssandraClusterSpec{ - Cassandra: &api.CassandraClusterTemplate{ - Datacenters: []api.CassandraDatacenterTemplate{ - { - Meta: api.EmbeddedObjectMeta{ - Name: "dc1", - }, - K8sContext: f.DataPlaneContexts[0], - Size: 3, - DatacenterOptions: api.DatacenterOptions{ - ServerVersion: "3.11.14", - StorageConfig: &cassdcapi.StorageConfig{ - CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{ - StorageClassName: &defaultStorageClass, - }, - }, - }, - }, - { - Meta: api.EmbeddedObjectMeta{ - Name: "dc2", - }, - K8sContext: f.DataPlaneContexts[1], - Size: 3, - DatacenterOptions: api.DatacenterOptions{ - ServerVersion: "3.11.14", - StorageConfig: &cassdcapi.StorageConfig{ - CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{ - StorageClassName: &defaultStorageClass, - }, - }, - }, - }, - }, - }, + Cassandra: newTwoDcCassandraClusterTemplate(f), }, } @@ -1152,6 +1120,15 @@ func createSingleDcCassandra4ClusterWithStargate(t *testing.T, ctx context.Conte func createMultiDcClusterWithStargate(t *testing.T, ctx context.Context, f *framework.Framework, namespace string) { require := require.New(t) + stargate := &stargateapi.StargateDatacenterTemplate{ + StargateClusterTemplate: stargateapi.StargateClusterTemplate{ + Size: 1, + }, + } + cct := newTwoDcCassandraClusterTemplate(f) + cct.Datacenters[0].Stargate = 
stargate.DeepCopy() + cct.Datacenters[1].Stargate = stargate.DeepCopy() + clusterName := "cluster-multi-stargate" kc := &api.K8ssandraCluster{ ObjectMeta: metav1.ObjectMeta{ @@ -1159,50 +1136,7 @@ func createMultiDcClusterWithStargate(t *testing.T, ctx context.Context, f *fram Name: clusterName, }, Spec: api.K8ssandraClusterSpec{ - Cassandra: &api.CassandraClusterTemplate{ - Datacenters: []api.CassandraDatacenterTemplate{ - { - Meta: api.EmbeddedObjectMeta{ - Name: "dc1", - }, - K8sContext: f.DataPlaneContexts[0], - Size: 3, - DatacenterOptions: api.DatacenterOptions{ - ServerVersion: "3.11.14", - StorageConfig: &cassdcapi.StorageConfig{ - CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{ - StorageClassName: &defaultStorageClass, - }, - }, - }, - Stargate: &stargateapi.StargateDatacenterTemplate{ - StargateClusterTemplate: stargateapi.StargateClusterTemplate{ - Size: 1, - }, - }, - }, - { - Meta: api.EmbeddedObjectMeta{ - Name: "dc2", - }, - K8sContext: f.DataPlaneContexts[1], - Size: 3, - DatacenterOptions: api.DatacenterOptions{ - ServerVersion: "3.11.14", - StorageConfig: &cassdcapi.StorageConfig{ - CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{ - StorageClassName: &defaultStorageClass, - }, - }, - }, - Stargate: &stargateapi.StargateDatacenterTemplate{ - StargateClusterTemplate: stargateapi.StargateClusterTemplate{ - Size: 1, - }, - }, - }, - }, - }, + Cassandra: cct, }, } diff --git a/controllers/k8ssandra/reaper.go b/controllers/k8ssandra/reaper.go index d24d7ba08..4783e5f43 100644 --- a/controllers/k8ssandra/reaper.go +++ b/controllers/k8ssandra/reaper.go @@ -96,6 +96,14 @@ func (r *K8ssandraClusterReconciler) reconcileReaper( } } + // we might have nil-ed the template because a DC got stopped, so we need to re-check + if reaperTemplate != nil { + if reaperTemplate.HasReaperRef() { + logger.Info("ReaperRef present, registering with referenced Reaper instead of creating a new one") + return r.addClusterToExternalReaper(ctx, 
kc, actualDc, logger) + } + } + if updated := reaperTemplate.EnsureDeploymentMode(); updated { logger.Info("Forced SINGLE deployment mode for Reaper because it has 'local' storage type") } @@ -256,3 +264,27 @@ func getSingleReaperDcName(kc *api.K8ssandraCluster) string { } return "" } + +func (r *K8ssandraClusterReconciler) addClusterToExternalReaper( + ctx context.Context, + kc *api.K8ssandraCluster, + actualDc *cassdcapi.CassandraDatacenter, + logger logr.Logger, +) result.ReconcileResult { + manager := reaper.NewManager() + manager.SetK8sClient(r) + if username, password, err := manager.GetUiCredentials(ctx, kc.Spec.Reaper.UiUserSecretRef, kc.Namespace); err != nil { + logger.Error(err, "Failed to get Reaper UI user secret") + return result.RequeueSoon(r.DefaultDelay) + } else { + if err = manager.ConnectWithReaperRef(ctx, kc, username, password); err != nil { + logger.Error(err, "Failed to connect to external Reaper") + return result.RequeueSoon(r.DefaultDelay) + } + if err = manager.AddClusterToReaper(ctx, actualDc); err != nil { + logger.Error(err, "Failed to add cluster to external Reaper") + return result.RequeueSoon(r.DefaultDelay) + } + } + return result.Continue() +} diff --git a/controllers/k8ssandra/reaper_test.go b/controllers/k8ssandra/reaper_test.go index 7bb054c0d..3ed5c7945 100644 --- a/controllers/k8ssandra/reaper_test.go +++ b/controllers/k8ssandra/reaper_test.go @@ -3,6 +3,8 @@ package k8ssandra import ( "context" "fmt" + "github.com/k8ssandra/k8ssandra-operator/pkg/utils" + "k8s.io/apimachinery/pkg/api/resource" "testing" cassdcapi "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1" @@ -28,40 +30,7 @@ func createMultiDcClusterWithReaper(t *testing.T, ctx context.Context, f *framew Name: "test", }, Spec: api.K8ssandraClusterSpec{ - Cassandra: &api.CassandraClusterTemplate{ - Datacenters: []api.CassandraDatacenterTemplate{ - { - Meta: api.EmbeddedObjectMeta{ - Name: "dc1", - }, - K8sContext: f.DataPlaneContexts[0], - Size: 3, - 
DatacenterOptions: api.DatacenterOptions{ - ServerVersion: "3.11.14", - StorageConfig: &cassdcapi.StorageConfig{ - CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{ - StorageClassName: &defaultStorageClass, - }, - }, - }, - }, - { - Meta: api.EmbeddedObjectMeta{ - Name: "dc2", - }, - K8sContext: f.DataPlaneContexts[1], - Size: 3, - DatacenterOptions: api.DatacenterOptions{ - ServerVersion: "3.11.14", - StorageConfig: &cassdcapi.StorageConfig{ - CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{ - StorageClassName: &defaultStorageClass, - }, - }, - }, - }, - }, - }, + Cassandra: newTwoDcCassandraClusterTemplate(f), Reaper: &reaperapi.ReaperClusterTemplate{ ReaperTemplate: reaperapi.ReaperTemplate{ AutoScheduling: reaperapi.AutoScheduling{Enabled: true}, @@ -264,6 +233,88 @@ func createMultiDcClusterWithReaper(t *testing.T, ctx context.Context, f *framew } +func createMultiDcClusterWithControlPlaneReaper(t *testing.T, ctx context.Context, f *framework.Framework, namespace string) { + require := require.New(t) + + cpr := &reaperapi.Reaper{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "reaper", + }, + Spec: newControlPlaneReaper(), + } + + err := f.Client.Create(ctx, cpr) + require.NoError(err, "failed to create control plane reaper") + + cpReaperKey := framework.ClusterKey{ + K8sContext: f.ControlPlaneContext, + NamespacedName: types.NamespacedName{ + Namespace: namespace, + Name: "reaper"}, + } + t.Log("check that control plane reaper is created") + require.Eventually(f.ReaperExists(ctx, cpReaperKey), timeout, interval) + + kc := &api.K8ssandraCluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "test", + }, + Spec: api.K8ssandraClusterSpec{ + Cassandra: newTwoDcCassandraClusterTemplate(f), + Reaper: &reaperapi.ReaperClusterTemplate{ + ReaperRef: corev1.ObjectReference{ + Name: cpr.Name, + Namespace: cpr.Namespace, + }, + }, + }, + } + + err = f.Client.Create(ctx, kc) + require.NoError(err, "failed 
to create K8ssandraCluster") + + verifySuperuserSecretCreated(ctx, t, f, kc) + verifyReplicatedSecretReconciled(ctx, t, f, kc) + + t.Log("check that dc1 was created") + dc1Key := framework.ClusterKey{NamespacedName: types.NamespacedName{Namespace: namespace, Name: "dc1"}, K8sContext: f.DataPlaneContexts[0]} + require.Eventually(f.DatacenterExists(ctx, dc1Key), timeout, interval) + + t.Log("check that dc2 has not been created yet") + dc2Key := framework.ClusterKey{NamespacedName: types.NamespacedName{Namespace: namespace, Name: "dc2"}, K8sContext: f.DataPlaneContexts[1]} + dc2 := &cassdcapi.CassandraDatacenter{} + err = f.Get(ctx, dc2Key, dc2) + require.True(err != nil && errors.IsNotFound(err), "dc2 should not be created until dc1 is ready") + + t.Log("update dc1 status to ready") + err = f.SetDatacenterStatusReady(ctx, dc1Key) + require.NoError(err, "failed to update dc1 status to ready") + + t.Log("check that dc2 was created") + require.Eventually(f.DatacenterExists(ctx, dc2Key), timeout, interval) + + t.Log("check that remote seeds are set on dc2") + dc2 = &cassdcapi.CassandraDatacenter{} + err = f.Get(ctx, dc2Key, dc2) + require.NoError(err, "failed to get dc2") + + t.Log("update dc2 status to ready") + err = f.SetDatacenterStatusReady(ctx, dc2Key) + require.NoError(err, "failed to update dc2 status to ready") + + // check that reapers were not created together with cass DCs + verifyReaperAbsent(t, f, ctx, kc, f.DataPlaneContexts[0], dc1Key, namespace) + verifyReaperAbsent(t, f, ctx, kc, f.DataPlaneContexts[1], dc2Key, namespace) + + // check the kc is added to reaper + verifyClusterRegistered(t, f, ctx, kc, namespace) + + err = f.DeleteK8ssandraCluster(ctx, utils.GetKey(kc), timeout, interval) + require.NoError(err, "failed to delete K8ssandraCluster") +} + func getReaperAnnotations(t *testing.T, f *framework.Framework, ctx context.Context, key framework.ClusterKey) map[string]string { reaper := &reaperapi.Reaper{} err := f.Get(ctx, key, reaper) @@ -278,3 
+329,79 @@ func verifyReaperSecretAnnotationAdded(t *testing.T, f *framework.Framework, ctx t.Logf("check that the superuser secret annotation is added") assert.Eventually(t, secretAnnotationAdded(t, f, ctx, dcKey, getReaperAnnotations, secretName), timeout, interval, " failed to verify reaper secret annotation added") } + +func newTwoDcCassandraClusterTemplate(f *framework.Framework) *api.CassandraClusterTemplate { + return &api.CassandraClusterTemplate{ + Datacenters: []api.CassandraDatacenterTemplate{ + { + Meta: api.EmbeddedObjectMeta{ + Name: "dc1", + }, + K8sContext: f.DataPlaneContexts[0], + Size: 3, + DatacenterOptions: api.DatacenterOptions{ + ServerVersion: "3.11.14", + StorageConfig: &cassdcapi.StorageConfig{ + CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{ + StorageClassName: &defaultStorageClass, + }, + }, + }, + }, + { + Meta: api.EmbeddedObjectMeta{ + Name: "dc2", + }, + K8sContext: f.DataPlaneContexts[1], + Size: 3, + DatacenterOptions: api.DatacenterOptions{ + ServerVersion: "3.11.14", + StorageConfig: &cassdcapi.StorageConfig{ + CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{ + StorageClassName: &defaultStorageClass, + }, + }, + }, + }, + }, + } +} + +func newControlPlaneReaper() reaperapi.ReaperSpec { + return reaperapi.ReaperSpec{ + ReaperTemplate: reaperapi.ReaperTemplate{ + StorageType: reaperapi.StorageTypeLocal, + StorageConfig: &corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + AutoScheduling: reaperapi.AutoScheduling{ + Enabled: true, + // we need to be explicit to prevent checking the cass-dc spec, which is not available if reaper's in control plane mode + RepairType: "ADAPTIVE", + }, + }, + } +} + +func verifyReaperAbsent(t *testing.T, f *framework.Framework, ctx context.Context, kc 
*api.K8ssandraCluster, dataPlaneContext string, dcKey framework.ClusterKey, namespace string) { + reaperKey := framework.ClusterKey{ + K8sContext: dataPlaneContext, + NamespacedName: types.NamespacedName{ + Namespace: namespace, + Name: kc.Name + "-" + dcKey.Name + "-reaper"}, + } + + t.Logf("check that reaper %s has not been created", reaperKey) + reaper := &reaperapi.Reaper{} + err := f.Get(ctx, reaperKey, reaper) + require.True(t, err != nil && errors.IsNotFound(err), fmt.Sprintf("reaper %s should not be created in dc %s", reaperKey, dcKey)) +} + +func verifyClusterRegistered(t *testing.T, f *framework.Framework, ctx context.Context, kc *api.K8ssandraCluster, namespace string) { + +} diff --git a/controllers/k8ssandra/schemas.go b/controllers/k8ssandra/schemas.go index 3ef3eeb7e..94cce2088 100644 --- a/controllers/k8ssandra/schemas.go +++ b/controllers/k8ssandra/schemas.go @@ -48,8 +48,12 @@ func (r *K8ssandraClusterReconciler) checkSchemas( return recResult } - if recResult := r.reconcileReaperSchema(ctx, kc, mgmtApi, logger); recResult.Completed() { - return recResult + // we only want to reconcile the Reaper schema if we're deploying Reaper together with k8ssandra cluster + // otherwise we just register the k8ssandra cluster with an external reaper (happens after reconciling the DCs) + if kc.Spec.Reaper != nil && !kc.Spec.Reaper.HasReaperRef() { + if recResult := r.reconcileReaperSchema(ctx, kc, mgmtApi, logger); recResult.Completed() { + return recResult + } } decommCassDcName, dcNameOverride := k8ssandra.GetDatacenterForDecommission(kc) diff --git a/controllers/k8ssandra/secrets.go b/controllers/k8ssandra/secrets.go index 9722314da..dd75dbc7b 100644 --- a/controllers/k8ssandra/secrets.go +++ b/controllers/k8ssandra/secrets.go @@ -41,6 +41,10 @@ func SuperuserSecretName(kc *api.K8ssandraCluster) string { } func (r *K8ssandraClusterReconciler) reconcileReaperSecrets(ctx context.Context, kc *api.K8ssandraCluster, logger logr.Logger) result.ReconcileResult { + 
if kc.Spec.Reaper != nil && kc.Spec.Reaper.ReaperRef.Name != "" { + logger.Info("ReaperRef points to an existing Reaper, we don't need a new Reaper CQL secret") + return result.Continue() + } if kc.Spec.Reaper != nil { // Reaper secrets are only required when authentication is enabled on the cluster if kc.Spec.IsAuthEnabled() && !kc.Spec.UseExternalSecrets() { @@ -60,9 +64,12 @@ func (r *K8ssandraClusterReconciler) reconcileReaperSecrets(ctx context.Context, uiUserSecretRef.Name = reaper.DefaultUiSecretName(kc.SanitizedName()) } kcKey := utils.GetKey(kc) - if err := secret.ReconcileSecret(ctx, r.Client, cassandraUserSecretRef.Name, kcKey); err != nil { - logger.Error(err, "Failed to reconcile Reaper CQL user secret", "ReaperCassandraUserSecretRef", cassandraUserSecretRef) - return result.Error(err) + if kc.Spec.Reaper != nil && kc.Spec.Reaper.ReaperRef.Name == "" { + // ReaperRef presence indicates we use an Control Plane Reaper, which talks to C* via mgmt API, so we don't need a CQL user + if err := secret.ReconcileSecret(ctx, r.Client, cassandraUserSecretRef.Name, kcKey); err != nil { + logger.Error(err, "Failed to reconcile Reaper CQL user secret", "ReaperCassandraUserSecretRef", cassandraUserSecretRef) + return result.Error(err) + } } if kc.Spec.Reaper.UiUserSecretRef == nil || kc.Spec.Reaper.UiUserSecretRef.Name != "" { if err := secret.ReconcileSecret(ctx, r.Client, uiUserSecretRef.Name, kcKey); err != nil { diff --git a/controllers/k8ssandra/vector.go b/controllers/k8ssandra/vector.go index b97d166c2..730d4a2ea 100644 --- a/controllers/k8ssandra/vector.go +++ b/controllers/k8ssandra/vector.go @@ -2,6 +2,7 @@ package k8ssandra import ( "context" + "github.com/k8ssandra/k8ssandra-operator/pkg/shared" "github.com/go-logr/logr" k8ssandraapi "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" @@ -12,8 +13,6 @@ import ( "github.com/k8ssandra/k8ssandra-operator/pkg/result" "github.com/k8ssandra/k8ssandra-operator/pkg/telemetry" 
"github.com/k8ssandra/k8ssandra-operator/pkg/utils" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -50,7 +49,7 @@ func (r *K8ssandraClusterReconciler) reconcileVector( return recRes } } else { - if err := deleteConfigMapIfExists(ctx, remoteClient, configMapKey, dcLogger); err != nil { + if err := shared.DeleteConfigMapIfExists(ctx, remoteClient, configMapKey, dcLogger); err != nil { return result.Error(err) } } @@ -58,19 +57,3 @@ func (r *K8ssandraClusterReconciler) reconcileVector( dcLogger.Info("Vector Agent ConfigMap successfully reconciled") return result.Continue() } - -func deleteConfigMapIfExists(ctx context.Context, remoteClient client.Client, configMapKey client.ObjectKey, logger logr.Logger) error { - configMap := &corev1.ConfigMap{} - if err := remoteClient.Get(ctx, configMapKey, configMap); err != nil { - if errors.IsNotFound(err) { - return nil - } - logger.Error(err, "Failed to get ConfigMap", configMapKey) - return err - } - if err := remoteClient.Delete(ctx, configMap); err != nil { - logger.Error(err, "Failed to delete ConfigMap", configMapKey) - return err - } - return nil -} diff --git a/controllers/reaper/reaper_controller.go b/controllers/reaper/reaper_controller.go index 10a0a672f..385ecec6e 100644 --- a/controllers/reaper/reaper_controller.go +++ b/controllers/reaper/reaper_controller.go @@ -33,6 +33,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" + "math" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -91,25 +92,36 @@ func (r *ReaperReconciler) reconcile(ctx context.Context, actualReaper *reaperap actualReaper.Status.Progress = reaperapi.ReaperProgressPending actualReaper.Status.SetNotReady() - actualDc, result, err := r.reconcileDatacenter(ctx, actualReaper, logger) - if !result.IsZero() || err != nil { - return result, err + var actualDc 
*cassdcapi.CassandraDatacenter + if actualReaper.Spec.DatacenterRef.Name == "" { + logger.Info("No CassandraDatacenter reference specified, skipping CassandraDatacenter reconciliation") + actualDc = &cassdcapi.CassandraDatacenter{} + } else { + reconciledDc, result, err := r.reconcileDatacenter(ctx, actualReaper, logger) + if !result.IsZero() || err != nil { + return result, err + } + actualDc = reconciledDc.DeepCopy() } actualReaper.Status.Progress = reaperapi.ReaperProgressDeploying - if result, err = r.reconcileDeployment(ctx, actualReaper, actualDc, logger); !result.IsZero() || err != nil { + if result, err := r.reconcileDeployment(ctx, actualReaper, actualDc, logger); !result.IsZero() || err != nil { return result, err } - if result, err = r.reconcileService(ctx, actualReaper, logger); !result.IsZero() || err != nil { + if result, err := r.reconcileService(ctx, actualReaper, logger); !result.IsZero() || err != nil { return result, err } actualReaper.Status.Progress = reaperapi.ReaperProgressConfiguring - if result, err = r.configureReaper(ctx, actualReaper, actualDc, logger); !result.IsZero() || err != nil { - return result, err + if actualReaper.Spec.DatacenterRef.Name == "" { + logger.Info("skipping adding DC to Reaper because its a Control Plane Reaper") + } else { + if result, err := r.configureReaper(ctx, actualReaper, actualDc, logger); !result.IsZero() || err != nil { + return result, err + } } actualReaper.Status.Progress = reaperapi.ReaperProgressRunning @@ -185,10 +197,13 @@ func (r *ReaperReconciler) reconcileDeployment( } // reconcile Vector configmap - if vectorReconcileResult, err := r.reconcileVectorConfigMap(ctx, *actualReaper, actualDc, r.Client, logger); err != nil { - return vectorReconcileResult, err - } else if vectorReconcileResult.Requeue { - return vectorReconcileResult, nil + // do this only if we're not a control plane reaper. 
a CP reaper has no vector agent with it + if actualReaper.Spec.DatacenterRef.Name != "" { + if vectorReconcileResult, err := r.reconcileVectorConfigMap(ctx, *actualReaper, actualDc, r.Client, logger); err != nil { + return vectorReconcileResult, err + } else if vectorReconcileResult.Requeue { + return vectorReconcileResult, nil + } } logger.Info("Reconciling reaper deployment", "actualReaper", actualReaper) @@ -236,10 +251,18 @@ func (r *ReaperReconciler) reconcileDeployment( return ctrl.Result{}, err } - // if using local storage, we need to ensure only one Reaper exists ~ the STS has at most 1 replica - err = reaper.EnsureSingleReplica(actualReaper, actualDeployment, desiredDeployment, logger) - if err != nil { - return ctrl.Result{}, err + // if using memory storage, we need to ensure only one Reaper exists ~ the STS has at most 1 replica + if actualReaper.Spec.StorageType == reaperapi.StorageTypeLocal { + desiredReplicas := getDeploymentReplicas(desiredDeployment, logger) + if desiredReplicas > 1 { + logger.Info(fmt.Sprintf("reaper with memory storage can only have one replica, not allowing the %d that are desired", desiredReplicas)) + forceSingleReplica(&desiredDeployment, logger) + } + actualReplicas := getDeploymentReplicas(actualDeployment, logger) + if actualReplicas > 1 { + logger.Info(fmt.Sprintf("reaper with memory storage currently has %d replicas, scaling down to 1", actualReplicas)) + forceSingleReplica(&desiredDeployment, logger) + } } // Check if the deployment needs to be updated @@ -275,6 +298,29 @@ func (r *ReaperReconciler) reconcileDeployment( return ctrl.Result{}, nil } +func getDeploymentReplicas(actualDeployment client.Object, logger logr.Logger) int32 { + switch actual := actualDeployment.(type) { + case *appsv1.Deployment: + return *actual.Spec.Replicas + case *appsv1.StatefulSet: + return *actual.Spec.Replicas + default: + logger.Error(fmt.Errorf("unexpected type %T", actualDeployment), "Failed to get deployment replicas") + return 
math.MaxInt32 + } +} + +func forceSingleReplica(desiredDeployment *client.Object, logger logr.Logger) { + switch desired := (*desiredDeployment).(type) { + case *appsv1.Deployment: + desired.Spec.Replicas = ptr.To[int32](1) + case *appsv1.StatefulSet: + desired.Spec.Replicas = ptr.To[int32](1) + default: + logger.Error(fmt.Errorf("unexpected type %T", desiredDeployment), "Failed to set deployment replicas") + } +} + func (r *ReaperReconciler) reconcileService( ctx context.Context, actualReaper *reaperapi.Reaper, @@ -334,8 +380,9 @@ func (r *ReaperReconciler) reconcileService( func (r *ReaperReconciler) configureReaper(ctx context.Context, actualReaper *reaperapi.Reaper, actualDc *cassdcapi.CassandraDatacenter, logger logr.Logger) (ctrl.Result, error) { manager := r.NewManager() + manager.SetK8sClient(r) // Get the Reaper UI secret username and password values if auth is enabled - if username, password, err := r.getReaperUICredentials(ctx, actualReaper, logger); err != nil { + if username, password, err := manager.GetUiCredentials(ctx, actualReaper.Spec.UiUserSecretRef, actualReaper.Namespace); err != nil { return ctrl.Result{RequeueAfter: r.DefaultDelay}, err } else { if err := manager.Connect(ctx, actualReaper, username, password); err != nil { @@ -355,27 +402,6 @@ func (r *ReaperReconciler) configureReaper(ctx context.Context, actualReaper *re return ctrl.Result{}, nil } -func (r *ReaperReconciler) getReaperUICredentials(ctx context.Context, actualReaper *reaperapi.Reaper, logger logr.Logger) (string, string, error) { - - if actualReaper.Spec.UiUserSecretRef == nil || actualReaper.Spec.UiUserSecretRef.Name == "" { - // The UI user secret doesn't exist, meaning auth is disabled - return "", "", nil - } - - secretKey := types.NamespacedName{Namespace: actualReaper.Namespace, Name: actualReaper.Spec.UiUserSecretRef.Name} - if secret, err := r.getSecret(ctx, secretKey); err != nil { - if errors.IsNotFound(err) { - logger.Info("Reaper ui secret does not exist") - 
return "", "", err - } else { - logger.Error(err, "failed to get reaper ui secret") - return "", "", err - } - } else { - return string(secret.Data["username"]), string(secret.Data["password"]), nil - } -} - func (r *ReaperReconciler) collectAuthVars(ctx context.Context, actualReaper *reaperapi.Reaper, logger logr.Logger) ([]*corev1.EnvVar, error) { cqlVars, err := r.collectAuthVarsForType(ctx, actualReaper, logger, "cql") if err != nil { diff --git a/controllers/reaper/reaper_controller_test.go b/controllers/reaper/reaper_controller_test.go index dca1c8f44..edb66e248 100644 --- a/controllers/reaper/reaper_controller_test.go +++ b/controllers/reaper/reaper_controller_test.go @@ -69,8 +69,11 @@ func TestReaper(t *testing.T) { func newMockManager() reaper.Manager { m := new(mocks.ReaperManager) m.On("Connect", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + m.On("ConnectWithReaperRef", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) m.On("AddClusterToReaper", mock.Anything, mock.Anything).Return(nil) m.On("VerifyClusterIsConfigured", mock.Anything, mock.Anything).Return(true, nil) + m.On("GetUiCredentials", mock.Anything, mock.Anything, mock.Anything).Return("admin", "admin", nil) + m.On("SetK8sClient", mock.Anything) m.Test(currentTest) return m } diff --git a/controllers/reaper/vector.go b/controllers/reaper/vector.go index 2ff935cf3..c0a6f572f 100644 --- a/controllers/reaper/vector.go +++ b/controllers/reaper/vector.go @@ -3,6 +3,7 @@ package reaper import ( "bytes" "context" + "github.com/k8ssandra/k8ssandra-operator/pkg/shared" "text/template" "github.com/go-logr/logr" @@ -13,8 +14,6 @@ import ( "github.com/k8ssandra/k8ssandra-operator/pkg/reconciliation" "github.com/k8ssandra/k8ssandra-operator/pkg/telemetry" "github.com/k8ssandra/k8ssandra-operator/pkg/vector" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ) 
@@ -52,7 +51,7 @@ func (r *ReaperReconciler) reconcileVectorConfigMap( } } else { - if err := deleteConfigMapIfExists(ctx, remoteClient, configMapKey, dcLogger); err != nil { + if err := shared.DeleteConfigMapIfExists(ctx, remoteClient, configMapKey, dcLogger); err != nil { return ctrl.Result{}, err } } @@ -61,22 +60,6 @@ func (r *ReaperReconciler) reconcileVectorConfigMap( return ctrl.Result{}, nil } -func deleteConfigMapIfExists(ctx context.Context, remoteClient client.Client, configMapKey client.ObjectKey, logger logr.Logger) error { - configMap := &corev1.ConfigMap{} - if err := remoteClient.Get(ctx, configMapKey, configMap); err != nil { - if errors.IsNotFound(err) { - return nil - } - logger.Error(err, "Failed to get ConfigMap", configMapKey) - return err - } - if err := remoteClient.Delete(ctx, configMap); err != nil { - logger.Error(err, "Failed to delete ConfigMap", configMapKey) - return err - } - return nil -} - func CreateVectorToml(telemetrySpec *telemetryapi.TelemetrySpec) (string, error) { vectorConfigToml := ` [sinks.console] diff --git a/pkg/mocks/ManagementApiFacade.go b/pkg/mocks/ManagementApiFacade.go index a11d70c39..b00908e03 100644 --- a/pkg/mocks/ManagementApiFacade.go +++ b/pkg/mocks/ManagementApiFacade.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.9.4. DO NOT EDIT. +// Code generated by mockery v2.43.2. DO NOT EDIT. 
package mocks @@ -16,6 +16,10 @@ type ManagementApiFacade struct { func (_m *ManagementApiFacade) AlterKeyspace(keyspaceName string, replicationSettings map[string]int) error { ret := _m.Called(keyspaceName, replicationSettings) + if len(ret) == 0 { + panic("no return value specified for AlterKeyspace") + } + var r0 error if rf, ok := ret.Get(0).(func(string, map[string]int) error); ok { r0 = rf(keyspaceName, replicationSettings) @@ -30,6 +34,10 @@ func (_m *ManagementApiFacade) AlterKeyspace(keyspaceName string, replicationSet func (_m *ManagementApiFacade) CreateKeyspaceIfNotExists(keyspaceName string, replication map[string]int) error { ret := _m.Called(keyspaceName, replication) + if len(ret) == 0 { + panic("no return value specified for CreateKeyspaceIfNotExists") + } + var r0 error if rf, ok := ret.Get(0).(func(string, map[string]int) error); ok { r0 = rf(keyspaceName, replication) @@ -44,6 +52,10 @@ func (_m *ManagementApiFacade) CreateKeyspaceIfNotExists(keyspaceName string, re func (_m *ManagementApiFacade) CreateTable(definition *httphelper.TableDefinition) error { ret := _m.Called(definition) + if len(ret) == 0 { + panic("no return value specified for CreateTable") + } + var r0 error if rf, ok := ret.Get(0).(func(*httphelper.TableDefinition) error); ok { r0 = rf(definition) @@ -58,6 +70,10 @@ func (_m *ManagementApiFacade) CreateTable(definition *httphelper.TableDefinitio func (_m *ManagementApiFacade) EnsureKeyspaceReplication(keyspaceName string, replication map[string]int) error { ret := _m.Called(keyspaceName, replication) + if len(ret) == 0 { + panic("no return value specified for EnsureKeyspaceReplication") + } + var r0 error if rf, ok := ret.Get(0).(func(string, map[string]int) error); ok { r0 = rf(keyspaceName, replication) @@ -72,7 +88,15 @@ func (_m *ManagementApiFacade) EnsureKeyspaceReplication(keyspaceName string, re func (_m *ManagementApiFacade) GetKeyspaceReplication(keyspaceName string) (map[string]string, error) { ret := 
_m.Called(keyspaceName) + if len(ret) == 0 { + panic("no return value specified for GetKeyspaceReplication") + } + var r0 map[string]string + var r1 error + if rf, ok := ret.Get(0).(func(string) (map[string]string, error)); ok { + return rf(keyspaceName) + } if rf, ok := ret.Get(0).(func(string) map[string]string); ok { r0 = rf(keyspaceName) } else { @@ -81,7 +105,6 @@ func (_m *ManagementApiFacade) GetKeyspaceReplication(keyspaceName string) (map[ } } - var r1 error if rf, ok := ret.Get(1).(func(string) error); ok { r1 = rf(keyspaceName) } else { @@ -95,7 +118,15 @@ func (_m *ManagementApiFacade) GetKeyspaceReplication(keyspaceName string) (map[ func (_m *ManagementApiFacade) GetSchemaVersions() (map[string][]string, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetSchemaVersions") + } + var r0 map[string][]string + var r1 error + if rf, ok := ret.Get(0).(func() (map[string][]string, error)); ok { + return rf() + } if rf, ok := ret.Get(0).(func() map[string][]string); ok { r0 = rf() } else { @@ -104,7 +135,6 @@ func (_m *ManagementApiFacade) GetSchemaVersions() (map[string][]string, error) } } - var r1 error if rf, ok := ret.Get(1).(func() error); ok { r1 = rf() } else { @@ -118,7 +148,15 @@ func (_m *ManagementApiFacade) GetSchemaVersions() (map[string][]string, error) func (_m *ManagementApiFacade) ListKeyspaces(keyspaceName string) ([]string, error) { ret := _m.Called(keyspaceName) + if len(ret) == 0 { + panic("no return value specified for ListKeyspaces") + } + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func(string) ([]string, error)); ok { + return rf(keyspaceName) + } if rf, ok := ret.Get(0).(func(string) []string); ok { r0 = rf(keyspaceName) } else { @@ -127,7 +165,6 @@ func (_m *ManagementApiFacade) ListKeyspaces(keyspaceName string) ([]string, err } } - var r1 error if rf, ok := ret.Get(1).(func(string) error); ok { r1 = rf(keyspaceName) } else { @@ -141,7 +178,15 @@ func (_m *ManagementApiFacade) 
ListKeyspaces(keyspaceName string) ([]string, err func (_m *ManagementApiFacade) ListTables(keyspaceName string) ([]string, error) { ret := _m.Called(keyspaceName) + if len(ret) == 0 { + panic("no return value specified for ListTables") + } + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func(string) ([]string, error)); ok { + return rf(keyspaceName) + } if rf, ok := ret.Get(0).(func(string) []string); ok { r0 = rf(keyspaceName) } else { @@ -150,7 +195,6 @@ func (_m *ManagementApiFacade) ListTables(keyspaceName string) ([]string, error) } } - var r1 error if rf, ok := ret.Get(1).(func(string) error); ok { r1 = rf(keyspaceName) } else { @@ -159,3 +203,17 @@ func (_m *ManagementApiFacade) ListTables(keyspaceName string) ([]string, error) return r0, r1 } + +// NewManagementApiFacade creates a new instance of ManagementApiFacade. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewManagementApiFacade(t interface { + mock.TestingT + Cleanup(func()) +}) *ManagementApiFacade { + mock := &ManagementApiFacade{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/mocks/reaper_manager.go b/pkg/mocks/reaper_manager.go index 2d5636ce3..fffb5e9a0 100644 --- a/pkg/mocks/reaper_manager.go +++ b/pkg/mocks/reaper_manager.go @@ -1,12 +1,17 @@ -// Code generated by mockery v2.9.4. DO NOT EDIT. +// Code generated by mockery v2.43.2. DO NOT EDIT. 
package mocks import ( context "context" + alpha1 "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" + + client "sigs.k8s.io/controller-runtime/pkg/client" mock "github.com/stretchr/testify/mock" + v1 "k8s.io/api/core/v1" + v1alpha1 "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" v1beta1 "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1" @@ -21,6 +26,10 @@ type ReaperManager struct { func (_m *ReaperManager) AddClusterToReaper(ctx context.Context, cassdc *v1beta1.CassandraDatacenter) error { ret := _m.Called(ctx, cassdc) + if len(ret) == 0 { + panic("no return value specified for AddClusterToReaper") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *v1beta1.CassandraDatacenter) error); ok { r0 = rf(ctx, cassdc) @@ -35,6 +44,10 @@ func (_m *ReaperManager) AddClusterToReaper(ctx context.Context, cassdc *v1beta1 func (_m *ReaperManager) Connect(ctx context.Context, _a1 *v1alpha1.Reaper, username string, password string) error { ret := _m.Called(ctx, _a1, username, password) + if len(ret) == 0 { + panic("no return value specified for Connect") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *v1alpha1.Reaper, string, string) error); ok { r0 = rf(ctx, _a1, username, password) @@ -45,18 +58,83 @@ func (_m *ReaperManager) Connect(ctx context.Context, _a1 *v1alpha1.Reaper, user return r0 } +// ConnectWithReaperRef provides a mock function with given fields: ctx, kc, username, password +func (_m *ReaperManager) ConnectWithReaperRef(ctx context.Context, kc *alpha1.K8ssandraCluster, username, password string) error { + ret := _m.Called(ctx, kc, username, password) + + if len(ret) == 0 { + panic("no return value specified for ConnectWithReaperRef") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *alpha1.K8ssandraCluster, string, string) error); ok { + r0 = rf(ctx, kc, username, password) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetUiCredentials provides a mock 
function with given fields: ctx, uiUserSecretRef, namespace +func (_m *ReaperManager) GetUiCredentials(ctx context.Context, uiUserSecretRef *v1.LocalObjectReference, namespace string) (string, string, error) { + ret := _m.Called(ctx, uiUserSecretRef, namespace) + + if len(ret) == 0 { + panic("no return value specified for GetUiCredentials") + } + + var r0 string + var r1 string + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, *v1.LocalObjectReference, string) (string, string, error)); ok { + return rf(ctx, uiUserSecretRef, namespace) + } + if rf, ok := ret.Get(0).(func(context.Context, *v1.LocalObjectReference, string) string); ok { + r0 = rf(ctx, uiUserSecretRef, namespace) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, *v1.LocalObjectReference, string) string); ok { + r1 = rf(ctx, uiUserSecretRef, namespace) + } else { + r1 = ret.Get(1).(string) + } + + if rf, ok := ret.Get(2).(func(context.Context, *v1.LocalObjectReference, string) error); ok { + r2 = rf(ctx, uiUserSecretRef, namespace) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// SetK8sClient provides a mock function with given fields: _a0 +func (_m *ReaperManager) SetK8sClient(_a0 client.Reader) { + _m.Called(_a0) +} + // VerifyClusterIsConfigured provides a mock function with given fields: ctx, cassdc func (_m *ReaperManager) VerifyClusterIsConfigured(ctx context.Context, cassdc *v1beta1.CassandraDatacenter) (bool, error) { ret := _m.Called(ctx, cassdc) + if len(ret) == 0 { + panic("no return value specified for VerifyClusterIsConfigured") + } + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *v1beta1.CassandraDatacenter) (bool, error)); ok { + return rf(ctx, cassdc) + } if rf, ok := ret.Get(0).(func(context.Context, *v1beta1.CassandraDatacenter) bool); ok { r0 = rf(ctx, cassdc) } else { r0 = ret.Get(0).(bool) } - var r1 error if rf, ok := ret.Get(1).(func(context.Context, 
*v1beta1.CassandraDatacenter) error); ok { r1 = rf(ctx, cassdc) } else { @@ -65,3 +143,17 @@ func (_m *ReaperManager) VerifyClusterIsConfigured(ctx context.Context, cassdc * return r0, r1 } + +// NewReaperManager creates a new instance of ReaperManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReaperManager(t interface { + mock.TestingT + Cleanup(func()) +}) *ReaperManager { + mock := &ReaperManager{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/reaper/deployment.go b/pkg/reaper/deployment.go index 91b481690..b06927de2 100644 --- a/pkg/reaper/deployment.go +++ b/pkg/reaper/deployment.go @@ -61,22 +61,27 @@ func computeEnvVars(reaper *api.Reaper, dc *cassdcapi.CassandraDatacenter) []cor Name: "REAPER_ENABLE_DYNAMIC_SEED_LIST", Value: "false", }, - { - Name: "REAPER_CASS_CONTACT_POINTS", - Value: fmt.Sprintf("[%s]", dc.GetDatacenterServiceName()), - }, { Name: "REAPER_DATACENTER_AVAILABILITY", Value: reaper.Spec.DatacenterAvailability, }, - { + } + + // env vars used to interact with Cassandra cluster used for storage (not the one to repair) are only needed + // when we actually have a cass-dc available + if dc.DatacenterName() != "" { + envVars = append(envVars, corev1.EnvVar{ Name: "REAPER_CASS_LOCAL_DC", Value: dc.DatacenterName(), - }, - { + }) + envVars = append(envVars, corev1.EnvVar{ Name: "REAPER_CASS_KEYSPACE", Value: reaper.Spec.Keyspace, - }, + }) + envVars = append(envVars, corev1.EnvVar{ + Name: "REAPER_CASS_CONTACT_POINTS", + Value: fmt.Sprintf("[%s]", dc.GetDatacenterServiceName()), + }) } if reaper.Spec.AutoScheduling.Enabled { @@ -84,7 +89,11 @@ func computeEnvVars(reaper *api.Reaper, dc *cassdcapi.CassandraDatacenter) []cor Name: "REAPER_AUTO_SCHEDULING_ENABLED", Value: "true", }) - adaptive, incremental := getAdaptiveIncremental(reaper, dc) + var 
serverVersion = "" + if dc != nil && dc.Spec.ServerVersion != "" { + serverVersion = dc.Spec.ServerVersion + } + adaptive, incremental := getAdaptiveIncremental(reaper, serverVersion) envVars = append(envVars, corev1.EnvVar{ Name: "REAPER_AUTO_SCHEDULING_ADAPTIVE", Value: fmt.Sprintf("%v", adaptive), @@ -496,14 +505,14 @@ func addAuthEnvVars(template *corev1.PodTemplateSpec, vars []*corev1.EnvVar) { } } -func getAdaptiveIncremental(reaper *api.Reaper, dc *cassdcapi.CassandraDatacenter) (adaptive bool, incremental bool) { +func getAdaptiveIncremental(reaper *api.Reaper, serverVersion string) (adaptive bool, incremental bool) { switch reaper.Spec.AutoScheduling.RepairType { case "ADAPTIVE": adaptive = true case "INCREMENTAL": incremental = true case "AUTO": - if semver.MustParse(dc.Spec.ServerVersion).Major() == 3 { + if serverVersion == "" || semver.MustParse(serverVersion).Major() == 3 { adaptive = true } else { incremental = true diff --git a/pkg/reaper/deployment_test.go b/pkg/reaper/deployment_test.go index e95076498..9dc08f590 100644 --- a/pkg/reaper/deployment_test.go +++ b/pkg/reaper/deployment_test.go @@ -294,6 +294,30 @@ func TestNewStatefulSet(t *testing.T) { }) } +func TestNewStatefulSetForControlPlane(t *testing.T) { + reaper := newTestControlPlaneReaper() + noDataCenter := &cassdcapi.CassandraDatacenter{} + + sts := NewStatefulSet(reaper, noDataCenter, testlogr.NewTestLogger(t), nil, nil) + podSpec := sts.Spec.Template.Spec + container := podSpec.Containers[0] + assert.ElementsMatch(t, container.Env, []corev1.EnvVar{ + { + Name: "REAPER_STORAGE_TYPE", + Value: "memory", + }, + { + Name: "REAPER_ENABLE_DYNAMIC_SEED_LIST", + Value: "false", + }, + { + Name: "REAPER_DATACENTER_AVAILABILITY", + Value: "", + }, + }) + +} + func TestHttpManagementConfiguration(t *testing.T) { reaper := newTestReaper() reaper.Spec.HttpManagement.Enabled = true @@ -698,6 +722,23 @@ func newTestReaper() *reaperapi.Reaper { } } +func newTestControlPlaneReaper() *reaperapi.Reaper 
{ + namespace := "reaper-cp-test" + reaperName := "cp-reaper" + return &reaperapi.Reaper{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: reaperName, + }, + Spec: reaperapi.ReaperSpec{ + ReaperTemplate: reaperapi.ReaperTemplate{ + StorageType: "local", + StorageConfig: newTestStorageConfig(), + }, + }, + } +} + func newTestDatacenter() *cassdcapi.CassandraDatacenter { namespace := "service-test" dcName := "dc1" @@ -806,3 +847,37 @@ func TestLabelsAnnotations(t *testing.T) { assert.Equal(t, deploymentLabels, deployment.Labels) assert.Equal(t, podLabels, deployment.Spec.Template.Labels) } + +func TestGetAdaptiveIncremental(t *testing.T) { + reaper := &reaperapi.Reaper{} + dc := &cassdcapi.CassandraDatacenter{} + + adaptive, incremental := getAdaptiveIncremental(reaper, dc.Spec.ServerVersion) + assert.False(t, adaptive) + assert.False(t, incremental) + + reaper.Spec.AutoScheduling.RepairType = "ADAPTIVE" + adaptive, incremental = getAdaptiveIncremental(reaper, dc.Spec.ServerVersion) + assert.True(t, adaptive) + assert.False(t, incremental) + + reaper.Spec.AutoScheduling.RepairType = "INCREMENTAL" + adaptive, incremental = getAdaptiveIncremental(reaper, dc.Spec.ServerVersion) + assert.False(t, adaptive) + assert.True(t, incremental) + + reaper.Spec.AutoScheduling.RepairType = "AUTO" + adaptive, incremental = getAdaptiveIncremental(reaper, dc.Spec.ServerVersion) + assert.True(t, adaptive) + assert.False(t, incremental) + + dc.Spec.ServerVersion = "3.11.1" + adaptive, incremental = getAdaptiveIncremental(reaper, dc.Spec.ServerVersion) + assert.True(t, adaptive) + assert.False(t, incremental) + + dc.Spec.ServerVersion = "4.0.0" + adaptive, incremental = getAdaptiveIncremental(reaper, dc.Spec.ServerVersion) + assert.False(t, adaptive) + assert.True(t, incremental) +} diff --git a/pkg/reaper/manager.go b/pkg/reaper/manager.go index 67f8ad5a2..d8287b0fb 100644 --- a/pkg/reaper/manager.go +++ b/pkg/reaper/manager.go @@ -3,7 +3,12 @@ package reaper import ( 
"context" "fmt" + "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" "net/url" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/k8ssandra/k8ssandra-operator/pkg/utils" @@ -14,8 +19,11 @@ import ( type Manager interface { Connect(ctx context.Context, reaper *api.Reaper, username, password string) error + ConnectWithReaperRef(ctx context.Context, kc *v1alpha1.K8ssandraCluster, username, password string) error AddClusterToReaper(ctx context.Context, cassdc *cassdcapi.CassandraDatacenter) error VerifyClusterIsConfigured(ctx context.Context, cassdc *cassdcapi.CassandraDatacenter) (bool, error) + GetUiCredentials(ctx context.Context, uiUserSecretRef *corev1.LocalObjectReference, namespace string) (string, string, error) + SetK8sClient(client.Reader) } func NewManager() Manager { @@ -24,12 +32,28 @@ func NewManager() Manager { type restReaperManager struct { reaperClient reaperclient.Client + k8sClient client.Reader +} + +func (r *restReaperManager) SetK8sClient(k8sClient client.Reader) { + r.k8sClient = k8sClient +} + +func (r *restReaperManager) ConnectWithReaperRef(ctx context.Context, kc *v1alpha1.K8ssandraCluster, username, password string) error { + var namespace = kc.Spec.Reaper.ReaperRef.Namespace + if namespace == "" { + namespace = kc.Namespace + } + reaperSvc := fmt.Sprintf("%s.%s", GetServiceName(kc.Spec.Reaper.ReaperRef.Name), namespace) + return r.connect(ctx, reaperSvc, username, password) } func (r *restReaperManager) Connect(ctx context.Context, reaper *api.Reaper, username, password string) error { - // Include the namespace in case Reaper is deployed in a different namespace than - // the CassandraDatacenter. - reaperSvc := GetServiceName(reaper.Name) + "." 
+ reaper.Namespace + reaperSvc := fmt.Sprintf("%s.%s", GetServiceName(reaper.Name), reaper.Namespace) + return r.connect(ctx, reaperSvc, username, password) +} + +func (r *restReaperManager) connect(ctx context.Context, reaperSvc, username, password string) error { u, err := url.Parse(fmt.Sprintf("http://%s:8080", reaperSvc)) if err != nil { return err @@ -40,7 +64,6 @@ func (r *restReaperManager) Connect(ctx context.Context, reaper *api.Reaper, use return err } } - return nil } @@ -55,3 +78,22 @@ func (r *restReaperManager) VerifyClusterIsConfigured(ctx context.Context, cassd } return utils.SliceContains(clusters, cassdcapi.CleanupForKubernetes(cassdc.Spec.ClusterName)), nil } + +func (r *restReaperManager) GetUiCredentials(ctx context.Context, uiUserSecretRef *corev1.LocalObjectReference, namespace string) (string, string, error) { + if uiUserSecretRef == nil || uiUserSecretRef.Name == "" { + // The UI user secret doesn't exist, meaning auth is disabled + return "", "", nil + } + + secretKey := types.NamespacedName{Namespace: namespace, Name: uiUserSecretRef.Name} + + secret := &corev1.Secret{} + err := r.k8sClient.Get(ctx, secretKey, secret) + if errors.IsNotFound(err) { + return "", "", fmt.Errorf("reaper ui secret does not exist") + } else if err != nil { + return "", "", fmt.Errorf("failed to get reaper ui secret") + } else { + return string(secret.Data["username"]), string(secret.Data["password"]), nil + } +} diff --git a/pkg/reaper/resource.go b/pkg/reaper/resource.go index dccb430e8..7da5b35ac 100644 --- a/pkg/reaper/resource.go +++ b/pkg/reaper/resource.go @@ -29,12 +29,12 @@ func NewReaper( reaperKey types.NamespacedName, kc *k8ssandraapi.K8ssandraCluster, dc *cassdcapi.CassandraDatacenter, - reaperTemplate *reaperapi.ReaperClusterTemplate, + reaperClusterTemplate *reaperapi.ReaperClusterTemplate, logger logr.Logger, ) (*reaperapi.Reaper, error) { labels := createResourceLabels(kc) var anns map[string]string - if m := reaperTemplate.ResourceMeta; m != 
nil { + if m := reaperClusterTemplate.ResourceMeta; m != nil { labels = utils.MergeMap(labels, m.Labels) anns = m.Annotations } @@ -47,15 +47,21 @@ func NewReaper( Labels: labels, }, Spec: reaperapi.ReaperSpec{ - ReaperTemplate: reaperTemplate.ReaperTemplate, - DatacenterRef: reaperapi.CassandraDatacenterRef{ - Name: dc.Name, - Namespace: dc.Namespace, - }, + ReaperTemplate: reaperClusterTemplate.ReaperTemplate, DatacenterAvailability: computeReaperDcAvailability(kc), ClientEncryptionStores: kc.Spec.Cassandra.ClientEncryptionStores, }, } + if kc.Spec.Reaper != nil { + if kc.Spec.Reaper.ReaperRef.Name == "" { + // we only set the DatacenterRef if we do not have an external Reaper referred to via the ReaperRef + desiredReaper.Spec.DatacenterRef = reaperapi.CassandraDatacenterRef{ + Name: dc.Name, + Namespace: dc.Namespace, + } + } + } + if kc.Spec.IsAuthEnabled() && !kc.Spec.UseExternalSecrets() { logger.Info("Auth is enabled, adding user secrets to Reaper spec") // if auth is enabled in this cluster, the k8ssandra controller will automatically create two secrets for diff --git a/pkg/shared/vector.go b/pkg/shared/vector.go new file mode 100644 index 000000000..7b9fa7a49 --- /dev/null +++ b/pkg/shared/vector.go @@ -0,0 +1,25 @@ +package shared + +import ( + "context" + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func DeleteConfigMapIfExists(ctx context.Context, remoteClient client.Client, configMapKey client.ObjectKey, logger logr.Logger) error { + configMap := &corev1.ConfigMap{} + if err := remoteClient.Get(ctx, configMapKey, configMap); err != nil { + if errors.IsNotFound(err) { + return nil + } + logger.Error(err, "Failed to get ConfigMap", "configMapKey", configMapKey) + return err + } + if err := remoteClient.Delete(ctx, configMap); err != nil { + logger.Error(err, "Failed to delete ConfigMap", "configMapKey", configMapKey) + return err + } + return nil +} diff 
--git a/test/e2e/reaper_test.go b/test/e2e/reaper_test.go index 848d29dc6..89df3e6b9 100644 --- a/test/e2e/reaper_test.go +++ b/test/e2e/reaper_test.go @@ -305,6 +305,34 @@ func createReaperAndDatacenter(t *testing.T, ctx context.Context, namespace stri }) } +func createControlPlaneReaperAndDatacenter(t *testing.T, ctx context.Context, namespace string, f *framework.E2eFramework) { + reaperKey := framework.ClusterKey{K8sContext: f.ControlPlaneContext, NamespacedName: types.NamespacedName{Namespace: namespace, Name: "reaper1"}} + dcKey := framework.ClusterKey{K8sContext: f.DataPlaneContexts[0], NamespacedName: types.NamespacedName{Namespace: namespace, Name: "dc1"}} + + checkReaperReady(t, f, ctx, reaperKey) + checkDatacenterReady(t, ctx, dcKey, f) + + dcPrefix := DcPrefix(t, f, dcKey) + + createKeyspaceAndTable(t, f, ctx, f.DataPlaneContexts[0], namespace, "e2etestcluster", dcPrefix+"-default-sts-0", "test_ks", "test_table", 2) + + t.Log("deploying Reaper ingress routes in context", f.ControlPlaneContext) + reaperRestHostAndPort := ingressConfigs[f.ControlPlaneContext].ReaperRest + f.DeployReaperIngresses(t, f.ControlPlaneContext, namespace, "reaper1-service", reaperRestHostAndPort) + defer f.UndeployAllIngresses(t, f.ControlPlaneContext, namespace) + checkReaperApiReachable(t, ctx, reaperRestHostAndPort) + + t.Run("TestReaperApi[0]", func(t *testing.T) { + t.Log("test Reaper API in context", f.ControlPlaneContext) + secretKey := framework.ClusterKey{K8sContext: f.ControlPlaneContext, NamespacedName: types.NamespacedName{Namespace: namespace, Name: "reaper-ui-secret"}} + username, password := retrieveCredentials(t, f, ctx, secretKey) + testReaperApi(t, ctx, f.ControlPlaneContext, DcClusterName(t, f, dcKey), "test_ks", username, password) + }) + + t.Log("verify Reaper keyspace absent") + checkKeyspaceNeverCreated(t, f, ctx, f.DataPlaneContexts[0], namespace, "e2etestcluster", dcPrefix+"-default-sts-0", "reaper_db") +} + func checkReaperReady(t *testing.T, f 
*framework.E2eFramework, ctx context.Context, reaperKey framework.ClusterKey) { t.Logf("check that Reaper %s in cluster %s is ready", reaperKey.Name, reaperKey.K8sContext) withReaper := f.NewWithReaper(ctx, reaperKey) @@ -428,10 +456,12 @@ func testReaperApi(t *testing.T, ctx context.Context, k8sContext, clusterName, k sanitizedClusterName := cassdcapi.CleanupForKubernetes(clusterName) reaperClient := connectReaperApi(t, ctx, k8sContext, sanitizedClusterName, username, password) repairId := triggerRepair(t, ctx, sanitizedClusterName, keyspace, reaperClient) - t.Log("Waiting for one segment to be repaired and canceling run") + t.Log("waiting for one segment to be repaired, then canceling run") waitForOneSegmentToBeDone(t, ctx, repairId, reaperClient) + t.Log("a segment has completed") err := reaperClient.AbortRepairRun(ctx, repairId) require.NoErrorf(t, err, "Failed to abort repair run %s: %s", repairId, err) + t.Log("repair aborted") } func checkClusterIsRegisteredInReaper(t *testing.T, ctx context.Context, clusterName string, reaperClient reaperclient.Client) { diff --git a/test/e2e/suite_test.go b/test/e2e/suite_test.go index c310b5bc9..a94013d8a 100644 --- a/test/e2e/suite_test.go +++ b/test/e2e/suite_test.go @@ -274,6 +274,12 @@ func TestOperator(t *testing.T) { skipK8ssandraClusterCleanup: true, doCassandraDatacenterCleanup: true, })) + t.Run("CreateControlPlaneReaperAndDataCenter", e2eTest(ctx, &e2eTestOpts{ + testFunc: createControlPlaneReaperAndDatacenter, + fixture: framework.NewTestFixture("reaper-control-plane", controlPlane), + skipK8ssandraClusterCleanup: true, + doCassandraDatacenterCleanup: false, + })) t.Run("ClusterScoped", func(t *testing.T) { t.Run("MultiDcMultiCluster", e2eTest(ctx, &e2eTestOpts{ testFunc: multiDcMultiCluster, @@ -1911,6 +1917,35 @@ func getSecret(t *testing.T, f *framework.E2eFramework, ctx context.Context, sec return secret } +func createKeyspaceAndTable( + t *testing.T, + f *framework.E2eFramework, + ctx context.Context, + 
k8sContext, namespace, clusterName, pod, keyspace, table string, replicationFactor int, +) { + _, err := f.ExecuteCql(ctx, k8sContext, namespace, clusterName, pod, fmt.Sprintf("CREATE KEYSPACE %s WITH REPLICATION = {'class' : 'SimpleStrategy', 'replication_factor' : %d};", keyspace, replicationFactor)) + require.NoError(t, err, "failed to create keyspace") + + _, err = f.ExecuteCql(ctx, k8sContext, namespace, clusterName, pod, fmt.Sprintf("CREATE TABLE %s.%s (id int PRIMARY KEY);", keyspace, table)) + require.NoError(t, err, "failed to create table") +} + +func checkKeyspaceNeverCreated( + t *testing.T, + f *framework.E2eFramework, + ctx context.Context, + k8sContext, namespace, clusterName, pod, keyspace string, +) { + require.Never(t, func() bool { + keyspaces, err := f.ExecuteCql(ctx, k8sContext, namespace, clusterName, pod, "DESCRIBE KEYSPACES") + if err != nil { + t.Logf("failed to describe keyspaces: %v", err) + return false + } + return strings.Contains(keyspaces, keyspace) + }, 1*time.Minute, 3*time.Second) +} + func checkKeyspaceExists( t *testing.T, f *framework.E2eFramework, diff --git a/test/testdata/fixtures/reaper-control-plane/k8ssandra.yaml b/test/testdata/fixtures/reaper-control-plane/k8ssandra.yaml new file mode 100644 index 000000000..bbb7597e6 --- /dev/null +++ b/test/testdata/fixtures/reaper-control-plane/k8ssandra.yaml @@ -0,0 +1,45 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: cassandra-config +data: + cassandra.yaml: | + concurrent_reads: 32 + concurrent_writes: 32 + concurrent_counter_writes: 32 +--- +apiVersion: k8ssandra.io/v1alpha1 +kind: K8ssandraCluster +metadata: + name: test +spec: + reaper: + reaperRef: + name: reaper1 + uiUserSecretRef: + name: reaper-ui-secret + cassandra: + clusterName: "E2E Test Cluster" + serverVersion: "3.11.14" + serverImage: "k8ssandra/cass-management-api:3.11.14" + jmxInitContainerImage: + repository: library + name: busybox + datacenters: + - metadata: + name: dc1 + k8sContext: kind-k8ssandra-0 + 
size: 2 + storageConfig: + cassandraDataVolumeClaimSpec: + storageClassName: standard + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + config: + jvmOptions: + heapSize: 384Mi + cassandra_ring_delay_ms: 0 + mgmtAPIHeap: 64Mi diff --git a/test/testdata/fixtures/reaper-control-plane/kustomization.yaml b/test/testdata/fixtures/reaper-control-plane/kustomization.yaml new file mode 100644 index 000000000..a60c99d22 --- /dev/null +++ b/test/testdata/fixtures/reaper-control-plane/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - reaper-ui-secret.yaml + - reaper.yaml + - k8ssandra.yaml diff --git a/test/testdata/fixtures/reaper-control-plane/reaper-ui-secret.yaml b/test/testdata/fixtures/reaper-control-plane/reaper-ui-secret.yaml new file mode 100644 index 000000000..0822c4ff4 --- /dev/null +++ b/test/testdata/fixtures/reaper-control-plane/reaper-ui-secret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: reaper-ui-secret +data: + # username: reaper-jmx (actually) + username: cmVhcGVyLWpteA== + # password: R3ap3r + password: UjNhcDNy diff --git a/test/testdata/fixtures/reaper-control-plane/reaper.yaml b/test/testdata/fixtures/reaper-control-plane/reaper.yaml new file mode 100644 index 000000000..c3ee64c99 --- /dev/null +++ b/test/testdata/fixtures/reaper-control-plane/reaper.yaml @@ -0,0 +1,19 @@ +apiVersion: reaper.k8ssandra.io/v1alpha1 +kind: Reaper +metadata: + name: reaper1 +spec: + storageType: local + storageConfig: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 256Mi + httpManagement: + enabled: true + heapSize: 256Mi + autoScheduling: + enabled: false + uiUserSecretRef: + name: reaper-ui-secret diff --git a/test/testdata/fixtures/single-dc-reaper/k8ssandra.yaml b/test/testdata/fixtures/single-dc-reaper/k8ssandra.yaml index a043f3b78..6d73bdb08 100644 --- a/test/testdata/fixtures/single-dc-reaper/k8ssandra.yaml +++ 
b/test/testdata/fixtures/single-dc-reaper/k8ssandra.yaml @@ -4,6 +4,7 @@ metadata: name: test spec: reaper: + deploymentMode: SINGLE storageType: local storageConfig: accessModes: