diff --git a/CHANGELOG/CHANGELOG-1.18.md b/CHANGELOG/CHANGELOG-1.18.md index 11b2b1709..fa474877b 100644 --- a/CHANGELOG/CHANGELOG-1.18.md +++ b/CHANGELOG/CHANGELOG-1.18.md @@ -26,3 +26,5 @@ When cutting a new release, update the `unreleased` heading to the tag being gen * [ENHANCEMENT] [#1274](https://github.com/k8ssandra/k8ssandra-operator/issues/1274) On upgrade, do not modify the CassandraDatacenter object unless instructed with an annotation `k8ssandra.io/autoupdate-spec` with value `once` or `always` * [BUGFIX] [#1222](https://github.com/k8ssandra/k8ssandra-operator/issues/1222) Consider DC-level config when validating numToken updates in webhook * [BUGFIX] [#1366](https://github.com/k8ssandra/k8ssandra-operator/issues/1366) Reaper deployment can't be created on OpenShift due to missing RBAC rule +* [CHANGE] Update cassandra-medusa to 0.22.0 +* [FEATURE] [#1275](https://github.com/k8ssandra/k8ssandra-operator/issues/1275) Allow configuring Reaper to use a memory storage backend diff --git a/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook.go b/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook.go index 53e996ead..64c17db86 100644 --- a/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook.go +++ b/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook.go @@ -19,6 +19,7 @@ package v1alpha1 import ( "fmt" "github.com/Masterminds/semver/v3" + reaperapi "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,13 +35,17 @@ import ( ) var ( - clientCache *clientcache.ClientCache - ErrNumTokens = fmt.Errorf("num_tokens value can't be changed") - ErrReaperKeyspace = fmt.Errorf("reaper keyspace can not be changed") - ErrNoStorageConfig = fmt.Errorf("storageConfig must be defined at cluster level or dc level") - ErrNoResourcesSet = fmt.Errorf("softPodAntiAffinity requires Resources to be set") - ErrClusterName = fmt.Errorf("cluster name can not be changed") - ErrNoStoragePrefix = fmt.Errorf("medusa storage prefix must be set when a medusaConfigurationRef is used") + clientCache *clientcache.ClientCache + ErrNumTokens = fmt.Errorf("num_tokens value can't be changed") + ErrReaperKeyspace = fmt.Errorf("reaper keyspace can not be changed") + ErrNoStorageConfig = fmt.Errorf("storageConfig must be defined at cluster level or dc level") + ErrNoResourcesSet = fmt.Errorf("softPodAntiAffinity requires Resources to be set") + ErrClusterName = fmt.Errorf("cluster name can not be changed") + ErrNoStoragePrefix = fmt.Errorf("medusa storage prefix must be set when a medusaConfigurationRef is used") + ErrNoReaperStorageConfig = fmt.Errorf("reaper StorageConfig not set") + ErrNoReaperAccessMode = fmt.Errorf("reaper StorageConfig.AccessModes not set") + ErrNoReaperResourceRequests = fmt.Errorf("reaper StorageConfig.Resources.Requests not set") + ErrNoReaperStorageRequest = fmt.Errorf("reaper StorageConfig.Resources.Requests.Storage not set") ) // log is for logging in this package. 
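For context, the new `Err*` sentinels above back the `validateReaper` check added in the next hunk: when `storageType` is `local`, the webhook rejects specs that lack a storage config, access modes, or a non-zero storage request. A minimal sketch of a spec that passes those checks (the cluster name is a placeholder, and `storageClassName` is deliberately omitted since the webhook leaves it to the cluster's default StorageClass):

```yaml
apiVersion: k8ssandra.io/v1alpha1
kind: K8ssandraCluster
metadata:
  name: demo                # placeholder name
spec:
  reaper:
    storageType: local      # "cassandra" (the default) skips these checks entirely
    storageConfig:          # omitted -> ErrNoReaperStorageConfig
      accessModes:          # omitted -> ErrNoReaperAccessMode
        - ReadWriteOnce
      resources:
        requests:           # omitted -> ErrNoReaperResourceRequests
          storage: 1Gi      # zero or absent -> ErrNoReaperStorageRequest
  # cassandra: ... (datacenter config elided)
```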
@@ -113,6 +118,10 @@ func (r *K8ssandraCluster) validateK8ssandraCluster() error { return err } + if err := r.validateReaper(); err != nil { + return err + } + if err := r.validateStatefulsetNameSize(); err != nil { return err } @@ -280,3 +289,26 @@ func (r *K8ssandraCluster) ValidateMedusa() error { return nil } + +func (r *K8ssandraCluster) validateReaper() error { + if r.Spec.Reaper == nil { + return nil + } + if r.Spec.Reaper.StorageType != reaperapi.StorageTypeLocal { + return nil + } + if r.Spec.Reaper.StorageConfig == nil { + return ErrNoReaperStorageConfig + } + // not checking StorageClassName because Kubernetes will use a default one if it's not set + if r.Spec.Reaper.StorageConfig.AccessModes == nil { + return ErrNoReaperAccessMode + } + if r.Spec.Reaper.StorageConfig.Resources.Requests == nil { + return ErrNoReaperResourceRequests + } + if r.Spec.Reaper.StorageConfig.Resources.Requests.Storage().IsZero() { + return ErrNoReaperStorageRequest + } + return nil +} diff --git a/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook_test.go b/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook_test.go index be713bd10..4b81edb81 100644 --- a/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook_test.go +++ b/apis/k8ssandra/v1alpha1/k8ssandracluster_webhook_test.go @@ -20,6 +20,7 @@ import ( "context" "crypto/tls" "fmt" + "k8s.io/apimachinery/pkg/api/resource" "net" "path/filepath" "testing" @@ -53,6 +54,23 @@ var testEnv *envtest.Environment var ctx context.Context var cancel context.CancelFunc +var minimalInMemoryReaperStorageConfig = &corev1.PersistentVolumeClaimSpec{ + StorageClassName: func() *string { s := "test"; return &s }(), + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, +} + +var minimalInMemoryReaperConfig = &reaperapi.ReaperClusterTemplate{ + ReaperTemplate: reaperapi.ReaperTemplate{ + StorageType: reaperapi.StorageTypeLocal, + StorageConfig: minimalInMemoryReaperStorageConfig, + }, +} + func TestWebhook(t *testing.T) { required := require.New(t) ctx, cancel = context.WithCancel(context.TODO()) @@ -160,6 +178,7 @@ func TestWebhook(t *testing.T) { t.Run("InvalidDcName", testInvalidDcName) t.Run("MedusaConfigNonLocalNamespace", testMedusaNonLocalNamespace) t.Run("AutomatedUpdateAnnotation", testAutomatedUpdateAnnotation) + t.Run("ReaperStorage", testReaperStorage) } func testContextValidation(t *testing.T) { @@ -496,6 +515,36 @@ func testMedusaNonLocalNamespace(t *testing.T) { required.Contains(err.Error(), "Medusa config must be namespace local") } +func testReaperStorage(t *testing.T) { + required := require.New(t) + + reaperWithNoStorageConfig := createMinimalClusterObj("reaper-no-storage-config", "ns") + reaperWithNoStorageConfig.Spec.Reaper = &reaperapi.ReaperClusterTemplate{ + ReaperTemplate: reaperapi.ReaperTemplate{ + StorageType: reaperapi.StorageTypeLocal, + }, + } + err := reaperWithNoStorageConfig.validateK8ssandraCluster() + required.Error(err) + + reaperWithDefaultConfig := createClusterObjWithCassandraConfig("reaper-default-storage-config", "ns") + reaperWithDefaultConfig.Spec.Reaper = minimalInMemoryReaperConfig.DeepCopy() + err = reaperWithDefaultConfig.validateK8ssandraCluster() + required.NoError(err) + + reaperWithoutAccessMode := createClusterObjWithCassandraConfig("reaper-no-access-mode", "ns") + reaperWithoutAccessMode.Spec.Reaper = minimalInMemoryReaperConfig.DeepCopy() + 
reaperWithoutAccessMode.Spec.Reaper.StorageConfig.AccessModes = nil + err = reaperWithoutAccessMode.validateK8ssandraCluster() + required.Error(err) + + reaperWithoutStorageSize := createClusterObjWithCassandraConfig("reaper-no-storage-size", "ns") + reaperWithoutStorageSize.Spec.Reaper = minimalInMemoryReaperConfig.DeepCopy() + reaperWithoutStorageSize.Spec.Reaper.StorageConfig.Resources.Requests = corev1.ResourceList{} + err = reaperWithoutStorageSize.validateK8ssandraCluster() + required.Error(err) +} + // TestValidateUpdateNumTokens is a unit test for numTokens updates. func TestValidateUpdateNumTokens(t *testing.T) { type config struct { diff --git a/apis/reaper/v1alpha1/reaper_types.go b/apis/reaper/v1alpha1/reaper_types.go index 8745ca454..d32ee5077 100644 --- a/apis/reaper/v1alpha1/reaper_types.go +++ b/apis/reaper/v1alpha1/reaper_types.go @@ -30,12 +30,29 @@ import ( // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. const ( - ReaperLabel = "k8ssandra.io/reaper" - DefaultKeyspace = "reaper_db" + DeploymentModeSingle = "SINGLE" + DeploymentModePerDc = "PER_DC" + ReaperLabel = "k8ssandra.io/reaper" + DefaultKeyspace = "reaper_db" + StorageTypeCassandra = "cassandra" + StorageTypeLocal = "local" ) type ReaperTemplate struct { + // The storage backend to store Reaper's data. Defaults to "cassandra" which causes Reaper to be stateless and store + // its state to a Cassandra cluster it repairs (implying there must be one Reaper for each Cassandra cluster). + // The "local" option makes Reaper store its state locally, allowing a single Reaper to repair several clusters. + // +kubebuilder:validation:Enum=cassandra;local + // +kubebuilder:default="cassandra" + // +optional + StorageType string `json:"storageType,omitempty"` + + // If StorageType is "local", Reaper will need a Persistent Volume to persist its data. This field allows + // configuring that Persistent Volume. + // +optional + StorageConfig *corev1.PersistentVolumeClaimSpec `json:"storageConfig,omitempty"` + // The keyspace to use to store Reaper's state. Will default to "reaper_db" if unspecified. Will be created if it // does not exist, and if this Reaper resource is managed by K8ssandra. // +kubebuilder:default="reaper_db" @@ -222,6 +239,20 @@ type ReaperClusterTemplate struct { DeploymentMode string `json:"deploymentMode,omitempty"` } +// EnsureDeploymentMode ensures that the deployment mode is SINGLE when the local storage type is used. This prevents +// several Reaper instances with local storage from interfering with each other. +func (t *ReaperClusterTemplate) EnsureDeploymentMode() bool { + if t != nil { + if t.StorageType == StorageTypeLocal { + if t.DeploymentMode != DeploymentModeSingle { + t.DeploymentMode = DeploymentModeSingle + return true + } + } + } + return false +} + // CassandraDatacenterRef references the target Cassandra DC that Reaper should manage. // TODO this object could be used by Stargate too; which currently cannot locate DCs outside of its own namespace. type CassandraDatacenterRef struct { diff --git a/apis/reaper/v1alpha1/reaper_types_test.go b/apis/reaper/v1alpha1/reaper_types_test.go new file mode 100644 index 000000000..0905353f4 --- /dev/null +++ b/apis/reaper/v1alpha1/reaper_types_test.go @@ -0,0 +1,44 @@ +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestEnsureDeploymentMode(t *testing.T) { + rct := &ReaperClusterTemplate{ + ReaperTemplate: ReaperTemplate{ + StorageType: StorageTypeLocal, + }, + DeploymentMode: DeploymentModePerDc, + } + changed := rct.EnsureDeploymentMode() + assert.True(t, changed) + assert.Equal(t, DeploymentModeSingle, rct.DeploymentMode) + + rct = &ReaperClusterTemplate{ + ReaperTemplate: ReaperTemplate{ + StorageType: StorageTypeCassandra, + }, + DeploymentMode: DeploymentModePerDc, + } + changed = rct.EnsureDeploymentMode() + assert.False(t, changed) + assert.Equal(t, DeploymentModePerDc, rct.DeploymentMode) +} diff --git a/apis/reaper/v1alpha1/zz_generated.deepcopy.go b/apis/reaper/v1alpha1/zz_generated.deepcopy.go index e64edecb9..57b67b752 100644 --- a/apis/reaper/v1alpha1/zz_generated.deepcopy.go +++ b/apis/reaper/v1alpha1/zz_generated.deepcopy.go @@ -230,6 +230,11 @@ func (in *ReaperStatus) DeepCopy() *ReaperStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ReaperTemplate) DeepCopyInto(out *ReaperTemplate) { *out = *in + if in.StorageConfig != nil { + in, out := &in.StorageConfig, &out.StorageConfig + *out = new(v1.PersistentVolumeClaimSpec) + (*in).DeepCopyInto(*out) + } out.CassandraUserSecretRef = in.CassandraUserSecretRef out.JmxUserSecretRef = in.JmxUserSecretRef if in.UiUserSecretRef != nil { diff --git a/charts/k8ssandra-operator/crds/k8ssandra-operator-crds.yaml b/charts/k8ssandra-operator/crds/k8ssandra-operator-crds.yaml index 9f52314df..069cfd76b 100644 --- a/charts/k8ssandra-operator/crds/k8ssandra-operator-crds.yaml +++ b/charts/k8ssandra-operator/crds/k8ssandra-operator-crds.yaml @@ -28672,6 +28672,212 @@ spec: type: string type: object type: object + storageConfig: + description: |- + If StorageType is "local", Reaper will need a Persistent Volume to persist its data. This field allows + configuring that Persistent Volume. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. 
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + storageType: + default: cassandra + description: |- + The storage backend to store Reaper's data. Defaults to "cassandra" which causes Reaper to be stateless and store + its state to a Cassandra cluster it repairs (implying there must be one Reaper for each Cassandra cluster). + The "local" option makes Reaper to store its state locally, allowing a single Reaper to repair several clusters. + enum: + - cassandra + - local + type: string telemetry: description: |- Telemetry defines the desired telemetry integrations to deploy targeting the Reaper pods for all DCs in this cluster @@ -34565,6 +34771,212 @@ spec: if you know that the schema is already up-to-date, or if you know upfront that QUORUM cannot be achieved (for example, because a DC is down). type: boolean + storageConfig: + description: |- + If StorageType is "local", Reaper will need a Persistent Volume to persist its data. This field allows + configuring that Persistent Volume. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. 
+ This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + storageType: + default: cassandra + description: |- + The storage backend to store Reaper's data. Defaults to "cassandra" which causes Reaper to be stateless and store + its state to a Cassandra cluster it repairs (implying there must be one Reaper for each Cassandra cluster). 
+ The "local" option makes Reaper to store its state locally, allowing a single Reaper to repair several clusters. + enum: + - cassandra + - local + type: string telemetry: description: |- Telemetry defines the desired telemetry integrations to deploy targeting the Reaper pods for all DCs in this cluster diff --git a/charts/k8ssandra-operator/templates/clusterrole.yaml b/charts/k8ssandra-operator/templates/clusterrole.yaml index 12aea8b75..e32183243 100644 --- a/charts/k8ssandra-operator/templates/clusterrole.yaml +++ b/charts/k8ssandra-operator/templates/clusterrole.yaml @@ -33,6 +33,19 @@ rules: - patch - update - watch +- apiGroups: + - apps + resources: + - deployments + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - apps resources: diff --git a/charts/k8ssandra-operator/templates/role.yaml b/charts/k8ssandra-operator/templates/role.yaml index 4b06c55f1..e673ef8b0 100644 --- a/charts/k8ssandra-operator/templates/role.yaml +++ b/charts/k8ssandra-operator/templates/role.yaml @@ -33,6 +33,19 @@ rules: - patch - update - watch +- apiGroups: + - apps + resources: + - deployments + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - apps resources: diff --git a/config/crd/bases/k8ssandra.io_k8ssandraclusters.yaml b/config/crd/bases/k8ssandra.io_k8ssandraclusters.yaml index c3977e758..943e5e3c2 100644 --- a/config/crd/bases/k8ssandra.io_k8ssandraclusters.yaml +++ b/config/crd/bases/k8ssandra.io_k8ssandraclusters.yaml @@ -28610,6 +28610,212 @@ spec: type: string type: object type: object + storageConfig: + description: |- + If StorageType is "local", Reaper will need a Persistent Volume to persist its data. This field allows + configuring that Persistent Volume. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. 
+ When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. 
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + storageType: + default: cassandra + description: |- + The storage backend to store Reaper's data. 
Defaults to "cassandra" which causes Reaper to be stateless and store + its state to a Cassandra cluster it repairs (implying there must be one Reaper for each Cassandra cluster). + The "local" option makes Reaper to store its state locally, allowing a single Reaper to repair several clusters. + enum: + - cassandra + - local + type: string telemetry: description: |- Telemetry defines the desired telemetry integrations to deploy targeting the Reaper pods for all DCs in this cluster diff --git a/config/crd/bases/reaper.k8ssandra.io_reapers.yaml b/config/crd/bases/reaper.k8ssandra.io_reapers.yaml index 7f6f7aa38..b93edba11 100644 --- a/config/crd/bases/reaper.k8ssandra.io_reapers.yaml +++ b/config/crd/bases/reaper.k8ssandra.io_reapers.yaml @@ -2257,6 +2257,212 @@ spec: if you know that the schema is already up-to-date, or if you know upfront that QUORUM cannot be achieved (for example, because a DC is down). type: boolean + storageConfig: + description: |- + If StorageType is "local", Reaper will need a Persistent Volume to persist its data. This field allows + configuring that Persistent Volume. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. 
+ There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + storageType: + default: cassandra + description: |- + The storage backend to store Reaper's data. Defaults to "cassandra" which causes Reaper to be stateless and store + its state to a Cassandra cluster it repairs (implying there must be one Reaper for each Cassandra cluster). + The "local" option makes Reaper to store its state locally, allowing a single Reaper to repair several clusters. 
+ enum: + - cassandra + - local + type: string telemetry: description: |- Telemetry defines the desired telemetry integrations to deploy targeting the Reaper pods for all DCs in this cluster diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index bfadcc7cc..4965740dd 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -33,6 +33,19 @@ rules: - patch - update - watch +- apiGroups: + - apps + resources: + - deployments + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - apps resources: diff --git a/controllers/k8ssandra/reaper.go b/controllers/k8ssandra/reaper.go index 4145de48b..d24d7ba08 100644 --- a/controllers/k8ssandra/reaper.go +++ b/controllers/k8ssandra/reaper.go @@ -18,7 +18,6 @@ package k8ssandra import ( "context" - "github.com/go-logr/logr" cassdcapi "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1" api "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" @@ -87,7 +86,7 @@ func (r *K8ssandraClusterReconciler) reconcileReaper( reaperTemplate := kc.Spec.Reaper.DeepCopy() if reaperTemplate != nil { - if reaperTemplate.DeploymentMode == reaper.DeploymentModeSingle && getSingleReaperDcName(kc) != actualDc.Name { + if reaperTemplate.DeploymentMode == reaperapi.DeploymentModeSingle && getSingleReaperDcName(kc) != actualDc.Name { logger.Info("DC is not Reaper DC: skipping Reaper deployment") reaperTemplate = nil } @@ -97,6 +96,10 @@ func (r *K8ssandraClusterReconciler) reconcileReaper( } } + if updated := reaperTemplate.EnsureDeploymentMode(); updated { + logger.Info("Forced SINGLE deployment mode for Reaper because it has 'local' storage type") + } + actualReaper := &reaperapi.Reaper{} if reaperTemplate != nil { diff --git a/controllers/reaper/reaper_controller.go b/controllers/reaper/reaper_controller.go index 4e9264ce2..10a0a672f 100644 --- a/controllers/reaper/reaper_controller.go +++ b/controllers/reaper/reaper_controller.go @@ -19,7 +19,6 @@ package reaper import ( "context" "fmt" - "github.com/go-logr/logr" cassdcapi "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1" reaperapi "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" @@ -53,7 +52,7 @@ type ReaperReconciler struct { // +kubebuilder:rbac:groups=reaper.k8ssandra.io,namespace="k8ssandra",resources=reapers,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=reaper.k8ssandra.io,namespace="k8ssandra",resources=reapers/status,verbs=get;update;patch // +kubebuilder:rbac:groups=reaper.k8ssandra.io,namespace="k8ssandra",resources=reapers/finalizers,verbs=update -// +kubebuilder:rbac:groups="apps",namespace="k8ssandra",resources=deployments,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="apps",namespace="k8ssandra",resources=deployments;statefulsets,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups="core",namespace="k8ssandra",resources=pods;secrets,verbs=get;list;watch // +kubebuilder:rbac:groups="core",namespace="k8ssandra",resources=services,verbs=get;list;watch;create @@ -193,9 +192,17 @@ func (r *ReaperReconciler) reconcileDeployment( } logger.Info("Reconciling reaper deployment", "actualReaper", actualReaper) - desiredDeployment := reaper.NewDeployment(actualReaper, actualDc, keystorePassword, truststorePassword, logger, authVars...) 
- actualDeployment := &appsv1.Deployment{} + // work out how to deploy Reaper + actualDeployment, err := reaper.MakeActualDeploymentType(actualReaper) + if err != nil { + return ctrl.Result{}, err + } + desiredDeployment, err := reaper.MakeDesiredDeploymentType(actualReaper, actualDc, keystorePassword, truststorePassword, logger, authVars...) + if err != nil { + return ctrl.Result{}, err + } + if err := r.Get(ctx, deploymentKey, actualDeployment); err != nil { if errors.IsNotFound(err) { if err = controllerutil.SetControllerReference(actualReaper, desiredDeployment, r.Scheme); err != nil { @@ -224,13 +231,33 @@ return ctrl.Result{}, err } - actualDeployment = actualDeployment.DeepCopy() + actualDeployment, err = reaper.DeepCopyActualDeployment(actualDeployment) + if err != nil { + return ctrl.Result{}, err + } + + // if using local storage, we need to ensure only one Reaper exists, i.e. the STS has at most 1 replica + err = reaper.EnsureSingleReplica(actualReaper, actualDeployment, desiredDeployment, logger) + if err != nil { + return ctrl.Result{}, err + } // Check if the deployment needs to be updated if !annotations.CompareHashAnnotations(actualDeployment, desiredDeployment) { logger.Info("Updating Reaper Deployment") resourceVersion := actualDeployment.GetResourceVersion() - desiredDeployment.DeepCopyInto(actualDeployment) + + // DeepCopyInto must be called on the concrete Deployment or STS type; that can't easily be refactored out + switch desired := desiredDeployment.(type) { + case *appsv1.Deployment: + desired.DeepCopyInto(actualDeployment.(*appsv1.Deployment)) + case *appsv1.StatefulSet: + desired.DeepCopyInto(actualDeployment.(*appsv1.StatefulSet)) + default: + err := fmt.Errorf("unexpected type %T", desiredDeployment) + return ctrl.Result{}, err + } + actualDeployment.SetResourceVersion(resourceVersion) if err := controllerutil.SetControllerReference(actualReaper, actualDeployment, r.Scheme); err != nil { logger.Error(err, "Failed to set controller reference on updated Reaper Deployment") diff --git a/controllers/reaper/reaper_controller_test.go b/controllers/reaper/reaper_controller_test.go index c6039cfe4..dca1c8f44 100644 --- a/controllers/reaper/reaper_controller_test.go +++ b/controllers/reaper/reaper_controller_test.go @@ -2,6 +2,8 @@ package reaper import ( "context" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/utils/ptr" "testing" "time" @@ -61,6 +63,7 @@ func TestReaper(t *testing.T) { t.Run("CreateReaperWithAutoSchedulingEnabled", reaperControllerTest(ctx, testEnv, testCreateReaperWithAutoSchedulingEnabled)) t.Run("CreateReaperWithAuthEnabled", reaperControllerTest(ctx, testEnv, testCreateReaperWithAuthEnabled)) t.Run("CreateReaperWithAuthEnabledExternalSecret", reaperControllerTest(ctx, testEnv, testCreateReaperWithAuthEnabledExternalSecret)) + t.Run("CreateReaperWithLocalStorageBackend", reaperControllerTest(ctx, testEnv, testCreateReaperWithLocalStorageType)) } func newMockManager() reaper.Manager { @@ -315,6 +318,7 @@ func testCreateReaperWithExistingObjects(t *testing.T, ctx context.Context, k8sC func testCreateReaperWithAutoSchedulingEnabled(t *testing.T, ctx context.Context, k8sClient client.Client, testNamespace string) { t.Log("create the Reaper object") rpr := newReaper(testNamespace) + rpr.Spec.StorageType = reaperapi.StorageTypeCassandra rpr.Spec.AutoScheduling = reaperapi.AutoScheduling{ Enabled: true, } @@ -331,6 +335,13 @@ assert.Len(t,
deployment.Spec.Template.Spec.Containers, 1) + // deployment with Cassandra backend has an init container + assert.Len(t, deployment.Spec.Template.Spec.InitContainers, 1) + + // reaper with cassandra storage backend and this config has just one volume, for configuration + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1) + autoSchedulingEnabled := false for _, env := range deployment.Spec.Template.Spec.Containers[0].Env { if env.Name == "REAPER_AUTO_SCHEDULING_ENABLED" && env.Value == "true" { @@ -525,6 +536,37 @@ assert.False(t, envVarSecretHasKey(envVars, "REAPER_AUTH_PASSWORD", "password"), "Cassandra auth password env var secret key not found") } +func testCreateReaperWithLocalStorageType(t *testing.T, ctx context.Context, k8sClient client.Client, testNamespace string) { + t.Log("create the Reaper object") + r := newReaper(testNamespace) + r.Spec.StorageType = reaperapi.StorageTypeLocal + r.Spec.StorageConfig = newStorageConfig() + err := k8sClient.Create(ctx, r) + require.NoError(t, err) + + t.Log("check that the stateful set is created") + stsKey := types.NamespacedName{Namespace: testNamespace, Name: reaperName} + sts := &appsv1.StatefulSet{} + + require.Eventually(t, func() bool { + return k8sClient.Get(ctx, stsKey, sts) == nil + }, timeout, interval, "stateful set creation check failed") + + // when deployed as STS, Reaper has no init container + assert.Len(t, sts.Spec.Template.Spec.Containers, 1) + assert.Len(t, sts.Spec.Template.Spec.InitContainers, 0) + + // Reaper's API does not allow specifying replica count, so we have no easy way to increase this + assert.Equal(t, ptr.To[int32](1), sts.Spec.Replicas) + + // In this configuration, we expect Reaper to have a config volume mount, and a data volume mount + assert.Len(t, sts.Spec.Template.Spec.Containers[0].VolumeMounts, 2) + confVolumeMount := sts.Spec.Template.Spec.Containers[0].VolumeMounts[0].DeepCopy() + assert.Equal(t, "conf", confVolumeMount.Name) + dataVolumeMount := sts.Spec.Template.Spec.Containers[0].VolumeMounts[1].DeepCopy() + assert.Equal(t, "reaper-data", dataVolumeMount.Name) +} + // Check if env var exists func envVarExists(envVars []corev1.EnvVar, name string) bool { for _, envVar := range envVars { @@ -605,3 +647,15 @@ func patchDeploymentStatus(t *testing.T, ctx context.Context, deployment *appsv1 err := k8sClient.Status().Patch(ctx, deployment, deploymentPatch) require.NoError(t, err) } + +func newStorageConfig() *corev1.PersistentVolumeClaimSpec { + return &corev1.PersistentVolumeClaimSpec{ + StorageClassName: func() *string { s := "test"; return &s }(), + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + } +} diff --git a/pkg/reaper/deployment.go b/pkg/reaper/deployment.go index 0980e8de6..91b481690 100644 --- a/pkg/reaper/deployment.go +++ b/pkg/reaper/deployment.go @@ -2,6 +2,11 @@ package reaper import ( "fmt" + "github.com/k8ssandra/k8ssandra-operator/pkg/cassandra" + "github.com/k8ssandra/k8ssandra-operator/pkg/encryption" + "k8s.io/utils/ptr" + "math" + "sigs.k8s.io/controller-runtime/pkg/client" "strings" "github.com/Masterminds/semver/v3" @@ -10,8 +15,6 @@ import ( "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" api
"github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" "github.com/k8ssandra/k8ssandra-operator/pkg/annotations" - "github.com/k8ssandra/k8ssandra-operator/pkg/cassandra" - "github.com/k8ssandra/k8ssandra-operator/pkg/encryption" "github.com/k8ssandra/k8ssandra-operator/pkg/images" "github.com/k8ssandra/k8ssandra-operator/pkg/meta" appsv1 "k8s.io/api/apps/v1" @@ -42,31 +45,17 @@ var defaultImage = images.Image{ Tag: DefaultVersion, } -func NewDeployment(reaper *api.Reaper, dc *cassdcapi.CassandraDatacenter, keystorePassword *string, truststorePassword *string, logger logr.Logger, authVars ...*corev1.EnvVar) *appsv1.Deployment { - selector := metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{ - // Note: managed-by shouldn't be used here, but we're keeping it for backwards compatibility, since changing - // a deployment's selector is a breaking change. - { - Key: v1alpha1.ManagedByLabel, - Operator: metav1.LabelSelectorOpIn, - Values: []string{v1alpha1.NameLabelValue}, - }, - { - Key: api.ReaperLabel, - Operator: metav1.LabelSelectorOpIn, - Values: []string{reaper.Name}, - }, - }, +func computeEnvVars(reaper *api.Reaper, dc *cassdcapi.CassandraDatacenter) []corev1.EnvVar { + var storageType string + if reaper.Spec.StorageType == api.StorageTypeLocal { + storageType = "memory" + } else { + storageType = "cassandra" } - - readinessProbe := computeProbe(reaper.Spec.ReadinessProbe) - livenessProbe := computeProbe(reaper.Spec.LivenessProbe) - envVars := []corev1.EnvVar{ { Name: "REAPER_STORAGE_TYPE", - Value: "cassandra", + Value: storageType, }, { Name: "REAPER_ENABLE_DYNAMIC_SEED_LIST", @@ -169,121 +158,246 @@ func NewDeployment(reaper *api.Reaper, dc *cassdcapi.CassandraDatacenter, keysto } } + return envVars +} + +func computeVolumes(reaper *api.Reaper) ([]corev1.Volume, []corev1.VolumeMount) { + volumes := []corev1.Volume{ + { + Name: "conf", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + } + volumeMounts := []corev1.VolumeMount{ { Name: "conf", MountPath: "/etc/cassandra-reaper/config", }, } - volumes := []corev1.Volume{ - { - Name: "conf", + + if reaper.Spec.HttpManagement.Enabled && reaper.Spec.HttpManagement.Keystores != nil { + volumes = append(volumes, corev1.Volume{ + Name: "management-api-keystore", VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, + Secret: &corev1.SecretVolumeSource{ + SecretName: reaper.Spec.HttpManagement.Keystores.Name, + }, + }, + }) + + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: "management-api-keystore", + MountPath: "/etc/encryption/mgmt", + }) + } + + if reaper.Spec.StorageType == api.StorageTypeLocal { + volumes = append(volumes, corev1.Volume{ + Name: "reaper-data", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: fmt.Sprintf("reaper-data-%s", reaper.Name), + }, + }, + }) + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: "reaper-data", + MountPath: "/var/lib/cassandra-reaper/storage", + }) + } + + return volumes, volumeMounts +} + +func makeSelector(reaper *api.Reaper) *metav1.LabelSelector { + return &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + // Note: managed-by shouldn't be used here, but we're keeping it for backwards compatibility, since changing + // a deployment's selector is a breaking change. 
+ { + Key: v1alpha1.ManagedByLabel, + Operator: metav1.LabelSelectorOpIn, + Values: []string{v1alpha1.NameLabelValue}, + }, + { + Key: api.ReaperLabel, + Operator: metav1.LabelSelectorOpIn, + Values: []string{reaper.Name}, }, }, } +} + +func makeObjectMeta(reaper *api.Reaper) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: reaper.Namespace, + Name: reaper.Name, + Labels: createServiceAndDeploymentLabels(reaper), + Annotations: map[string]string{}, + } +} + +func computePodMeta(reaper *api.Reaper) metav1.ObjectMeta { + podMeta := getPodMeta(reaper) + return metav1.ObjectMeta{ + Labels: podMeta.Labels, + Annotations: podMeta.Annotations, + } +} + +func configureClientEncryption(reaper *api.Reaper, envVars *[]corev1.EnvVar, volumes *[]corev1.Volume, volumeMounts *[]corev1.VolumeMount, keystorePassword *string, truststorePassword *string) { // if client encryption is turned on, we need to mount the keystore and truststore volumes + // by client we mean a C* client, so this is only relevant if we are making a Deployment which uses C* as storage backend if reaper.Spec.ClientEncryptionStores != nil && keystorePassword != nil && truststorePassword != nil { keystoreVolume, truststoreVolume := cassandra.EncryptionVolumes(encryption.StoreTypeClient, *reaper.Spec.ClientEncryptionStores) - volumes = append(volumes, *keystoreVolume) - volumeMounts = append(volumeMounts, corev1.VolumeMount{ + *volumes = append(*volumes, *keystoreVolume) + *volumeMounts = append(*volumeMounts, corev1.VolumeMount{ Name: keystoreVolume.Name, MountPath: cassandra.StoreMountFullPath(encryption.StoreTypeClient, encryption.StoreNameKeystore), }) - volumes = append(volumes, *truststoreVolume) - volumeMounts = append(volumeMounts, corev1.VolumeMount{ + *volumes = append(*volumes, *truststoreVolume) + *volumeMounts = append(*volumeMounts, corev1.VolumeMount{ Name: truststoreVolume.Name, MountPath: cassandra.StoreMountFullPath(encryption.StoreTypeClient, encryption.StoreNameTruststore), }) javaOpts := fmt.Sprintf("-Djavax.net.ssl.keyStore=/mnt/client-keystore/keystore -Djavax.net.ssl.keyStorePassword=%s -Djavax.net.ssl.trustStore=/mnt/client-truststore/truststore -Djavax.net.ssl.trustStorePassword=%s -Dssl.enable=true", *keystorePassword, *truststorePassword) - envVars = append(envVars, corev1.EnvVar{ + *envVars = append(*envVars, corev1.EnvVar{ Name: "JAVA_OPTS", Value: javaOpts, }) - envVars = append(envVars, corev1.EnvVar{ + *envVars = append(*envVars, corev1.EnvVar{ Name: "REAPER_CASS_NATIVE_PROTOCOL_SSL_ENCRYPTION_ENABLED", Value: "true", }) } +} - if reaper.Spec.HttpManagement.Enabled && reaper.Spec.HttpManagement.Keystores != nil { - volumes = append(volumes, corev1.Volume{ - Name: "management-api-keystore", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: reaper.Spec.HttpManagement.Keystores.Name, +func computePodSpec(reaper *api.Reaper, dc *cassdcapi.CassandraDatacenter, initContainerResources *corev1.ResourceRequirements, keystorePassword *string, truststorePassword *string) corev1.PodSpec { + envVars := computeEnvVars(reaper, dc) + volumes, volumeMounts := computeVolumes(reaper) + mainImage := reaper.Spec.ContainerImage.ApplyDefaults(defaultImage) + mainContainerResources := computeMainContainerResources(reaper.Spec.Resources) + + if keystorePassword != nil && truststorePassword != nil { + configureClientEncryption(reaper, &envVars, &volumes, &volumeMounts, keystorePassword, truststorePassword) + } + + var initContainers []corev1.Container + if initContainerResources 
!= nil { + initContainers = computeInitContainers(reaper, mainImage, envVars, volumeMounts, initContainerResources) + } + + return corev1.PodSpec{ + Affinity: reaper.Spec.Affinity, + InitContainers: initContainers, + Containers: []corev1.Container{ + { + Name: "reaper", + Image: mainImage.String(), + ImagePullPolicy: mainImage.PullPolicy, + SecurityContext: reaper.Spec.SecurityContext, + Ports: []corev1.ContainerPort{ + { + Name: "app", + ContainerPort: 8080, + Protocol: "TCP", + }, + { + Name: "admin", + ContainerPort: 8081, + Protocol: "TCP", + }, }, + ReadinessProbe: computeProbe(reaper.Spec.ReadinessProbe), + LivenessProbe: computeProbe(reaper.Spec.LivenessProbe), + Env: envVars, + VolumeMounts: volumeMounts, + Resources: *mainContainerResources, }, - }) - - volumeMounts = append(volumeMounts, corev1.VolumeMount{ - Name: "management-api-keystore", - MountPath: "/etc/encryption/mgmt", - }) + }, + ServiceAccountName: reaper.Spec.ServiceAccountName, + Tolerations: reaper.Spec.Tolerations, + SecurityContext: reaper.Spec.PodSecurityContext, + ImagePullSecrets: computeImagePullSecrets(reaper, mainImage), + Volumes: volumes, } +} - mainImage := reaper.Spec.ContainerImage.ApplyDefaults(defaultImage) +func computeVolumeClaims(reaper *api.Reaper) []corev1.PersistentVolumeClaim { - initContainerResources := computeInitContainerResources(reaper.Spec.InitContainerResources) - mainContainerResources := computeMainContainerResources(reaper.Spec.Resources) + vcs := make([]corev1.PersistentVolumeClaim, 0) - podMeta := getPodMeta(reaper) + volumeClaimSpec := reaper.Spec.StorageConfig.DeepCopy() - deployment := &appsv1.Deployment{ + pvc := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ - Namespace: reaper.Namespace, - Name: reaper.Name, - Labels: createServiceAndDeploymentLabels(reaper), - Annotations: map[string]string{}, + Name: "reaper-data", + Namespace: reaper.Namespace, }, + Spec: *volumeClaimSpec, + } + vcs = append(vcs, *pvc) + + return vcs +} + +func NewStatefulSet(reaper *api.Reaper, dc *cassdcapi.CassandraDatacenter, logger logr.Logger, _ *string, _ *string, authVars ...*corev1.EnvVar) *appsv1.StatefulSet { + + if reaper.Spec.ReaperTemplate.StorageType != api.StorageTypeLocal { + logger.Error(fmt.Errorf("cannot create a Reaper StatefulSet with storage type other than 'local'"), "bad storage type", "storageType", reaper.Spec.StorageType) + return nil + } + + if reaper.Spec.ReaperTemplate.StorageConfig == nil { + logger.Error(fmt.Errorf("reaper spec needs a storage config when using the local storage type"), "missing storage config") + return nil + } + + statefulSet := &appsv1.StatefulSet{ + ObjectMeta: makeObjectMeta(reaper), + Spec: appsv1.StatefulSetSpec{ + Selector: makeSelector(reaper), + Template: corev1.PodTemplateSpec{ + ObjectMeta: computePodMeta(reaper), + Spec: computePodSpec(reaper, dc, nil, nil, nil), + }, + VolumeClaimTemplates: computeVolumeClaims(reaper), + Replicas: ptr.To[int32](1), + }, + } + addAuthEnvVars(&statefulSet.Spec.Template, authVars) + configureVector(reaper, &statefulSet.Spec.Template, dc, logger) + annotations.AddHashAnnotation(statefulSet) + return statefulSet +} + +func NewDeployment(reaper *api.Reaper, dc *cassdcapi.CassandraDatacenter, keystorePassword *string, truststorePassword *string, logger logr.Logger, authVars ...*corev1.EnvVar) *appsv1.Deployment { + + if reaper.Spec.ReaperTemplate.StorageType != api.StorageTypeCassandra { + logger.Error(fmt.Errorf("cannot create a Reaper deployment with storage type 
other than Cassandra"), "bad storage type", "storageType", reaper.Spec.ReaperTemplate.StorageType) + return nil + } + + initContainerResources := computeInitContainerResources(reaper.Spec.InitContainerResources) + + deployment := &appsv1.Deployment{ + ObjectMeta: makeObjectMeta(reaper), Spec: appsv1.DeploymentSpec{ - Selector: &selector, + Selector: makeSelector(reaper), Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: podMeta.Labels, - Annotations: podMeta.Annotations, - }, - Spec: corev1.PodSpec{ - Affinity: reaper.Spec.Affinity, - InitContainers: computeInitContainers(reaper, mainImage, envVars, volumeMounts, initContainerResources), - Containers: []corev1.Container{ - { - Name: "reaper", - Image: mainImage.String(), - ImagePullPolicy: mainImage.PullPolicy, - SecurityContext: reaper.Spec.SecurityContext, - Ports: []corev1.ContainerPort{ - { - Name: "app", - ContainerPort: 8080, - Protocol: "TCP", - }, - { - Name: "admin", - ContainerPort: 8081, - Protocol: "TCP", - }, - }, - ReadinessProbe: readinessProbe, - LivenessProbe: livenessProbe, - Env: envVars, - VolumeMounts: volumeMounts, - Resources: *mainContainerResources, - }, - }, - ServiceAccountName: reaper.Spec.ServiceAccountName, - Tolerations: reaper.Spec.Tolerations, - SecurityContext: reaper.Spec.PodSecurityContext, - ImagePullSecrets: computeImagePullSecrets(reaper, mainImage), - Volumes: volumes, - }, + ObjectMeta: computePodMeta(reaper), + Spec: computePodSpec(reaper, dc, initContainerResources, keystorePassword, truststorePassword), }, }, } - addAuthEnvVars(deployment, authVars) - configureVector(reaper, deployment, dc, logger) + addAuthEnvVars(&deployment.Spec.Template, authVars) + configureVector(reaper, &deployment.Spec.Template, dc, logger) annotations.AddHashAnnotation(deployment) return deployment } @@ -367,18 +481,18 @@ func computeProbe(probeTemplate *corev1.Probe) *corev1.Probe { return probe } -func addAuthEnvVars(deployment *appsv1.Deployment, vars []*corev1.EnvVar) { - envVars := deployment.Spec.Template.Spec.Containers[0].Env +func addAuthEnvVars(template *corev1.PodTemplateSpec, vars []*corev1.EnvVar) { + envVars := template.Spec.Containers[0].Env for _, v := range vars { envVars = append(envVars, *v) } - deployment.Spec.Template.Spec.Containers[0].Env = envVars - if len(deployment.Spec.Template.Spec.InitContainers) > 0 { - initEnvVars := deployment.Spec.Template.Spec.InitContainers[0].Env + template.Spec.Containers[0].Env = envVars + if len(template.Spec.InitContainers) > 0 { + initEnvVars := template.Spec.InitContainers[0].Env for _, v := range vars { initEnvVars = append(initEnvVars, *v) } - deployment.Spec.Template.Spec.InitContainers[0].Env = initEnvVars + template.Spec.InitContainers[0].Env = initEnvVars } } @@ -412,3 +526,92 @@ func getPodMeta(reaper *api.Reaper) meta.Tags { Annotations: podAnnotations, } } + +func MakeActualDeploymentType(actualReaper *api.Reaper) (client.Object, error) { + if actualReaper.Spec.StorageType == api.StorageTypeCassandra { + return &appsv1.Deployment{}, nil + } else if actualReaper.Spec.StorageType == api.StorageTypeLocal { + return &appsv1.StatefulSet{}, nil + } else { + err := fmt.Errorf("invalid storage type %s", actualReaper.Spec.StorageType) + return nil, err + } +} + +func MakeDesiredDeploymentType(actualReaper *api.Reaper, dc *cassdcapi.CassandraDatacenter, keystorePassword *string, truststorePassword *string, logger logr.Logger, authVars ...*corev1.EnvVar) (client.Object, error) { + if actualReaper.Spec.StorageType == 
api.StorageTypeCassandra { + return NewDeployment(actualReaper, dc, keystorePassword, truststorePassword, logger, authVars...), nil + } else if actualReaper.Spec.StorageType == api.StorageTypeLocal { + // the k8ssandra-cluster webhooks already check for this, + // but in tests (and in the future) we'll be creating Reaper directly (not through k8ssandra-cluster), + // so we need to double-check + if actualReaper.Spec.StorageConfig == nil { + err := fmt.Errorf("storageConfig is required for memory storage") + return nil, err + } + return NewStatefulSet(actualReaper, dc, logger, keystorePassword, truststorePassword, authVars...), nil + } else { + err := fmt.Errorf("invalid storage type %s", actualReaper.Spec.StorageType) + return nil, err + } +} + +func DeepCopyActualDeployment(actualDeployment client.Object) (client.Object, error) { + switch actual := actualDeployment.(type) { + case *appsv1.Deployment: + actualDeployment = actual.DeepCopy() + return actualDeployment, nil + case *appsv1.StatefulSet: + actualDeployment = actual.DeepCopy() + return actualDeployment, nil + default: + err := fmt.Errorf("unexpected type %T", actualDeployment) + return nil, err + } +} + +func EnsureSingleReplica(actualReaper *api.Reaper, actualDeployment client.Object, desiredDeployment client.Object, logger logr.Logger) error { + if actualReaper.Spec.StorageType == api.StorageTypeLocal { + desiredReplicas := getDeploymentReplicas(desiredDeployment, logger) + if desiredReplicas > 1 { + logger.Info(fmt.Sprintf("reaper with memory storage can only have one replica, not allowing the desired %d", desiredReplicas)) + if err := setDeploymentReplicas(&desiredDeployment, ptr.To[int32](1)); err != nil { + return err + } + } + actualReplicas := getDeploymentReplicas(actualDeployment, logger) + if actualReplicas > 1 { + logger.Info(fmt.Sprintf("reaper with memory storage currently has %d replicas, scaling down to 1", actualReplicas)) + if err := setDeploymentReplicas(&desiredDeployment, ptr.To[int32](1)); err != nil { + // returning error if the setter failed + return err + } + } + } + return nil +} + +func getDeploymentReplicas(actualDeployment client.Object, logger logr.Logger) int32 { + switch actual := actualDeployment.(type) { + case *appsv1.Deployment: + return *actual.Spec.Replicas + case *appsv1.StatefulSet: + return *actual.Spec.Replicas + default: + logger.Error(fmt.Errorf("unexpected type %T", actualDeployment), "Failed to get deployment replicas") + return math.MaxInt32 + } +} + +func setDeploymentReplicas(desiredDeployment *client.Object, numberOfReplicas *int32) error { + switch desired := (*desiredDeployment).(type) { + case *appsv1.Deployment: + desired.Spec.Replicas = numberOfReplicas + case *appsv1.StatefulSet: + desired.Spec.Replicas = numberOfReplicas + default: + err := fmt.Errorf("unexpected type %T", desiredDeployment) + return err + } + return nil +} diff --git a/pkg/reaper/deployment_test.go b/pkg/reaper/deployment_test.go index 686f4476d..e95076498 100644 --- a/pkg/reaper/deployment_test.go +++ b/pkg/reaper/deployment_test.go @@ -1,6 +1,7 @@ package reaper import ( + appsv1 "k8s.io/api/apps/v1" "testing" testlogr "github.com/go-logr/logr/testing" @@ -251,6 +252,48 @@ assert.Equal(t, probe, container.ReadinessProbe) } +func TestNewStatefulSet(t *testing.T) { + reaper := newTestReaper() + reaper.Spec.StorageType = reaperapi.StorageTypeLocal + reaper.Spec.StorageConfig = newTestStorageConfig() + + logger := testlogr.NewTestLogger(t) + + sts := 
NewStatefulSet(reaper, newTestDatacenter(), logger, nil, nil) + + podSpec := sts.Spec.Template.Spec + assert.Len(t, podSpec.Containers, 1) + + container := podSpec.Containers[0] + + assert.ElementsMatch(t, container.Env, []corev1.EnvVar{ + { + Name: "REAPER_STORAGE_TYPE", + Value: "memory", + }, + { + Name: "REAPER_ENABLE_DYNAMIC_SEED_LIST", + Value: "false", + }, + { + Name: "REAPER_CASS_CONTACT_POINTS", + Value: "[cluster1-dc1-service]", + }, + { + Name: "REAPER_DATACENTER_AVAILABILITY", + Value: "", + }, + { + Name: "REAPER_CASS_LOCAL_DC", + Value: "dc1", + }, + { + Name: "REAPER_CASS_KEYSPACE", + Value: "reaper_db", + }, + }) +} + func TestHttpManagementConfiguration(t *testing.T) { reaper := newTestReaper() reaper.Spec.HttpManagement.Enabled = true @@ -487,6 +530,141 @@ func TestSkipSchemaMigration(t *testing.T) { assert.Len(t, deployment.Spec.Template.Spec.InitContainers, 0, "expected pod template to not have any init container") } +func TestDeploymentTypes(t *testing.T) { + logger := testlogr.NewTestLogger(t) + + // reaper with cassandra backend becomes a deployment + reaper := newTestReaper() + reaper.Spec.ReaperTemplate.StorageType = reaperapi.StorageTypeCassandra + deployment := NewDeployment(reaper, newTestDatacenter(), nil, nil, logger) + assert.Len(t, deployment.Spec.Template.Spec.Containers, 1) + assert.Equal(t, reaperapi.StorageTypeCassandra, deployment.Spec.Template.Spec.Containers[0].Env[0].Value) + + // asking for a deployment with memory backend does not work + reaper = newTestReaper() + reaper.Spec.ReaperTemplate.StorageType = reaperapi.StorageTypeLocal + deployment = NewDeployment(reaper, newTestDatacenter(), nil, nil, logger) + assert.Nil(t, deployment) + + // reaper with memory backend becomes a stateful set + reaper = newTestReaper() + reaper.Spec.ReaperTemplate.StorageType = reaperapi.StorageTypeLocal + reaper.Spec.ReaperTemplate.StorageConfig = &corev1.PersistentVolumeClaimSpec{ + StorageClassName: func() *string { s := "test"; return &s }(), + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + } + sts := NewStatefulSet(reaper, newTestDatacenter(), logger, nil, nil) + assert.Len(t, sts.Spec.Template.Spec.Containers, 1) + assert.Equal(t, "memory", sts.Spec.Template.Spec.Containers[0].Env[0].Value) + + // asking for a stateful set with cassandra backend does not work + reaper = newTestReaper() + reaper.Spec.ReaperTemplate.StorageType = reaperapi.StorageTypeCassandra + sts = NewStatefulSet(reaper, newTestDatacenter(), logger, nil, nil) + assert.Nil(t, sts) +} + +func TestMakeActualDeploymentType(t *testing.T) { + reaper := newTestReaper() + reaper.Spec.StorageType = reaperapi.StorageTypeCassandra + d, err := MakeActualDeploymentType(reaper) + assert.Nil(t, err) + assert.IsType(t, &appsv1.Deployment{}, d) + + reaper.Spec.StorageType = reaperapi.StorageTypeLocal + sts, err := MakeActualDeploymentType(reaper) + assert.Nil(t, err) + assert.IsType(t, &appsv1.StatefulSet{}, sts) + + reaper.Spec.StorageType = "invalid" + d, err = MakeActualDeploymentType(reaper) + assert.NotNil(t, err) + assert.Nil(t, d) +} + +func TestMakeDesiredDeploymentType(t *testing.T) { + reaper := newTestReaper() + fakeDc := newTestDatacenter() + logger := testlogr.NewTestLogger(t) + + reaper.Spec.StorageType = reaperapi.StorageTypeCassandra + d, err := MakeDesiredDeploymentType(reaper, fakeDc, nil, nil, logger) + assert.Nil(t, err) + 
assert.IsType(t, &appsv1.Deployment{}, d) + + reaper.Spec.StorageType = reaperapi.StorageTypeLocal + reaper.Spec.StorageConfig = newTestStorageConfig() + sts, err := MakeDesiredDeploymentType(reaper, fakeDc, nil, nil, logger) + assert.Nil(t, err) + assert.IsType(t, &appsv1.StatefulSet{}, sts) + + reaper.Spec.StorageType = "invalid" + d, err = MakeDesiredDeploymentType(reaper, fakeDc, nil, nil, logger) + assert.NotNil(t, err) + assert.Nil(t, d) +} + +func TestDeepCopyActualDeployment(t *testing.T) { + reaper := newTestReaper() + + reaper.Spec.StorageType = reaperapi.StorageTypeCassandra + deployment, err := MakeDesiredDeploymentType(reaper, newTestDatacenter(), nil, nil, testlogr.NewTestLogger(t)) + assert.Nil(t, err) + deepCopy, err := DeepCopyActualDeployment(deployment) + assert.Nil(t, err) + assert.Equal(t, deployment, deepCopy) + + wrongDeployment := &appsv1.DaemonSet{} + deepCopy, err = DeepCopyActualDeployment(wrongDeployment) + assert.NotNil(t, err) + assert.Nil(t, deepCopy) +} + +func TestEnsureSingleReplica(t *testing.T) { + reaper := newTestReaper() + logger := testlogr.NewTestLogger(t) + oneReplica := int32(1) + twoReplicas := int32(2) + + // the replica count is not touched on Deployments + actualDeployment, _ := MakeActualDeploymentType(reaper) + desiredDeployment := NewDeployment(reaper, newTestDatacenter(), nil, nil, logger) + desiredDeployment.Spec.Replicas = &twoReplicas + err := EnsureSingleReplica(reaper, actualDeployment, desiredDeployment, logger) + assert.Nil(t, err) + assert.Equal(t, int32(2), *desiredDeployment.Spec.Replicas) + + // a replica count greater than 1 is not allowed on StatefulSets + reaper.Spec.StorageType = reaperapi.StorageTypeLocal + reaper.Spec.StorageConfig = newTestStorageConfig() + actualStatefulSet, _ := MakeActualDeploymentType(reaper) + err = setDeploymentReplicas(&actualStatefulSet, &oneReplica) + assert.Nil(t, err) + desiredStatefulSet := NewStatefulSet(reaper, newTestDatacenter(), logger, nil, nil) + desiredStatefulSet.Spec.Replicas = &twoReplicas + err = EnsureSingleReplica(reaper, actualStatefulSet, desiredStatefulSet, logger) + // no error: EnsureSingleReplica caps the desired replica count at 1 instead of failing + assert.Nil(t, err) + + // if we find an STS with more than 1 replica, we forcefully scale it down + actualStatefulSet, _ = MakeActualDeploymentType(reaper) + err = setDeploymentReplicas(&actualStatefulSet, &twoReplicas) + assert.Nil(t, err) + desiredStatefulSetObject, _ := MakeDesiredDeploymentType(reaper, newTestDatacenter(), nil, nil, logger) + err = setDeploymentReplicas(&desiredStatefulSetObject, &oneReplica) + assert.Nil(t, err) + + assert.Equal(t, twoReplicas, getDeploymentReplicas(desiredDeployment, logger)) + err = EnsureSingleReplica(reaper, actualStatefulSet, desiredStatefulSetObject, logger) + assert.Nil(t, err) + assert.Equal(t, oneReplica, getDeploymentReplicas(desiredStatefulSetObject, logger)) +} + func newTestReaper() *reaperapi.Reaper { namespace := "service-test" reaperName := "test-reaper" @@ -502,7 +680,8 @@ Namespace: namespace, }, ReaperTemplate: reaperapi.ReaperTemplate{ - Keyspace: "reaper_db", + Keyspace: "reaper_db", + StorageType: "cassandra", ResourceMeta: &meta.ResourceMeta{ CommonLabels: map[string]string{"common": "everywhere", "override": "commonLevel"}, Pods: meta.Tags{ @@ -535,6 +714,18 @@ } } +func newTestStorageConfig() *corev1.PersistentVolumeClaimSpec { + return &corev1.PersistentVolumeClaimSpec{ + 
StorageClassName: func() *string { s := "test"; return &s }(), + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + } +} + func TestDefaultResources(t *testing.T) { reaper := newTestReaper() logger := testlogr.NewTestLogger(t) diff --git a/pkg/reaper/resource.go b/pkg/reaper/resource.go index d26992cf4..dccb430e8 100644 --- a/pkg/reaper/resource.go +++ b/pkg/reaper/resource.go @@ -15,9 +15,6 @@ import ( ) const ( - DeploymentModeSingle = "SINGLE" - DeploymentModePerDc = "PER_DC" - DatacenterAvailabilityEach = "EACH" DatacenterAvailabilityAll = "ALL" ) @@ -104,7 +101,7 @@ func NewReaper( // See https://cassandra-reaper.io/docs/usage/multi_dc/. // If we have more than one DC, and each DC has its own Reaper instance, use EACH; otherwise, use ALL. func computeReaperDcAvailability(kc *k8ssandraapi.K8ssandraCluster) string { - if kc.Spec.Reaper.DeploymentMode == DeploymentModeSingle || len(kc.Spec.Cassandra.Datacenters) == 1 { + if kc.Spec.Reaper.DeploymentMode == reaperapi.DeploymentModeSingle || len(kc.Spec.Cassandra.Datacenters) == 1 { return DatacenterAvailabilityAll } return DatacenterAvailabilityEach diff --git a/pkg/reaper/resource_test.go b/pkg/reaper/resource_test.go index 86a904c5e..70daac3e5 100644 --- a/pkg/reaper/resource_test.go +++ b/pkg/reaper/resource_test.go @@ -38,7 +38,7 @@ func Test_computeReaperDcAvailability(t *testing.T) { }, }, Reaper: &reaperapi.ReaperClusterTemplate{ - DeploymentMode: DeploymentModePerDc, + DeploymentMode: reaperapi.DeploymentModePerDc, }, }, }, @@ -55,7 +55,7 @@ func Test_computeReaperDcAvailability(t *testing.T) { }, }, Reaper: &reaperapi.ReaperClusterTemplate{ - DeploymentMode: DeploymentModeSingle, + DeploymentMode: reaperapi.DeploymentModeSingle, }, }, }, diff --git a/pkg/reaper/vector.go b/pkg/reaper/vector.go index dd9da7e13..41b27ab30 100644 --- a/pkg/reaper/vector.go +++ b/pkg/reaper/vector.go @@ -2,7 +2,6 @@ package reaper import ( "fmt" - "github.com/k8ssandra/k8ssandra-operator/pkg/labels" "github.com/k8ssandra/k8ssandra-operator/pkg/utils" "sigs.k8s.io/controller-runtime/pkg/client" @@ -13,7 +12,6 @@ import ( api "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" "github.com/k8ssandra/k8ssandra-operator/pkg/cassandra" "github.com/k8ssandra/k8ssandra-operator/pkg/vector" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -49,7 +47,7 @@ func CreateVectorConfigMap(namespace, vectorToml string, dc cassdcapi.CassandraD } } -func configureVector(reaper *api.Reaper, deployment *appsv1.Deployment, dc *cassdcapi.CassandraDatacenter, logger logr.Logger) { +func configureVector(reaper *api.Reaper, template *corev1.PodTemplateSpec, dc *cassdcapi.CassandraDatacenter, logger logr.Logger) { if reaper.Spec.Telemetry.IsVectorEnabled() { logger.Info("Injecting Vector agent into Reaper deployments") vectorImage := vector.DefaultVectorImage @@ -83,9 +81,9 @@ func configureVector(reaper *api.Reaper, deployment *appsv1.Deployment, dc *cass }, } // Add the container and volume to the deployment - cassandra.UpdateContainer(&deployment.Spec.Template, VectorContainerName, func(c *corev1.Container) { + cassandra.UpdateContainer(template, VectorContainerName, func(c *corev1.Container) { *c = vectorAgentContainer }) - cassandra.AddVolumesToPodTemplateSpec(&deployment.Spec.Template, vectorAgentVolume) + 
cassandra.AddVolumesToPodTemplateSpec(template, vectorAgentVolume) } } diff --git a/pkg/reaper/vector_test.go b/pkg/reaper/vector_test.go index a297a36a9..742a00aec 100644 --- a/pkg/reaper/vector_test.go +++ b/pkg/reaper/vector_test.go @@ -1,6 +1,7 @@ package reaper import ( + corev1 "k8s.io/api/core/v1" "testing" testlogr "github.com/go-logr/logr/testing" @@ -9,7 +10,6 @@ import ( telemetryapi "github.com/k8ssandra/k8ssandra-operator/apis/telemetry/v1alpha1" "github.com/k8ssandra/k8ssandra-operator/pkg/vector" "github.com/stretchr/testify/assert" - v1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/utils/ptr" ) @@ -19,16 +19,16 @@ func TestConfigureVector(t *testing.T) { reaper := &api.Reaper{} reaper.Spec.Telemetry = telemetrySpec - deployment := &v1.Deployment{} + template := &corev1.PodTemplateSpec{} fakeDc := &cassdcapi.CassandraDatacenter{} logger := testlogr.NewTestLogger(t) - configureVector(reaper, deployment, fakeDc, logger) + configureVector(reaper, template, fakeDc, logger) - assert.Equal(t, 1, len(deployment.Spec.Template.Spec.Containers)) - assert.Equal(t, "reaper-vector-agent", deployment.Spec.Template.Spec.Containers[0].Name) - assert.Equal(t, resource.MustParse(vector.DefaultVectorCpuLimit), *deployment.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu()) - assert.Equal(t, resource.MustParse(vector.DefaultVectorCpuRequest), *deployment.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu()) - assert.Equal(t, resource.MustParse(vector.DefaultVectorMemoryLimit), *deployment.Spec.Template.Spec.Containers[0].Resources.Limits.Memory()) - assert.Equal(t, resource.MustParse(vector.DefaultVectorMemoryRequest), *deployment.Spec.Template.Spec.Containers[0].Resources.Requests.Memory()) + assert.Equal(t, 1, len(template.Spec.Containers)) + assert.Equal(t, "reaper-vector-agent", template.Spec.Containers[0].Name) + assert.Equal(t, resource.MustParse(vector.DefaultVectorCpuLimit), *template.Spec.Containers[0].Resources.Limits.Cpu()) + assert.Equal(t, resource.MustParse(vector.DefaultVectorCpuRequest), *template.Spec.Containers[0].Resources.Requests.Cpu()) + assert.Equal(t, resource.MustParse(vector.DefaultVectorMemoryLimit), *template.Spec.Containers[0].Resources.Limits.Memory()) + assert.Equal(t, resource.MustParse(vector.DefaultVectorMemoryRequest), *template.Spec.Containers[0].Resources.Requests.Memory()) } diff --git a/test/e2e/reaper_test.go b/test/e2e/reaper_test.go index 17ff962bd..848d29dc6 100644 --- a/test/e2e/reaper_test.go +++ b/test/e2e/reaper_test.go @@ -39,7 +39,7 @@ func createSingleReaper(t *testing.T, ctx context.Context, namespace string, f * checkReaperK8cStatusReady(t, f, ctx, kcKey, dcKey) // check that the Reaper Vector container and config map exist - checkContainerPresence(t, ctx, f, reaperKey, getPodTemplateSpecForDeployment, reaper.VectorContainerName) + checkContainerPresence(t, ctx, f, reaperKey, kc, getPodTemplateSpec, reaper.VectorContainerName) checkVectorAgentConfigMapPresence(t, ctx, f, dcKey, reaper.VectorAgentConfigMapName) t.Logf("check that if Reaper Vector is disabled, the agent and configmap are deleted") @@ -52,7 +52,7 @@ func createSingleReaper(t *testing.T, ctx context.Context, namespace string, f * checkReaperReady(t, f, ctx, reaperKey) checkReaperK8cStatusReady(t, f, ctx, kcKey, dcKey) checkFinalizerRbacRule(t, f, ctx, namespace) - checkContainerDeleted(t, ctx, f, reaperKey, getPodTemplateSpecForDeployment, reaper.VectorContainerName) + checkContainerDeleted(t, ctx, f, reaperKey, kc, getPodTemplateSpec, 
reaper.VectorContainerName) checkVectorConfigMapDeleted(t, ctx, f, dcKey, reaper.VectorAgentConfigMapName) t.Logf("check that if Reaper Vector is enabled, the agent and configmap are re-created") @@ -64,9 +64,12 @@ func createSingleReaper(t *testing.T, ctx context.Context, namespace string, f * require.NoError(err, "failed to patch K8ssandraCluster in namespace %s", namespace) checkReaperReady(t, f, ctx, reaperKey) checkReaperK8cStatusReady(t, f, ctx, kcKey, dcKey) - checkContainerPresence(t, ctx, f, reaperKey, getPodTemplateSpecForDeployment, reaper.VectorContainerName) + checkContainerPresence(t, ctx, f, reaperKey, kc, getPodTemplateSpec, reaper.VectorContainerName) checkVectorAgentConfigMapPresence(t, ctx, f, dcKey, reaper.VectorAgentConfigMapName) + t.Log("check Reaper app type") + checkReaperAppType(t, ctx, f, reaperKey, kc) + t.Log("check Reaper keyspace created") checkKeyspaceExists(t, f, ctx, f.DataPlaneContexts[0], namespace, kc.SanitizedName(), dcPrefix+"-default-sts-0", "reaper_db") @@ -79,11 +82,13 @@ func createSingleReaper(t *testing.T, ctx context.Context, namespace string, f * defer f.UndeployAllIngresses(t, f.DataPlaneContexts[0], namespace) checkReaperApiReachable(t, ctx, reaperRestHostAndPort) + createEmptyKeyspaceTable(t, f, ctx, f.DataPlaneContexts[0], namespace, kc.SanitizedName(), dcPrefix+"-default-sts-0", "test_ks", "test_table") + t.Run("TestReaperApi[0]", func(t *testing.T) { t.Log("test Reaper API in context", f.DataPlaneContexts[0]) reaperUiSecretKey := framework.ClusterKey{K8sContext: f.DataPlaneContexts[0], NamespacedName: types.NamespacedName{Namespace: namespace, Name: "mycluster-reaper-ui"}} username, password := retrieveCredentials(t, f, ctx, reaperUiSecretKey) - testReaperApi(t, ctx, f.DataPlaneContexts[0], DcClusterName(t, f, dcKey), "reaper_db", username, password) + testReaperApi(t, ctx, f.DataPlaneContexts[0], DcClusterName(t, f, dcKey), "test_ks", username, password) }) } diff --git a/test/e2e/suite_test.go b/test/e2e/suite_test.go index 7ead381c4..c310b5bc9 100644 --- a/test/e2e/suite_test.go +++ b/test/e2e/suite_test.go @@ -812,7 +812,7 @@ func createSingleDatacenterCluster(t *testing.T, ctx context.Context, namespace // check that the Stargate Vector container and config map exist stargateDeploymentKey := framework.ClusterKey{K8sContext: f.DataPlaneContexts[0], NamespacedName: types.NamespacedName{Namespace: namespace, Name: dcPrefix + "-default-stargate-deployment"}} - checkContainerPresence(t, ctx, f, stargateDeploymentKey, getPodTemplateSpecForDeployment, stargate.VectorContainerName) + checkContainerPresence(t, ctx, f, stargateDeploymentKey, k8ssandra, getPodTemplateSpec, stargate.VectorContainerName) checkVectorAgentConfigMapPresence(t, ctx, f, dcKey, stargate.VectorAgentConfigMapName) t.Logf("check that if Stargate Vector is disabled, the agent and configmap are deleted") @@ -824,7 +824,7 @@ func createSingleDatacenterCluster(t *testing.T, ctx context.Context, namespace require.NoError(err, "failed to patch K8ssandraCluster in namespace %s", namespace) checkStargateReady(t, f, ctx, stargateKey) checkStargateK8cStatusReady(t, f, ctx, kcKey, dcKey) - checkContainerDeleted(t, ctx, f, stargateDeploymentKey, getPodTemplateSpecForDeployment, stargate.VectorContainerName) + checkContainerDeleted(t, ctx, f, stargateDeploymentKey, k8ssandra, getPodTemplateSpec, stargate.VectorContainerName) checkVectorConfigMapDeleted(t, ctx, f, dcKey, stargate.VectorAgentConfigMapName) t.Logf("check that if Stargate Vector is enabled, the agent and configmap are 
re-created") @@ -836,7 +836,7 @@ func createSingleDatacenterCluster(t *testing.T, ctx context.Context, namespace require.NoError(err, "failed to patch K8ssandraCluster in namespace %s", namespace) checkStargateReady(t, f, ctx, stargateKey) checkStargateK8cStatusReady(t, f, ctx, kcKey, dcKey) - checkContainerPresence(t, ctx, f, stargateDeploymentKey, getPodTemplateSpecForDeployment, stargate.VectorContainerName) + checkContainerPresence(t, ctx, f, stargateDeploymentKey, k8ssandra, getPodTemplateSpec, stargate.VectorContainerName) checkVectorAgentConfigMapPresence(t, ctx, f, dcKey, stargate.VectorAgentConfigMapName) t.Log("check that if Stargate is deleted directly it gets re-created") @@ -847,7 +847,7 @@ func createSingleDatacenterCluster(t *testing.T, ctx context.Context, namespace require.NoError(err, "failed to delete Stargate in namespace %s", namespace) checkStargateReady(t, f, ctx, stargateKey) - checkContainerPresence(t, ctx, f, stargateDeploymentKey, getPodTemplateSpecForDeployment, stargate.VectorContainerName) + checkContainerPresence(t, ctx, f, stargateDeploymentKey, k8ssandra, getPodTemplateSpec, stargate.VectorContainerName) checkVectorAgentConfigMapPresence(t, ctx, f, dcKey, stargate.VectorAgentConfigMapName) t.Log("delete Stargate in k8ssandracluster resource") @@ -893,7 +893,7 @@ func createSingleDatacenterCluster(t *testing.T, ctx context.Context, namespace require.NoError(err, "failed to patch K8ssandraCluster in operatorNamespace %s", namespace) checkStargateReady(t, f, ctx, stargateKey) - checkContainerPresence(t, ctx, f, stargateDeploymentKey, getPodTemplateSpecForDeployment, stargate.VectorContainerName) + checkContainerPresence(t, ctx, f, stargateDeploymentKey, k8ssandra, getPodTemplateSpec, stargate.VectorContainerName) checkVectorAgentConfigMapPresence(t, ctx, f, dcKey, stargate.VectorAgentConfigMapName) t.Log("check that Cassandra DC was not restarted") @@ -931,7 +931,7 @@ func createSingleDatacenterCluster(t *testing.T, ctx context.Context, namespace // Check that Stargate Vector's configmap is deleted checkStargateReady(t, f, ctx, stargateKey) checkStargateK8cStatusReady(t, f, ctx, kcKey, dcKey) - checkContainerDeleted(t, ctx, f, stargateDeploymentKey, getPodTemplateSpecForDeployment, stargate.VectorContainerName) + checkContainerDeleted(t, ctx, f, stargateDeploymentKey, k8ssandra, getPodTemplateSpec, stargate.VectorContainerName) checkVectorConfigMapDeleted(t, ctx, f, dcKey, stargate.VectorAgentConfigMapName) } @@ -1927,6 +1927,28 @@ func checkKeyspaceExists( }, 1*time.Minute, 3*time.Second) } +func checkReaperAppType( + t *testing.T, + ctx context.Context, + f *framework.E2eFramework, + reaperKey framework.ClusterKey, + kc *api.K8ssandraCluster, +) { + assert.Eventually(t, func() bool { + if kc.Spec.Reaper.StorageType == reaperapi.StorageTypeCassandra { + d := &appsv1.Deployment{} + assert.NoError(t, f.Get(ctx, reaperKey, d)) + return true + } + if kc.Spec.Reaper.StorageType == reaperapi.StorageTypeLocal { + sts := &appsv1.StatefulSet{} + assert.NoError(t, f.Get(ctx, reaperKey, sts)) + return true + } + return false + }, 1*time.Minute, 3*time.Second) +} + func checkKeyspaceReplication( t *testing.T, f *framework.E2eFramework, @@ -1951,6 +1973,33 @@ func checkKeyspaceReplication( }, 1*time.Minute, 3*time.Second) } +func createEmptyKeyspaceTable(t *testing.T, + f *framework.E2eFramework, + ctx context.Context, + k8sContext, namespace, clusterName, pod, keyspace, table string, +) { + assert.Eventually(t, func() bool { + dcKey := framework.ClusterKey{K8sContext: 
k8sContext, NamespacedName: types.NamespacedName{Namespace: namespace, Name: "dc1"}} + dcName := DcName(t, f, dcKey) + createKeyspaceStatement := fmt.Sprintf("CREATE KEYSPACE %s WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', '%s' : 3};", keyspace, dcName) + createTableStatement := fmt.Sprintf("CREATE TABLE %s.%s (id int PRIMARY KEY);", keyspace, table) + + _, err := f.ExecuteCql(ctx, k8sContext, namespace, clusterName, pod, createKeyspaceStatement) + if err != nil { + t.Logf("failed to create keyspace %s: %v", keyspace, err) + return false + } + + _, err = f.ExecuteCql(ctx, k8sContext, namespace, clusterName, pod, createTableStatement) + if err != nil { + t.Logf("failed to create table %s.%s: %v", keyspace, table, err) + return false + } + + return true + }, 1*time.Minute, 3*time.Second) +} + // assertCassandraDatacenterK8cStatusReady polls the K8ssandraCluster object, checking its status to // verify the CassandraDatacenter specified by dcName is ready. func assertCassandraDatacenterK8cStatusReady( @@ -2199,23 +2248,42 @@ } +func getPodTemplateSpec(t *testing.T, ctx context.Context, f *framework.E2eFramework, appKey framework.ClusterKey, kc *api.K8ssandraCluster) *corev1.PodTemplateSpec { + // this also gets called for pods other than Reaper's (e.g. Stargate); before Reaper gained the option of being an STS, everything was a Deployment + if kc.Spec.Reaper == nil { + return getPodTemplateSpecForDeployment(t, ctx, f, appKey) + } + if kc.Spec.Reaper.StorageType == reaperapi.StorageTypeCassandra { + return getPodTemplateSpecForDeployment(t, ctx, f, appKey) + } + return getPodTemplateSpecForStatefulSet(t, ctx, f, appKey) +} + +func getPodTemplateSpecForStatefulSet(t *testing.T, ctx context.Context, f *framework.E2eFramework, stsKey framework.ClusterKey) *corev1.PodTemplateSpec { + sts := &appsv1.StatefulSet{} + require.NoError(t, f.Get(ctx, stsKey, sts), "failed to get StatefulSet", "StatefulSet key", stsKey) + + return &sts.Spec.Template +} + func getPodTemplateSpecForDeployment(t *testing.T, ctx context.Context, f *framework.E2eFramework, deploymentKey framework.ClusterKey) *corev1.PodTemplateSpec { sg := &appsv1.Deployment{} - require.NoError(t, f.Get(ctx, deploymentKey, sg), "failed to get Deployment") + require.NoError(t, f.Get(ctx, deploymentKey, sg), "failed to get Deployment", "deploymentKey", deploymentKey) return &sg.Spec.Template } -func checkContainerPresence(t *testing.T, ctx context.Context, f *framework.E2eFramework, podKey framework.ClusterKey, specFunction func(t *testing.T, ctx context.Context, f *framework.E2eFramework, dcKey framework.ClusterKey) *corev1.PodTemplateSpec, containerName string) { +func checkContainerPresence(t *testing.T, ctx context.Context, f *framework.E2eFramework, podKey framework.ClusterKey, kc *api.K8ssandraCluster, specFunction func(t *testing.T, ctx context.Context, f *framework.E2eFramework, dcKey framework.ClusterKey, kc *api.K8ssandraCluster) *corev1.PodTemplateSpec, containerName string) { t.Logf("check that %s contains Container named %s", podKey.Name, containerName) - podTempSpec := specFunction(t, ctx, f, podKey) + podTempSpec := specFunction(t, ctx, f, podKey, kc) _, containerFound := cassandra.FindContainer(podTempSpec, containerName) require.True(t, containerFound, "cannot find Container in pod template spec") } -func checkContainerDeleted(t *testing.T, ctx context.Context, f *framework.E2eFramework, podKey framework.ClusterKey, 
specFunction func(t *testing.T, ctx context.Context, f *framework.E2eFramework, dcKey framework.ClusterKey) *corev1.PodTemplateSpec, containerName string) { +func checkContainerDeleted(t *testing.T, ctx context.Context, f *framework.E2eFramework, podKey framework.ClusterKey, kc *api.K8ssandraCluster, specFunction func(t *testing.T, ctx context.Context, f *framework.E2eFramework, dcKey framework.ClusterKey, kc *api.K8ssandraCluster) *corev1.PodTemplateSpec, containerName string) { t.Logf("check that %s does not have a Container named %s", podKey.Name, containerName) - podTempSpec := specFunction(t, ctx, f, podKey) + podTempSpec := specFunction(t, ctx, f, podKey, kc) _, containerFound := cassandra.FindContainer(podTempSpec, containerName) require.False(t, containerFound, "Found Container in pod template spec") } diff --git a/test/testdata/fixtures/single-dc-reaper/k8ssandra.yaml b/test/testdata/fixtures/single-dc-reaper/k8ssandra.yaml index 94a45c542..a043f3b78 100644 --- a/test/testdata/fixtures/single-dc-reaper/k8ssandra.yaml +++ b/test/testdata/fixtures/single-dc-reaper/k8ssandra.yaml @@ -4,6 +4,13 @@ metadata: name: test spec: reaper: + storageType: local + storageConfig: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi httpManagement: enabled: true heapSize: 256Mi
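For context, here is a minimal sketch of the Go equivalent of the fixture above, i.e. the smallest Reaper template that passes the new webhook validation for the local storage type. It only uses types that appear in this patch; the "standard" storage class is an assumption for illustration, and the field can be omitted to let Kubernetes pick the default class.

package reaperexample

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/utils/ptr"

	reaperapi "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1"
)

// newLocalStorageReaperTemplate builds a Reaper template using the memory ("local")
// storage backend: storageType local plus a storageConfig carrying access modes and
// a storage request, which is what the webhook validates.
func newLocalStorageReaperTemplate() *reaperapi.ReaperClusterTemplate {
	return &reaperapi.ReaperClusterTemplate{
		ReaperTemplate: reaperapi.ReaperTemplate{
			StorageType: reaperapi.StorageTypeLocal,
			StorageConfig: &corev1.PersistentVolumeClaimSpec{
				// assumption: any valid storage class works; omit to use the cluster default
				StorageClassName: ptr.To("standard"),
				AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
				Resources: corev1.VolumeResourceRequirements{
					Requests: corev1.ResourceList{
						corev1.ResourceStorage: resource.MustParse("1Gi"),
					},
				},
			},
		},
	}
}

Assigning this template to a K8ssandraCluster's reaper spec yields the StatefulSet flavor of Reaper with a single replica, as exercised by the tests in this patch.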