diff --git a/.github/workflows/kind_e2e_tests.yaml b/.github/workflows/kind_e2e_tests.yaml index 400c7fb5a..e156f013f 100644 --- a/.github/workflows/kind_e2e_tests.yaml +++ b/.github/workflows/kind_e2e_tests.yaml @@ -53,6 +53,7 @@ jobs: e2e_test: - CreateSingleDatacenterCluster - CreateStargateAndDatacenter + - CreateSingleReaper fail-fast: false name: ${{ matrix.e2e_test }} env: diff --git a/CHANGELOG/CHANGELOG-1.0.md b/CHANGELOG/CHANGELOG-1.0.md index 21f3cc401..6365a4893 100644 --- a/CHANGELOG/CHANGELOG-1.0.md +++ b/CHANGELOG/CHANGELOG-1.0.md @@ -15,6 +15,7 @@ When cutting a new release, update the `unreleased` heading to the tag being gen ## Unreleased * [CHANGE] [#182](https://github.com/k8ssandra/k8ssandra-operator/pull/182) Update cass-operator to v1.8.0 +* [FEATURE] [#4](https://github.com/k8ssandra/k8ssandra-operator/issues/4) Add support for Reaper * [FEATURE] [#15](https://github.com/k8ssandra/k8ssandra-operator/pull/15) Add finalizer for K8ssandraCluster * [BUGFIX] [#203](https://github.com/k8ssandra/k8ssandra-operator/issues/203) Superuser secret name not set on CassandraDatacenters * [BUGFIX] [#156](https://github.com/k8ssandra/k8ssandra-operator/issues/156) Stargate auth table creation may trigger a table ID mismatch diff --git a/Makefile b/Makefile index 036521af7..81d815741 100644 --- a/Makefile +++ b/Makefile @@ -147,21 +147,21 @@ kind-load-image: kind-e2e-test: multi-up e2e-test single-up: cleanup build manifests kustomize docker-build create-kind-cluster kind-load-image cert-manager - $(KUSTOMIZE) build config/deployments/control-plane | kubectl apply -f - + $(KUSTOMIZE) build config/deployments/control-plane | kubectl apply --server-side --force-conflicts -f - single-reload: build manifests kustomize docker-build kind-load-image cert-manager kubectl config use-context kind-k8ssandra-0 - $(KUSTOMIZE) build config/deployments/control-plane | kubectl apply -f - + $(KUSTOMIZE) build config/deployments/control-plane | kubectl apply --server-side --force-conflicts -f - kubectl delete pod -l control-plane=k8ssandra-operator kubectl rollout status deployment k8ssandra-operator multi-up: cleanup build manifests kustomize docker-build create-kind-multicluster kind-load-image-multi cert-manager-multi ## install the control plane kubectl config use-context kind-k8ssandra-0 - $(KUSTOMIZE) build config/deployments/control-plane | kubectl apply -f - + $(KUSTOMIZE) build config/deployments/control-plane | kubectl apply --server-side --force-conflicts -f - ## install the data plane kubectl config use-context kind-k8ssandra-1 - $(KUSTOMIZE) build config/deployments/data-plane | kubectl apply -f - + $(KUSTOMIZE) build config/deployments/data-plane | kubectl apply --server-side --force-conflicts -f - ## Create a client config make create-client-config ## Restart the control plane @@ -172,12 +172,12 @@ multi-up: cleanup build manifests kustomize docker-build create-kind-multicluste multi-reload: build manifests kustomize docker-build kind-load-image-multi cert-manager-multi # Reload the operator on the control-plane kubectl config use-context kind-k8ssandra-0 - $(KUSTOMIZE) build config/deployments/control-plane | kubectl apply -f - + $(KUSTOMIZE) build config/deployments/control-plane | kubectl apply --server-side --force-conflicts -f - kubectl -n $(NS) delete pod -l control-plane=k8ssandra-operator kubectl -n $(NS) rollout status deployment k8ssandra-operator # Reload the operator on the data-plane kubectl config use-context kind-k8ssandra-1 - $(KUSTOMIZE) build 
config/deployments/data-plane | kubectl apply -f - + $(KUSTOMIZE) build config/deployments/data-plane | kubectl apply --server-side --force-conflicts -f - kubectl -n $(NS) delete pod -l control-plane=k8ssandra-operator kubectl -n $(NS) rollout status deployment k8ssandra-operator @@ -206,14 +206,14 @@ kind-load-image-multi: ##@ Deployment install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. - $(KUSTOMIZE) build config/crd | kubectl apply -f - + $(KUSTOMIZE) build config/crd | kubectl apply --server-side --force-conflicts -f - uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. $(KUSTOMIZE) build config/crd | kubectl delete -f - deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} - $(KUSTOMIZE) build config/deployments/control-plane | kubectl apply -f - + $(KUSTOMIZE) build config/deployments/control-plane | kubectl apply --server-side --force-conflicts -f - undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. $(KUSTOMIZE) build config/deployments/control-plane | kubectl delete -f - diff --git a/PROJECT b/PROJECT index dc2bde74b..88e44197c 100644 --- a/PROJECT +++ b/PROJECT @@ -41,4 +41,13 @@ resources: kind: ReplicatedSecret path: github.com/k8ssandra/k8ssandra-operator/apis/replication/v1alpha1 version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: k8ssandra.io + group: reaper + kind: Reaper + path: github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1 + version: v1alpha1 version: "3" diff --git a/apis/config/v1beta1/zz_generated.deepcopy.go b/apis/config/v1beta1/zz_generated.deepcopy.go index 40a35a450..f0bc5e668 100644 --- a/apis/config/v1beta1/zz_generated.deepcopy.go +++ b/apis/config/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/apis/k8ssandra/v1alpha1/constants.go b/apis/k8ssandra/v1alpha1/constants.go index 651b3c762..58e7284d2 100644 --- a/apis/k8ssandra/v1alpha1/constants.go +++ b/apis/k8ssandra/v1alpha1/constants.go @@ -13,10 +13,12 @@ const ( ComponentLabel = "app.kubernetes.io/component" ComponentLabelValueCassandra = "cassandra" ComponentLabelValueStargate = "stargate" + ComponentLabelValueReaper = "reaper" CreatedByLabel = "app.kubernetes.io/created-by" CreatedByLabelValueK8ssandraClusterController = "k8ssandracluster-controller" CreatedByLabelValueStargateController = "stargate-controller" + CreatedByLabelValueReaperController = "reaper-controller" PartOfLabel = "app.kubernetes.io/part-of" PartOfLabelValue = "k8ssandra" diff --git a/apis/k8ssandra/v1alpha1/k8ssandracluster_types.go b/apis/k8ssandra/v1alpha1/k8ssandracluster_types.go index 33938c9ab..29e9e1610 100644 --- a/apis/k8ssandra/v1alpha1/k8ssandracluster_types.go +++ b/apis/k8ssandra/v1alpha1/k8ssandracluster_types.go @@ -18,6 +18,7 @@ package v1alpha1 import ( cassdcapi "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1" + reaperapi "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" stargateapi "github.com/k8ssandra/k8ssandra-operator/apis/stargate/v1alpha1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -38,6 +39,11 @@ type K8ssandraClusterSpec struct { // If this is non-nil, Stargate will be deployed on every Cassandra datacenter in this K8ssandraCluster. 
// +optional Stargate *stargateapi.StargateClusterTemplate `json:"stargate,omitempty"` + + // Reaper defines the desired deployment characteristics for Reaper in this K8ssandraCluster. + // If this is non-nil, Reaper will be deployed on every Cassandra datacenter in this K8ssandraCluster. + // +optional + Reaper *reaperapi.ReaperClusterTemplate `json:"reaper,omitempty"` } // K8ssandraClusterStatus defines the observed state of K8ssandraCluster @@ -55,6 +61,7 @@ type K8ssandraClusterStatus struct { type K8ssandraStatus struct { Cassandra *cassdcapi.CassandraDatacenterStatus `json:"cassandra,omitempty"` Stargate *stargateapi.StargateStatus `json:"stargate,omitempty"` + Reaper *reaperapi.ReaperStatus `json:"reaper,omitempty"` } // +kubebuilder:object:root=true @@ -87,6 +94,24 @@ func (in *K8ssandraCluster) HasStargates() bool { return false } +// HasReapers returns true if at least one Reaper resource will be created as part of the creation +// of this K8ssandraCluster object. +func (in *K8ssandraCluster) HasReapers() bool { + if in == nil { + return false + } else if in.Spec.Reaper != nil { + return true + } else if in.Spec.Cassandra == nil || len(in.Spec.Cassandra.Datacenters) == 0 { + return false + } + for _, dcTemplate := range in.Spec.Cassandra.Datacenters { + if dcTemplate.Reaper != nil { + return true + } + } + return false +} + // +kubebuilder:object:root=true // K8ssandraClusterList contains a list of K8ssandraCluster @@ -197,6 +222,11 @@ type CassandraDatacenterTemplate struct { // deploying Stargate in this datacenter. // +optional Stargate *stargateapi.StargateDatacenterTemplate `json:"stargate,omitempty"` + + // Reaper defines the desired deployment characteristics for Reaper in this datacenter. Leave nil to skip + // deploying Reaper in this datacenter. 
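+ // For example (illustrative), setting `reaper: {}` on one datacenter template enables Reaper for that DC alone, with all defaults applied; HasReapers() then returns true even though the cluster-level `reaper` field is nil.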
+ // +optional + Reaper *reaperapi.ReaperDatacenterTemplate `json:"reaper,omitempty"` } type EmbeddedObjectMeta struct { @@ -236,17 +266,17 @@ type Auth struct { } type CassandraYaml struct { - //Authenticator string `json:"authenticator,omitempty"` + // Authenticator string `json:"authenticator,omitempty"` // - //Authorizer string `json:"authorizer,omitempty"` + // Authorizer string `json:"authorizer,omitempty"` // - //RoleManager string `json:"role_manager,omitempty"` + // RoleManager string `json:"role_manager,omitempty"` // - //RoleValidityMillis *int64 `json:"roles_validity_in_ms,omitempty"` + // RoleValidityMillis *int64 `json:"roles_validity_in_ms,omitempty"` // - //RoleUpdateIntervalMillis *int64 `json:"roles_update_interval_in_ms,omitempty"` + // RoleUpdateIntervalMillis *int64 `json:"roles_update_interval_in_ms,omitempty"` // - //PermissionValidityMillis *int64 `json:"permissions_validity_in_ms,omitempty"` + // PermissionValidityMillis *int64 `json:"permissions_validity_in_ms,omitempty"` // +optional NumTokens *int `json:"num_tokens,omitempty"` diff --git a/apis/k8ssandra/v1alpha1/k8ssandracluster_types_test.go b/apis/k8ssandra/v1alpha1/k8ssandracluster_types_test.go index daa3944e3..27f6cd028 100644 --- a/apis/k8ssandra/v1alpha1/k8ssandracluster_types_test.go +++ b/apis/k8ssandra/v1alpha1/k8ssandracluster_types_test.go @@ -1,6 +1,7 @@ package v1alpha1 import ( + reaperapi "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" "testing" stargateapi "github.com/k8ssandra/k8ssandra-operator/apis/stargate/v1alpha1" @@ -9,6 +10,7 @@ import ( func TestK8ssandraCluster(t *testing.T) { t.Run("HasStargates", testK8ssandraClusterHasStargates) + t.Run("HasReapers", testK8ssandraClusterHasReapers) } func testK8ssandraClusterHasStargates(t *testing.T) { @@ -55,3 +57,46 @@ func testK8ssandraClusterHasStargates(t *testing.T) { assert.True(t, kc.HasStargates()) }) } + +func testK8ssandraClusterHasReapers(t *testing.T) { + t.Run("nil receiver", func(t *testing.T) { + var kc *K8ssandraCluster = nil + assert.False(t, kc.HasReapers()) + }) + t.Run("no reapers", func(t *testing.T) { + kc := K8ssandraCluster{} + assert.False(t, kc.HasReapers()) + }) + t.Run("cluster-level reaper", func(t *testing.T) { + kc := K8ssandraCluster{ + Spec: K8ssandraClusterSpec{ + Reaper: &reaperapi.ReaperClusterTemplate{ + Keyspace: "reaper", + }, + }, + } + assert.True(t, kc.HasReapers()) + }) + t.Run("dc-level reaper", func(t *testing.T) { + kc := K8ssandraCluster{ + Spec: K8ssandraClusterSpec{ + Cassandra: &CassandraClusterTemplate{ + Cluster: "cluster1", + Datacenters: []CassandraDatacenterTemplate{ + { + Size: 3, + Reaper: nil, + }, + { + Size: 3, + Reaper: &reaperapi.ReaperDatacenterTemplate{ + ServiceAccountName: "reaper_sa", + }, + }, + }, + }, + }, + } + assert.True(t, kc.HasReapers()) + }) +} diff --git a/apis/k8ssandra/v1alpha1/zz_generated.deepcopy.go b/apis/k8ssandra/v1alpha1/zz_generated.deepcopy.go index 37ea6453a..1184b987f 100644 --- a/apis/k8ssandra/v1alpha1/zz_generated.deepcopy.go +++ b/apis/k8ssandra/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -22,6 +23,7 @@ package v1alpha1 import ( "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1" + reaperv1alpha1 "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" stargatev1alpha1 "github.com/k8ssandra/k8ssandra-operator/apis/stargate/v1alpha1" "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -172,6 +174,11 @@ func (in 
*CassandraDatacenterTemplate) DeepCopyInto(out *CassandraDatacenterTemp *out = new(stargatev1alpha1.StargateDatacenterTemplate) (*in).DeepCopyInto(*out) } + if in.Reaper != nil { + in, out := &in.Reaper, &out.Reaper + *out = new(reaperv1alpha1.ReaperDatacenterTemplate) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraDatacenterTemplate. @@ -425,6 +432,11 @@ func (in *K8ssandraClusterSpec) DeepCopyInto(out *K8ssandraClusterSpec) { *out = new(stargatev1alpha1.StargateClusterTemplate) (*in).DeepCopyInto(*out) } + if in.Reaper != nil { + in, out := &in.Reaper, &out.Reaper + *out = new(reaperv1alpha1.ReaperClusterTemplate) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8ssandraClusterSpec. @@ -472,6 +484,11 @@ func (in *K8ssandraStatus) DeepCopyInto(out *K8ssandraStatus) { *out = new(stargatev1alpha1.StargateStatus) (*in).DeepCopyInto(*out) } + if in.Reaper != nil { + in, out := &in.Reaper, &out.Reaper + *out = new(reaperv1alpha1.ReaperStatus) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8ssandraStatus. diff --git a/apis/reaper/v1alpha1/groupversion_info.go b/apis/reaper/v1alpha1/groupversion_info.go new file mode 100644 index 000000000..c3365598d --- /dev/null +++ b/apis/reaper/v1alpha1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 contains API Schema definitions for the reaper v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=reaper.k8ssandra.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "reaper.k8ssandra.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/reaper/v1alpha1/reaper_types.go b/apis/reaper/v1alpha1/reaper_types.go new file mode 100644 index 000000000..eb01f1a13 --- /dev/null +++ b/apis/reaper/v1alpha1/reaper_types.go @@ -0,0 +1,312 @@ +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +const ( + ReaperLabel = "k8ssandra.io/reaper" + DefaultKeyspace = "reaper_db" +) + +type ReaperDatacenterTemplate struct { + + // The image to use. + // +kubebuilder:default="thelastpickle/cassandra-reaper:3.0.0" + // +optional + Image string `json:"image,omitempty"` + + // +kubebuilder:default="IfNotPresent" + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + + // +kubebuilder:default="default" + // +optional + ServiceAccountName string `json:"ServiceAccountName,omitempty"` + + // Auto scheduling properties. When you enable the auto-schedule feature, Reaper dynamically schedules repairs for + // all non-system keyspaces in a cluster. A cluster's keyspaces are monitored and any modifications (additions or + // removals) are detected. When a new keyspace is created, a new repair schedule is created automatically for that + // keyspace. Conversely, when a keyspace is removed, the corresponding repair schedule is deleted. + // +optional + AutoScheduling AutoScheduling `json:"autoScheduling,omitempty"` + + // Affinity applied to the Reaper pods. + // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty"` + + // Tolerations applied to the Reaper pods. + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // PodSecurityContext contains a pod-level SecurityContext to apply to Reaper pods. + // +optional + PodSecurityContext *corev1.PodSecurityContext `json:"podSecurityContext,omitempty"` + + // SecurityContext applied to the Reaper main container. + // +optional + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"` + + // InitContainerSecurityContext is the SecurityContext applied to the Reaper init container, used to perform schema + // migrations. + // +optional + InitContainerSecurityContext *corev1.SecurityContext `json:"initContainerSecurityContext,omitempty"` +} + +// AutoScheduling includes options to configure the auto scheduling of repairs for new clusters. +type AutoScheduling struct { + + // +optional + // +kubebuilder:default=false + Enabled bool `json:"enabled,omitempty"` + + // RepairType is the type of repair to create: + // - REGULAR creates a regular repair (non-adaptive and non-incremental); + // - ADAPTIVE creates an adaptive repair; adaptive repairs are most suited for Cassandra 3. + // - INCREMENTAL creates an incremental repair; incremental repairs should only be used with Cassandra 4+. + // - AUTO chooses between ADAPTIVE and INCREMENTAL depending on the Cassandra server version; ADAPTIVE for Cassandra + // 3 and INCREMENTAL for Cassandra 4+. + // +optional + // +kubebuilder:default="AUTO" + // +kubebuilder:validation:Enum:=REGULAR;ADAPTIVE;INCREMENTAL;AUTO + RepairType string `json:"repairType,omitempty"` + + // PercentUnrepairedThreshold is the percentage of unrepaired data over which an incremental repair should be + // started. Only relevant when using repair type INCREMENTAL. + // +optional + // +kubebuilder:default=10 + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=100 + PercentUnrepairedThreshold int `json:"percentUnrepairedThreshold,omitempty"` + + // InitialDelay is the amount of delay time before the schedule period starts. 
Must be a valid ISO-8601 duration + // string. The default is "PT15S" (15 seconds). + // +optional + // +kubebuilder:default="PT15S" + // +kubebuilder:validation:Pattern:="([-+]?)P(?:([-+]?[0-9]+)D)?(T(?:([-+]?[0-9]+)H)?(?:([-+]?[0-9]+)M)?(?:([-+]?[0-9]+)(?:[.,]([0-9]{0,9}))?S)?)?" + InitialDelay string `json:"initialDelayPeriod,omitempty"` + + // PeriodBetweenPolls is the interval time to wait before checking whether to start a repair task. Must be a valid + // ISO-8601 duration string. The default is "PT10M" (10 minutes). + // +optional + // +kubebuilder:default="PT10M" + // +kubebuilder:validation:Pattern:="([-+]?)P(?:([-+]?[0-9]+)D)?(T(?:([-+]?[0-9]+)H)?(?:([-+]?[0-9]+)M)?(?:([-+]?[0-9]+)(?:[.,]([0-9]{0,9}))?S)?)?" + PeriodBetweenPolls string `json:"periodBetweenPolls,omitempty"` + + // TimeBeforeFirstSchedule is the grace period before the first repair in the schedule is started. Must be a valid + // ISO-8601 duration string. The default is "PT5M" (5 minutes). + // +optional + // +kubebuilder:default="PT5M" + // +kubebuilder:validation:Pattern:="([-+]?)P(?:([-+]?[0-9]+)D)?(T(?:([-+]?[0-9]+)H)?(?:([-+]?[0-9]+)M)?(?:([-+]?[0-9]+)(?:[.,]([0-9]{0,9}))?S)?)?" + TimeBeforeFirstSchedule string `json:"timeBeforeFirstSchedule,omitempty"` + + // ScheduleSpreadPeriod is the time spacing between each of the repair schedules that is to be carried out. Must be + // a valid ISO-8601 duration string. The default is "PT6H" (6 hours). + // +optional + // +kubebuilder:default="PT6H" + // +kubebuilder:validation:Pattern:="([-+]?)P(?:([-+]?[0-9]+)D)?(T(?:([-+]?[0-9]+)H)?(?:([-+]?[0-9]+)M)?(?:([-+]?[0-9]+)(?:[.,]([0-9]{0,9}))?S)?)?" + ScheduleSpreadPeriod string `json:"scheduleSpreadPeriod,omitempty"` + + // ExcludedClusters are the clusters that are to be excluded from the repair schedule. + // +optional + ExcludedClusters []string `json:"excludedClusters,omitempty"` + + // ExcludedKeyspaces are the keyspaces that are to be excluded from the repair schedule. + // +optional + ExcludedKeyspaces []string `json:"excludedKeyspaces,omitempty"` +} + +type ReaperClusterTemplate struct { + ReaperDatacenterTemplate `json:",inline"` + + // The keyspace to use to store Reaper's state. Will default to "reaper_db" if unspecified. Will be created if it + // does not exist, and if this Reaper resource is managed by K8ssandra. + // +kubebuilder:default="reaper_db" + // +optional + Keyspace string `json:"keyspace,omitempty"` + + // Defines the username and password that Reaper will use to authenticate CQL connections to Cassandra clusters. + // These credentials will be automatically turned into CQL roles by cass-operator when bootstrapping the datacenter, + // then passed to the Reaper instance, so that it can authenticate against nodes in the datacenter using CQL. If CQL + // authentication is not required, leave this field empty. The secret must be in the same namespace as Reaper itself + // and must contain two keys: "username" and "password". + // +optional + CassandraUserSecretRef string `json:"cassandraUserSecretRef,omitempty"` + + // Defines the username and password that Reaper will use to authenticate JMX connections to Cassandra clusters. + // These credentials will be automatically passed to each Cassandra node in the datacenter, as well as to the Reaper + // instance, so that the latter can authenticate against the former. If JMX authentication is not required, leave + // this field empty. The secret must be in the same namespace as Reaper itself and must contain two keys: "username" + // and "password". 
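+ // For example (illustrative; the secret name and values are placeholders), such a secret could be created with: + // kubectl create secret generic reaper-jmx --from-literal=username=reaper-jmx --from-literal=password=<password>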
+ // +optional + JmxUserSecretRef string `json:"jmxUserSecretRef,omitempty"` +} + +// CassandraDatacenterRef references the target Cassandra DC that Reaper should manage. +// TODO this object could be used by Stargate too, which currently cannot locate DCs outside of its own namespace. +type CassandraDatacenterRef struct { + + // The datacenter name. + // +kubebuilder:validation:Required + Name string `json:"name"` + + // The datacenter namespace. If empty, the datacenter will be assumed to reside in the same namespace as the Reaper + // instance. + // +optional + Namespace string `json:"namespace,omitempty"` +} + +// ReaperSpec defines the desired state of Reaper +type ReaperSpec struct { + ReaperClusterTemplate `json:",inline"` + + // DatacenterRef is a reference to the CassandraDatacenter resource that this Reaper instance should manage. It will + // also be used as the backend for persisting Reaper's state. Reaper must be able to access the JMX port (7199 by + // default) and the CQL port (9042 by default) on this DC. + // +kubebuilder:validation:Required + DatacenterRef CassandraDatacenterRef `json:"datacenterRef"` + + // DatacenterAvailability indicates to Reaper its deployment in relation to the target datacenter's network. + // For single-DC clusters, the default (LOCAL) is fine. For multi-DC clusters, it is recommended to use EACH, + // provided that there is one Reaper instance managing each DC in the cluster; otherwise, if a single Reaper + // instance is going to manage more than one DC in the cluster, use LOCAL, in which case remote DCs will be handled + // internally by Cassandra itself. + // See https://cassandra-reaper.io/docs/usage/multi_dc/. + // +optional + // +kubebuilder:default="LOCAL" + // +kubebuilder:validation:Enum:=LOCAL;ALL;EACH + DatacenterAvailability string `json:"datacenterAvailability,omitempty"` +} + +// ReaperProgress is a one-word summary of the state of a Reaper resource. +type ReaperProgress string + +const ( + // ReaperProgressPending is Reaper's status when it's waiting for the datacenter to become ready. + ReaperProgressPending = ReaperProgress("Pending") + // ReaperProgressDeploying is Reaper's status when it's waiting for the Reaper instance and its associated service + // to become ready. + ReaperProgressDeploying = ReaperProgress("Deploying") + // ReaperProgressConfiguring is Reaper's status when the Reaper instance is ready for work and is being connected + // to its target datacenter. + ReaperProgressConfiguring = ReaperProgress("Configuring") + // ReaperProgressRunning is Reaper's status when Reaper is up and running. + ReaperProgressRunning = ReaperProgress("Running") +) + +type ReaperConditionType string + +const ( + ReaperReady ReaperConditionType = "Ready" +) + +type ReaperCondition struct { + Type ReaperConditionType `json:"type"` + Status corev1.ConditionStatus `json:"status"` + + // LastTransitionTime is the last time the condition transitioned from one status to another. + // +optional + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` +} + +// ReaperStatus defines the observed state of Reaper +type ReaperStatus struct { + + // Progress is the progress of this Reaper object.
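+ // In the normal flow, it moves from Pending (waiting for the datacenter to become ready) through Deploying (waiting for the Reaper deployment and its service) and Configuring (connecting Reaper to its target datacenter) to Running.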
+ // +kubebuilder:validation:Enum=Pending;Deploying;Configuring;Running + Progress ReaperProgress `json:"progress"` + + // +optional + Conditions []ReaperCondition `json:"conditions,omitempty"` +} + +func (in *ReaperStatus) GetConditionStatus(conditionType ReaperConditionType) corev1.ConditionStatus { + if in != nil { + for _, condition := range in.Conditions { + if condition.Type == conditionType { + return condition.Status + } + } + } + return corev1.ConditionUnknown +} + +func (in *ReaperStatus) SetCondition(condition ReaperCondition) { + for i, c := range in.Conditions { + if c.Type == condition.Type { + in.Conditions[i] = condition + return + } + } + in.Conditions = append(in.Conditions, condition) +} + +func (in *ReaperStatus) IsReady() bool { + return in != nil && in.GetConditionStatus(ReaperReady) == corev1.ConditionTrue +} + +func (in *ReaperStatus) SetReady() { + now := metav1.Now() + in.SetCondition(ReaperCondition{ + Type: ReaperReady, + Status: corev1.ConditionTrue, + LastTransitionTime: &now, + }) +} + +func (in *ReaperStatus) SetNotReady() { + now := metav1.Now() + in.SetCondition(ReaperCondition{ + Type: ReaperReady, + Status: corev1.ConditionFalse, + LastTransitionTime: &now, + }) +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="DC",type=string,JSONPath=`.spec.datacenterRef.name` +// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=`.status.progress` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +// Reaper is the Schema for the reapers API +type Reaper struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ReaperSpec `json:"spec,omitempty"` + Status ReaperStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ReaperList contains a list of Reaper +type ReaperList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Reaper `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Reaper{}, &ReaperList{}) +} diff --git a/apis/reaper/v1alpha1/zz_generated.deepcopy.go b/apis/reaper/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..5388b4700 --- /dev/null +++ b/apis/reaper/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,243 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AutoScheduling) DeepCopyInto(out *AutoScheduling) { + *out = *in + if in.ExcludedClusters != nil { + in, out := &in.ExcludedClusters, &out.ExcludedClusters + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedKeyspaces != nil { + in, out := &in.ExcludedKeyspaces, &out.ExcludedKeyspaces + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoScheduling. +func (in *AutoScheduling) DeepCopy() *AutoScheduling { + if in == nil { + return nil + } + out := new(AutoScheduling) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraDatacenterRef) DeepCopyInto(out *CassandraDatacenterRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraDatacenterRef. +func (in *CassandraDatacenterRef) DeepCopy() *CassandraDatacenterRef { + if in == nil { + return nil + } + out := new(CassandraDatacenterRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Reaper) DeepCopyInto(out *Reaper) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Reaper. +func (in *Reaper) DeepCopy() *Reaper { + if in == nil { + return nil + } + out := new(Reaper) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Reaper) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReaperClusterTemplate) DeepCopyInto(out *ReaperClusterTemplate) { + *out = *in + in.ReaperDatacenterTemplate.DeepCopyInto(&out.ReaperDatacenterTemplate) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReaperClusterTemplate. +func (in *ReaperClusterTemplate) DeepCopy() *ReaperClusterTemplate { + if in == nil { + return nil + } + out := new(ReaperClusterTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReaperCondition) DeepCopyInto(out *ReaperCondition) { + *out = *in + if in.LastTransitionTime != nil { + in, out := &in.LastTransitionTime, &out.LastTransitionTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReaperCondition. +func (in *ReaperCondition) DeepCopy() *ReaperCondition { + if in == nil { + return nil + } + out := new(ReaperCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReaperDatacenterTemplate) DeepCopyInto(out *ReaperDatacenterTemplate) { + *out = *in + in.AutoScheduling.DeepCopyInto(&out.AutoScheduling) + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PodSecurityContext != nil { + in, out := &in.PodSecurityContext, &out.PodSecurityContext + *out = new(v1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.SecurityContext) + (*in).DeepCopyInto(*out) + } + if in.InitContainerSecurityContext != nil { + in, out := &in.InitContainerSecurityContext, &out.InitContainerSecurityContext + *out = new(v1.SecurityContext) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReaperDatacenterTemplate. +func (in *ReaperDatacenterTemplate) DeepCopy() *ReaperDatacenterTemplate { + if in == nil { + return nil + } + out := new(ReaperDatacenterTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReaperList) DeepCopyInto(out *ReaperList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Reaper, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReaperList. +func (in *ReaperList) DeepCopy() *ReaperList { + if in == nil { + return nil + } + out := new(ReaperList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReaperList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReaperSpec) DeepCopyInto(out *ReaperSpec) { + *out = *in + in.ReaperClusterTemplate.DeepCopyInto(&out.ReaperClusterTemplate) + out.DatacenterRef = in.DatacenterRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReaperSpec. +func (in *ReaperSpec) DeepCopy() *ReaperSpec { + if in == nil { + return nil + } + out := new(ReaperSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReaperStatus) DeepCopyInto(out *ReaperStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReaperCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReaperStatus. 
+func (in *ReaperStatus) DeepCopy() *ReaperStatus { + if in == nil { + return nil + } + out := new(ReaperStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/replication/v1alpha1/zz_generated.deepcopy.go b/apis/replication/v1alpha1/zz_generated.deepcopy.go index a397bc196..710fceada 100644 --- a/apis/replication/v1alpha1/zz_generated.deepcopy.go +++ b/apis/replication/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/apis/stargate/v1alpha1/zz_generated.deepcopy.go b/apis/stargate/v1alpha1/zz_generated.deepcopy.go index b3fcacac0..f2394704d 100644 --- a/apis/stargate/v1alpha1/zz_generated.deepcopy.go +++ b/apis/stargate/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/config/crd/bases/k8ssandra.io_k8ssandraclusters.yaml b/config/crd/bases/k8ssandra.io_k8ssandraclusters.yaml index aa871aeb5..b396f7a13 100644 --- a/config/crd/bases/k8ssandra.io_k8ssandraclusters.yaml +++ b/config/crd/bases/k8ssandra.io_k8ssandraclusters.yaml @@ -242,57 +242,16 @@ spec: - name type: object type: array - resources: - description: Resources is the cpu and memory resources for - the cassandra container. - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of - compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - serverImage: - type: string - serverVersion: - description: ServerVersion is the Cassandra version. - pattern: (3\.11\.\d+)|(4\.0\.\d+) - type: string - size: - description: Size is the number Cassandra pods to deploy - in this datacenter. This number does not include Stargate - instances. - format: int32 - minimum: 1 - type: integer - stargate: - description: Stargate defines the desired deployment characteristics - for Stargate in this datacenter. Leave nil to skip deploying - Stargate in this datacenter. + reaper: + description: Reaper defines the desired deployment characteristics + for Reaper in this datacenter. Leave nil to skip deploying + Reaper in this datacenter. properties: + ServiceAccountName: + default: default + type: string affinity: - description: Affinity is the affinity to apply to all - the Stargate pods. Leave nil to let the controller - reuse the same affinity rules used for data pods in - this datacenter, if any. See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity + description: Affinity applied to the Reaper pods. 
properties: nodeAffinity: description: Describes node affinity scheduling @@ -1277,1168 +1236,2860 @@ spec: type: array type: object type: object - allowStargateOnDataNodes: - default: false - description: 'AllowStargateOnDataNodes allows Stargate - pods to be scheduled on a worker node already hosting - data pods for this datacenter. The default is false, - which means that Stargate pods will be scheduled on - separate worker nodes. Note: if the datacenter pods - have HostNetwork:true, then the Stargate pods will - inherit of it, in which case it is possible that Stargate - nodes won''t be allowed to sit on data nodes even - if this property is set to true, because of port conflicts - on the same IP address.' - type: boolean - cassandraConfigMapRef: - description: CassandraConfigMapRef is a reference to - a ConfigMap that holds Cassandra configuration. The - map should have a key named cassandra_yaml. + autoScheduling: + description: Auto scheduling properties. When you enable + the auto-schedule feature, Reaper dynamically schedules + repairs for all non-system keyspaces in a cluster. + A cluster's keyspaces are monitored and any modifications + (additions or removals) are detected. When a new keyspace + is created, a new repair schedule is created automatically + for that keyspace. Conversely, when a keyspace is + removed, the corresponding repair schedule is deleted. properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + enabled: + default: false + type: boolean + excludedClusters: + description: ExcludedClusters are the clusters that + are to be excluded from the repair schedule. + items: + type: string + type: array + excludedKeyspaces: + description: ExcludedKeyspaces are the keyspaces + that are to be excluded from the repair schedule. + items: + type: string + type: array + initialDelayPeriod: + default: PT15S + description: InitialDelay is the amount of delay + time before the schedule period starts. Must be + a valid ISO-8601 duration string. The default + is "PT15S" (15 seconds). + pattern: ([-+]?)P(?:([-+]?[0-9]+)D)?(T(?:([-+]?[0-9]+)H)?(?:([-+]?[0-9]+)M)?(?:([-+]?[0-9]+)(?:[.,]([0-9]{0,9}))?S)?)? + type: string + percentUnrepairedThreshold: + default: 10 + description: PercentUnrepairedThreshold is the percentage + of unrepaired data over which an incremental repair + should be started. Only relevant when using repair + type INCREMENTAL. + maximum: 100 + minimum: 0 + type: integer + periodBetweenPolls: + default: PT10M + description: PeriodBetweenPolls is the interval + time to wait before checking whether to start + a repair task. Must be a valid ISO-8601 duration + string. The default is "PT10M" (10 minutes). + pattern: ([-+]?)P(?:([-+]?[0-9]+)D)?(T(?:([-+]?[0-9]+)H)?(?:([-+]?[0-9]+)M)?(?:([-+]?[0-9]+)(?:[.,]([0-9]{0,9}))?S)?)? + type: string + repairType: + default: AUTO + description: 'RepairType is the type of repair to + create: - REGULAR creates a regular repair (non-adaptive + and non-incremental); - ADAPTIVE creates an adaptive + repair; adaptive repairs are most suited for Cassandra + 3. - INCREMENTAL creates an incremental repair; + incremental repairs should only be used with Cassandra + 4+. - AUTO chooses between ADAPTIVE and INCREMENTAL + depending on the Cassandra server version; ADAPTIVE + for Cassandra 3 and INCREMENTAL for Cassandra + 4+.' 
+ enum: + - REGULAR + - ADAPTIVE + - INCREMENTAL + - AUTO + type: string + scheduleSpreadPeriod: + default: PT6H + description: ScheduleSpreadPeriod is the time spacing + between each of the repair schedules that is to + be carried out. Must be a valid ISO-8601 duration + string. The default is "PT6H" (6 hours). + pattern: ([-+]?)P(?:([-+]?[0-9]+)D)?(T(?:([-+]?[0-9]+)H)?(?:([-+]?[0-9]+)M)?(?:([-+]?[0-9]+)(?:[.,]([0-9]{0,9}))?S)?)? + type: string + timeBeforeFirstSchedule: + default: PT5M + description: TimeBeforeFirstSchedule is the grace + period before the first repair in the schedule + is started. Must be a valid ISO-8601 duration + string. The default is "PT5M" (5 minutes). + pattern: ([-+]?)P(?:([-+]?[0-9]+)D)?(T(?:([-+]?[0-9]+)H)?(?:([-+]?[0-9]+)M)?(?:([-+]?[0-9]+)(?:[.,]([0-9]{0,9}))?S)?)? type: string type: object - heapSize: - anyOf: - - type: integer - - type: string - default: 256Mi - description: 'HeapSize sets the JVM heap size to use - for Stargate. If no Resources are specified, this - value will also be used to set a default memory request - and limit for the Stargate pods: these will be set - to HeapSize x2 and x4, respectively.' - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - livenessProbe: - description: LivenessProbe sets the Stargate liveness - probe. Leave nil to use defaults. + image: + default: thelastpickle/cassandra-reaper:3.0.0 + description: The image to use. + type: string + imagePullPolicy: + default: IfNotPresent + description: PullPolicy describes a policy for if/when + to pull a container image + type: string + initContainerSecurityContext: + description: InitContainerSecurityContext is the SecurityContext + applied to the Reaper init container, used to perform + schema migrations. properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls + whether a process can gain more privileges than + its parent process. This bool directly controls + if the no_new_privs flag will be set on the container + process. AllowPrivilegeEscalation is true always + when the container is: 1) run as Privileged 2) + has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running + containers. Defaults to the default set of capabilities + granted by the container runtime. properties: - command: - description: Command is the command line to - execute inside the container, the working - directory for the command is root ('/') in - the container's filesystem. The command is - simply exec'd, it is not run inside a shell, - so traditional shell instructions ('|', etc) - won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is - treated as live/healthy and non-zero is unhealthy. + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities items: + description: Capability represent POSIX capabilities + type type: string type: array type: object - failureThreshold: - description: Minimum consecutive failures for the - probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 + privileged: + description: Run container in privileged mode. 
Processes + in privileged containers are essentially equivalent + to root on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc + mount to use for the containers. The default is + DefaultProcMount which uses the container runtime + defaults for readonly paths and masked paths. + This requires the ProcMountType feature flag to + be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only + root filesystem. Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the + container process. Uses runtime default if unset. + May also be set in PodSecurityContext. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + format: int64 type: integer - httpGet: - description: HTTPGet specifies the http request - to perform. + runAsNonRoot: + description: Indicates that the container must run + as a non-root user. If true, the Kubelet will + validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no + such validation will be performed. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in + SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the + container process. Defaults to user specified + in image metadata if unspecified. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in + SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to + the container. If unspecified, the container runtime + will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. properties: - host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set "Host" - in httpHeaders instead. + level: + description: Level is SELinux level label that + applies to the container. type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. + role: + description: Role is a SELinux role label that + applies to the container. type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to - the host. Defaults to HTTP. + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this + container. 
If seccomp options are provided at + both the pod & container level, the container + options override the pod options. + properties: + localhostProfile: + description: localhostProfile indicates a profile + defined in a file on the node should be used. + The profile must be preconfigured on the node + to work. Must be a descending path, relative + to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp + profile will be applied. Valid options are: + \n Localhost - a profile defined in a file + on the node should be used. RuntimeDefault + - the container runtime default profile should + be used. Unconfined - no profile should be + applied." type: string required: - - port + - type type: object - initialDelaySeconds: - description: 'Number of seconds after the container - has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 + windowsOptions: + description: The Windows specific settings applied + to all containers. If unspecified, the options + from the PodSecurityContext will be used. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the + GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential + spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container + should be run as a 'Host Process' container. + This field is alpha-level and will only be + honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the + feature flag will result in errors when validating + the Pod. All of a Pod's containers must have + the same effective HostProcess value (it is + not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, + if HostProcess is true then HostNetwork must + also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run + the entrypoint of the container process. Defaults + to the user specified in image metadata if + unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. + type: string + type: object + type: object + podSecurityContext: + description: PodSecurityContext contains a pod-level + SecurityContext to apply to Reaper pods. + properties: + fsGroup: + description: "A special supplemental group that + applies to all containers in a pod. Some volume + types allow the Kubelet to change the ownership + of that volume to be owned by the pod: \n 1. The + owning GID will be the FSGroup 2. The setgid bit + is set (new files created in the volume will be + owned by FSGroup) 3. The permission bits are OR'd + with rw-rw---- \n If unset, the Kubelet will not + modify the ownership and permissions of any volume." + format: int64 type: integer - periodSeconds: - description: How often (in seconds) to perform the - probe. Default to 10 seconds. Minimum value is - 1. 
- format: int32 + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior + of changing ownership and permission of the volume + before being exposed inside Pod. This field will + only apply to volume types which support fsGroup + based ownership(and permissions). It will have + no effect on ephemeral volume types such as: secret, + configmaps and emptydir. Valid values are "OnRootMismatch" + and "Always". If not specified, "Always" is used.' + type: string + runAsGroup: + description: The GID to run the entrypoint of the + container process. Uses runtime default if unset. + May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence + for that container. + format: int64 type: integer - successThreshold: - description: Minimum consecutive successes for the - probe to be considered successful after having - failed. Defaults to 1. Must be 1 for liveness - and startup. Minimum value is 1. - format: int32 + runAsNonRoot: + description: Indicates that the container must run + as a non-root user. If true, the Kubelet will + validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no + such validation will be performed. May also be + set in SecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in + SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the + container process. Defaults to user specified + in image metadata if unspecified. May also be + set in SecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in + SecurityContext takes precedence for that container. + format: int64 type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: - implement a realistic TCP lifecycle hook' + seLinuxOptions: + description: The SELinux context to be applied to + all containers. If unspecified, the container + runtime will allocate a random SELinux context + for each container. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence + for that container. properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by the containers + in this pod. + properties: + localhostProfile: + description: localhostProfile indicates a profile + defined in a file on the node should be used. + The profile must be preconfigured on the node + to work. Must be a descending path, relative + to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp + profile will be applied. Valid options are: + \n Localhost - a profile defined in a file + on the node should be used. 
RuntimeDefault + - the container runtime default profile should + be used. Unconfined - no profile should be + applied." type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true required: - - port + - type + type: object + supplementalGroups: + description: A list of groups applied to the first + process run in each container, in addition to + the container's primary GID. If unspecified, + no groups will be added to any container. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls + used for the pod. Pods with unsupported sysctls + (by the container runtime) might fail to launch. + items: + description: Sysctl defines a kernel parameter + to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied + to all containers. If unspecified, the options + within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the + GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential + spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container + should be run as a 'Host Process' container. + This field is alpha-level and will only be + honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the + feature flag will result in errors when validating + the Pod. All of a Pod's containers must have + the same effective HostProcess value (it is + not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, + if HostProcess is true then HostNetwork must + also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run + the entrypoint of the container process. Defaults + to the user specified in image metadata if + unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. + type: string type: object - terminationGracePeriodSeconds: - description: Optional duration in seconds the pod - needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after - the processes running in the pod are sent a termination - signal and the time when the processes are forcibly - halted with a kill signal. Set this value longer - than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value overrides - the value provided by the pod spec. Value must - be non-negative integer. The value zero indicates - stop immediately via the kill signal (no opportunity - to shut down). This is a beta field and requires - enabling ProbeTerminationGracePeriod feature gate. 
- Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: 'Number of seconds after which the - probe times out. Defaults to 1 second. Minimum - value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - nodeSelector: - additionalProperties: - type: string - description: NodeSelector is an optional map of label - keys and values to restrict the scheduling of Stargate - nodes to workers with matching labels. Leave nil to - let the controller reuse the same node selectors used - for data pods in this datacenter, if any. See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector type: object - racks: - description: Racks allow customizing Stargate characteristics - for specific racks in the datacenter. + securityContext: + description: SecurityContext applied to the Reaper main + container. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls + whether a process can gain more privileges than + its parent process. This bool directly controls + if the no_new_privs flag will be set on the container + process. AllowPrivilegeEscalation is true always + when the container is: 1) run as Privileged 2) + has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running + containers. Defaults to the default set of capabilities + granted by the container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent + to root on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc + mount to use for the containers. The default is + DefaultProcMount which uses the container runtime + defaults for readonly paths and masked paths. + This requires the ProcMountType feature flag to + be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only + root filesystem. Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the + container process. Uses runtime default if unset. + May also be set in PodSecurityContext. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run + as a non-root user. If true, the Kubelet will + validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no + such validation will be performed. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in + SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the + container process. Defaults to user specified + in image metadata if unspecified. May also be + set in PodSecurityContext. 
If set in both SecurityContext + and PodSecurityContext, the value specified in + SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to + the container. If unspecified, the container runtime + will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this + container. If seccomp options are provided at + both the pod & container level, the container + options override the pod options. + properties: + localhostProfile: + description: localhostProfile indicates a profile + defined in a file on the node should be used. + The profile must be preconfigured on the node + to work. Must be a descending path, relative + to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp + profile will be applied. Valid options are: + \n Localhost - a profile defined in a file + on the node should be used. RuntimeDefault + - the container runtime default profile should + be used. Unconfined - no profile should be + applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied + to all containers. If unspecified, the options + from the PodSecurityContext will be used. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the + GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential + spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container + should be run as a 'Host Process' container. + This field is alpha-level and will only be + honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the + feature flag will result in errors when validating + the Pod. All of a Pod's containers must have + the same effective HostProcess value (it is + not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, + if HostProcess is true then HostNetwork must + also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run + the entrypoint of the container process. Defaults + to the user specified in image metadata if + unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. + type: string + type: object + type: object + tolerations: + description: Tolerations applied to the Reaper pods. 
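The hunk above gives Reaper both a pod-level security context and a container-level `securityContext`, plus `tolerations` for its pods. As a minimal sketch — assuming these fields nest under a `reaper` template in the K8ssandraCluster spec, and assuming the pod-level field is named `podSecurityContext` (neither nesting nor that name is shown in this hunk) — they might be set like this:

```yaml
# Illustrative only: the apiVersion and the nesting under spec.reaper are
# assumptions inferred from this diff, not a verbatim example from the repo.
apiVersion: k8ssandra.io/v1alpha1
kind: K8ssandraCluster
metadata:
  name: demo
spec:
  reaper:
    # Pod-level security context (field name assumed; the schema above
    # documents fsGroupChangePolicy, runAsNonRoot, seccompProfile, etc.)
    podSecurityContext:
      runAsNonRoot: true
      fsGroupChangePolicy: OnRootMismatch
      seccompProfile:
        type: RuntimeDefault
    # Container-level context for the Reaper main container; per the
    # descriptions above, these values take precedence over pod-level ones.
    securityContext:
      allowPrivilegeEscalation: false
      readOnlyRootFilesystem: true
      capabilities:
        drop: ["ALL"]
    # Allow Reaper pods onto workers tainted for Cassandra workloads
    # (taint key/value chosen purely for illustration).
    tolerations:
      - key: dedicated
        operator: Equal
        value: cassandra
        effect: NoSchedule
```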
items: - description: StargateRackTemplate defines custom rules - for Stargate pods in a given rack. These rules will - be merged with rules defined at datacenter level - in a StargateDatacenterTemplate; rack-level rules - have precedence over datacenter-level ones. + description: The pod this Toleration is attached to + tolerates any taint that matches the triple + using the matching operator . properties: - affinity: - description: Affinity is the affinity to apply - to all the Stargate pods. Leave nil to let the - controller reuse the same affinity rules used - for data pods in this datacenter, if any. See - https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity - properties: - nodeAffinity: - description: Describes node affinity scheduling - rules for the pod. + effect: + description: Effect indicates the taint effect + to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, + PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; + this combination means to match all values and + all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and + Equal. Defaults to Equal. Exists is equivalent + to wildcard for value, so that a pod can tolerate + all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the + period of time the toleration (which must be + of effect NoExecute, otherwise this field is + ignored) tolerates the taint. By default, it + is not set, which means tolerate the taint forever + (do not evict). Zero and negative values will + be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value + should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + resources: + description: Resources is the cpu and memory resources for + the cassandra container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of + compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is omitted + for a container, it defaults to Limits if that is + explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + serverImage: + type: string + serverVersion: + description: ServerVersion is the Cassandra version. + pattern: (3\.11\.\d+)|(4\.0\.\d+) + type: string + size: + description: Size is the number Cassandra pods to deploy + in this datacenter. 
This number does not include Stargate + instances. + format: int32 + minimum: 1 + type: integer + stargate: + description: Stargate defines the desired deployment characteristics + for Stargate in this datacenter. Leave nil to skip deploying + Stargate in this datacenter. + properties: + affinity: + description: Affinity is the affinity to apply to all + the Stargate pods. Leave nil to let the controller + reuse the same affinity rules used for data pods in + this datacenter, if any. See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity + properties: + nodeAffinity: + description: Describes node affinity scheduling + rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose + a node that violates one or more of the expressions. + The node that is most preferred is the one + with the greatest sum of weights, i.e. for + each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum + by iterating through the elements of this + field and adding "weight" to the sum if the + node matches the corresponding matchExpressions; + the node(s) with the highest sum are the most + preferred. + items: + description: An empty preferred scheduling + term matches all objects with implicit weight + 0 (i.e. it's a no-op). A null preferred + scheduling term matches no objects (i.e. + is also a no-op). properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer - to schedule pods to nodes that satisfy - the affinity expressions specified by - this field, but it may choose a node - that violates one or more of the expressions. - The node that is most preferred is the - one with the greatest sum of weights, - i.e. for each node that meets all of - the scheduling requirements (resource - request, requiredDuringScheduling affinity - expressions, etc.), compute a sum by - iterating through the elements of this - field and adding "weight" to the sum - if the node matches the corresponding - matchExpressions; the node(s) with the - highest sum are the most preferred. - items: - description: An empty preferred scheduling - term matches all objects with implicit - weight 0 (i.e. it's a no-op). A null - preferred scheduling term matches - no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, - associated with the corresponding - weight. + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. properties: - matchExpressions: - description: A list of node - selector requirements by node's - labels. - items: - description: A node selector - requirement is a selector - that contains values, a - key, and an operator that - relates the key and values. - properties: - key: - description: The label - key that the selector - applies to. - type: string - operator: - description: Represents - a key's relationship - to a set of values. - Valid operators are - In, NotIn, Exists, DoesNotExist. - Gt, and Lt. 
- type: string - values: - description: An array - of string values. If - the operator is In or - NotIn, the values array - must be non-empty. If - the operator is Exists - or DoesNotExist, the - values array must be - empty. If the operator - is Gt or Lt, the values - array must have a single - element, which will - be interpreted as an - integer. This array - is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string type: array - matchFields: - description: A list of node - selector requirements by node's - fields. + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. items: - description: A node selector - requirement is a selector - that contains values, a - key, and an operator that - relates the key and values. - properties: - key: - description: The label - key that the selector - applies to. - type: string - operator: - description: Represents - a key's relationship - to a set of values. - Valid operators are - In, NotIn, Exists, DoesNotExist. - Gt, and Lt. - type: string - values: - description: An array - of string values. If - the operator is In or - NotIn, the values array - must be non-empty. If - the operator is Exists - or DoesNotExist, the - values array must be - empty. If the operator - is Gt or Lt, the values - array must have a single - element, which will - be interpreted as an - integer. This array - is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object + type: string type: array + required: + - key + - operator type: object - weight: - description: Weight associated with - matching the corresponding nodeSelectorTerm, - in the range 1-100. 
- format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements - specified by this field are not met - at scheduling time, the pod will not - be scheduled onto the node. If the affinity - requirements specified by this field - cease to be met at some point during - pod execution (e.g. due to an update), - the system may or may not try to eventually - evict the pod from its node. + type: array + type: object + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, + in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, + the pod will not be scheduled onto the node. + If the affinity requirements specified by + this field cease to be met at some point during + pod execution (e.g. due to an update), the + system may or may not try to eventually evict + the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. properties: - nodeSelectorTerms: - description: Required. A list of node - selector terms. The terms are ORed. + matchExpressions: + description: A list of node selector + requirements by node's labels. items: - description: A null or empty node - selector term matches no objects. - The requirements of them are ANDed. - The TopologySelectorTerm type - implements a subset of the NodeSelectorTerm. + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. properties: - matchExpressions: - description: A list of node - selector requirements by node's - labels. + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. items: - description: A node selector - requirement is a selector - that contains values, a - key, and an operator that - relates the key and values. - properties: - key: - description: The label - key that the selector - applies to. - type: string - operator: - description: Represents - a key's relationship - to a set of values. - Valid operators are - In, NotIn, Exists, DoesNotExist. - Gt, and Lt. - type: string - values: - description: An array - of string values. If - the operator is In or - NotIn, the values array - must be non-empty. If - the operator is Exists - or DoesNotExist, the - values array must be - empty. If the operator - is Gt or Lt, the values - array must have a single - element, which will - be interpreted as an - integer. 
This array - is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object + type: string type: array - matchFields: - description: A list of node - selector requirements by node's - fields. + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. items: - description: A node selector - requirement is a selector - that contains values, a - key, and an operator that - relates the key and values. - properties: - key: - description: The label - key that the selector - applies to. - type: string - operator: - description: Represents - a key's relationship - to a set of values. - Valid operators are - In, NotIn, Exists, DoesNotExist. - Gt, and Lt. - type: string - values: - description: An array - of string values. If - the operator is In or - NotIn, the values array - must be non-empty. If - the operator is Exists - or DoesNotExist, the - values array must be - empty. If the operator - is Gt or Lt, the values - array must have a single - element, which will - be interpreted as an - integer. This array - is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object + type: string type: array + required: + - key + - operator type: object type: array - required: - - nodeSelectorTerms type: object - type: object - podAffinity: - description: Describes pod affinity scheduling - rules (e.g. co-locate this pod in the same - node, zone, etc. as some other pod(s)). + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules + (e.g. co-locate this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose + a node that violates one or more of the expressions. + The node that is most preferred is the one + with the greatest sum of weights, i.e. for + each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum + by iterating through the elements of this + field and adding "weight" to the sum if the + node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest + sum are the most preferred. 
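The description just above spells out the scoring rule for preferred pod affinity: for each node, the scheduler adds a term's `weight` to the node's score when the node already runs pods matching that term, and the highest-scoring nodes are preferred. A minimal sketch of one such weighted term, using only fields from the schema in this hunk (the label value and topology key are chosen for illustration):

```yaml
# One weighted term: nodes already running pods labeled as Cassandra in the
# same topology domain (here, the same host) get +50 added to their score.
affinity:
  podAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 50
        podAffinityTerm:
          labelSelector:
            matchLabels:
              app.kubernetes.io/component: cassandra
          topologyKey: kubernetes.io/hostname
```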
+ items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added + per-node to find the most preferred node(s) properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer - to schedule pods to nodes that satisfy - the affinity expressions specified by - this field, but it may choose a node - that violates one or more of the expressions. - The node that is most preferred is the - one with the greatest sum of weights, - i.e. for each node that meets all of - the scheduling requirements (resource - request, requiredDuringScheduling affinity - expressions, etc.), compute a sum by - iterating through the elements of this - field and adding "weight" to the sum - if the node has pods which matches the - corresponding podAffinityTerm; the node(s) - with the highest sum are the most preferred. - items: - description: The weights of all of the - matched WeightedPodAffinityTerm fields - are added per-node to find the most - preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity - term, associated with the corresponding - weight. - properties: - labelSelector: - description: A label query over - a set of resources, in this - case pods. + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + properties: + labelSelector: + description: A label query over a + set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. items: - description: A label selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. - properties: - key: - description: key is - the label key that - the selector applies - to. - type: string - operator: - description: operator - represents a key's - relationship to - a set of values. - Valid operators - are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values - is an array of string - values. If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: type: string - description: matchLabels - is a map of {key,value} - pairs. 
A single {key,value} - in the matchLabels map - is equivalent to an element - of matchExpressions, whose - key field is "key", the - operator is "In", and - the values array contains - only "value". The requirements - are ANDed. - type: object + type: array + required: + - key + - operator type: object - namespaceSelector: - description: A label query over - the set of namespaces that - the term applies to. The term - is applied to the union of - the namespaces selected by - this field and the ones listed - in the namespaces field. null - selector and null or empty - namespaces list means "this - pod's namespace". An empty - selector ({}) matches all - namespaces. This field is - beta-level and is only honored - when PodAffinityNamespaceSelector - feature is enabled. + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the + set of namespaces that the term + applies to. The term is applied + to the union of the namespaces selected + by this field and the ones listed + in the namespaces field. null selector + and null or empty namespaces list + means "this pod's namespace". An + empty selector ({}) matches all + namespaces. This field is beta-level + and is only honored when PodAffinityNamespaceSelector + feature is enabled. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. items: - description: A label selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. - properties: - key: - description: key is - the label key that - the selector applies - to. - type: string - operator: - description: operator - represents a key's - relationship to - a set of values. - Valid operators - are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values - is an array of string - values. If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: type: string - description: matchLabels - is a map of {key,value} - pairs. 
A single {key,value} - in the matchLabels map - is equivalent to an element - of matchExpressions, whose - key field is "key", the - operator is "In", and - the values array contains - only "value". The requirements - are ANDed. - type: object + type: array + required: + - key + - operator type: object - namespaces: - description: namespaces specifies - a static list of namespace - names that the term applies - to. The term is applied to - the union of the namespaces - listed in this field and the - ones selected by namespaceSelector. - null or empty namespaces list - and null namespaceSelector - means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should - be co-located (affinity) or - not co-located (anti-affinity) - with the pods matching the - labelSelector in the specified - namespaces, where co-located - is defined as running on a - node whose value of the label - with key topologyKey matches - that of any node on which - any of the selected pods is - running. Empty topologyKey - is not allowed. + type: array + matchLabels: + additionalProperties: type: string - required: - - topologyKey - type: object - weight: - description: weight associated with - matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements - specified by this field are not met - at scheduling time, the pod will not - be scheduled onto the node. If the affinity - requirements specified by this field - cease to be met at some point during - pod execution (e.g. due to a pod label - update), the system may or may not try - to eventually evict the pod from its - node. When there are multiple elements, - the lists of nodes corresponding to - each podAffinityTerm are intersected, - i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely - those matching the labelSelector relative - to the given namespace(s)) that this - pod should be co-located (affinity) - or not co-located (anti-affinity) - with, where co-located is defined - as running on a node whose value of - the label with key matches - that of any node on which a pod of - the set of pods is running - properties: - labelSelector: - description: A label query over - a set of resources, in this case - pods. - properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies + a static list of namespace names + that the term applies to. The term + is applied to the union of the namespaces + listed in this field and the ones + selected by namespaceSelector. 
null + or empty namespaces list and null + namespaceSelector means "this pod's + namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where + co-located is defined as running + on a node whose value of the label + with key topologyKey matches that + of any node on which any of the + selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in + the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, + the pod will not be scheduled onto the node. + If the affinity requirements specified by + this field cease to be met at some point during + pod execution (e.g. due to a pod label update), + the system may or may not try to eventually + evict the pod from its node. When there are + multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this pod + should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is + defined as running on a node whose value + of the label with key matches + that of any node on which a pod of the set + of pods is running + properties: + labelSelector: + description: A label query over a set + of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. items: - description: A label selector - requirement is a selector - that contains values, a - key, and an operator that - relates the key and values. - properties: - key: - description: key is the - label key that the selector - applies to. - type: string - operator: - description: operator - represents a key's relationship - to a set of values. - Valid operators are - In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is - an array of string values. - If the operator is In - or NotIn, the values - array must be non-empty. - If the operator is Exists - or DoesNotExist, the - values array must be - empty. This array is - replaced during a strategic - merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: type: string - description: matchLabels is - a map of {key,value} pairs. - A single {key,value} in the - matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", - the operator is "In", and - the values array contains - only "value". The requirements - are ANDed. - type: object + type: array + required: + - key + - operator type: object - namespaceSelector: - description: A label query over - the set of namespaces that the - term applies to. The term is applied - to the union of the namespaces - selected by this field and the - ones listed in the namespaces - field. null selector and null - or empty namespaces list means - "this pod's namespace". An empty - selector ({}) matches all namespaces. - This field is beta-level and is - only honored when PodAffinityNamespaceSelector - feature is enabled. + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set + of namespaces that the term applies + to. The term is applied to the union + of the namespaces selected by this field + and the ones listed in the namespaces + field. null selector and null or empty + namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + This field is beta-level and is only + honored when PodAffinityNamespaceSelector + feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. items: - description: A label selector - requirement is a selector - that contains values, a - key, and an operator that - relates the key and values. - properties: - key: - description: key is the - label key that the selector - applies to. - type: string - operator: - description: operator - represents a key's relationship - to a set of values. - Valid operators are - In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is - an array of string values. - If the operator is In - or NotIn, the values - array must be non-empty. - If the operator is Exists - or DoesNotExist, the - values array must be - empty. This array is - replaced during a strategic - merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: type: string - description: matchLabels is - a map of {key,value} pairs. - A single {key,value} in the - matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", - the operator is "In", and - the values array contains - only "value". The requirements - are ANDed. - type: object + type: array + required: + - key + - operator type: object - namespaces: - description: namespaces specifies - a static list of namespace names - that the term applies to. The - term is applied to the union of - the namespaces listed in this - field and the ones selected by - namespaceSelector. null or empty - namespaces list and null namespaceSelector - means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be - co-located (affinity) or not co-located - (anti-affinity) with the pods - matching the labelSelector in - the specified namespaces, where - co-located is defined as running - on a node whose value of the label - with key topologyKey matches that - of any node on which any of the - selected pods is running. Empty - topologyKey is not allowed. + type: array + matchLabels: + additionalProperties: type: string - required: - - topologyKey - type: object + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's + namespace" + items: + type: string type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling - rules (e.g. avoid putting this pod in the - same node, zone, etc. as some other pod(s)). + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling + rules (e.g. avoid putting this pod in the same + node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the anti-affinity + expressions specified by this field, but it + may choose a node that violates one or more + of the expressions. The node that is most + preferred is the one with the greatest sum + of weights, i.e. 
for each node that meets + all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity + expressions, etc.), compute a sum by iterating + through the elements of this field and adding + "weight" to the sum if the node has pods which + matches the corresponding podAffinityTerm; + the node(s) with the highest sum are the most + preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added + per-node to find the most preferred node(s) properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer - to schedule pods to nodes that satisfy - the anti-affinity expressions specified - by this field, but it may choose a node - that violates one or more of the expressions. - The node that is most preferred is the - one with the greatest sum of weights, - i.e. for each node that meets all of - the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity - expressions, etc.), compute a sum by - iterating through the elements of this - field and adding "weight" to the sum - if the node has pods which matches the - corresponding podAffinityTerm; the node(s) - with the highest sum are the most preferred. - items: - description: The weights of all of the - matched WeightedPodAffinityTerm fields - are added per-node to find the most - preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity - term, associated with the corresponding - weight. - properties: - labelSelector: - description: A label query over - a set of resources, in this - case pods. + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + properties: + labelSelector: + description: A label query over a + set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. items: - description: A label selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. - properties: - key: - description: key is - the label key that - the selector applies - to. - type: string - operator: - description: operator - represents a key's - relationship to - a set of values. - Valid operators - are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values - is an array of string - values. If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. This - array is replaced - during a strategic - merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: type: string - description: matchLabels - is a map of {key,value} - pairs. A single {key,value} - in the matchLabels map - is equivalent to an element - of matchExpressions, whose - key field is "key", the - operator is "In", and - the values array contains - only "value". The requirements - are ANDed. - type: object + type: array + required: + - key + - operator type: object - namespaceSelector: - description: A label query over - the set of namespaces that - the term applies to. The term - is applied to the union of - the namespaces selected by - this field and the ones listed - in the namespaces field. null - selector and null or empty - namespaces list means "this - pod's namespace". An empty - selector ({}) matches all - namespaces. This field is - beta-level and is only honored - when PodAffinityNamespaceSelector - feature is enabled. + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the + set of namespaces that the term + applies to. The term is applied + to the union of the namespaces selected + by this field and the ones listed + in the namespaces field. null selector + and null or empty namespaces list + means "this pod's namespace". An + empty selector ({}) matches all + namespaces. This field is beta-level + and is only honored when PodAffinityNamespaceSelector + feature is enabled. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. items: - description: A label selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. - properties: - key: - description: key is - the label key that - the selector applies - to. - type: string - operator: - description: operator - represents a key's - relationship to - a set of values. - Valid operators - are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values - is an array of string - values. If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. This - array is replaced - during a strategic - merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: type: string - description: matchLabels - is a map of {key,value} - pairs. A single {key,value} - in the matchLabels map - is equivalent to an element - of matchExpressions, whose - key field is "key", the - operator is "In", and - the values array contains - only "value". The requirements - are ANDed. - type: object + type: array + required: + - key + - operator type: object - namespaces: - description: namespaces specifies - a static list of namespace - names that the term applies - to. The term is applied to - the union of the namespaces - listed in this field and the - ones selected by namespaceSelector. - null or empty namespaces list - and null namespaceSelector - means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should - be co-located (affinity) or - not co-located (anti-affinity) - with the pods matching the - labelSelector in the specified - namespaces, where co-located - is defined as running on a - node whose value of the label - with key topologyKey matches - that of any node on which - any of the selected pods is - running. Empty topologyKey - is not allowed. + type: array + matchLabels: + additionalProperties: type: string - required: - - topologyKey - type: object - weight: - description: weight associated with - matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements - specified by this field are not met - at scheduling time, the pod will not - be scheduled onto the node. If the anti-affinity - requirements specified by this field - cease to be met at some point during - pod execution (e.g. due to a pod label - update), the system may or may not try - to eventually evict the pod from its - node. When there are multiple elements, - the lists of nodes corresponding to - each podAffinityTerm are intersected, - i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely - those matching the labelSelector relative - to the given namespace(s)) that this - pod should be co-located (affinity) - or not co-located (anti-affinity) - with, where co-located is defined - as running on a node whose value of - the label with key matches - that of any node on which a pod of - the set of pods is running - properties: - labelSelector: - description: A label query over - a set of resources, in this case - pods. - properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies + a static list of namespace names + that the term applies to. The term + is applied to the union of the namespaces + listed in this field and the ones + selected by namespaceSelector. 
null + or empty namespaces list and null + namespaceSelector means "this pod's + namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where + co-located is defined as running + on a node whose value of the label + with key topologyKey matches that + of any node on which any of the + selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in + the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements + specified by this field are not met at scheduling + time, the pod will not be scheduled onto the + node. If the anti-affinity requirements specified + by this field cease to be met at some point + during pod execution (e.g. due to a pod label + update), the system may or may not try to + eventually evict the pod from its node. When + there are multiple elements, the lists of + nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this pod + should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is + defined as running on a node whose value + of the label with key matches + that of any node on which a pod of the set + of pods is running + properties: + labelSelector: + description: A label query over a set + of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set + of namespaces that the term applies + to. The term is applied to the union + of the namespaces selected by this field + and the ones listed in the namespaces + field. null selector and null or empty + namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. 
+ This field is beta-level and is only + honored when PodAffinityNamespaceSelector + feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's + namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + allowStargateOnDataNodes: + default: false + description: 'AllowStargateOnDataNodes allows Stargate + pods to be scheduled on a worker node already hosting + data pods for this datacenter. The default is false, + which means that Stargate pods will be scheduled on + separate worker nodes. Note: if the datacenter pods + have HostNetwork:true, then the Stargate pods will + inherit of it, in which case it is possible that Stargate + nodes won''t be allowed to sit on data nodes even + if this property is set to true, because of port conflicts + on the same IP address.' + type: boolean + cassandraConfigMapRef: + description: CassandraConfigMapRef is a reference to + a ConfigMap that holds Cassandra configuration. The + map should have a key named cassandra_yaml. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + heapSize: + anyOf: + - type: integer + - type: string + default: 256Mi + description: 'HeapSize sets the JVM heap size to use + for Stargate. 
If no Resources are specified, this + value will also be used to set a default memory request + and limit for the Stargate pods: these will be set + to HeapSize x2 and x4, respectively.' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + livenessProbe: + description: LivenessProbe sets the Stargate liveness + probe. Leave nil to use defaults. + properties: + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') in + the container's filesystem. The command is + simply exec'd, it is not run inside a shell, + so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is + treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to + the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is + 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having + failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: + implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + nodeSelector: + additionalProperties: + type: string + description: NodeSelector is an optional map of label + keys and values to restrict the scheduling of Stargate + nodes to workers with matching labels. Leave nil to + let the controller reuse the same node selectors used + for data pods in this datacenter, if any. See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + type: object + racks: + description: Racks allow customizing Stargate characteristics + for specific racks in the datacenter. + items: + description: StargateRackTemplate defines custom rules + for Stargate pods in a given rack. These rules will + be merged with rules defined at datacenter level + in a StargateDatacenterTemplate; rack-level rules + have precedence over datacenter-level ones. + properties: + affinity: + description: Affinity is the affinity to apply + to all the Stargate pods. Leave nil to let the + controller reuse the same affinity rules used + for data pods in this datacenter, if any. See + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity + properties: + nodeAffinity: + description: Describes node affinity scheduling + rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer + to schedule pods to nodes that satisfy + the affinity expressions specified by + this field, but it may choose a node + that violates one or more of the expressions. + The node that is most preferred is the + one with the greatest sum of weights, + i.e. for each node that meets all of + the scheduling requirements (resource + request, requiredDuringScheduling affinity + expressions, etc.), compute a sum by + iterating through the elements of this + field and adding "weight" to the sum + if the node matches the corresponding + matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling + term matches all objects with implicit + weight 0 (i.e. it's a no-op). A null + preferred scheduling term matches + no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, + associated with the corresponding + weight. 
+ properties: + matchExpressions: + description: A list of node + selector requirements by node's + labels. + items: + description: A node selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array + of string values. If + the operator is In or + NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the + values array must be + empty. If the operator + is Gt or Lt, the values + array must have a single + element, which will + be interpreted as an + integer. This array + is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node + selector requirements by node's + fields. + items: + description: A node selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array + of string values. If + the operator is In or + NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the + values array must be + empty. If the operator + is Gt or Lt, the values + array must have a single + element, which will + be interpreted as an + integer. This array + is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with + matching the corresponding nodeSelectorTerm, + in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements + specified by this field are not met + at scheduling time, the pod will not + be scheduled onto the node. If the affinity + requirements specified by this field + cease to be met at some point during + pod execution (e.g. due to an update), + the system may or may not try to eventually + evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node + selector terms. The terms are ORed. + items: + description: A null or empty node + selector term matches no objects. + The requirements of them are ANDed. + The TopologySelectorTerm type + implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node + selector requirements by node's + labels. + items: + description: A node selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists, DoesNotExist. + Gt, and Lt. 
+ type: string + values: + description: An array + of string values. If + the operator is In or + NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the + values array must be + empty. If the operator + is Gt or Lt, the values + array must have a single + element, which will + be interpreted as an + integer. This array + is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node + selector requirements by node's + fields. + items: + description: A node selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array + of string values. If + the operator is In or + NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the + values array must be + empty. If the operator + is Gt or Lt, the values + array must have a single + element, which will + be interpreted as an + integer. This array + is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling + rules (e.g. co-locate this pod in the same + node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer + to schedule pods to nodes that satisfy + the affinity expressions specified by + this field, but it may choose a node + that violates one or more of the expressions. + The node that is most preferred is the + one with the greatest sum of weights, + i.e. for each node that meets all of + the scheduling requirements (resource + request, requiredDuringScheduling affinity + expressions, etc.), compute a sum by + iterating through the elements of this + field and adding "weight" to the sum + if the node has pods which matches the + corresponding podAffinityTerm; the node(s) + with the highest sum are the most preferred. + items: + description: The weights of all of the + matched WeightedPodAffinityTerm fields + are added per-node to find the most + preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + properties: + labelSelector: + description: A label query over + a set of resources, in this + case pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: operator + represents a key's + relationship to + a set of values. + Valid operators + are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array of string + values. 
If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. This + array is replaced + during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels + is a map of {key,value} + pairs. A single {key,value} + in the matchLabels map + is equivalent to an element + of matchExpressions, whose + key field is "key", the + operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over + the set of namespaces that + the term applies to. The term + is applied to the union of + the namespaces selected by + this field and the ones listed + in the namespaces field. null + selector and null or empty + namespaces list means "this + pod's namespace". An empty + selector ({}) matches all + namespaces. This field is + beta-level and is only honored + when PodAffinityNamespaceSelector + feature is enabled. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: operator + represents a key's + relationship to + a set of values. + Valid operators + are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array of string + values. If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. This + array is replaced + during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels + is a map of {key,value} + pairs. A single {key,value} + in the matchLabels map + is equivalent to an element + of matchExpressions, whose + key field is "key", the + operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies + a static list of namespace + names that the term applies + to. The term is applied to + the union of the namespaces + listed in this field and the + ones selected by namespaceSelector. + null or empty namespaces list + and null namespaceSelector + means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should + be co-located (affinity) or + not co-located (anti-affinity) + with the pods matching the + labelSelector in the specified + namespaces, where co-located + is defined as running on a + node whose value of the label + with key topologyKey matches + that of any node on which + any of the selected pods is + running. Empty topologyKey + is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with + matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements + specified by this field are not met + at scheduling time, the pod will not + be scheduled onto the node. If the affinity + requirements specified by this field + cease to be met at some point during + pod execution (e.g. due to a pod label + update), the system may or may not try + to eventually evict the pod from its + node. When there are multiple elements, + the lists of nodes corresponding to + each podAffinityTerm are intersected, + i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this + pod should be co-located (affinity) + or not co-located (anti-affinity) + with, where co-located is defined + as running on a node whose value of + the label with key matches + that of any node on which a pod of + the set of pods is running + properties: + labelSelector: + description: A label query over + a set of resources, in this case + pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator + represents a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is + an array of string values. + If the operator is In + or NotIn, the values + array must be non-empty. + If the operator is Exists + or DoesNotExist, the + values array must be + empty. This array is + replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is + a map of {key,value} pairs. + A single {key,value} in the + matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", + the operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over + the set of namespaces that the + term applies to. The term is applied + to the union of the namespaces + selected by this field and the + ones listed in the namespaces + field. null selector and null + or empty namespaces list means + "this pod's namespace". An empty + selector ({}) matches all namespaces. + This field is beta-level and is + only honored when PodAffinityNamespaceSelector + feature is enabled. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator + represents a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists and + DoesNotExist. 
+ type: string + values: + description: values is + an array of string values. + If the operator is In + or NotIn, the values + array must be non-empty. + If the operator is Exists + or DoesNotExist, the + values array must be + empty. This array is + replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is + a map of {key,value} pairs. + A single {key,value} in the + matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", + the operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies + a static list of namespace names + that the term applies to. The + term is applied to the union of + the namespaces listed in this + field and the ones selected by + namespaceSelector. null or empty + namespaces list and null namespaceSelector + means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be + co-located (affinity) or not co-located + (anti-affinity) with the pods + matching the labelSelector in + the specified namespaces, where + co-located is defined as running + on a node whose value of the label + with key topologyKey matches that + of any node on which any of the + selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling + rules (e.g. avoid putting this pod in the + same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer + to schedule pods to nodes that satisfy + the anti-affinity expressions specified + by this field, but it may choose a node + that violates one or more of the expressions. + The node that is most preferred is the + one with the greatest sum of weights, + i.e. for each node that meets all of + the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity + expressions, etc.), compute a sum by + iterating through the elements of this + field and adding "weight" to the sum + if the node has pods which matches the + corresponding podAffinityTerm; the node(s) + with the highest sum are the most preferred. + items: + description: The weights of all of the + matched WeightedPodAffinityTerm fields + are added per-node to find the most + preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + properties: + labelSelector: + description: A label query over + a set of resources, in this + case pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: operator + represents a key's + relationship to + a set of values. + Valid operators + are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array of string + values. 
If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. This + array is replaced + during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels + is a map of {key,value} + pairs. A single {key,value} + in the matchLabels map + is equivalent to an element + of matchExpressions, whose + key field is "key", the + operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over + the set of namespaces that + the term applies to. The term + is applied to the union of + the namespaces selected by + this field and the ones listed + in the namespaces field. null + selector and null or empty + namespaces list means "this + pod's namespace". An empty + selector ({}) matches all + namespaces. This field is + beta-level and is only honored + when PodAffinityNamespaceSelector + feature is enabled. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: operator + represents a key's + relationship to + a set of values. + Valid operators + are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array of string + values. If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. This + array is replaced + during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels + is a map of {key,value} + pairs. A single {key,value} + in the matchLabels map + is equivalent to an element + of matchExpressions, whose + key field is "key", the + operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies + a static list of namespace + names that the term applies + to. The term is applied to + the union of the namespaces + listed in this field and the + ones selected by namespaceSelector. + null or empty namespaces list + and null namespaceSelector + means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should + be co-located (affinity) or + not co-located (anti-affinity) + with the pods matching the + labelSelector in the specified + namespaces, where co-located + is defined as running on a + node whose value of the label + with key topologyKey matches + that of any node on which + any of the selected pods is + running. Empty topologyKey + is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with + matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements + specified by this field are not met + at scheduling time, the pod will not + be scheduled onto the node. If the anti-affinity + requirements specified by this field + cease to be met at some point during + pod execution (e.g. due to a pod label + update), the system may or may not try + to eventually evict the pod from its + node. When there are multiple elements, + the lists of nodes corresponding to + each podAffinityTerm are intersected, + i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this + pod should be co-located (affinity) + or not co-located (anti-affinity) + with, where co-located is defined + as running on a node whose value of + the label with key matches + that of any node on which a pod of + the set of pods is running + properties: + labelSelector: + description: A label query over + a set of resources, in this case + pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: description: A label selector requirement is a selector that contains values, a @@ -2595,1060 +4246,2049 @@ spec: topologyKey is not allowed. type: string required: - - topologyKey + - topologyKey + type: object + type: array + type: object + type: object + allowStargateOnDataNodes: + default: false + description: 'AllowStargateOnDataNodes allows + Stargate pods to be scheduled on a worker node + already hosting data pods for this datacenter. + The default is false, which means that Stargate + pods will be scheduled on separate worker nodes. + Note: if the datacenter pods have HostNetwork:true, + then the Stargate pods will inherit of it, in + which case it is possible that Stargate nodes + won''t be allowed to sit on data nodes even + if this property is set to true, because of + port conflicts on the same IP address.' + type: boolean + cassandraConfigMapRef: + description: CassandraConfigMapRef is a reference + to a ConfigMap that holds Cassandra configuration. + The map should have a key named cassandra_yaml. + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + heapSize: + anyOf: + - type: integer + - type: string + default: 256Mi + description: 'HeapSize sets the JVM heap size + to use for Stargate. If no Resources are specified, + this value will also be used to set a default + memory request and limit for the Stargate pods: + these will be set to HeapSize x2 and x4, respectively.' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + livenessProbe: + description: LivenessProbe sets the Stargate liveness + probe. Leave nil to use defaults. + properties: + exec: + description: One and only one of the following + should be specified. Exec specifies the + action to take. + properties: + command: + description: Command is the command line + to execute inside the container, the + working directory for the command is + root ('/') in the container's filesystem. 
+ The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. To use a shell, you need to explicitly + call out to that shell. Exit status + of 0 is treated as live/healthy and + non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures + for the probe to be considered failed after + having succeeded. Defaults to 3. Minimum + value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a + custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the + container has started before liveness probes + are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum + value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes + for the probe to be considered successful + after having failed. Defaults to 1. Must + be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action + involving a TCP port. TCP hooks not yet + supported TODO: implement a realistic TCP + lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds + the pod needs to terminate gracefully upon + probe failure. The grace period is the duration + in seconds after the processes running in + the pod are sent a termination signal and + the time when the processes are forcibly + halted with a kill signal. Set this value + longer than the expected cleanup time for + your process. If this value is nil, the + pod's terminationGracePeriodSeconds will + be used. Otherwise, this value overrides + the value provided by the pod spec. Value + must be non-negative integer. The value + zero indicates stop immediately via the + kill signal (no opportunity to shut down). 
+ This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which + the probe times out. Defaults to 1 second. + Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name is the rack name. It must correspond + to an existing rack name in the CassandraDatacenter + resource where Stargate is being deployed, otherwise + it will be ignored. + minLength: 2 + type: string + nodeSelector: + additionalProperties: + type: string + description: NodeSelector is an optional map of + label keys and values to restrict the scheduling + of Stargate nodes to workers with matching labels. + Leave nil to let the controller reuse the same + node selectors used for data pods in this datacenter, + if any. See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + type: object + readinessProbe: + description: ReadinessProbe sets the Stargate + readiness probe. Leave nil to use defaults. + properties: + exec: + description: One and only one of the following + should be specified. Exec specifies the + action to take. + properties: + command: + description: Command is the command line + to execute inside the container, the + working directory for the command is + root ('/') in the container's filesystem. + The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. To use a shell, you need to explicitly + call out to that shell. Exit status + of 0 is treated as live/healthy and + non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures + for the probe to be considered failed after + having succeeded. Defaults to 3. Minimum + value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a + custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value type: object type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the + container has started before liveness probes + are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum + value is 1. 
+ format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes + for the probe to be considered successful + after having failed. Defaults to 1. Must + be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action + involving a TCP port. TCP hooks not yet + supported TODO: implement a realistic TCP + lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds + the pod needs to terminate gracefully upon + probe failure. The grace period is the duration + in seconds after the processes running in + the pod are sent a termination signal and + the time when the processes are forcibly + halted with a kill signal. Set this value + longer than the expected cleanup time for + your process. If this value is nil, the + pod's terminationGracePeriodSeconds will + be used. Otherwise, this value overrides + the value provided by the pod spec. Value + must be non-negative integer. The value + zero indicates stop immediately via the + kill signal (no opportunity to shut down). + This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which + the probe times out. Defaults to 1 second. + Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: Resources is the Kubernetes resource + requests and limits to apply, per Stargate pod. + Leave nil to use defaults. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum + amount of compute resources allowed. More + info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. If + Requests is omitted for a container, it + defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object - allowStargateOnDataNodes: - default: false - description: 'AllowStargateOnDataNodes allows - Stargate pods to be scheduled on a worker node - already hosting data pods for this datacenter. - The default is false, which means that Stargate - pods will be scheduled on separate worker nodes. 
- Note: if the datacenter pods have HostNetwork:true, - then the Stargate pods will inherit of it, in - which case it is possible that Stargate nodes - won''t be allowed to sit on data nodes even - if this property is set to true, because of - port conflicts on the same IP address.' - type: boolean - cassandraConfigMapRef: - description: CassandraConfigMapRef is a reference - to a ConfigMap that holds Cassandra configuration. - The map should have a key named cassandra_yaml. + serviceAccount: + default: default + description: ServiceAccount is the service account + name to use for Stargate pods. + type: string + stargateContainerImage: + description: StargateContainerImage is the image + characteristics to use for Stargate containers. + Leave nil to use a default image. properties: - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + pullPolicy: + default: IfNotPresent + description: PullPolicy describes a policy + for if/when to pull a container image type: string + registry: + default: docker.io + type: string + repository: + type: string + tag: + default: latest + type: string + required: + - repository type: object - heapSize: + tolerations: + description: Tolerations are tolerations to apply + to the Stargate pods. Leave nil to let the controller + reuse the same tolerations used for data pods + in this datacenter, if any. See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + items: + description: The pod this Toleration is attached + to tolerates any taint that matches the triple + using the matching operator + . + properties: + effect: + description: Effect indicates the taint + effect to match. Empty means match all + taint effects. When specified, allowed + values are NoSchedule, PreferNoSchedule + and NoExecute. + type: string + key: + description: Key is the taint key that the + toleration applies to. Empty means match + all taint keys. If the key is empty, operator + must be Exists; this combination means + to match all values and all keys. + type: string + operator: + description: Operator represents a key's + relationship to the value. Valid operators + are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, + so that a pod can tolerate all taints + of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents + the period of time the toleration (which + must be of effect NoExecute, otherwise + this field is ignored) tolerates the taint. + By default, it is not set, which means + tolerate the taint forever (do not evict). + Zero and negative values will be treated + as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the + toleration matches to. If the operator + is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + required: + - name + type: object + type: array + readinessProbe: + description: ReadinessProbe sets the Stargate readiness + probe. Leave nil to use defaults. + properties: + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') in + the container's filesystem. 
The command is + simply exec'd, it is not run inside a shell, + so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is + treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to + the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is + 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having + failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: + implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature gate. 
+ Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: Resources is the Kubernetes resource requests + and limits to apply, per Stargate pod. Leave nil to + use defaults. + properties: + limits: + additionalProperties: anyOf: - type: integer - type: string - default: 256Mi - description: 'HeapSize sets the JVM heap size - to use for Stargate. If no Resources are specified, - this value will also be used to set a default - memory request and limit for the Stargate pods: - these will be set to HeapSize x2 and x4, respectively.' pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - livenessProbe: - description: LivenessProbe sets the Stargate liveness - probe. Leave nil to use defaults. + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is + omitted for a container, it defaults to Limits + if that is explicitly specified, otherwise to + an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + serviceAccount: + default: default + description: ServiceAccount is the service account name + to use for Stargate pods. + type: string + size: + default: 1 + description: Size is the number of Stargate instances + to deploy in each datacenter. They will be spread + evenly across racks. + format: int32 + minimum: 1 + type: integer + stargateContainerImage: + description: StargateContainerImage is the image characteristics + to use for Stargate containers. Leave nil to use a + default image. + properties: + pullPolicy: + default: IfNotPresent + description: PullPolicy describes a policy for if/when + to pull a container image + type: string + registry: + default: docker.io + type: string + repository: + type: string + tag: + default: latest + type: string + required: + - repository + type: object + tolerations: + description: Tolerations are tolerations to apply to + the Stargate pods. Leave nil to let the controller + reuse the same tolerations used for data pods in this + datacenter, if any. See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + items: + description: The pod this Toleration is attached to + tolerates any taint that matches the triple + using the matching operator . + properties: + effect: + description: Effect indicates the taint effect + to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, + PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. 
+ If the key is empty, operator must be Exists; + this combination means to match all values and + all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and + Equal. Defaults to Equal. Exists is equivalent + to wildcard for value, so that a pod can tolerate + all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the + period of time the toleration (which must be + of effect NoExecute, otherwise this field is + ignored) tolerates the taint. By default, it + is not set, which means tolerate the taint forever + (do not evict). Zero and negative values will + be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value + should be empty, otherwise just a regular string. + type: string + type: object + type: array + required: + - size + type: object + storageConfig: + description: StorageConfig is the persistent storage requirements + for each Cassandra pod. This includes everything under + /var/lib/cassandra, namely the commit log and data directories. + properties: + additionalVolumes: + items: + description: StorageConfig defines additional storage + configurations + properties: + mountPath: + description: Mount path into cassandra container + type: string + name: + description: Name of the pvc + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + type: string + pvcSpec: + description: Persistent volume claim spec properties: - exec: - description: One and only one of the following - should be specified. Exec specifies the - action to take. + accessModes: + description: 'AccessModes contains the desired + access modes the volume should have. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify + either: * An existing VolumeSnapshot object + (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller + can support the specified data source, it + will create a new volume based on the contents + of the specified data source. If the AnyVolumeDataSource + feature gate is enabled, this field will + always have the same contents as the DataSourceRef + field.' properties: - command: - description: Command is the command line - to execute inside the container, the - working directory for the command is - root ('/') in the container's filesystem. - The command is simply exec'd, it is - not run inside a shell, so traditional - shell instructions ('|', etc) won't - work. To use a shell, you need to explicitly - call out to that shell. Exit status - of 0 is treated as live/healthy and - non-zero is unhealthy. - items: - type: string - type: array + apiGroup: + description: APIGroup is the group for + the resource being referenced. If APIGroup + is not specified, the specified Kind + must be in the core API group. For any + other third-party types, APIGroup is + required. 
+ type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'Specifies the object from which + to populate the volume with data, if a non-empty + volume is desired. This may be any local + object from a non-empty API group (non core + object) or a PersistentVolumeClaim object. + When this field is specified, volume binding + will only succeed if the type of the specified + object matches some installed volume populator + or dynamic provisioner. This field will + replace the functionality of the DataSource + field and as such if both fields are non-empty, + they must have the same value. For backwards + compatibility, both fields (DataSource and + DataSourceRef) will be set to the same value + automatically if one of them is empty and + the other is non-empty. There are two important + differences between DataSource and DataSourceRef: + * While DataSource only allows two specific + types of objects, DataSourceRef allows + any non-core object, as well as PersistentVolumeClaim + objects. * While DataSource ignores disallowed + values (dropping them), DataSourceRef preserves + all values, and generates an error if a + disallowed value is specified. (Alpha) + Using this field requires the AnyVolumeDataSource + feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for + the resource being referenced. If APIGroup + is not specified, the specified Kind + must be in the core API group. For any + other third-party types, APIGroup is + required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum + resources the volume should have. More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum + amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. + If Requests is omitted for a container, + it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object type: object - failureThreshold: - description: Minimum consecutive failures - for the probe to be considered failed after - having succeeded. Defaults to 3. Minimum - value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request - to perform. + selector: + description: A label query over volumes to + consider for binding. 
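# A minimal sketch of the dataSourceRef semantics described above: a pvcSpec
# that pre-populates its volume from an existing VolumeSnapshot. The snapshot
# name is hypothetical, and the AnyVolumeDataSource feature gate must be
# enabled, per the description:
#
#   pvcSpec:
#     accessModes: ["ReadWriteOnce"]
#     dataSourceRef:
#       apiGroup: snapshot.storage.k8s.io
#       kind: VolumeSnapshot
#       name: cassandra-data-snap   # hypothetical snapshot name
#     resources:
#       requests:
#         storage: 5Gi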
properties: - host: - description: Host name to connect to, - defaults to the pod IP. You probably - want to set "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in - the request. HTTP allows repeated headers. + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. items: - description: HTTPHeader describes a - custom header to be used in HTTP probes + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. properties: - name: - description: The header field name + key: + description: key is the label key + that the selector applies to. type: string - value: - description: The header field value + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array required: - - name - - value + - key + - operator type: object type: array - path: - description: Path to access on the HTTP - server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. - type: string - required: - - port + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object type: object - initialDelaySeconds: - description: 'Number of seconds after the - container has started before liveness probes - are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform - the probe. Default to 10 seconds. Minimum - value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes - for the probe to be considered successful - after having failed. Defaults to 1. Must - be 1 for liveness and startup. Minimum value - is 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet - supported TODO: implement a realistic TCP - lifecycle hook' + storageClassName: + description: 'Name of the StorageClass required + by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type + of volume is required by the claim. Value + of Filesystem is implied when not included + in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference + to the PersistentVolume backing this claim. 
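# The equivalence stated above means the following two selectors match the
# same set of volumes; the label key/value are hypothetical:
#
#   selector:
#     matchLabels:
#       tier: storage
#
#   # ...is shorthand for:
#   selector:
#     matchExpressions:
#       - key: tier
#         operator: In
#         values: ["storage"]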
+ type: string + type: object + required: + - mountPath + - name + - pvcSpec + type: object + type: array + cassandraDataVolumeClaimSpec: + description: PersistentVolumeClaimSpec describes the + common attributes of storage devices and allows a + Source for provider-specific attributes + properties: + accessModes: + description: 'AccessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the + provisioner or an external controller can support + the specified data source, it will create a new + volume based on the contents of the specified + data source. If the AnyVolumeDataSource feature + gate is enabled, this field will always have the + same contents as the DataSourceRef field.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'Specifies the object from which to + populate the volume with data, if a non-empty + volume is desired. This may be any local object + from a non-empty API group (non core object) or + a PersistentVolumeClaim object. When this field + is specified, volume binding will only succeed + if the type of the specified object matches some + installed volume populator or dynamic provisioner. + This field will replace the functionality of the + DataSource field and as such if both fields are + non-empty, they must have the same value. For + backwards compatibility, both fields (DataSource + and DataSourceRef) will be set to the same value + automatically if one of them is empty and the + other is non-empty. There are two important differences + between DataSource and DataSourceRef: * While + DataSource only allows two specific types of objects, + DataSourceRef allows any non-core object, as + well as PersistentVolumeClaim objects. * While + DataSource ignores disallowed values (dropping + them), DataSourceRef preserves all values, and + generates an error if a disallowed value is specified. + (Alpha) Using this field requires the AnyVolumeDataSource + feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. If Requests + is omitted for a container, it defaults to + Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: A label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' + key: + description: key is the label key that + the selector applies to. type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array required: - - port + - key + - operator type: object - terminationGracePeriodSeconds: - description: Optional duration in seconds - the pod needs to terminate gracefully upon - probe failure. The grace period is the duration - in seconds after the processes running in - the pod are sent a termination signal and - the time when the processes are forcibly - halted with a kill signal. Set this value - longer than the expected cleanup time for - your process. If this value is nil, the - pod's terminationGracePeriodSeconds will - be used. Otherwise, this value overrides - the value provided by the pod spec. Value - must be non-negative integer. The value - zero indicates stop immediately via the - kill signal (no opportunity to shut down). - This is a beta field and requires enabling - ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: 'Number of seconds after which - the probe times out. Defaults to 1 second. - Minimum value is 1. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - name: - description: Name is the rack name. It must correspond - to an existing rack name in the CassandraDatacenter - resource where Stargate is being deployed, otherwise - it will be ignored. - minLength: 2 + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required + by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem + is implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + type: object + systemLoggerResources: + description: SystemLoggerResources is the cpu and memory + resources for the server-system-logger container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of + compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is omitted + for a container, it defaults to Limits if that is + explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + required: + - size + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + networking: + description: Networking enables host networking and configures + NodePort ports. + properties: + hostNetwork: + type: boolean + nodePort: + properties: + internode: + type: integer + internodeSSL: + type: integer + native: + type: integer + nativeSSL: + type: integer + type: object + type: object + racks: + description: Racks is a list of named racks. Note that racks are + used to create node affinity. + items: + description: Rack ... + properties: + name: + description: The rack name + minLength: 2 + type: string + nodeAffinityLabels: + additionalProperties: + type: string + description: NodeAffinityLabels to pin the rack, using node + affinity + type: object + zone: + description: Deprecated. Use nodeAffinityLabels instead. + Zone name to pin the rack, using node affinity + type: string + required: + - name + type: object + type: array + resources: + description: Resources is the cpu and memory resources for the + cassandra container.
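# A minimal sketch of the networking and racks fields just defined. Ports and
# labels are hypothetical; zone is shown nowhere because the schema deprecates
# it in favor of nodeAffinityLabels:
#
#   networking:
#     hostNetwork: false
#     nodePort:
#       native: 30942
#       internode: 30940
#   racks:
#     - name: rack1
#       nodeAffinityLabels:
#         topology.kubernetes.io/zone: us-east-1a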
+ properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + serverImage: + description: ServerImage is the image for the cassandra container. + Note that this should be a management-api image. If left empty + the operator will choose a default image based on ServerVersion. + type: string + serverVersion: + description: ServerVersion is the Cassandra version. + pattern: (3\.11\.\d+)|(4\.0\.\d+) + type: string + storageConfig: + description: StorageConfig is the persistent storage requirements + for each Cassandra pod. This includes everything under /var/lib/cassandra, + namely the commit log and data directories. + properties: + additionalVolumes: + items: + description: StorageConfig defines additional storage configurations + properties: + mountPath: + description: Mount path into cassandra container + type: string + name: + description: Name of the pvc + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + type: string + pvcSpec: + description: Persistent volume claim spec + properties: + accessModes: + description: 'AccessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: type: string - nodeSelector: - additionalProperties: + type: array + dataSource: + description: 'This field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the + provisioner or an external controller can support + the specified data source, it will create a new + volume based on the contents of the specified + data source. If the AnyVolumeDataSource feature + gate is enabled, this field will always have the + same contents as the DataSourceRef field.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. type: string - description: NodeSelector is an optional map of - label keys and values to restrict the scheduling - of Stargate nodes to workers with matching labels. - Leave nil to let the controller reuse the same - node selectors used for data pods in this datacenter, - if any. See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - type: object - readinessProbe: - description: ReadinessProbe sets the Stargate - readiness probe. Leave nil to use defaults. 
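# Assuming the fields above sit under spec.cassandra of a K8ssandraCluster, a
# minimal sketch (serverVersion must match the (3\.11\.\d+)|(4\.0\.\d+)
# pattern; serverImage is left empty so the operator picks a default
# management-api image):
#
#   cassandra:
#     serverVersion: "4.0.1"
#     resources:
#       requests:
#         cpu: "1"
#         memory: 2Gi
#       limits:
#         memory: 2Gi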
- properties: - exec: - description: One and only one of the following - should be specified. Exec specifies the - action to take. - properties: - command: - description: Command is the command line - to execute inside the container, the - working directory for the command is - root ('/') in the container's filesystem. - The command is simply exec'd, it is - not run inside a shell, so traditional - shell instructions ('|', etc) won't - work. To use a shell, you need to explicitly - call out to that shell. Exit status - of 0 is treated as live/healthy and - non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures - for the probe to be considered failed after - having succeeded. Defaults to 3. Minimum - value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: Host name to connect to, - defaults to the pod IP. You probably - want to set "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in - the request. HTTP allows repeated headers. - items: - description: HTTPHeader describes a - custom header to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP - server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the - container has started before liveness probes - are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform - the probe. Default to 10 seconds. Minimum - value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes - for the probe to be considered successful - after having failed. Defaults to 1. Must - be 1 for liveness and startup. Minimum value - is 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet - supported TODO: implement a realistic TCP - lifecycle hook' + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'Specifies the object from which to + populate the volume with data, if a non-empty + volume is desired. This may be any local object + from a non-empty API group (non core object) or + a PersistentVolumeClaim object. When this field + is specified, volume binding will only succeed + if the type of the specified object matches some + installed volume populator or dynamic provisioner. + This field will replace the functionality of the + DataSource field and as such if both fields are + non-empty, they must have the same value. 
For + backwards compatibility, both fields (DataSource + and DataSourceRef) will be set to the same value + automatically if one of them is empty and the + other is non-empty. There are two important differences + between DataSource and DataSourceRef: * While + DataSource only allows two specific types of objects, + DataSourceRef allows any non-core object, as + well as PersistentVolumeClaim objects. * While + DataSource ignores disallowed values (dropping + them), DataSourceRef preserves all values, and + generates an error if a disallowed value is specified. + (Alpha) Using this field requires the AnyVolumeDataSource + feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. If Requests + is omitted for a container, it defaults to + Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: A label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' + key: + description: key is the label key that + the selector applies to. type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. 
This + array is replaced during a strategic + merge patch. + items: + type: string + type: array required: - - port - type: object - terminationGracePeriodSeconds: - description: Optional duration in seconds - the pod needs to terminate gracefully upon - probe failure. The grace period is the duration - in seconds after the processes running in - the pod are sent a termination signal and - the time when the processes are forcibly - halted with a kill signal. Set this value - longer than the expected cleanup time for - your process. If this value is nil, the - pod's terminationGracePeriodSeconds will - be used. Otherwise, this value overrides - the value provided by the pod spec. Value - must be non-negative integer. The value - zero indicates stop immediately via the - kill signal (no opportunity to shut down). - This is a beta field and requires enabling - ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: 'Number of seconds after which - the probe times out. Defaults to 1 second. - Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - resources: - description: Resources is the Kubernetes resource - requests and limits to apply, per Stargate pod. - Leave nil to use defaults. - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum - amount of compute resources allowed. More - info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum - amount of compute resources required. If - Requests is omitted for a container, it - defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + - key + - operator type: object - type: object - serviceAccount: - default: default - description: ServiceAccount is the service account - name to use for Stargate pods. - type: string - stargateContainerImage: - description: StargateContainerImage is the image - characteristics to use for Stargate containers. - Leave nil to use a default image. - properties: - pullPolicy: - default: IfNotPresent - description: PullPolicy describes a policy - for if/when to pull a container image - type: string - registry: - default: docker.io - type: string - repository: - type: string - tag: - default: latest + type: array + matchLabels: + additionalProperties: type: string - required: - - repository - type: object - tolerations: - description: Tolerations are tolerations to apply - to the Stargate pods. Leave nil to let the controller - reuse the same tolerations used for data pods - in this datacenter, if any. 
See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - items: - description: The pod this Toleration is attached - to tolerates any taint that matches the triple - using the matching operator - . - properties: - effect: - description: Effect indicates the taint - effect to match. Empty means match all - taint effects. When specified, allowed - values are NoSchedule, PreferNoSchedule - and NoExecute. - type: string - key: - description: Key is the taint key that the - toleration applies to. Empty means match - all taint keys. If the key is empty, operator - must be Exists; this combination means - to match all values and all keys. - type: string - operator: - description: Operator represents a key's - relationship to the value. Valid operators - are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, - so that a pod can tolerate all taints - of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents - the period of time the toleration (which - must be of effect NoExecute, otherwise - this field is ignored) tolerates the taint. - By default, it is not set, which means - tolerate the taint forever (do not evict). - Zero and negative values will be treated - as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the - toleration matches to. If the operator - is Exists, the value should be empty, - otherwise just a regular string. - type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. type: object - type: array - required: - - name + type: object + storageClassName: + description: 'Name of the StorageClass required + by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem + is implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - mountPath + - name + - pvcSpec + type: object + type: array + cassandraDataVolumeClaimSpec: + description: PersistentVolumeClaimSpec describes the common + attributes of storage devices and allows a Source for provider-specific + attributes + properties: + accessModes: + description: 'AccessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the provisioner + or an external controller can support the specified + data source, it will create a new volume based on the + contents of the specified data source. If the AnyVolumeDataSource + feature gate is enabled, this field will always have + the same contents as the DataSourceRef field.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. 
If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'Specifies the object from which to populate + the volume with data, if a non-empty volume is desired. + This may be any local object from a non-empty API group + (non core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only + succeed if the type of the specified object matches + some installed volume populator or dynamic provisioner. + This field will replace the functionality of the DataSource + field and as such if both fields are non-empty, they + must have the same value. For backwards compatibility, + both fields (DataSource and DataSourceRef) will be set + to the same value automatically if one of them is empty + and the other is non-empty. There are two important + differences between DataSource and DataSourceRef: * + While DataSource only allows two specific types of objects, + DataSourceRef allows any non-core object, as well + as PersistentVolumeClaim objects. * While DataSource + ignores disallowed values (dropping them), DataSourceRef preserves + all values, and generates an error if a disallowed value + is specified. (Alpha) Using this field requires the + AnyVolumeDataSource feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object - type: array - readinessProbe: - description: ReadinessProbe sets the Stargate readiness - probe. Leave nil to use defaults. - properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is omitted + for a container, it defaults to Limits if that is + explicitly specified, otherwise to an implementation-defined + value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: A label query over volumes to consider for + binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. properties: - command: - description: Command is the command line to - execute inside the container, the working - directory for the command is root ('/') in - the container's filesystem. The command is - simply exec'd, it is not run inside a shell, - so traditional shell instructions ('|', etc) - won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is - treated as live/healthy and non-zero is unhealthy. + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. items: type: string type: array + required: + - key + - operator type: object - failureThreshold: - description: Minimum consecutive failures for the - probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request - to perform. + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the + claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is + required by the claim. Value of Filesystem is implied + when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference to the + PersistentVolume backing this claim. + type: string + type: object + type: object + superuserSecret: + description: SuperuserSecretName allows to override the default + super user secret + type: string + systemLoggerResources: + description: SystemLoggerResources is the cpu and memory resources + for the server-system-logger container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + reaper: + description: Reaper defines the desired deployment characteristics + for Reaper in this K8ssandraCluster. If this is non-nil, Reaper + will be deployed on every Cassandra datacenter in this K8ssandraCluster. + properties: + serviceAccountName: + default: default + type: string + affinity: + description: Affinity applied to the Reaper pods. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. for each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating + through the elements of this field and adding "weight" + to the sum if the node matches the corresponding matchExpressions; + the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects + (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with + the corresponding weight. properties: - host: - description: Host name to connect to, - defaults to the pod IP. You probably - want to set "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in - the request. HTTP allows repeated headers. + matchExpressions: + description: A list of node selector requirements + by node's labels. items: - description: HTTPHeader describes a - custom header to be used in HTTP probes + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. properties: - name: - description: The header field name + key: + description: The label key that the selector + applies to. type: string - value: - description: The header field value + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch.
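# A minimal sketch of the new Reaper template above; the service account name
# and node label key are hypothetical, and the weight must fall in the 1-100
# range defined further down in this schema:
#
#   reaper:
#     serviceAccountName: reaper
#     affinity:
#       nodeAffinity:
#         preferredDuringSchedulingIgnoredDuringExecution:
#           - weight: 50
#             preference:
#               matchExpressions:
#                 - key: node-type      # hypothetical node label
#                   operator: In
#                   values: ["repair"]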
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array required: - - name - - value + - key + - operator type: object type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to - the host. Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container - has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the - probe. Default to 10 seconds. Minimum value is - 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the - probe to be considered successful after having - failed. Defaults to 1. Must be 1 for liveness - and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: - implement a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port type: object - terminationGracePeriodSeconds: - description: Optional duration in seconds the pod - needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after - the processes running in the pod are sent a termination - signal and the time when the processes are forcibly - halted with a kill signal. Set this value longer - than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value overrides - the value provided by the pod spec. Value must - be non-negative integer. The value zero indicates - stop immediately via the kill signal (no opportunity - to shut down). This is a beta field and requires - enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. 
spec.terminationGracePeriodSeconds - is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: 'Number of seconds after which the - probe times out. Defaults to 1 second. Minimum - value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range 1-100. format: int32 type: integer - type: object - resources: - description: Resources is the Kubernetes resource requests - and limits to apply, per Stargate pod. Leave nil to - use defaults. - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is - omitted for a container, it defaults to Limits - if that is explicitly specified, otherwise to - an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - serviceAccount: - default: default - description: ServiceAccount is the service account name - to use for Stargate pods. - type: string - size: - default: 1 - description: Size is the number of Stargate instances - to deploy in each datacenter. They will be spread - evenly across racks. - format: int32 - minimum: 1 - type: integer - stargateContainerImage: - description: StargateContainerImage is the image characteristics - to use for Stargate containers. Leave nil to use a - default image. - properties: - pullPolicy: - default: IfNotPresent - description: PullPolicy describes a policy for if/when - to pull a container image - type: string - registry: - default: docker.io - type: string - repository: - type: string - tag: - default: latest - type: string required: - - repository + - preference + - weight type: object - tolerations: - description: Tolerations are tolerations to apply to - the Stargate pods. Leave nil to let the controller - reuse the same tolerations used for data pods in this - datacenter, if any. See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - items: - description: The pod this Toleration is attached to - tolerates any taint that matches the triple - using the matching operator . - properties: - effect: - description: Effect indicates the taint effect - to match. Empty means match all taint effects. - When specified, allowed values are NoSchedule, - PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; - this combination means to match all values and - all keys. - type: string - operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and - Equal. Defaults to Equal. 
Exists is equivalent - to wildcard for value, so that a pod can tolerate - all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the - period of time the toleration (which must be - of effect NoExecute, otherwise this field is - ignored) tolerates the taint. By default, it - is not set, which means tolerate the taint forever - (do not evict). Zero and negative values will - be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration - matches to. If the operator is Exists, the value - should be empty, otherwise just a regular string. - type: string - type: object - type: array - required: - - size - type: object - storageConfig: - description: StorageConfig is the persistent storage requirements - for each Cassandra pod. This includes everything under - /var/lib/cassandra, namely the commit log and data directories. - properties: - additionalVolumes: - items: - description: StorageConfig defines additional storage - configurations - properties: - mountPath: - description: Mount path into cassandra container - type: string - name: - description: Name of the pvc - pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' - type: string - pvcSpec: - description: Persistent volume claim spec - properties: - accessModes: - description: 'AccessModes contains the desired - access modes the volume should have. More - info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify - either: * An existing VolumeSnapshot object - (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) - If the provisioner or an external controller - can support the specified data source, it - will create a new volume based on the contents - of the specified data source. If the AnyVolumeDataSource - feature gate is enabled, this field will - always have the same contents as the DataSourceRef - field.' + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the affinity requirements + specified by this field cease to be met at some point + during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from + its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term + matches no objects. The requirements of them are + ANDed. The TopologySelectorTerm type implements + a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. properties: - apiGroup: - description: APIGroup is the group for - the resource being referenced. If APIGroup - is not specified, the specified Kind - must be in the core API group. For any - other third-party types, APIGroup is - required. - type: string - kind: - description: Kind is the type of resource - being referenced + key: + description: The label key that the selector + applies to. 
type: string - name: - description: Name is the name of resource - being referenced + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array required: - - kind - - name + - key + - operator type: object - dataSourceRef: - description: 'Specifies the object from which - to populate the volume with data, if a non-empty - volume is desired. This may be any local - object from a non-empty API group (non core - object) or a PersistentVolumeClaim object. - When this field is specified, volume binding - will only succeed if the type of the specified - object matches some installed volume populator - or dynamic provisioner. This field will - replace the functionality of the DataSource - field and as such if both fields are non-empty, - they must have the same value. For backwards - compatibility, both fields (DataSource and - DataSourceRef) will be set to the same value - automatically if one of them is empty and - the other is non-empty. There are two important - differences between DataSource and DataSourceRef: - * While DataSource only allows two specific - types of objects, DataSourceRef allows - any non-core object, as well as PersistentVolumeClaim - objects. * While DataSource ignores disallowed - values (dropping them), DataSourceRef preserves - all values, and generates an error if a - disallowed value is specified. (Alpha) - Using this field requires the AnyVolumeDataSource - feature gate to be enabled.' + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. properties: - apiGroup: - description: APIGroup is the group for - the resource being referenced. If APIGroup - is not specified, the specified Kind - must be in the core API group. For any - other third-party types, APIGroup is - required. - type: string - kind: - description: Kind is the type of resource - being referenced + key: + description: The label key that the selector + applies to. type: string - name: - description: Name is the name of resource - being referenced + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum - resources the volume should have. 
More info: - https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum - amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum - amount of compute resources required. - If Requests is omitted for a container, - it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object + - key + - operator type: object - selector: - description: A label query over volumes to - consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. for each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating + through the elements of this field and adding "weight" + to the sum if the node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest sum are + the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. 
+ type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: type: string - operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by + this field and the ones listed in the namespaces + field. null selector and null or empty namespaces + list means "this pod's namespace". An empty + selector ({}) matches all namespaces. This + field is beta-level and is only honored when + PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: type: string - values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. This - array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + type: array + required: + - key + - operator type: object - type: object - storageClassName: - description: 'Name of the StorageClass required - by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type - of volume is required by the claim. Value - of Filesystem is implied when not included - in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference - to the PersistentVolume backing this claim. 
+ type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected + by namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace" + items: type: string - type: object - required: - - mountPath - - name - - pvcSpec - type: object - type: array - cassandraDataVolumeClaimSpec: - description: PersistentVolumeClaimSpec describes the - common attributes of storage devices and allows a - Source for provider-specific attributes - properties: - accessModes: - description: 'AccessModes contains the desired access - modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify - either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) If the - provisioner or an external controller can support - the specified data source, it will create a new - volume based on the contents of the specified - data source. If the AnyVolumeDataSource feature - gate is enabled, this field will always have the - same contents as the DataSourceRef field.' - properties: - apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, - the specified Kind must be in the core API - group. For any other third-party types, APIGroup - is required. - type: string - kind: - description: Kind is the type of resource being - referenced - type: string - name: - description: Name is the name of resource being - referenced - type: string - required: - - kind - - name - type: object - dataSourceRef: - description: 'Specifies the object from which to - populate the volume with data, if a non-empty - volume is desired. This may be any local object - from a non-empty API group (non core object) or - a PersistentVolumeClaim object. When this field - is specified, volume binding will only succeed - if the type of the specified object matches some - installed volume populator or dynamic provisioner. - This field will replace the functionality of the - DataSource field and as such if both fields are - non-empty, they must have the same value. For - backwards compatibility, both fields (DataSource - and DataSourceRef) will be set to the same value - automatically if one of them is empty and the - other is non-empty. There are two important differences - between DataSource and DataSourceRef: * While - DataSource only allows two specific types of objects, - DataSourceRef allows any non-core object, as - well as PersistentVolumeClaim objects. * While - DataSource ignores disallowed values (dropping - them), DataSourceRef preserves all values, and - generates an error if a disallowed value is specified. - (Alpha) Using this field requires the AnyVolumeDataSource - feature gate to be enabled.' - properties: - apiGroup: - description: APIGroup is the group for the resource - being referenced. 
If APIGroup is not specified, - the specified Kind must be in the core API - group. For any other third-party types, APIGroup - is required. - type: string - kind: - description: Kind is the type of resource being - referenced - type: string - name: - description: Name is the name of resource being - referenced + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the + pods matching the labelSelector in the specified + namespaces, where co-located is defined as + running on a node whose value of the label + with key topologyKey matches that of any node + on which any of the selected pods is running. + Empty topologyKey is not allowed. type: string required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum - amount of compute resources required. If Requests - is omitted for a container, it defaults to - Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object + - topologyKey type: object - selector: - description: A label query over volumes to consider - for binding. + weight: + description: weight associated with matching the + corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the affinity requirements + specified by this field cease to be met at some point + during pod execution (e.g. due to a pod label update), + the system may or may not try to eventually evict the + pod from its node. When there are multiple elements, + the lists of nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not + co-located (anti-affinity) with, where co-located + is defined as running on a node whose value of the + label with key <topologyKey> matches that of any node + on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods.
properties: matchExpressions: description: matchExpressions is a list of label @@ -3694,267 +6334,285 @@ spec: map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". - The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required - by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume - is required by the claim. Value of Filesystem - is implied when not included in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference - to the PersistentVolume backing this claim. - type: string - type: object - type: object - systemLoggerResources: - description: SystemLoggerResources is the cpu and memory - resources for the server-system-logger container. - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of - compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - required: - - size - type: object - x-kubernetes-preserve-unknown-fields: true - type: array - networking: - description: Networking enables host networking and configures - a NodePort ports. - properties: - hostNetwork: - type: boolean - nodePort: - properties: - internode: - type: integer - internodeSSL: - type: integer - native: - type: integer - nativeSSL: - type: integer - type: object - type: object - racks: - description: Racks is a list of named racks. Note that racks are - used to create node affinity. // - items: - description: Rack ... - properties: - name: - description: The rack name - minLength: 2 - type: string - nodeAffinityLabels: - additionalProperties: - type: string - description: NodeAffinityLabels to pin the rack, using node - affinity - type: object - zone: - description: Deprecated. Use nodeAffinityLabels instead. - Zone name to pin the rack, using node affinity - type: string - required: - - name - type: object - type: array - resources: - description: Resources is the cpu and memory resources for the - cassandra container. - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - serverImage: - description: ServerImage is the image for the cassandra container. - Note that this should be a management-api image. If left empty - the operator will choose a default image based on ServerVersion. - type: string - serverVersion: - description: ServerVersion is the Cassandra version. - pattern: (3\.11\.\d+)|(4\.0\.\d+) - type: string - storageConfig: - description: StorageConfig is the persistent storage requirements - for each Cassandra pod. This includes everything under /var/lib/cassandra, - namely the commit log and data directories. - properties: - additionalVolumes: - items: - description: StorageConfig defines additional storage configurations - properties: - mountPath: - description: Mount path into cassandra container - type: string - name: - description: Name of the pvc - pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' - type: string - pvcSpec: - description: Persistent volume claim spec - properties: - accessModes: - description: 'AccessModes contains the desired access - modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify - either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) If the - provisioner or an external controller can support - the specified data source, it will create a new - volume based on the contents of the specified - data source. If the AnyVolumeDataSource feature - gate is enabled, this field will always have the - same contents as the DataSourceRef field.' - properties: - apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, - the specified Kind must be in the core API - group. For any other third-party types, APIGroup - is required. - type: string - kind: - description: Kind is the type of resource being - referenced - type: string - name: - description: Name is the name of resource being - referenced - type: string - required: - - kind - - name - type: object - dataSourceRef: - description: 'Specifies the object from which to - populate the volume with data, if a non-empty - volume is desired. This may be any local object - from a non-empty API group (non core object) or - a PersistentVolumeClaim object. When this field - is specified, volume binding will only succeed - if the type of the specified object matches some - installed volume populator or dynamic provisioner. - This field will replace the functionality of the - DataSource field and as such if both fields are - non-empty, they must have the same value. 
For - backwards compatibility, both fields (DataSource - and DataSourceRef) will be set to the same value - automatically if one of them is empty and the - other is non-empty. There are two important differences - between DataSource and DataSourceRef: * While - DataSource only allows two specific types of objects, - DataSourceRef allows any non-core object, as - well as PersistentVolumeClaim objects. * While - DataSource ignores disallowed values (dropping - them), DataSourceRef preserves all values, and - generates an error if a disallowed value is specified. - (Alpha) Using this field requires the AnyVolumeDataSource - feature gate to be enabled.' - properties: - apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, - the specified Kind must be in the core API - group. For any other third-party types, APIGroup - is required. - type: string - kind: - description: Kind is the type of resource being - referenced - type: string - name: - description: Name is the name of resource being - referenced - type: string - required: - - kind - - name + The requirements are ANDed. + type: object type: object - resources: - description: 'Resources represents the minimum resources - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. This field is beta-level + and is only honored when PodAffinityNamespaceSelector + feature is enabled. properties: - limits: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. 
type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum - amount of compute resources required. If Requests - is omitted for a container, it defaults to - Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the anti-affinity expressions + specified by this field, but it may choose a node that + violates one or more of the expressions. The node that + is most preferred is the one with the greatest sum of + weights, i.e. for each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + anti-affinity expressions, etc.), compute a sum by iterating + through the elements of this field and adding "weight" + to the sum if the node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest sum are + the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by + this field and the ones listed in the namespaces + field. null selector and null or empty namespaces + list means "this pod's namespace". An empty + selector ({}) matches all namespaces. This + field is beta-level and is only honored when + PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected + by namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the + pods matching the labelSelector in the specified + namespaces, where co-located is defined as + running on a node whose value of the label + with key topologyKey matches that of any node + on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey type: object - selector: - description: A label query over volumes to consider - for binding. + weight: + description: weight associated with matching the + corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified + by this field are not met at scheduling time, the pod + will not be scheduled onto the node. 
If the anti-affinity + requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod + label update), the system may or may not try to eventually + evict the pod from its node. When there are multiple + elements, the lists of nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not + co-located (anti-affinity) with, where co-located + is defined as running on a node whose value of the + label with key <topologyKey> matches that of any node + on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. properties: matchExpressions: description: matchExpressions is a list of label @@ -4003,222 +6661,720 @@ spec: The requirements are ANDed. type: object type: object - storageClassName: - description: 'Name of the StorageClass required - by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume - is required by the claim. Value of Filesystem - is implied when not included in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference - to the PersistentVolume backing this claim. - type: string - type: object - required: - - mountPath - - name - - pvcSpec - type: object - type: array - cassandraDataVolumeClaimSpec: - description: PersistentVolumeClaimSpec describes the common - attributes of storage devices and allows a Source for provider-specific - attributes - properties: - accessModes: - description: 'AccessModes contains the desired access - modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify either: - * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) If the provisioner - or an external controller can support the specified - data source, it will create a new volume based on the - contents of the specified data source. If the AnyVolumeDataSource - feature gate is enabled, this field will always have - the same contents as the DataSourceRef field.' - properties: - apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, - the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - dataSourceRef: - description: 'Specifies the object from which to populate - the volume with data, if a non-empty volume is desired. - This may be any local object from a non-empty API group - (non core object) or a PersistentVolumeClaim object. - When this field is specified, volume binding will only - succeed if the type of the specified object matches - some installed volume populator or dynamic provisioner. - This field will replace the functionality of the DataSource - field and as such if both fields are non-empty, they - must have the same value.
For backwards compatibility, - both fields (DataSource and DataSourceRef) will be set - to the same value automatically if one of them is empty - and the other is non-empty. There are two important - differences between DataSource and DataSourceRef: * - While DataSource only allows two specific types of objects, - DataSourceRef allows any non-core object, as well - as PersistentVolumeClaim objects. * While DataSource - ignores disallowed values (dropping them), DataSourceRef preserves - all values, and generates an error if a disallowed value - is specified. (Alpha) Using this field requires the - AnyVolumeDataSource feature gate to be enabled.' - properties: - apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, - the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - selector: - description: A label query over volumes to consider for - binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. This field is beta-level + and is only honored when PodAffinityNamespaceSelector + feature is enabled. properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. 
- If the operator is In or NotIn, the values - array must be non-empty. If the operator is - Exists or DoesNotExist, the values array must - be empty. This array is replaced during a - strategic merge patch. + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. items: - type: string + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object type: array - required: - - key - - operator + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object type: object - type: array - matchLabels: - additionalProperties: + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the - claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + required: + - topologyKey + type: object + type: array + type: object + type: object + autoScheduling: + description: Auto scheduling properties. When you enable the auto-schedule + feature, Reaper dynamically schedules repairs for all non-system + keyspaces in a cluster. A cluster's keyspaces are monitored + and any modifications (additions or removals) are detected. + When a new keyspace is created, a new repair schedule is created + automatically for that keyspace. Conversely, when a keyspace + is removed, the corresponding repair schedule is deleted. 
+ properties: + enabled: + default: false + type: boolean + excludedClusters: + description: ExcludedClusters are the clusters that are to + be excluded from the repair schedule. + items: + type: string + type: array + excludedKeyspaces: + description: ExcludedKeyspaces are the keyspaces that are + to be excluded from the repair schedule. + items: + type: string + type: array + initialDelayPeriod: + default: PT15S + description: InitialDelay is the amount of delay time before + the schedule period starts. Must be a valid ISO-8601 duration + string. The default is "PT15S" (15 seconds). + pattern: ([-+]?)P(?:([-+]?[0-9]+)D)?(T(?:([-+]?[0-9]+)H)?(?:([-+]?[0-9]+)M)?(?:([-+]?[0-9]+)(?:[.,]([0-9]{0,9}))?S)?)? + type: string + percentUnrepairedThreshold: + default: 10 + description: PercentUnrepairedThreshold is the percentage + of unrepaired data over which an incremental repair should + be started. Only relevant when using repair type INCREMENTAL. + maximum: 100 + minimum: 0 + type: integer + periodBetweenPolls: + default: PT10M + description: PeriodBetweenPolls is the interval time to wait + before checking whether to start a repair task. Must be + a valid ISO-8601 duration string. The default is "PT10M" + (10 minutes). + pattern: ([-+]?)P(?:([-+]?[0-9]+)D)?(T(?:([-+]?[0-9]+)H)?(?:([-+]?[0-9]+)M)?(?:([-+]?[0-9]+)(?:[.,]([0-9]{0,9}))?S)?)? + type: string + repairType: + default: AUTO + description: 'RepairType is the type of repair to create: + - REGULAR creates a regular repair (non-adaptive and non-incremental); + - ADAPTIVE creates an adaptive repair; adaptive repairs + are most suited for Cassandra 3. - INCREMENTAL creates an + incremental repair; incremental repairs should only be used + with Cassandra 4+. - AUTO chooses between ADAPTIVE and INCREMENTAL + depending on the Cassandra server version; ADAPTIVE for + Cassandra 3 and INCREMENTAL for Cassandra 4+.' + enum: + - REGULAR + - ADAPTIVE + - INCREMENTAL + - AUTO + type: string + scheduleSpreadPeriod: + default: PT6H + description: ScheduleSpreadPeriod is the time spacing between + each of the repair schedules that is to be carried out. + Must be a valid ISO-8601 duration string. The default is + "PT6H" (6 hours). + pattern: ([-+]?)P(?:([-+]?[0-9]+)D)?(T(?:([-+]?[0-9]+)H)?(?:([-+]?[0-9]+)M)?(?:([-+]?[0-9]+)(?:[.,]([0-9]{0,9}))?S)?)? + type: string + timeBeforeFirstSchedule: + default: PT5M + description: TimeBeforeFirstSchedule is the grace period before + the first repair in the schedule is started. Must be a valid + ISO-8601 duration string. The default is "PT5M" (5 minutes). + pattern: ([-+]?)P(?:([-+]?[0-9]+)D)?(T(?:([-+]?[0-9]+)H)?(?:([-+]?[0-9]+)M)?(?:([-+]?[0-9]+)(?:[.,]([0-9]{0,9}))?S)?)? + type: string + type: object + cassandraUserSecretRef: + description: 'Defines the username and password that Reaper will + use to authenticate CQL connections to Cassandra clusters. These + credentials will be automatically turned into CQL roles by cass-operator + when bootstrapping the datacenter, then passed to the Reaper + instance, so that it can authenticate against nodes in the datacenter + using CQL. If CQL authentication is not required, leave this + field empty. The secret must be in the same namespace as Reaper + itself and must contain two keys: "username" and "password".' + type: string + image: + default: thelastpickle/cassandra-reaper:3.0.0 + description: The image to use. 
+ type: string + imagePullPolicy: + default: IfNotPresent + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + initContainerSecurityContext: + description: InitContainerSecurityContext is the SecurityContext + applied to the Reaper init container, used to perform schema + migrations. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a + process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the + container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in + privileged containers are essentially equivalent to root + on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use + for the containers. The default is DefaultProcMount which + uses the container runtime defaults for readonly paths and + masked paths. This requires the ProcMountType feature flag + to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + properties: + level: + description: Level is SELinux level label that applies + to the container. type: string - volumeMode: - description: volumeMode defines what type of volume is - required by the claim. Value of Filesystem is implied - when not included in claim spec. 
+ role: + description: Role is a SELinux role label that applies + to the container. type: string - volumeName: - description: VolumeName is the binding reference to the - PersistentVolume backing this claim. + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must + be preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a + profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile + should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is + alpha-level and will only be honored by components that + enable the WindowsHostProcessContainers feature flag. + Setting this field without the feature flag will result + in errors when validating the Pod. All of a Pod's containers + must have the same effective HostProcess value (it is + not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. type: string type: object type: object - superuserSecret: - description: SuperuserSecretName allows to override the default - super user secret + jmxUserSecretRef: + description: 'Defines the username and password that Reaper will + use to authenticate JMX connections to Cassandra clusters. These + credentials will be automatically passed to each Cassandra node + in the datacenter, as well as to the Reaper instance, so that + the latter can authenticate against the former. If JMX authentication + is not required, leave this field empty. 
The secret must be + in the same namespace as Reaper itself and must contain two + keys: "username" and "password".' type: string - systemLoggerResources: - description: SystemLoggerResources is the cpu and memory resources - for the server-system-logger container. + keyspace: + default: reaper_db + description: The keyspace to use to store Reaper's state. Will + default to "reaper_db" if unspecified. Will be created if it + does not exist, and if this Reaper resource is managed by K8ssandra. + type: string + podSecurityContext: + description: PodSecurityContext contains a pod-level SecurityContext + to apply to Reaper pods. properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + fsGroup: + description: "A special supplemental group that applies to + all containers in a pod. Some volume types allow the Kubelet + to change the ownership of that volume to be owned by the + pod: \n 1. The owning GID will be the FSGroup 2. The setgid + bit is set (new files created in the volume will be owned + by FSGroup) 3. The permission bits are OR'd with rw-rw---- + \n If unset, the Kubelet will not modify the ownership and + permissions of any volume." + format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of changing + ownership and permission of the volume before being exposed + inside Pod. This field will only apply to volume types which + support fsGroup based ownership(and permissions). It will + have no effect on ephemeral volume types such as: secret, + configmaps and emptydir. Valid values are "OnRootMismatch" + and "Always". If not specified, "Always" is used.' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in SecurityContext. If set + in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + SecurityContext. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence + for that container. 
+ properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + seccompProfile: + description: The seccomp options to use by the containers + in this pod. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must + be preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a + profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile + should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + supplementalGroups: + description: A list of groups applied to the first process + run in each container, in addition to the container's primary + GID. If unspecified, no groups will be added to any container. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used + for the pod. Pods with unsupported sysctls (by the container + runtime) might fail to launch. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options within a container's + SecurityContext will be used. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is + alpha-level and will only be honored by components that + enable the WindowsHostProcessContainers feature flag. + Setting this field without the feature flag will result + in errors when validating the Pod. 
All of a Pod's containers + must have the same effective HostProcess value (it is + not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + securityContext: + description: SecurityContext applied to the Reaper main container. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a + process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the + container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in + privileged containers are essentially equivalent to root + on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use + for the containers. The default is DefaultProcMount which + uses the container runtime defaults for readonly paths and + masked paths. This requires the ProcMountType feature flag + to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must + be preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a + profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile + should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is + alpha-level and will only be honored by components that + enable the WindowsHostProcessContainers feature flag. + Setting this field without the feature flag will result + in errors when validating the Pod. All of a Pod's containers + must have the same effective HostProcess value (it is + not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + tolerations: + description: Tolerations applied to the Reaper pods. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple <key,value,effect> using + the matching operator <operator>. + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, allowed + values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match + all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to + the value. Valid operators are Exists and Equal. Defaults + to Equal. Exists is equivalent to wildcard for value, + so that a pod can tolerate all taints of a particular + category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of + time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the taint + forever (do not evict). Zero and negative values will + be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array type: object stargate: description: Stargate defines the desired deployment characteristics @@ -5577,6 +8733,36 @@ spec: format: date-time type: string type: object + reaper: + description: ReaperStatus defines the observed state of Reaper + properties: + conditions: + items: + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the + condition transited from one status to another. + format: date-time + type: string + status: + type: string + type: + type: string + required: + - status + - type + type: object + type: array + progress: + description: Progress is the progress of this Reaper object. + enum: + - Pending + - Deploying + - Running + type: string + required: + - progress + type: object stargate: description: StargateStatus defines the observed state of a Stargate resource. diff --git a/config/crd/bases/reaper.k8ssandra.io_reapers.yaml b/config/crd/bases/reaper.k8ssandra.io_reapers.yaml new file mode 100644 index 000000000..c5955c4c2 --- /dev/null +++ b/config/crd/bases/reaper.k8ssandra.io_reapers.yaml @@ -0,0 +1,1558 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: reapers.reaper.k8ssandra.io +spec: + group: reaper.k8ssandra.io + names: + kind: Reaper + listKind: ReaperList + plural: reapers + singular: reaper + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.datacenterRef.name + name: DC + type: string + - jsonPath: .status.progress + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Reaper is the Schema for the reapers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ReaperSpec defines the desired state of Reaper + properties: + ServiceAccountName: + default: default + type: string + affinity: + description: Affinity applied to the Reaper pods. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. 
The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. This field is beta-level + and is only honored when PodAffinityNamespaceSelector + feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may + not try to eventually evict the pod from its node. When + there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms + must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key <topologyKey> + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch.
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied to the + union of the namespaces selected by this field and + the ones listed in the namespaces field. null selector + and null or empty namespaces list means "this pod's + namespace". An empty selector ({}) matches all namespaces. + This field is beta-level and is only honored when + PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace + names that the term applies to. The term is applied + to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. null or + empty namespaces list and null namespaceSelector means + "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates one + or more of the expressions. 
The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. This field is beta-level + and is only honored when PodAffinityNamespaceSelector + feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its + node. When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key <topologyKey> + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch.
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied to the + union of the namespaces selected by this field and + the ones listed in the namespaces field. null selector + and null or empty namespaces list means "this pod's + namespace". An empty selector ({}) matches all namespaces. + This field is beta-level and is only honored when + PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace + names that the term applies to. The term is applied + to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. null or + empty namespaces list and null namespaceSelector means + "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + autoScheduling: + description: Auto scheduling properties. When you enable the auto-schedule + feature, Reaper dynamically schedules repairs for all non-system + keyspaces in a cluster. A cluster's keyspaces are monitored and + any modifications (additions or removals) are detected. When a new + keyspace is created, a new repair schedule is created automatically + for that keyspace. Conversely, when a keyspace is removed, the corresponding + repair schedule is deleted. 
+ properties: + enabled: + default: false + type: boolean + excludedClusters: + description: ExcludedClusters are the clusters that are to be + excluded from the repair schedule. + items: + type: string + type: array + excludedKeyspaces: + description: ExcludedKeyspaces are the keyspaces that are to be + excluded from the repair schedule. + items: + type: string + type: array + initialDelayPeriod: + default: PT15S + description: InitialDelay is the amount of delay time before the + schedule period starts. Must be a valid ISO-8601 duration string. + The default is "PT15S" (15 seconds). + pattern: ([-+]?)P(?:([-+]?[0-9]+)D)?(T(?:([-+]?[0-9]+)H)?(?:([-+]?[0-9]+)M)?(?:([-+]?[0-9]+)(?:[.,]([0-9]{0,9}))?S)?)? + type: string + percentUnrepairedThreshold: + default: 10 + description: PercentUnrepairedThreshold is the percentage of unrepaired + data over which an incremental repair should be started. Only + relevant when using repair type INCREMENTAL. + maximum: 100 + minimum: 0 + type: integer + periodBetweenPolls: + default: PT10M + description: PeriodBetweenPolls is the interval time to wait before + checking whether to start a repair task. Must be a valid ISO-8601 + duration string. The default is "PT10M" (10 minutes). + pattern: ([-+]?)P(?:([-+]?[0-9]+)D)?(T(?:([-+]?[0-9]+)H)?(?:([-+]?[0-9]+)M)?(?:([-+]?[0-9]+)(?:[.,]([0-9]{0,9}))?S)?)? + type: string + repairType: + default: AUTO + description: 'RepairType is the type of repair to create: - REGULAR + creates a regular repair (non-adaptive and non-incremental); + - ADAPTIVE creates an adaptive repair; adaptive repairs are + most suited for Cassandra 3. - INCREMENTAL creates an incremental + repair; incremental repairs should only be used with Cassandra + 4+. - AUTO chooses between ADAPTIVE and INCREMENTAL depending + on the Cassandra server version; ADAPTIVE for Cassandra 3 and + INCREMENTAL for Cassandra 4+.' + enum: + - REGULAR + - ADAPTIVE + - INCREMENTAL + - AUTO + type: string + scheduleSpreadPeriod: + default: PT6H + description: ScheduleSpreadPeriod is the time spacing between + each of the repair schedules that is to be carried out. Must + be a valid ISO-8601 duration string. The default is "PT6H" (6 + hours). + pattern: ([-+]?)P(?:([-+]?[0-9]+)D)?(T(?:([-+]?[0-9]+)H)?(?:([-+]?[0-9]+)M)?(?:([-+]?[0-9]+)(?:[.,]([0-9]{0,9}))?S)?)? + type: string + timeBeforeFirstSchedule: + default: PT5M + description: TimeBeforeFirstSchedule is the grace period before + the first repair in the schedule is started. Must be a valid + ISO-8601 duration string. The default is "PT5M" (5 minutes). + pattern: ([-+]?)P(?:([-+]?[0-9]+)D)?(T(?:([-+]?[0-9]+)H)?(?:([-+]?[0-9]+)M)?(?:([-+]?[0-9]+)(?:[.,]([0-9]{0,9}))?S)?)? + type: string + type: object + cassandraUserSecretRef: + description: 'Defines the username and password that Reaper will use + to authenticate CQL connections to Cassandra clusters. These credentials + will be automatically turned into CQL roles by cass-operator when + bootstrapping the datacenter, then passed to the Reaper instance, + so that it can authenticate against nodes in the datacenter using + CQL. If CQL authentication is not required, leave this field empty. + The secret must be in the same namespace as Reaper itself and must + contain two keys: "username" and "password".' + type: string + datacenterAvailability: + default: LOCAL + description: DatacenterAvailability indicates to Reaper its deployment + in relation to the target datacenter's network. For single-DC clusters, + the default (LOCAL) is fine. 
For multi-DC clusters, it is recommended + to use EACH, provided that there is one Reaper instance managing + each DC in the cluster; otherwise, if one single Reaper instance + is going to manage more than one DC in the cluster, use LOCAL and + remote DCs will be handled internally by Cassandra itself. See https://cassandra-reaper.io/docs/usage/multi_dc/. + enum: + - LOCAL + - ALL + - EACH + type: string + datacenterRef: + description: DatacenterRef is the reference of a CassandraDatacenter + resource that this Reaper instance should manage. It will also be + used as the backend for persisting Reaper's state. Reaper must be + able to access the JMX port (7199 by default) and the CQL port (9042 + by default) on this DC. + properties: + name: + description: The datacenter name. + type: string + namespace: + description: The datacenter namespace. If empty, the datacenter + will be assumed to reside in the same namespace as the Reaper + instance. + type: string + required: + - name + type: object + image: + default: thelastpickle/cassandra-reaper:3.0.0 + description: The image to use. + type: string + imagePullPolicy: + default: IfNotPresent + description: PullPolicy describes a policy for if/when to pull a container + image + type: string + initContainerSecurityContext: + description: InitContainerSecurityContext is the SecurityContext applied + to the Reaper init container, used to perform schema migrations. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process + can gain more privileges than its parent process. This bool + directly controls if the no_new_privs flag will be set on the + container process. AllowPrivilegeEscalation is true always when + the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container + runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged + containers are essentially equivalent to root on the host. Defaults + to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for + the containers. The default is DefaultProcMount which uses the + container runtime defaults for readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. + Uses runtime default if unset. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no such validation + will be performed. May also be set in PodSecurityContext. 
If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence. + properties: + level: + description: Level is SELinux level label that applies to + the container. + type: string + role: + description: Role is a SELinux role label that applies to + the container. + type: string + type: + description: Type is a SELinux type label that applies to + the container. + type: string + user: + description: User is a SELinux user label that applies to + the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If + seccomp options are provided at both the pod & container level, + the container options override the pod options. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must be + preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a profile + defined in a file on the node should be used. RuntimeDefault + - the container runtime default profile should be used. + Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will + be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by + the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is alpha-level + and will only be honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the feature flag + will result in errors when validating the Pod. All of a + Pod's containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. 
Defaults to the user specified + in image metadata if unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: string + type: object + type: object + jmxUserSecretRef: + description: 'Defines the username and password that Reaper will use + to authenticate JMX connections to Cassandra clusters. These credentials + will be automatically passed to each Cassandra node in the datacenter, + as well as to the Reaper instance, so that the latter can authenticate + against the former. If JMX authentication is not required, leave + this field empty. The secret must be in the same namespace as Reaper + itself and must contain two keys: "username" and "password".' + type: string + keyspace: + default: reaper_db + description: The keyspace to use to store Reaper's state. Will default + to "reaper_db" if unspecified. Will be created if it does not exist, + and if this Reaper resource is managed by K8ssandra. + type: string + podSecurityContext: + description: PodSecurityContext contains a pod-level SecurityContext + to apply to Reaper pods. + properties: + fsGroup: + description: "A special supplemental group that applies to all + containers in a pod. Some volume types allow the Kubelet to + change the ownership of that volume to be owned by the pod: + \n 1. The owning GID will be the FSGroup 2. The setgid bit is + set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- \n If unset, + the Kubelet will not modify the ownership and permissions of + any volume." + format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of changing + ownership and permission of the volume before being exposed + inside Pod. This field will only apply to volume types which + support fsGroup based ownership(and permissions). It will have + no effect on ephemeral volume types such as: secret, configmaps + and emptydir. Valid values are "OnRootMismatch" and "Always". + If not specified, "Always" is used.' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container process. + Uses runtime default if unset. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no such validation + will be performed. May also be set in SecurityContext. If set + in both SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in SecurityContext. 
If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + properties: + level: + description: Level is SELinux level label that applies to + the container. + type: string + role: + description: Role is a SELinux role label that applies to + the container. + type: string + type: + description: Type is a SELinux type label that applies to + the container. + type: string + user: + description: User is a SELinux user label that applies to + the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by the containers in this + pod. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must be + preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a profile + defined in a file on the node should be used. RuntimeDefault + - the container runtime default profile should be used. + Unconfined - no profile should be applied." + type: string + required: + - type + type: object + supplementalGroups: + description: A list of groups applied to the first process run + in each container, in addition to the container's primary GID. If + unspecified, no groups will be added to any container. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used for + the pod. Pods with unsupported sysctls (by the container runtime) + might fail to launch. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by + the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is alpha-level + and will only be honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the feature flag + will result in errors when validating the Pod. All of a + Pod's containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. 
May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: string + type: object + type: object + securityContext: + description: SecurityContext applied to the Reaper main container. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process + can gain more privileges than its parent process. This bool + directly controls if the no_new_privs flag will be set on the + container process. AllowPrivilegeEscalation is true always when + the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container + runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged + containers are essentially equivalent to root on the host. Defaults + to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for + the containers. The default is DefaultProcMount which uses the + container runtime defaults for readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. + Uses runtime default if unset. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no such validation + will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence. + properties: + level: + description: Level is SELinux level label that applies to + the container. + type: string + role: + description: Role is a SELinux role label that applies to + the container. + type: string + type: + description: Type is a SELinux type label that applies to + the container. 
+ type: string + user: + description: User is a SELinux user label that applies to + the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If + seccomp options are provided at both the pod & container level, + the container options override the pod options. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must be + preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a profile + defined in a file on the node should be used. RuntimeDefault + - the container runtime default profile should be used. + Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will + be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by + the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is alpha-level + and will only be honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the feature flag + will result in errors when validating the Pod. All of a + Pod's containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: string + type: object + type: object + tolerations: + description: Tolerations applied to the Reaper pods. + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple <key,value,effect> using the matching + operator <operator>. + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match all + values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the + value. Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod + can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time + the toleration (which must be of effect NoExecute, otherwise + this field is ignored) tolerates the taint. By default, it + is not set, which means tolerate the taint forever (do not + evict). Zero and negative values will be treated as 0 (evict + immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + required: + - datacenterRef + type: object + status: + description: ReaperStatus defines the observed state of Reaper + properties: + conditions: + items: + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + status: + type: string + type: + type: string + required: + - status + - type + type: object + type: array + progress: + description: Progress is the progress of this Reaper object. + enum: + - Pending + - Deploying + - Configuring + - Running + type: string + required: + - progress + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 31370b00b..7c052bad9 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -6,6 +6,7 @@ resources: - bases/stargate.k8ssandra.io_stargates.yaml - bases/config.k8ssandra.io_clientconfigs.yaml - bases/replication.k8ssandra.io_replicatedsecrets.yaml +- bases/reaper.k8ssandra.io_reapers.yaml #+kubebuilder:scaffold:crdkustomizeresource patches: @@ -22,6 +23,7 @@ patchesStrategicMerge: #- patches/webhook_in_k8ssandraclusters.yaml #- patches/webhook_in_stargates.yaml #- patches/webhook_in_replicatedsecrets.yaml +#- patches/webhook_in_reapers.yaml #+kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. @@ -29,6 +31,7 @@ patchesStrategicMerge: #- patches/cainjection_in_k8ssandraclusters.yaml #- patches/cainjection_in_stargates.yaml #- patches/cainjection_in_replicatedsecrets.yaml +#- patches/cainjection_in_reapers.yaml #+kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs.
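For orientation before the wiring below: the schema above makes spec.datacenterRef the only required field, and status.progress moves through Pending, Deploying, Configuring and Running. A minimal sketch of constructing such an object in Go follows; the field is used as actualReaper.Spec.DatacenterRef by the controller further down, but the ReaperSpec and CassandraDatacenterRef struct names are inferred from the new apis/reaper/v1alpha1 package and should be treated as assumptions.

	import (
		reaperapi "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	)

	// minimalReaper builds the smallest Reaper object the CRD above accepts:
	// only spec.datacenterRef is required; tolerations, securityContext and
	// the other fields are optional.
	func minimalReaper(namespace, dcName string) *reaperapi.Reaper {
		return &reaperapi.Reaper{
			ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: dcName + "-reaper"},
			Spec: reaperapi.ReaperSpec{
				DatacenterRef: reaperapi.CassandraDatacenterRef{Name: dcName},
			},
		}
	}

Note that the scaffolded sample in config/samples further below still carries the placeholder foo: bar, which a structural schema would prune; a usable sample needs spec.datacenterRef to pass validation.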
diff --git a/config/crd/patches/cainjection_in_reapers.yaml b/config/crd/patches/cainjection_in_reapers.yaml new file mode 100644 index 000000000..78918755d --- /dev/null +++ b/config/crd/patches/cainjection_in_reapers.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: reapers.reaper.k8ssandra.io diff --git a/config/crd/patches/webhook_in_reapers.yaml b/config/crd/patches/webhook_in_reapers.yaml new file mode 100644 index 000000000..cc2e48ba2 --- /dev/null +++ b/config/crd/patches/webhook_in_reapers.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: reapers.reaper.k8ssandra.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/rbac/reaper_editor_role.yaml b/config/rbac/reaper_editor_role.yaml new file mode 100644 index 000000000..d268c39eb --- /dev/null +++ b/config/rbac/reaper_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit reapers. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: reaper-editor-role +rules: +- apiGroups: + - reaper.k8ssandra.io + resources: + - reapers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - reaper.k8ssandra.io + resources: + - reapers/status + verbs: + - get diff --git a/config/rbac/reaper_viewer_role.yaml b/config/rbac/reaper_viewer_role.yaml new file mode 100644 index 000000000..4baa0446a --- /dev/null +++ b/config/rbac/reaper_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view reapers. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: reaper-viewer-role +rules: +- apiGroups: + - reaper.k8ssandra.io + resources: + - reapers + verbs: + - get + - list + - watch +- apiGroups: + - reaper.k8ssandra.io + resources: + - reapers/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 9ba1cb17c..cbf14adde 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -102,6 +102,26 @@ rules: - get - patch - update +- apiGroups: + - reaper.k8ssandra.io + resources: + - reapers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - reaper.k8ssandra.io + resources: + - reapers/status + verbs: + - get + - patch + - update - apiGroups: - replication.k8ssandra.io resources: diff --git a/config/samples/reaper_v1alpha1_reaper.yaml b/config/samples/reaper_v1alpha1_reaper.yaml new file mode 100644 index 000000000..acb2c437a --- /dev/null +++ b/config/samples/reaper_v1alpha1_reaper.yaml @@ -0,0 +1,7 @@ +apiVersion: reaper.k8ssandra.io/v1alpha1 +kind: Reaper +metadata: + name: reaper-sample +spec: + # Add fields here + foo: bar diff --git a/controllers/k8ssandra/k8ssandracluster_controller.go b/controllers/k8ssandra/k8ssandracluster_controller.go index dadc4ce55..8811a1c77 100644 --- a/controllers/k8ssandra/k8ssandracluster_controller.go +++ b/controllers/k8ssandra/k8ssandracluster_controller.go @@ -19,6 +19,8 @@ package k8ssandra import ( "context" "fmt" + reaperapi "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" + "github.com/k8ssandra/k8ssandra-operator/pkg/reaper" "k8s.io/apimachinery/pkg/labels" controllerutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sort" @@ -50,7 +52,6 @@ import ( ) const ( - stargateAuthKeyspace = "data_endpoint_auth" k8ssandraClusterFinalizer = "k8ssandracluster.k8ssandra.io/finalizer" ) @@ -70,6 +71,7 @@ type K8ssandraClusterReconciler struct { // +kubebuilder:rbac:groups=k8ssandra.io,namespace="k8ssandra",resources=k8ssandraclusters/finalizers,verbs=update // +kubebuilder:rbac:groups=cassandra.datastax.com,namespace="k8ssandra",resources=cassandradatacenters,verbs=get;list;watch;create;update;delete;patch // +kubebuilder:rbac:groups=stargate.k8ssandra.io,namespace="k8ssandra",resources=stargates,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=reaper.k8ssandra.io,namespace="k8ssandra",resources=reapers,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=core,namespace="k8ssandra",resources=pods;secrets,verbs=get;list;watch func (r *K8ssandraClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -136,10 +138,7 @@ func (r *K8ssandraClusterReconciler) reconcile(ctx context.Context, kc *api.K8ss hasErrors = true } - selector := map[string]string{ - api.CreatedByLabel: api.CreatedByLabelValueK8ssandraClusterController, - api.K8ssandraClusterLabel: kc.Name, - } + selector := utils.CreatedByK8ssandraControllerLabels(kc.Name) stargateList := &stargateapi.StargateList{} options := client.ListOptions{ Namespace: namespace, @@ -163,6 +162,10 @@ func (r *K8ssandraClusterReconciler) reconcile(ctx context.Context, kc *api.K8ss } } } + + if r.deleteReapers(ctx, kc, dcTemplate, namespace, remoteClient, kcLogger) { + hasErrors = true + } } if hasErrors { @@ -204,11 +207,15 @@ func (r *K8ssandraClusterReconciler) reconcile(ctx context.Context, kc *api.K8ss kcLogger.Info("Setting default superuser secret", "SuperuserSecretName", 
kc.Spec.Cassandra.SuperuserSecretName) } - if err := secret.ReconcileSuperuserSecret(ctx, r.Client, kc.Spec.Cassandra.SuperuserSecretName, kc.Spec.Cassandra.Cluster, kc.GetNamespace()); err != nil { + if err := secret.ReconcileSecret(ctx, r.Client, kc.Spec.Cassandra.SuperuserSecretName, kc.Name, kc.Namespace); err != nil { kcLogger.Error(err, "Failed to verify existence of superuserSecret") return ctrl.Result{}, err } + if err := r.reconcileReaperSecrets(ctx, kc, kcLogger); err != nil { + return ctrl.Result{}, err + } + if err := secret.ReconcileReplicatedSecret(ctx, r.Client, r.Scheme, kc, kcLogger); err != nil { kcLogger.Error(err, "Failed to reconcile ReplicatedSecret") return ctrl.Result{}, err @@ -220,7 +227,7 @@ func (r *K8ssandraClusterReconciler) reconcile(ctx context.Context, kc *api.K8ss // Reconcile CassandraDatacenter objects only for _, dcTemplate := range kc.Spec.Cassandra.Datacenters { - if !secret.HasReplicatedSecrets(ctx, r.Client, kc.Spec.Cassandra.Cluster, kc.Namespace, dcTemplate.K8sContext) { + if !secret.HasReplicatedSecrets(ctx, r.Client, kc.Name, kc.Namespace, dcTemplate.K8sContext) { // ReplicatedSecret has not replicated yet, wait until it has kcLogger.Info("Waiting for secret replication") return ctrl.Result{RequeueAfter: r.ReconcilerConfig.DefaultDelay}, nil @@ -236,6 +243,10 @@ func (r *K8ssandraClusterReconciler) reconcile(ctx context.Context, kc *api.K8ss cassandra.AllowAlterRfDuringRangeMovement(dcConfig) } dcConfig.AdditionalSeeds = seeds + reaperTemplate := reaper.Coalesce(kc.Spec.Reaper.DeepCopy(), dcTemplate.Reaper.DeepCopy()) + if reaperTemplate != nil { + reaper.AddReaperSettingsToDcConfig(reaperTemplate, dcConfig) + } desiredDc, err := cassandra.NewDatacenter(kcKey, dcConfig) dcKey := types.NamespacedName{Namespace: desiredDc.Namespace, Name: desiredDc.Name} logger := kcLogger.WithValues("CassandraDatacenter", dcKey) @@ -325,23 +336,40 @@ func (r *K8ssandraClusterReconciler) reconcile(ctx context.Context, kc *api.K8ss } } - // Reconcile Stargate across all datacenters - stargateAuthSchemaReconciled := false + kcLogger.Info("All dcs reconciled") + + if kc.HasStargates() { + kcLogger.Info("Reconciling Stargate auth schema") + dcTemplate := kc.Spec.Cassandra.Datacenters[0] + if remoteClient, err := r.ClientCache.GetRemoteClient(dcTemplate.K8sContext); err != nil { + return ctrl.Result{}, err + } else if err := r.reconcileStargateAuthSchema(ctx, kc, actualDcs[0], remoteClient, kcLogger); err != nil { + return ctrl.Result{RequeueAfter: r.ReconcilerConfig.LongDelay}, err + } + } + + if kc.HasReapers() { + kcLogger.Info("Reconciling Reaper schema") + dcTemplate := kc.Spec.Cassandra.Datacenters[0] + if remoteClient, err := r.ClientCache.GetRemoteClient(dcTemplate.K8sContext); err != nil { + return ctrl.Result{}, err + } else if err := r.reconcileReaperSchema(ctx, kc, actualDcs[0], remoteClient, kcLogger); err != nil { + return ctrl.Result{RequeueAfter: r.ReconcilerConfig.LongDelay}, err + } + } + + // Reconcile Stargate and Reaper across all datacenters for i, dcTemplate := range kc.Spec.Cassandra.Datacenters { actualDc := actualDcs[i] dcKey := types.NamespacedName{Namespace: actualDc.Namespace, Name: actualDc.Name} logger := kcLogger.WithValues("CassandraDatacenter", dcKey) - - remoteClient, err := r.ClientCache.GetRemoteClient(dcTemplate.K8sContext) - if err != nil { + logger.Info("Reconciling Stargate and Reaper for dc " + actualDc.Name) + if remoteClient, err := r.ClientCache.GetRemoteClient(dcTemplate.K8sContext); err != nil { return ctrl.Result{}, err - 
} - - if err = remoteClient.Get(ctx, dcKey, actualDc); err == nil { - result, err := r.reconcileStargate(ctx, kc, dcTemplate, actualDc, logger, remoteClient, &stargateAuthSchemaReconciled) - if !result.IsZero() || err != nil { - return result, err - } + } else if result, err := r.reconcileStargate(ctx, kc, dcTemplate, actualDc, logger, remoteClient); !result.IsZero() || err != nil { + return result, err + } else if result, err := r.reconcileReaper(ctx, kc, dcTemplate, actualDc, logger, remoteClient); !result.IsZero() || err != nil { + return result, err } } @@ -356,7 +384,6 @@ func (r *K8ssandraClusterReconciler) reconcileStargate( actualDc *cassdcapi.CassandraDatacenter, logger logr.Logger, remoteClient client.Client, - stargateAuthSchemaReconciled *bool, ) (ctrl.Result, error) { stargateTemplate := dcTemplate.Stargate.Coalesce(kc.Spec.Stargate) @@ -369,13 +396,6 @@ func (r *K8ssandraClusterReconciler) reconcileStargate( if stargateTemplate != nil { - if !*stargateAuthSchemaReconciled { - if err := r.reconcileStargateAuthSchema(ctx, kc, actualDc, remoteClient, logger); err != nil { - return ctrl.Result{RequeueAfter: r.ReconcilerConfig.LongDelay}, err - } - *stargateAuthSchemaReconciled = true - } - desiredStargate := r.newStargate(stargateKey, kc, stargateTemplate, actualDc) desiredStargateHash := utils.DeepHashString(desiredStargate) desiredStargate.Annotations[api.ResourceHashAnnotation] = desiredStargateHash @@ -425,19 +445,16 @@ func (r *K8ssandraClusterReconciler) reconcileStargate( logger.Error(err, "Failed to get Stargate resource", "Stargate", stargateKey) return ctrl.Result{}, err } - } else { - if actualStargate.Labels[api.CreatedByLabel] == api.CreatedByLabelValueK8ssandraClusterController && - actualStargate.Labels[api.K8ssandraClusterLabel] == kc.Name { - if err := remoteClient.Delete(ctx, actualStargate); err != nil { - logger.Error(err, "Failed to delete Stargate resource", "Stargate", stargateKey) - return ctrl.Result{}, err - } else { - r.removeStargateStatus(kc, dcTemplate.Meta.Name) - logger.Info("Stargate deleted", "Stargate", stargateKey) - } + } else if utils.IsCreatedByK8ssandraController(actualStargate, kc.Name) { + if err := remoteClient.Delete(ctx, actualStargate); err != nil { + logger.Error(err, "Failed to delete Stargate resource", "Stargate", stargateKey) + return ctrl.Result{}, err } else { - logger.Info("Not deleting Stargate since it wasn't created by this controller", "Stargate", stargateKey) + r.removeStargateStatus(kc, dcTemplate.Meta.Name) + logger.Info("Stargate deleted", "Stargate", stargateKey) } + } else { + logger.Info("Not deleting Stargate since it wasn't created by this controller", "Stargate", stargateKey) } } return ctrl.Result{}, nil @@ -535,11 +552,14 @@ func (r *K8ssandraClusterReconciler) reconcileStargateAuthSchema( remoteClient client.Client, logger logr.Logger, ) error { - if managementApi, err := r.ManagementApi.NewManagementApiFacade(ctx, dc, remoteClient, logger); err != nil { - return err - } else { - return stargate.ReconcileAuthSchema(kc, dc, managementApi, logger) + managementApi, err := r.ManagementApi.NewManagementApiFacade(ctx, dc, remoteClient, logger) + if err == nil { + replication := cassandra.ComputeReplication(3, kc.Spec.Cassandra.Datacenters...) 
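+ // Assumption, judging from its use here and the updated mocks in the tests
+ // below: ComputeReplication presumably builds per-DC NetworkTopologyStrategy
+ // options with RF capped at 3 and at each datacenter's size, while
+ // EnsureKeyspaceReplication creates the keyspace when missing or alters its
+ // replication when it drifts, which would explain why the older
+ // CreateKeyspaceIfNotExists/AlterKeyspace mock expectations disappear.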
+ if err = managementApi.EnsureKeyspaceReplication(stargate.AuthKeyspace, replication); err == nil { + err = stargate.ReconcileAuthTable(managementApi, logger) + } } + return err } func (r *K8ssandraClusterReconciler) removeStargateStatus(kc *api.K8ssandraCluster, dcName string) { @@ -547,6 +567,7 @@ func (r *K8ssandraClusterReconciler) removeStargateStatus(kc *api.K8ssandraClust kc.Status.Datacenters[dcName] = api.K8ssandraStatus{ Stargate: nil, Cassandra: kdcStatus.Cassandra.DeepCopy(), + Reaper: kdcStatus.Reaper.DeepCopy(), } } } @@ -558,10 +579,9 @@ func (r *K8ssandraClusterReconciler) SetupWithManager(mgr ctrl.Manager, clusters clusterLabelFilter := func(mapObj client.Object) []reconcile.Request { requests := make([]reconcile.Request, 0) - labels := mapObj.GetLabels() - cluster, found := labels[api.K8ssandraClusterLabel] - if found { - requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mapObj.GetNamespace(), Name: cluster}}) + k8cName := utils.GetLabel(mapObj, api.K8ssandraClusterLabel) + if k8cName != "" { + requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mapObj.GetNamespace(), Name: k8cName}}) } return requests } @@ -571,6 +591,8 @@ func (r *K8ssandraClusterReconciler) SetupWithManager(mgr ctrl.Manager, clusters handler.EnqueueRequestsFromMapFunc(clusterLabelFilter)) cb = cb.Watches(source.NewKindWithCache(&stargateapi.Stargate{}, c.GetCache()), handler.EnqueueRequestsFromMapFunc(clusterLabelFilter)) + cb = cb.Watches(source.NewKindWithCache(&reaperapi.Reaper{}, c.GetCache()), + handler.EnqueueRequestsFromMapFunc(clusterLabelFilter)) } return cb.Complete(r) diff --git a/controllers/k8ssandra/k8ssandracluster_controller_test.go b/controllers/k8ssandra/k8ssandracluster_controller_test.go index a695b3dce..896b5cfcf 100644 --- a/controllers/k8ssandra/k8ssandracluster_controller_test.go +++ b/controllers/k8ssandra/k8ssandracluster_controller_test.go @@ -99,6 +99,7 @@ func TestK8ssandraCluster(t *testing.T) { t.Run("ApplyDatacenterTemplateConfigs", testEnv.ControllerTest(ctx, applyDatacenterTemplateConfigs)) t.Run("ApplyClusterTemplateAndDatacenterTemplateConfigs", testEnv.ControllerTest(ctx, applyClusterTemplateAndDatacenterTemplateConfigs)) t.Run("CreateMultiDcClusterWithStargate", testEnv.ControllerTest(ctx, createMultiDcClusterWithStargate)) + t.Run("CreateMultiDcClusterWithReaper", testEnv.ControllerTest(ctx, createMultiDcClusterWithReaper)) } // createSingleDcCluster verifies that the CassandraDatacenter is created and that the @@ -1093,7 +1094,7 @@ func createMultiDcClusterWithStargate(t *testing.T, ctx context.Context, f *fram require.Eventually(f.StargateExists(ctx, sg1Key), timeout, interval) t.Logf("update stargate sg1 status to ready") - err = f.PatchStagateStatus(ctx, sg1Key, func(sg *stargateapi.Stargate) { + err = f.PatchStargateStatus(ctx, sg1Key, func(sg *stargateapi.Stargate) { now := metav1.Now() sg.Status.Progress = stargateapi.StargateProgressRunning sg.Status.AvailableReplicas = 1 @@ -1132,7 +1133,7 @@ func createMultiDcClusterWithStargate(t *testing.T, ctx context.Context, f *fram // require.NoError(err, "timed out waiting for remote seeds to be updated on dc1") t.Logf("update stargate sg2 status to ready") - err = f.PatchStagateStatus(ctx, sg2Key, func(sg *stargateapi.Stargate) { + err = f.PatchStargateStatus(ctx, sg2Key, func(sg *stargateapi.Stargate) { now := metav1.Now() sg.Status.Progress = stargateapi.StargateProgressRunning sg.Status.AvailableReplicas = 1 @@ -1270,7 
+1271,7 @@ func verifyReplicatedSecretReconciled(ctx context.Context, t *testing.T, f *fram t.Log("check ReplicatedSecret reconciled") replSecret := &replicationapi.ReplicatedSecret{} - replSecretKey := types.NamespacedName{Name: kc.Spec.Cassandra.Cluster, Namespace: kc.Namespace} + replSecretKey := types.NamespacedName{Name: kc.Name, Namespace: kc.Namespace} assert.Eventually(t, func() bool { err := f.Client.Get(ctx, replSecretKey, replSecret) @@ -1286,7 +1287,7 @@ func verifyReplicatedSecretReconciled(ctx context.Context, t *testing.T, f *fram assert.Equal(t, api.NameLabelValue, val) val, exists = replSecret.Labels[api.K8ssandraClusterLabel] assert.True(t, exists) - assert.Equal(t, kc.Spec.Cassandra.Cluster, val) + assert.Equal(t, kc.Name, val) assert.Equal(t, len(kc.Spec.Cassandra.Datacenters), len(replSecret.Spec.ReplicationTargets)) } @@ -1322,15 +1323,7 @@ type fakeManagementApiFactory struct { func (f fakeManagementApiFactory) NewManagementApiFacade(context.Context, *cassdcapi.CassandraDatacenter, client.Client, logr.Logger) (cassandra.ManagementApiFacade, error) { m := new(mocks.ManagementApiFacade) - m.On("CreateKeyspaceIfNotExists", stargate.AuthKeyspace, mock.Anything).Return(nil) - m.On("ListKeyspaces", stargate.AuthKeyspace).Return([]string{stargate.AuthKeyspace}, nil) - m.On("AlterKeyspace", stargate.AuthKeyspace, mock.Anything).Return(nil) - m.On("GetKeyspaceReplication", stargate.AuthKeyspace).Return( - map[string]string{ - "class": "org.apache.cassandra.locator.NetworkTopologyStrategy", - "dc1": "1", - }, - nil) + m.On("EnsureKeyspaceReplication", mock.Anything, mock.Anything).Return(nil) m.On("ListTables", stargate.AuthKeyspace).Return([]string{"token"}, nil) m.On("CreateTable", mock.MatchedBy(func(def *httphelper.TableDefinition) bool { return def.KeyspaceName == stargate.AuthKeyspace && def.TableName == stargate.AuthTable diff --git a/controllers/k8ssandra/reaper_reconciler.go b/controllers/k8ssandra/reaper_reconciler.go new file mode 100644 index 000000000..958a48b76 --- /dev/null +++ b/controllers/k8ssandra/reaper_reconciler.go @@ -0,0 +1,239 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package k8ssandra + +import ( + "context" + "github.com/go-logr/logr" + reaperapi "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" + "github.com/k8ssandra/k8ssandra-operator/pkg/cassandra" + "github.com/k8ssandra/k8ssandra-operator/pkg/reaper" + "github.com/k8ssandra/k8ssandra-operator/pkg/secret" + "github.com/k8ssandra/k8ssandra-operator/pkg/utils" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + cassdcapi "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1" + api "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" +) + +func (r *K8ssandraClusterReconciler) reconcileReaperSecrets( + ctx context.Context, + kc *api.K8ssandraCluster, + logger logr.Logger, +) error { + logger.Info("Reconciling Reaper user secrets") + if kc.Spec.Reaper != nil { + cassandraUserSecretRef := kc.Spec.Reaper.CassandraUserSecretRef + jmxUserSecretRef := kc.Spec.Reaper.JmxUserSecretRef + if cassandraUserSecretRef == "" { + cassandraUserSecretRef = reaper.DefaultUserSecretName(kc.Name) + } + if jmxUserSecretRef == "" { + jmxUserSecretRef = reaper.DefaultJmxUserSecretName(kc.Name) + } + logger = logger.WithValues( + "ReaperCassandraUserSecretRef", + cassandraUserSecretRef, + "ReaperJmxUserSecretRef", + jmxUserSecretRef, + ) + if err := secret.ReconcileSecret(ctx, r.Client, cassandraUserSecretRef, kc.Name, kc.Namespace); err != nil { + logger.Error(err, "Failed to reconcile Reaper CQL user secret") + return err + } + if err := secret.ReconcileSecret(ctx, r.Client, jmxUserSecretRef, kc.Name, kc.Namespace); err != nil { + logger.Error(err, "Failed to reconcile Reaper JMX user secret") + return err + } + } + logger.Info("Reaper user secrets successfully reconciled") + return nil +} + +func (r *K8ssandraClusterReconciler) reconcileReaperSchema( + ctx context.Context, + kc *api.K8ssandraCluster, + actualDc *cassdcapi.CassandraDatacenter, + remoteClient client.Client, + logger logr.Logger, +) error { + managementApiFacade, err := r.ManagementApi.NewManagementApiFacade(ctx, actualDc, remoteClient, logger) + if err != nil { + return err + } + keyspace := reaperapi.DefaultKeyspace + if kc.Spec.Reaper != nil && kc.Spec.Reaper.Keyspace != "" { + keyspace = kc.Spec.Reaper.Keyspace + } + return managementApiFacade.EnsureKeyspaceReplication( + keyspace, + cassandra.ComputeReplication(3, kc.Spec.Cassandra.Datacenters...), + ) +} + +func (r *K8ssandraClusterReconciler) reconcileReaper( + ctx context.Context, + kc *api.K8ssandraCluster, + dcTemplate api.CassandraDatacenterTemplate, + actualDc *cassdcapi.CassandraDatacenter, + logger logr.Logger, + remoteClient client.Client, +) (ctrl.Result, error) { + + reaperTemplate := reaper.Coalesce(kc.Spec.Reaper.DeepCopy(), dcTemplate.Reaper.DeepCopy()) + reaperKey := types.NamespacedName{ + Namespace: actualDc.Namespace, + Name: reaper.ResourceName(kc.Name, actualDc.Name), + } + logger = logger.WithValues("Reaper", reaperKey) + actualReaper := &reaperapi.Reaper{} + + if reaperTemplate != nil { + + logger.Info("Reaper present for DC " + actualDc.Name) + + desiredReaper := reaper.NewReaper(reaperKey, kc, actualDc, reaperTemplate) + + if err := remoteClient.Get(ctx, reaperKey, actualReaper); err != nil { + if errors.IsNotFound(err) { + logger.Info("Creating Reaper resource") + if err := remoteClient.Create(ctx, desiredReaper); err != nil { + logger.Error(err, "Failed to create Reaper resource") + return 
ctrl.Result{}, err + } else { + return ctrl.Result{RequeueAfter: r.DefaultDelay}, nil + } + } else { + logger.Error(err, "failed to retrieve reaper instance") + return ctrl.Result{RequeueAfter: r.DefaultDelay}, err + } + } + + actualReaper = actualReaper.DeepCopy() + + if err := r.setStatusForReaper(kc, actualReaper, dcTemplate.Meta.Name); err != nil { + logger.Error(err, "Failed to update status for reaper") + return ctrl.Result{}, err + } + + if !utils.CompareAnnotations(actualReaper, desiredReaper, api.ResourceHashAnnotation) { + logger.Info("Updating Reaper resource") + resourceVersion := actualReaper.GetResourceVersion() + desiredReaper.DeepCopyInto(actualReaper) + actualReaper.SetResourceVersion(resourceVersion) + if err := remoteClient.Update(ctx, actualReaper); err != nil { + logger.Error(err, "Failed to update Reaper resource") + return ctrl.Result{}, err + } + return ctrl.Result{RequeueAfter: r.DefaultDelay}, nil + } + + if !actualReaper.Status.IsReady() { + logger.Info("Waiting for Reaper to become ready") + return ctrl.Result{RequeueAfter: r.DefaultDelay}, nil + } + + logger.Info("Reaper is ready") + return ctrl.Result{}, nil + + } else { + + logger.Info("Reaper not present for DC " + actualDc.Name) + + // Test if Reaper was removed + if err := remoteClient.Get(ctx, reaperKey, actualReaper); err != nil { + if errors.IsNotFound(err) { + r.removeReaperStatus(kc, dcTemplate.Meta.Name) + } else { + logger.Error(err, "Failed to get Reaper resource") + return ctrl.Result{}, err + } + } else if utils.IsCreatedByK8ssandraController(actualReaper, kc.Name) { + if err = remoteClient.Delete(ctx, actualReaper); err != nil { + logger.Error(err, "Failed to delete Reaper resource") + return ctrl.Result{}, err + } else { + r.removeReaperStatus(kc, dcTemplate.Meta.Name) + logger.Info("Reaper deleted") + } + } else { + logger.Info("Not deleting Reaper since it wasn't created by this controller") + } + return ctrl.Result{}, nil + } +} + +func (r *K8ssandraClusterReconciler) deleteReapers( + ctx context.Context, + kc *api.K8ssandraCluster, + dcTemplate api.CassandraDatacenterTemplate, + namespace string, + remoteClient client.Client, + kcLogger logr.Logger, +) (hasErrors bool) { + selector := utils.CreatedByK8ssandraControllerLabels(kc.Name) + reaperList := &reaperapi.ReaperList{} + options := client.ListOptions{ + Namespace: namespace, + LabelSelector: labels.SelectorFromSet(selector), + } + if err := remoteClient.List(ctx, reaperList, &options); err != nil { + kcLogger.Error(err, "Failed to list Reaper objects", "Context", dcTemplate.K8sContext) + return true + } + for _, rp := range reaperList.Items { + if err := remoteClient.Delete(ctx, &rp); err != nil { + key := client.ObjectKey{Namespace: namespace, Name: rp.Name} + if !errors.IsNotFound(err) { + kcLogger.Error(err, "Failed to delete Reaper", "Reaper", key, + "Context", dcTemplate.K8sContext) + hasErrors = true + } + } + } + return +} + +func (r *K8ssandraClusterReconciler) setStatusForReaper(kc *api.K8ssandraCluster, reaper *reaperapi.Reaper, dcName string) error { + if len(kc.Status.Datacenters) == 0 { + kc.Status.Datacenters = make(map[string]api.K8ssandraStatus) + } + kdcStatus, found := kc.Status.Datacenters[dcName] + if found { + kdcStatus.Reaper = reaper.Status.DeepCopy() + kc.Status.Datacenters[dcName] = kdcStatus + } else { + kc.Status.Datacenters[dcName] = api.K8ssandraStatus{ + Reaper: reaper.Status.DeepCopy(), + } + } + return nil +} + +func (r *K8ssandraClusterReconciler) removeReaperStatus(kc *api.K8ssandraCluster, dcName 
string) { + if kdcStatus, found := kc.Status.Datacenters[dcName]; found { + kc.Status.Datacenters[dcName] = api.K8ssandraStatus{ + Reaper: nil, + Cassandra: kdcStatus.Cassandra.DeepCopy(), + Stargate: kdcStatus.Stargate.DeepCopy(), + } + } +} diff --git a/controllers/k8ssandra/reaper_reconciler_test.go b/controllers/k8ssandra/reaper_reconciler_test.go new file mode 100644 index 000000000..d99edd89d --- /dev/null +++ b/controllers/k8ssandra/reaper_reconciler_test.go @@ -0,0 +1,288 @@ +package k8ssandra + +import ( + "context" + "fmt" + cassdcapi "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1" + api "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" + reaperapi "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" + "github.com/k8ssandra/k8ssandra-operator/test/framework" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "testing" +) + +func createMultiDcClusterWithReaper(t *testing.T, ctx context.Context, f *framework.Framework, namespace string) { + require := require.New(t) + assert := assert.New(t) + + k8sCtx0 := "cluster-0" + k8sCtx1 := "cluster-1" + + kc := &api.K8ssandraCluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "test", + }, + Spec: api.K8ssandraClusterSpec{ + Cassandra: &api.CassandraClusterTemplate{ + Cluster: "test", + Datacenters: []api.CassandraDatacenterTemplate{ + { + Meta: api.EmbeddedObjectMeta{ + Name: "dc1", + }, + K8sContext: k8sCtx0, + Size: 3, + ServerVersion: "3.11.10", + StorageConfig: &cassdcapi.StorageConfig{ + CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{ + StorageClassName: &defaultStorageClass, + }, + }, + Reaper: &reaperapi.ReaperDatacenterTemplate{ + AutoScheduling: reaperapi.AutoScheduling{Enabled: true}, + }, + }, + { + Meta: api.EmbeddedObjectMeta{ + Name: "dc2", + }, + K8sContext: k8sCtx1, + Size: 3, + ServerVersion: "3.11.10", + StorageConfig: &cassdcapi.StorageConfig{ + CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{ + StorageClassName: &defaultStorageClass, + }, + }, + Reaper: &reaperapi.ReaperDatacenterTemplate{ + AutoScheduling: reaperapi.AutoScheduling{Enabled: true}, + }, + }, + }, + }, + }, + } + + err := f.Client.Create(ctx, kc) + require.NoError(err, "failed to create K8ssandraCluster") + + dc1PodIps := []string{"10.10.100.1", "10.10.100.2", "10.10.100.3"} + dc2PodIps := []string{"10.11.100.1", "10.11.100.2", "10.11.100.3"} + + allPodIps := make([]string, 0, 6) + allPodIps = append(allPodIps, dc1PodIps...) + allPodIps = append(allPodIps, dc2PodIps...) 
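+ // These IPs drive the fake seeds resolver configured below; dc1's pod IPs
+ // are expected to surface as dc2's AdditionalSeeds once dc1 is ready.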
+ + verifyDefaultSuperUserSecretCreated(ctx, t, f, kc) + + t.Log("check that dc1 was created") + dc1Key := framework.ClusterKey{NamespacedName: types.NamespacedName{Namespace: namespace, Name: "dc1"}, K8sContext: k8sCtx0} + require.Eventually(f.DatacenterExists(ctx, dc1Key), timeout, interval) + + t.Log("update datacenter status to scaling up") + err = f.PatchDatacenterStatus(ctx, dc1Key, func(dc *cassdcapi.CassandraDatacenter) { + dc.SetCondition(cassdcapi.DatacenterCondition{ + Type: cassdcapi.DatacenterScalingUp, + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Now(), + }) + }) + require.NoError(err, "failed to patch datacenter status") + + kcKey := framework.ClusterKey{K8sContext: k8sCtx0, NamespacedName: types.NamespacedName{Namespace: namespace, Name: "test"}} + + t.Log("check that the K8ssandraCluster status is updated") + require.Eventually(func() bool { + kc := &api.K8ssandraCluster{} + err = f.Get(ctx, kcKey, kc) + if err != nil { + t.Logf("failed to get K8ssandraCluster: %v", err) + return false + } + + if len(kc.Status.Datacenters) == 0 { + return false + } + + k8ssandraStatus, found := kc.Status.Datacenters[dc1Key.Name] + if !found { + t.Logf("status for datacenter %s not found", dc1Key) + return false + } + + condition := findDatacenterCondition(k8ssandraStatus.Cassandra, cassdcapi.DatacenterScalingUp) + return condition != nil && condition.Status == corev1.ConditionTrue + }, timeout, interval, "timed out waiting for K8ssandraCluster status update") + + reaper1Key := framework.ClusterKey{ + K8sContext: k8sCtx0, + NamespacedName: types.NamespacedName{ + Namespace: namespace, + Name: kc.Name + "-" + dc1Key.Name + "-reaper"}, + } + + t.Logf("check that reaper %s has not been created", reaper1Key) + reaper1 := &reaperapi.Reaper{} + err = f.Get(ctx, reaper1Key, reaper1) + require.True(err != nil && errors.IsNotFound(err), fmt.Sprintf("reaper %s should not be created until dc1 is ready", reaper1Key)) + + t.Log("check that dc2 has not been created yet") + dc2Key := framework.ClusterKey{NamespacedName: types.NamespacedName{Namespace: namespace, Name: "dc2"}, K8sContext: k8sCtx1} + dc2 := &cassdcapi.CassandraDatacenter{} + err = f.Get(ctx, dc2Key, dc2) + require.True(err != nil && errors.IsNotFound(err), "dc2 should not be created until dc1 is ready") + + seedsResolver.callback = func(dc *cassdcapi.CassandraDatacenter) ([]string, error) { + if dc.Name == "dc1" { + return dc1PodIps, nil + } + if dc.Name == "dc2" { + return dc2PodIps, nil + } + return nil, fmt.Errorf("unknown datacenter: %s", dc.Name) + } + + t.Log("update dc1 status to ready") + err = f.PatchDatacenterStatus(ctx, dc1Key, func(dc *cassdcapi.CassandraDatacenter) { + dc.Status.CassandraOperatorProgress = cassdcapi.ProgressReady + dc.SetCondition(cassdcapi.DatacenterCondition{ + Type: cassdcapi.DatacenterReady, + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Now(), + }) + }) + require.NoError(err, "failed to update dc1 status to ready") + + t.Log("check that dc2 was created") + require.Eventually(f.DatacenterExists(ctx, dc2Key), timeout, interval) + + t.Log("check that remote seeds are set on dc2") + dc2 = &cassdcapi.CassandraDatacenter{} + err = f.Get(ctx, dc2Key, dc2) + require.NoError(err, "failed to get dc2") + + assert.Equal(dc1PodIps, dc2.Spec.AdditionalSeeds, "The AdditionalSeeds property for dc2 is wrong") + + reaper2Key := framework.ClusterKey{ + K8sContext: k8sCtx1, + NamespacedName: types.NamespacedName{ + Namespace: namespace, + Name: kc.Name + "-" + dc2Key.Name + "-reaper"}, + } +
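+ // Both keys follow the controller's reaper.ResourceName(kc.Name, dc.Name)
+ // convention, which, judging by the names expected here, presumably amounts
+ // to clusterName + "-" + dcName + "-reaper".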
+ t.Log("update dc2 status to ready") + err = f.PatchDatacenterStatus(ctx, dc2Key, func(dc *cassdcapi.CassandraDatacenter) { + dc.Status.CassandraOperatorProgress = cassdcapi.ProgressReady + dc.SetCondition(cassdcapi.DatacenterCondition{ + Type: cassdcapi.DatacenterReady, + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Now(), + }) + }) + require.NoError(err, "failed to update dc2 status to ready") + + t.Log("check that reaper reaper1 is created") + require.Eventually(f.ReaperExists(ctx, reaper1Key), timeout, interval) + + t.Logf("update reaper reaper1 status to ready") + err = f.PatchReaperStatus(ctx, reaper1Key, func(reaper *reaperapi.Reaper) { + reaper.Status.Progress = reaperapi.ReaperProgressRunning + reaper.Status.SetReady() + }) + require.NoError(err, "failed to patch reaper status") + + t.Log("check that reaper reaper2 is created") + require.Eventually(f.ReaperExists(ctx, reaper2Key), timeout, interval) + + t.Logf("update reaper reaper2 status to ready") + err = f.PatchReaperStatus(ctx, reaper2Key, func(reaper *reaperapi.Reaper) { + reaper.Status.Progress = reaperapi.ReaperProgressRunning + reaper.Status.SetReady() + }) + require.NoError(err, "failed to patch reaper status") + + t.Log("check that the K8ssandraCluster status is updated") + require.Eventually(func() bool { + kc := &api.K8ssandraCluster{} + err = f.Get(ctx, kcKey, kc) + if err != nil { + t.Logf("failed to get K8ssandraCluster: %v", err) + return false + } + + if len(kc.Status.Datacenters) != 2 { + return false + } + + k8ssandraStatus, found := kc.Status.Datacenters[dc1Key.Name] + if !found { + t.Logf("status for datacenter %s not found", dc1Key) + return false + } + + condition := findDatacenterCondition(k8ssandraStatus.Cassandra, cassdcapi.DatacenterReady) + if condition == nil || condition.Status == corev1.ConditionFalse { + t.Logf("k8ssandracluster status check failed: cassandra in %s is not ready", dc1Key.Name) + return false + } + + if k8ssandraStatus.Reaper == nil || !k8ssandraStatus.Reaper.IsReady() { + t.Logf("k8ssandracluster status check failed: reaper in %s is not ready", dc1Key.Name) + } + + k8ssandraStatus, found = kc.Status.Datacenters[dc2Key.Name] + if !found { + t.Logf("status for datacenter %s not found", dc2Key) + return false + } + + condition = findDatacenterCondition(k8ssandraStatus.Cassandra, cassdcapi.DatacenterReady) + if condition == nil || condition.Status == corev1.ConditionFalse { + t.Logf("k8ssandracluster status check failed: cassandra in %s is not ready", dc2Key.Name) + return false + } + + if k8ssandraStatus.Reaper == nil || !k8ssandraStatus.Reaper.IsReady() { + t.Logf("k8ssandracluster status check failed: reaper in %s is not ready", dc2Key.Name) + return false + } + + return true + }, timeout, interval, "timed out waiting for K8ssandraCluster status update") + + t.Log("remove both reapers from kc spec") + err = f.Get(ctx, kcKey, kc) + patch := client.MergeFromWithOptions(kc.DeepCopy(), client.MergeFromWithOptimisticLock{}) + kc.Spec.Cassandra.Datacenters[0].Reaper = nil + kc.Spec.Cassandra.Datacenters[1].Reaper = nil + err = f.Client.Patch(ctx, kc, patch) + require.NoError(err, "failed to update K8ssandraCluster") + + t.Log("check that reaper reaper1 is deleted") + require.Eventually(func() bool { + err = f.Get(ctx, reaper1Key, &reaperapi.Reaper{}) + return errors.IsNotFound(err) + }, timeout, interval) + + t.Log("check that reaper reaper2 is deleted") + require.Eventually(func() bool { + err = f.Get(ctx, reaper2Key, &reaperapi.Reaper{}) + return errors.IsNotFound(err) + 
}, timeout, interval) + + t.Log("check that kc status is updated") + require.Eventually(func() bool { + err = f.Get(ctx, kcKey, kc) + require.NoError(err, "failed to get K8ssandraCluster") + return kc.Status.Datacenters[dc1Key.Name].Reaper == nil && + kc.Status.Datacenters[dc2Key.Name].Reaper == nil + }, timeout, interval) + +} diff --git a/controllers/reaper/reaper_controller.go b/controllers/reaper/reaper_controller.go new file mode 100644 index 000000000..d8380676e --- /dev/null +++ b/controllers/reaper/reaper_controller.go @@ -0,0 +1,347 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reaper + +import ( + "context" + "github.com/go-logr/logr" + cassdcapi "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1" + k8ssandraapi "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" + reaperapi "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" + "github.com/k8ssandra/k8ssandra-operator/pkg/cassandra" + "github.com/k8ssandra/k8ssandra-operator/pkg/config" + "github.com/k8ssandra/k8ssandra-operator/pkg/reaper" + "github.com/k8ssandra/k8ssandra-operator/pkg/utils" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// ReaperReconciler reconciles a Reaper object +type ReaperReconciler struct { + *config.ReconcilerConfig + client.Client + Scheme *runtime.Scheme + NewManager func() reaper.Manager +} + +// +kubebuilder:rbac:groups=reaper.k8ssandra.io,namespace="k8ssandra",resources=reapers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=reaper.k8ssandra.io,namespace="k8ssandra",resources=reapers/status,verbs=get;update;patch +// +kubebuilder:rbac:groups="apps",namespace="k8ssandra",resources=deployments,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="core",namespace="k8ssandra",resources=pods;secrets,verbs=get;list;watch +// +kubebuilder:rbac:groups="core",namespace="k8ssandra",resources=services,verbs=get;list;watch;create + +func (r *ReaperReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx, "Reaper", req.NamespacedName) + + logger.Info("Starting Reaper reconciliation") + + // Fetch the Reaper instance + actualReaper := &reaperapi.Reaper{} + if err := r.Get(ctx, req.NamespacedName, actualReaper); err != nil { + if errors.IsNotFound(err) { + logger.Info("Reaper resource not found") + return ctrl.Result{}, nil + } + logger.Info("Failed to fetch Reaper resource") + return ctrl.Result{RequeueAfter: r.DefaultDelay}, err + } + + actualReaper = actualReaper.DeepCopy() + patch := client.MergeFromWithOptions(actualReaper.DeepCopy()) + + result, err := 
r.reconcile(ctx, actualReaper, logger) + + if patchErr := r.Status().Patch(ctx, actualReaper, patch); patchErr != nil { + logger.Error(patchErr, "Failed to update Reaper status") + } else { + logger.Info("Updated Reaper status") + } + + return result, err +} + +func (r *ReaperReconciler) reconcile(ctx context.Context, actualReaper *reaperapi.Reaper, logger logr.Logger) (ctrl.Result, error) { + + actualReaper.Status.Progress = reaperapi.ReaperProgressPending + actualReaper.Status.SetNotReady() + + actualDc, result, err := r.reconcileDatacenter(ctx, actualReaper, logger) + if !result.IsZero() || err != nil { + return result, err + } + + actualReaper.Status.Progress = reaperapi.ReaperProgressDeploying + + if result, err = r.reconcileDeployment(ctx, actualReaper, actualDc, logger); !result.IsZero() || err != nil { + return result, err + } + + if result, err = r.reconcileService(ctx, actualReaper, logger); !result.IsZero() || err != nil { + return result, err + } + + actualReaper.Status.Progress = reaperapi.ReaperProgressConfiguring + + if result, err = r.configureReaper(ctx, actualReaper, actualDc, logger); !result.IsZero() || err != nil { + return result, err + } + + actualReaper.Status.Progress = reaperapi.ReaperProgressRunning + actualReaper.Status.SetReady() + + logger.Info("Reaper successfully reconciled") + return ctrl.Result{}, nil +} + +func (r *ReaperReconciler) reconcileDatacenter( + ctx context.Context, + actualReaper *reaperapi.Reaper, + logger logr.Logger, +) (*cassdcapi.CassandraDatacenter, ctrl.Result, error) { + dcNamespace := actualReaper.Spec.DatacenterRef.Namespace + if dcNamespace == "" { + dcNamespace = actualReaper.Namespace + } + dcKey := client.ObjectKey{Namespace: dcNamespace, Name: actualReaper.Spec.DatacenterRef.Name} + logger = logger.WithValues("CassandraDatacenter", dcKey) + logger.Info("Fetching CassandraDatacenter resource") + actualDc := &cassdcapi.CassandraDatacenter{} + if err := r.Get(ctx, dcKey, actualDc); err != nil { + if errors.IsNotFound(err) { + logger.Info("Waiting for datacenter to be created") + return nil, ctrl.Result{RequeueAfter: r.DefaultDelay}, nil + } else { + logger.Error(err, "Failed to fetch CassandraDatacenter") + return nil, ctrl.Result{}, err + } + } + actualDc = actualDc.DeepCopy() + if !cassandra.DatacenterReady(actualDc) { + logger.Info("Waiting for datacenter to become ready") + return nil, ctrl.Result{RequeueAfter: r.DefaultDelay}, nil + } + return actualDc, ctrl.Result{}, nil +} + +func (r *ReaperReconciler) reconcileDeployment( + ctx context.Context, + actualReaper *reaperapi.Reaper, + actualDc *cassdcapi.CassandraDatacenter, + logger logr.Logger, +) (ctrl.Result, error) { + + deploymentKey := types.NamespacedName{Namespace: actualReaper.Namespace, Name: actualReaper.Name} + logger = logger.WithValues("Deployment", deploymentKey) + logger.Info("Reconciling Reaper Deployment") + + authVars, err := r.collectAuthVars(ctx, actualReaper, logger) + if err != nil { + logger.Error(err, "Failed to collect Reaper auth variables") + return ctrl.Result{RequeueAfter: r.DefaultDelay}, err + } + + desiredDeployment := reaper.NewDeployment(actualReaper, actualDc, authVars...) 
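+ // NewDeployment presumably stamps the desired object with a hash of its spec
+ // under k8ssandraapi.ResourceHashAnnotation, mirroring the Stargate path that
+ // uses utils.DeepHashString; the CompareAnnotations check below then detects
+ // drift by comparing that single annotation instead of diffing the whole spec.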
+ + actualDeployment := &appsv1.Deployment{} + if err := r.Get(ctx, deploymentKey, actualDeployment); err != nil { + if errors.IsNotFound(err) { + if err = controllerutil.SetControllerReference(actualReaper, desiredDeployment, r.Scheme); err != nil { + logger.Error(err, "Failed to set owner on Reaper Deployment") + return ctrl.Result{RequeueAfter: r.DefaultDelay}, err + } else if err = r.Create(ctx, desiredDeployment); err != nil { + if errors.IsAlreadyExists(err) { + // the read from the local cache didn't catch that the resource was created + // already; simply requeue until the cache is up-to-date + return ctrl.Result{Requeue: true}, nil + } else { + logger.Error(err, "Failed to create Reaper Deployment") + return ctrl.Result{RequeueAfter: r.DefaultDelay}, err + } + } + logger.Info("Reaper Deployment created successfully") + return ctrl.Result{RequeueAfter: r.DefaultDelay}, nil + } else { + logger.Error(err, "Failed to get Reaper Deployment") + return ctrl.Result{RequeueAfter: r.DefaultDelay}, err + } + } + + actualDeployment = actualDeployment.DeepCopy() + + // Check if the deployment needs to be updated + if !utils.CompareAnnotations(actualDeployment, desiredDeployment, k8ssandraapi.ResourceHashAnnotation) { + logger.Info("Updating Reaper Deployment") + resourceVersion := actualDeployment.GetResourceVersion() + desiredDeployment.DeepCopyInto(actualDeployment) + actualDeployment.SetResourceVersion(resourceVersion) + if err := controllerutil.SetControllerReference(actualReaper, actualDeployment, r.Scheme); err != nil { + logger.Error(err, "Failed to set controller reference on updated Reaper Deployment") + return ctrl.Result{RequeueAfter: r.DefaultDelay}, err + } else if err := r.Update(ctx, actualDeployment); err != nil { + logger.Error(err, "Failed to update Reaper Deployment") + return ctrl.Result{RequeueAfter: r.DefaultDelay}, err + } else { + logger.Info("Reaper Deployment updated successfully") + return ctrl.Result{RequeueAfter: r.DefaultDelay}, nil + } + } + + logger.Info("Reaper Deployment ready") + return ctrl.Result{}, nil +} + +func (r *ReaperReconciler) reconcileService( + ctx context.Context, + actualReaper *reaperapi.Reaper, + logger logr.Logger, +) (ctrl.Result, error) { + serviceKey := types.NamespacedName{Namespace: actualReaper.Namespace, Name: reaper.GetServiceName(actualReaper.Name)} + logger = logger.WithValues("Service", serviceKey) + logger.Info("Reconciling Reaper Service") + desiredService := reaper.NewService(serviceKey, actualReaper) + actualService := &corev1.Service{} + if err := r.Client.Get(ctx, serviceKey, actualService); err != nil { + if errors.IsNotFound(err) { + // create the service + if err = controllerutil.SetControllerReference(actualReaper, desiredService, r.Scheme); err != nil { + logger.Error(err, "Failed to set controller reference on Reaper Service") + return ctrl.Result{RequeueAfter: r.DefaultDelay}, err + } + logger.Info("Creating Reaper service") + if err = r.Client.Create(ctx, desiredService); err != nil { + if errors.IsAlreadyExists(err) { + // the read from the local cache didn't catch that the resource was created + // already; simply requeue until the cache is up-to-date + return ctrl.Result{Requeue: true}, nil + } else { + logger.Error(err, "Failed to create Reaper Service") + return ctrl.Result{RequeueAfter: r.DefaultDelay}, err + } + } + logger.Info("Reaper Service created successfully") + return ctrl.Result{}, nil + } else { + logger.Error(err, "Failed to get Reaper Service") + return ctrl.Result{RequeueAfter: r.DefaultDelay}, err 
+ } + } + if !utils.CompareAnnotations(actualService, desiredService, k8ssandraapi.ResourceHashAnnotation) { + logger.Info("Updating Reaper Service") + updatedService := actualService.DeepCopy() + desiredService.DeepCopyInto(updatedService) + updatedService.SetResourceVersion(actualService.GetResourceVersion()) + updatedService.Spec.ClusterIP = actualService.Spec.ClusterIP + updatedService.Spec.ClusterIPs = actualService.Spec.ClusterIPs + if err := controllerutil.SetControllerReference(actualReaper, updatedService, r.Scheme); err != nil { + logger.Error(err, "Failed to set controller reference on updated Reaper Service") + return ctrl.Result{RequeueAfter: r.DefaultDelay}, err + } else if err := r.Update(ctx, updatedService); err != nil { + logger.Error(err, "Failed to update Reaper Service") + return ctrl.Result{RequeueAfter: r.DefaultDelay}, err + } else { + logger.Info("Reaper Service updated successfully") + return ctrl.Result{}, nil + } + } + logger.Info("Reaper Service is ready") + return ctrl.Result{}, nil +} + +func (r *ReaperReconciler) configureReaper(ctx context.Context, actualReaper *reaperapi.Reaper, actualDc *cassdcapi.CassandraDatacenter, logger logr.Logger) (ctrl.Result, error) { + manager := r.NewManager() + if err := manager.Connect(actualReaper); err != nil { + logger.Error(err, "failed to connect to reaper instance") + return ctrl.Result{RequeueAfter: r.DefaultDelay}, err + } else if found, err := manager.VerifyClusterIsConfigured(ctx, actualDc); err != nil { + logger.Error(err, "failed to verify the cluster is registered with reaper") + return ctrl.Result{RequeueAfter: r.DefaultDelay}, err + } else if !found { + logger.Info("registering cluster with reaper") + if err = manager.AddClusterToReaper(ctx, actualDc); err != nil { + logger.Error(err, "failed to register cluster with reaper") + return ctrl.Result{RequeueAfter: r.DefaultDelay}, err + } + } + return ctrl.Result{}, nil +} + +func (r *ReaperReconciler) collectAuthVars(ctx context.Context, actualReaper *reaperapi.Reaper, logger logr.Logger) ([]*corev1.EnvVar, error) { + cqlVars, err := r.collectCqlAuthVars(ctx, actualReaper, logger) + if err != nil { + return nil, err + } + jmxVars, err := r.collectJmxAuthVars(ctx, actualReaper, logger) + if err != nil { + return nil, err + } + return append(cqlVars, jmxVars...), nil +} + +func (r *ReaperReconciler) collectCqlAuthVars(ctx context.Context, actualReaper *reaperapi.Reaper, logger logr.Logger) ([]*corev1.EnvVar, error) { + if len(actualReaper.Spec.CassandraUserSecretRef) > 0 { + secretKey := types.NamespacedName{Namespace: actualReaper.Namespace, Name: actualReaper.Spec.CassandraUserSecretRef} + if secret, err := r.getSecret(ctx, secretKey); err != nil { + logger.Error(err, "Failed to get Cassandra authentication secret", "CassandraUserSecretName", secretKey) + return nil, err + } else if usernameEnvVar, passwordEnvVar, err := reaper.GetCassandraAuthEnvironmentVars(secret); err != nil { + logger.Error(err, "Failed to get Cassandra authentication env vars", "CassandraUserSecretName", secretKey) + return nil, err + } else { + return []*corev1.EnvVar{usernameEnvVar, passwordEnvVar, reaper.EnableCassAuthVar}, nil + } + } + return nil, nil +} + +func (r *ReaperReconciler) collectJmxAuthVars(ctx context.Context, actualReaper *reaperapi.Reaper, logger logr.Logger) ([]*corev1.EnvVar, error) { + if len(actualReaper.Spec.JmxUserSecretRef) > 0 { + secretKey := types.NamespacedName{Namespace: actualReaper.Namespace, Name: actualReaper.Spec.JmxUserSecretRef} + if secret, err := 
r.getSecret(ctx, secretKey); err != nil { + logger.Error(err, "Failed to get JMX authentication secret", "JmxUserSecretName", secretKey) + return nil, err + } else if usernameEnvVar, passwordEnvVar, err := reaper.GetJmxAuthEnvironmentVars(secret); err != nil { + logger.Error(err, "Failed to get JMX authentication env vars", "JmxUserSecretName", secretKey) + return nil, err + } else { + return []*corev1.EnvVar{usernameEnvVar, passwordEnvVar}, nil + } + } + return nil, nil +} + +func (r *ReaperReconciler) getSecret(ctx context.Context, secretKey types.NamespacedName) (*corev1.Secret, error) { + secret := &corev1.Secret{} + err := r.Get(ctx, secretKey, secret) + return secret, err +} + +func (r *ReaperReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&reaperapi.Reaper{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). + Owns(&appsv1.Deployment{}). + Owns(&corev1.Service{}). + Complete(r) +} diff --git a/controllers/reaper/reaper_controller_test.go b/controllers/reaper/reaper_controller_test.go new file mode 100644 index 000000000..f102407d1 --- /dev/null +++ b/controllers/reaper/reaper_controller_test.go @@ -0,0 +1,396 @@ +package reaper + +import ( + "context" + cassdcapi "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1" + k8ssandraapi "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" + reaperapi "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" + "github.com/k8ssandra/k8ssandra-operator/pkg/config" + "github.com/k8ssandra/k8ssandra-operator/pkg/mocks" + "github.com/k8ssandra/k8ssandra-operator/pkg/reaper" + testutils "github.com/k8ssandra/k8ssandra-operator/pkg/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/rand" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + "testing" + "time" +) + +const ( + reaperName = "test-reaper" + cassandraClusterName = "test-cluster" + cassandraDatacenterName = "test-dc" + + timeout = time.Second * 5 + interval = time.Millisecond * 250 +) + +func TestReaper(t *testing.T) { + ctx := testutils.TestSetup(t) + ctx, cancel := context.WithCancel(ctx) + testEnv := &testutils.TestEnv{} + err := testEnv.Start(ctx, t, func(mgr manager.Manager) error { + err := (&ReaperReconciler{ + ReconcilerConfig: config.InitConfig(), + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + NewManager: newMockManager, + }).SetupWithManager(mgr) + return err + }) + if err != nil { + t.Fatalf("failed to start test environment: %s", err) + } + + defer testEnv.Stop(t) + defer cancel() + + t.Run("CreateReaper", reaperControllerTest(ctx, testEnv, testCreateReaper)) + t.Run("CreateReaperWithExistingObjects", reaperControllerTest(ctx, testEnv, testCreateReaperWithExistingObjects)) + t.Run("CreateReaperWithAutoSchedulingEnabled", reaperControllerTest(ctx, testEnv, testCreateReaperWithAutoSchedulingEnabled)) + t.Run("CreateReaperWithAuthEnabled", reaperControllerTest(ctx, testEnv, testCreateReaperWithAuthEnabled)) +} + +func newMockManager() reaper.Manager { + m := new(mocks.ReaperManager) + m.On("Connect", mock.Anything).Return(nil) + m.On("AddClusterToReaper", mock.Anything, mock.Anything).Return(nil) + m.On("VerifyClusterIsConfigured", mock.Anything, mock.Anything).Return(true, nil) + return m +} + 
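+ // Inferred from the expectations above and from configureReaper in the
+ // controller, the reaper.Manager interface that mocks.ReaperManager stands in
+ // for presumably looks like this (an assumption, not the package's actual code):
+ //
+ //	type Manager interface {
+ //		Connect(reaper *reaperapi.Reaper) error
+ //		VerifyClusterIsConfigured(ctx context.Context, dc *cassdcapi.CassandraDatacenter) (bool, error)
+ //		AddClusterToReaper(ctx context.Context, dc *cassdcapi.CassandraDatacenter) error
+ //	}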
+func reaperControllerTest(ctx context.Context, env *testutils.TestEnv, test func(t *testing.T, ctx context.Context, k8sClient client.Client, testNamespace string)) func(t *testing.T) { + return func(t *testing.T) { + testNamespace := "ns-" + rand.String(6) + beforeTest(t, ctx, env.TestClient, testNamespace) + test(t, ctx, env.TestClient, testNamespace) + } +} + +func beforeTest(t *testing.T, ctx context.Context, k8sClient client.Client, testNamespace string) { + + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}} + err := k8sClient.Create(ctx, ns) + require.NoError(t, err) + + testDc := &cassdcapi.CassandraDatacenter{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNamespace, + Name: cassandraDatacenterName, + }, + Spec: cassdcapi.CassandraDatacenterSpec{ + ClusterName: cassandraClusterName, + ServerType: "cassandra", + ServerVersion: "3.11.7", + Size: 3, + }, + } + err = k8sClient.Create(ctx, testDc) + require.NoError(t, err) + + patchCassdc := client.MergeFrom(testDc.DeepCopy()) + testDc.Status.CassandraOperatorProgress = cassdcapi.ProgressReady + testDc.Status.Conditions = []cassdcapi.DatacenterCondition{{ + Status: corev1.ConditionTrue, + Type: cassdcapi.DatacenterReady, + }} + + err = k8sClient.Status().Patch(ctx, testDc, patchCassdc) + require.NoError(t, err) + + cassdcKey := types.NamespacedName{Namespace: testNamespace, Name: cassandraDatacenterName} + cassdc := &cassdcapi.CassandraDatacenter{} + assert.Eventually(t, func() bool { + err := k8sClient.Get(ctx, cassdcKey, cassdc) + if err != nil { + return false + } + return cassdc.Status.CassandraOperatorProgress == cassdcapi.ProgressReady + }, timeout, interval) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cassdc-pod1", + Namespace: testNamespace, + Labels: map[string]string{ + cassdcapi.ClusterLabel: cassandraClusterName, + cassdcapi.DatacenterLabel: cassandraDatacenterName, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "cassandra", + Image: "k8ssandra/cassandra-nothere:latest", + }}, + }, + } + + err = k8sClient.Create(ctx, pod) + require.NoError(t, err) + + podIP := "127.0.0.1" + + patchPod := client.MergeFrom(pod.DeepCopy()) + pod.Status = corev1.PodStatus{ + PodIP: podIP, + PodIPs: []corev1.PodIP{{IP: podIP}}} + err = k8sClient.Status().Patch(ctx, pod, patchPod) + require.NoError(t, err) + + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dc-test-dc-all-pods-service", + Namespace: testNamespace, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{Name: "mgmt-api-http", Port: int32(8080)}}, + Selector: map[string]string{ + cassdcapi.ClusterLabel: cassandraClusterName, + cassdcapi.DatacenterLabel: cassandraDatacenterName, + }, + }, + } + err = k8sClient.Create(ctx, service) + require.NoError(t, err) +} + +func testCreateReaper(t *testing.T, ctx context.Context, k8sClient client.Client, testNamespace string) { + rpr := newReaper(testNamespace) + err := k8sClient.Create(ctx, rpr) + require.NoError(t, err) + + t.Log("check that the service is created") + serviceKey := types.NamespacedName{Namespace: testNamespace, Name: reaper.GetServiceName(rpr.Name)} + service := &corev1.Service{} + + require.Eventually(t, func() bool { + return k8sClient.Get(ctx, serviceKey, service) == nil + }, timeout, interval, "service creation check failed") + + assert.Len(t, service.OwnerReferences, 1, "service owner reference not set") + assert.Equal(t, rpr.UID, service.OwnerReferences[0].UID, "service owner reference has wrong uid") 
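+ // The owner reference ties the Service's lifecycle to the Reaper object: Kubernetes garbage-collects + // the Service when the Reaper is deleted, and events on owned objects are routed back to the reconciler + // through the Owns() watches registered in SetupWithManager.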
+ + t.Log("check that the deployment is created") + deploymentKey := types.NamespacedName{Namespace: testNamespace, Name: reaperName} + deployment := &appsv1.Deployment{} + + require.Eventually(t, func() bool { + return k8sClient.Get(ctx, deploymentKey, deployment) == nil + }, timeout, interval, "deployment creation check failed") + + assert.Len(t, deployment.OwnerReferences, 1, "deployment owner reference not set") + assert.Equal(t, rpr.UID, deployment.OwnerReferences[0].UID, "deployment owner reference has wrong uid") + + t.Log("update deployment to be ready") + patchDeploymentStatus(t, ctx, deployment, 1, 1, k8sClient) + + verifyReaperReady(t, ctx, k8sClient, testNamespace) + + // Now simulate the Reaper app entering a state in which its readiness probe fails. This + // should cause the deployment to have its status updated. The Reaper object's .Status.Ready + // field should subsequently be updated. + t.Log("update deployment to be not ready") + patchDeploymentStatus(t, ctx, deployment, 1, 0, k8sClient) + + reaperKey := types.NamespacedName{Namespace: testNamespace, Name: reaperName} + updatedReaper := &reaperapi.Reaper{} + require.Eventually(t, func() bool { + err := k8sClient.Get(ctx, reaperKey, updatedReaper) + if err != nil { + return false + } + return updatedReaper.Status.IsReady() + }, timeout, interval, "reaper status should have been updated") +} + +// The purpose of this test is to cover code paths where an object, e.g., the +// deployment already exists. This could happen after a failed reconciliation and +// the request gets requeued. +func testCreateReaperWithExistingObjects(t *testing.T, ctx context.Context, k8sClient client.Client, testNamespace string) { + + t.Log("create the service") + serviceKey := types.NamespacedName{Namespace: testNamespace, Name: reaper.GetServiceName(reaperName)} + // We can use a fake service here with only the required properties set. Since the service already + // exists, the reconciler should continue its work. There are unit tests to verify that the service + // is created as expected. + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: serviceKey.Namespace, + Name: serviceKey.Name, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{ + Name: "fake-port", + Protocol: corev1.ProtocolTCP, + Port: 8888, + }, + }}, + } + err := k8sClient.Create(ctx, service) + require.NoError(t, err) + + t.Log("create the deployment") + // We can use a fake deployment here with only the required properties set. Since the deployment + // already exists, the reconciler will just check that it is ready. There are unit tests to + // verify that the deployment is created as expected. 
+ labels := map[string]string{ + reaperapi.ReaperLabel: reaperName, + k8ssandraapi.ManagedByLabel: k8ssandraapi.NameLabelValue, + } + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNamespace, + Name: reaperName, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: k8ssandraapi.ManagedByLabel, + Operator: metav1.LabelSelectorOpIn, + Values: []string{k8ssandraapi.NameLabelValue}, + }, + { + Key: reaperapi.ReaperLabel, + Operator: metav1.LabelSelectorOpIn, + Values: []string{reaperName}, + }, + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "fake-deployment", + Image: "fake-deployment:test", + }}, + }, + }, + }, + } + err = k8sClient.Create(ctx, deployment) + require.NoError(t, err) + + // We need to mock the deployment being ready in order for Reaper status to be updated + t.Log("update deployment to be ready") + patchDeploymentStatus(t, ctx, deployment, 1, 1, k8sClient) + + t.Log("create the Reaper object") + rpr := newReaper(testNamespace) + err = k8sClient.Create(ctx, rpr) + require.NoError(t, err) + + verifyReaperReady(t, ctx, k8sClient, testNamespace) +} + +func testCreateReaperWithAutoSchedulingEnabled(t *testing.T, ctx context.Context, k8sClient client.Client, testNamespace string) { + t.Log("create the Reaper object") + rpr := newReaper(testNamespace) + rpr.Spec.AutoScheduling = reaperapi.AutoScheduling{ + Enabled: true, + } + err := k8sClient.Create(ctx, rpr) + require.NoError(t, err) + + t.Log("check that the deployment is created") + deploymentKey := types.NamespacedName{Namespace: testNamespace, Name: reaperName} + deployment := &appsv1.Deployment{} + + require.Eventually(t, func() bool { + return k8sClient.Get(ctx, deploymentKey, deployment) == nil + }, timeout, interval, "deployment creation check failed") + + assert.Len(t, deployment.Spec.Template.Spec.Containers, 1) + + autoSchedulingEnabled := false + for _, env := range deployment.Spec.Template.Spec.Containers[0].Env { + if env.Name == "REAPER_AUTO_SCHEDULING_ENABLED" && env.Value == "true" { + autoSchedulingEnabled = true + } + } + assert.True(t, autoSchedulingEnabled) +} + +func testCreateReaperWithAuthEnabled(t *testing.T, ctx context.Context, k8sClient client.Client, testNamespace string) { + t.Log("creating a secret") + secret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNamespace, + Name: "top-secret-cass", + }, + Data: map[string][]byte{ + "username": []byte("bond"), + "password": []byte("james"), + }, + } + err := k8sClient.Create(ctx, &secret) + require.NoError(t, err) + + t.Log("create the Reaper object and modify it") + rpr := newReaper(testNamespace) + rpr.Spec.CassandraUserSecretRef = "top-secret-cass" + err = k8sClient.Create(ctx, rpr) + require.NoError(t, err) + + t.Log("check that the deployment is created") + deploymentKey := types.NamespacedName{Namespace: testNamespace, Name: reaperName} + deployment := &appsv1.Deployment{} + + require.Eventually(t, func() bool { + return k8sClient.Get(ctx, deploymentKey, deployment) == nil + }, timeout, interval, "deployment creation check failed") + + t.Log("verify the deployment has CassAuth EnvVars") + envVars := deployment.Spec.Template.Spec.Containers[0].Env + assert.Equal(t, "REAPER_CASS_AUTH_USERNAME", envVars[len(envVars)-3].Name) + assert.Equal(t, "top-secret-cass", 
envVars[len(envVars)-3].ValueFrom.SecretKeyRef.LocalObjectReference.Name) + assert.Equal(t, "username", envVars[len(envVars)-3].ValueFrom.SecretKeyRef.Key) + assert.Equal(t, "REAPER_CASS_AUTH_PASSWORD", envVars[len(envVars)-2].Name) + assert.Equal(t, "top-secret-cass", envVars[len(envVars)-2].ValueFrom.SecretKeyRef.LocalObjectReference.Name) + assert.Equal(t, "password", envVars[len(envVars)-2].ValueFrom.SecretKeyRef.Key) + assert.Equal(t, "REAPER_CASS_AUTH_ENABLED", envVars[len(envVars)-1].Name) + assert.Equal(t, "true", envVars[len(envVars)-1].Value) +} + +func newReaper(namespace string) *reaperapi.Reaper { + return &reaperapi.Reaper{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: reaperName, + }, + Spec: reaperapi.ReaperSpec{ + DatacenterRef: reaperapi.CassandraDatacenterRef{ + Name: cassandraDatacenterName, + }, + }, + } +} + +func verifyReaperReady(t *testing.T, ctx context.Context, k8sClient client.Client, testNamespace string) { + t.Log("check that the reaper is ready") + reaperKey := types.NamespacedName{Namespace: testNamespace, Name: reaperName} + require.Eventually(t, func() bool { + updatedReaper := &reaperapi.Reaper{} + if err := k8sClient.Get(ctx, reaperKey, updatedReaper); err != nil { + return false + } + return updatedReaper.Status.IsReady() + }, timeout, interval) +} + +func patchDeploymentStatus(t *testing.T, ctx context.Context, deployment *appsv1.Deployment, replicas, readyReplicas int32, k8sClient client.Client) { + deploymentPatch := client.MergeFrom(deployment.DeepCopy()) + deployment.Status.Replicas = replicas + deployment.Status.ReadyReplicas = readyReplicas + err := k8sClient.Status().Patch(ctx, deployment, deploymentPatch) + require.NoError(t, err) +} diff --git a/controllers/replication/secret_controller.go b/controllers/replication/secret_controller.go index 6cc7b498e..6272a1c3c 100644 --- a/controllers/replication/secret_controller.go +++ b/controllers/replication/secret_controller.go @@ -3,6 +3,7 @@ package replication import ( "context" "fmt" + "github.com/k8ssandra/k8ssandra-operator/pkg/secret" "strings" "sync" @@ -33,9 +34,6 @@ import ( // TODO Move these to apis? const ( replicatedResourceFinalizer = "replicatedresource.k8ssandra.io/finalizer" - - // OrphanResourceAnnotation when set to true prevents the deletion of secret from target clusters even if matching ReplicatedSecret is removed - OrphanResourceAnnotation = "replicatedresource.k8ssandra.io/orphan" ) // We need rights to update the target cluster's secrets, not necessarily this cluster @@ -72,7 +70,7 @@ func (s *SecretSyncController) Reconcile(ctx context.Context, req ctrl.Request) // Fetch all secrets from managed cluster. 
// Remove only those secrets which are not matched by any other ReplicatedSecret and do not have the orphan annotation - if val, found := rsec.GetAnnotations()[OrphanResourceAnnotation]; !found || val != "true" { + if val, found := rsec.GetAnnotations()[secret.OrphanResourceAnnotation]; !found || val != "true" { logger.Info("Cleaning up all the replicated resources", "ReplicatedSecret", req.NamespacedName) selector, err := metav1.LabelSelectorAsSelector(rsec.Spec.Selector) if err != nil { @@ -101,7 +99,7 @@ func (s *SecretSyncController) Reconcile(ctx context.Context, req ctrl.Request) continue } - if val, found := sec.GetAnnotations()[OrphanResourceAnnotation]; found && val == "true" { + if val, found := sec.GetAnnotations()[secret.OrphanResourceAnnotation]; found && val == "true" { // Managed cluster has orphan set to the secret, do not delete it from target clusters continue SecretsToCheck } diff --git a/controllers/replication/secret_controller_test.go b/controllers/replication/secret_controller_test.go index 62543639a..c6b2c3ff3 100644 --- a/controllers/replication/secret_controller_test.go +++ b/controllers/replication/secret_controller_test.go @@ -2,6 +2,7 @@ package replication import ( "context" + "github.com/k8ssandra/k8ssandra-operator/pkg/secret" "testing" "time" @@ -392,14 +393,14 @@ func TestSyncSecrets(t *testing.T) { assert.Equal(orig.Data, dest.Data) - dest.GetLabels()[OrphanResourceAnnotation] = "true" + dest.GetLabels()[secret.OrphanResourceAnnotation] = "true" dest.GetAnnotations()[coreapi.ResourceHashAnnotation] = "9876555" syncSecrets(orig, dest) // Verify additional orphan annotation was not removed - assert.Contains(dest.GetLabels(), OrphanResourceAnnotation) + assert.Contains(dest.GetLabels(), secret.OrphanResourceAnnotation) // Verify original labels and their values are set for k, v := range orig.GetLabels() { diff --git a/go.mod b/go.mod index 54cca491e..eccc208cd 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/go-sql-driver/mysql v1.5.0 // indirect github.com/gruntwork-io/terratest v0.37.7 github.com/k8ssandra/cass-operator v1.8.0-rc.2.0.20211104092550-4d5f19c977b6 + github.com/k8ssandra/reaper-client-go v0.3.0 github.com/pkg/errors v0.9.1 github.com/rs/zerolog v1.20.0 github.com/sirupsen/logrus v1.8.1 diff --git a/go.sum b/go.sum index 389a3bfe1..1372de260 100644 --- a/go.sum +++ b/go.sum @@ -469,8 +469,8 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8 github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/k8ssandra/cass-operator v1.8.0-rc.2.0.20211104092550-4d5f19c977b6 h1:rTXou7DdQRtr7+9jDPLGmlVu4r2bKDVeGykESz8Q3kA= github.com/k8ssandra/cass-operator v1.8.0-rc.2.0.20211104092550-4d5f19c977b6/go.mod h1:Su27TRowgMMbVD28AUus2+2YHCAQd+0uqiLcgHY6tUA= -github.com/k8ssandra/cass-operator v1.8.0 h1:QFsuxr//BOGY0qmUDo6ZVMqKJbYyNKiboXBQej3WIuM= -github.com/k8ssandra/cass-operator v1.8.0/go.mod h1:Su27TRowgMMbVD28AUus2+2YHCAQd+0uqiLcgHY6tUA= +github.com/k8ssandra/reaper-client-go v0.3.0 h1:77F37hIJrP/YrwIYbJ1LOucJ7CwKWLAt8OM7TD0mDBk= +github.com/k8ssandra/reaper-client-go v0.3.0/go.mod h1:1yol6YTcKcLOmPH9CfAdeFnVg/KCCTSbIUXMsF1cKII= github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= diff --git a/main.go b/main.go index 
d3d153589..c3666bf98 100644 --- a/main.go +++ b/main.go @@ -25,6 +25,7 @@ import ( "github.com/k8ssandra/k8ssandra-operator/pkg/cassandra" "github.com/k8ssandra/k8ssandra-operator/pkg/clientcache" "github.com/k8ssandra/k8ssandra-operator/pkg/config" + "github.com/k8ssandra/k8ssandra-operator/pkg/reaper" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. @@ -41,12 +42,14 @@ import ( configapi "github.com/k8ssandra/k8ssandra-operator/apis/config/v1beta1" k8ssandraiov1alpha1 "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" + reaperapi "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" replicationapi "github.com/k8ssandra/k8ssandra-operator/apis/replication/v1alpha1" stargateapi "github.com/k8ssandra/k8ssandra-operator/apis/stargate/v1alpha1" k8ssandractrl "github.com/k8ssandra/k8ssandra-operator/controllers/k8ssandra" + reaperctrl "github.com/k8ssandra/k8ssandra-operator/controllers/reaper" replicationctrl "github.com/k8ssandra/k8ssandra-operator/controllers/replication" stargatectrl "github.com/k8ssandra/k8ssandra-operator/controllers/stargate" - //+kubebuilder:scaffold:imports + // +kubebuilder:scaffold:imports ) var ( @@ -62,7 +65,8 @@ func init() { utilruntime.Must(replicationapi.AddToScheme(scheme)) utilruntime.Must(stargateapi.AddToScheme(scheme)) utilruntime.Must(configapi.AddToScheme(scheme)) - //+kubebuilder:scaffold:scheme + utilruntime.Must(reaperapi.AddToScheme(scheme)) + // +kubebuilder:scaffold:scheme } func main() { @@ -193,7 +197,17 @@ func main() { os.Exit(1) } - //+kubebuilder:scaffold:builder + if err = (&reaperctrl.ReaperReconciler{ + ReconcilerConfig: reconcilerConfig, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + NewManager: reaper.NewManager, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Reaper") + os.Exit(1) + } + + // +kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { setupLog.Error(err, "unable to set up health check") diff --git a/pkg/cassandra/datacenter.go b/pkg/cassandra/datacenter.go index 6ab90b4a7..31d8456fd 100644 --- a/pkg/cassandra/datacenter.go +++ b/pkg/cassandra/datacenter.go @@ -39,6 +39,8 @@ type DatacenterConfig struct { CassandraConfig *api.CassandraConfig AdditionalSeeds []string Networking *cassdcapi.NetworkingConfig + Users []cassdcapi.CassandraUser + PodTemplateSpec *corev1.PodTemplateSpec } func NewDatacenter(klusterKey types.NamespacedName, template *DatacenterConfig) (*cassdcapi.CassandraDatacenter, error) { @@ -66,17 +68,19 @@ func NewDatacenter(klusterKey types.NamespacedName, template *DatacenterConfig) }, }, Spec: cassdcapi.CassandraDatacenterSpec{ - ClusterName: template.Cluster, - ServerImage: template.ServerImage, - SuperuserSecretName: template.SuperUserSecretName, Size: template.Size, - ServerType: "cassandra", ServerVersion: template.ServerVersion, + ServerImage: template.ServerImage, + ServerType: "cassandra", Config: rawConfig, Racks: template.Racks, StorageConfig: *template.StorageConfig, - AdditionalSeeds: template.AdditionalSeeds, + ClusterName: template.Cluster, + SuperuserSecretName: template.SuperUserSecretName, + Users: template.Users, Networking: template.Networking, + AdditionalSeeds: template.AdditionalSeeds, + PodTemplateSpec: template.PodTemplateSpec, }, } diff --git a/pkg/cassandra/management.go b/pkg/cassandra/management.go index 345266a63..d1bbbf56c 100644 --- 
a/pkg/cassandra/management.go +++ b/pkg/cassandra/management.go @@ -84,9 +84,13 @@ type ManagementApiFacade interface { // ListTables calls the management API "GET /ops/tables" endpoint to retrieve the table names in the given keyspace. ListTables(keyspaceName string) ([]string, error) - // CreateTable calls the management API "POST /ops/tables/create" endpoint to create a new table in the given + // CreateTable calls the management API "POST /ops/tables/create" endpoint to create a new table in the given // keyspace. CreateTable(definition *httphelper.TableDefinition) error + + // EnsureKeyspaceReplication ensures that the given keyspace exists with the given replication: if the + // keyspace does not exist, it is created; if it exists but its replication settings differ, it is altered + // to match the desired replication. + EnsureKeyspaceReplication(keyspaceName string, replication map[string]int) error } type defaultManagementApiFacade struct { @@ -255,3 +259,34 @@ func (r *defaultManagementApiFacade) CreateTable(table *httphelper.TableDefiniti return fmt.Errorf("CALL create table failed on all datacenter %v pods", r.dc.Name) } } + +func (r *defaultManagementApiFacade) EnsureKeyspaceReplication(keyspaceName string, replication map[string]int) error { + r.logger.Info(fmt.Sprintf("Ensuring that keyspace %s exists in cluster %v...", keyspaceName, r.dc.Spec.ClusterName)) + if keyspaces, err := r.ListKeyspaces(keyspaceName); err != nil { + return err + } else if len(keyspaces) == 0 { + r.logger.Info(fmt.Sprintf("Keyspace %s does not exist in cluster %v, creating it", keyspaceName, r.dc.Spec.ClusterName)) + if err := r.CreateKeyspaceIfNotExists(keyspaceName, replication); err != nil { + return err + } else { + r.logger.Info(fmt.Sprintf("Keyspace %s successfully created", keyspaceName)) + return nil + } + } else { + r.logger.Info(fmt.Sprintf("Keyspace %s already exists in cluster %v", keyspaceName, r.dc.Spec.ClusterName)) + if actualReplication, err := r.GetKeyspaceReplication(keyspaceName); err != nil { + return err + } else if CompareReplications(actualReplication, replication) { + r.logger.Info(fmt.Sprintf("Keyspace %s has desired replication", keyspaceName)) + return nil + } else { + r.logger.Info(fmt.Sprintf("Keyspace %s already exists in cluster %v but has wrong replication, altering it", keyspaceName, r.dc.Spec.ClusterName)) + if err := r.AlterKeyspace(keyspaceName, replication); err != nil { + return err + } else { + r.logger.Info(fmt.Sprintf("Keyspace %s successfully altered", keyspaceName)) + return nil + } + } + } +} diff --git a/pkg/cassandra/util.go b/pkg/cassandra/util.go index 3f3fdd8ab..e87116775 100644 --- a/pkg/cassandra/util.go +++ b/pkg/cassandra/util.go @@ -2,6 +2,7 @@ package cassandra import ( "math" + "strconv" "time" api "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" @@ -43,3 +44,34 @@ func ComputeSystemReplication(kluster *api.K8ssandraCluster) SystemReplication { return SystemReplication{Datacenters: dcNames, ReplicationFactor: int(rf)} } + +func ComputeReplication(maxReplicationPerDc int, datacenters ...api.CassandraDatacenterTemplate) map[string]int { + desiredReplication := make(map[string]int, len(datacenters)) + for _, dcTemplate := range datacenters { + replicationFactor := int(math.Min(float64(maxReplicationPerDc), float64(dcTemplate.Size))) + desiredReplication[dcTemplate.Meta.Name] = replicationFactor + } + return desiredReplication +} + +const networkTopology = "org.apache.cassandra.locator.NetworkTopologyStrategy" + +func CompareReplications(actualReplication map[string]string, desiredReplication map[string]int) bool { + if
len(actualReplication) == 0 { + return false + } else if class := actualReplication["class"]; class != networkTopology { + return false + } else if len(actualReplication) != len(desiredReplication)+1 { + return false + } + for dcName, desiredRf := range desiredReplication { + if actualRf, ok := actualReplication[dcName]; !ok { + return false + } else if rf, err := strconv.Atoi(actualRf); err != nil { + return false + } else if rf != desiredRf { + return false + } + } + return true +} diff --git a/pkg/cassandra/util_test.go b/pkg/cassandra/util_test.go index 9ea2c3d83..8a03e4334 100644 --- a/pkg/cassandra/util_test.go +++ b/pkg/cassandra/util_test.go @@ -1,6 +1,7 @@ package cassandra import ( + "github.com/stretchr/testify/assert" "testing" api "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" @@ -101,3 +102,56 @@ func TestComputeSystemReplication(t *testing.T) { }) } } + +func TestComputeReplication(t *testing.T) { + tests := []struct { + name string + dcs []api.CassandraDatacenterTemplate + expected map[string]int + }{ + {"one dc", []api.CassandraDatacenterTemplate{ + {Meta: api.EmbeddedObjectMeta{Name: "dc1"}, Size: 3}, + }, map[string]int{"dc1": 3}}, + {"small dc", []api.CassandraDatacenterTemplate{ + {Meta: api.EmbeddedObjectMeta{Name: "dc1"}, Size: 1}, + }, map[string]int{"dc1": 1}}, + {"large dc", []api.CassandraDatacenterTemplate{ + {Meta: api.EmbeddedObjectMeta{Name: "dc1"}, Size: 10}, + }, map[string]int{"dc1": 3}}, + {"many dcs", []api.CassandraDatacenterTemplate{ + {Meta: api.EmbeddedObjectMeta{Name: "dc1"}, Size: 3}, + {Meta: api.EmbeddedObjectMeta{Name: "dc2"}, Size: 1}, + {Meta: api.EmbeddedObjectMeta{Name: "dc3"}, Size: 10}, + }, map[string]int{"dc1": 3, "dc2": 1, "dc3": 3}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := ComputeReplication(3, tt.dcs...) 
+ assert.Equal(t, tt.expected, actual) + }) + } +} + +func TestCompareReplications(t *testing.T) { + tests := []struct { + name string + actual map[string]string + desired map[string]int + expected bool + }{ + {"nil", nil, map[string]int{"dc1": 3}, false}, + {"empty", map[string]string{}, map[string]int{"dc1": 3}, false}, + {"wrong class", map[string]string{"class": "wrong"}, map[string]int{"dc1": 3}, false}, + {"wrong length", map[string]string{"class": networkTopology, "dc1": "3", "dc2": "3"}, map[string]int{"dc1": 3}, false}, + {"missing dc", map[string]string{"class": networkTopology, "dc2": "3"}, map[string]int{"dc1": 3}, false}, + {"invalid rf", map[string]string{"class": networkTopology, "dc1": "not a number"}, map[string]int{"dc1": 3}, false}, + {"wrong rf", map[string]string{"class": networkTopology, "dc1": "1"}, map[string]int{"dc1": 3}, false}, + {"success", map[string]string{"class": networkTopology, "dc1": "1", "dc2": "3"}, map[string]int{"dc1": 1, "dc2": 3}, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := CompareReplications(tt.actual, tt.desired) + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/pkg/mocks/ManagementApiFacade.go b/pkg/mocks/ManagementApiFacade.go index 963f90188..9d0198ea7 100644 --- a/pkg/mocks/ManagementApiFacade.go +++ b/pkg/mocks/ManagementApiFacade.go @@ -54,6 +54,20 @@ func (_m *ManagementApiFacade) CreateTable(definition *httphelper.TableDefinitio return r0 } +// EnsureKeyspaceReplication provides a mock function with given fields: keyspaceName, replication +func (_m *ManagementApiFacade) EnsureKeyspaceReplication(keyspaceName string, replication map[string]int) error { + ret := _m.Called(keyspaceName, replication) + + var r0 error + if rf, ok := ret.Get(0).(func(string, map[string]int) error); ok { + r0 = rf(keyspaceName, replication) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // GetKeyspaceReplication provides a mock function with given fields: keyspaceName func (_m *ManagementApiFacade) GetKeyspaceReplication(keyspaceName string) (map[string]string, error) { ret := _m.Called(keyspaceName) diff --git a/pkg/mocks/README.md b/pkg/mocks/README.md index a2755ee07..7d76a5207 100644 --- a/pkg/mocks/README.md +++ b/pkg/mocks/README.md @@ -9,3 +9,4 @@ To install Mockery on macOS: If necessary, mocks can be regenerated with: mockery --dir=./pkg/cassandra --output=./pkg/mocks --name=ManagementApiFacade + mockery --dir=./pkg/reaper --output=./pkg/mocks --name=Manager --filename=reaper_manager.go --structname=ReaperManager diff --git a/pkg/mocks/reaper_manager.go b/pkg/mocks/reaper_manager.go new file mode 100644 index 000000000..3a176b35e --- /dev/null +++ b/pkg/mocks/reaper_manager.go @@ -0,0 +1,67 @@ +// Code generated by mockery v2.9.4. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + + v1alpha1 "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" + + v1beta1 "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1" +) + +// ReaperManager is an autogenerated mock type for the Manager type +type ReaperManager struct { + mock.Mock +} + +// AddClusterToReaper provides a mock function with given fields: ctx, cassdc +func (_m *ReaperManager) AddClusterToReaper(ctx context.Context, cassdc *v1beta1.CassandraDatacenter) error { + ret := _m.Called(ctx, cassdc) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *v1beta1.CassandraDatacenter) error); ok { + r0 = rf(ctx, cassdc) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Connect provides a mock function with given fields: _a0 +func (_m *ReaperManager) Connect(_a0 *v1alpha1.Reaper) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(*v1alpha1.Reaper) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// VerifyClusterIsConfigured provides a mock function with given fields: ctx, cassdc +func (_m *ReaperManager) VerifyClusterIsConfigured(ctx context.Context, cassdc *v1beta1.CassandraDatacenter) (bool, error) { + ret := _m.Called(ctx, cassdc) + + var r0 bool + if rf, ok := ret.Get(0).(func(context.Context, *v1beta1.CassandraDatacenter) bool); ok { + r0 = rf(ctx, cassdc) + } else { + r0 = ret.Get(0).(bool) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *v1beta1.CassandraDatacenter) error); ok { + r1 = rf(ctx, cassdc) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/pkg/reaper/datacenter.go b/pkg/reaper/datacenter.go new file mode 100644 index 000000000..79aec08f8 --- /dev/null +++ b/pkg/reaper/datacenter.go @@ -0,0 +1,84 @@ +package reaper + +import ( + cassdcapi "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1" + "github.com/k8ssandra/cass-operator/pkg/reconciliation" + reaperapi "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" + "github.com/k8ssandra/k8ssandra-operator/pkg/cassandra" + corev1 "k8s.io/api/core/v1" +) + +func AddReaperSettingsToDcConfig(reaperTemplate *reaperapi.ReaperClusterTemplate, dcConfig *cassandra.DatacenterConfig) { + addUser(reaperTemplate, dcConfig) + if dcConfig.PodTemplateSpec == nil { + dcConfig.PodTemplateSpec = &corev1.PodTemplateSpec{} + } + addInitContainer(reaperTemplate, dcConfig) + modifyMainContainer(dcConfig) +} + +func addUser(reaperTemplate *reaperapi.ReaperClusterTemplate, dcConfig *cassandra.DatacenterConfig) { + cassandraUserSecretRef := reaperTemplate.CassandraUserSecretRef + if cassandraUserSecretRef == "" { + cassandraUserSecretRef = DefaultUserSecretName(dcConfig.Cluster) + } + dcConfig.Users = append(dcConfig.Users, cassdcapi.CassandraUser{ + SecretName: cassandraUserSecretRef, + Superuser: true, + }) +} + +func addInitContainer(reaperTemplate *reaperapi.ReaperClusterTemplate, dcConfig *cassandra.DatacenterConfig) { + jmxUserSecretRef := reaperTemplate.JmxUserSecretRef + if jmxUserSecretRef == "" { + jmxUserSecretRef = DefaultJmxUserSecretName(dcConfig.Cluster) + } + dcConfig.PodTemplateSpec.Spec.InitContainers = append(dcConfig.PodTemplateSpec.Spec.InitContainers, corev1.Container{ + Name: "jmx-credentials", + Image: "docker.io/busybox:1.33.1", + ImagePullPolicy: corev1.PullIfNotPresent, + Env: []corev1.EnvVar{ + { + Name: "REAPER_JMX_USERNAME", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + 
LocalObjectReference: corev1.LocalObjectReference{Name: jmxUserSecretRef}, + Key: "username", + }, + }, + }, + { + Name: "REAPER_JMX_PASSWORD", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: jmxUserSecretRef}, + Key: "password", + }, + }, + }, + }, + Args: []string{ + "/bin/sh", + "-c", + "echo \"$REAPER_JMX_USERNAME $REAPER_JMX_PASSWORD\" > /config/jmxremote.password", + }, + VolumeMounts: []corev1.VolumeMount{{ + Name: "server-config", + MountPath: "/config", + }}, + }) +} + +func modifyMainContainer(dcConfig *cassandra.DatacenterConfig) { + for i, container := range dcConfig.PodTemplateSpec.Spec.Containers { + if container.Name == reconciliation.CassandraContainerName { + container.Env = append(container.Env, corev1.EnvVar{Name: "LOCAL_JMX", Value: "no"}) + dcConfig.PodTemplateSpec.Spec.Containers[i] = container + return + } + } + dcConfig.PodTemplateSpec.Spec.Containers = append(dcConfig.PodTemplateSpec.Spec.Containers, corev1.Container{ + Name: reconciliation.CassandraContainerName, + Env: []corev1.EnvVar{{Name: "LOCAL_JMX", Value: "no"}}, + }) +} diff --git a/pkg/reaper/datacenter_test.go b/pkg/reaper/datacenter_test.go new file mode 100644 index 000000000..5b483a3ba --- /dev/null +++ b/pkg/reaper/datacenter_test.go @@ -0,0 +1,171 @@ +package reaper + +import ( + cassdcapi "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1" + "github.com/k8ssandra/cass-operator/pkg/reconciliation" + api "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" + reaperapi "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" + "github.com/k8ssandra/k8ssandra-operator/pkg/cassandra" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + "testing" +) + +func TestAddReaperSettingsToDcConfig(t *testing.T) { + tests := []struct { + name string + reaperTemplate *reaperapi.ReaperClusterTemplate + actual *cassandra.DatacenterConfig + expected *cassandra.DatacenterConfig + }{ + { + "defaults", + &reaperapi.ReaperClusterTemplate{}, + &cassandra.DatacenterConfig{ + Meta: api.EmbeddedObjectMeta{Name: "dc1"}, + Cluster: "cluster1", + }, + &cassandra.DatacenterConfig{ + Meta: api.EmbeddedObjectMeta{Name: "dc1"}, + Cluster: "cluster1", + Users: []cassdcapi.CassandraUser{{SecretName: "cluster1-reaper", Superuser: true}}, + PodTemplateSpec: &corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{{ + Name: "jmx-credentials", + Image: "docker.io/busybox:1.33.1", + ImagePullPolicy: corev1.PullIfNotPresent, + Env: []corev1.EnvVar{ + { + Name: "REAPER_JMX_USERNAME", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "cluster1-reaper-jmx"}, + Key: "username", + }, + }, + }, + { + Name: "REAPER_JMX_PASSWORD", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "cluster1-reaper-jmx"}, + Key: "password", + }, + }, + }, + }, + Args: []string{ + "/bin/sh", + "-c", + "echo \"$REAPER_JMX_USERNAME $REAPER_JMX_PASSWORD\" > /config/jmxremote.password", + }, + VolumeMounts: []corev1.VolumeMount{{ + Name: "server-config", + MountPath: "/config", + }}, + }}, + Containers: []corev1.Container{{ + Name: reconciliation.CassandraContainerName, + Env: []corev1.EnvVar{{Name: "LOCAL_JMX", Value: "no"}}, + }}, + }, + }, + }, + }, + { + "existing objects", + &reaperapi.ReaperClusterTemplate{ + CassandraUserSecretRef: 
"cass-user", + JmxUserSecretRef: "jmx-user", + }, + &cassandra.DatacenterConfig{ + Meta: api.EmbeddedObjectMeta{Name: "dc1"}, + Cluster: "cluster1", + Users: []cassdcapi.CassandraUser{{SecretName: "another-user", Superuser: true}}, + PodTemplateSpec: &corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{{ + Name: "another-init-container", + }}, + Containers: []corev1.Container{ + { + Name: reconciliation.CassandraContainerName, + Env: []corev1.EnvVar{{Name: "ANOTHER_VAR", Value: "irrelevant"}}, + }, + { + Name: "another-container", + }, + }, + }, + }, + }, + &cassandra.DatacenterConfig{ + Meta: api.EmbeddedObjectMeta{Name: "dc1"}, + Cluster: "cluster1", + Users: []cassdcapi.CassandraUser{ + {SecretName: "another-user", Superuser: true}, + {SecretName: "cass-user", Superuser: true}, + }, + PodTemplateSpec: &corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + {Name: "another-init-container"}, + { + Name: "jmx-credentials", + Image: "docker.io/busybox:1.33.1", + ImagePullPolicy: corev1.PullIfNotPresent, + Env: []corev1.EnvVar{ + { + Name: "REAPER_JMX_USERNAME", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "jmx-user"}, + Key: "username", + }, + }, + }, + { + Name: "REAPER_JMX_PASSWORD", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "jmx-user"}, + Key: "password", + }, + }, + }, + }, + Args: []string{ + "/bin/sh", + "-c", + "echo \"$REAPER_JMX_USERNAME $REAPER_JMX_PASSWORD\" > /config/jmxremote.password", + }, + VolumeMounts: []corev1.VolumeMount{{ + Name: "server-config", + MountPath: "/config", + }}, + }}, + Containers: []corev1.Container{ + { + Name: reconciliation.CassandraContainerName, + Env: []corev1.EnvVar{ + {Name: "ANOTHER_VAR", Value: "irrelevant"}, + {Name: "LOCAL_JMX", Value: "no"}, + }, + }, + { + Name: "another-container", + }, + }, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + AddReaperSettingsToDcConfig(tt.reaperTemplate, tt.actual) + assert.Equal(t, tt.expected, tt.actual) + }) + } +} diff --git a/pkg/reaper/deployment.go b/pkg/reaper/deployment.go new file mode 100644 index 000000000..97c8c8403 --- /dev/null +++ b/pkg/reaper/deployment.go @@ -0,0 +1,205 @@ +package reaper + +import ( + "fmt" + cassdcapi "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1" + k8ssandraapi "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" + api "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" + "github.com/k8ssandra/k8ssandra-operator/pkg/cassandra" + "github.com/k8ssandra/k8ssandra-operator/pkg/utils" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "strings" +) + +func NewDeployment(reaper *api.Reaper, dc *cassdcapi.CassandraDatacenter, authVars ...*corev1.EnvVar) *appsv1.Deployment { + labels := createServiceAndDeploymentLabels(reaper) + + selector := metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: k8ssandraapi.ManagedByLabel, + Operator: metav1.LabelSelectorOpIn, + Values: []string{k8ssandraapi.NameLabelValue}, + }, + { + Key: api.ReaperLabel, + Operator: metav1.LabelSelectorOpIn, + Values: []string{reaper.Name}, + }, + }, + } + + healthProbe := &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + 
Path: "/healthcheck", + Port: intstr.FromInt(8081), + }, + }, + InitialDelaySeconds: 45, + PeriodSeconds: 15, + } + + envVars := []corev1.EnvVar{ + { + Name: "REAPER_STORAGE_TYPE", + Value: "cassandra", + }, + { + Name: "REAPER_ENABLE_DYNAMIC_SEED_LIST", + Value: "false", + }, + { + Name: "REAPER_CASS_CONTACT_POINTS", + Value: fmt.Sprintf("[%s]", dc.GetDatacenterServiceName()), + }, + { + Name: "REAPER_AUTH_ENABLED", + Value: "false", + }, + { + Name: "REAPER_DATACENTER_AVAILABILITY", + Value: reaper.Spec.DatacenterAvailability, + }, + { + Name: "REAPER_CASS_LOCAL_DC", + Value: dc.Name, + }, + } + + if reaper.Spec.AutoScheduling.Enabled { + envVars = append(envVars, corev1.EnvVar{ + Name: "REAPER_AUTO_SCHEDULING_ENABLED", + Value: "true", + }) + adaptive, incremental := getAdaptiveIncremental(reaper, dc) + envVars = append(envVars, corev1.EnvVar{ + Name: "REAPER_AUTO_SCHEDULING_ADAPTIVE", + Value: fmt.Sprintf("%v", adaptive), + }) + envVars = append(envVars, corev1.EnvVar{ + Name: "REAPER_AUTO_SCHEDULING_INCREMENTAL", + Value: fmt.Sprintf("%v", incremental), + }) + envVars = append(envVars, corev1.EnvVar{ + Name: "REAPER_AUTO_SCHEDULING_PERCENT_UNREPAIRED_THRESHOLD", + Value: fmt.Sprintf("%v", reaper.Spec.AutoScheduling.PercentUnrepairedThreshold), + }) + envVars = append(envVars, corev1.EnvVar{ + Name: "REAPER_AUTO_SCHEDULING_INITIAL_DELAY_PERIOD", + Value: reaper.Spec.AutoScheduling.InitialDelay, + }) + envVars = append(envVars, corev1.EnvVar{ + Name: "REAPER_AUTO_SCHEDULING_PERIOD_BETWEEN_POLLS", + Value: reaper.Spec.AutoScheduling.PeriodBetweenPolls, + }) + envVars = append(envVars, corev1.EnvVar{ + Name: "REAPER_AUTO_SCHEDULING_TIME_BEFORE_FIRST_SCHEDULE", + Value: reaper.Spec.AutoScheduling.TimeBeforeFirstSchedule, + }) + envVars = append(envVars, corev1.EnvVar{ + Name: "REAPER_AUTO_SCHEDULING_SCHEDULE_SPREAD_PERIOD", + Value: reaper.Spec.AutoScheduling.ScheduleSpreadPeriod, + }) + if reaper.Spec.AutoScheduling.ExcludedClusters != nil { + envVars = append(envVars, corev1.EnvVar{ + Name: "REAPER_AUTO_SCHEDULING_EXCLUDED_CLUSTERS", + Value: fmt.Sprintf("[%s]", strings.Join(reaper.Spec.AutoScheduling.ExcludedClusters, ", ")), + }) + } + if reaper.Spec.AutoScheduling.ExcludedKeyspaces != nil { + envVars = append(envVars, corev1.EnvVar{ + Name: "REAPER_AUTO_SCHEDULING_EXCLUDED_KEYSPACES", + Value: fmt.Sprintf("[%s]", strings.Join(reaper.Spec.AutoScheduling.ExcludedKeyspaces, ", ")), + }) + } + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: reaper.Namespace, + Name: reaper.Name, + Labels: labels, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &selector, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + Affinity: reaper.Spec.Affinity, + InitContainers: []corev1.Container{ + { + Name: "reaper-schema-init", + ImagePullPolicy: reaper.Spec.ImagePullPolicy, + Image: reaper.Spec.Image, + SecurityContext: reaper.Spec.InitContainerSecurityContext, + Env: envVars, + Args: []string{"schema-migration"}, + }, + }, + Containers: []corev1.Container{ + { + Name: "reaper", + ImagePullPolicy: reaper.Spec.ImagePullPolicy, + Image: reaper.Spec.Image, + SecurityContext: reaper.Spec.SecurityContext, + Ports: []corev1.ContainerPort{ + { + Name: "app", + ContainerPort: 8080, + Protocol: "TCP", + }, + { + Name: "admin", + ContainerPort: 8081, + Protocol: "TCP", + }, + }, + LivenessProbe: healthProbe, + ReadinessProbe: healthProbe, + Env: envVars, + }, + }, + ServiceAccountName: 
reaper.Spec.ServiceAccountName, + Tolerations: reaper.Spec.Tolerations, + SecurityContext: reaper.Spec.PodSecurityContext, + }, + }, + }, + } + addAuthEnvVars(deployment, authVars) + utils.AddHashAnnotation(deployment, k8ssandraapi.ResourceHashAnnotation) + return deployment +} + +func addAuthEnvVars(deployment *appsv1.Deployment, vars []*corev1.EnvVar) { + initEnvVars := deployment.Spec.Template.Spec.InitContainers[0].Env + envVars := deployment.Spec.Template.Spec.Containers[0].Env + for _, v := range vars { + initEnvVars = append(initEnvVars, *v) + envVars = append(envVars, *v) + } + deployment.Spec.Template.Spec.InitContainers[0].Env = initEnvVars + deployment.Spec.Template.Spec.Containers[0].Env = envVars +} + +func getAdaptiveIncremental(reaper *api.Reaper, dc *cassdcapi.CassandraDatacenter) (adaptive bool, incremental bool) { + switch reaper.Spec.AutoScheduling.RepairType { + case "ADAPTIVE": + adaptive = true + case "INCREMENTAL": + incremental = true + case "AUTO": + if cassandra.IsCassandra3(dc.Spec.ServerVersion) { + adaptive = true + } else { + incremental = true + } + } + return +} diff --git a/pkg/reaper/deployment_test.go b/pkg/reaper/deployment_test.go new file mode 100644 index 000000000..b9ef74ace --- /dev/null +++ b/pkg/reaper/deployment_test.go @@ -0,0 +1,337 @@ +package reaper + +import ( + cassdcapi "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1" + k8ssandraapi "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" + reaperapi "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "testing" +) + +func TestNewDeployment(t *testing.T) { + image := "test/reaper:latest" + reaper := newTestReaper() + reaper.Spec.Image = image + reaper.Spec.ImagePullPolicy = "Always" + reaper.Spec.AutoScheduling = reaperapi.AutoScheduling{Enabled: false} + reaper.Spec.ServiceAccountName = "reaper" + reaper.Spec.DatacenterAvailability = DatacenterAvailabilityLocal + + labels := createServiceAndDeploymentLabels(reaper) + deployment := NewDeployment(reaper, newTestDatacenter()) + + assert.Equal(t, reaper.Namespace, deployment.Namespace) + assert.Equal(t, reaper.Name, deployment.Name) + assert.Equal(t, labels, deployment.Labels) + assert.Equal(t, reaper.Spec.ServiceAccountName, deployment.Spec.Template.Spec.ServiceAccountName) + + selector := deployment.Spec.Selector + assert.Len(t, selector.MatchLabels, 0) + assert.ElementsMatch(t, selector.MatchExpressions, []metav1.LabelSelectorRequirement{ + { + Key: k8ssandraapi.ManagedByLabel, + Operator: metav1.LabelSelectorOpIn, + Values: []string{k8ssandraapi.NameLabelValue}, + }, + { + Key: reaperapi.ReaperLabel, + Operator: metav1.LabelSelectorOpIn, + Values: []string{reaper.Name}, + }, + }) + + assert.Equal(t, labels, deployment.Spec.Template.Labels) + + podSpec := deployment.Spec.Template.Spec + assert.Len(t, podSpec.Containers, 1) + + container := podSpec.Containers[0] + + assert.Equal(t, image, container.Image) + assert.Equal(t, corev1.PullAlways, container.ImagePullPolicy) + assert.ElementsMatch(t, container.Env, []corev1.EnvVar{ + { + Name: "REAPER_STORAGE_TYPE", + Value: "cassandra", + }, + { + Name: "REAPER_ENABLE_DYNAMIC_SEED_LIST", + Value: "false", + }, + { + Name: "REAPER_CASS_CONTACT_POINTS", + Value: "[cluster1-dc1-service]", + }, + { + Name: "REAPER_AUTH_ENABLED", + Value: "false", + }, + { + Name: "REAPER_DATACENTER_AVAILABILITY", + Value: 
DatacenterAvailabilityLocal, + }, + { + Name: "REAPER_CASS_LOCAL_DC", + Value: "dc1", + }, + }) + + assert.Len(t, podSpec.InitContainers, 1) + + initContainer := podSpec.InitContainers[0] + assert.Equal(t, image, initContainer.Image) + assert.Equal(t, corev1.PullAlways, initContainer.ImagePullPolicy) + assert.ElementsMatch(t, initContainer.Env, []corev1.EnvVar{ + { + Name: "REAPER_STORAGE_TYPE", + Value: "cassandra", + }, + { + Name: "REAPER_ENABLE_DYNAMIC_SEED_LIST", + Value: "false", + }, + { + Name: "REAPER_CASS_CONTACT_POINTS", + Value: "[cluster1-dc1-service]", + }, + { + Name: "REAPER_AUTH_ENABLED", + Value: "false", + }, + { + Name: "REAPER_DATACENTER_AVAILABILITY", + Value: DatacenterAvailabilityLocal, + }, + { + Name: "REAPER_CASS_LOCAL_DC", + Value: "dc1", + }, + }) + + assert.ElementsMatch(t, initContainer.Args, []string{"schema-migration"}) + + reaper.Spec.AutoScheduling = reaperapi.AutoScheduling{ + Enabled: false, + InitialDelay: "PT10S", + PeriodBetweenPolls: "PT5M", + TimeBeforeFirstSchedule: "PT10M", + ScheduleSpreadPeriod: "PT6H", + RepairType: "AUTO", + PercentUnrepairedThreshold: 30, + ExcludedClusters: []string{"a", "b"}, + ExcludedKeyspaces: []string{"system.powers"}, + } + + deployment = NewDeployment(reaper, newTestDatacenter()) + podSpec = deployment.Spec.Template.Spec + container = podSpec.Containers[0] + assert.Len(t, container.Env, 6) + + reaper.Spec.AutoScheduling.Enabled = true + deployment = NewDeployment(reaper, newTestDatacenter()) + podSpec = deployment.Spec.Template.Spec + container = podSpec.Containers[0] + assert.Len(t, container.Env, 16) + + assert.Contains(t, container.Env, corev1.EnvVar{ + Name: "REAPER_AUTO_SCHEDULING_ADAPTIVE", + Value: "false", + }) + + assert.Contains(t, container.Env, corev1.EnvVar{ + Name: "REAPER_AUTO_SCHEDULING_INCREMENTAL", + Value: "true", + }) + + assert.Contains(t, container.Env, corev1.EnvVar{ + Name: "REAPER_AUTO_SCHEDULING_PERCENT_UNREPAIRED_THRESHOLD", + Value: "30", + }) + + assert.Contains(t, container.Env, corev1.EnvVar{ + Name: "REAPER_AUTO_SCHEDULING_PERIOD_BETWEEN_POLLS", + Value: "PT5M", + }) + + assert.Contains(t, container.Env, corev1.EnvVar{ + Name: "REAPER_AUTO_SCHEDULING_TIME_BEFORE_FIRST_SCHEDULE", + Value: "PT10M", + }) + + assert.Contains(t, container.Env, corev1.EnvVar{ + Name: "REAPER_AUTO_SCHEDULING_INITIAL_DELAY_PERIOD", + Value: "PT10S", + }) + + assert.Contains(t, container.Env, corev1.EnvVar{ + Name: "REAPER_AUTO_SCHEDULING_EXCLUDED_CLUSTERS", + Value: "[a, b]", + }) + + assert.Contains(t, container.Env, corev1.EnvVar{ + Name: "REAPER_AUTO_SCHEDULING_EXCLUDED_KEYSPACES", + Value: "[system.powers]", + }) + + probe := &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/healthcheck", + Port: intstr.FromInt(8081), + }, + }, + InitialDelaySeconds: 45, + PeriodSeconds: 15, + } + assert.Equal(t, probe, container.LivenessProbe) + assert.Equal(t, probe, container.ReadinessProbe) +} + +func TestTolerations(t *testing.T) { + image := "test/reaper:latest" + tolerations := []corev1.Toleration{ + { + Key: "key1", + Operator: corev1.TolerationOpEqual, + Value: "value1", + Effect: corev1.TaintEffectNoSchedule, + }, + { + Key: "key2", + Operator: corev1.TolerationOpEqual, + Value: "value2", + Effect: corev1.TaintEffectNoSchedule, + }, + } + + reaper := newTestReaper() + reaper.Spec.Image = image + reaper.Spec.Tolerations = tolerations + + deployment := NewDeployment(reaper, newTestDatacenter()) + assert.ElementsMatch(t, tolerations, deployment.Spec.Template.Spec.Tolerations) +} + 
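+// TestAffinity verifies that the affinity settings from the Reaper spec are copied verbatim to the +// deployment's pod template.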
+func TestAffinity(t *testing.T) { + image := "test/reaper:latest" + affinity := &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "kubernetes.io/e2e-az-name", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"e2e-az1", "e2e-az2"}, + }, + }, + }, + }, + }, + }, + } + reaper := newTestReaper() + reaper.Spec.Image = image + reaper.Spec.Affinity = affinity + + deployment := NewDeployment(reaper, newTestDatacenter()) + assert.EqualValues(t, affinity, deployment.Spec.Template.Spec.Affinity, "affinity does not match") +} + +func TestContainerSecurityContext(t *testing.T) { + image := "test/reaper:latest" + readOnlyRootFilesystemOverride := true + securityContext := &corev1.SecurityContext{ + ReadOnlyRootFilesystem: &readOnlyRootFilesystemOverride, + } + reaper := newTestReaper() + reaper.Spec.Image = image + reaper.Spec.SecurityContext = securityContext + + deployment := NewDeployment(reaper, newTestDatacenter()) + podSpec := deployment.Spec.Template.Spec + + assert.Len(t, podSpec.Containers, 1, "Expected a single container to exist") + assert.Equal(t, podSpec.Containers[0].Name, "reaper") + assert.EqualValues(t, securityContext, podSpec.Containers[0].SecurityContext, "securityContext does not match for container") +} + +func TestSchemaInitContainerSecurityContext(t *testing.T) { + image := "test/reaper:latest" + readOnlyRootFilesystemOverride := true + initContainerSecurityContext := &corev1.SecurityContext{ + ReadOnlyRootFilesystem: &readOnlyRootFilesystemOverride, + } + nonInitContainerSecurityContext := &corev1.SecurityContext{ + ReadOnlyRootFilesystem: &readOnlyRootFilesystemOverride, + } + + reaper := newTestReaper() + reaper.Spec.Image = image + reaper.Spec.SecurityContext = nonInitContainerSecurityContext + reaper.Spec.InitContainerSecurityContext = initContainerSecurityContext + + deployment := NewDeployment(reaper, newTestDatacenter()) + podSpec := deployment.Spec.Template.Spec + + assert.Equal(t, podSpec.InitContainers[0].Name, "reaper-schema-init") + assert.Len(t, podSpec.InitContainers, 1, "Expected a single schema init container to exist") + assert.EqualValues(t, initContainerSecurityContext, podSpec.InitContainers[0].SecurityContext, "securityContext does not match for schema init container") +} + +func TestPodSecurityContext(t *testing.T) { + image := "test/reaper:latest" + runAsUser := int64(8675309) + podSecurityContext := &corev1.PodSecurityContext{ + RunAsUser: &runAsUser, + } + reaper := newTestReaper() + reaper.Spec.Image = image + reaper.Spec.PodSecurityContext = podSecurityContext + + deployment := NewDeployment(reaper, newTestDatacenter()) + podSpec := deployment.Spec.Template.Spec + + assert.EqualValues(t, podSecurityContext, podSpec.SecurityContext, "podSecurityContext expected at pod level") +} + +func newTestReaper() *reaperapi.Reaper { + namespace := "service-test" + reaperName := "test-reaper" + dcName := "dc1" + return &reaperapi.Reaper{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: reaperName, + }, + Spec: reaperapi.ReaperSpec{ + DatacenterRef: reaperapi.CassandraDatacenterRef{ + Name: dcName, + Namespace: namespace, + }, + ReaperClusterTemplate: reaperapi.ReaperClusterTemplate{ + Keyspace: "reaper_db", + }, + }, + } +} + +func newTestDatacenter() *cassdcapi.CassandraDatacenter { + namespace := "service-test" + dcName := "dc1" + clusterName := 
"cluster1" + return &cassdcapi.CassandraDatacenter{ + ObjectMeta: metav1.ObjectMeta{ + Name: dcName, + Namespace: namespace, + }, + Spec: cassdcapi.CassandraDatacenterSpec{ + ClusterName: clusterName, + ServerVersion: "4.0.1", + }, + } +} diff --git a/pkg/reaper/labels.go b/pkg/reaper/labels.go new file mode 100644 index 000000000..978d894dd --- /dev/null +++ b/pkg/reaper/labels.go @@ -0,0 +1,33 @@ +package reaper + +import ( + k8ssandraapi "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" + reaperapi "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" + "github.com/k8ssandra/k8ssandra-operator/pkg/utils" +) + +var commonLabels = map[string]string{ + k8ssandraapi.NameLabel: k8ssandraapi.NameLabelValue, + k8ssandraapi.PartOfLabel: k8ssandraapi.PartOfLabelValue, + k8ssandraapi.ComponentLabel: k8ssandraapi.ComponentLabelValueReaper, + k8ssandraapi.ManagedByLabel: k8ssandraapi.NameLabelValue, +} + +func createResourceLabels(kc *k8ssandraapi.K8ssandraCluster) map[string]string { + labels := map[string]string{ + k8ssandraapi.K8ssandraClusterLabel: kc.Name, + k8ssandraapi.CreatedByLabel: k8ssandraapi.CreatedByLabelValueK8ssandraClusterController, + } + return utils.MergeMap(labels, commonLabels) +} + +func createServiceAndDeploymentLabels(r *reaperapi.Reaper) map[string]string { + labels := map[string]string{ + reaperapi.ReaperLabel: r.Name, + k8ssandraapi.CreatedByLabel: k8ssandraapi.CreatedByLabelValueReaperController, + } + if klusterName, found := r.Labels[k8ssandraapi.K8ssandraClusterLabel]; found { + labels[k8ssandraapi.K8ssandraClusterLabel] = klusterName + } + return utils.MergeMap(labels, commonLabels) +} diff --git a/pkg/reaper/manager.go b/pkg/reaper/manager.go new file mode 100644 index 000000000..ad2be22f3 --- /dev/null +++ b/pkg/reaper/manager.go @@ -0,0 +1,54 @@ +package reaper + +import ( + "context" + "fmt" + + cassdcapi "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1" + api "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" + "github.com/k8ssandra/reaper-client-go/reaper" + reapergo "github.com/k8ssandra/reaper-client-go/reaper" +) + +type Manager interface { + Connect(reaper *api.Reaper) error + AddClusterToReaper(ctx context.Context, cassdc *cassdcapi.CassandraDatacenter) error + VerifyClusterIsConfigured(ctx context.Context, cassdc *cassdcapi.CassandraDatacenter) (bool, error) +} + +func NewManager() Manager { + return &restReaperManager{} +} + +type restReaperManager struct { + reaperClient reaper.ReaperClient +} + +func (r *restReaperManager) Connect(reaper *api.Reaper) error { + // Include the namespace in case Reaper is deployed in a different namespace than + // the CassandraDatacenter. + reaperSvc := GetServiceName(reaper.Name) + "." 
+ reaper.Namespace + + reaperClient, err := reapergo.NewReaperClient(fmt.Sprintf("http://%s:8080", reaperSvc)) + if err != nil { + return err + } + r.reaperClient = reaperClient + return nil +} + +func (r *restReaperManager) AddClusterToReaper(ctx context.Context, cassdc *cassdcapi.CassandraDatacenter) error { + return r.reaperClient.AddCluster(ctx, cassdc.Spec.ClusterName, cassdc.GetSeedServiceName()) +} + +func (r *restReaperManager) VerifyClusterIsConfigured(ctx context.Context, cassdc *cassdcapi.CassandraDatacenter) (bool, error) { + _, err := r.reaperClient.GetCluster(ctx, cassdc.Spec.ClusterName) + if err != nil { + if err == reaper.CassandraClusterNotFound { + // We didn't have issues verifying the existence, but the cluster isn't there + return false, nil + } + return false, err + } + return true, nil +} diff --git a/pkg/reaper/resource.go b/pkg/reaper/resource.go new file mode 100644 index 000000000..a5494c797 --- /dev/null +++ b/pkg/reaper/resource.go @@ -0,0 +1,151 @@ +package reaper + +import ( + cassdcapi "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1" + k8ssandraapi "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" + api "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" + reaperapi "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" + "github.com/k8ssandra/k8ssandra-operator/pkg/utils" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +const ( + DatacenterAvailabilityLocal = "LOCAL" + DatacenterAvailabilityEach = "EACH" +) + +func ResourceName(klusterName, dcName string) string { + return klusterName + "-" + dcName + "-reaper" +} + +func NewReaper( + reaperKey types.NamespacedName, + kc *k8ssandraapi.K8ssandraCluster, + dc *cassdcapi.CassandraDatacenter, + reaperTemplate *reaperapi.ReaperClusterTemplate, +) *reaperapi.Reaper { + labels := createResourceLabels(kc) + desiredReaper := &reaperapi.Reaper{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: reaperKey.Namespace, + Name: reaperKey.Name, + Annotations: map[string]string{}, + Labels: labels, + }, + Spec: reaperapi.ReaperSpec{ + ReaperClusterTemplate: *reaperTemplate, + DatacenterRef: reaperapi.CassandraDatacenterRef{ + Name: dc.Name, + Namespace: dc.Namespace, + }, + DatacenterAvailability: computeReaperDcAvailability(kc), + }, + } + if desiredReaper.Spec.CassandraUserSecretRef == "" { + desiredReaper.Spec.CassandraUserSecretRef = DefaultUserSecretName(kc.Name) + } + if desiredReaper.Spec.JmxUserSecretRef == "" { + desiredReaper.Spec.JmxUserSecretRef = DefaultJmxUserSecretName(kc.Name) + } + utils.AddHashAnnotation(desiredReaper, k8ssandraapi.ResourceHashAnnotation) + return desiredReaper +} + +// See https://cassandra-reaper.io/docs/usage/multi_dc/. +// If each DC has its own Reaper instance, use EACH, otherwise use LOCAL. +func computeReaperDcAvailability(kc *k8ssandraapi.K8ssandraCluster) string { + if kc.Spec.Reaper != nil { + return DatacenterAvailabilityEach + } + reapersCount := 0 + for _, dcTemplate := range kc.Spec.Cassandra.Datacenters { + if dcTemplate.Reaper != nil { + reapersCount++ + } + } + if reapersCount == len(kc.Spec.Cassandra.Datacenters) { + return DatacenterAvailabilityEach + } + return DatacenterAvailabilityLocal +} + +// Coalesce combines the cluster and dc templates with override semantics. If a property is +// defined in both templates, the dc-level property takes precedence. 
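+// For example, if both templates set ImagePullPolicy, the dc-level value is used. Keyspace, +// CassandraUserSecretRef and JmxUserSecretRef only exist at the cluster level and are therefore +// always taken from the cluster template.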
+func Coalesce(clusterTemplate *api.ReaperClusterTemplate, dcTemplate *api.ReaperDatacenterTemplate) *api.ReaperClusterTemplate { + + if clusterTemplate == nil && dcTemplate == nil { + return nil + } + + coalesced := &api.ReaperClusterTemplate{} + + if dcTemplate != nil && len(dcTemplate.Image) != 0 { + coalesced.Image = dcTemplate.Image + } else if clusterTemplate != nil && len(clusterTemplate.Image) != 0 { + coalesced.Image = clusterTemplate.Image + } + + if dcTemplate != nil && len(dcTemplate.ImagePullPolicy) != 0 { + coalesced.ImagePullPolicy = dcTemplate.ImagePullPolicy + } else if clusterTemplate != nil && len(clusterTemplate.ImagePullPolicy) != 0 { + coalesced.ImagePullPolicy = clusterTemplate.ImagePullPolicy + } + + if dcTemplate != nil && len(dcTemplate.ServiceAccountName) != 0 { + coalesced.ServiceAccountName = dcTemplate.ServiceAccountName + } else if clusterTemplate != nil && len(clusterTemplate.ServiceAccountName) != 0 { + coalesced.ServiceAccountName = clusterTemplate.ServiceAccountName + } + + if clusterTemplate != nil && len(clusterTemplate.Keyspace) != 0 { + coalesced.Keyspace = clusterTemplate.Keyspace + } + + if clusterTemplate != nil && len(clusterTemplate.CassandraUserSecretRef) != 0 { + coalesced.CassandraUserSecretRef = clusterTemplate.CassandraUserSecretRef + } + + if clusterTemplate != nil && len(clusterTemplate.JmxUserSecretRef) != 0 { + coalesced.JmxUserSecretRef = clusterTemplate.JmxUserSecretRef + } + + // FIXME do we want to drill down on auto scheduling properties? + if dcTemplate != nil { + coalesced.AutoScheduling = dcTemplate.AutoScheduling + } else if clusterTemplate != nil { + coalesced.AutoScheduling = clusterTemplate.AutoScheduling + } + + if dcTemplate != nil && dcTemplate.Affinity != nil { + coalesced.Affinity = dcTemplate.Affinity + } else if clusterTemplate != nil && clusterTemplate.Affinity != nil { + coalesced.Affinity = clusterTemplate.Affinity + } + + if dcTemplate != nil && dcTemplate.Tolerations != nil { + coalesced.Tolerations = dcTemplate.Tolerations + } else if clusterTemplate != nil && clusterTemplate.Tolerations != nil { + coalesced.Tolerations = clusterTemplate.Tolerations + } + + if dcTemplate != nil && dcTemplate.PodSecurityContext != nil { + coalesced.PodSecurityContext = dcTemplate.PodSecurityContext + } else if clusterTemplate != nil && clusterTemplate.PodSecurityContext != nil { + coalesced.PodSecurityContext = clusterTemplate.PodSecurityContext + } + + if dcTemplate != nil && dcTemplate.SecurityContext != nil { + coalesced.SecurityContext = dcTemplate.SecurityContext + } else if clusterTemplate != nil && clusterTemplate.SecurityContext != nil { + coalesced.SecurityContext = clusterTemplate.SecurityContext + } + + if dcTemplate != nil && dcTemplate.InitContainerSecurityContext != nil { + coalesced.InitContainerSecurityContext = dcTemplate.InitContainerSecurityContext + } else if clusterTemplate != nil && clusterTemplate.InitContainerSecurityContext != nil { + coalesced.InitContainerSecurityContext = clusterTemplate.InitContainerSecurityContext + } + + return coalesced +} diff --git a/pkg/reaper/secrets.go b/pkg/reaper/secrets.go new file mode 100644 index 000000000..a56ca66bd --- /dev/null +++ b/pkg/reaper/secrets.go @@ -0,0 +1,73 @@ +package reaper + +import ( + "fmt" + corev1 "k8s.io/api/core/v1" +) + +const ( + jmxAuthEnvPasswordName = "REAPER_JMX_AUTH_PASSWORD" + jmxAuthEnvUsernameName = "REAPER_JMX_AUTH_USERNAME" + cassAuthEnvPasswordName = "REAPER_CASS_AUTH_PASSWORD" + cassAuthEnvUsernameName = "REAPER_CASS_AUTH_USERNAME" + 
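// The env var below toggles Cassandra auth in Reaper; secretUsernameName and secretPasswordName are the keys that +// secretToEnvVars expects to find in the secret's Data map. +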
envVarEnableCassAuth = "REAPER_CASS_AUTH_ENABLED" + secretUsernameName = "username" + secretPasswordName = "password" +) + +var EnableCassAuthVar = &corev1.EnvVar{ + Name: envVarEnableCassAuth, + Value: "true", +} + +func DefaultUserSecretName(k8cName string) string { + return fmt.Sprintf("%v-reaper", k8cName) +} + +func DefaultJmxUserSecretName(k8cName string) string { + return fmt.Sprintf("%v-reaper-jmx", k8cName) +} + +func GetCassandraAuthEnvironmentVars(secret *corev1.Secret) (*corev1.EnvVar, *corev1.EnvVar, error) { + return secretToEnvVars(secret, cassAuthEnvUsernameName, cassAuthEnvPasswordName) +} + +func GetJmxAuthEnvironmentVars(secret *corev1.Secret) (*corev1.EnvVar, *corev1.EnvVar, error) { + return secretToEnvVars(secret, jmxAuthEnvUsernameName, jmxAuthEnvPasswordName) +} + +// secretToEnvVars maps the secret's username and password entries to the given env var names. +func secretToEnvVars(secret *corev1.Secret, envUsernameParam, envPasswordParam string) (*corev1.EnvVar, *corev1.EnvVar, error) { + if _, ok := secret.Data[secretUsernameName]; !ok { + return nil, nil, fmt.Errorf("username key not found in auth secret %s", secret.Name) + } + + if _, ok := secret.Data[secretPasswordName]; !ok { + return nil, nil, fmt.Errorf("password key not found in auth secret %s", secret.Name) + } + + usernameEnvVar := corev1.EnvVar{ + Name: envUsernameParam, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: secret.Name, + }, + Key: "username", + }, + }, + } + + passwordEnvVar := corev1.EnvVar{ + Name: envPasswordParam, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: secret.Name, + }, + Key: "password", + }, + }, + } + + return &usernameEnvVar, &passwordEnvVar, nil +} diff --git a/pkg/reaper/service.go b/pkg/reaper/service.go new file mode 100644 index 000000000..ad3a270d2 --- /dev/null +++ b/pkg/reaper/service.go @@ -0,0 +1,41 @@ +package reaper + +import ( + k8ssandraapi "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" + api "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" + "github.com/k8ssandra/k8ssandra-operator/pkg/utils" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func GetServiceName(reaperName string) string { + return reaperName + "-service" +} + +func NewService(key types.NamespacedName, reaper *api.Reaper) *corev1.Service { + labels := createServiceAndDeploymentLabels(reaper) + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: key.Name, + Namespace: key.Namespace, + Labels: labels, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Ports: []corev1.ServicePort{{ + Port: 8080, + Name: "app", + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.IntOrString{ + Type: intstr.String, + StrVal: "app", + }, + }}, + Selector: labels, + }, + } + utils.AddHashAnnotation(service, k8ssandraapi.ResourceHashAnnotation) + return service +} diff --git a/pkg/reaper/service_test.go b/pkg/reaper/service_test.go new file mode 100644 index 000000000..7c127d2b9 --- /dev/null +++ b/pkg/reaper/service_test.go @@ -0,0 +1,35 @@ +package reaper + +import ( + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func TestNewService(t *testing.T) { + reaper := newTestReaper() + key := types.NamespacedName{Namespace: reaper.Namespace,
Name: GetServiceName(reaper.Name)} + + service := NewService(key, reaper) + + assert.Equal(t, key.Name, service.Name) + assert.Equal(t, key.Namespace, service.Namespace) + assert.Equal(t, createServiceAndDeploymentLabels(reaper), service.Labels) + + assert.Equal(t, createServiceAndDeploymentLabels(reaper), service.Spec.Selector) + assert.Len(t, service.Spec.Ports, 1) + + port := corev1.ServicePort{ + Name: "app", + Protocol: corev1.ProtocolTCP, + Port: 8080, + TargetPort: intstr.IntOrString{ + Type: intstr.String, + StrVal: "app", + }, + } + assert.Equal(t, port, service.Spec.Ports[0]) +} diff --git a/pkg/secret/replicated.go b/pkg/secret/replicated.go index b46bcc78e..cdb566cf5 100644 --- a/pkg/secret/replicated.go +++ b/pkg/secret/replicated.go @@ -5,6 +5,7 @@ import ( "crypto/rand" "fmt" "github.com/go-logr/logr" + "github.com/k8ssandra/k8ssandra-operator/pkg/utils" "k8s.io/apimachinery/pkg/runtime" "math/big" "reflect" @@ -22,8 +23,11 @@ import ( ) const ( - passwordCharacters = "!#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~" + passwordCharacters = "-0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" usernameCharacters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + + // OrphanResourceAnnotation when set to true prevents the deletion of secret from target clusters even if matching ReplicatedSecret is removed + OrphanResourceAnnotation = "replicatedresource.k8ssandra.io/orphan" ) func generateRandomString(charset string, length int) ([]byte, error) { @@ -48,8 +52,9 @@ func DefaultSuperuserSecretName(clusterName string) string { return secretName } -// ReconcileSuperuserSecret creates the superUserSecret with proper annotations -func ReconcileSuperuserSecret(ctx context.Context, c client.Client, secretName, clusterName, namespace string) error { +// ReconcileSecret creates a new secret with proper "managed-by" annotations, or ensure the existing secret has such +// annotations. 
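+// If the secret already exists but is not yet managed, it gets adopted: the managed-by labels are applied and the +// orphan annotation is set to "true" so that replicated copies survive deletion of the matching ReplicatedSecret.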
+func ReconcileSecret(ctx context.Context, c client.Client, secretName, clusterName, namespace string) error { if secretName == "" { return fmt.Errorf("secretName is required") } @@ -79,7 +84,12 @@ func ReconcileSuperuserSecret(ctx context.Context, c client.Client, secretName, } } - // It exists or was created, we won't modify it anymore + // It exists or was created: ensure it has proper annotations + if !utils.IsManagedBy(currentSec, clusterName) { + utils.SetManagedBy(currentSec, clusterName) + utils.AddAnnotation(currentSec, OrphanResourceAnnotation, "true") + return c.Update(ctx, currentSec) + } return nil } @@ -92,17 +102,17 @@ func ReconcileReplicatedSecret(ctx context.Context, c client.Client, scheme *run } } - targetRepSec := generateReplicatedSecret(kc.Spec.Cassandra.Cluster, kc.Namespace, replicationTargets) + targetRepSec := generateReplicatedSecret(kc.Name, kc.Namespace, replicationTargets) key := client.ObjectKey{Namespace: targetRepSec.Namespace, Name: targetRepSec.Name} repSec := &replicationapi.ReplicatedSecret{} err := controllerutil.SetControllerReference(kc, targetRepSec, scheme) if err != nil { - logger.Error(err, "Failed to set owner reference on ReplicatedSecret", "ReplicatedSect", key) + logger.Error(err, "Failed to set owner reference on ReplicatedSecret", "ReplicatedSecret", key) return err } - err = c.Get(ctx, types.NamespacedName{Name: kc.Spec.Cassandra.Cluster, Namespace: kc.Namespace}, repSec) + err = c.Get(ctx, types.NamespacedName{Name: kc.Name, Namespace: kc.Namespace}, repSec) if err != nil { if errors.IsNotFound(err) { logger.Info("Creating ReplicatedSecret", "ReplicatedSecret", key) @@ -160,10 +170,7 @@ func generateReplicatedSecret(clusterName, namespace string, targetContexts []st ObjectMeta: getManagedObjectMeta(clusterName, namespace, clusterName), Spec: replicationapi.ReplicatedSecretSpec{ Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - api.ManagedByLabel: api.NameLabelValue, - api.K8ssandraClusterLabel: clusterName, - }, + MatchLabels: utils.ManagedByLabels(clusterName), }, ReplicationTargets: replicationTargets, }, @@ -202,12 +209,7 @@ func requiresUpdate(current, desired *replicationapi.ReplicatedSecret) bool { } func getManagedObjectMeta(name, namespace, clusterName string) metav1.ObjectMeta { - return metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: map[string]string{ - api.ManagedByLabel: api.NameLabelValue, - api.K8ssandraClusterLabel: clusterName, - }, - } + meta := metav1.ObjectMeta{Name: name, Namespace: namespace} + utils.SetManagedBy(&meta, clusterName) + return meta } diff --git a/pkg/stargate/auth_schema.go b/pkg/stargate/auth_schema.go index f5d278e08..a15102ef8 100644 --- a/pkg/stargate/auth_schema.go +++ b/pkg/stargate/auth_schema.go @@ -3,12 +3,8 @@ package stargate import ( "fmt" "github.com/go-logr/logr" - cassdcapi "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1" "github.com/k8ssandra/cass-operator/pkg/httphelper" - k8ssandraapi "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" "github.com/k8ssandra/k8ssandra-operator/pkg/cassandra" - "math" - "strconv" ) const ( @@ -16,8 +12,6 @@ const ( AuthTable = "token" ) -const networkTopology = "org.apache.cassandra.locator.NetworkTopologyStrategy" - var authTableDefinition = &httphelper.TableDefinition{ KeyspaceName: AuthKeyspace, TableName: AuthTable, @@ -28,60 +22,11 @@ var authTableDefinition = &httphelper.TableDefinition{ }, } -func ReconcileAuthSchema( - kc *k8ssandraapi.K8ssandraCluster, - dc 
*cassdcapi.CassandraDatacenter, - managementApi cassandra.ManagementApiFacade, - logger logr.Logger, -) (err error) { - replication := desiredAuthSchemaReplication(kc.Spec.Cassandra.Datacenters...) - if err = reconcileAuthKeyspace(replication, dc.ClusterName, managementApi, logger); err == nil { - err = reconcileAuthTable(managementApi, logger) - } - return -} - -func desiredAuthSchemaReplication(datacenters ...k8ssandraapi.CassandraDatacenterTemplate) map[string]int { - desiredReplication := make(map[string]int, len(datacenters)) - for _, dcTemplate := range datacenters { - replicationFactor := int(math.Min(3.0, float64(dcTemplate.Size))) - desiredReplication[dcTemplate.Meta.Name] = replicationFactor - } - return desiredReplication -} - -func reconcileAuthKeyspace(desiredReplication map[string]int, clusterName string, managementApi cassandra.ManagementApiFacade, logger logr.Logger) error { - logger.Info(fmt.Sprintf("Ensuring that keyspace %s exists in cluster %v...", AuthKeyspace, clusterName)) - if keyspaces, err := managementApi.ListKeyspaces(AuthKeyspace); err != nil { - return err - } else if len(keyspaces) == 0 { - logger.Info(fmt.Sprintf("keyspace %s does not exist in cluster %v, creating it", AuthKeyspace, clusterName)) - if err := managementApi.CreateKeyspaceIfNotExists(AuthKeyspace, desiredReplication); err != nil { - return err - } else { - logger.Info(fmt.Sprintf("Keyspace %s successfully created", AuthKeyspace)) - return nil - } - } else { - logger.Info(fmt.Sprintf("keyspace %s already exists in cluster %v", AuthKeyspace, clusterName)) - if actualReplication, err := managementApi.GetKeyspaceReplication(AuthKeyspace); err != nil { - return err - } else if compareReplications(actualReplication, desiredReplication) { - logger.Info(fmt.Sprintf("Keyspace %s has desired replication", AuthKeyspace)) - return nil - } else { - logger.Info(fmt.Sprintf("keyspace %s already exists in cluster %v but has wrong replication, altering it", AuthKeyspace, clusterName)) - if err := managementApi.AlterKeyspace(AuthKeyspace, desiredReplication); err != nil { - return err - } else { - logger.Info(fmt.Sprintf("Keyspace %s successfully altered", AuthKeyspace)) - return nil - } - } - } -} - -func reconcileAuthTable(managementApi cassandra.ManagementApiFacade, logger logr.Logger) error { +// ReconcileAuthTable ensures that the Stargate auth schema contains the appropriate tables. Note that we don't do much +// here and currently, we only check if the token table exists, without checking if the table definition matches the +// expected one. If the auth schema becomes more complex in the future, we'd need to find a more robust solution, maybe +// à la Reaper. 
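+// Keyspace creation and replication adjustments are no longer performed here.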
+func ReconcileAuthTable(managementApi cassandra.ManagementApiFacade, logger logr.Logger) error { if tables, err := managementApi.ListTables(AuthKeyspace); err != nil { return err } else if len(tables) == 0 { @@ -93,28 +38,7 @@ func reconcileAuthTable(managementApi cassandra.ManagementApiFacade, logger logr return nil } } else { - // We do not currently check if the table definition matches logger.Info(fmt.Sprintf("table %s already exists in keyspace %v", AuthTable, AuthKeyspace)) return nil } } - -func compareReplications(actualReplication map[string]string, desiredReplication map[string]int) bool { - if len(actualReplication) == 0 { - return false - } else if class := actualReplication["class"]; class != networkTopology { - return false - } else if len(actualReplication) != len(desiredReplication)+1 { - return false - } - for dcName, desiredRf := range desiredReplication { - if actualRf, ok := actualReplication[dcName]; !ok { - return false - } else if rf, err := strconv.Atoi(actualRf); err != nil { - return false - } else if rf != desiredRf { - return false - } - } - return true -} diff --git a/pkg/stargate/auth_schema_test.go b/pkg/stargate/auth_schema_test.go index 2878b04de..14a8ed8a7 100644 --- a/pkg/stargate/auth_schema_test.go +++ b/pkg/stargate/auth_schema_test.go @@ -2,7 +2,6 @@ package stargate import ( "errors" - k8ssandraapi "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" "github.com/k8ssandra/k8ssandra-operator/pkg/cassandra" "github.com/k8ssandra/k8ssandra-operator/pkg/mocks" "github.com/stretchr/testify/assert" @@ -11,110 +10,6 @@ import ( "testing" ) -func TestReconcileAuthKeyspace(t *testing.T) { - dummyError := errors.New("failure") - desiredReplication := map[string]int{"dc1": 1} - goodReplication := map[string]string{ - "class": networkTopology, - "dc1": "1", - } - badReplication := map[string]string{ - "class": networkTopology, - "dc1": "3", - } - tests := []struct { - name string - replication map[string]int - managementApi func() cassandra.ManagementApiFacade - err error - }{ - { - "list keyspace failed", - desiredReplication, - func() cassandra.ManagementApiFacade { - m := new(mocks.ManagementApiFacade) - m.On("ListKeyspaces", AuthKeyspace).Return(nil, dummyError) - return m - }, - dummyError, - }, - { - "create keyspace failed", - desiredReplication, - func() cassandra.ManagementApiFacade { - m := new(mocks.ManagementApiFacade) - m.On("ListKeyspaces", AuthKeyspace).Return([]string{}, nil) - m.On("CreateKeyspaceIfNotExists", AuthKeyspace, desiredReplication).Return(dummyError) - return m - }, - dummyError, - }, - { - "create keyspace OK", - desiredReplication, - func() cassandra.ManagementApiFacade { - m := new(mocks.ManagementApiFacade) - m.On("ListKeyspaces", AuthKeyspace).Return([]string{}, nil) - m.On("CreateKeyspaceIfNotExists", AuthKeyspace, desiredReplication).Return(nil) - return m - }, - nil, - }, - { - "get keyspace replication failed", - desiredReplication, - func() cassandra.ManagementApiFacade { - m := new(mocks.ManagementApiFacade) - m.On("ListKeyspaces", AuthKeyspace).Return([]string{AuthKeyspace}, nil) - m.On("GetKeyspaceReplication", AuthKeyspace).Return(nil, dummyError) - return m - }, - dummyError, - }, - { - "get keyspace replication OK", - desiredReplication, - func() cassandra.ManagementApiFacade { - m := new(mocks.ManagementApiFacade) - m.On("ListKeyspaces", AuthKeyspace).Return([]string{AuthKeyspace}, nil) - m.On("GetKeyspaceReplication", AuthKeyspace).Return(goodReplication, nil) - return m - }, - nil, - }, - { - "get 
keyspace replication wrong, alter OK", - desiredReplication, - func() cassandra.ManagementApiFacade { - m := new(mocks.ManagementApiFacade) - m.On("ListKeyspaces", AuthKeyspace).Return([]string{AuthKeyspace}, nil) - m.On("GetKeyspaceReplication", AuthKeyspace).Return(badReplication, nil) - m.On("AlterKeyspace", AuthKeyspace, desiredReplication).Return(nil) - return m - }, - nil, - }, - { - "get keyspace replication wrong, alter failed", - desiredReplication, - func() cassandra.ManagementApiFacade { - m := new(mocks.ManagementApiFacade) - m.On("ListKeyspaces", AuthKeyspace).Return([]string{AuthKeyspace}, nil) - m.On("GetKeyspaceReplication", AuthKeyspace).Return(badReplication, nil) - m.On("AlterKeyspace", AuthKeyspace, desiredReplication).Return(dummyError) - return m - }, - dummyError, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := reconcileAuthKeyspace(tt.replication, "test", tt.managementApi(), log.NullLogger{}) - assert.Equal(t, tt.err, err) - }) - } -} - func TestReconcileAuthTable(t *testing.T) { dummyError := errors.New("failure") tests := []struct { @@ -163,61 +58,8 @@ func TestReconcileAuthTable(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := reconcileAuthTable(tt.managementApi(), log.NullLogger{}) + err := ReconcileAuthTable(tt.managementApi(), log.NullLogger{}) assert.Equal(t, tt.err, err) }) } } - -func TestDesiredAuthSchemaReplication(t *testing.T) { - tests := []struct { - name string - dcs []k8ssandraapi.CassandraDatacenterTemplate - expected map[string]int - }{ - {"one dc", []k8ssandraapi.CassandraDatacenterTemplate{ - {Meta: k8ssandraapi.EmbeddedObjectMeta{Name: "dc1"}, Size: 3}, - }, map[string]int{"dc1": 3}}, - {"small dc", []k8ssandraapi.CassandraDatacenterTemplate{ - {Meta: k8ssandraapi.EmbeddedObjectMeta{Name: "dc1"}, Size: 1}, - }, map[string]int{"dc1": 1}}, - {"large dc", []k8ssandraapi.CassandraDatacenterTemplate{ - {Meta: k8ssandraapi.EmbeddedObjectMeta{Name: "dc1"}, Size: 10}, - }, map[string]int{"dc1": 3}}, - {"many dcs", []k8ssandraapi.CassandraDatacenterTemplate{ - {Meta: k8ssandraapi.EmbeddedObjectMeta{Name: "dc1"}, Size: 3}, - {Meta: k8ssandraapi.EmbeddedObjectMeta{Name: "dc2"}, Size: 1}, - {Meta: k8ssandraapi.EmbeddedObjectMeta{Name: "dc3"}, Size: 10}, - }, map[string]int{"dc1": 3, "dc2": 1, "dc3": 3}}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - actual := desiredAuthSchemaReplication(tt.dcs...) 
- assert.Equal(t, tt.expected, actual) - }) - } -} - -func TestCompareReplications(t *testing.T) { - tests := []struct { - name string - actual map[string]string - desired map[string]int - expected bool - }{ - {"nil", nil, map[string]int{"dc1": 3}, false}, - {"empty", map[string]string{}, map[string]int{"dc1": 3}, false}, - {"wrong class", map[string]string{"class": "wrong"}, map[string]int{"dc1": 3}, false}, - {"wrong length", map[string]string{"class": networkTopology, "dc1": "3", "dc2": "3"}, map[string]int{"dc1": 3}, false}, - {"missing dc", map[string]string{"class": networkTopology, "dc2": "3"}, map[string]int{"dc1": 3}, false}, - {"invalid rf", map[string]string{"class": networkTopology, "dc1": "not a number"}, map[string]int{"dc1": 3}, false}, - {"wrong rf", map[string]string{"class": networkTopology, "dc1": "1"}, map[string]int{"dc1": 3}, false}, - {"success", map[string]string{"class": networkTopology, "dc1": "1", "dc2": "3"}, map[string]int{"dc1": 1, "dc2": 3}, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := compareReplications(tt.actual, tt.desired) - assert.Equal(t, tt.expected, result) - }) - } -} diff --git a/pkg/test/testenv.go b/pkg/test/testenv.go index 865e52bff..fcabab7cc 100644 --- a/pkg/test/testenv.go +++ b/pkg/test/testenv.go @@ -3,6 +3,7 @@ package test import ( "context" "fmt" + reaperapi "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" "os" "path/filepath" "testing" @@ -278,6 +279,10 @@ func registerApis() error { return err } + if err := reaperapi.AddToScheme(scheme.Scheme); err != nil { + return err + } + if err := configapi.AddToScheme(scheme.Scheme); err != nil { return err } diff --git a/pkg/utils/annotations.go b/pkg/utils/annotations.go new file mode 100644 index 000000000..d88072d18 --- /dev/null +++ b/pkg/utils/annotations.go @@ -0,0 +1,32 @@ +package utils + +type Annotated interface { + GetAnnotations() map[string]string + SetAnnotations(annotations map[string]string) +} + +func AddAnnotation(obj Annotated, annotationKey string, annotationValue string) { + m := obj.GetAnnotations() + if m == nil { + m = map[string]string{} + } + m[annotationKey] = annotationValue + obj.SetAnnotations(m) +} + +func GetAnnotation(component Annotated, annotationKey string) string { + m := component.GetAnnotations() + return m[annotationKey] +} + +func HasAnnotationWithValue(component Annotated, annotationKey string, annotationValue string) bool { + return GetAnnotation(component, annotationKey) == annotationValue +} + +func CompareAnnotations(r1, r2 Annotated, annotationKey string) bool { + annotationValue := GetAnnotation(r1, annotationKey) + if annotationValue == "" { + return false + } + return HasAnnotationWithValue(r2, annotationKey, annotationValue) +} diff --git a/pkg/utils/hash.go b/pkg/utils/hash.go index 4b5e1c9a7..7c8daacdf 100644 --- a/pkg/utils/hash.go +++ b/pkg/utils/hash.go @@ -6,6 +6,11 @@ import ( "k8s.io/kubernetes/pkg/util/hash" ) +func AddHashAnnotation(obj Annotated, annotationKey string) { + h := DeepHashString(obj) + AddAnnotation(obj, annotationKey, h) +} + func DeepHashString(obj interface{}) string { hasher := sha256.New() hash.DeepHashObject(hasher, obj) diff --git a/pkg/utils/labels.go b/pkg/utils/labels.go new file mode 100644 index 000000000..0fdbb900e --- /dev/null +++ b/pkg/utils/labels.go @@ -0,0 +1,75 @@ +package utils + +import ( + api "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" +) + +type Labeled interface { + GetLabels() map[string]string + SetLabels(labels 
map[string]string) +} + +func AddLabel(obj Labeled, labelKey string, labelValue string) { + m := obj.GetLabels() + if m == nil { + m = map[string]string{} + } + m[labelKey] = labelValue + obj.SetLabels(m) +} + +func GetLabel(component Labeled, labelKey string) string { + m := component.GetLabels() + return m[labelKey] +} + +func HasLabelWithValue(component Labeled, labelKey string, labelValue string) bool { + return GetLabel(component, labelKey) == labelValue +} + +// SetManagedBy sets the required labels for making a component managed by K8ssandra. +// Important: k8cName is the name of the K8ssandraCluster resource managing the component, not the name of the Cassandra +// cluster. IOW, it should be k8c.Name, NOT k8c.Spec.Cassandra.Cluster! +func SetManagedBy(component Labeled, k8cName string) { + AddLabel(component, api.ManagedByLabel, api.NameLabelValue) + AddLabel(component, api.K8ssandraClusterLabel, k8cName) +} + +// IsManagedBy checks whether the given component is managed by K8ssandra, and belongs to the K8ssandraCluster resource +// specified by k8cName. +// Important: k8cName is the name of the K8ssandraCluster resource managing the component, not the name of the Cassandra +// cluster. IOW, it should be k8c.Name, NOT k8c.Spec.Cassandra.Cluster! +func IsManagedBy(component Labeled, k8cName string) bool { + return HasLabelWithValue(component, api.ManagedByLabel, api.NameLabelValue) && + HasLabelWithValue(component, api.K8ssandraClusterLabel, k8cName) +} + +// ManagedByLabels returns the labels used to identify a component managed by K8ssandra. +// Important: k8cName is the name of the K8ssandraCluster resource managing the component, not the name of the Cassandra +// cluster. IOW, it should be k8c.Name, NOT k8c.Spec.Cassandra.Cluster! +func ManagedByLabels(k8cName string) map[string]string { + return map[string]string{ + api.ManagedByLabel: api.NameLabelValue, + api.K8ssandraClusterLabel: k8cName, + } +} + +// IsCreatedByK8ssandraController returns true if this component was created by the k8ssandra-cluster controller, and +// belongs to the K8ssandraCluster resource specified by k8cName. +// Important: k8cName is the name of the K8ssandraCluster resource managing the component, not the name of the Cassandra +// cluster. IOW, it should be k8c.Name, NOT k8c.Spec.Cassandra.Cluster! +func IsCreatedByK8ssandraController(component Labeled, k8cName string) bool { + return HasLabelWithValue(component, api.CreatedByLabel, api.CreatedByLabelValueK8ssandraClusterController) && + HasLabelWithValue(component, api.K8ssandraClusterLabel, k8cName) +} + +// CreatedByK8ssandraControllerLabels returns the labels used to identify a component created by the k8ssandra-cluster +// controller, and belonging to the K8ssandraCluster resource specified by k8cName. +// Important: k8cName is the name of the K8ssandraCluster resource managing the component, not the name of the Cassandra +// cluster. IOW, it should be k8c.Name, NOT k8c.Spec.Cassandra.Cluster! 
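+// These are the same label pairs checked by IsCreatedByK8ssandraController, in map form for use as a selector.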
+func CreatedByK8ssandraControllerLabels(k8cName string) map[string]string { + return map[string]string{ + api.CreatedByLabel: api.CreatedByLabelValueK8ssandraClusterController, + api.K8ssandraClusterLabel: k8cName, + } +} diff --git a/pkg/utils/slice.go b/pkg/utils/slice.go index ad7010faa..3985c87cc 100644 --- a/pkg/utils/slice.go +++ b/pkg/utils/slice.go @@ -8,15 +8,3 @@ func SliceContains(slice []string, s string) bool { } return false } - -func SlicesContainSameElementsInAnyOrder(slice1, slice2 []string) bool { - if len(slice1) != len(slice2) { - return false - } - for _, s := range slice1 { - if !SliceContains(slice2, s) { - return false - } - } - return true -} diff --git a/test/e2e/reaper_test.go b/test/e2e/reaper_test.go new file mode 100644 index 000000000..75077f4f8 --- /dev/null +++ b/test/e2e/reaper_test.go @@ -0,0 +1,136 @@ +package e2e + +import ( + "context" + api "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1" + reaperapi "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" + "github.com/k8ssandra/k8ssandra-operator/test/framework" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "testing" +) + +func createSingleReaper(t *testing.T, ctx context.Context, namespace string, f *framework.E2eFramework) { + + t.Log("check that the K8ssandraCluster was created") + k8ssandra := &api.K8ssandraCluster{} + kcKey := types.NamespacedName{Namespace: namespace, Name: "test"} + err := f.Client.Get(ctx, kcKey, k8ssandra) + require.NoError(t, err, "failed to get K8ssandraCluster in namespace %s", namespace) + + dcKey := framework.ClusterKey{K8sContext: "kind-k8ssandra-0", NamespacedName: types.NamespacedName{Namespace: namespace, Name: "dc1"}} + checkDatacenterReady(t, ctx, dcKey, f) + + t.Log("check k8ssandra cluster status updated for CassandraDatacenter") + assert.Eventually(t, func() bool { + k8ssandra := &api.K8ssandraCluster{} + if err := f.Client.Get(ctx, kcKey, k8ssandra); err != nil { + return false + } + kdcStatus, found := k8ssandra.Status.Datacenters[dcKey.Name] + return found && kdcStatus.Cassandra != nil && cassandraDatacenterReady(kdcStatus.Cassandra) + }, polling.k8ssandraClusterStatus.timeout, polling.k8ssandraClusterStatus.interval, "timed out waiting for K8ssandraCluster status to get updated") + + reaperKey := framework.ClusterKey{K8sContext: "kind-k8ssandra-0", NamespacedName: types.NamespacedName{Namespace: namespace, Name: "test-dc1-reaper"}} + withReaper := f.NewWithReaper(ctx, reaperKey) + + t.Log("check Reaper status updated to ready") + require.Eventually(t, withReaper(func(reaper *reaperapi.Reaper) bool { + return reaper.Status.IsReady() + }), polling.reaperReady.timeout, polling.reaperReady.interval) + + t.Log("check k8ssandra cluster status updated for Reaper") + assert.Eventually(t, func() bool { + k8ssandra := &api.K8ssandraCluster{} + if err := f.Client.Get(ctx, kcKey, k8ssandra); err != nil { + return false + } + kdcStatus, found := k8ssandra.Status.Datacenters[dcKey.Name] + return found && + kdcStatus.Cassandra != nil && + cassandraDatacenterReady(kdcStatus.Cassandra) && + kdcStatus.Reaper != nil && + kdcStatus.Reaper.IsReady() + }, polling.k8ssandraClusterStatus.timeout, polling.k8ssandraClusterStatus.interval, "timed out waiting for K8ssandraCluster status to get updated") + + t.Log("check that if Reaper is deleted directly it gets re-created") + reaper := &reaperapi.Reaper{} + 
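// fetch the live Reaper and delete it directly; the operator should recreate it on its next reconciliation +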
err = f.Client.Get(ctx, reaperKey.NamespacedName, reaper) + require.NoError(t, err, "failed to get Reaper in namespace %s", namespace) + err = f.Client.Delete(ctx, reaper) + require.NoError(t, err, "failed to delete Reaper in namespace %s", namespace) + + t.Log("check that Reaper is re-created and ready") + require.Eventually(t, withReaper(func(reaper *reaperapi.Reaper) bool { + return reaper.Status.IsReady() + }), polling.reaperReady.timeout, polling.reaperReady.interval, "timed out waiting for Reaper test-dc1-reaper to become ready") + + t.Log("delete Reaper in k8ssandracluster CRD") + err = f.Client.Get(ctx, kcKey, k8ssandra) + require.NoError(t, err, "failed to get K8ssandraCluster in namespace %s", namespace) + patch := client.MergeFromWithOptions(k8ssandra.DeepCopy(), client.MergeFromWithOptimisticLock{}) + reaperTemplate := k8ssandra.Spec.Reaper + k8ssandra.Spec.Reaper = nil + err = f.Client.Patch(ctx, k8ssandra, patch) + require.NoError(t, err, "failed to patch K8ssandraCluster in namespace %s", namespace) + + t.Log("check Reaper deleted") + require.Eventually(t, func() bool { + reaper := &reaperapi.Reaper{} + err := f.Client.Get(ctx, reaperKey.NamespacedName, reaper) + return err != nil && errors.IsNotFound(err) + }, polling.reaperReady.timeout, polling.reaperReady.interval) + + checkDatacenterReady(t, ctx, dcKey, f) + + t.Log("check Reaper status deleted in k8ssandracluster resource") + require.Eventually(t, func() bool { + k8ssandra := &api.K8ssandraCluster{} + if err := f.Client.Get(ctx, kcKey, k8ssandra); err != nil { + return false + } + kdcStatus, found := k8ssandra.Status.Datacenters[dcKey.Name] + return found && kdcStatus.Reaper == nil + }, polling.reaperReady.timeout, polling.reaperReady.interval) + + t.Log("re-create Reaper in k8ssandracluster resource") + err = f.Client.Get(ctx, kcKey, k8ssandra) + require.NoError(t, err, "failed to get K8ssandraCluster in namespace %s", namespace) + patch = client.MergeFromWithOptions(k8ssandra.DeepCopy(), client.MergeFromWithOptimisticLock{}) + k8ssandra.Spec.Reaper = reaperTemplate.DeepCopy() + err = f.Client.Patch(ctx, k8ssandra, patch) + require.NoError(t, err, "failed to patch K8ssandraCluster in namespace %s", namespace) + + t.Log("check Reaper re-created") + require.Eventually(t, withReaper(func(reaper *reaperapi.Reaper) bool { + return true + }), polling.reaperReady.timeout, polling.reaperReady.interval) + + checkDatacenterReady(t, ctx, dcKey, f) + + t.Log("check Reaper status updated to ready") + require.Eventually(t, withReaper(func(reaper *reaperapi.Reaper) bool { + return reaper.Status.IsReady() + }), polling.reaperReady.timeout, polling.reaperReady.interval) + + t.Log("check k8ssandra cluster status updated for Reaper") + require.Eventually(t, func() bool { + k8ssandra := &api.K8ssandraCluster{} + if err := f.Client.Get(ctx, kcKey, k8ssandra); err != nil { + return false + } + kdcStatus, found := k8ssandra.Status.Datacenters[dcKey.Name] + return found && + kdcStatus.Cassandra != nil && + cassandraDatacenterReady(kdcStatus.Cassandra) && + kdcStatus.Reaper != nil && + kdcStatus.Reaper.IsReady() + }, polling.k8ssandraClusterStatus.timeout, polling.k8ssandraClusterStatus.interval) +} + +func createMultiReaper(t *testing.T, ctx context.Context, namespace string, f *framework.E2eFramework) { + t.Log("TODO") +} diff --git a/test/e2e/stargate_apis_test.go b/test/e2e/stargate_apis_test.go index e2249afc8..14236137e 100644 --- a/test/e2e/stargate_apis_test.go +++ b/test/e2e/stargate_apis_test.go @@ -8,13 +8,9 @@ import ( 
"github.com/datastax/go-cassandra-native-protocol/frame" "github.com/datastax/go-cassandra-native-protocol/message" "github.com/datastax/go-cassandra-native-protocol/primitive" - "github.com/k8ssandra/k8ssandra-operator/pkg/secret" - "github.com/k8ssandra/k8ssandra-operator/test/framework" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/resty.v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/rand" "net/http" neturl "net/url" @@ -232,34 +228,6 @@ func authenticate(t *testing.T, restClient *resty.Client, k8sContextIdx int, use return tokenStr } -func retrieveDatabaseCredentials(t *testing.T, f *framework.E2eFramework, ctx context.Context, namespace, clusterName string) (string, string) { - superUserSecret := retrieveSuperuserSecret(t, f, ctx, namespace, clusterName) - username := string(superUserSecret.Data["username"]) - password := string(superUserSecret.Data["password"]) - return username, password -} - -func retrieveSuperuserSecret(t *testing.T, f *framework.E2eFramework, ctx context.Context, namespace, clusterName string) *corev1.Secret { - var superUserSecret *corev1.Secret - timeout := 2 * time.Minute - interval := 1 * time.Second - require.Eventually(t, func() bool { - superUserSecret = &corev1.Secret{} - superUserSecretKey := framework.ClusterKey{ - NamespacedName: types.NamespacedName{ - Namespace: namespace, - Name: secret.DefaultSuperuserSecretName(clusterName), - }, - K8sContext: f.ControlPlaneContext, - } - err := f.Get(ctx, superUserSecretKey, superUserSecret) - return err == nil && - len(superUserSecret.Data["username"]) >= 0 && - len(superUserSecret.Data["password"]) >= 0 - }, timeout, interval, "Failed to retrieve superuser secret") - return superUserSecret -} - func openCqlClientConnection(t *testing.T, ctx context.Context, k8sContextIdx int, username, password string) *client.CqlClientConnection { contactPoint := fmt.Sprintf("stargate.127.0.0.1.nip.io:3%v942", k8sContextIdx) cqlClient := client.NewCqlClient(contactPoint, &client.AuthCredentials{ diff --git a/test/e2e/suite_test.go b/test/e2e/suite_test.go index 11c8f17da..6ab93ac03 100644 --- a/test/e2e/suite_test.go +++ b/test/e2e/suite_test.go @@ -41,6 +41,7 @@ var ( operatorDeploymentReady pollingConfig k8ssandraClusterStatus pollingConfig stargateReady pollingConfig + reaperReady pollingConfig } logKustomizeOutput = flag.Bool("logKustomizeOutput", false, "") @@ -59,6 +60,8 @@ func TestOperator(t *testing.T) { t.Run("CreateMultiStargateAndDatacenter", e2eTest(ctx, "multi-stargate", true, createStargateAndDatacenter)) t.Run("CreateMultiDatacenterCluster", e2eTest(ctx, "multi-dc", false, createMultiDatacenterCluster)) t.Run("CheckStargateApisWithMultiDcCluster", e2eTest(ctx, "multi-dc-stargate", true, checkStargateApisWithMultiDcCluster)) + t.Run("CreateSingleReaper", e2eTest(ctx, "single-dc-reaper", false, createSingleReaper)) + t.Run("CreateMultiReaper", e2eTest(ctx, "multi-dc-reaper", false, createMultiReaper)) } func beforeSuite(t *testing.T) { @@ -227,6 +230,9 @@ func applyPollingDefaults() { polling.stargateReady.timeout = 5 * time.Minute polling.stargateReady.interval = 5 * time.Second + + polling.reaperReady.timeout = 5 * time.Minute + polling.reaperReady.interval = 5 * time.Second } func afterTest(t *testing.T, namespace string, f *framework.E2eFramework, deployTraefik bool) { @@ -246,12 +252,12 @@ func cleanUp(t *testing.T, namespace string, f *framework.E2eFramework, deployTr } if err := f.DeleteK8ssandraClusters(namespace); 
err != nil { - return err + t.Logf("failed to delete K8ssandra clusters: %v", err) } if deployTraefik { if err := f.UndeployTraefik(t, namespace); err != nil { - return err + t.Logf("failed to undeploy Traefik: %v", err) } } @@ -259,19 +265,23 @@ func cleanUp(t *testing.T, namespace string, f *framework.E2eFramework, deployTr interval := 10 * time.Second if err := f.DeleteStargates(namespace, timeout, interval); err != nil { - return err + t.Logf("failed to delete Stargates: %v", err) + } + + if err := f.DeleteReapers(namespace, timeout, interval); err != nil { + t.Logf("failed to delete Reapers: %v", err) } if err := f.DeleteDatacenters(namespace, timeout, interval); err != nil { - return err + t.Logf("failed to delete datacenters: %v", err) } if err := f.DeleteReplicatedSecrets(namespace, timeout, interval); err != nil { - return err + t.Logf("failed to delete replicated secrets: %v", err) } if err := f.DeleteNamespace(namespace, timeout, interval); err != nil { - return err + t.Logf("failed to delete namespace: %v", err) } return nil @@ -391,11 +401,11 @@ func createSingleDatacenterCluster(t *testing.T, ctx context.Context, namespace }), polling.stargateReady.timeout, polling.stargateReady.interval, "timed out waiting for Stargate test-dc1-stargate to become ready") t.Log("retrieve database credentials") - username, password := retrieveDatabaseCredentials(t, f, ctx, namespace, "test") + username, password := f.RetrieveDatabaseCredentials(t, ctx, namespace, "test") t.Log("deploying Stargate ingress routes in kind-k8ssandra-0") f.DeployStargateIngresses(t, "kind-k8ssandra-0", 0, namespace, "test-dc1-stargate-service", username, password) - defer f.UndeployStargateIngresses(t, "kind-k8ssandra-0", namespace) + defer f.UndeployAllIngresses(t, "kind-k8ssandra-0", namespace) replication := map[string]int{"dc1": 1} testStargateApis(t, ctx, "kind-k8ssandra-0", 0, username, password, replication) @@ -417,11 +427,11 @@ func createStargateAndDatacenter(t *testing.T, ctx context.Context, namespace st }), polling.stargateReady.timeout, polling.stargateReady.interval, "timed out waiting for Stargate s1 to become ready") t.Log("retrieve database credentials") - username, password := retrieveDatabaseCredentials(t, f, ctx, namespace, "test") + username, password := f.RetrieveDatabaseCredentials(t, ctx, namespace, "test") t.Log("deploying Stargate ingress routes in kind-k8ssandra-0") f.DeployStargateIngresses(t, "kind-k8ssandra-0", 0, namespace, "test-dc1-stargate-service", username, password) - defer f.UndeployStargateIngresses(t, "kind-k8ssandra-0", namespace) + defer f.UndeployAllIngresses(t, "kind-k8ssandra-0", namespace) replication := map[string]int{"dc1": 3} testStargateApis(t, ctx, "kind-k8ssandra-0", 0, username, password, replication) @@ -631,15 +641,15 @@ func checkStargateApisWithMultiDcCluster(t *testing.T, ctx context.Context, name assert.NoError(t, err, "timed out waiting for nodetool status check against "+pod) t.Log("retrieve database credentials") - username, password := retrieveDatabaseCredentials(t, f, ctx, namespace, "test") + username, password := f.RetrieveDatabaseCredentials(t, ctx, namespace, "test") t.Log("deploying Stargate ingress routes in kind-k8ssandra-0") f.DeployStargateIngresses(t, "kind-k8ssandra-0", 0, namespace, "test-dc1-stargate-service", username, password) - defer f.UndeployStargateIngresses(t, "kind-k8ssandra-0", namespace) + defer f.UndeployAllIngresses(t, "kind-k8ssandra-0", namespace) t.Log("deploying Stargate ingress routes in kind-k8ssandra-1") 
f.DeployStargateIngresses(t, "kind-k8ssandra-1", 1, namespace, "test-dc2-stargate-service", username, password) - defer f.UndeployStargateIngresses(t, "kind-k8ssandra-1", namespace) + defer f.UndeployAllIngresses(t, "kind-k8ssandra-1", namespace) replication := map[string]int{"dc1": 1, "dc2": 1} diff --git a/test/framework/cqlsh.go b/test/framework/cqlsh.go new file mode 100644 index 000000000..9c6da64e6 --- /dev/null +++ b/test/framework/cqlsh.go @@ -0,0 +1,62 @@ +package framework + +import ( + "context" + "github.com/k8ssandra/k8ssandra-operator/pkg/secret" + "github.com/k8ssandra/k8ssandra-operator/test/kubectl" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "testing" + "time" +) + +func (f *E2eFramework) RetrieveSuperuserSecret(t *testing.T, ctx context.Context, namespace, k8cName string) *corev1.Secret { + var superUserSecret *corev1.Secret + timeout := 2 * time.Minute + interval := 1 * time.Second + require.Eventually(t, func() bool { + superUserSecret = &corev1.Secret{} + superUserSecretKey := ClusterKey{ + NamespacedName: types.NamespacedName{ + Namespace: namespace, + Name: secret.DefaultSuperuserSecretName(k8cName), + }, + K8sContext: f.ControlPlaneContext, + } + err := f.Get(ctx, superUserSecretKey, superUserSecret) + return err == nil && + len(superUserSecret.Data["username"]) > 0 && + len(superUserSecret.Data["password"]) > 0 + }, timeout, interval, "Failed to retrieve superuser secret") + return superUserSecret +} + +func (f *E2eFramework) RetrieveDatabaseCredentials(t *testing.T, ctx context.Context, namespace, k8cName string) (string, string) { + superUserSecret := f.RetrieveSuperuserSecret(t, ctx, namespace, k8cName) + username := string(superUserSecret.Data["username"]) + password := string(superUserSecret.Data["password"]) + return username, password +} + +func (f *E2eFramework) ExecuteCql(t *testing.T, ctx context.Context, k8sContext, namespace, k8cName, pod, query string) string { + username, password := f.RetrieveDatabaseCredentials(t, ctx, namespace, k8cName) + options := kubectl.Options{Namespace: namespace, Context: k8sContext} + output, _ := kubectl.Exec(options, pod, + "--", + "/opt/cassandra/bin/cqlsh", + "--username", + username, + "--password", + password, + "-e", + query, + ) + return output +} + +func (f *E2eFramework) CheckKeyspaceExists(t *testing.T, ctx context.Context, k8sContext, namespace, k8cName, pod, keyspace string) { + keyspaces := f.ExecuteCql(t, ctx, k8sContext, namespace, k8cName, pod, "describe keyspaces") + assert.Contains(t, keyspaces, keyspace) +} diff --git a/test/framework/e2e_framework.go b/test/framework/e2e_framework.go index c04616b33..54594ec1e 100644 --- a/test/framework/e2e_framework.go +++ b/test/framework/e2e_framework.go @@ -3,6 +3,7 @@ package framework import ( "context" "fmt" + reaperapi "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" "io/ioutil" "os" "os/exec" @@ -232,7 +233,7 @@ func (f *E2eFramework) kustomizeAndApply(dir, namespace string, contexts ...stri return err } - options := kubectl.Options{Namespace: namespace, Context: defaultControlPlaneContext} + options := kubectl.Options{Namespace: namespace, Context: defaultControlPlaneContext, ServerSide: true} return kubectl.Apply(options, buf) } @@ -245,7 +246,7 @@ func (f *E2eFramework) kustomizeAndApply(dir, namespace string, contexts ...stri return err } - options :=
kubectl.Options{Namespace: namespace, Context: ctx} + options := kubectl.Options{Namespace: namespace, Context: ctx, ServerSide: true} if err := kubectl.Apply(options, buf); err != nil { return err } @@ -494,6 +495,19 @@ func (f *E2eFramework) DeleteStargates(namespace string, timeout, interval time. ) } +// DeleteReapers deletes all Reapers in namespace in all remote clusters. +// This function blocks until all pods from all Reapers have terminated. +func (f *E2eFramework) DeleteReapers(namespace string, timeout, interval time.Duration) error { + f.logger.Info("deleting all Reapers", "Namespace", namespace) + return f.deleteAllResources( + namespace, + &reaperapi.Reaper{}, + timeout, + interval, + client.HasLabels{reaperapi.ReaperLabel}, + ) +} + // DeleteReplicatedSecrets deletes all the ReplicatedSecrets in the namespace. This causes // some delay while secret controller removes the finalizers and clears the replicated secrets from // remote clusters. diff --git a/test/framework/framework.go b/test/framework/framework.go index 41e17d144..ac809f57a 100644 --- a/test/framework/framework.go +++ b/test/framework/framework.go @@ -3,6 +3,7 @@ package framework import ( "context" "fmt" + reaperapi "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1" "testing" "time" @@ -45,6 +46,9 @@ func Init(t *testing.T) { err = stargateapi.AddToScheme(scheme.Scheme) require.NoError(t, err, "failed to register scheme for stargate") + err = reaperapi.AddToScheme(scheme.Scheme) + require.NoError(t, err, "failed to register scheme for reaper") + err = configapi.AddToScheme(scheme.Scheme) require.NoError(t, err, "failed to register scheme for k8ssandra-operator configs") @@ -163,7 +167,7 @@ func (f *Framework) PatchDatacenterStatus(ctx context.Context, key ClusterKey, u return remoteClient.Status().Patch(ctx, dc, patch) } -func (f *Framework) PatchStagateStatus(ctx context.Context, key ClusterKey, updateFn func(sg *stargateapi.Stargate)) error { +func (f *Framework) PatchStargateStatus(ctx context.Context, key ClusterKey, updateFn func(sg *stargateapi.Stargate)) error { sg := &stargateapi.Stargate{} err := f.Get(ctx, key, sg) @@ -178,6 +182,21 @@ func (f *Framework) PatchStagateStatus(ctx context.Context, key ClusterKey, upda return remoteClient.Status().Patch(ctx, sg, patch) } +func (f *Framework) PatchReaperStatus(ctx context.Context, key ClusterKey, updateFn func(reaper *reaperapi.Reaper)) error { + reaper := &reaperapi.Reaper{} + err := f.Get(ctx, key, reaper) + + if err != nil { + return err + } + + patch := client.MergeFromWithOptions(reaper.DeepCopy(), client.MergeFromWithOptimisticLock{}) + updateFn(reaper) + + remoteClient := f.remoteClients[key.K8sContext] + return remoteClient.Status().Patch(ctx, reaper, patch) +} + // WaitForDeploymentToBeReady Blocks until the Deployment is ready. If // ClusterKey.K8sContext is empty, this method blocks until the deployment is ready in all // remote clusters. @@ -295,7 +314,39 @@ func (f *Framework) withStargate(ctx context.Context, key ClusterKey, condition func (f *Framework) StargateExists(ctx context.Context, key ClusterKey) func() bool { withStargate := f.NewWithStargate(ctx, key) - return withStargate(func(dc *stargateapi.Stargate) bool { + return withStargate(func(s *stargateapi.Stargate) bool { + return true + }) +} + +// NewWithReaper is a function generator for withReaper that is bound to ctx and key.
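+// The closures it returns are meant for assert.Eventually / require.Eventually: each invocation re-fetches the Reaper +// from the cluster identified by key.K8sContext and evaluates the given condition against it.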
+func (f *Framework) NewWithReaper(ctx context.Context, key ClusterKey) func(func(reaper *reaperapi.Reaper) bool) func() bool { + return func(condition func(*reaperapi.Reaper) bool) func() bool { + return f.withReaper(ctx, key, condition) + } +} + +// withReaper Fetches the reaper specified by key and then calls condition. +func (f *Framework) withReaper(ctx context.Context, key ClusterKey, condition func(*reaperapi.Reaper) bool) func() bool { + return func() bool { + remoteClient, found := f.remoteClients[key.K8sContext] + if !found { + f.logger.Error(f.k8sContextNotFound(key.K8sContext), "cannot lookup Reaper", "key", key) + return false + } + reaper := &reaperapi.Reaper{} + if err := remoteClient.Get(ctx, key.NamespacedName, reaper); err == nil { + return condition(reaper) + } else { + f.logger.Error(err, "failed to get Reaper", "key", key) + return false + } + } +} + +func (f *Framework) ReaperExists(ctx context.Context, key ClusterKey) func() bool { + withReaper := f.NewWithReaper(ctx, key) + return withReaper(func(r *reaperapi.Reaper) bool { return true }) } diff --git a/test/framework/traefik.go b/test/framework/traefik.go index d4279fed9..e61064b59 100644 --- a/test/framework/traefik.go +++ b/test/framework/traefik.go @@ -71,12 +71,11 @@ func (f *E2eFramework) DeployStargateIngresses(t *testing.T, k8sContext string, timeout := 2 * time.Minute interval := 1 * time.Second require.Eventually(t, func() bool { - url := fmt.Sprintf("http://stargate.127.0.0.1.nip.io:3%v080/v1/auth", k8sContextIdx) body := map[string]string{"username": username, "password": password} request := resty.NewRequest(). SetHeader("Content-Type", "application/json"). SetBody(body) - response, err := request.Post(url) + response, err := request.Post(stargateHttp) return err == nil && response.StatusCode() == http.StatusCreated }, timeout, interval, "Address is unreachable: %s", stargateHttp) require.Eventually(t, func() bool { @@ -90,7 +89,30 @@ func (f *E2eFramework) DeployStargateIngresses(t *testing.T, k8sContext string, }, timeout, interval, "Address is unreachable: %s", stargateCql) } -func (f *E2eFramework) UndeployStargateIngresses(t *testing.T, k8sContext, namespace string) { +func (f *E2eFramework) DeployReaperIngresses(t *testing.T, k8sContext string, k8sContextIdx int, namespace, reaperServiceName string) { + src := filepath.Join("..", "..", "test", "testdata", "ingress", "reaper-ingress.yaml") + dir := filepath.Join("..", "..", "build", "test-config", "ingress", k8sContext) + dest := filepath.Join(dir, "reaper-ingress.yaml") + err := os.MkdirAll(dir, 0755) + require.NoError(t, err) + buf, err := ioutil.ReadFile(src) + require.NoError(t, err) + err = ioutil.WriteFile(dest, buf, 0644) + require.NoError(t, err) + err = generateReaperIngressKustomization(k8sContext, namespace, reaperServiceName) + require.NoError(t, err) + err = f.kustomizeAndApply(dir, namespace, k8sContext) + assert.NoError(t, err) + reaperHttp := fmt.Sprintf("http://reaper.127.0.0.1.nip.io:3%v080/cluster", k8sContextIdx) + timeout := 2 * time.Minute + interval := 1 * time.Second + require.Eventually(t, func() bool { + response, err := resty.NewRequest().Get(reaperHttp) + return err == nil && response.StatusCode() == http.StatusOK + }, timeout, interval, "Address is unreachable: %s", reaperHttp) +} + +func (f *E2eFramework) UndeployAllIngresses(t *testing.T, k8sContext, namespace string) { options := kubectl.Options{Context: k8sContext, Namespace: namespace} err := kubectl.DeleteAllOf(options, "IngressRoute") assert.NoError(t, err) @@ 
-152,3 +174,34 @@ patchesJson6902: k := Kustomization{Namespace: namespace} return generateKustomizationFile("ingress/"+k8sContext, k, tmpl) } + +func generateReaperIngressKustomization(k8sContext, namespace, serviceName string) error { + tmpl := `apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- reaper-ingress.yaml +namespace: {{ .Namespace }} +patches: +- target: + group: traefik.containo.us + version: v1alpha1 + kind: IngressRoute + name: test-dc1-reaper-service-http-ingress + patch: |- + - op: replace + path: /metadata/name + value: "` + serviceName + `-http-ingress" +patchesJson6902: + - target: + group: traefik.containo.us + version: v1alpha1 + kind: IngressRoute + name: .* + patch: |- + - op: replace + path: /spec/routes/0/services/0/name + value: "` + serviceName + `" +` + k := Kustomization{Namespace: namespace} + return generateKustomizationFile("ingress/"+k8sContext, k, tmpl) +} diff --git a/test/kubectl/kubectl.go b/test/kubectl/kubectl.go index 1f81fa6b6..79f7c8cb6 100644 --- a/test/kubectl/kubectl.go +++ b/test/kubectl/kubectl.go @@ -10,8 +10,9 @@ import ( ) type Options struct { - Namespace string - Context string + Namespace string + Context string + ServerSide bool } var logOutput = false @@ -31,7 +32,13 @@ func Apply(opts Options, arg interface{}) error { cmd.Args = append(cmd.Args, "-n", opts.Namespace) } - cmd.Args = append(cmd.Args, "apply", "-f") + cmd.Args = append(cmd.Args, "apply") + + if opts.ServerSide { + cmd.Args = append(cmd.Args, "--server-side", "--force-conflicts") + } + + cmd.Args = append(cmd.Args, "-f") if buf, ok := arg.(*bytes.Buffer); ok { cmd.Stdin = buf diff --git a/test/testdata/fixtures/multi-dc-reaper/k8ssandra.yaml b/test/testdata/fixtures/multi-dc-reaper/k8ssandra.yaml new file mode 100644 index 000000000..ceae23322 --- /dev/null +++ b/test/testdata/fixtures/multi-dc-reaper/k8ssandra.yaml @@ -0,0 +1,49 @@ +apiVersion: k8ssandra.io/v1alpha1 +kind: K8ssandraCluster +metadata: + name: test +spec: + cassandra: + cluster: test + serverVersion: "3.11.11" + datacenters: + - metadata: + name: dc1 + k8sContext: kind-k8ssandra-0 + size: 1 + storageConfig: + cassandraDataVolumeClaimSpec: + storageClassName: standard + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + config: + jvmOptions: + heapSize: 384Mi + networking: + hostNetwork: true + reaper: + autoScheduling: + enabled: true + - metadata: + name: dc2 + k8sContext: kind-k8ssandra-1 + size: 1 + storageConfig: + cassandraDataVolumeClaimSpec: + storageClassName: standard + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + config: + jvmOptions: + heapSize: 384Mi + networking: + hostNetwork: true + reaper: + autoScheduling: + enabled: true diff --git a/test/testdata/fixtures/single-dc-reaper/k8ssandra.yaml b/test/testdata/fixtures/single-dc-reaper/k8ssandra.yaml new file mode 100644 index 000000000..041756ef4 --- /dev/null +++ b/test/testdata/fixtures/single-dc-reaper/k8ssandra.yaml @@ -0,0 +1,27 @@ +apiVersion: k8ssandra.io/v1alpha1 +kind: K8ssandraCluster +metadata: + name: test +spec: + reaper: + autoScheduling: + enabled: true + cassandra: + cluster: test + serverVersion: "3.11.11" + datacenters: + - metadata: + name: dc1 + k8sContext: kind-k8ssandra-0 + size: 1 + storageConfig: + cassandraDataVolumeClaimSpec: + storageClassName: standard + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + config: + jvmOptions: + heapSize: 384Mi diff --git a/test/testdata/ingress/reaper-ingress.yaml 
b/test/testdata/ingress/reaper-ingress.yaml new file mode 100644 index 000000000..f2444c931 --- /dev/null +++ b/test/testdata/ingress/reaper-ingress.yaml @@ -0,0 +1,15 @@ +# https://github.com/traefik/traefik/blob/v2.5.1/docs/content/reference/dynamic-configuration/traefik.containo.us_ingressroutes.yaml +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRoute +metadata: + name: test-dc1-reaper-service-http-ingress # actual namespace will be kustomized + namespace: default # actual namespace will be kustomized +spec: + entryPoints: + - web-http + routes: + - match: Host(`reaper.127.0.0.1.nip.io`) + kind: Rule + services: + - name: test-dc1-reaper-service # actual service name will be kustomized + port: 8080 diff --git a/test/testdata/ingress/stargate-ingress.yaml b/test/testdata/ingress/stargate-ingress.yaml index a6dcc33c2..556fa4b41 100644 --- a/test/testdata/ingress/stargate-ingress.yaml +++ b/test/testdata/ingress/stargate-ingress.yaml @@ -6,7 +6,7 @@ metadata: namespace: default # actual namespace will be kustomized spec: entryPoints: - - stargate-http + - web-http routes: - match: Host(`stargate.127.0.0.1.nip.io`) && PathPrefix(`/v1/auth`) kind: Rule diff --git a/test/testdata/ingress/traefik.values.yaml b/test/testdata/ingress/traefik.values.yaml index 5ac838ff6..81f6822ea 100644 --- a/test/testdata/ingress/traefik.values.yaml +++ b/test/testdata/ingress/traefik.values.yaml @@ -32,7 +32,7 @@ ports: expose: true port: 30090 nodePort: 30090 - stargate-http: + web-http: expose: true port: 30080 nodePort: 30080