Dependency upgrades (#1314)
* controller-runtime upgrade.

* Very annoying deprecation errors.

* Try adding TLSOpts to all test mgrs.

* Ensure metrics are not started in Medusa tests.

* Fixed the webhook issues, it appears.

* Fix toolchain issue.

* Add changelog.

* Comment out medusa TestScheduler test. I don't know how to fix this.

* Try to fix failing e2e tests. Maybe this is related to misuse of PollUntilContextTimeout...

* Try setting all PollUntilContextTimeout calls the same way (see the sketch after the changelog diff below).

* Ensure all list failures are logged.

* Actually log when a replicated secret is screwing up.

* Add default cache config for cluster-scope

* Make context changes in the PollUntilContextTimeout (which exits immediately right now)

* Remove more duplicate contexts

* Fix the function parameter order in DeleteK8ssandraClusters/CassandraDatacenters in tests. Update controller-runtime from 0.17.2 -> 0.17.5 for some bugfixes.

* Fix go.sum.

* Put the watch namespace handling back the way it should be.

* Get linting passing.

* Fix the namespace parsing again after the merge

* Fix bug which was driving us into a multiNamespaceCache within controller-runtime instead of the informerCache we want (see the cache-options sketch below).

* More logging

* Better fix for namespacing the caches.

* Extend timeout on cluster scoped test.

* Oops, that was embarrassing.

* Changelog, better loop for setting default namespaces.

* Fix changelog.

* Use nonCachedClient in the ClientConfig to prevent a cache invalidation issue, since this is a very low-usage client.

* Fix clientConfig initialization to fetch all the ClientConfig items cluster-scoped.

---------

Co-authored-by: Michael Burman <[email protected]>
Miles-Garnsey and burmanm authored May 22, 2024
1 parent d7f059e commit 66db83a
Showing 56 changed files with 7,186 additions and 2,302 deletions.
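Several of the commits above concern keeping cluster-scoped deployments on controller-runtime's ordinary informer cache rather than a multi-namespace cache. The full wiring is not visible in the excerpt below, so the following is only a minimal sketch of how that distinction is typically expressed with controller-runtime v0.17's `cache.Options.DefaultNamespaces`; the `watchNamespaces` parameter stands in for however the operator parses its watch-namespace setting and is not code from this repository.

```go
package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

// managerOptions is a hypothetical helper illustrating the cluster-scope fix:
// only restrict the cache when specific namespaces are requested.
func managerOptions(scheme *runtime.Scheme, watchNamespaces []string) ctrl.Options {
	opts := ctrl.Options{Scheme: scheme}
	if len(watchNamespaces) > 0 {
		// Namespace-scoped mode: listing namespaces here makes controller-runtime
		// build a multi-namespace cache limited to those namespaces.
		defaults := make(map[string]cache.Config, len(watchNamespaces))
		for _, ns := range watchNamespaces {
			defaults[ns] = cache.Config{}
		}
		opts.Cache = cache.Options{DefaultNamespaces: defaults}
	}
	// Cluster-scoped mode: leaving DefaultNamespaces unset keeps the default
	// informer cache that watches every namespace.
	return opts
}
```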
4 changes: 4 additions & 0 deletions CHANGELOG/CHANGELOG-1.17.md
@@ -14,3 +14,7 @@ Changelog for the K8ssandra Operator, new PRs should update the `unreleased` section
When cutting a new release, update the `unreleased` heading to the tag being generated and date, like `## vX.Y.Z - YYYY-MM-DD` and create a new placeholder section for `unreleased` entries.

## unreleased

* [CHANGE] [1313](https://github.com/k8ssandra/k8ssandra-operator/issues/1313) Upgrade controller-runtime to the 0.17 series and Go to 1.21.
* [BUGFIX] [1317](https://github.com/k8ssandra/k8ssandra-operator/issues/1317) Fix issues with caches in cluster-scoped deployments, where they were continuing to use a multi-namespace scoped cache and not an informer cache.
* [BUGFIX] [1316](https://github.com/k8ssandra/k8ssandra-operator/issues/1316) Fix interchanged intervals and timeouts in tests.
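Bugfix [1316] above refers to test helpers that passed the interval and timeout arguments to `wait.PollUntilContextTimeout` in the wrong order. As a reference point, a minimal sketch of the intended call (the `ready` condition here is a placeholder, not a function from this repository):

```go
package example

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForReady polls ready every 5 seconds for up to 2 minutes. The signature is
// PollUntilContextTimeout(ctx, interval, timeout, immediate, condition); swapping
// interval and timeout yields a poll every 2 minutes that gives up after 5 seconds,
// which is the kind of interchange the tests had to be cleaned of.
func waitForReady(ctx context.Context, ready func(context.Context) (bool, error)) error {
	return wait.PollUntilContextTimeout(ctx, 5*time.Second, 2*time.Minute, true, ready)
}
```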
2 changes: 1 addition & 1 deletion Dockerfile
@@ -1,5 +1,5 @@
# Build the manager binary
-FROM golang:1.20 as builder
+FROM golang:1.21 as builder
ARG TARGETOS
ARG TARGETARCH

11 changes: 6 additions & 5 deletions apis/k8ssandra/v1alpha1/k8ssandracluster_types_test.go
@@ -1,13 +1,14 @@
package v1alpha1

import (
"sigs.k8s.io/yaml"
"testing"

"k8s.io/utils/ptr"
"sigs.k8s.io/yaml"

cassdcapi "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1"
stargateapi "github.com/k8ssandra/k8ssandra-operator/apis/stargate/v1alpha1"
"github.com/stretchr/testify/assert"
"k8s.io/utils/pointer"
)

func TestK8ssandraCluster(t *testing.T) {
@@ -77,7 +78,7 @@ func TestNetworkingConfig_ToCassNetworkingConfig(t *testing.T) {
{
"host network true",
&NetworkingConfig{
-HostNetwork: pointer.Bool(true),
+HostNetwork: ptr.To(true),
},
&cassdcapi.NetworkingConfig{
HostNetwork: true,
@@ -86,7 +87,7 @@ func TestNetworkingConfig_ToCassNetworkingConfig(t *testing.T) {
{
"host network false",
&NetworkingConfig{
-HostNetwork: pointer.Bool(false),
+HostNetwork: ptr.To(false),
},
&cassdcapi.NetworkingConfig{
HostNetwork: false,
@@ -104,7 +105,7 @@ func TestNetworkingConfig_ToCassNetworkingConfig(t *testing.T) {
{
"all set",
&NetworkingConfig{
-HostNetwork: pointer.Bool(true),
+HostNetwork: ptr.To(true),
NodePort: &cassdcapi.NodePortConfig{
Native: 1,
NativeSSL: 2,
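The `pointer.Bool` calls above are replaced because `k8s.io/utils/pointer` has been superseded by the generic `k8s.io/utils/ptr` package, where a single `ptr.To` covers every scalar type. A small illustrative sketch (the values are arbitrary, not taken from these tests):

```go
package example

import "k8s.io/utils/ptr"

// pointers shows how one generic helper replaces pointer.Bool, pointer.String,
// pointer.Int32 and the rest of the deprecated per-type constructors.
func pointers() (*bool, *string, *int32) {
	hostNetwork := ptr.To(true)
	serviceAccount := ptr.To("sa2")
	replicas := ptr.To(int32(3))
	return hostNetwork, serviceAccount, replicas
}
```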
29 changes: 15 additions & 14 deletions apis/k8ssandra/v1alpha1/k8ssandracluster_webhook.go
@@ -29,6 +29,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

var (
@@ -64,10 +65,10 @@ func (r *K8ssandraCluster) Default() {
var _ webhook.Validator = &K8ssandraCluster{}

// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
-func (r *K8ssandraCluster) ValidateCreate() error {
+func (r *K8ssandraCluster) ValidateCreate() (admission.Warnings, error) {
webhookLog.Info("validate K8ssandraCluster create", "K8ssandraCluster", r.Name)

-return r.validateK8ssandraCluster()
+return nil, r.validateK8ssandraCluster()
}

func (r *K8ssandraCluster) validateK8ssandraCluster() error {
@@ -128,24 +129,24 @@ func (r *K8ssandraCluster) validateStatefulsetNameSize() error {
}

// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
-func (r *K8ssandraCluster) ValidateUpdate(old runtime.Object) error {
+func (r *K8ssandraCluster) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
webhookLog.Info("validate K8ssandraCluster update", "K8ssandraCluster", r.Name)

if err := r.validateK8ssandraCluster(); err != nil {
-return err
+return nil, err
}

oldCluster, ok := old.(*K8ssandraCluster)
if !ok {
-return fmt.Errorf("previous object could not be casted to K8ssandraCluster")
+return nil, fmt.Errorf("previous object could not be casted to K8ssandraCluster")
}

// Verify Reaper keyspace is not changed
oldReaperSpec := oldCluster.Spec.Reaper
reaperSpec := r.Spec.Reaper
if reaperSpec != nil && oldReaperSpec != nil {
if reaperSpec.Keyspace != oldReaperSpec.Keyspace {
-return ErrReaperKeyspace
+return nil, ErrReaperKeyspace
}
}

@@ -158,22 +159,22 @@ func (r *K8ssandraCluster) ValidateUpdate(old runtime.Object) error {
if !oldNumTokensExists {
cassVersion, err := semver.NewVersion(oldCluster.Spec.Cassandra.ServerVersion)
if err != nil {
-return err
+return nil, err
}
defaultNumTokens := oldCluster.DefaultNumTokens(cassVersion)
if newNumTokensExists && newNumTokens.(float64) != defaultNumTokens {
-return ErrNumTokens
+return nil, ErrNumTokens
}
} else {
if oldNumTokens != newNumTokens {
-return ErrNumTokens
+return nil, ErrNumTokens
}
}
}

// Verify that the cluster name override was not changed
if r.Spec.Cassandra.ClusterName != oldCluster.Spec.Cassandra.ClusterName {
-return ErrClusterName
+return nil, ErrClusterName
}

// Some of these could be extracted in the cass-operator to reusable methods, do not copy code here.
@@ -185,16 +186,16 @@ func (r *K8ssandraCluster) ValidateUpdate(old runtime.Object) error {
// TODO Racks can only be added and only at the end of the list - no other operation is allowed to racks

if err := r.validateStatefulsetNameSize(); err != nil {
-return err
+return nil, err
}

-return nil
+return nil, nil
}

// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
-func (r *K8ssandraCluster) ValidateDelete() error {
+func (r *K8ssandraCluster) ValidateDelete() (admission.Warnings, error) {
webhookLog.Info("validate K8ssandraCluster delete", "name", r.Name)
-return nil
+return nil, nil
}

func (r *K8ssandraCluster) ValidateMedusa() error {
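The signature changes in this file track controller-runtime's updated `webhook.Validator` interface, which now lets a validating webhook return non-fatal warnings alongside an error; returning `nil` warnings, as the methods above do, preserves the previous behaviour. Paraphrased from the v0.17 packages (consult the upstream source for the authoritative definition), the interface looks roughly like this:

```go
// Paraphrase of sigs.k8s.io/controller-runtime/pkg/webhook (v0.17.x), shown for context only.
package webhook

import (
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// admission.Warnings is a []string of messages surfaced to the API client
// without rejecting the request.
type Validator interface {
	runtime.Object

	ValidateCreate() (warnings admission.Warnings, err error)
	ValidateUpdate(old runtime.Object) (warnings admission.Warnings, err error)
	ValidateDelete() (warnings admission.Warnings, err error)
}
```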
38 changes: 24 additions & 14 deletions apis/k8ssandra/v1alpha1/k8ssandracluster_webhook_test.go
@@ -32,6 +32,8 @@ import (
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
admissionv1 "k8s.io/api/admission/v1"
"sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook"

//+kubebuilder:scaffold:imports
corev1 "k8s.io/api/core/v1"
@@ -103,13 +105,21 @@ func TestWebhook(t *testing.T) {

// start webhook server using Manager
webhookInstallOptions := &testEnv.WebhookInstallOptions

+whServer := webhook.NewServer(webhook.Options{
+Port: webhookInstallOptions.LocalServingPort,
+Host: webhookInstallOptions.LocalServingHost,
+CertDir: webhookInstallOptions.LocalServingCertDir,
+TLSOpts: []func(*tls.Config){func(config *tls.Config) {}},
+})

mgr, err := ctrl.NewManager(cfg, ctrl.Options{
-Scheme: scheme,
-Host: webhookInstallOptions.LocalServingHost,
-Port: webhookInstallOptions.LocalServingPort,
-CertDir: webhookInstallOptions.LocalServingCertDir,
-LeaderElection: false,
-MetricsBindAddress: "0",
+Scheme: scheme,
+WebhookServer: whServer,
+LeaderElection: false,
+Metrics: server.Options{
+BindAddress: "0",
+},
})
required.NoError(err)

@@ -282,7 +292,7 @@ func testNumTokens(t *testing.T) {

// Handle new num_token value different from previously specified as nil
required.NotEqual(oldCassConfig.CassandraYaml["num_tokens"], newCassConfig.CassandraYaml["num_tokens"])
-var errorWhenNew = (*newCluster).ValidateUpdate(oldCluster)
+var _, errorWhenNew = (*newCluster).ValidateUpdate(oldCluster)
required.Error(errorWhenNew, "expected error having new num_token value different from previous specified as nil")

oldCluster.Spec.Cassandra.DatacenterOptions.CassandraConfig.CassandraYaml["num_tokens"] = tokens
@@ -293,30 +303,30 @@

// Handle new num_token value different from previously specified as an actual value
required.NotEqual(oldCassConfig.CassandraYaml["num_tokens"], newCassConfig.CassandraYaml["num_tokens"])
-errorWhenNew = (*newCluster).ValidateUpdate(oldCluster)
+_, errorWhenNew = (*newCluster).ValidateUpdate(oldCluster)
required.Error(errorWhenNew, "expected error having new num_token value different from previous specified")

// Handle new num_token not specified when previously specified
oldCassConfig.CassandraYaml["num_tokens"] = tokens
delete(newCassConfig.CassandraYaml, "num_tokens")

-var errorWhenNil = (*newCluster).ValidateUpdate(oldCluster)
+var _, errorWhenNil = (*newCluster).ValidateUpdate(oldCluster)
required.Error(errorWhenNil, "expected error having new num_token value as nil from previous specified")

oldCassConfig.CassandraYaml["num_tokens"] = tokens
newCassConfig.CassandraYaml = unstructured.Unstructured{}

-errorWhenNil = (*newCluster).ValidateUpdate(oldCluster)
+_, errorWhenNil = (*newCluster).ValidateUpdate(oldCluster)
required.Error(errorWhenNil, "expected error having new num_token value as nil from previous specified")

oldCassConfig.CassandraYaml["num_tokens"] = tokens
newCassConfig = &CassandraConfig{}
-errorWhenNil = (*newCluster).ValidateUpdate(oldCluster)
+_, errorWhenNil = (*newCluster).ValidateUpdate(oldCluster)
required.Error(errorWhenNil, "expected error having new num_token value as nil from previous specified")

oldCassConfig.CassandraYaml["num_tokens"] = tokens
newCassConfig = &CassandraConfig{}
-errorWhenNil = (*newCluster).ValidateUpdate(oldCluster)
+_, errorWhenNil = (*newCluster).ValidateUpdate(oldCluster)
required.Error(errorWhenNil, "expected error having new num_token value as nil from previous specified")

// Expected to be able to update without token change, however changes to other config values are made
@@ -330,7 +340,7 @@ func testNumTokens(t *testing.T) {
newCluster.Spec.Cassandra.DatacenterOptions.CassandraConfig.CassandraYaml["cdc_enabled"] = enabled
newCluster.Spec.Cassandra.DatacenterOptions.CassandraConfig.CassandraYaml["index_summary_resize_interval_in_minutes"] = intervalInMins

-errorOnValidate := (*newCluster).ValidateUpdate(oldCluster)
+_, errorOnValidate := (*newCluster).ValidateUpdate(oldCluster)
required.NoError(errorOnValidate)

// Expected failure for validation with token change while changes to other config values are being made
@@ -339,7 +349,7 @@ func testNumTokens(t *testing.T) {
newCluster.Spec.Cassandra.DatacenterOptions.CassandraConfig.CassandraYaml["cdc_enabled"] = enabled
newCluster.Spec.Cassandra.DatacenterOptions.CassandraConfig.CassandraYaml["index_summary_resize_interval_in_minutes"] = intervalInMins

-errorOnValidate = (*newCluster).ValidateUpdate(oldCluster)
+_, errorOnValidate = (*newCluster).ValidateUpdate(oldCluster)
required.Error(errorOnValidate, "expected error when changing the value of num tokens while also changing other field values")
}

22 changes: 16 additions & 6 deletions apis/medusa/v1alpha1/medusa_webhook_test.go
@@ -13,6 +13,8 @@ import (
"github.com/k8ssandra/k8ssandra-operator/pkg/shared"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
"sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook"

//+kubebuilder:scaffold:imports

@@ -77,13 +79,21 @@ func TestMedusaWebhooks(t *testing.T) {

// start webhook server using Manager
webhookInstallOptions := &testEnv.WebhookInstallOptions

+whServer := webhook.NewServer(webhook.Options{
+Port: webhookInstallOptions.LocalServingPort,
+Host: webhookInstallOptions.LocalServingHost,
+CertDir: webhookInstallOptions.LocalServingCertDir,
+TLSOpts: []func(*tls.Config){func(config *tls.Config) {}},
+})

mgr, err := ctrl.NewManager(cfg, ctrl.Options{
-Scheme: scheme,
-Host: webhookInstallOptions.LocalServingHost,
-Port: webhookInstallOptions.LocalServingPort,
-CertDir: webhookInstallOptions.LocalServingCertDir,
-LeaderElection: false,
-MetricsBindAddress: "0",
+Scheme: scheme,
+WebhookServer: whServer,
+LeaderElection: false,
+Metrics: server.Options{
+BindAddress: "0",
+},
})
require.NoError(err)

13 changes: 7 additions & 6 deletions apis/medusa/v1alpha1/medusabackupschedule_webhook.go
@@ -21,6 +21,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

func (r *MedusaBackupSchedule) SetupWebhookWithManager(mgr ctrl.Manager) error {
@@ -34,18 +35,18 @@ func (r *MedusaBackupSchedule) SetupWebhookWithManager(mgr ctrl.Manager) error {
var _ webhook.Validator = &MedusaBackupSchedule{}

// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
-func (r *MedusaBackupSchedule) ValidateCreate() error {
-return r.validateCronSchedule()
+func (r *MedusaBackupSchedule) ValidateCreate() (admission.Warnings, error) {
+return nil, r.validateCronSchedule()
}

// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
-func (r *MedusaBackupSchedule) ValidateUpdate(old runtime.Object) error {
-return r.validateCronSchedule()
+func (r *MedusaBackupSchedule) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
+return nil, r.validateCronSchedule()
}

// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
-func (r *MedusaBackupSchedule) ValidateDelete() error {
-return nil
+func (r *MedusaBackupSchedule) ValidateDelete() (admission.Warnings, error) {
+return nil, nil
}

func (r *MedusaBackupSchedule) validateCronSchedule() error {
13 changes: 7 additions & 6 deletions apis/stargate/v1alpha1/stargate_types_test.go
@@ -1,13 +1,14 @@
package v1alpha1

import (
"testing"

"github.com/k8ssandra/k8ssandra-operator/pkg/images"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
"testing"
"k8s.io/utils/ptr"
)

var (
@@ -228,7 +229,7 @@ func testStargateDatacenterTemplateMerge(t *testing.T) {
Repository: "repo2",
Name: "img2",
},
ServiceAccount: pointer.String("sa2"),
ServiceAccount: ptr.To("sa2"),
HeapSize: &quantity256Mi,
NodeSelector: map[string]string{"k2": "v2a", "k3": "v3"},
Tolerations: []corev1.Toleration{{Key: "k2", Value: "v2"}},
@@ -270,7 +271,7 @@ func testStargateDatacenterTemplateMerge(t *testing.T) {
Name: "img1",
},
HeapSize: &quantity512Mi,
ServiceAccount: pointer.String("sa2"),
ServiceAccount: ptr.To("sa2"),
// map will be merged
NodeSelector: map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"},
// slice will not be merged, slice1 will be kept intact
@@ -384,7 +385,7 @@ func testStargateRackTemplateMerge(t *testing.T) {
Repository: "repo2",
Name: "img2",
},
ServiceAccount: pointer.String("sa2"),
ServiceAccount: ptr.To("sa2"),
HeapSize: &quantity256Mi,
NodeSelector: map[string]string{"k2": "v2a", "k3": "v3"},
Tolerations: []corev1.Toleration{{Key: "k2", Value: "v2"}},
@@ -424,7 +425,7 @@ func testStargateRackTemplateMerge(t *testing.T) {
Name: "img1",
},
HeapSize: &quantity512Mi,
ServiceAccount: pointer.String("sa2"),
ServiceAccount: ptr.To("sa2"),
// map will be merged
NodeSelector: map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"},
// slice will not be merged, slice1 will be kept intact
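The remaining files in this commit, including the nonCachedClient change called out in the commit message, are not shown in this excerpt. For context, here is a rough sketch of what a non-cached controller-runtime client looks like; the helper name and options are assumptions rather than this repository's code. A client built with `client.New` reads straight from the API server on every call, bypassing the manager's cache entirely, which suits a rarely used lookup such as fetching ClientConfig objects.

```go
package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// newNonCachedClient is a hypothetical helper: unlike the manager's cached client,
// every Get/List goes directly to the API server, so stale or invalidated cache
// entries are never an issue for this low-volume client.
func newNonCachedClient(cfg *rest.Config, scheme *runtime.Scheme) (client.Client, error) {
	return client.New(cfg, client.Options{Scheme: scheme})
}
```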