Skip to content

Commit

Permalink
Try setting all PollUntilContextTimeout timeouts the same way.
Browse files Browse the repository at this point in the history
  • Loading branch information
Miles-Garnsey committed May 17, 2024
1 parent 738b004 commit 2456e37
Show file tree
Hide file tree
Showing 2 changed files with 9 additions and 9 deletions.
10 changes: 5 additions & 5 deletions test/framework/e2e_framework.go
Original file line number Diff line number Diff line change
Expand Up @@ -360,7 +360,7 @@ func (f *E2eFramework) InstallMinio() error {

// Wait for the minio rollout to complete
opts := kubectl.Options{Namespace: MinioNamespace, Context: k8sContext}
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 5*time.Minute, false, func(ctx context.Context) (bool, error) {
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 5*time.Minute, true, func(ctx context.Context) (bool, error) {
if err := kubectl.RolloutStatus(ctx, opts, "Deployment", "minio"); err != nil {
f.logger.Info("Waiting for minio rollout to complete: %s", err)
return false, err
Expand All @@ -387,7 +387,7 @@ func (f *E2eFramework) CreateMedusaBucket(namespace string) error {
// Wait for job to succeed
f.logger.Info("Waiting for setup-minio job to succeed")
opts := kubectl.Options{Namespace: namespace, Context: k8sContext}
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 5*time.Minute, false, func(ctx context.Context) (bool, error) {
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 5*time.Minute, true, func(ctx context.Context) (bool, error) {
if err := kubectl.JobSuccess(ctx, opts, namespace, "setup-minio"); err != nil {
f.logger.Error(err, "Waiting for setup-minio job to succeed")
return false, nil
Expand Down Expand Up @@ -550,7 +550,7 @@ func (f *E2eFramework) DeleteNamespace(name string, timeout, interval time.Durat
}

// Should this wait.Poll call be per cluster?
return wait.PollUntilContextTimeout(context.TODO(), interval, timeout, false, func(context.Context) (bool, error) {
return wait.PollUntilContextTimeout(context.Background(), interval, timeout, true, func(ctx context.Context) (bool, error) {
for _, remoteClient := range f.remoteClients {
err := remoteClient.Get(context.TODO(), types.NamespacedName{Name: name}, namespace.DeepCopy())

Expand Down Expand Up @@ -730,7 +730,7 @@ func (f *E2eFramework) DeleteReplicatedSecrets(namespace string, timeout, interv
return err
}

return wait.PollUntilContextTimeout(context.TODO(), interval, timeout, false, func(context.Context) (bool, error) {
return wait.PollUntilContextTimeout(context.Background(), interval, timeout, true, func(ctx context.Context) (bool, error) {
list := &replicationapi.ReplicatedSecretList{}
if err := f.Client.List(context.Background(), list, client.InNamespace(namespace)); err != nil {
f.logger.Error(err, "failed to list ReplicatedSecrets")
Expand Down Expand Up @@ -764,7 +764,7 @@ func (f *E2eFramework) deleteAllResources(
}
// Check that all pods created by resources are terminated
// FIXME Should there be a separate wait.Poll call per cluster?
return wait.PollUntilContextTimeout(context.TODO(), interval, timeout, false, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(context.Background(), interval, timeout, true, func(ctx context.Context) (bool, error) {
podListOptions = append(podListOptions, client.InNamespace(namespace))
for k8sContext, remoteClient := range f.remoteClients {
list := &corev1.PodList{}
Expand Down
8 changes: 4 additions & 4 deletions test/framework/framework.go
Original file line number Diff line number Diff line change
Expand Up @@ -463,7 +463,7 @@ func (f *Framework) WaitForDeploymentToBeReady(key ClusterKey, timeout, interval
if len(key.K8sContext) == 0 {
for k8sContext := range f.remoteClients {
opts := kubectl.Options{Namespace: key.Namespace, Context: k8sContext}
err := wait.PollUntilContextTimeout(context.Background(), interval, timeout, false, func(ctx context.Context) (bool, error) {
err := wait.PollUntilContextTimeout(context.Background(), interval, timeout, true, func(ctx context.Context) (bool, error) {
if err := kubectl.RolloutStatus(ctx, opts, "Deployment", key.Name); err != nil {
return false, err
}
Expand All @@ -479,7 +479,7 @@ func (f *Framework) WaitForDeploymentToBeReady(key ClusterKey, timeout, interval
return f.k8sContextNotFound(key.K8sContext)
}
opts := kubectl.Options{Namespace: key.Namespace, Context: key.K8sContext}
return wait.PollUntilContextTimeout(context.Background(), interval, timeout, false, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(context.Background(), interval, timeout, true, func(ctx context.Context) (bool, error) {
if err := kubectl.RolloutStatus(ctx, opts, "Deployment", key.Name); err != nil {
return false, err
}
Expand All @@ -498,7 +498,7 @@ func (f *Framework) DeleteK8ssandraCluster(ctx context.Context, key client.Objec
if err != nil {
return err
}
return wait.PollUntilContextTimeout(context.TODO(), interval, timeout, false, func(context.Context) (bool, error) {
return wait.PollUntilContextTimeout(context.Background(), interval, timeout, true, func(ctx context.Context) (bool, error) {
err := f.Client.Get(ctx, key, kc)
return err != nil && errors.IsNotFound(err), nil
})
Expand Down Expand Up @@ -532,7 +532,7 @@ func (f *Framework) DeleteCassandraDatacenters(namespace string, interval, timeo
f.logger.Error(err, "Failed to delete CassandraDatacenters")
}

return wait.PollUntilContextTimeout(context.TODO(), interval, timeout, false, func(context.Context) (bool, error) {
return wait.PollUntilContextTimeout(context.Background(), interval, timeout, true, func(ctx context.Context) (bool, error) {
list := &cassdcapi.CassandraDatacenterList{}
err := f.Client.List(context.Background(), list, client.InNamespace(namespace))
if err != nil {
Expand Down

0 comments on commit 2456e37

Please sign in to comment.