convert hardcoded master words into a constant variable
idanovinda committed Dec 2, 2024
1 parent 1410daa commit daec1a4
Showing 4 changed files with 39 additions and 37 deletions.
4 changes: 4 additions & 0 deletions e2e/tests/constants.py
@@ -0,0 +1,4 @@
+LEADER_LABEL_VALUE = "master" # master or primary
+SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.1"
+SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.2"
+SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-16:3.2-p3"
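
A quick sketch (illustrative, not part of the commit) of how the new constant is meant to be consumed: the e2e helpers below build their spilo-role label selectors from LEADER_LABEL_VALUE instead of the hardcoded string 'master', so a later rename of the leader role label to 'primary' only touches this one file.

    # Minimal usage sketch; mirrors the selector pattern used in the tests below.
    # LEADER_LABEL_VALUE is inlined here, while the tests import it from tests.constants.
    LEADER_LABEL_VALUE = "master"  # would become "primary" after a role-label rename

    cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
    leader_selector = 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label
    print(leader_selector)
    # spilo-role=master,application=spilo,cluster-name=acid-minimal-cluster
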
13 changes: 7 additions & 6 deletions e2e/tests/k8s_api.py
@@ -6,6 +6,7 @@
from kubernetes import client, config
from kubernetes.client.rest import ApiException

+from tests.constants import LEADER_LABEL_VALUE

def to_selector(labels):
    return ",".join(["=".join(lbl) for lbl in labels.items()])
@@ -47,7 +48,7 @@ def get_pg_nodes(self, pg_cluster_name, namespace='default'):
        replica_pod_nodes = []
        podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pg_cluster_name)
        for pod in podsList.items:
-           if pod.metadata.labels.get('spilo-role') == 'master':
+           if pod.metadata.labels.get('spilo-role') == LEADER_LABEL_VALUE:
                master_pod_node = pod.spec.node_name
            elif pod.metadata.labels.get('spilo-role') == 'replica':
                replica_pod_nodes.append(pod.spec.node_name)
Expand All @@ -59,7 +60,7 @@ def get_cluster_nodes(self, cluster_labels='application=spilo,cluster-name=acid-
r = []
podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=cluster_labels)
for pod in podsList.items:
if pod.metadata.labels.get('spilo-role') == 'master' and pod.status.phase == 'Running':
if pod.metadata.labels.get('spilo-role') == LEADER_LABEL_VALUE and pod.status.phase == 'Running':
m.append(pod.spec.node_name)
elif pod.metadata.labels.get('spilo-role') == 'replica' and pod.status.phase == 'Running':
r.append(pod.spec.node_name)
@@ -351,7 +352,7 @@ def get_cluster_pod(self, role, labels='application=spilo,cluster-name=acid-mini
        return pods[0]

    def get_cluster_leader_pod(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'):
-       return self.get_cluster_pod('master', labels, namespace)
+       return self.get_cluster_pod(LEADER_LABEL_VALUE, labels, namespace)

    def get_cluster_replica_pod(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'):
        return self.get_cluster_pod('replica', labels, namespace)
@@ -383,7 +384,7 @@ def get_pg_nodes(self, pg_cluster_labels='cluster-name=acid-minimal-cluster', na
        replica_pod_nodes = []
        podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pg_cluster_labels)
        for pod in podsList.items:
-           if pod.metadata.labels.get('spilo-role') == 'master':
+           if pod.metadata.labels.get('spilo-role') == LEADER_LABEL_VALUE:
                master_pod_node = pod.spec.node_name
            elif pod.metadata.labels.get('spilo-role') == 'replica':
                replica_pod_nodes.append(pod.spec.node_name)
Expand All @@ -395,7 +396,7 @@ def get_cluster_nodes(self, cluster_labels='cluster-name=acid-minimal-cluster',
r = []
podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=cluster_labels)
for pod in podsList.items:
if pod.metadata.labels.get('spilo-role') == 'master' and pod.status.phase == 'Running':
if pod.metadata.labels.get('spilo-role') == LEADER_LABEL_VALUE and pod.status.phase == 'Running':
m.append(pod.spec.node_name)
elif pod.metadata.labels.get('spilo-role') == 'replica' and pod.status.phase == 'Running':
r.append(pod.spec.node_name)
@@ -622,7 +623,7 @@ def get_pg_nodes(self):
        replica_pod_nodes = []
        podsList = self.api.core_v1.list_namespaced_pod(self.namespace, label_selector=self.labels)
        for pod in podsList.items:
-           if pod.metadata.labels.get('spilo-role') == 'master':
+           if pod.metadata.labels.get('spilo-role') == LEADER_LABEL_VALUE:
                master_pod_node = pod.spec.node_name
            elif pod.metadata.labels.get('spilo-role') == 'replica':
                replica_pod_nodes.append(pod.spec.node_name)
57 changes: 27 additions & 30 deletions e2e/tests/test_e2e.py
@@ -8,13 +8,10 @@

from datetime import datetime, date, timedelta
from kubernetes import client
-
-from tests.k8s_api import K8s
from kubernetes.client.rest import ApiException

-SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.1"
-SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.2"
-SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-16:3.2-p3"
+from tests.k8s_api import K8s
+from tests.constants import SPILO_CURRENT, SPILO_FULL_IMAGE, SPILO_LAZY, LEADER_LABEL_VALUE


def to_selector(labels):
@@ -155,7 +152,7 @@ def setUpClass(cls):
        result = k8s.create_with_kubectl("manifests/minimal-postgres-manifest.yaml")
        print('stdout: {}, stderr: {}'.format(result.stdout, result.stderr))
        try:
-           k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
+           k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label)
            k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
        except timeout_decorator.TimeoutError:
            print('Operator log: {}'.format(k8s.get_operator_log()))
@@ -224,7 +221,7 @@ def test_additional_pod_capabilities(self):
        k8s.update_config(patch_capabilities)

        # changed security context of postgres container should trigger a rolling update
-       k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
+       k8s.wait_for_pod_failover(replica_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label)
        k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)

        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
@@ -658,7 +655,7 @@ def test_custom_ssl_certificate(self):
            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_tls)

        # wait for switched over
-       k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
+       k8s.wait_for_pod_failover(replica_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label)
        k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)

        self.eventuallyEqual(lambda: k8s.count_pods_with_env_variable("SSL_CERTIFICATE_FILE", cluster_label), 2, "TLS env variable SSL_CERTIFICATE_FILE missing in Spilo pods")
@@ -861,7 +858,7 @@ def test_enable_load_balancer(self):
        k8s = self.k8s
        cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster,spilo-role={}'

-       self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("master")),
+       self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format(LEADER_LABEL_VALUE)),
                             'ClusterIP',
                             "Expected ClusterIP type initially, found {}")

Expand All @@ -876,7 +873,7 @@ def test_enable_load_balancer(self):
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_lbs)

self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("master")),
self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format(LEADER_LABEL_VALUE)),
'LoadBalancer',
"Expected LoadBalancer service type for master, found {}")

Expand All @@ -894,7 +891,7 @@ def test_enable_load_balancer(self):
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_lbs)

self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("master")),
self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format(LEADER_LABEL_VALUE)),
'ClusterIP',
"Expected LoadBalancer service type for master, found {}")

@@ -1227,7 +1224,7 @@ def get_annotations():
        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

        k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label)
-       k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
+       k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label)
        k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
        self.eventuallyEqual(check_version, 13, "Version should be upgraded from 12 to 13")

Expand All @@ -1252,8 +1249,8 @@ def get_annotations():
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_14)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
k8s.wait_for_pod_failover(master_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label)
k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label)
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
self.eventuallyEqual(check_version, 13, "Version should not be upgraded")

Expand All @@ -1278,7 +1275,7 @@ def get_annotations():
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label)
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
self.eventuallyEqual(check_version, 15, "Version should be upgraded from 13 to 15")

Expand All @@ -1304,8 +1301,8 @@ def get_annotations():
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_16)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
k8s.wait_for_pod_failover(master_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label)
k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label)
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
self.eventuallyEqual(check_version, 15, "Version should not be upgraded because annotation for last upgrade's failure is set")

Expand All @@ -1315,7 +1312,7 @@ def get_annotations():
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label)
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)

fourth_annotations = get_annotations()
@@ -1433,7 +1430,7 @@ def test_resource_generation(self):
                             "Operator does not get in sync")

        # wait for switched over
-       k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
+       k8s.wait_for_pod_failover(replica_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label)
        k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)

        def verify_pod_resources():
@@ -1465,7 +1462,7 @@ def test_multi_namespace_support(self):

        try:
            k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
-           k8s.wait_for_pod_start("spilo-role=master", self.test_namespace)
+           k8s.wait_for_pod_start("spilo-role={}".format(LEADER_LABEL_VALUE), self.test_namespace)
            k8s.wait_for_pod_start("spilo-role=replica", self.test_namespace)
            self.assert_master_is_unique(self.test_namespace, "acid-test-cluster")
            # acid-test-cluster will be deleted in test_owner_references test
@@ -1540,7 +1537,7 @@ def test_node_affinity(self):
            k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label)
            k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
            # next master will be switched over and pod needs to be replaced as well to finish the rolling update
-           k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label)
+           k8s.wait_for_pod_failover(master_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label)
            k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)

            podsList = k8s.api.core_v1.list_namespaced_pod('default', label_selector=cluster_label)
@@ -1573,7 +1570,7 @@ def test_node_affinity(self):

            # node affinity change should cause another rolling update and relocation of replica
            k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label)
-           k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
+           k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label)
            k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)

        except timeout_decorator.TimeoutError:
@@ -1634,7 +1631,7 @@ def test_node_readiness_label(self):
            k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)

            # next master will be switched over and pod needs to be replaced as well to finish the rolling update
-           k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
+           k8s.wait_for_pod_failover(replica_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label)
            k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)

            # patch also node where master ran before
@@ -1922,7 +1919,7 @@ def test_rolling_update_flag(self):
        podsList = k8s.api.core_v1.list_namespaced_pod('default', label_selector=cluster_label)
        for pod in podsList.items:
            # add flag only to the master to make it appear to the operator as a leftover from a rolling update
-           if pod.metadata.labels.get('spilo-role') == 'master':
+           if pod.metadata.labels.get('spilo-role') == LEADER_LABEL_VALUE:
                old_creation_timestamp = pod.metadata.creation_timestamp
                k8s.patch_pod(flag, pod.metadata.name, pod.metadata.namespace)
            else:
Expand All @@ -1933,7 +1930,7 @@ def test_rolling_update_flag(self):
k8s.delete_operator_pod()

# operator should now recreate the master pod and do a switchover before
k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
k8s.wait_for_pod_failover(replica_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label)
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)

# check if the former replica is now the new master
@@ -2004,7 +2001,7 @@ def test_rolling_update_label_timeout(self):
        self.eventuallyEqual(lambda: k8s.pg_get_status(), "SyncFailed", "Expected SYNC event to fail")

        # wait for next sync, replica should be running normally by now and be ready for switchover
-       k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
+       k8s.wait_for_pod_failover(replica_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label)
        k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)

        # check if the former replica is now the new master
@@ -2079,7 +2076,7 @@ def test_service_annotations(self):
            "alice": "bob"
        }

-       self.eventuallyTrue(lambda: k8s.check_service_annotations("cluster-name=acid-minimal-cluster,spilo-role=master", annotations), "Wrong annotations")
+       self.eventuallyTrue(lambda: k8s.check_service_annotations("cluster-name=acid-minimal-cluster,spilo-role={}".format(LEADER_LABEL_VALUE), annotations), "Wrong annotations")
        self.eventuallyTrue(lambda: k8s.check_service_annotations("cluster-name=acid-minimal-cluster,spilo-role=replica", annotations), "Wrong annotations")

        # clean up
@@ -2151,7 +2148,7 @@ def test_standby_cluster(self):

        try:
            k8s.create_with_kubectl("manifests/standby-manifest.yaml")
-           k8s.wait_for_pod_start("spilo-role=master," + cluster_label)
+           k8s.wait_for_pod_start("spilo-role={},".format(LEADER_LABEL_VALUE) + cluster_label)

        except timeout_decorator.TimeoutError:
            print('Operator log: {}'.format(k8s.get_operator_log()))
@@ -2455,11 +2452,11 @@ def test_zz_cluster_deletion(self):

    def assert_master_is_unique(self, namespace='default', clusterName="acid-minimal-cluster"):
        '''
-       Check that there is a single pod in the k8s cluster with the label "spilo-role=master"
+       Check that there is a single pod in the k8s cluster with the label "spilo-role=primary" or "spilo-role=master"
        To be called manually after operations that affect pods
        '''
        k8s = self.k8s
-       labels = 'spilo-role=master,cluster-name=' + clusterName
+       labels = 'spilo-role={},cluster-name='.format(LEADER_LABEL_VALUE) + clusterName

        num_of_master_pods = k8s.count_pods_with_label(labels, namespace)
        self.assertEqual(num_of_master_pods, 1, "Expected 1 master pod, found {}".format(num_of_master_pods))
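
For reference, a self-contained sketch (assumed values, not from the commit) of how the to_selector helper in k8s_api.py composes with the new constant: a label dict becomes the comma-separated selector string that count_pods_with_label and the wait_for_* helpers consume, with LEADER_LABEL_VALUE filling the spilo-role entry.

    LEADER_LABEL_VALUE = "master"  # inlined here; the tests import it from tests.constants

    def to_selector(labels):
        # Same helper as in e2e/tests/k8s_api.py: join 'key=value' pairs with commas.
        return ",".join(["=".join(lbl) for lbl in labels.items()])

    selector = to_selector({
        "cluster-name": "acid-minimal-cluster",
        "spilo-role": LEADER_LABEL_VALUE,
    })
    print(selector)  # cluster-name=acid-minimal-cluster,spilo-role=master
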
2 changes: 1 addition & 1 deletion pkg/cluster/connection_pooler_test.go
@@ -323,7 +323,7 @@ func TestConnectionPoolerCreateDeletion(t *testing.T) {
    cluster.Name = "acid-fake-cluster"
    cluster.Namespace = "default"

-   _, err := cluster.createService(cluster.masterRole()) //PROBLEM1
+   _, err := cluster.createService(cluster.masterRole())
    assert.NoError(t, err)
    _, err = cluster.createStatefulSet()
    assert.NoError(t, err)
