@@ -161,6 +161,7 @@ def test_additional_pod_capabilities(self):
         '''
         Extend postgres container capabilities
         '''
+        k8s = self.k8s
         cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
         capabilities = ["SYS_NICE", "CHOWN"]
         patch_capabilities = {
@@ -170,18 +171,18 @@ def test_additional_pod_capabilities(self):
         }

         # get node and replica (expected target of new master)
-        _, replica_nodes = self.k8s.get_pg_nodes(cluster_label)
+        _, replica_nodes = k8s.get_pg_nodes(cluster_label)

         try:
-            self.k8s.update_config(patch_capabilities)
-            self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"},
+            k8s.update_config(patch_capabilities)
+            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
                                  "Operator does not get in sync")

             # changed security context of postgres container should trigger a rolling update
-            self.k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
-            self.k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
+            k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
+            k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)

-            self.eventuallyEqual(lambda: self.k8s.count_pods_with_container_capabilities(capabilities, cluster_label),
+            self.eventuallyEqual(lambda: k8s.count_pods_with_container_capabilities(capabilities, cluster_label),
                                  2, "Container capabilities not updated")

         except timeout_decorator.TimeoutError:
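Note: count_pods_with_container_capabilities is a helper from the suite's Kubernetes wrapper. A minimal sketch of what such a counter can look like with the official kubernetes Python client follows; this is an assumed implementation for orientation, not the suite's actual code:

# Sketch only: counts pods whose postgres container carries all expected
# capabilities, via the official kubernetes client; the real helper may differ.
from kubernetes import client, config

def count_pods_with_container_capabilities(capabilities, labels, namespace='default'):
    config.load_kube_config()
    pods = client.CoreV1Api().list_namespaced_pod(
        namespace, label_selector=labels).items

    def has_capabilities(pod):
        for container in pod.spec.containers:
            ctx = container.security_context
            added = ctx.capabilities.add if ctx and ctx.capabilities else []
            if all(cap in (added or []) for cap in capabilities):
                return True
        return False

    return len([pod for pod in pods if has_capabilities(pod)])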
@@ -193,6 +194,8 @@ def test_additional_teams_and_members(self):
         '''
         Test PostgresTeam CRD with extra teams and members
         '''
+        k8s = self.k8s
+
         # enable PostgresTeam CRD and lower resync
         enable_postgres_team_crd = {
             "data": {
@@ -202,11 +205,11 @@ def test_additional_teams_and_members(self):
                 "resync_period": "15s"
             },
         }
-        self.k8s.update_config(enable_postgres_team_crd)
-        self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"},
+        k8s.update_config(enable_postgres_team_crd)
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
                              "Operator does not get in sync")

-        self.k8s.api.custom_objects_api.patch_namespaced_custom_object(
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default',
             'postgresteams', 'custom-team-membership',
             {
@@ -224,7 +227,7 @@ def test_additional_teams_and_members(self):
                 }
             })

-        leader = self.k8s.get_cluster_leader_pod()
+        leader = k8s.get_cluster_leader_pod()
         user_query = """
             SELECT rolname
             FROM pg_catalog.pg_roles
@@ -234,7 +237,7 @@ def test_additional_teams_and_members(self):
                              "Not all additional users found in database", 10, 5)

         # replace additional member and check if the removed member's role is renamed
-        self.k8s.api.custom_objects_api.patch_namespaced_custom_object(
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default',
             'postgresteams', 'custom-team-membership',
             {
@@ -257,7 +260,7 @@ def test_additional_teams_and_members(self):
                              "Database role of replaced member in PostgresTeam not renamed", 10, 5)

         # re-add additional member and check if the role is renamed back
-        self.k8s.api.custom_objects_api.patch_namespaced_custom_object(
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default',
             'postgresteams', 'custom-team-membership',
             {
@@ -285,8 +288,8 @@ def test_additional_teams_and_members(self):
                 "resync_period": "30m",
             },
         }
-        self.k8s.update_config(revert_resync)
-        self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"},
+        k8s.update_config(revert_resync)
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
                              "Operator does not get in sync")

     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
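The membership assertions above poll the leader pod with a rolname query through the suite's database helper. For illustration, an equivalent direct check could look like the sketch below; psycopg2 and the connection details are assumptions, since the e2e suite uses its own query plumbing:

# Hypothetical direct role check; assumes psycopg2 and a port-forwarded
# leader pod. The e2e suite runs this query through its own helper instead.
import psycopg2

def list_roles(pattern, host='localhost', port=5432):
    conn = psycopg2.connect(host=host, port=port,
                            user='postgres', dbname='postgres')
    try:
        with conn.cursor() as cur:
            cur.execute(
                "SELECT rolname FROM pg_catalog.pg_roles WHERE rolname LIKE %s",
                (pattern,))
            return [row[0] for row in cur.fetchall()]
    finally:
        conn.close()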
@@ -302,13 +305,13 @@ def test_cross_namespace_secrets(self):
                 "enable_cross_namespace_secret": "true"
             }
         }
-        self.k8s.update_config(patch_cross_namespace_secret,
+        k8s.update_config(patch_cross_namespace_secret,
                           step="cross namespace secrets enabled")
         self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
                              "Operator does not get in sync")

         # create secret in test namespace
-        self.k8s.api.custom_objects_api.patch_namespaced_custom_object(
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default',
             'postgresqls', 'acid-minimal-cluster',
             {
@@ -321,7 +324,7 @@ def test_cross_namespace_secrets(self):

         self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
                              "Operator does not get in sync")
-        self.eventuallyEqual(lambda: self.k8s.count_secrets_with_label("cluster-name=acid-minimal-cluster,application=spilo", self.test_namespace),
+        self.eventuallyEqual(lambda: k8s.count_secrets_with_label("cluster-name=acid-minimal-cluster,application=spilo", self.test_namespace),
                              1, "Secret not created for user in namespace")

     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
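count_secrets_with_label presumably boils down to a labeled secret listing; a minimal sketch with the kubernetes client, given here as an assumption for orientation rather than the suite's code:

# Assumed sketch of count_secrets_with_label using the kubernetes client.
from kubernetes import client, config

def count_secrets_with_label(labels, namespace='default'):
    config.load_kube_config()
    secrets = client.CoreV1Api().list_namespaced_secret(
        namespace, label_selector=labels)
    return len(secrets.items)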
@@ -928,7 +931,7 @@ def test_node_affinity(self):
             plural="postgresqls",
             name="acid-minimal-cluster",
             body=patch_node_affinity_config)
-        self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

         # the node affinity change should relocate the replica from the replica node to the master node to satisfy the new requirement
         k8s.wait_for_pod_failover(master_node, 'spilo-role=replica,' + cluster_label)
@@ -960,7 +963,7 @@ def test_node_affinity(self):
             plural="postgresqls",
             name="acid-minimal-cluster",
             body=patch_node_remove_affinity_config)
-        self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

         self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
         self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running")
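The bodies of patch_node_affinity_config and patch_node_remove_affinity_config sit outside these hunks. For reference, a nodeAffinity patch on the postgresql manifest generally takes the shape below; the structure follows the standard Kubernetes nodeAffinity API, while the hostname value is a placeholder, not the one used by the test:

# Illustrative nodeAffinity patch body; the hostname is a placeholder.
patch_node_affinity_config = {
    "spec": {
        "nodeAffinity": {
            "requiredDuringSchedulingIgnoredDuringExecution": {
                "nodeSelectorTerms": [{
                    "matchExpressions": [{
                        "key": "kubernetes.io/hostname",
                        "operator": "In",
                        "values": ["some-worker-node"]
                    }]
                }]
            }
        }
    }
}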
@@ -1024,12 +1027,13 @@ def test_node_readiness_label(self):

     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_overwrite_pooler_deployment(self):
-        self.k8s.create_with_kubectl("manifests/minimal-fake-pooler-deployment.yaml")
-        self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
-        self.eventuallyEqual(lambda: self.k8s.get_deployment_replica_count(name="acid-minimal-cluster-pooler"), 1,
+        k8s = self.k8s
+        k8s.create_with_kubectl("manifests/minimal-fake-pooler-deployment.yaml")
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(name="acid-minimal-cluster-pooler"), 1,
                              "Initial broken deployment not rolled out")

-        self.k8s.api.custom_objects_api.patch_namespaced_custom_object(
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default',
             'postgresqls', 'acid-minimal-cluster',
             {
@@ -1038,11 +1042,11 @@ def test_overwrite_pooler_deployment(self):
                 }
             })

-        self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
-        self.eventuallyEqual(lambda: self.k8s.get_deployment_replica_count(name="acid-minimal-cluster-pooler"), 2,
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(name="acid-minimal-cluster-pooler"), 2,
                              "Operator did not succeed in overwriting labels")

-        self.k8s.api.custom_objects_api.patch_namespaced_custom_object(
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default',
             'postgresqls', 'acid-minimal-cluster',
             {
@@ -1051,8 +1055,8 @@ def test_overwrite_pooler_deployment(self):
                 }
             })

-        self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
-        self.eventuallyEqual(lambda: self.k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"),
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"),
                              0, "Pooler pods not scaled down")

     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
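The two elided patch bodies in this test toggle the connection pooler on the cluster manifest; enabling it looks roughly like the sketch below. enableConnectionPooler is a real postgresql CRD field, but this body is an illustration of the elided patch, and the test's actual bodies may set more fields:

# Illustrative pooler toggle on the postgresql custom resource.
k8s.api.custom_objects_api.patch_namespaced_custom_object(
    'acid.zalan.do', 'v1', 'default',
    'postgresqls', 'acid-minimal-cluster',
    {'spec': {'enableConnectionPooler': True}})  # False scales the pooler down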
@@ -1094,7 +1098,7 @@ def test_patroni_config_update(self):
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_config)

-        self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

         def compare_config():
             effective_config = k8s.patroni_rest(masterPod.metadata.name, "config")
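pg_patch_config is defined before this hunk; a body of this kind typically mixes Postgres parameters with Patroni settings from the postgresql CRD. The keys below exist in the CRD, but the values are illustrative, not the test's actual ones:

# Illustrative pg_patch_config; example values only.
pg_patch_config = {
    "spec": {
        "postgresql": {
            "parameters": {
                "max_connections": "50"  # plain Postgres GUC
            }
        },
        "patroni": {
            "ttl": 29,
            "loop_wait": 9,
            "retry_timeout": 9,
            "synchronous_mode": True
        }
    }
}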