@@ -109,9 +109,18 @@ def setUpClass(cls):
109109 with open ("manifests/postgres-operator.yaml" , 'w' ) as f :
110110 yaml .dump (operator_deployment , f , Dumper = yaml .Dumper )
111111
112+ with open ("manifests/configmap.yaml" , 'r+' ) as f :
113+ configmap = yaml .safe_load (f )
114+ configmap ["data" ]["workers" ] = "1"
115+
116+ with open ("manifests/configmap.yaml" , 'w' ) as f :
117+ yaml .dump (configmap , f , Dumper = yaml .Dumper )
118+
112119 for filename in ["operator-service-account-rbac.yaml" ,
120+ "postgresteam.crd.yaml" ,
113121 "configmap.yaml" ,
114122 "postgres-operator.yaml" ,
123+ "api-service.yaml" ,
115124 "infrastructure-roles.yaml" ,
116125 "infrastructure-roles-new.yaml" ,
117126 "e2e-storage-class.yaml" ]:
@@ -338,6 +347,7 @@ def test_infrastructure_roles(self):
338347 },
339348 }
340349 k8s .update_config (patch_infrastructure_roles )
350+ self .eventuallyEqual (lambda : k8s .get_operator_state (), {"0" :"idle" }, "Operator does not get in sync" )
341351
342352 try :
343353 # check that new roles are represented in the config by requesting the
@@ -447,6 +457,7 @@ def test_lazy_spilo_upgrade(self):
447457 # so we additionally test if disabling the lazy upgrade - forcing the normal rolling upgrade - works
448458 self .eventuallyEqual (lambda : k8s .get_effective_pod_image (pod0 ), conf_image , "Rolling upgrade was not executed" , 50 , 3 )
449459 self .eventuallyEqual (lambda : k8s .get_effective_pod_image (pod1 ), conf_image , "Rolling upgrade was not executed" , 50 , 3 )
460+ self .eventuallyEqual (lambda : len (k8s .get_patroni_running_members (pod0 )), 2 , "Postgres status did not enter running" )
450461
451462 except timeout_decorator .TimeoutError :
452463 print ('Operator log: {}' .format (k8s .get_operator_log ()))
@@ -519,6 +530,9 @@ def get_docker_image():
519530 print ('Operator log: {}' .format (k8s .get_operator_log ()))
520531 raise
521532
533+ # ensure cluster is healthy after tests
534+ self .eventuallyEqual (lambda : len (k8s .get_patroni_running_members ("acid-minimal-cluster-0" )), 2 , "Postgres status did not enter running" )
535+
522536 @timeout_decorator .timeout (TEST_TIMEOUT_SEC )
523537 def test_min_resource_limits (self ):
524538 '''
@@ -809,12 +823,14 @@ def test_zzzz_cluster_deletion(self):
809823 }
810824 }
811825 k8s .update_config (patch_delete_annotations )
826+ self .eventuallyEqual (lambda : k8s .get_operator_state (), {"0" :"idle" }, "Operator does not get in sync" )
812827
813828 try :
814829 # this delete attempt should be omitted because of missing annotations
815830 k8s .api .custom_objects_api .delete_namespaced_custom_object (
816831 "acid.zalan.do" , "v1" , "default" , "postgresqls" , "acid-minimal-cluster" )
817832 time .sleep (5 )
833+ self .eventuallyEqual (lambda : k8s .get_operator_state (), {"0" :"idle" }, "Operator does not get in sync" )
818834
819835 # check that pods and services are still there
820836 k8s .wait_for_running_pods (cluster_label , 2 )
@@ -825,6 +841,7 @@ def test_zzzz_cluster_deletion(self):
825841
826842 # wait a little before proceeding
827843 time .sleep (10 )
844+ self .eventuallyEqual (lambda : k8s .get_operator_state (), {"0" :"idle" }, "Operator does not get in sync" )
828845
829846 # add annotations to manifest
830847 delete_date = datetime .today ().strftime ('%Y-%m-%d' )
@@ -838,6 +855,7 @@ def test_zzzz_cluster_deletion(self):
838855 }
839856 k8s .api .custom_objects_api .patch_namespaced_custom_object (
840857 "acid.zalan.do" , "v1" , "default" , "postgresqls" , "acid-minimal-cluster" , pg_patch_delete_annotations )
858+ self .eventuallyEqual (lambda : k8s .get_operator_state (), {"0" :"idle" }, "Operator does not get in sync" )
841859
842860 # wait a little before proceeding
843861 time .sleep (20 )