 import unittest
 import time
 import timeout_decorator
-import subprocess
-import warnings
 import os
 import yaml
 
 from datetime import datetime
-from kubernetes import client, config
+from kubernetes import client
 
 from tests.k8s_api import K8s
+from kubernetes.client.rest import ApiException
 
 SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-12:1.6-p5"
 SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p114"
@@ -89,17 +88,17 @@ def setUpClass(cls):
         # remove existing local storage class and create hostpath class
         try:
             k8s.api.storage_v1_api.delete_storage_class("standard")
-        except:
-            print("Storage class has already been remove")
+        except ApiException as e:
+            print("Failed to delete the 'standard' storage class: {0}".format(e))
 
         # operator deploys pod service account there on start up
         # needed for test_multi_namespace_support()
-        cls.namespace = "test"
+        cls.test_namespace = "test"
         try:
-            v1_namespace = client.V1Namespace(metadata=client.V1ObjectMeta(name=cls.namespace))
+            v1_namespace = client.V1Namespace(metadata=client.V1ObjectMeta(name=cls.test_namespace))
             k8s.api.core_v1.create_namespace(v1_namespace)
-        except:
-            print("Namespace already present")
+        except ApiException as e:
+            print("Failed to create the '{0}' namespace: {1}".format(cls.test_namespace, e))
 
         # submit the most recent operator image built on the Docker host
         with open("manifests/postgres-operator.yaml", 'r+') as f:
@@ -135,10 +134,8 @@ def setUpClass(cls):
 
         # make sure we start a new operator on every new run,
         # this tackles the problem when kind is reused
-        # and the Docker image is infact changed (dirty one)
+        # and the Docker image is in fact changed (dirty one)
 
-        # patch resync period, this can catch some problems with hanging e2e tests
-        # k8s.update_config({"data": {"resync_period":"30s"}},step="TestSuite setup")
         k8s.update_config({}, step="TestSuite Startup")
 
         actual_operator_image = k8s.api.core_v1.list_namespaced_pod(
@@ -170,9 +167,6 @@ def test_enable_disable_connection_pooler(self):
             'connection-pooler': 'acid-minimal-cluster-pooler',
         })
 
-        pod_selector = to_selector(pod_labels)
-        service_selector = to_selector(service_labels)
-
         # enable connection pooler
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default',
@@ -347,7 +341,7 @@ def test_infrastructure_roles(self):
             },
         }
         k8s.update_config(patch_infrastructure_roles)
-        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0":"idle"}, "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
 
         try:
             # check that new roles are represented in the config by requesting the
@@ -604,17 +598,25 @@ def test_multi_namespace_support(self):
 
         with open("manifests/complete-postgres-manifest.yaml", 'r+') as f:
             pg_manifest = yaml.safe_load(f)
-            pg_manifest["metadata"]["namespace"] = self.namespace
+            pg_manifest["metadata"]["namespace"] = self.test_namespace
             yaml.dump(pg_manifest, f, Dumper=yaml.Dumper)
 
         try:
             k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
-            k8s.wait_for_pod_start("spilo-role=master", self.namespace)
-            self.assert_master_is_unique(self.namespace, "acid-test-cluster")
+            k8s.wait_for_pod_start("spilo-role=master", self.test_namespace)
+            self.assert_master_is_unique(self.test_namespace, "acid-test-cluster")
 
         except timeout_decorator.TimeoutError:
             print('Operator log: {}'.format(k8s.get_operator_log()))
             raise
+        finally:
+            # delete the new cluster so that k8s_api.get_operator_state works correctly in subsequent tests
+            # ideally we should delete the 'test' namespace here, but
+            # the pods inside the namespace get stuck in the Terminating state, making the test time out
+            k8s.api.custom_objects_api.delete_namespaced_custom_object(
+                "acid.zalan.do", "v1", self.test_namespace, "postgresqls", "acid-test-cluster")
+            time.sleep(5)
+
 
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_zz_node_readiness_label(self):
@@ -746,12 +748,12 @@ def test_statefulset_annotation_propagation(self):
         }
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_crd_annotations)
-
+
         annotations = {
             "deployment-time": "2020-04-30 12:00:00",
             "downscaler/downtime_replicas": "0",
         }
-
+
         self.eventuallyTrue(lambda: k8s.check_statefulset_annotations(cluster_label, annotations), "Annotations missing")
 
 
@@ -823,14 +825,14 @@ def test_zzzz_cluster_deletion(self):
             }
         }
         k8s.update_config(patch_delete_annotations)
-        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0":"idle"}, "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
 
         try:
             # this delete attempt should be omitted because of missing annotations
             k8s.api.custom_objects_api.delete_namespaced_custom_object(
                 "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster")
             time.sleep(5)
-            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0":"idle"}, "Operator does not get in sync")
+            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
 
             # check that pods and services are still there
             k8s.wait_for_running_pods(cluster_label, 2)
@@ -841,7 +843,7 @@ def test_zzzz_cluster_deletion(self):
 
             # wait a little before proceeding
             time.sleep(10)
-            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0":"idle"}, "Operator does not get in sync")
+            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
 
             # add annotations to manifest
             delete_date = datetime.today().strftime('%Y-%m-%d')
@@ -855,7 +857,7 @@ def test_zzzz_cluster_deletion(self):
             }
             k8s.api.custom_objects_api.patch_namespaced_custom_object(
                 "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_delete_annotations)
-            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0":"idle"}, "Operator does not get in sync")
+            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
 
             # wait a little before proceeding
             time.sleep(20)
@@ -882,7 +884,7 @@ def test_zzzz_cluster_deletion(self):
             print('Operator log: {}'.format(k8s.get_operator_log()))
             raise
 
-        #reset configmap
+        # reset configmap
         patch_delete_annotations = {
             "data": {
                 "delete_annotation_date_key": "",