@@ -324,65 +324,6 @@ def test_cross_namespace_secrets(self):
         self.eventuallyEqual(lambda: self.k8s.count_secrets_with_label("cluster-name=acid-minimal-cluster,application=spilo", self.test_namespace),
                              1, "Secret not created for user in namespace")
 
-    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
-    def test_decrease_max_connections(self):
-        '''
-        Test decreasing max_connections and restarting cluster through rest api
-        '''
-        k8s = self.k8s
-        cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
-        labels = 'spilo-role=master,' + cluster_label
-        new_max_connections_value = "99"
-        pods = k8s.api.core_v1.list_namespaced_pod(
-            'default', label_selector=labels).items
-        self.assert_master_is_unique()
-        masterPod = pods[0]
-        creationTimestamp = masterPod.metadata.creation_timestamp
-
-        # adjust max_connections
-        pg_patch_max_connections = {
-            "spec": {
-                "postgresql": {
-                    "parameters": {
-                        "max_connections": new_max_connections_value
-                    }
-                }
-            }
-        }
-
-        try:
-            k8s.api.custom_objects_api.patch_namespaced_custom_object(
-                "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_max_connections)
-
-            def get_max_connections():
-                pods = k8s.api.core_v1.list_namespaced_pod(
-                    'default', label_selector=labels).items
-                self.assert_master_is_unique()
-                masterPod = pods[0]
-                get_max_connections_cmd = '''psql -At -U postgres -c "SELECT setting FROM pg_settings WHERE name = 'max_connections';"'''
-                result = k8s.exec_with_kubectl(masterPod.metadata.name, get_max_connections_cmd)
-                max_connections_value = int(result.stdout)
-                return max_connections_value
-
-            # Make sure that max_connections decreased
-            self.eventuallyEqual(get_max_connections, int(new_max_connections_value), "max_connections didn't decrease")
-            pods = k8s.api.core_v1.list_namespaced_pod(
-                'default', label_selector=labels).items
-            self.assert_master_is_unique()
-            masterPod = pods[0]
-            # Make sure that pod didn't restart
-            self.assertEqual(creationTimestamp, masterPod.metadata.creation_timestamp,
-                             "Master pod creation timestamp is updated")
-
-        except timeout_decorator.TimeoutError:
-            print('Operator log: {}'.format(k8s.get_operator_log()))
-            raise
-
-        # make sure cluster is in a good state for further tests
-        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
-        self.eventuallyEqual(lambda: k8s.count_running_pods(), 2,
-                             "No 2 pods running")
-
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_enable_disable_connection_pooler(self):
         '''
@@ -1114,6 +1055,88 @@ def test_overwrite_pooler_deployment(self):
         self.eventuallyEqual(lambda: self.k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"),
                              0, "Pooler pods not scaled down")
 
+    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    def test_patroni_config_update(self):
+        '''
+        Change Postgres config under Spec.Postgresql.Parameters and Spec.Patroni
+        and query Patroni config endpoint to check if manifest changes got applied
+        via restarting cluster through Patroni's rest api
+        '''
+        k8s = self.k8s
+        masterPod = k8s.get_cluster_leader_pod()
+        labels = 'application=spilo,cluster-name=acid-minimal-cluster,spilo-role=master'
+        creationTimestamp = masterPod.metadata.creation_timestamp
+        new_max_connections_value = "50"
+
+        # adjust max_connections
+        pg_patch_config = {
+            "spec": {
+                "postgresql": {
+                    "parameters": {
+                        "max_connections": new_max_connections_value
+                    }
+                },
+                "patroni": {
+                    "slots": {
+                        "test_slot": {
+                            "type": "physical"
+                        }
+                    },
+                    "ttl": 29,
+                    "loop_wait": 9,
+                    "retry_timeout": 9,
+                    "synchronous_mode": True
+                }
+            }
+        }
+
+        try:
+            k8s.api.custom_objects_api.patch_namespaced_custom_object(
+                "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_config)
+
+            self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+
+            def compare_config():
+                effective_config = k8s.patroni_rest(masterPod.metadata.name, "config")
+                desired_patroni = pg_patch_config["spec"]["patroni"]
+                desired_parameters = pg_patch_config["spec"]["postgresql"]["parameters"]
+                effective_parameters = effective_config["postgresql"]["parameters"]
+                self.assertEqual(desired_parameters["max_connections"], effective_parameters["max_connections"],
+                                 "max_connections not updated")
+                self.assertTrue(effective_config["slots"] is not None, "physical replication slot not added")
+                self.assertEqual(desired_patroni["ttl"], effective_config["ttl"],
+                                 "ttl not updated")
+                self.assertEqual(desired_patroni["loop_wait"], effective_config["loop_wait"],
+                                 "loop_wait not updated")
+                self.assertEqual(desired_patroni["retry_timeout"], effective_config["retry_timeout"],
+                                 "retry_timeout not updated")
+                self.assertEqual(desired_patroni["synchronous_mode"], effective_config["synchronous_mode"],
+                                 "synchronous_mode not updated")
+                return True
+
+            self.eventuallyTrue(compare_config, "Postgres config not applied")
+
+            setting_query = """
+               SELECT setting
+                 FROM pg_settings
+                WHERE name = 'max_connections';
+            """
+            self.eventuallyEqual(lambda: self.query_database(masterPod.metadata.name, "postgres", setting_query)[0], new_max_connections_value,
+                                 "New max_connections setting not applied", 10, 5)
+
+            # make sure that pod wasn't recreated
+            self.assertEqual(creationTimestamp, masterPod.metadata.creation_timestamp,
+                             "Master pod creation timestamp is updated")
+
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
+
+        # make sure cluster is in a good state for further tests
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.count_running_pods(), 2,
+                             "No 2 pods running")
+
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_rolling_update_flag(self):
         '''
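For reference, the new test reads Patroni's dynamic configuration through the test helper k8s.patroni_rest(pod, "config"). Below is a minimal standalone sketch of the same check, assuming kubectl access to the cluster, a leader pod named acid-minimal-cluster-0, and Patroni's REST API listening on its default port 8008 inside the pod; the pod name and port are assumptions for illustration and are not taken from this diff.

    import json
    import subprocess

    def patroni_config(pod_name, namespace="default"):
        # Patroni serves its dynamic configuration as JSON on GET /config;
        # run curl inside the Spilo pod against the local REST API (port 8008 assumed).
        cmd = ["kubectl", "exec", pod_name, "-n", namespace, "--",
               "curl", "-s", "http://localhost:8008/config"]
        return json.loads(subprocess.check_output(cmd))

    # hypothetical leader pod name; with the patch applied, ttl and
    # postgresql.parameters.max_connections should reflect the manifest values
    config = patroni_config("acid-minimal-cluster-0")
    print(config.get("ttl"), config.get("postgresql", {}).get("parameters", {}))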