@@ -126,6 +126,7 @@ def setUpClass(cls):
126
126
"api-service.yaml" ,
127
127
"infrastructure-roles.yaml" ,
128
128
"infrastructure-roles-new.yaml" ,
129
+ "custom-team-membership.yaml" ,
129
130
"e2e-storage-class.yaml" ]:
130
131
result = k8s .create_with_kubectl ("manifests/" + filename )
131
132
print ("stdout: {}, stderr: {}" .format (result .stdout , result .stderr ))
@@ -174,6 +175,63 @@ def test_additional_pod_capabilities(self):
174
175
self .eventuallyEqual (lambda : self .k8s .count_pods_with_container_capabilities (capabilities , cluster_label ),
175
176
2 , "Container capabilities not updated" )
176
177
178
+ @timeout_decorator .timeout (TEST_TIMEOUT_SEC )
179
+ def test_additional_teams_and_members (self ):
180
+ '''
181
+ Test PostgresTeam CRD with extra teams and members
182
+ '''
183
+ # enable PostgresTeam CRD and lower resync
184
+ enable_postgres_team_crd = {
185
+ "data" : {
186
+ "enable_postgres_team_crd" : "true" ,
187
+ "resync_period" : "15s" ,
188
+ },
189
+ }
190
+ self .k8s .update_config (enable_postgres_team_crd )
191
+ self .eventuallyEqual (lambda : self .k8s .get_operator_state (), {"0" : "idle" },
192
+ "Operator does not get in sync" )
193
+
194
+ self .k8s .api .custom_objects_api .patch_namespaced_custom_object (
195
+ 'acid.zalan.do' , 'v1' , 'default' ,
196
+ 'postgresteams' , 'custom-team-membership' ,
197
+ {
198
+ 'spec' : {
199
+ 'additionalTeams' : {
200
+ 'acid' : [
201
+ 'e2e'
202
+ ]
203
+ },
204
+ 'additionalMembers' : {
205
+ 'e2e' : [
206
+ 'kind'
207
+ ]
208
+ }
209
+ }
210
+ })
211
+
212
+ # make sure we let one sync pass and the new user being added
213
+ time .sleep (15 )
214
+
215
+ leader = self .k8s .get_cluster_leader_pod ('acid-minimal-cluster' )
216
+ user_query = """
217
+ SELECT usename
218
+ FROM pg_catalog.pg_user
219
+ WHERE usename IN ('elephant', 'kind');
220
+ """
221
+ users = self .query_database (leader .metadata .name , "postgres" , user_query )
222
+ self .eventuallyEqual (lambda : len (users ), 2 ,
223
+ "Not all additional users found in database: {}" .format (users ))
224
+
225
+ # revert config change
226
+ revert_resync = {
227
+ "data" : {
228
+ "resync_period" : "30m" ,
229
+ },
230
+ }
231
+ self .k8s .update_config (revert_resync )
232
+ self .eventuallyEqual (lambda : self .k8s .get_operator_state (), {"0" : "idle" },
233
+ "Operator does not get in sync" )
234
+
177
235
@timeout_decorator .timeout (TEST_TIMEOUT_SEC )
178
236
def test_overwrite_pooler_deployment (self ):
179
237
self .k8s .create_with_kubectl ("manifests/minimal-fake-pooler-deployment.yaml" )
@@ -332,54 +390,19 @@ def test_enable_disable_connection_pooler(self):
332
390
# Verify that all the databases have pooler schema installed.
333
391
# Do this via psql, since otherwise we need to deal with
334
392
# credentials.
335
- dbList = []
393
+ db_list = []
336
394
337
395
leader = k8s .get_cluster_leader_pod ('acid-minimal-cluster' )
338
- dbListQuery = "select datname from pg_database"
339
- schemasQuery = """
396
+ schemas_query = """
340
397
select schema_name
341
398
from information_schema.schemata
342
399
where schema_name = 'pooler'
343
400
"""
344
- exec_query = r"psql -tAq -c \"{}\" -d {}"
345
401
346
- if leader :
347
- try :
348
- q = exec_query .format (dbListQuery , "postgres" )
349
- q = "su postgres -c \" {}\" " .format (q )
350
- print ('Get databases: {}' .format (q ))
351
- result = k8s .exec_with_kubectl (leader .metadata .name , q )
352
- dbList = clean_list (result .stdout .split (b'\n ' ))
353
- print ('dbList: {}, stdout: {}, stderr {}' .format (
354
- dbList , result .stdout , result .stderr
355
- ))
356
- except Exception as ex :
357
- print ('Could not get databases: {}' .format (ex ))
358
- print ('Stdout: {}' .format (result .stdout ))
359
- print ('Stderr: {}' .format (result .stderr ))
360
-
361
- for db in dbList :
362
- if db in ('template0' , 'template1' ):
363
- continue
364
-
365
- schemas = []
366
- try :
367
- q = exec_query .format (schemasQuery , db )
368
- q = "su postgres -c \" {}\" " .format (q )
369
- print ('Get schemas: {}' .format (q ))
370
- result = k8s .exec_with_kubectl (leader .metadata .name , q )
371
- schemas = clean_list (result .stdout .split (b'\n ' ))
372
- print ('schemas: {}, stdout: {}, stderr {}' .format (
373
- schemas , result .stdout , result .stderr
374
- ))
375
- except Exception as ex :
376
- print ('Could not get databases: {}' .format (ex ))
377
- print ('Stdout: {}' .format (result .stdout ))
378
- print ('Stderr: {}' .format (result .stderr ))
379
-
380
- self .assertNotEqual (len (schemas ), 0 )
381
- else :
382
- print ('Could not find leader pod' )
402
+ db_list = self .list_databases (leader .metadata .name )
403
+ for db in db_list :
404
+ self .eventuallyNotEqual (lambda : len (self .query_database (leader .metadata .name , db , schemas_query )), 0 ,
405
+ "Pooler schema not found in database {}" .format (db ))
383
406
384
407
# remove config section to make test work next time
385
408
k8s .api .custom_objects_api .patch_namespaced_custom_object (
@@ -690,6 +713,7 @@ def test_min_resource_limits(self):
690
713
"min_memory_limit" : minMemoryLimit
691
714
}
692
715
}
716
+ k8s .update_config (patch_min_resource_limits , "Minimum resource test" )
693
717
694
718
# lower resource limits below minimum
695
719
pg_patch_resources = {
@@ -707,10 +731,8 @@ def test_min_resource_limits(self):
707
731
}
708
732
}
709
733
k8s .api .custom_objects_api .patch_namespaced_custom_object (
710
- "acid.zalan.do" , "v1" , "default" , "postgresqls" , "acid-minimal-cluster" , pg_patch_resources )
711
-
712
- k8s .patch_statefulset ({"metadata" : {"annotations" : {"zalando-postgres-operator-rolling-update-required" : "False" }}})
713
- k8s .update_config (patch_min_resource_limits , "Minimum resource test" )
734
+ "acid.zalan.do" , "v1" , "default" , "postgresqls" , "acid-minimal-cluster" , pg_patch_resources )
735
+ self .eventuallyEqual (lambda : k8s .get_operator_state (), {"0" : "idle" }, "Operator does not get in sync" )
714
736
715
737
self .eventuallyEqual (lambda : k8s .count_running_pods (), 2 , "No two pods running after lazy rolling upgrade" )
716
738
self .eventuallyEqual (lambda : len (k8s .get_patroni_running_members ()), 2 , "Postgres status did not enter running" )
@@ -967,7 +989,6 @@ def test_node_affinity(self):
967
989
# verify we are in good state from potential previous tests
968
990
self .eventuallyEqual (lambda : k8s .count_running_pods (), 2 , "No 2 pods running" )
969
991
self .eventuallyEqual (lambda : len (k8s .get_patroni_running_members ("acid-minimal-cluster-0" )), 2 , "Postgres status did not enter running" )
970
- self .eventuallyEqual (lambda : self .k8s .get_operator_state (), {"0" : "idle" }, "Operator does not get in sync" )
971
992
972
993
# get nodes of master and replica(s)
973
994
master_node , replica_nodes = k8s .get_pg_nodes (cluster_label )
@@ -1053,6 +1074,9 @@ def test_node_affinity(self):
1053
1074
body = patch_node_remove_affinity_config )
1054
1075
self .eventuallyEqual (lambda : self .k8s .get_operator_state (), {"0" : "idle" }, "Operator does not get in sync" )
1055
1076
1077
+ self .eventuallyEqual (lambda : k8s .count_running_pods (), 2 , "No 2 pods running" )
1078
+ self .eventuallyEqual (lambda : len (k8s .get_patroni_running_members ("acid-minimal-cluster-0" )), 2 , "Postgres status did not enter running" )
1079
+
1056
1080
# remove node affinity to move replica away from master node
1057
1081
nm , new_replica_nodes = k8s .get_cluster_nodes ()
1058
1082
new_master_node = nm [0 ]
@@ -1219,6 +1243,60 @@ def assert_distributed_pods(self, master_node, replica_nodes, cluster_label):
1219
1243
k8s .wait_for_pod_start ('spilo-role=replica' )
1220
1244
return True
1221
1245
1246
+ def list_databases (self , pod_name ):
1247
+ '''
1248
+ Get list of databases we might want to iterate over
1249
+ '''
1250
+ k8s = self .k8s
1251
+ result_set = []
1252
+ db_list = []
1253
+ db_list_query = "select datname from pg_database"
1254
+ exec_query = r"psql -tAq -c \"{}\" -d {}"
1255
+
1256
+ try :
1257
+ q = exec_query .format (db_list_query , "postgres" )
1258
+ q = "su postgres -c \" {}\" " .format (q )
1259
+ print ('Get databases: {}' .format (q ))
1260
+ result = k8s .exec_with_kubectl (pod_name , q )
1261
+ db_list = clean_list (result .stdout .split (b'\n ' ))
1262
+ print ('db_list: {}, stdout: {}, stderr {}' .format (
1263
+ db_list , result .stdout , result .stderr
1264
+ ))
1265
+ except Exception as ex :
1266
+ print ('Could not get databases: {}' .format (ex ))
1267
+ print ('Stdout: {}' .format (result .stdout ))
1268
+ print ('Stderr: {}' .format (result .stderr ))
1269
+
1270
+ for db in db_list :
1271
+ if db in ('template0' , 'template1' ):
1272
+ continue
1273
+ result_set .append (db )
1274
+
1275
+ return result_set
1276
+
1277
+ def query_database (self , pod_name , db_name , query ):
1278
+ '''
1279
+ Query database and return result as a list
1280
+ '''
1281
+ k8s = self .k8s
1282
+ result_set = []
1283
+ exec_query = r"psql -tAq -c \"{}\" -d {}"
1284
+
1285
+ try :
1286
+ q = exec_query .format (query , db_name )
1287
+ q = "su postgres -c \" {}\" " .format (q )
1288
+ print ('Send query: {}' .format (q ))
1289
+ result = k8s .exec_with_kubectl (pod_name , q )
1290
+ result_set = clean_list (result .stdout .split (b'\n ' ))
1291
+ print ('result: {}, stdout: {}, stderr {}' .format (
1292
+ result_set , result .stdout , result .stderr
1293
+ ))
1294
+ except Exception as ex :
1295
+ print ('Error on query execution: {}' .format (ex ))
1296
+ print ('Stdout: {}' .format (result .stdout ))
1297
+ print ('Stderr: {}' .format (result .stderr ))
1298
+
1299
+ return result_set
1222
1300
1223
1301
# Entry point: allow running the e2e suite directly as a script.
if __name__ == '__main__':
    unittest.main()