
Commit 41858a7

making pgTeamMap a pointer (#1349)
* making pgTeamMap a pointer
* init empty map
* add e2e test for additional teams and members
* update test_min_resource_limits
* add more waiting in node_affinity_test
* no need for pointers in map of postgresTeamMebership
* another minor update on node affinity test
* refactor and fix fetching additional members
1 parent 137fbbf commit 41858a7
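Why the pointer: the controller reloads the team map on sync, and Load now resets the map in place (*ptm = make(PostgresTeamMap, 0)), so every cluster that holds Config.PgTeamMap as a pointer sees the refreshed contents instead of a stale copy. A minimal Go sketch of that aliasing pattern — using a simplified stand-in for pgteams.PostgresTeamMap, not the operator's real type — could look like this:

package main

import "fmt"

// Simplified stand-in for pgteams.PostgresTeamMap (an assumption for illustration only).
type PostgresTeamMap map[string][]string

// Load resets the map through the pointer receiver, mirroring
// `*ptm = make(PostgresTeamMap, 0)` in pkg/teams/postgres_team.go.
func (ptm *PostgresTeamMap) Load(teams map[string][]string) {
	*ptm = make(PostgresTeamMap, len(teams))
	for team, members := range teams {
		(*ptm)[team] = members
	}
}

func main() {
	controllerMap := PostgresTeamMap{}

	// A cluster's Config.PgTeamMap now holds a pointer to the controller's map ...
	clusterView := &controllerMap

	// ... so a reload performed by the controller is visible through that pointer.
	controllerMap.Load(map[string][]string{"acid": {"e2e"}})
	fmt.Println((*clusterView)["acid"]) // prints: [e2e]
}

The EnablePostgresTeamCRD and PgTeamMap != nil guards added in cluster.go and util.go keep the lookup from running (or dereferencing a nil pointer) when PostgresTeam CRD support is switched off.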

File tree

7 files changed (+185, -95 lines)

e2e/tests/test_e2e.py

Lines changed: 124 additions & 46 deletions
@@ -126,6 +126,7 @@ def setUpClass(cls):
                 "api-service.yaml",
                 "infrastructure-roles.yaml",
                 "infrastructure-roles-new.yaml",
+                "custom-team-membership.yaml",
                 "e2e-storage-class.yaml"]:
             result = k8s.create_with_kubectl("manifests/" + filename)
             print("stdout: {}, stderr: {}".format(result.stdout, result.stderr))
@@ -174,6 +175,63 @@ def test_additional_pod_capabilities(self):
         self.eventuallyEqual(lambda: self.k8s.count_pods_with_container_capabilities(capabilities, cluster_label),
                              2, "Container capabilities not updated")

+    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    def test_additional_teams_and_members(self):
+        '''
+        Test PostgresTeam CRD with extra teams and members
+        '''
+        # enable PostgresTeam CRD and lower resync
+        enable_postgres_team_crd = {
+            "data": {
+                "enable_postgres_team_crd": "true",
+                "resync_period": "15s",
+            },
+        }
+        self.k8s.update_config(enable_postgres_team_crd)
+        self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"},
+                             "Operator does not get in sync")
+
+        self.k8s.api.custom_objects_api.patch_namespaced_custom_object(
+            'acid.zalan.do', 'v1', 'default',
+            'postgresteams', 'custom-team-membership',
+            {
+                'spec': {
+                    'additionalTeams': {
+                        'acid': [
+                            'e2e'
+                        ]
+                    },
+                    'additionalMembers': {
+                        'e2e': [
+                            'kind'
+                        ]
+                    }
+                }
+            })
+
+        # make sure we let one sync pass and the new user being added
+        time.sleep(15)
+
+        leader = self.k8s.get_cluster_leader_pod('acid-minimal-cluster')
+        user_query = """
+            SELECT usename
+              FROM pg_catalog.pg_user
+             WHERE usename IN ('elephant', 'kind');
+        """
+        users = self.query_database(leader.metadata.name, "postgres", user_query)
+        self.eventuallyEqual(lambda: len(users), 2,
+                             "Not all additional users found in database: {}".format(users))
+
+        # revert config change
+        revert_resync = {
+            "data": {
+                "resync_period": "30m",
+            },
+        }
+        self.k8s.update_config(revert_resync)
+        self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"},
+                             "Operator does not get in sync")
+
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_overwrite_pooler_deployment(self):
         self.k8s.create_with_kubectl("manifests/minimal-fake-pooler-deployment.yaml")
@@ -332,54 +390,19 @@ def test_enable_disable_connection_pooler(self):
         # Verify that all the databases have pooler schema installed.
         # Do this via psql, since otherwise we need to deal with
         # credentials.
-        dbList = []
+        db_list = []

         leader = k8s.get_cluster_leader_pod('acid-minimal-cluster')
-        dbListQuery = "select datname from pg_database"
-        schemasQuery = """
+        schemas_query = """
             select schema_name
             from information_schema.schemata
             where schema_name = 'pooler'
         """
-        exec_query = r"psql -tAq -c \"{}\" -d {}"

-        if leader:
-            try:
-                q = exec_query.format(dbListQuery, "postgres")
-                q = "su postgres -c \"{}\"".format(q)
-                print('Get databases: {}'.format(q))
-                result = k8s.exec_with_kubectl(leader.metadata.name, q)
-                dbList = clean_list(result.stdout.split(b'\n'))
-                print('dbList: {}, stdout: {}, stderr {}'.format(
-                    dbList, result.stdout, result.stderr
-                ))
-            except Exception as ex:
-                print('Could not get databases: {}'.format(ex))
-                print('Stdout: {}'.format(result.stdout))
-                print('Stderr: {}'.format(result.stderr))
-
-            for db in dbList:
-                if db in ('template0', 'template1'):
-                    continue
-
-                schemas = []
-                try:
-                    q = exec_query.format(schemasQuery, db)
-                    q = "su postgres -c \"{}\"".format(q)
-                    print('Get schemas: {}'.format(q))
-                    result = k8s.exec_with_kubectl(leader.metadata.name, q)
-                    schemas = clean_list(result.stdout.split(b'\n'))
-                    print('schemas: {}, stdout: {}, stderr {}'.format(
-                        schemas, result.stdout, result.stderr
-                    ))
-                except Exception as ex:
-                    print('Could not get databases: {}'.format(ex))
-                    print('Stdout: {}'.format(result.stdout))
-                    print('Stderr: {}'.format(result.stderr))
-
-                self.assertNotEqual(len(schemas), 0)
-        else:
-            print('Could not find leader pod')
+        db_list = self.list_databases(leader.metadata.name)
+        for db in db_list:
+            self.eventuallyNotEqual(lambda: len(self.query_database(leader.metadata.name, db, schemas_query)), 0,
+                                    "Pooler schema not found in database {}".format(db))

         # remove config section to make test work next time
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
@@ -690,6 +713,7 @@ def test_min_resource_limits(self):
                 "min_memory_limit": minMemoryLimit
             }
         }
+        k8s.update_config(patch_min_resource_limits, "Minimum resource test")

         # lower resource limits below minimum
         pg_patch_resources = {
@@ -707,10 +731,8 @@ def test_min_resource_limits(self):
             }
         }
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
-            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources)
-
-        k8s.patch_statefulset({"metadata": {"annotations": {"zalando-postgres-operator-rolling-update-required": "False"}}})
-        k8s.update_config(patch_min_resource_limits, "Minimum resource test")
+            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources)
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

         self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No two pods running after lazy rolling upgrade")
         self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members()), 2, "Postgres status did not enter running")
@@ -967,7 +989,6 @@ def test_node_affinity(self):
         # verify we are in good state from potential previous tests
         self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
         self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running")
-        self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

         # get nodes of master and replica(s)
         master_node, replica_nodes = k8s.get_pg_nodes(cluster_label)
@@ -1053,6 +1074,9 @@ def test_node_affinity(self):
             body=patch_node_remove_affinity_config)
         self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

+        self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
+        self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running")
+
         # remove node affinity to move replica away from master node
         nm, new_replica_nodes = k8s.get_cluster_nodes()
         new_master_node = nm[0]
@@ -1219,6 +1243,60 @@ def assert_distributed_pods(self, master_node, replica_nodes, cluster_label):
         k8s.wait_for_pod_start('spilo-role=replica')
         return True

+    def list_databases(self, pod_name):
+        '''
+        Get list of databases we might want to iterate over
+        '''
+        k8s = self.k8s
+        result_set = []
+        db_list = []
+        db_list_query = "select datname from pg_database"
+        exec_query = r"psql -tAq -c \"{}\" -d {}"
+
+        try:
+            q = exec_query.format(db_list_query, "postgres")
+            q = "su postgres -c \"{}\"".format(q)
+            print('Get databases: {}'.format(q))
+            result = k8s.exec_with_kubectl(pod_name, q)
+            db_list = clean_list(result.stdout.split(b'\n'))
+            print('db_list: {}, stdout: {}, stderr {}'.format(
+                db_list, result.stdout, result.stderr
+            ))
+        except Exception as ex:
+            print('Could not get databases: {}'.format(ex))
+            print('Stdout: {}'.format(result.stdout))
+            print('Stderr: {}'.format(result.stderr))
+
+        for db in db_list:
+            if db in ('template0', 'template1'):
+                continue
+            result_set.append(db)
+
+        return result_set
+
+    def query_database(self, pod_name, db_name, query):
+        '''
+        Query database and return result as a list
+        '''
+        k8s = self.k8s
+        result_set = []
+        exec_query = r"psql -tAq -c \"{}\" -d {}"
+
+        try:
+            q = exec_query.format(query, db_name)
+            q = "su postgres -c \"{}\"".format(q)
+            print('Send query: {}'.format(q))
+            result = k8s.exec_with_kubectl(pod_name, q)
+            result_set = clean_list(result.stdout.split(b'\n'))
+            print('result: {}, stdout: {}, stderr {}'.format(
+                result_set, result.stdout, result.stderr
+            ))
+        except Exception as ex:
+            print('Error on query execution: {}'.format(ex))
+            print('Stdout: {}'.format(result.stdout))
+            print('Stderr: {}'.format(result.stderr))
+
+        return result_set

 if __name__ == '__main__':
     unittest.main()

pkg/cluster/cluster.go

Lines changed: 11 additions & 9 deletions
@@ -49,7 +49,7 @@ var (
 type Config struct {
 	OpConfig                     config.Config
 	RestConfig                   *rest.Config
-	PgTeamMap                    pgteams.PostgresTeamMap
+	PgTeamMap                    *pgteams.PostgresTeamMap
 	InfrastructureRoles          map[string]spec.PgUser // inherited from the controller
 	PodServiceAccount            *v1.ServiceAccount
 	PodServiceAccountRoleBinding *rbacv1.RoleBinding
@@ -1143,8 +1143,8 @@ func (c *Cluster) initHumanUsers() error {
 	var clusterIsOwnedBySuperuserTeam bool
 	superuserTeams := []string{}

-	if c.OpConfig.EnablePostgresTeamCRDSuperusers {
-		superuserTeams = c.PgTeamMap.GetAdditionalSuperuserTeams(c.Spec.TeamID, true)
+	if c.OpConfig.EnablePostgresTeamCRD && c.OpConfig.EnablePostgresTeamCRDSuperusers && c.Config.PgTeamMap != nil {
+		superuserTeams = c.Config.PgTeamMap.GetAdditionalSuperuserTeams(c.Spec.TeamID, true)
 	}

 	for _, postgresSuperuserTeam := range c.OpConfig.PostgresSuperuserTeams {
@@ -1163,12 +1163,14 @@ func (c *Cluster) initHumanUsers() error {
 		}
 	}

-	additionalTeams := c.PgTeamMap.GetAdditionalTeams(c.Spec.TeamID, true)
-	for _, additionalTeam := range additionalTeams {
-		if !(util.SliceContains(superuserTeams, additionalTeam)) {
-			err := c.initTeamMembers(additionalTeam, false)
-			if err != nil {
-				return fmt.Errorf("Cannot initialize members for additional team %q for cluster owned by %q: %v", additionalTeam, c.Spec.TeamID, err)
+	if c.OpConfig.EnablePostgresTeamCRD && c.Config.PgTeamMap != nil {
+		additionalTeams := c.Config.PgTeamMap.GetAdditionalTeams(c.Spec.TeamID, true)
+		for _, additionalTeam := range additionalTeams {
+			if !(util.SliceContains(superuserTeams, additionalTeam)) {
+				err := c.initTeamMembers(additionalTeam, false)
+				if err != nil {
+					return fmt.Errorf("Cannot initialize members for additional team %q for cluster owned by %q: %v", additionalTeam, c.Spec.TeamID, err)
+				}
 			}
 		}
 	}

pkg/cluster/util.go

Lines changed: 16 additions & 5 deletions
@@ -238,15 +238,26 @@ func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {
 		return nil, fmt.Errorf("no teamId specified")
 	}

-	c.logger.Debugf("fetching possible additional team members for team %q", teamID)
 	members := []string{}
-	additionalMembers := c.PgTeamMap[teamID].AdditionalMembers
-	for _, member := range additionalMembers {
-		members = append(members, member)
+
+	if c.OpConfig.EnablePostgresTeamCRD && c.Config.PgTeamMap != nil {
+		c.logger.Debugf("fetching possible additional team members for team %q", teamID)
+		additionalMembers := []string{}
+
+		for team, membership := range *c.Config.PgTeamMap {
+			if team == teamID {
+				additionalMembers = membership.AdditionalMembers
+				c.logger.Debugf("found %d additional members for team %q", len(members), teamID)
+			}
+		}
+
+		for _, member := range additionalMembers {
+			members = append(members, member)
+		}
 	}

 	if !c.OpConfig.EnableTeamsAPI {
-		c.logger.Debugf("team API is disabled, only returning %d members for team %q", len(members), teamID)
+		c.logger.Debugf("team API is disabled")
 		return members, nil
 	}

pkg/controller/controller.go

Lines changed: 1 addition & 2 deletions
@@ -329,10 +329,9 @@ func (c *Controller) initController() {

 	c.initSharedInformers()

+	c.pgTeamMap = teams.PostgresTeamMap{}
 	if c.opConfig.EnablePostgresTeamCRD {
 		c.loadPostgresTeams()
-	} else {
-		c.pgTeamMap = teams.PostgresTeamMap{}
 	}

 	if c.opConfig.DebugLogging {

pkg/controller/util.go

Lines changed: 1 addition & 5 deletions
@@ -15,7 +15,6 @@ import (
 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
 	"github.com/zalando/postgres-operator/pkg/cluster"
 	"github.com/zalando/postgres-operator/pkg/spec"
-	"github.com/zalando/postgres-operator/pkg/teams"
 	"github.com/zalando/postgres-operator/pkg/util"
 	"github.com/zalando/postgres-operator/pkg/util/config"
 	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
@@ -31,7 +30,7 @@ func (c *Controller) makeClusterConfig() cluster.Config {
 	return cluster.Config{
 		RestConfig:          c.config.RestConfig,
 		OpConfig:            config.Copy(c.opConfig),
-		PgTeamMap:           c.pgTeamMap,
+		PgTeamMap:           &c.pgTeamMap,
 		InfrastructureRoles: infrastructureRoles,
 		PodServiceAccount:   c.PodServiceAccount,
 	}
@@ -395,9 +394,6 @@ func (c *Controller) getInfrastructureRole(
 }

 func (c *Controller) loadPostgresTeams() {
-	// reset team map
-	c.pgTeamMap = teams.PostgresTeamMap{}
-
 	pgTeams, err := c.KubeClient.PostgresTeamsGetter.PostgresTeams(c.opConfig.WatchedNamespace).List(context.TODO(), metav1.ListOptions{})
 	if err != nil {
 		c.logger.Errorf("could not list postgres team objects: %v", err)

pkg/teams/postgres_team.go

Lines changed: 3 additions & 0 deletions
@@ -94,6 +94,9 @@ func (ptm *PostgresTeamMap) GetAdditionalSuperuserTeams(team string, transitive
 
 // Load function to import data from PostgresTeam CRD
 func (ptm *PostgresTeamMap) Load(pgTeams *acidv1.PostgresTeamList) {
+	// reset the team map
+	*ptm = make(PostgresTeamMap, 0)
+
 	superuserTeamSet := teamHashSet{}
 	teamSet := teamHashSet{}
 	teamMemberSet := teamHashSet{}