Skip to content

Commit ee81cfe

Browse files
Merge pull request #2423 from lionelvillard/dns-network-policies
✨ Adding DNS network policies
2 parents 2a1c00a + 656ad4d commit ee81cfe

File tree

15 files changed

+962
-38
lines changed

15 files changed

+962
-38
lines changed

pkg/cliplugins/workload/plugin/sync_test.go

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,12 @@ rules:
6060
- "list"
6161
- "watch"
6262
- "delete"
63+
- apiGroups:
64+
- ""
65+
resources:
66+
- endpoints
67+
verbs:
68+
- "get"
6369
- apiGroups:
6470
- "apiextensions.k8s.io"
6571
resources:
@@ -68,6 +74,14 @@ rules:
6874
- "get"
6975
- "watch"
7076
- "list"
77+
- apiGroups:
78+
- "networking.k8s.io"
79+
resources:
80+
- networkpolicies
81+
verbs:
82+
- "create"
83+
- "list"
84+
- "watch"
7185
- apiGroups:
7286
- ""
7387
resources:
@@ -290,6 +304,12 @@ rules:
290304
- "list"
291305
- "watch"
292306
- "delete"
307+
- apiGroups:
308+
- ""
309+
resources:
310+
- endpoints
311+
verbs:
312+
- "get"
293313
- apiGroups:
294314
- "apiextensions.k8s.io"
295315
resources:
@@ -298,6 +318,14 @@ rules:
298318
- "get"
299319
- "watch"
300320
- "list"
321+
- apiGroups:
322+
- "networking.k8s.io"
323+
resources:
324+
- networkpolicies
325+
verbs:
326+
- "create"
327+
- "list"
328+
- "watch"
301329
- apiGroups:
302330
- ""
303331
resources:

pkg/cliplugins/workload/plugin/syncer.yaml

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,12 @@ rules:
3434
- "list"
3535
- "watch"
3636
- "delete"
37+
- apiGroups:
38+
- ""
39+
resources:
40+
- endpoints
41+
verbs:
42+
- "get"
3743
- apiGroups:
3844
- "apiextensions.k8s.io"
3945
resources:
@@ -42,6 +48,14 @@ rules:
4248
- "get"
4349
- "watch"
4450
- "list"
51+
- apiGroups:
52+
- "networking.k8s.io"
53+
resources:
54+
- networkpolicies
55+
verbs:
56+
- "create"
57+
- "list"
58+
- "watch"
4559
{{- range $groupMapping := .GroupMappings}}
4660
- apiGroups:
4761
- "{{$groupMapping.APIGroup}}"

pkg/syncer/shared/helpers.go

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,9 @@ package shared
1818

1919
import (
2020
"crypto/sha256"
21+
"encoding/json"
2122
"fmt"
23+
"math/big"
2224
"strings"
2325

2426
"github.com/kcp-dev/logicalcluster/v3"
@@ -72,6 +74,28 @@ func GetDNSID(clusterName logicalcluster.Name, syncTargetUID types.UID, syncTarg
7274
return fmt.Sprintf("kcp-dns-%s-%s-%s", syncTargetName, uid36hash[:8], workspace36hash[:8])
7375
}
7476

77+
// GetTenantID encodes the KCP tenant to which the namespace designated by the given
78+
// NamespaceLocator belongs. It is based on the NamespaceLocator, but with an empty
79+
// namespace value. The value will be the same for all downstream namespaces originating
80+
// from the same KCP workspace / SyncTarget.
81+
// The encoding is repeatable.
82+
func GetTenantID(l NamespaceLocator) (string, error) {
83+
clusterWideLocator := NamespaceLocator{
84+
SyncTarget: l.SyncTarget,
85+
ClusterName: l.ClusterName,
86+
}
87+
88+
b, err := json.Marshal(clusterWideLocator)
89+
if err != nil {
90+
return "", err
91+
}
92+
93+
hash := sha256.Sum224(b)
94+
var i big.Int
95+
i.SetBytes(hash[:])
96+
return i.Text(62), nil
97+
}
98+
7599
func ContainsGVR(gvrs []schema.GroupVersionResource, gvr schema.GroupVersionResource) bool {
76100
for _, item := range gvrs {
77101
if gvr == item {

pkg/syncer/shared/namespace.go

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ import (
3030

3131
const (
3232
NamespaceLocatorAnnotation = "kcp.io/namespace-locator"
33+
TenantIDLabel = "kcp.io/tenant-id"
3334
)
3435

3536
// NamespaceLocator stores a logical cluster and namespace and is used

pkg/syncer/spec/dns/dns_process.go

Lines changed: 49 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -18,10 +18,9 @@ package dns
1818

1919
import (
2020
"context"
21+
"errors"
2122
"sync"
2223

23-
"github.com/kcp-dev/logicalcluster/v3"
24-
2524
appsv1 "k8s.io/api/apps/v1"
2625
corev1 "k8s.io/api/core/v1"
2726
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -30,6 +29,7 @@ import (
3029
"k8s.io/client-go/kubernetes"
3130
listersappsv1 "k8s.io/client-go/listers/apps/v1"
3231
listerscorev1 "k8s.io/client-go/listers/core/v1"
32+
listersnetworkingv1 "k8s.io/client-go/listers/networking/v1"
3333
listersrbacv1 "k8s.io/client-go/listers/rbac/v1"
3434
"k8s.io/klog/v2"
3535

@@ -45,9 +45,10 @@ type DNSProcessor struct {
4545
deploymentLister listersappsv1.DeploymentLister
4646
serviceLister listerscorev1.ServiceLister
4747
endpointLister listerscorev1.EndpointsLister
48+
networkPolicyLister listersnetworkingv1.NetworkPolicyLister
4849

49-
syncTargetName string
5050
syncTargetUID types.UID
51+
syncTargetName string
5152
dnsNamespace string // namespace containing all DNS objects
5253
dnsImage string
5354

@@ -63,8 +64,9 @@ func NewDNSProcessor(
6364
deploymentLister listersappsv1.DeploymentLister,
6465
serviceLister listerscorev1.ServiceLister,
6566
endpointLister listerscorev1.EndpointsLister,
66-
syncTargetName string,
67+
networkPolicyLister listersnetworkingv1.NetworkPolicyLister,
6768
syncTargetUID types.UID,
69+
syncTargetName string,
6870
dnsNamespace string,
6971
dnsImage string) *DNSProcessor {
7072
return &DNSProcessor{
@@ -75,8 +77,9 @@ func NewDNSProcessor(
7577
deploymentLister: deploymentLister,
7678
serviceLister: serviceLister,
7779
endpointLister: endpointLister,
78-
syncTargetName: syncTargetName,
80+
networkPolicyLister: networkPolicyLister,
7981
syncTargetUID: syncTargetUID,
82+
syncTargetName: syncTargetName,
8083
dnsNamespace: dnsNamespace,
8184
dnsImage: dnsImage,
8285
}
@@ -87,12 +90,12 @@ func NewDNSProcessor(
8790
// are effectively reachable through the Service.
8891
// It returns true if the DNS is setup and reachable, and returns an error if there was an error
8992
// during the check or creation of the DNS-related resources.
90-
func (d *DNSProcessor) EnsureDNSUpAndReady(ctx context.Context, workspace logicalcluster.Name) (bool, error) {
93+
func (d *DNSProcessor) EnsureDNSUpAndReady(ctx context.Context, namespaceLocator shared.NamespaceLocator) (bool, error) {
9194
logger := klog.FromContext(ctx)
92-
logger.WithName("dns")
95+
logger = logger.WithName("dns")
9396

94-
dnsID := shared.GetDNSID(workspace, d.syncTargetUID, d.syncTargetName)
95-
logger.WithValues("name", dnsID, "namespace", d.dnsNamespace)
97+
dnsID := shared.GetDNSID(namespaceLocator.ClusterName, d.syncTargetUID, d.syncTargetName)
98+
logger = logger.WithValues("name", dnsID, "namespace", d.dnsNamespace)
9699

97100
logger.V(4).Info("checking if all dns objects exist and are up-to-date")
98101
ctx = klog.NewContext(ctx, logger)
@@ -132,6 +135,10 @@ func (d *DNSProcessor) EnsureDNSUpAndReady(ctx context.Context, workspace logica
132135
if err := d.processService(ctx, dnsID); err != nil {
133136
return false, err
134137
}
138+
if err := d.processNetworkPolicy(ctx, dnsID, namespaceLocator); err != nil {
139+
return false, err
140+
}
141+
135142
// Since the Endpoints resource was not found, the DNS is not yet ready,
136143
// even though all the required resources have been created
137144
// (deployment still needs to start).
@@ -233,6 +240,39 @@ func (d *DNSProcessor) processService(ctx context.Context, name string) error {
233240
return nil
234241
}
235242

243+
func (d *DNSProcessor) processNetworkPolicy(ctx context.Context, name string, namespaceLocator shared.NamespaceLocator) error {
244+
logger := klog.FromContext(ctx)
245+
246+
var kubeEndpoints *corev1.Endpoints
247+
_, err := d.networkPolicyLister.NetworkPolicies(d.dnsNamespace).Get(name)
248+
if apierrors.IsNotFound(err) {
249+
kubeEndpoints, err = d.downstreamKubeClient.CoreV1().Endpoints("default").Get(ctx, "kubernetes", metav1.GetOptions{})
250+
if err != nil {
251+
return err
252+
}
253+
if len(kubeEndpoints.Subsets) == 0 || len(kubeEndpoints.Subsets[0].Addresses) == 0 {
254+
return errors.New("missing kubernetes API endpoints")
255+
}
256+
257+
tenantID, err := shared.GetTenantID(namespaceLocator)
258+
if err != nil {
259+
return err
260+
}
261+
262+
expected := MakeNetworkPolicy(name, d.dnsNamespace, tenantID, &kubeEndpoints.Subsets[0])
263+
_, err = d.downstreamKubeClient.NetworkingV1().NetworkPolicies(d.dnsNamespace).Create(ctx, expected, metav1.CreateOptions{})
264+
if err == nil {
265+
logger.Info("NetworkPolicy created")
266+
}
267+
}
268+
if err != nil && !apierrors.IsAlreadyExists(err) {
269+
logger.Error(err, "failed to get NetworkPolicy (retrying)")
270+
return err
271+
}
272+
273+
return nil
274+
}
275+
236276
func hasAtLeastOneReadyAddress(endpoints *corev1.Endpoints) bool {
237277
for _, s := range endpoints.Subsets {
238278
if len(s.Addresses) > 0 && s.Addresses[0].IP != "" {

0 commit comments

Comments (0)