From 5021b44694bdbc97baa0e6b513c8769752e78758 Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia Date: Fri, 25 Apr 2025 17:04:36 -0400 Subject: [PATCH 1/6] Remove EndpointsTranslator and feature flag Fixes #6412 EndpointSliceTranslator is now stable since it was defaulted in v1.29.0 Signed-off-by: Sunjay Bhatia --- apis/projectcontour/v1alpha1/contourconfig.go | 4 - .../v1alpha1/contourconfig_helpers.go | 22 +- .../v1alpha1/contourconfig_helpers_test.go | 93 +- cmd/contour/serve.go | 36 +- internal/featuretests/kubernetes.go | 41 +- .../featuretests/v3/authorization_test.go | 16 +- internal/featuretests/v3/endpoints_test.go | 131 +- .../featuretests/v3/extensionservice_test.go | 18 +- internal/featuretests/v3/featuretests.go | 30 +- .../v3/global_authorization_test.go | 16 +- internal/featuretests/v3/listeners_test.go | 2 +- internal/featuretests/v3/tracing_test.go | 16 +- internal/featuretests/v3/upstreamtls_test.go | 8 +- internal/xdscache/v3/contour_test.go | 19 - .../xdscache/v3/endpointslicetranslator.go | 25 + .../v3/endpointslicetranslator_test.go | 145 +++ internal/xdscache/v3/endpointstranslator.go | 442 ------- .../xdscache/v3/endpointstranslator_test.go | 1066 ----------------- internal/xdscache/v3/snapshot.go | 2 +- pkg/config/parameters.go | 4 - 20 files changed, 327 insertions(+), 1809 deletions(-) delete mode 100644 internal/xdscache/v3/endpointstranslator.go delete mode 100644 internal/xdscache/v3/endpointstranslator_test.go diff --git a/apis/projectcontour/v1alpha1/contourconfig.go b/apis/projectcontour/v1alpha1/contourconfig.go index 7019617a087..2971a9d554c 100644 --- a/apis/projectcontour/v1alpha1/contourconfig.go +++ b/apis/projectcontour/v1alpha1/contourconfig.go @@ -86,10 +86,6 @@ type ContourConfigurationSpec struct { Tracing *TracingConfig `json:"tracing,omitempty"` // FeatureFlags defines toggle to enable new contour features. - // Available toggles are: - // useEndpointSlices - Configures contour to fetch endpoint data - // from k8s endpoint slices. defaults to true, - // If false then reads endpoint data from the k8s endpoints. FeatureFlags FeatureFlags `json:"featureFlags,omitempty"` } diff --git a/apis/projectcontour/v1alpha1/contourconfig_helpers.go b/apis/projectcontour/v1alpha1/contourconfig_helpers.go index a9f572a7609..26797c90f60 100644 --- a/apis/projectcontour/v1alpha1/contourconfig_helpers.go +++ b/apis/projectcontour/v1alpha1/contourconfig_helpers.go @@ -21,11 +21,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" ) -const featureFlagUseEndpointSlices string = "useEndpointSlices" - -var featureFlagsMap = map[string]struct{}{ - featureFlagUseEndpointSlices: {}, -} +var featureFlagsMap = map[string]struct{}{} // Validate configuration that is not already covered by CRD validation. 
func (c *ContourConfigurationSpec) Validate() error { @@ -235,26 +231,12 @@ func (f FeatureFlags) Validate() error { for _, featureFlag := range f { fields := strings.Split(featureFlag, "=") if _, found := featureFlagsMap[fields[0]]; !found { - return fmt.Errorf("invalid contour configuration, unknown feature flag:%s", featureFlag) + return fmt.Errorf("invalid contour configuration, unknown feature flag: %s", featureFlag) } } return nil } -func (f FeatureFlags) IsEndpointSliceEnabled() bool { - // only when the flag: 'useEndpointSlices=false' is exists, return false - for _, flag := range f { - if !strings.HasPrefix(flag, featureFlagUseEndpointSlices) { - continue - } - fields := strings.Split(flag, "=") - if len(fields) == 2 && strings.ToLower(fields[1]) == "false" { - return false - } - } - return true -} - // Validate ensures that GatewayRef namespace/name is specified. func (g *GatewayConfig) Validate() error { if g != nil && (g.GatewayRef.Namespace == "" || g.GatewayRef.Name == "") { diff --git a/apis/projectcontour/v1alpha1/contourconfig_helpers_test.go b/apis/projectcontour/v1alpha1/contourconfig_helpers_test.go index 6aa25cfbf72..ff3afd005f5 100644 --- a/apis/projectcontour/v1alpha1/contourconfig_helpers_test.go +++ b/apis/projectcontour/v1alpha1/contourconfig_helpers_test.go @@ -303,36 +303,10 @@ func TestFeatureFlagsValidate(t *testing.T) { flags contour_v1alpha1.FeatureFlags expected error }{ - { - name: "valid flag: no value", - flags: contour_v1alpha1.FeatureFlags{"useEndpointSlices"}, - expected: nil, - }, - { - name: "valid flag2: empty", - flags: contour_v1alpha1.FeatureFlags{"useEndpointSlices="}, - expected: nil, - }, - { - name: "valid flag: true", - flags: contour_v1alpha1.FeatureFlags{"useEndpointSlices=true"}, - expected: nil, - }, - { - name: "valid flag: false", - flags: contour_v1alpha1.FeatureFlags{"useEndpointSlices=false"}, - expected: nil, - }, - { name: "invalid flag", flags: contour_v1alpha1.FeatureFlags{"invalidFlag"}, - expected: fmt.Errorf("invalid contour configuration, unknown feature flag:invalidFlag"), - }, - { - name: "mix of valid and invalid flags", - flags: contour_v1alpha1.FeatureFlags{"useEndpointSlices", "invalidFlag"}, - expected: fmt.Errorf("invalid contour configuration, unknown feature flag:invalidFlag"), + expected: fmt.Errorf("invalid contour configuration, unknown feature flag: invalidFlag"), }, { name: "empty flags", @@ -348,68 +322,3 @@ func TestFeatureFlagsValidate(t *testing.T) { }) } } - -func TestFeatureFlagsIsEndpointSliceEnabled(t *testing.T) { - tests := []struct { - name string - flags contour_v1alpha1.FeatureFlags - expected bool - }{ - { - name: "valid flag: no value", - flags: contour_v1alpha1.FeatureFlags{"useEndpointSlices"}, - expected: true, - }, - { - name: "valid flag2: empty", - flags: contour_v1alpha1.FeatureFlags{"useEndpointSlices="}, - expected: true, - }, - { - name: "valid flag: true", - flags: contour_v1alpha1.FeatureFlags{"useEndpointSlices=true"}, - expected: true, - }, - { - name: "valid flag: ANY", - flags: contour_v1alpha1.FeatureFlags{"useEndpointSlices=ANY"}, - expected: true, - }, - - { - name: "empty flags", - flags: contour_v1alpha1.FeatureFlags{}, - expected: true, - }, - { - name: "empty string", - flags: contour_v1alpha1.FeatureFlags{""}, - expected: true, - }, - - { - name: "multi-flags", - flags: contour_v1alpha1.FeatureFlags{"useEndpointSlices", "otherFlag"}, - expected: true, - }, - - { - name: "valid flag: false", - flags: contour_v1alpha1.FeatureFlags{"useEndpointSlices=false"}, - expected: 
false, - }, - - { - name: "valid flag: FALSE", - flags: contour_v1alpha1.FeatureFlags{"useEndpointSlices=FALSE"}, - expected: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.flags.IsEndpointSliceEnabled() - assert.Equal(t, tt.expected, err) - }) - } -} diff --git a/cmd/contour/serve.go b/cmd/contour/serve.go index e2110cc3bf7..37dd32d99d2 100644 --- a/cmd/contour/serve.go +++ b/cmd/contour/serve.go @@ -192,12 +192,6 @@ type Server struct { handlerCacheSyncs []cache.InformerSynced } -type EndpointsTranslator interface { - cache.ResourceEventHandler - xdscache.ResourceCache - SetObserver(observer contour.Observer) -} - // NewServer returns a Server object which contains the initial configuration // objects required to start an instance of Contour. func NewServer(log logrus.FieldLogger, ctx *serveContext) (*Server, error) { @@ -488,13 +482,8 @@ func (s *Server) doServe() error { contourMetrics := metrics.NewMetrics(s.registry) - // Endpoints updates are handled directly by the EndpointsTranslator/EndpointSliceTranslator due to the high update volume. - var endpointHandler EndpointsTranslator - if contourConfiguration.FeatureFlags.IsEndpointSliceEnabled() { - endpointHandler = xdscache_v3.NewEndpointSliceTranslator(s.log.WithField("context", "endpointslicetranslator")) - } else { - endpointHandler = xdscache_v3.NewEndpointsTranslator(s.log.WithField("context", "endpointstranslator")) - } + // Endpoints updates are handled directly by the EndpointSliceTranslator due to the high update volume. + endpointHandler := xdscache_v3.NewEndpointSliceTranslator(s.log.WithField("context", "endpointslicetranslator")) envoyGen := envoy_v3.NewEnvoyGen(envoy_v3.EnvoyGenOpt{ XDSClusterName: envoy_v3.DefaultXDSClusterName, @@ -655,21 +644,12 @@ func (s *Server) doServe() error { s.log.WithError(err).WithField("resource", "secrets").Fatal("failed to create informer") } - // Inform on endpoints/endpointSlices. - if contourConfiguration.FeatureFlags.IsEndpointSliceEnabled() { - if err := s.informOnResource(&discovery_v1.EndpointSlice{}, &contour.EventRecorder{ - Next: endpointHandler, - Counter: contourMetrics.EventHandlerOperations, - }); err != nil { - s.log.WithError(err).WithField("resource", "endpointslices").Fatal("failed to create informer") - } - } else { - if err := s.informOnResource(&core_v1.Endpoints{}, &contour.EventRecorder{ - Next: endpointHandler, - Counter: contourMetrics.EventHandlerOperations, - }); err != nil { - s.log.WithError(err).WithField("resource", "endpoints").Fatal("failed to create informer") - } + // Inform on endpointSlices. + if err := s.informOnResource(&discovery_v1.EndpointSlice{}, &contour.EventRecorder{ + Next: endpointHandler, + Counter: contourMetrics.EventHandlerOperations, + }); err != nil { + s.log.WithError(err).WithField("resource", "endpointslices").Fatal("failed to create informer") } // Register our event handler with the manager. 
diff --git a/internal/featuretests/kubernetes.go b/internal/featuretests/kubernetes.go index e0b5dcfdb15..e9fb47cb807 100644 --- a/internal/featuretests/kubernetes.go +++ b/internal/featuretests/kubernetes.go @@ -20,8 +20,10 @@ import ( "github.com/tsaarni/certyaml" core_v1 "k8s.io/api/core/v1" + discovery_v1 "k8s.io/api/discovery/v1" networking_v1 "k8s.io/api/networking/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" "github.com/projectcontour/contour/internal/dag" "github.com/projectcontour/contour/internal/fixture" @@ -108,32 +110,41 @@ func PEMBytes(t *testing.T, cert *certyaml.Certificate) []byte { return c } -func Endpoints(ns, name string, subsets ...core_v1.EndpointSubset) *core_v1.Endpoints { - return &core_v1.Endpoints{ +func EndpointSlice(ns, name, serviceName string, endpoints []discovery_v1.Endpoint, ports []discovery_v1.EndpointPort) *discovery_v1.EndpointSlice { + return &discovery_v1.EndpointSlice{ ObjectMeta: meta_v1.ObjectMeta{ Name: name, Namespace: ns, + Labels: map[string]string{ + discovery_v1.LabelServiceName: serviceName, + }, }, - Subsets: subsets, + Endpoints: endpoints, + Ports: ports, } } -func Ports(eps ...core_v1.EndpointPort) []core_v1.EndpointPort { - return eps +func Endpoints(endpoints ...discovery_v1.Endpoint) []discovery_v1.Endpoint { + return endpoints } -func Port(name string, port int32) core_v1.EndpointPort { - return core_v1.EndpointPort{ - Name: name, - Port: port, - Protocol: "TCP", +func Endpoint(address string, ready bool) discovery_v1.Endpoint { + return discovery_v1.Endpoint{ + Addresses: []string{address}, + Conditions: discovery_v1.EndpointConditions{ + Ready: ptr.To(ready), + }, } } -func Addresses(ips ...string) []core_v1.EndpointAddress { - var addrs []core_v1.EndpointAddress - for _, ip := range ips { - addrs = append(addrs, core_v1.EndpointAddress{IP: ip}) +func Ports(eps ...discovery_v1.EndpointPort) []discovery_v1.EndpointPort { + return eps +} + +func Port(name string, port int32) discovery_v1.EndpointPort { + return discovery_v1.EndpointPort{ + Name: ptr.To(name), + Port: ptr.To(port), + Protocol: ptr.To(core_v1.ProtocolTCP), } - return addrs } diff --git a/internal/featuretests/v3/authorization_test.go b/internal/featuretests/v3/authorization_test.go index db9e76e3236..efbb9ae21e0 100644 --- a/internal/featuretests/v3/authorization_test.go +++ b/internal/featuretests/v3/authorization_test.go @@ -585,10 +585,10 @@ func TestAuthorization(t *testing.T) { rh.OnAdd(fixture.NewService("auth/oidc-server"). WithPorts(core_v1.ServicePort{Port: 8081})) - rh.OnAdd(featuretests.Endpoints("auth", "oidc-server", core_v1.EndpointSubset{ - Addresses: featuretests.Addresses("192.168.183.21"), - Ports: featuretests.Ports(featuretests.Port("", 8081)), - })) + rh.OnAdd(featuretests.EndpointSlice("auth", "oidc-es", "oidc-server", + featuretests.Endpoints(featuretests.Endpoint("192.168.183.21", true)), + featuretests.Ports(featuretests.Port("", 8081)), + )) rh.OnAdd(&contour_v1alpha1.ExtensionService{ ObjectMeta: fixture.ObjectMeta("auth/extension"), @@ -605,10 +605,10 @@ func TestAuthorization(t *testing.T) { rh.OnAdd(fixture.NewService("app-server"). 
WithPorts(core_v1.ServicePort{Port: 80})) - rh.OnAdd(featuretests.Endpoints("auth", "app-server", core_v1.EndpointSubset{ - Addresses: featuretests.Addresses("192.168.183.21"), - Ports: featuretests.Ports(featuretests.Port("", 80)), - })) + rh.OnAdd(featuretests.EndpointSlice("auth", "app-es", "app-server", + featuretests.Endpoints(featuretests.Endpoint("192.168.183.21", true)), + featuretests.Ports(featuretests.Port("", 80)), + )) rh.OnAdd(featuretests.TLSSecret(t, "certificate", &featuretests.ServerCertificate)) f(t, rh, c) diff --git a/internal/featuretests/v3/endpoints_test.go b/internal/featuretests/v3/endpoints_test.go index e05298c0e04..58ac8045f08 100644 --- a/internal/featuretests/v3/endpoints_test.go +++ b/internal/featuretests/v3/endpoints_test.go @@ -55,19 +55,18 @@ func TestAddRemoveEndpoints(t *testing.T) { // e1 is a simple endpoint for two hosts, and two ports // it has a long name to check that it's clustername is _not_ // hashed. - e1 := featuretests.Endpoints( + e1 := featuretests.EndpointSlice( "super-long-namespace-name-oh-boy", + "es1", "what-a-descriptive-service-name-you-must-be-so-proud", - core_v1.EndpointSubset{ - Addresses: featuretests.Addresses( - "172.16.0.2", - "172.16.0.1", - ), - Ports: featuretests.Ports( - featuretests.Port("https", 8443), - featuretests.Port("http", 8000), - ), - }, + featuretests.Endpoints( + featuretests.Endpoint("172.16.0.2", true), + featuretests.Endpoint("172.16.0.1", true), + ), + featuretests.Ports( + featuretests.Port("https", 8443), + featuretests.Port("http", 8000), + ), ) rh.OnAdd(e1) @@ -137,33 +136,34 @@ func TestAddEndpointComplicated(t *testing.T) { }), ) - e1 := featuretests.Endpoints( + e1 := featuretests.EndpointSlice( "default", + "es1", "kuard", - core_v1.EndpointSubset{ - Addresses: featuretests.Addresses( - "10.48.1.78", - ), - NotReadyAddresses: featuretests.Addresses( - "10.48.1.77", - ), - Ports: featuretests.Ports( - featuretests.Port("foo", 8080), - ), - }, - core_v1.EndpointSubset{ - Addresses: featuretests.Addresses( - "10.48.1.78", - "10.48.1.77", - ), - Ports: featuretests.Ports( - featuretests.Port("admin", 9000), - ), - }, + featuretests.Endpoints( + featuretests.Endpoint("10.48.1.78", true), + featuretests.Endpoint("10.48.1.77", false), + ), + featuretests.Ports( + featuretests.Port("foo", 8080), + ), ) - rh.OnAdd(e1) + e2 := featuretests.EndpointSlice( + "default", + "es2", + "kuard", + featuretests.Endpoints( + featuretests.Endpoint("10.48.1.78", true), + featuretests.Endpoint("10.48.1.77", true), + ), + featuretests.Ports( + featuretests.Port("admin", 9000), + ), + ) + rh.OnAdd(e2) + c.Request(endpointType).Equals(&envoy_service_discovery_v3.DiscoveryResponse{ TypeUrl: endpointType, Resources: resources(t, @@ -205,32 +205,35 @@ func TestEndpointFilter(t *testing.T) { }), ) - // a single endpoint that represents several - // cluster load assignments. - rh.OnAdd(featuretests.Endpoints( + // Same Service+addresses but should be filterable + // by port name suffix. 
+ e1 := featuretests.EndpointSlice( "default", + "es1", "kuard", - core_v1.EndpointSubset{ - Addresses: featuretests.Addresses( - "10.48.1.78", - ), - NotReadyAddresses: featuretests.Addresses( - "10.48.1.77", - ), - Ports: featuretests.Ports( - featuretests.Port("foo", 8080), - ), - }, - core_v1.EndpointSubset{ - Addresses: featuretests.Addresses( - "10.48.1.77", - "10.48.1.78", - ), - Ports: featuretests.Ports( - featuretests.Port("admin", 9000), - ), - }, - )) + featuretests.Endpoints( + featuretests.Endpoint("10.48.1.78", true), + featuretests.Endpoint("10.48.1.77", false), + ), + featuretests.Ports( + featuretests.Port("foo", 8080), + ), + ) + rh.OnAdd(e1) + + e2 := featuretests.EndpointSlice( + "default", + "es2", + "kuard", + featuretests.Endpoints( + featuretests.Endpoint("10.48.1.78", true), + featuretests.Endpoint("10.48.1.77", true), + ), + featuretests.Ports( + featuretests.Port("admin", 9000), + ), + ) + rh.OnAdd(e2) c.Request(endpointType, "default/kuard/foo").Equals(&envoy_service_discovery_v3.DiscoveryResponse{ TypeUrl: endpointType, @@ -268,12 +271,10 @@ func TestIssue602(t *testing.T) { }), ) - e1 := featuretests.Endpoints("default", "simple", core_v1.EndpointSubset{ - Addresses: featuretests.Addresses("192.168.183.24"), - Ports: featuretests.Ports( - featuretests.Port("", 8080), - ), - }) + e1 := featuretests.EndpointSlice("default", "es1", "simple", + featuretests.Endpoints(featuretests.Endpoint("192.168.183.24", true)), + featuretests.Ports(featuretests.Port("", 8080)), + ) rh.OnAdd(e1) // Assert endpoint was added @@ -287,8 +288,8 @@ func TestIssue602(t *testing.T) { TypeUrl: endpointType, }) - // e2 is the same as e1, but without endpoint subsets - e2 := featuretests.Endpoints("default", "simple") + // e2 is the same as e1, but without any actual endpoints + e2 := featuretests.EndpointSlice("default", "es1", "simple", nil, nil) rh.OnUpdate(e1, e2) c.Request(endpointType).Equals(&envoy_service_discovery_v3.DiscoveryResponse{ diff --git a/internal/featuretests/v3/extensionservice_test.go b/internal/featuretests/v3/extensionservice_test.go index b341882e4d7..a82f1fd5726 100644 --- a/internal/featuretests/v3/extensionservice_test.go +++ b/internal/featuretests/v3/extensionservice_test.go @@ -747,15 +747,15 @@ func TestExtensionService(t *testing.T) { rh.OnAdd(fixture.NewService("ns/svc1").WithPorts(core_v1.ServicePort{Port: 8081})) rh.OnAdd(fixture.NewService("ns/svc2").WithPorts(core_v1.ServicePort{Port: 8082})) - rh.OnAdd(featuretests.Endpoints("ns", "svc1", core_v1.EndpointSubset{ - Addresses: featuretests.Addresses("192.168.183.20"), - Ports: featuretests.Ports(featuretests.Port("", 8081)), - })) - - rh.OnAdd(featuretests.Endpoints("ns", "svc2", core_v1.EndpointSubset{ - Addresses: featuretests.Addresses("192.168.183.21"), - Ports: featuretests.Ports(featuretests.Port("", 8082)), - })) + rh.OnAdd(featuretests.EndpointSlice("ns", "es1", "svc1", + featuretests.Endpoints(featuretests.Endpoint("192.168.183.20", true)), + featuretests.Ports(featuretests.Port("", 8081)), + )) + + rh.OnAdd(featuretests.EndpointSlice("ns", "es2", "svc2", + featuretests.Endpoints(featuretests.Endpoint("192.168.183.21", true)), + featuretests.Ports(featuretests.Port("", 8082)), + )) f(t, rh, c) }) diff --git a/internal/featuretests/v3/featuretests.go b/internal/featuretests/v3/featuretests.go index 559f6bb9ad4..6b9acbcdf92 100644 --- a/internal/featuretests/v3/featuretests.go +++ b/internal/featuretests/v3/featuretests.go @@ -41,7 +41,7 @@ import ( "google.golang.org/grpc/credentials/insecure" 
"google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" - core_v1 "k8s.io/api/core/v1" + discovery_v1 "k8s.io/api/discovery/v1" "k8s.io/client-go/tools/cache" contour_v1 "github.com/projectcontour/contour/apis/projectcontour/v1" @@ -84,7 +84,7 @@ func setup(t *testing.T, opts ...any) (ResourceEventHandlerWrapper, *Contour, fu log := fixture.NewTestLogger(t) log.SetLevel(logrus.DebugLevel) - et := xdscache_v3.NewEndpointsTranslator(log) + et := xdscache_v3.NewEndpointSliceTranslator(log) conf := xdscache_v3.ListenerConfig{} for _, opt := range opts { @@ -199,10 +199,10 @@ func setup(t *testing.T, opts ...any) (ResourceEventHandlerWrapper, *Contour, fu require.NoError(t, err) rh := &resourceEventHandler{ - EventHandler: eh, - EndpointsHandler: et, - Sequence: eh.Sequence(), - statusUpdateCacher: statusUpdateCacher, + EventHandler: eh, + EndpointSliceHandler: et, + Sequence: eh.Sequence(), + statusUpdateCacher: statusUpdateCacher, } return rh, &Contour{ @@ -221,13 +221,13 @@ func setup(t *testing.T, opts ...any) (ResourceEventHandlerWrapper, *Contour, fu } } -// resourceEventHandler composes a contour.EventHandler and a contour.EndpointsTranslator +// resourceEventHandler composes a contour.EventHandler and a contour.EndpointSliceTranslator // into a single ResourceEventHandler type. Its event handlers are *blocking* for non-Endpoints // resources: they wait until the DAG has been rebuilt and observed, and the sequence counter // has been incremented, before returning. type resourceEventHandler struct { - EventHandler cache.ResourceEventHandler - EndpointsHandler cache.ResourceEventHandler + EventHandler cache.ResourceEventHandler + EndpointSliceHandler cache.ResourceEventHandler Sequence <-chan int @@ -240,8 +240,8 @@ func (r *resourceEventHandler) OnAdd(obj any) { } switch obj.(type) { - case *core_v1.Endpoints: - r.EndpointsHandler.OnAdd(obj, false) + case *discovery_v1.EndpointSlice: + r.EndpointSliceHandler.OnAdd(obj, false) default: r.EventHandler.OnAdd(obj, false) @@ -262,8 +262,8 @@ func (r *resourceEventHandler) OnUpdate(oldObj, newObj any) { } switch newObj.(type) { - case *core_v1.Endpoints: - r.EndpointsHandler.OnUpdate(oldObj, newObj) + case *discovery_v1.EndpointSlice: + r.EndpointSliceHandler.OnUpdate(oldObj, newObj) default: r.EventHandler.OnUpdate(oldObj, newObj) @@ -281,8 +281,8 @@ func (r *resourceEventHandler) OnDelete(obj any) { } switch obj.(type) { - case *core_v1.Endpoints: - r.EndpointsHandler.OnDelete(obj) + case *discovery_v1.EndpointSlice: + r.EndpointSliceHandler.OnDelete(obj) default: r.EventHandler.OnDelete(obj) diff --git a/internal/featuretests/v3/global_authorization_test.go b/internal/featuretests/v3/global_authorization_test.go index 36cb797386c..c2b35f58cae 100644 --- a/internal/featuretests/v3/global_authorization_test.go +++ b/internal/featuretests/v3/global_authorization_test.go @@ -825,10 +825,10 @@ func TestGlobalAuthorization(t *testing.T) { rh.OnAdd(fixture.NewService("auth/oidc-server"). 
WithPorts(core_v1.ServicePort{Port: 8081})) - rh.OnAdd(featuretests.Endpoints("auth", "oidc-server", core_v1.EndpointSubset{ - Addresses: featuretests.Addresses("192.168.183.21"), - Ports: featuretests.Ports(featuretests.Port("", 8081)), - })) + rh.OnAdd(featuretests.EndpointSlice("auth", "oidc-es", "oidc-server", + featuretests.Endpoints(featuretests.Endpoint("192.168.183.21", true)), + featuretests.Ports(featuretests.Port("", 8081)), + )) rh.OnAdd(&contour_v1alpha1.ExtensionService{ ObjectMeta: fixture.ObjectMeta("auth/extension"), @@ -845,10 +845,10 @@ func TestGlobalAuthorization(t *testing.T) { rh.OnAdd(fixture.NewService("app-server"). WithPorts(core_v1.ServicePort{Port: 80})) - rh.OnAdd(featuretests.Endpoints("auth", "app-server", core_v1.EndpointSubset{ - Addresses: featuretests.Addresses("192.168.183.21"), - Ports: featuretests.Ports(featuretests.Port("", 80)), - })) + rh.OnAdd(featuretests.EndpointSlice("auth", "app-es", "app-server", + featuretests.Endpoints(featuretests.Endpoint("192.168.183.21", true)), + featuretests.Ports(featuretests.Port("", 80)), + )) rh.OnAdd(featuretests.TLSSecret(t, "certificate", &featuretests.ServerCertificate)) diff --git a/internal/featuretests/v3/listeners_test.go b/internal/featuretests/v3/listeners_test.go index 525c7601140..d961d5c284b 100644 --- a/internal/featuretests/v3/listeners_test.go +++ b/internal/featuretests/v3/listeners_test.go @@ -43,7 +43,7 @@ import ( func customAdminPort(t *testing.T, port int) []xdscache.ResourceCache { log := fixture.NewTestLogger(t) - et := xdscache_v3.NewEndpointsTranslator(log) + et := xdscache_v3.NewEndpointSliceTranslator(log) conf := xdscache_v3.ListenerConfig{} return []xdscache.ResourceCache{ xdscache_v3.NewListenerCache( diff --git a/internal/featuretests/v3/tracing_test.go b/internal/featuretests/v3/tracing_test.go index 435361c5d60..6a9baa88999 100644 --- a/internal/featuretests/v3/tracing_test.go +++ b/internal/featuretests/v3/tracing_test.go @@ -68,10 +68,10 @@ func TestTracing(t *testing.T) { rh.OnAdd(fixture.NewService("projectcontour/otel-collector"). WithPorts(core_v1.ServicePort{Port: 4317})) - rh.OnAdd(featuretests.Endpoints("projectcontour", "otel-collector", core_v1.EndpointSubset{ - Addresses: featuretests.Addresses("10.244.41.241"), - Ports: featuretests.Ports(featuretests.Port("", 4317)), - })) + rh.OnAdd(featuretests.EndpointSlice("projectcontour", "otel-es", "otel-collector", + featuretests.Endpoints(featuretests.Endpoint("10.244.41.241", true)), + featuretests.Ports(featuretests.Port("", 4317)), + )) rh.OnAdd(&contour_v1alpha1.ExtensionService{ ObjectMeta: fixture.ObjectMeta("projectcontour/otel-collector"), @@ -89,10 +89,10 @@ func TestTracing(t *testing.T) { rh.OnAdd(fixture.NewService("projectcontour/app-server"). 
WithPorts(core_v1.ServicePort{Port: 80})) - rh.OnAdd(featuretests.Endpoints("projectcontour", "app-server", core_v1.EndpointSubset{ - Addresses: featuretests.Addresses("10.244.184.102"), - Ports: featuretests.Ports(featuretests.Port("", 80)), - })) + rh.OnAdd(featuretests.EndpointSlice("projectcontour", "app-es", "app-server", + featuretests.Endpoints(featuretests.Endpoint("10.244.184.102", true)), + featuretests.Ports(featuretests.Port("", 80)), + )) p := &contour_v1.HTTPProxy{ ObjectMeta: meta_v1.ObjectMeta{ diff --git a/internal/featuretests/v3/upstreamtls_test.go b/internal/featuretests/v3/upstreamtls_test.go index 584be3312ca..4f8b432936e 100644 --- a/internal/featuretests/v3/upstreamtls_test.go +++ b/internal/featuretests/v3/upstreamtls_test.go @@ -163,10 +163,10 @@ func TestUpstreamTLSWithExtensionService(t *testing.T) { rh.OnAdd(fixture.NewService("ns/svc1").WithPorts(core_v1.ServicePort{Port: 8081})) - rh.OnAdd(featuretests.Endpoints("ns", "svc1", core_v1.EndpointSubset{ - Addresses: featuretests.Addresses("192.168.183.20"), - Ports: featuretests.Ports(featuretests.Port("", 8081)), - })) + rh.OnAdd(featuretests.EndpointSlice("ns", "svc1-es", "svc1", + featuretests.Endpoints(featuretests.Endpoint("192.168.183.20", true)), + featuretests.Ports(featuretests.Port("", 8081)), + )) ext := &contour_v1alpha1.ExtensionService{ ObjectMeta: fixture.ObjectMeta("ns/ext"), diff --git a/internal/xdscache/v3/contour_test.go b/internal/xdscache/v3/contour_test.go index bce148fd91b..156c5bc7bf5 100644 --- a/internal/xdscache/v3/contour_test.go +++ b/internal/xdscache/v3/contour_test.go @@ -14,29 +14,10 @@ package v3 import ( - core_v1 "k8s.io/api/core/v1" discovery_v1 "k8s.io/api/discovery/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func endpoints(ns, name string, subsets ...core_v1.EndpointSubset) *core_v1.Endpoints { - return &core_v1.Endpoints{ - ObjectMeta: meta_v1.ObjectMeta{ - Name: name, - Namespace: ns, - }, - Subsets: subsets, - } -} - -func addresses(ips ...string) []core_v1.EndpointAddress { - var addrs []core_v1.EndpointAddress - for _, ip := range ips { - addrs = append(addrs, core_v1.EndpointAddress{IP: ip}) - } - return addrs -} - func endpointSlice(ns, name, service string, addressType discovery_v1.AddressType, endpoints []discovery_v1.Endpoint, ports []discovery_v1.EndpointPort) *discovery_v1.EndpointSlice { return &discovery_v1.EndpointSlice{ ObjectMeta: meta_v1.ObjectMeta{ diff --git a/internal/xdscache/v3/endpointslicetranslator.go b/internal/xdscache/v3/endpointslicetranslator.go index 0e2fcb9740c..6a04ede828b 100644 --- a/internal/xdscache/v3/endpointslicetranslator.go +++ b/internal/xdscache/v3/endpointslicetranslator.go @@ -35,6 +35,11 @@ import ( "github.com/projectcontour/contour/internal/sorter" ) +type ( + LocalityEndpoints = envoy_config_endpoint_v3.LocalityLbEndpoints + LoadBalancingEndpoint = envoy_config_endpoint_v3.LbEndpoint +) + // RecalculateEndpoints generates a slice of LoadBalancingEndpoint // resources by matching the given service port to the given discovery_v1.EndpointSlice. // endpointSliceMap may be nil, in which case, the result is also nil. @@ -356,6 +361,26 @@ func (e *EndpointSliceTranslator) OnChange(root *dag.DAG) { } } +// equal returns true if a and b are the same length, have the same set +// of keys, and have proto-equivalent values for each key, or false otherwise. 
+func equal(a, b map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment) bool { + if len(a) != len(b) { + return false + } + + for k := range a { + if _, ok := b[k]; !ok { + return false + } + + if !proto.Equal(a[k], b[k]) { + return false + } + } + + return true +} + func (e *EndpointSliceTranslator) OnAdd(obj any, _ bool) { switch obj := obj.(type) { case *discovery_v1.EndpointSlice: diff --git a/internal/xdscache/v3/endpointslicetranslator_test.go b/internal/xdscache/v3/endpointslicetranslator_test.go index a902b5ae4f9..3515e7330cb 100644 --- a/internal/xdscache/v3/endpointslicetranslator_test.go +++ b/internal/xdscache/v3/endpointslicetranslator_test.go @@ -16,9 +16,12 @@ package v3 import ( "testing" + envoy_config_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" envoy_config_endpoint_v3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/wrapperspb" core_v1 "k8s.io/api/core/v1" discovery_v1 "k8s.io/api/discovery/v1" "k8s.io/utils/ptr" @@ -29,6 +32,14 @@ import ( "github.com/projectcontour/contour/internal/protobuf" ) +type simpleObserver struct { + updated bool +} + +func (s *simpleObserver) Refresh() { + s.updated = true +} + func TestEndpointSliceTranslatorContents(t *testing.T) { tests := map[string]struct { contents map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment @@ -1142,3 +1153,137 @@ func TestEndpointSliceTranslatorDefaultWeightedService(t *testing.T) { protobuf.ExpectEqual(t, want, endpointSliceTranslator.Contents()) } + +func TestEqual(t *testing.T) { + tests := map[string]struct { + a, b map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment + want bool + }{ + "both nil": { + a: nil, + b: nil, + want: true, + }, + "one nil, one empty": { + a: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{}, + b: nil, + want: true, + }, + "both empty": { + a: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{}, + b: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{}, + want: true, + }, + "a is an incomplete subset of b": { + a: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{ + "a": {ClusterName: "a"}, + "b": {ClusterName: "b"}, + }, + b: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{ + "a": {ClusterName: "a"}, + "b": {ClusterName: "b"}, + "c": {ClusterName: "c"}, + }, + want: false, + }, + "b is an incomplete subset of a": { + a: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{ + "a": {ClusterName: "a"}, + "b": {ClusterName: "b"}, + "c": {ClusterName: "c"}, + }, + b: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{ + "a": {ClusterName: "a"}, + "b": {ClusterName: "b"}, + }, + want: false, + }, + "a and b have the same keys, different values": { + a: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{ + "a": {ClusterName: "a"}, + "b": {ClusterName: "b"}, + "c": {ClusterName: "c"}, + }, + b: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{ + "a": {ClusterName: "a"}, + "b": {ClusterName: "b"}, + "c": {ClusterName: "different"}, + }, + want: false, + }, + "a and b have the same values, different keys": { + a: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{ + "a": {ClusterName: "a"}, + "b": {ClusterName: "b"}, + "c": {ClusterName: "c"}, + }, + b: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{ + "d": {ClusterName: "a"}, + "e": {ClusterName: "b"}, + "f": {ClusterName: "c"}, 
+ }, + want: false, + }, + "a and b have the same keys, same values": { + a: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{ + "a": {ClusterName: "a"}, + "b": {ClusterName: "b"}, + "c": {ClusterName: "c"}, + }, + b: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{ + "a": {ClusterName: "a"}, + "b": {ClusterName: "b"}, + "c": {ClusterName: "c"}, + }, + want: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + assert.Equal(t, tc.want, equal(tc.a, tc.b)) + }) + } +} + +func clusterloadassignments(clas ...*envoy_config_endpoint_v3.ClusterLoadAssignment) map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment { + m := make(map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment) + for _, cla := range clas { + m[cla.ClusterName] = cla + } + return m +} + +func weightedHealthcheckEndpoints(weight, healthcheckPort uint32, addrs ...*envoy_config_core_v3.Address) []*envoy_config_endpoint_v3.LocalityLbEndpoints { + lbendpoints := healthcheckEndpoints(healthcheckPort, addrs...) + lbendpoints[0].LoadBalancingWeight = wrapperspb.UInt32(weight) + return lbendpoints +} + +func healthcheckEndpoints(healthcheckPort uint32, addrs ...*envoy_config_core_v3.Address) []*envoy_config_endpoint_v3.LocalityLbEndpoints { + lbendpoints := make([]*envoy_config_endpoint_v3.LbEndpoint, 0, len(addrs)) + for _, addr := range addrs { + lbendpoints = append(lbendpoints, healthCheckLBEndpoint(addr, healthcheckPort)) + } + return []*envoy_config_endpoint_v3.LocalityLbEndpoints{{ + LbEndpoints: lbendpoints, + }} +} + +// healthCheckLBEndpoint creates a new LbEndpoint include healthCheckConfig +func healthCheckLBEndpoint(addr *envoy_config_core_v3.Address, healthCheckPort uint32) *envoy_config_endpoint_v3.LbEndpoint { + var hc *envoy_config_endpoint_v3.Endpoint_HealthCheckConfig + if healthCheckPort != 0 { + hc = &envoy_config_endpoint_v3.Endpoint_HealthCheckConfig{ + PortValue: healthCheckPort, + } + } + return &envoy_config_endpoint_v3.LbEndpoint{ + HostIdentifier: &envoy_config_endpoint_v3.LbEndpoint_Endpoint{ + Endpoint: &envoy_config_endpoint_v3.Endpoint{ + Address: addr, + HealthCheckConfig: hc, + }, + }, + } +} diff --git a/internal/xdscache/v3/endpointstranslator.go b/internal/xdscache/v3/endpointstranslator.go deleted file mode 100644 index 6a588c3ebdb..00000000000 --- a/internal/xdscache/v3/endpointstranslator.go +++ /dev/null @@ -1,442 +0,0 @@ -// Copyright Project Contour Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v3 - -import ( - "fmt" - "sort" - "sync" - - envoy_config_endpoint_v3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" - resource "github.com/envoyproxy/go-control-plane/pkg/resource/v3" - "github.com/sirupsen/logrus" - "google.golang.org/protobuf/proto" - core_v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/cache" - - "github.com/projectcontour/contour/internal/contour" - "github.com/projectcontour/contour/internal/dag" - envoy_v3 "github.com/projectcontour/contour/internal/envoy/v3" - "github.com/projectcontour/contour/internal/k8s" - "github.com/projectcontour/contour/internal/protobuf" - "github.com/projectcontour/contour/internal/sorter" -) - -type ( - LocalityEndpoints = envoy_config_endpoint_v3.LocalityLbEndpoints - LoadBalancingEndpoint = envoy_config_endpoint_v3.LbEndpoint -) - -// RecalculateEndpoints generates a slice of LoadBalancingEndpoint -// resources by matching the given service port to the given core_v1.Endpoints. -// eps may be nil, in which case, the result is also nil. -func RecalculateEndpoints(port, healthPort core_v1.ServicePort, eps *core_v1.Endpoints) []*LoadBalancingEndpoint { - if eps == nil { - return nil - } - - var lb []*LoadBalancingEndpoint - var healthCheckPort int32 - - for _, s := range eps.Subsets { - // Skip subsets without ready addresses. - if len(s.Addresses) < 1 { - continue - } - - for _, endpointPort := range s.Ports { - if endpointPort.Protocol != core_v1.ProtocolTCP { - // NOTE: we only support "TCP", which is the default. - continue - } - - // Set healthCheckPort only when port and healthPort are different. - if healthPort.Name != "" && healthPort.Name == endpointPort.Name && port.Name != healthPort.Name { - healthCheckPort = endpointPort.Port - } - - // If the port isn't named, it must be the - // only Service port, so it's a match by - // definition. Otherwise, only take endpoint - // ports that match the service port name. - if port.Name != "" && port.Name != endpointPort.Name { - continue - } - - // If we matched this port, collect Envoy endpoints for all the ready addresses. - addresses := append([]core_v1.EndpointAddress{}, s.Addresses...) // Shallow copy. - sort.Slice(addresses, func(i, j int) bool { return addresses[i].IP < addresses[j].IP }) - - for _, a := range addresses { - addr := envoy_v3.SocketAddress(a.IP, int(endpointPort.Port)) - lb = append(lb, envoy_v3.LBEndpoint(addr)) - } - } - } - - if healthCheckPort > 0 { - for _, lbEndpoint := range lb { - lbEndpoint.GetEndpoint().HealthCheckConfig = envoy_v3.HealthCheckConfig(healthCheckPort) - } - } - - return lb -} - -// EndpointsCache is a cache of Endpoint and ServiceCluster objects. -type EndpointsCache struct { - mu sync.Mutex // Protects all fields. - - // Slice of stale clusters. A stale cluster is one that - // needs to be recalculated. Clusters can be added to the stale - // slice due to changes in Endpoints or due to a DAG rebuild. - stale []*dag.ServiceCluster - - // Index of ServiceClusters. ServiceClusters are indexed - // by the name of their Kubernetes Services. This makes it - // easy to determine which Endpoints affect which ServiceCluster. - services map[types.NamespacedName][]*dag.ServiceCluster - - // Cache of endpoints, indexed by name. - endpoints map[types.NamespacedName]*core_v1.Endpoints -} - -// Recalculate regenerates all the ClusterLoadAssignments from the -// cached Endpoints and stale ServiceClusters. 
A ClusterLoadAssignment -// will be generated for every stale ServerCluster, however, if there -// are no endpoints for the Services in the ServiceCluster, the -// ClusterLoadAssignment will be empty. -func (c *EndpointsCache) Recalculate() map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment { - c.mu.Lock() - defer c.mu.Unlock() - - assignments := map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{} - for _, cluster := range c.stale { - // Clusters can be in the stale list multiple times; - // skip to avoid duplicate recalculations. - if _, ok := assignments[cluster.ClusterName]; ok { - continue - } - - cla := envoy_config_endpoint_v3.ClusterLoadAssignment{ - ClusterName: cluster.ClusterName, - Endpoints: nil, - Policy: nil, - } - - // Look up each service, and if we have endpoints for that service, - // attach them as a new LocalityEndpoints resource2. - for _, w := range cluster.Services { - n := types.NamespacedName{Namespace: w.ServiceNamespace, Name: w.ServiceName} - if lb := RecalculateEndpoints(w.ServicePort, w.HealthPort, c.endpoints[n]); lb != nil { - // Append the new set of endpoints. Users are allowed to set the load - // balancing weight to 0, which we reflect to Envoy as nil in order to - // assign no load to that locality. - cla.Endpoints = append( - cla.Endpoints, - &LocalityEndpoints{ - LbEndpoints: lb, - LoadBalancingWeight: protobuf.UInt32OrNil(w.Weight), - }, - ) - } - } - - assignments[cla.ClusterName] = &cla - } - - c.stale = nil - return assignments -} - -// SetClusters replaces the cache of ServiceCluster resources. All -// the added clusters will be marked stale. -func (c *EndpointsCache) SetClusters(clusters []*dag.ServiceCluster) error { - c.mu.Lock() - defer c.mu.Unlock() - - // Keep a local index to start with so that errors don't cause - // partial failure. - serviceIndex := map[types.NamespacedName][]*dag.ServiceCluster{} - - // Reindex the cluster so that we can find them by service name. - for _, cluster := range clusters { - if err := cluster.Validate(); err != nil { - return fmt.Errorf("invalid ServiceCluster %q: %w", cluster.ClusterName, err) - } - - // Make sure service clusters with default weights are balanced. - cluster.Rebalance() - - for _, s := range cluster.Services { - name := types.NamespacedName{ - Namespace: s.ServiceNamespace, - Name: s.ServiceName, - } - - // Create the slice entry if we have not indexed this service yet. - entry := serviceIndex[name] - if entry == nil { - entry = []*dag.ServiceCluster{} - } - - serviceIndex[name] = append(entry, cluster) - } - } - - c.stale = clusters - c.services = serviceIndex - - return nil -} - -// UpdateEndpoint adds eps to the cache, or replaces it if it is -// already cached. Any ServiceClusters that are backed by a Service -// that eps belongs become stale. Returns a boolean indicating whether -// any ServiceClusters use eps or not. -func (c *EndpointsCache) UpdateEndpoint(eps *core_v1.Endpoints) bool { - c.mu.Lock() - defer c.mu.Unlock() - - name := k8s.NamespacedNameOf(eps) - c.endpoints[name] = eps.DeepCopy() - - // If any service clusters include this endpoint, mark them - // all as stale. - if affected := c.services[name]; len(affected) > 0 { - c.stale = append(c.stale, affected...) - return true - } - - return false -} - -// DeleteEndpoint deletes eps from the cache. Any ServiceClusters -// that are backed by a Service that eps belongs become stale. Returns -// a boolean indicating whether any ServiceClusters use eps or not. 
-func (c *EndpointsCache) DeleteEndpoint(eps *core_v1.Endpoints) bool { - c.mu.Lock() - defer c.mu.Unlock() - - name := k8s.NamespacedNameOf(eps) - delete(c.endpoints, name) - - // If any service clusters include this endpoint, mark them - // all as stale. - if affected := c.services[name]; len(affected) > 0 { - c.stale = append(c.stale, affected...) - return true - } - - return false -} - -// NewEndpointsTranslator allocates a new endpoints translator. -func NewEndpointsTranslator(log logrus.FieldLogger) *EndpointsTranslator { - return &EndpointsTranslator{ - FieldLogger: log, - entries: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{}, - cache: EndpointsCache{ - stale: nil, - services: map[types.NamespacedName][]*dag.ServiceCluster{}, - endpoints: map[types.NamespacedName]*core_v1.Endpoints{}, - }, - } -} - -// A EndpointsTranslator translates Kubernetes Endpoints objects into Envoy -// ClusterLoadAssignment resources. -type EndpointsTranslator struct { - // Observer notifies when the endpoints cache has been updated. - Observer contour.Observer - - logrus.FieldLogger - - cache EndpointsCache - - mu sync.Mutex // Protects entries. - entries map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment -} - -// Merge combines the given entries with the existing entries in the -// EndpointsTranslator. If the same key exists in both maps, an existing entry -// is replaced. -func (e *EndpointsTranslator) Merge(entries map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment) { - e.mu.Lock() - defer e.mu.Unlock() - - for k, v := range entries { - e.entries[k] = v - } -} - -// OnChange observes DAG rebuild events. -func (e *EndpointsTranslator) OnChange(root *dag.DAG) { - clusters := []*dag.ServiceCluster{} - names := map[string]bool{} - - for _, svc := range root.GetServiceClusters() { - if err := svc.Validate(); err != nil { - e.WithError(err).Errorf("dropping invalid service cluster %q", svc.ClusterName) - } else if _, ok := names[svc.ClusterName]; ok { - e.Debugf("dropping service cluster with duplicate name %q", svc.ClusterName) - } else { - e.Debugf("added ServiceCluster %q from DAG", svc.ClusterName) - clusters = append(clusters, svc.DeepCopy()) - names[svc.ClusterName] = true - } - } - - // Update the cache with the new clusters. - if err := e.cache.SetClusters(clusters); err != nil { - e.WithError(err).Error("failed to cache service clusters") - } - - // After rebuilding the DAG, the service cluster could be - // completely different. Some could be added, and some could - // be removed. Since we reset the cluster cache above, all - // the load assignments will be recalculated and we can just - // set the entries rather than merging them. - entries := e.cache.Recalculate() - - // Only update and notify if entries has changed. - changed := false - - e.mu.Lock() - if !equal(e.entries, entries) { - e.entries = entries - changed = true - } - e.mu.Unlock() - - if changed { - e.Debug("cluster load assignments changed, notifying waiters") - if e.Observer != nil { - e.Observer.Refresh() - } - } else { - e.Debug("cluster load assignments did not change") - } -} - -// equal returns true if a and b are the same length, have the same set -// of keys, and have proto-equivalent values for each key, or false otherwise. 
-func equal(a, b map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment) bool { - if len(a) != len(b) { - return false - } - - for k := range a { - if _, ok := b[k]; !ok { - return false - } - - if !proto.Equal(a[k], b[k]) { - return false - } - } - - return true -} - -func (e *EndpointsTranslator) OnAdd(obj any, _ bool) { - switch obj := obj.(type) { - case *core_v1.Endpoints: - if !e.cache.UpdateEndpoint(obj) { - return - } - - e.WithField("endpoint", k8s.NamespacedNameOf(obj)).Debug("Endpoint is in use by a ServiceCluster, recalculating ClusterLoadAssignments") - e.Merge(e.cache.Recalculate()) - if e.Observer != nil { - e.Observer.Refresh() - } - default: - e.Errorf("OnAdd unexpected type %T: %#v", obj, obj) - } -} - -func (e *EndpointsTranslator) OnUpdate(oldObj, newObj any) { - switch newObj := newObj.(type) { - case *core_v1.Endpoints: - oldObj, ok := oldObj.(*core_v1.Endpoints) - if !ok { - e.Errorf("OnUpdate endpoints %#v received invalid oldObj %T; %#v", newObj, oldObj, oldObj) - return - } - - // Skip computation if either old and new services or - // endpoints are equal (thus also handling nil). - if oldObj == newObj { - return - } - - // If there are no endpoints in this object, and the old - // object also had zero endpoints, ignore this update - // to avoid sending a noop notification to watchers. - if len(oldObj.Subsets) == 0 && len(newObj.Subsets) == 0 { - return - } - - if !e.cache.UpdateEndpoint(newObj) { - return - } - - e.WithField("endpoint", k8s.NamespacedNameOf(newObj)).Debug("Endpoint is in use by a ServiceCluster, recalculating ClusterLoadAssignments") - e.Merge(e.cache.Recalculate()) - if e.Observer != nil { - e.Observer.Refresh() - } - default: - e.Errorf("OnUpdate unexpected type %T: %#v", newObj, newObj) - } -} - -func (e *EndpointsTranslator) OnDelete(obj any) { - switch obj := obj.(type) { - case *core_v1.Endpoints: - if !e.cache.DeleteEndpoint(obj) { - return - } - - e.WithField("endpoint", k8s.NamespacedNameOf(obj)).Debug("Endpoint was in use by a ServiceCluster, recalculating ClusterLoadAssignments") - e.Merge(e.cache.Recalculate()) - if e.Observer != nil { - e.Observer.Refresh() - } - case cache.DeletedFinalStateUnknown: - e.OnDelete(obj.Obj) // recurse into ourselves with the tombstoned value - default: - e.Errorf("OnDelete unexpected type %T: %#v", obj, obj) - } -} - -// Contents returns a copy of the contents of the cache. -func (e *EndpointsTranslator) Contents() []proto.Message { - e.mu.Lock() - defer e.mu.Unlock() - - values := make([]*envoy_config_endpoint_v3.ClusterLoadAssignment, 0, len(e.entries)) - for _, v := range e.entries { - values = append(values, v) - } - - sort.Stable(sorter.For(values)) - return protobuf.AsMessages(values) -} - -func (*EndpointsTranslator) TypeURL() string { return resource.EndpointType } - -func (e *EndpointsTranslator) SetObserver(observer contour.Observer) { e.Observer = observer } diff --git a/internal/xdscache/v3/endpointstranslator_test.go b/internal/xdscache/v3/endpointstranslator_test.go deleted file mode 100644 index 7ec53cf8e90..00000000000 --- a/internal/xdscache/v3/endpointstranslator_test.go +++ /dev/null @@ -1,1066 +0,0 @@ -// Copyright Project Contour Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3 - -import ( - "testing" - - envoy_config_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - envoy_config_endpoint_v3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/wrapperspb" - core_v1 "k8s.io/api/core/v1" - - "github.com/projectcontour/contour/internal/dag" - envoy_v3 "github.com/projectcontour/contour/internal/envoy/v3" - "github.com/projectcontour/contour/internal/fixture" - "github.com/projectcontour/contour/internal/protobuf" -) - -func TestEndpointsTranslatorContents(t *testing.T) { - tests := map[string]struct { - contents map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment - want []proto.Message - }{ - "empty": { - contents: nil, - want: nil, - }, - "simple": { - contents: clusterloadassignments( - envoy_v3.ClusterLoadAssignment("default/httpbin-org", - envoy_v3.SocketAddress("10.10.10.10", 80), - ), - ), - want: []proto.Message{ - envoy_v3.ClusterLoadAssignment("default/httpbin-org", - envoy_v3.SocketAddress("10.10.10.10", 80), - ), - }, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - et := NewEndpointsTranslator(fixture.NewTestLogger(t)) - et.entries = tc.contents - got := et.Contents() - protobuf.ExpectEqual(t, tc.want, got) - }) - } -} - -func TestEndpointsTranslatorAddEndpoints(t *testing.T) { - clusters := []*dag.ServiceCluster{ - { - ClusterName: "default/httpbin-org/a", - Services: []dag.WeightedService{ - { - Weight: 1, - ServiceName: "httpbin-org", - ServiceNamespace: "default", - ServicePort: core_v1.ServicePort{Name: "a"}, - }, - }, - }, - { - ClusterName: "default/httpbin-org/b", - Services: []dag.WeightedService{ - { - Weight: 1, - ServiceName: "httpbin-org", - ServiceNamespace: "default", - ServicePort: core_v1.ServicePort{Name: "b"}, - }, - }, - }, - { - ClusterName: "default/simple", - Services: []dag.WeightedService{ - { - Weight: 1, - ServiceName: "simple", - ServiceNamespace: "default", - ServicePort: core_v1.ServicePort{}, - }, - }, - }, - { - ClusterName: "default/healthcheck-port", - Services: []dag.WeightedService{ - { - Weight: 1, - ServiceName: "healthcheck-port", - ServiceNamespace: "default", - ServicePort: core_v1.ServicePort{Name: "a"}, - HealthPort: core_v1.ServicePort{Name: "health", Port: 8998}, - }, - }, - }, - } - - tests := map[string]struct { - ep *core_v1.Endpoints - want []proto.Message - wantUpdate bool - }{ - "simple": { - ep: endpoints("default", "simple", core_v1.EndpointSubset{ - Addresses: addresses("192.168.183.24"), - Ports: ports( - port("", 8080), - ), - }), - want: []proto.Message{ - &envoy_config_endpoint_v3.ClusterLoadAssignment{ClusterName: "default/healthcheck-port"}, - &envoy_config_endpoint_v3.ClusterLoadAssignment{ClusterName: "default/httpbin-org/a"}, - &envoy_config_endpoint_v3.ClusterLoadAssignment{ClusterName: "default/httpbin-org/b"}, - &envoy_config_endpoint_v3.ClusterLoadAssignment{ - ClusterName: "default/simple", - Endpoints: 
envoy_v3.WeightedEndpoints(1, envoy_v3.SocketAddress("192.168.183.24", 8080)), - }, - }, - wantUpdate: true, - }, - "adding an Endpoints not used by a ServiceCluster should not trigger a recalculation": { - ep: endpoints("default", "not-used-endpoint", core_v1.EndpointSubset{ - Addresses: addresses("192.168.183.24"), - Ports: ports( - port("", 8080), - ), - }), - want: nil, - wantUpdate: false, - }, - "multiple addresses": { - ep: endpoints("default", "simple", core_v1.EndpointSubset{ - Addresses: addresses( - "50.17.192.147", - "50.17.206.192", - "50.19.99.160", - "23.23.247.89", - ), - Ports: ports( - port("", 80), - ), - }), - want: []proto.Message{ - &envoy_config_endpoint_v3.ClusterLoadAssignment{ClusterName: "default/healthcheck-port"}, - &envoy_config_endpoint_v3.ClusterLoadAssignment{ClusterName: "default/httpbin-org/a"}, - &envoy_config_endpoint_v3.ClusterLoadAssignment{ClusterName: "default/httpbin-org/b"}, - &envoy_config_endpoint_v3.ClusterLoadAssignment{ - ClusterName: "default/simple", - Endpoints: envoy_v3.WeightedEndpoints(1, - envoy_v3.SocketAddress("23.23.247.89", 80), // addresses should be sorted - envoy_v3.SocketAddress("50.17.192.147", 80), - envoy_v3.SocketAddress("50.17.206.192", 80), - envoy_v3.SocketAddress("50.19.99.160", 80), - ), - }, - }, - wantUpdate: true, - }, - "multiple ports": { - ep: endpoints("default", "httpbin-org", core_v1.EndpointSubset{ - Addresses: addresses( - "10.10.1.1", - ), - Ports: ports( - port("b", 309), - port("a", 8675), - ), - }), - want: []proto.Message{ - &envoy_config_endpoint_v3.ClusterLoadAssignment{ClusterName: "default/healthcheck-port"}, - // Results should be sorted by cluster name. - &envoy_config_endpoint_v3.ClusterLoadAssignment{ - ClusterName: "default/httpbin-org/a", - Endpoints: envoy_v3.WeightedEndpoints(1, envoy_v3.SocketAddress("10.10.1.1", 8675)), - }, - &envoy_config_endpoint_v3.ClusterLoadAssignment{ - ClusterName: "default/httpbin-org/b", - Endpoints: envoy_v3.WeightedEndpoints(1, envoy_v3.SocketAddress("10.10.1.1", 309)), - }, - &envoy_config_endpoint_v3.ClusterLoadAssignment{ClusterName: "default/simple"}, - }, - wantUpdate: true, - }, - "cartesian product": { - ep: endpoints("default", "httpbin-org", core_v1.EndpointSubset{ - Addresses: addresses( - "10.10.2.2", - "10.10.1.1", - ), - Ports: ports( - port("b", 309), - port("a", 8675), - ), - }), - want: []proto.Message{ - &envoy_config_endpoint_v3.ClusterLoadAssignment{ClusterName: "default/healthcheck-port"}, - &envoy_config_endpoint_v3.ClusterLoadAssignment{ - ClusterName: "default/httpbin-org/a", - Endpoints: envoy_v3.WeightedEndpoints(1, - envoy_v3.SocketAddress("10.10.1.1", 8675), // addresses should be sorted - envoy_v3.SocketAddress("10.10.2.2", 8675), - ), - }, - &envoy_config_endpoint_v3.ClusterLoadAssignment{ - ClusterName: "default/httpbin-org/b", - Endpoints: envoy_v3.WeightedEndpoints(1, - envoy_v3.SocketAddress("10.10.1.1", 309), - envoy_v3.SocketAddress("10.10.2.2", 309), - ), - }, - &envoy_config_endpoint_v3.ClusterLoadAssignment{ClusterName: "default/simple"}, - }, - wantUpdate: true, - }, - "not ready": { - ep: endpoints("default", "httpbin-org", core_v1.EndpointSubset{ - Addresses: addresses( - "10.10.1.1", - ), - NotReadyAddresses: addresses( - "10.10.2.2", - ), - Ports: ports( - port("a", 8675), - ), - }, core_v1.EndpointSubset{ - Addresses: addresses( - "10.10.2.2", - "10.10.1.1", - ), - Ports: ports( - port("b", 309), - ), - }), - want: []proto.Message{ - &envoy_config_endpoint_v3.ClusterLoadAssignment{ClusterName: 
"default/healthcheck-port"}, - &envoy_config_endpoint_v3.ClusterLoadAssignment{ - ClusterName: "default/httpbin-org/a", - Endpoints: envoy_v3.WeightedEndpoints(1, - envoy_v3.SocketAddress("10.10.1.1", 8675), - ), - }, - &envoy_config_endpoint_v3.ClusterLoadAssignment{ - ClusterName: "default/httpbin-org/b", - Endpoints: envoy_v3.WeightedEndpoints(1, - envoy_v3.SocketAddress("10.10.1.1", 309), - envoy_v3.SocketAddress("10.10.2.2", 309), - ), - }, - &envoy_config_endpoint_v3.ClusterLoadAssignment{ClusterName: "default/simple"}, - }, - wantUpdate: true, - }, - "health port": { - ep: endpoints("default", "healthcheck-port", core_v1.EndpointSubset{ - Addresses: addresses("10.10.1.1"), - Ports: ports( - port("a", 309), - port("health", 8998), - ), - }), - want: []proto.Message{ - &envoy_config_endpoint_v3.ClusterLoadAssignment{ - ClusterName: "default/healthcheck-port", - Endpoints: weightedHealthcheckEndpoints(1, 8998, - envoy_v3.SocketAddress("10.10.1.1", 309), - ), - }, - &envoy_config_endpoint_v3.ClusterLoadAssignment{ClusterName: "default/httpbin-org/a"}, - &envoy_config_endpoint_v3.ClusterLoadAssignment{ClusterName: "default/httpbin-org/b"}, - &envoy_config_endpoint_v3.ClusterLoadAssignment{ClusterName: "default/simple"}, - }, - wantUpdate: true, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - et := NewEndpointsTranslator(fixture.NewTestLogger(t)) - observer := &simpleObserver{} - et.Observer = observer - - require.NoError(t, et.cache.SetClusters(clusters)) - et.OnAdd(tc.ep, false) - got := et.Contents() - protobuf.ExpectEqual(t, tc.want, got) - require.Equal(t, tc.wantUpdate, observer.updated) - }) - } -} - -type simpleObserver struct { - updated bool -} - -func (s *simpleObserver) Refresh() { - s.updated = true -} - -func TestEndpointsTranslatorRemoveEndpoints(t *testing.T) { - clusters := []*dag.ServiceCluster{ - { - ClusterName: "default/simple", - Services: []dag.WeightedService{ - { - Weight: 1, - ServiceName: "simple", - ServiceNamespace: "default", - ServicePort: core_v1.ServicePort{}, - }, - }, - }, - { - ClusterName: "super-long-namespace-name-oh-boy/what-a-descriptive-service-name-you-must-be-so-proud/http", - Services: []dag.WeightedService{ - { - Weight: 1, - ServiceName: "what-a-descriptive-service-name-you-must-be-so-proud", - ServiceNamespace: "super-long-namespace-name-oh-boy", - ServicePort: core_v1.ServicePort{Name: "http"}, - }, - }, - }, - { - ClusterName: "super-long-namespace-name-oh-boy/what-a-descriptive-service-name-you-must-be-so-proud/https", - Services: []dag.WeightedService{ - { - Weight: 1, - ServiceName: "what-a-descriptive-service-name-you-must-be-so-proud", - ServiceNamespace: "super-long-namespace-name-oh-boy", - ServicePort: core_v1.ServicePort{Name: "https"}, - }, - }, - }, - } - - tests := map[string]struct { - setup func(*EndpointsTranslator) - ep *core_v1.Endpoints - want []proto.Message - wantUpdate bool - }{ - "remove existing": { - setup: func(et *EndpointsTranslator) { - et.OnAdd(endpoints("default", "simple", core_v1.EndpointSubset{ - Addresses: addresses("192.168.183.24"), - Ports: ports( - port("", 8080), - ), - }), false) - }, - ep: endpoints("default", "simple", core_v1.EndpointSubset{ - Addresses: addresses("192.168.183.24"), - Ports: ports( - port("", 8080), - ), - }), - want: []proto.Message{ - envoy_v3.ClusterLoadAssignment("default/simple"), - envoy_v3.ClusterLoadAssignment("super-long-namespace-name-oh-boy/what-a-descriptive-service-name-you-must-be-so-proud/http"), - 
envoy_v3.ClusterLoadAssignment("super-long-namespace-name-oh-boy/what-a-descriptive-service-name-you-must-be-so-proud/https"), - }, - wantUpdate: true, - }, - "removing an Endpoints not used by a ServiceCluster should not trigger a recalculation": { - setup: func(et *EndpointsTranslator) { - et.OnAdd(endpoints("default", "simple", core_v1.EndpointSubset{ - Addresses: addresses("192.168.183.24"), - Ports: ports( - port("", 8080), - ), - }), false) - }, - ep: endpoints("default", "different", core_v1.EndpointSubset{ - Addresses: addresses("192.168.183.24"), - Ports: ports( - port("", 8080), - ), - }), - want: []proto.Message{ - &envoy_config_endpoint_v3.ClusterLoadAssignment{ - ClusterName: "default/simple", - Endpoints: envoy_v3.WeightedEndpoints(1, envoy_v3.SocketAddress("192.168.183.24", 8080)), - }, - envoy_v3.ClusterLoadAssignment("super-long-namespace-name-oh-boy/what-a-descriptive-service-name-you-must-be-so-proud/http"), - envoy_v3.ClusterLoadAssignment("super-long-namespace-name-oh-boy/what-a-descriptive-service-name-you-must-be-so-proud/https"), - }, - wantUpdate: false, - }, - "remove non existent": { - setup: func(*EndpointsTranslator) {}, - ep: endpoints("default", "simple", core_v1.EndpointSubset{ - Addresses: addresses("192.168.183.24"), - Ports: ports( - port("", 8080), - ), - }), - want: []proto.Message{ - envoy_v3.ClusterLoadAssignment("default/simple"), - envoy_v3.ClusterLoadAssignment("super-long-namespace-name-oh-boy/what-a-descriptive-service-name-you-must-be-so-proud/http"), - envoy_v3.ClusterLoadAssignment("super-long-namespace-name-oh-boy/what-a-descriptive-service-name-you-must-be-so-proud/https"), - }, - wantUpdate: true, - }, - "remove long name": { - setup: func(et *EndpointsTranslator) { - e1 := endpoints( - "super-long-namespace-name-oh-boy", - "what-a-descriptive-service-name-you-must-be-so-proud", - core_v1.EndpointSubset{ - Addresses: addresses( - "172.16.0.2", - "172.16.0.1", - ), - Ports: ports( - port("https", 8443), - port("http", 8080), - ), - }, - ) - et.OnAdd(e1, false) - }, - ep: endpoints( - "super-long-namespace-name-oh-boy", - "what-a-descriptive-service-name-you-must-be-so-proud", - core_v1.EndpointSubset{ - Addresses: addresses( - "172.16.0.2", - "172.16.0.1", - ), - Ports: ports( - port("https", 8443), - port("http", 8080), - ), - }, - ), - want: []proto.Message{ - envoy_v3.ClusterLoadAssignment("default/simple"), - envoy_v3.ClusterLoadAssignment("super-long-namespace-name-oh-boy/what-a-descriptive-service-name-you-must-be-so-proud/http"), - envoy_v3.ClusterLoadAssignment("super-long-namespace-name-oh-boy/what-a-descriptive-service-name-you-must-be-so-proud/https"), - }, - wantUpdate: true, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - et := NewEndpointsTranslator(fixture.NewTestLogger(t)) - require.NoError(t, et.cache.SetClusters(clusters)) - tc.setup(et) - - // add the dummy observer after setting things up - // so we only get notified if the deletion triggers - // changes, not if the setup additions trigger changes. - observer := &simpleObserver{} - et.Observer = observer - - // TODO(jpeach): this doesn't actually test - // that deleting endpoints works. We ought to - // ensure the cache is populated first and - // only after that, verify that deletion gives - // the expected result. 
- et.OnDelete(tc.ep) - got := et.Contents() - protobuf.ExpectEqual(t, tc.want, got) - require.Equal(t, tc.wantUpdate, observer.updated) - }) - } -} - -func TestEndpointsTranslatorUpdateEndpoints(t *testing.T) { - clusters := []*dag.ServiceCluster{ - { - ClusterName: "default/simple", - Services: []dag.WeightedService{ - { - Weight: 1, - ServiceName: "simple", - ServiceNamespace: "default", - ServicePort: core_v1.ServicePort{}, - }, - }, - }, - } - - tests := map[string]struct { - setup func(*EndpointsTranslator) - old, new *core_v1.Endpoints - want []proto.Message - wantUpdate bool - }{ - "update existing": { - setup: func(et *EndpointsTranslator) { - et.OnAdd(endpoints("default", "simple", core_v1.EndpointSubset{ - Addresses: addresses("192.168.183.24"), - Ports: ports( - port("", 8080), - ), - }), false) - }, - old: endpoints("default", "simple", core_v1.EndpointSubset{ - Addresses: addresses("192.168.183.24"), - Ports: ports( - port("", 8080), - ), - }), - new: endpoints("default", "simple", core_v1.EndpointSubset{ - Addresses: addresses("192.168.183.25"), - Ports: ports( - port("", 8081), - ), - }), - want: []proto.Message{ - &envoy_config_endpoint_v3.ClusterLoadAssignment{ - ClusterName: "default/simple", - Endpoints: envoy_v3.WeightedEndpoints(1, envoy_v3.SocketAddress("192.168.183.25", 8081)), - }, - }, - wantUpdate: true, - }, - "getting an update for an Endpoints not used by a ServiceCluster should not trigger a recalculation": { - setup: func(et *EndpointsTranslator) { - et.OnAdd(endpoints("default", "simple", core_v1.EndpointSubset{ - Addresses: addresses("192.168.183.24"), - Ports: ports( - port("", 8080), - ), - }), false) - }, - old: endpoints("default", "different", core_v1.EndpointSubset{ - Addresses: addresses("192.168.183.24"), - Ports: ports( - port("", 8080), - ), - }), - new: endpoints("default", "different", core_v1.EndpointSubset{ - Addresses: addresses("192.168.183.25"), - Ports: ports( - port("", 8081), - ), - }), - want: []proto.Message{ - &envoy_config_endpoint_v3.ClusterLoadAssignment{ - ClusterName: "default/simple", - Endpoints: envoy_v3.WeightedEndpoints(1, envoy_v3.SocketAddress("192.168.183.24", 8080)), - }, - }, - wantUpdate: false, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - et := NewEndpointsTranslator(fixture.NewTestLogger(t)) - require.NoError(t, et.cache.SetClusters(clusters)) - tc.setup(et) - - // add the dummy observer after setting things up - // so we only get notified if the update triggers - // changes, not if the setup additions trigger changes. 
- observer := &simpleObserver{} - et.Observer = observer - - et.OnUpdate(tc.old, tc.new) - got := et.Contents() - protobuf.ExpectEqual(t, tc.want, got) - require.Equal(t, tc.wantUpdate, observer.updated) - }) - } -} - -func TestEndpointsTranslatorRecomputeClusterLoadAssignment(t *testing.T) { - tests := map[string]struct { - cluster dag.ServiceCluster - ep *core_v1.Endpoints - want []proto.Message - }{ - "simple": { - cluster: dag.ServiceCluster{ - ClusterName: "default/simple", - Services: []dag.WeightedService{{ - Weight: 1, - ServiceName: "simple", - ServiceNamespace: "default", - }}, - }, - ep: endpoints("default", "simple", core_v1.EndpointSubset{ - Addresses: addresses("192.168.183.24"), - Ports: ports( - port("", 8080), - ), - }), - want: []proto.Message{ - &envoy_config_endpoint_v3.ClusterLoadAssignment{ - ClusterName: "default/simple", - Endpoints: envoy_v3.WeightedEndpoints(1, - envoy_v3.SocketAddress("192.168.183.24", 8080)), - }, - }, - }, - "multiple addresses": { - cluster: dag.ServiceCluster{ - ClusterName: "default/httpbin-org", - Services: []dag.WeightedService{{ - Weight: 1, - ServiceName: "httpbin-org", - ServiceNamespace: "default", - }}, - }, - ep: endpoints("default", "httpbin-org", core_v1.EndpointSubset{ - Addresses: addresses( - "50.17.192.147", - "23.23.247.89", - "50.17.206.192", - "50.19.99.160", - ), - Ports: ports( - port("", 80), - ), - }), - want: []proto.Message{ - &envoy_config_endpoint_v3.ClusterLoadAssignment{ - ClusterName: "default/httpbin-org", - Endpoints: envoy_v3.WeightedEndpoints(1, - envoy_v3.SocketAddress("23.23.247.89", 80), - envoy_v3.SocketAddress("50.17.192.147", 80), - envoy_v3.SocketAddress("50.17.206.192", 80), - envoy_v3.SocketAddress("50.19.99.160", 80), - ), - }, - }, - }, - "named container port": { - cluster: dag.ServiceCluster{ - ClusterName: "default/secure/https", - Services: []dag.WeightedService{{ - Weight: 1, - ServiceName: "secure", - ServiceNamespace: "default", - ServicePort: core_v1.ServicePort{Name: "https"}, - }}, - }, - ep: endpoints("default", "secure", core_v1.EndpointSubset{ - Addresses: addresses("192.168.183.24"), - Ports: ports( - port("https", 8443), - ), - }), - want: []proto.Message{ - &envoy_config_endpoint_v3.ClusterLoadAssignment{ - ClusterName: "default/secure/https", - Endpoints: envoy_v3.WeightedEndpoints(1, - envoy_v3.SocketAddress("192.168.183.24", 8443)), - }, - }, - }, - "multiple addresses and healthcheck port": { - cluster: dag.ServiceCluster{ - ClusterName: "default/httpbin-org", - Services: []dag.WeightedService{{ - Weight: 1, - ServiceName: "httpbin-org", - ServiceNamespace: "default", - HealthPort: core_v1.ServicePort{Name: "health", Port: 8998}, - ServicePort: core_v1.ServicePort{Name: "a", Port: 80}, - }}, - }, - ep: endpoints("default", "httpbin-org", core_v1.EndpointSubset{ - Addresses: addresses( - "50.17.192.147", - "23.23.247.89", - "50.17.206.192", - "50.19.99.160", - ), - Ports: ports( - port("a", 80), - port("health", 8998), - ), - }), - want: []proto.Message{ - &envoy_config_endpoint_v3.ClusterLoadAssignment{ - ClusterName: "default/httpbin-org", - Endpoints: weightedHealthcheckEndpoints(1, 8998, - envoy_v3.SocketAddress("23.23.247.89", 80), - envoy_v3.SocketAddress("50.17.192.147", 80), - envoy_v3.SocketAddress("50.17.206.192", 80), - envoy_v3.SocketAddress("50.19.99.160", 80), - ), - }, - }, - }, - "health port is the same as service port": { - cluster: dag.ServiceCluster{ - ClusterName: "default/httpbin-org", - Services: []dag.WeightedService{{ - Weight: 1, - ServiceName: 
"httpbin-org", - ServiceNamespace: "default", - HealthPort: core_v1.ServicePort{Name: "a", Port: 80}, - ServicePort: core_v1.ServicePort{Name: "a", Port: 80}, - }}, - }, - ep: endpoints("default", "httpbin-org", core_v1.EndpointSubset{ - Addresses: addresses( - "50.17.192.147", - "23.23.247.89", - "50.17.206.192", - "50.19.99.160", - ), - Ports: ports( - port("a", 80), - port("health", 8998), - ), - }), - want: []proto.Message{ - &envoy_config_endpoint_v3.ClusterLoadAssignment{ - ClusterName: "default/httpbin-org", - Endpoints: envoy_v3.WeightedEndpoints(1, - envoy_v3.SocketAddress("23.23.247.89", 80), - envoy_v3.SocketAddress("50.17.192.147", 80), - envoy_v3.SocketAddress("50.17.206.192", 80), - envoy_v3.SocketAddress("50.19.99.160", 80), - ), - }, - }, - }, - } - - for name, tc := range tests { - tc := tc - t.Run(name, func(t *testing.T) { - et := NewEndpointsTranslator(fixture.NewTestLogger(t)) - // nolint:gosec - require.NoError(t, et.cache.SetClusters([]*dag.ServiceCluster{&tc.cluster})) - et.OnAdd(tc.ep, false) - got := et.Contents() - protobuf.ExpectEqual(t, tc.want, got) - }) - } -} - -// See #602 -func TestEndpointsTranslatorScaleToZeroEndpoints(t *testing.T) { - et := NewEndpointsTranslator(fixture.NewTestLogger(t)) - - require.NoError(t, et.cache.SetClusters([]*dag.ServiceCluster{ - { - ClusterName: "default/simple", - Services: []dag.WeightedService{{ - Weight: 1, - ServiceName: "simple", - ServiceNamespace: "default", - ServicePort: core_v1.ServicePort{}, - }}, - }, - })) - - e1 := endpoints("default", "simple", core_v1.EndpointSubset{ - Addresses: addresses("192.168.183.24"), - Ports: ports( - port("", 8080), - ), - }) - et.OnAdd(e1, false) - - // Assert endpoint was added - want := []proto.Message{ - &envoy_config_endpoint_v3.ClusterLoadAssignment{ - ClusterName: "default/simple", - Endpoints: envoy_v3.WeightedEndpoints(1, envoy_v3.SocketAddress("192.168.183.24", 8080)), - }, - } - - protobuf.RequireEqual(t, want, et.Contents()) - - // e2 is the same as e1, but without endpoint subsets - e2 := endpoints("default", "simple") - et.OnUpdate(e1, e2) - - // Assert endpoints are removed - want = []proto.Message{ - &envoy_config_endpoint_v3.ClusterLoadAssignment{ClusterName: "default/simple"}, - } - - protobuf.RequireEqual(t, want, et.Contents()) -} - -// Test that a cluster with weighted services propagates the weights. -func TestEndpointsTranslatorWeightedService(t *testing.T) { - et := NewEndpointsTranslator(fixture.NewTestLogger(t)) - clusters := []*dag.ServiceCluster{ - { - ClusterName: "default/weighted", - Services: []dag.WeightedService{ - { - Weight: 0, - ServiceName: "weight0", - ServiceNamespace: "default", - ServicePort: core_v1.ServicePort{}, - }, - { - Weight: 1, - ServiceName: "weight1", - ServiceNamespace: "default", - ServicePort: core_v1.ServicePort{}, - }, - { - Weight: 2, - ServiceName: "weight2", - ServiceNamespace: "default", - ServicePort: core_v1.ServicePort{}, - }, - }, - }, - } - - require.NoError(t, et.cache.SetClusters(clusters)) - - epSubset := core_v1.EndpointSubset{ - Addresses: addresses("192.168.183.24"), - Ports: ports(port("", 8080)), - } - - et.OnAdd(endpoints("default", "weight0", epSubset), false) - et.OnAdd(endpoints("default", "weight1", epSubset), false) - et.OnAdd(endpoints("default", "weight2", epSubset), false) - - // Each helper builds a `LocalityLbEndpoints` with one - // entry, so we can compose the final result by reaching - // in an taking the first element of each slice. 
- w0 := envoy_v3.Endpoints(envoy_v3.SocketAddress("192.168.183.24", 8080)) - w1 := envoy_v3.WeightedEndpoints(1, envoy_v3.SocketAddress("192.168.183.24", 8080)) - w2 := envoy_v3.WeightedEndpoints(2, envoy_v3.SocketAddress("192.168.183.24", 8080)) - - want := []proto.Message{ - &envoy_config_endpoint_v3.ClusterLoadAssignment{ - ClusterName: "default/weighted", - Endpoints: []*envoy_config_endpoint_v3.LocalityLbEndpoints{ - w0[0], w1[0], w2[0], - }, - }, - } - - protobuf.ExpectEqual(t, want, et.Contents()) -} - -// Test that a cluster with weighted services that all leave the -// weights unspecified defaults to equally weighed and propagates the -// weights. -func TestEndpointsTranslatorDefaultWeightedService(t *testing.T) { - et := NewEndpointsTranslator(fixture.NewTestLogger(t)) - clusters := []*dag.ServiceCluster{ - { - ClusterName: "default/weighted", - Services: []dag.WeightedService{ - { - ServiceName: "weight0", - ServiceNamespace: "default", - ServicePort: core_v1.ServicePort{}, - }, - { - ServiceName: "weight1", - ServiceNamespace: "default", - ServicePort: core_v1.ServicePort{}, - }, - { - ServiceName: "weight2", - ServiceNamespace: "default", - ServicePort: core_v1.ServicePort{}, - }, - }, - }, - } - - require.NoError(t, et.cache.SetClusters(clusters)) - - epSubset := core_v1.EndpointSubset{ - Addresses: addresses("192.168.183.24"), - Ports: ports(port("", 8080)), - } - - et.OnAdd(endpoints("default", "weight0", epSubset), false) - et.OnAdd(endpoints("default", "weight1", epSubset), false) - et.OnAdd(endpoints("default", "weight2", epSubset), false) - - // Each helper builds a `LocalityLbEndpoints` with one - // entry, so we can compose the final result by reaching - // in an taking the first element of each slice. - w0 := envoy_v3.WeightedEndpoints(1, envoy_v3.SocketAddress("192.168.183.24", 8080)) - w1 := envoy_v3.WeightedEndpoints(1, envoy_v3.SocketAddress("192.168.183.24", 8080)) - w2 := envoy_v3.WeightedEndpoints(1, envoy_v3.SocketAddress("192.168.183.24", 8080)) - - want := []proto.Message{ - &envoy_config_endpoint_v3.ClusterLoadAssignment{ - ClusterName: "default/weighted", - Endpoints: []*envoy_config_endpoint_v3.LocalityLbEndpoints{ - w0[0], w1[0], w2[0], - }, - }, - } - - protobuf.ExpectEqual(t, want, et.Contents()) -} - -func TestEqual(t *testing.T) { - tests := map[string]struct { - a, b map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment - want bool - }{ - "both nil": { - a: nil, - b: nil, - want: true, - }, - "one nil, one empty": { - a: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{}, - b: nil, - want: true, - }, - "both empty": { - a: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{}, - b: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{}, - want: true, - }, - "a is an incomplete subset of b": { - a: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{ - "a": {ClusterName: "a"}, - "b": {ClusterName: "b"}, - }, - b: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{ - "a": {ClusterName: "a"}, - "b": {ClusterName: "b"}, - "c": {ClusterName: "c"}, - }, - want: false, - }, - "b is an incomplete subset of a": { - a: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{ - "a": {ClusterName: "a"}, - "b": {ClusterName: "b"}, - "c": {ClusterName: "c"}, - }, - b: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{ - "a": {ClusterName: "a"}, - "b": {ClusterName: "b"}, - }, - want: false, - }, - "a and b have the same keys, different values": { - a: 
map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{ - "a": {ClusterName: "a"}, - "b": {ClusterName: "b"}, - "c": {ClusterName: "c"}, - }, - b: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{ - "a": {ClusterName: "a"}, - "b": {ClusterName: "b"}, - "c": {ClusterName: "different"}, - }, - want: false, - }, - "a and b have the same values, different keys": { - a: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{ - "a": {ClusterName: "a"}, - "b": {ClusterName: "b"}, - "c": {ClusterName: "c"}, - }, - b: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{ - "d": {ClusterName: "a"}, - "e": {ClusterName: "b"}, - "f": {ClusterName: "c"}, - }, - want: false, - }, - "a and b have the same keys, same values": { - a: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{ - "a": {ClusterName: "a"}, - "b": {ClusterName: "b"}, - "c": {ClusterName: "c"}, - }, - b: map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment{ - "a": {ClusterName: "a"}, - "b": {ClusterName: "b"}, - "c": {ClusterName: "c"}, - }, - want: true, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - assert.Equal(t, tc.want, equal(tc.a, tc.b)) - }) - } -} - -func ports(eps ...core_v1.EndpointPort) []core_v1.EndpointPort { - return eps -} - -func port(name string, port int32) core_v1.EndpointPort { - return core_v1.EndpointPort{ - Name: name, - Port: port, - Protocol: "TCP", - } -} - -func clusterloadassignments(clas ...*envoy_config_endpoint_v3.ClusterLoadAssignment) map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment { - m := make(map[string]*envoy_config_endpoint_v3.ClusterLoadAssignment) - for _, cla := range clas { - m[cla.ClusterName] = cla - } - return m -} - -func weightedHealthcheckEndpoints(weight, healthcheckPort uint32, addrs ...*envoy_config_core_v3.Address) []*envoy_config_endpoint_v3.LocalityLbEndpoints { - lbendpoints := healthcheckEndpoints(healthcheckPort, addrs...) - lbendpoints[0].LoadBalancingWeight = wrapperspb.UInt32(weight) - return lbendpoints -} - -func healthcheckEndpoints(healthcheckPort uint32, addrs ...*envoy_config_core_v3.Address) []*envoy_config_endpoint_v3.LocalityLbEndpoints { - lbendpoints := make([]*envoy_config_endpoint_v3.LbEndpoint, 0, len(addrs)) - for _, addr := range addrs { - lbendpoints = append(lbendpoints, healthCheckLBEndpoint(addr, healthcheckPort)) - } - return []*envoy_config_endpoint_v3.LocalityLbEndpoints{{ - LbEndpoints: lbendpoints, - }} -} - -// healthCheckLBEndpoint creates a new LbEndpoint include healthCheckConfig -func healthCheckLBEndpoint(addr *envoy_config_core_v3.Address, healthCheckPort uint32) *envoy_config_endpoint_v3.LbEndpoint { - var hc *envoy_config_endpoint_v3.Endpoint_HealthCheckConfig - if healthCheckPort != 0 { - hc = &envoy_config_endpoint_v3.Endpoint_HealthCheckConfig{ - PortValue: healthCheckPort, - } - } - return &envoy_config_endpoint_v3.LbEndpoint{ - HostIdentifier: &envoy_config_endpoint_v3.LbEndpoint_Endpoint{ - Endpoint: &envoy_config_endpoint_v3.Endpoint{ - Address: addr, - HealthCheckConfig: hc, - }, - }, - } -} diff --git a/internal/xdscache/v3/snapshot.go b/internal/xdscache/v3/snapshot.go index 7fe84b95faa..0438600f34d 100644 --- a/internal/xdscache/v3/snapshot.go +++ b/internal/xdscache/v3/snapshot.go @@ -86,7 +86,7 @@ func (s *SnapshotHandler) GetCache() envoy_cache_v3.Cache { return s.mux } -// Refresh is called when the EndpointsTranslator updates values +// Refresh is called when the EndpointSliceTranslator updates values // in its cache. It updates the EDS cache. 
func (s *SnapshotHandler) Refresh() { version := uuid.NewString() diff --git a/pkg/config/parameters.go b/pkg/config/parameters.go index ce9149a917c..d79aab9c6cc 100644 --- a/pkg/config/parameters.go +++ b/pkg/config/parameters.go @@ -699,10 +699,6 @@ type Parameters struct { Tracing *Tracing `yaml:"tracing,omitempty"` // FeatureFlags defines toggle to enable new contour features. - // available toggles are - // useEndpointSlices - configures contour to fetch endpoint data - // from k8s endpoint slices. defaults to true, - // if false then reading endpoint data from the k8s endpoints. FeatureFlags []string `yaml:"featureFlags,omitempty"` } From 2277cbc5c504cbb26b3e2daf38b7ac095540131d Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia Date: Fri, 25 Apr 2025 17:17:09 -0400 Subject: [PATCH 2/6] codegen Signed-off-by: Sunjay Bhatia --- examples/contour/01-crds.yaml | 15 +++------------ examples/render/contour-deployment.yaml | 15 +++------------ examples/render/contour-gateway-provisioner.yaml | 15 +++------------ examples/render/contour-gateway.yaml | 15 +++------------ examples/render/contour.yaml | 15 +++------------ site/content/docs/main/config/api-reference.html | 12 ++---------- 6 files changed, 17 insertions(+), 70 deletions(-) diff --git a/examples/contour/01-crds.yaml b/examples/contour/01-crds.yaml index b1a280bdef0..1fad81020cc 100644 --- a/examples/contour/01-crds.yaml +++ b/examples/contour/01-crds.yaml @@ -630,12 +630,7 @@ spec: type: object type: object featureFlags: - description: |- - FeatureFlags defines toggle to enable new contour features. - Available toggles are: - useEndpointSlices - Configures contour to fetch endpoint data - from k8s endpoint slices. defaults to true, - If false then reads endpoint data from the k8s endpoints. + description: FeatureFlags defines toggle to enable new contour features. items: type: string type: array @@ -4473,12 +4468,8 @@ spec: type: object type: object featureFlags: - description: |- - FeatureFlags defines toggle to enable new contour features. - Available toggles are: - useEndpointSlices - Configures contour to fetch endpoint data - from k8s endpoint slices. defaults to true, - If false then reads endpoint data from the k8s endpoints. + description: FeatureFlags defines toggle to enable new contour + features. items: type: string type: array diff --git a/examples/render/contour-deployment.yaml b/examples/render/contour-deployment.yaml index 36b16ded762..d77f6b45645 100644 --- a/examples/render/contour-deployment.yaml +++ b/examples/render/contour-deployment.yaml @@ -845,12 +845,7 @@ spec: type: object type: object featureFlags: - description: |- - FeatureFlags defines toggle to enable new contour features. - Available toggles are: - useEndpointSlices - Configures contour to fetch endpoint data - from k8s endpoint slices. defaults to true, - If false then reads endpoint data from the k8s endpoints. + description: FeatureFlags defines toggle to enable new contour features. items: type: string type: array @@ -4688,12 +4683,8 @@ spec: type: object type: object featureFlags: - description: |- - FeatureFlags defines toggle to enable new contour features. - Available toggles are: - useEndpointSlices - Configures contour to fetch endpoint data - from k8s endpoint slices. defaults to true, - If false then reads endpoint data from the k8s endpoints. + description: FeatureFlags defines toggle to enable new contour + features. 
items: type: string type: array diff --git a/examples/render/contour-gateway-provisioner.yaml b/examples/render/contour-gateway-provisioner.yaml index ba469e61354..49a71c482f8 100644 --- a/examples/render/contour-gateway-provisioner.yaml +++ b/examples/render/contour-gateway-provisioner.yaml @@ -641,12 +641,7 @@ spec: type: object type: object featureFlags: - description: |- - FeatureFlags defines toggle to enable new contour features. - Available toggles are: - useEndpointSlices - Configures contour to fetch endpoint data - from k8s endpoint slices. defaults to true, - If false then reads endpoint data from the k8s endpoints. + description: FeatureFlags defines toggle to enable new contour features. items: type: string type: array @@ -4484,12 +4479,8 @@ spec: type: object type: object featureFlags: - description: |- - FeatureFlags defines toggle to enable new contour features. - Available toggles are: - useEndpointSlices - Configures contour to fetch endpoint data - from k8s endpoint slices. defaults to true, - If false then reads endpoint data from the k8s endpoints. + description: FeatureFlags defines toggle to enable new contour + features. items: type: string type: array diff --git a/examples/render/contour-gateway.yaml b/examples/render/contour-gateway.yaml index 85b49baa2d3..3c813e844e6 100644 --- a/examples/render/contour-gateway.yaml +++ b/examples/render/contour-gateway.yaml @@ -666,12 +666,7 @@ spec: type: object type: object featureFlags: - description: |- - FeatureFlags defines toggle to enable new contour features. - Available toggles are: - useEndpointSlices - Configures contour to fetch endpoint data - from k8s endpoint slices. defaults to true, - If false then reads endpoint data from the k8s endpoints. + description: FeatureFlags defines toggle to enable new contour features. items: type: string type: array @@ -4509,12 +4504,8 @@ spec: type: object type: object featureFlags: - description: |- - FeatureFlags defines toggle to enable new contour features. - Available toggles are: - useEndpointSlices - Configures contour to fetch endpoint data - from k8s endpoint slices. defaults to true, - If false then reads endpoint data from the k8s endpoints. + description: FeatureFlags defines toggle to enable new contour + features. items: type: string type: array diff --git a/examples/render/contour.yaml b/examples/render/contour.yaml index 93497303d47..534d891b9ec 100644 --- a/examples/render/contour.yaml +++ b/examples/render/contour.yaml @@ -845,12 +845,7 @@ spec: type: object type: object featureFlags: - description: |- - FeatureFlags defines toggle to enable new contour features. - Available toggles are: - useEndpointSlices - Configures contour to fetch endpoint data - from k8s endpoint slices. defaults to true, - If false then reads endpoint data from the k8s endpoints. + description: FeatureFlags defines toggle to enable new contour features. items: type: string type: array @@ -4688,12 +4683,8 @@ spec: type: object type: object featureFlags: - description: |- - FeatureFlags defines toggle to enable new contour features. - Available toggles are: - useEndpointSlices - Configures contour to fetch endpoint data - from k8s endpoint slices. defaults to true, - If false then reads endpoint data from the k8s endpoints. + description: FeatureFlags defines toggle to enable new contour + features. 
items: type: string type: array diff --git a/site/content/docs/main/config/api-reference.html b/site/content/docs/main/config/api-reference.html index 76cad6d8b2e..292c3b45a47 100644 --- a/site/content/docs/main/config/api-reference.html +++ b/site/content/docs/main/config/api-reference.html @@ -5195,11 +5195,7 @@

ContourConfiguration -

FeatureFlags defines toggle to enable new contour features. -Available toggles are: -useEndpointSlices - Configures contour to fetch endpoint data -from k8s endpoint slices. defaults to true, -If false then reads endpoint data from the k8s endpoints.

+

FeatureFlags defines toggle to enable new contour features.

@@ -6119,11 +6115,7 @@

ContourConfiguration -

FeatureFlags defines toggle to enable new contour features. -Available toggles are: -useEndpointSlices - Configures contour to fetch endpoint data -from k8s endpoint slices. defaults to true, -If false then reads endpoint data from the k8s endpoints.

+

FeatureFlags defines toggle to enable new contour features.

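The regenerated CRDs and API reference above now describe featureFlags only as a generic list of strings, with the useEndpointSlices toggle gone from the documentation. As a minimal illustration of what that looks like from the user side — assuming the usual projectcontour.io/v1alpha1 ContourConfiguration resource, with placeholder metadata that is not taken from this patch — a config that previously opted out of EndpointSlices would simply drop the entry:

    apiVersion: projectcontour.io/v1alpha1
    kind: ContourConfiguration
    metadata:
      name: contour                # placeholder name
      namespace: projectcontour    # placeholder namespace
    spec:
      # previously: featureFlags: ["useEndpointSlices=false"]
      featureFlags: []             # the toggle no longer exists; leave the list empty or omit the field

The featureFlags list in the Contour configuration file (Parameters in pkg/config) likewise stays a plain []string, as shown in the parameters.go hunk earlier in this patch.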
From 08e2505c87b4e75910ca948c4945a0ecfc365435 Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia Date: Fri, 25 Apr 2025 17:34:27 -0400 Subject: [PATCH 3/6] changelog Signed-off-by: Sunjay Bhatia --- changelogs/unreleased/7008-sunjayBhatia-deprecation.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 changelogs/unreleased/7008-sunjayBhatia-deprecation.md diff --git a/changelogs/unreleased/7008-sunjayBhatia-deprecation.md b/changelogs/unreleased/7008-sunjayBhatia-deprecation.md new file mode 100644 index 00000000000..176c6b8f103 --- /dev/null +++ b/changelogs/unreleased/7008-sunjayBhatia-deprecation.md @@ -0,0 +1,5 @@ +## useEndpointSlices feature flag removed + +As of v1.29.0, Contour has used the Kubernetes EndpointSlices API by default to determine the endpoints to configure Envoy with, instead of the Endpoints API. +EndpointSlice support is now stable and the remaining Endpoint handling code, along with the associated `useEndpointSlices` feature flag, has been removed. +This should be a no-op change for most users, only affecting those that opted into continuing to use the Endpoints API and possibly also disabled EndpointSlice mirroring of Endpoints. From ffaaf5e8275ea6265357f166d2f1b3371817d9f6 Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia Date: Fri, 25 Apr 2025 17:52:07 -0400 Subject: [PATCH 4/6] remove usage of Endpoint type from helpers these helpers are not actually used in the processing code path for endpoints or endpointslices Signed-off-by: Sunjay Bhatia --- internal/k8s/helpers.go | 4 -- internal/k8s/helpers_test.go | 5 -- internal/k8s/kind.go | 7 +-- internal/k8s/kind_test.go | 4 +- .../k8s/testdata/endpoint-content-change.yaml | 47 ------------------- 5 files changed, 6 insertions(+), 61 deletions(-) delete mode 100644 internal/k8s/testdata/endpoint-content-change.yaml diff --git a/internal/k8s/helpers.go b/internal/k8s/helpers.go index 36d069990c1..55c759c44f7 100644 --- a/internal/k8s/helpers.go +++ b/internal/k8s/helpers.go @@ -142,10 +142,6 @@ func IsObjectEqual(oldObj, newObj client.Object) (bool, error) { apiequality.Semantic.DeepEqual(oldObj.Status, newObj.Status) && apiequality.Semantic.DeepEqual(oldObj.GetAnnotations(), newObj.GetAnnotations()), nil } - case *core_v1.Endpoints: - if newObj, ok := newObj.(*core_v1.Endpoints); ok { - return apiequality.Semantic.DeepEqual(oldObj.Subsets, newObj.Subsets), nil - } case *core_v1.Namespace: if newObj, ok := newObj.(*core_v1.Namespace); ok { return apiequality.Semantic.DeepEqual(oldObj.Labels, newObj.Labels), nil diff --git a/internal/k8s/helpers_test.go b/internal/k8s/helpers_test.go index 42f5b237394..1ec38fb1a17 100644 --- a/internal/k8s/helpers_test.go +++ b/internal/k8s/helpers_test.go @@ -70,11 +70,6 @@ func TestIsObjectEqual(t *testing.T) { filename: "testdata/service-annotation-change.yaml", equals: false, }, - { - name: "Endpoint with content change", - filename: "testdata/endpoint-content-change.yaml", - equals: false, - }, { name: "HTTPProxy with annotation change", filename: "testdata/httpproxy-annotation-change.yaml", diff --git a/internal/k8s/kind.go b/internal/k8s/kind.go index 0de1fd90225..b95b699641c 100644 --- a/internal/k8s/kind.go +++ b/internal/k8s/kind.go @@ -15,6 +15,7 @@ package k8s import ( core_v1 "k8s.io/api/core/v1" + discovery_v1 "k8s.io/api/discovery/v1" networking_v1 "k8s.io/api/networking/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -45,8 +46,8 @@ func KindOf(obj any) string { return "Secret" case *core_v1.Service: return "Service" - case 
*core_v1.Endpoints: - return "Endpoints" + case *discovery_v1.EndpointSlice: + return "EndpointSlice" case *networking_v1.Ingress: return "Ingress" case *contour_v1.HTTPProxy: @@ -95,7 +96,7 @@ func VersionOf(obj any) string { gvk, _, err := scheme.Scheme.ObjectKinds(obj.(runtime.Object)) if err != nil { switch obj := obj.(type) { - case *core_v1.Secret, *core_v1.Service, *core_v1.Endpoints: + case *core_v1.Secret, *core_v1.Service: return core_v1.SchemeGroupVersion.String() case *networking_v1.Ingress: return networking_v1.SchemeGroupVersion.String() diff --git a/internal/k8s/kind_test.go b/internal/k8s/kind_test.go index da90fb7ddea..b867a988524 100644 --- a/internal/k8s/kind_test.go +++ b/internal/k8s/kind_test.go @@ -18,6 +18,7 @@ import ( "github.com/stretchr/testify/assert" core_v1 "k8s.io/api/core/v1" + discovery_v1 "k8s.io/api/discovery/v1" networking_v1 "k8s.io/api/networking/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" gatewayapi_v1 "sigs.k8s.io/gateway-api/apis/v1" @@ -36,8 +37,8 @@ func TestKindOf(t *testing.T) { }{ {"Secret", &core_v1.Secret{}}, {"Service", &core_v1.Service{}}, + {"EndpointSlice", &discovery_v1.EndpointSlice{}}, {"Namespace", &core_v1.Namespace{}}, - {"Endpoints", &core_v1.Endpoints{}}, {"Pod", &core_v1.Pod{}}, {"Ingress", &networking_v1.Ingress{}}, {"HTTPProxy", &contour_v1.HTTPProxy{}}, @@ -75,7 +76,6 @@ func TestVersionOf(t *testing.T) { }{ {"v1", &core_v1.Secret{}}, {"v1", &core_v1.Service{}}, - {"v1", &core_v1.Endpoints{}}, {"networking.k8s.io/v1", &networking_v1.Ingress{}}, {"projectcontour.io/v1", &contour_v1.HTTPProxy{}}, {"projectcontour.io/v1", &contour_v1.TLSCertificateDelegation{}}, diff --git a/internal/k8s/testdata/endpoint-content-change.yaml b/internal/k8s/testdata/endpoint-content-change.yaml deleted file mode 100644 index b698d00dd07..00000000000 --- a/internal/k8s/testdata/endpoint-content-change.yaml +++ /dev/null @@ -1,47 +0,0 @@ -apiVersion: v1 -kind: Endpoints -metadata: - annotations: - endpoints.kubernetes.io/last-change-trigger-time: "2023-02-09T15:06:20Z" - creationTimestamp: "2023-02-09T14:55:33Z" - name: echoserver - namespace: default - resourceVersion: "85303" - uid: 6cb4c5f2-60b2-4240-b2a3-91111c7e2527 -subsets: -- addresses: - - ip: 10.244.1.4 - nodeName: contour-worker - targetRef: - kind: Pod - name: echoserver-59db9c5778-cfwhz - namespace: default - uid: b0014607-0e3b-461e-8b06-df4407fa7a44 - ports: - - name: http - port: 3000 - protocol: TCP ---- -apiVersion: v1 -kind: Endpoints -metadata: - annotations: - endpoints.kubernetes.io/last-change-trigger-time: "2023-02-09T19:56:07Z" - creationTimestamp: "2023-02-09T14:55:33Z" - name: echoserver - namespace: default - resourceVersion: "112662" - uid: 6cb4c5f2-60b2-4240-b2a3-91111c7e2527 -subsets: -- addresses: - - ip: 10.244.1.5 - nodeName: contour-worker - targetRef: - kind: Pod - name: echoserver-59db9c5778-5wdhj - namespace: default - uid: 84878472-00b6-4ae2-b1fd-5508c900707c - ports: - - name: http - port: 3000 - protocol: TCP From f9809182aaa000747dcab4543cb2ce04a41dd94b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Apr 2025 03:52:01 +0000 Subject: [PATCH 5/6] build(deps): bump the k8s-dependencies group with 4 updates Bumps the k8s-dependencies group with 4 updates: [k8s.io/api](https://github.com/kubernetes/api), [k8s.io/apiextensions-apiserver](https://github.com/kubernetes/apiextensions-apiserver), [k8s.io/apimachinery](https://github.com/kubernetes/apimachinery) and 
[k8s.io/client-go](https://github.com/kubernetes/client-go). Updates `k8s.io/api` from 0.32.3 to 0.33.0 - [Commits](https://github.com/kubernetes/api/compare/v0.32.3...v0.33.0) Updates `k8s.io/apiextensions-apiserver` from 0.32.3 to 0.33.0 - [Release notes](https://github.com/kubernetes/apiextensions-apiserver/releases) - [Commits](https://github.com/kubernetes/apiextensions-apiserver/compare/v0.32.3...v0.33.0) Updates `k8s.io/apimachinery` from 0.32.3 to 0.33.0 - [Commits](https://github.com/kubernetes/apimachinery/compare/v0.32.3...v0.33.0) Updates `k8s.io/client-go` from 0.32.3 to 0.33.0 - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.32.3...v0.33.0) --- updated-dependencies: - dependency-name: k8s.io/api dependency-version: 0.33.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: k8s-dependencies - dependency-name: k8s.io/apiextensions-apiserver dependency-version: 0.33.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: k8s-dependencies - dependency-name: k8s.io/apimachinery dependency-version: 0.33.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: k8s-dependencies - dependency-name: k8s.io/client-go dependency-version: 0.33.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: k8s-dependencies ... Signed-off-by: dependabot[bot] --- go.mod | 21 ++++++++++----------- go.sum | 39 +++++++++++++++++++++------------------ 2 files changed, 31 insertions(+), 29 deletions(-) diff --git a/go.mod b/go.mod index 76d6a0c36ae..6f8d162613b 100644 --- a/go.mod +++ b/go.mod @@ -38,10 +38,10 @@ require ( google.golang.org/grpc v1.72.0 google.golang.org/protobuf v1.36.6 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.32.3 - k8s.io/apiextensions-apiserver v0.32.3 - k8s.io/apimachinery v0.32.3 - k8s.io/client-go v0.32.3 + k8s.io/api v0.33.0 + k8s.io/apiextensions-apiserver v0.33.0 + k8s.io/apimachinery v0.33.0 + k8s.io/client-go v0.33.0 k8s.io/klog/v2 v2.130.1 k8s.io/utils v0.0.0-20241210054802-24370beab758 sigs.k8s.io/controller-runtime v0.20.4 @@ -80,13 +80,11 @@ require ( github.com/gobuffalo/flect v1.0.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect - github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect github.com/google/gnostic-models v0.6.9 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect - github.com/gorilla/websocket v1.5.3 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/huandu/xstrings v1.4.0 // indirect github.com/iancoleman/strcase v0.3.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -131,16 +129,17 @@ require ( golang.org/x/sys v0.32.0 // indirect golang.org/x/term v0.31.0 // indirect golang.org/x/text v0.24.0 // indirect - golang.org/x/time v0.8.0 // indirect + golang.org/x/time v0.9.0 // indirect golang.org/x/tools v0.31.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/gengo/v2 
v2.0.0-20240911193312-2b36238f13e9 // indirect - k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect + k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 // indirect + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.5.0 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index de261b74281..4270e166a32 100644 --- a/go.sum +++ b/go.sum @@ -141,8 +141,8 @@ github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= -github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= @@ -365,8 +365,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= -golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= -golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -428,20 +428,20 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= -k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= -k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY= -k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss= -k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= -k8s.io/apimachinery v0.32.3/go.mod 
h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= -k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= -k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4= -k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= +k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU= +k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM= +k8s.io/apiextensions-apiserver v0.33.0 h1:d2qpYL7Mngbsc1taA4IjJPRJ9ilnsXIrndH+r9IimOs= +k8s.io/apiextensions-apiserver v0.33.0/go.mod h1:VeJ8u9dEEN+tbETo+lFkwaaZPg6uFKLGj5vyNEwwSzc= +k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ= +k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98= +k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg= +k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 h1:2OX19X59HxDprNCVrWi6jb7LW1PoqTlYqEq5H2oetog= +k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg= -k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4= @@ -456,7 +456,10 @@ sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7np sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kustomize/kyaml v0.19.0 h1:RFge5qsO1uHhwJsu3ipV7RNolC7Uozc0jUBC/61XSlA= sigs.k8s.io/kustomize/kyaml v0.19.0/go.mod h1:FeKD5jEOH+FbZPpqUghBP8mrLjJ3+zD3/rf9NNu1cwY= -sigs.k8s.io/structured-merge-diff/v4 v4.5.0 h1:nbCitCK2hfnhyiKo6uf2HxUPTCodY6Qaf85SbDIaMBk= -sigs.k8s.io/structured-merge-diff/v4 v4.5.0/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= From 1257c673e64972c515c6e8f466c4896eacb849fe Mon Sep 17 00:00:00 2001 From: Tero Saarni Date: Mon, 28 Apr 2025 19:10:20 +0300 Subject: [PATCH 6/6] make generate Signed-off-by: Tero Saarni --- examples/contour/01-crds.yaml | 2 +- 
examples/render/contour-deployment.yaml | 2 +- examples/render/contour-gateway-provisioner.yaml | 2 +- examples/render/contour-gateway.yaml | 2 +- examples/render/contour.yaml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/contour/01-crds.yaml b/examples/contour/01-crds.yaml index 1fad81020cc..32f0b3ae08d 100644 --- a/examples/contour/01-crds.yaml +++ b/examples/contour/01-crds.yaml @@ -2796,7 +2796,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: diff --git a/examples/render/contour-deployment.yaml b/examples/render/contour-deployment.yaml index d77f6b45645..ae5c1ac6180 100644 --- a/examples/render/contour-deployment.yaml +++ b/examples/render/contour-deployment.yaml @@ -3011,7 +3011,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: diff --git a/examples/render/contour-gateway-provisioner.yaml b/examples/render/contour-gateway-provisioner.yaml index 49a71c482f8..19cdb5e899f 100644 --- a/examples/render/contour-gateway-provisioner.yaml +++ b/examples/render/contour-gateway-provisioner.yaml @@ -2807,7 +2807,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. 
properties: pullPolicy: diff --git a/examples/render/contour-gateway.yaml b/examples/render/contour-gateway.yaml index 3c813e844e6..9812abdf0f2 100644 --- a/examples/render/contour-gateway.yaml +++ b/examples/render/contour-gateway.yaml @@ -2832,7 +2832,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: diff --git a/examples/render/contour.yaml b/examples/render/contour.yaml index 534d891b9ec..49afd49c94a 100644 --- a/examples/render/contour.yaml +++ b/examples/render/contour.yaml @@ -3011,7 +3011,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: