// forked from grpc/grpc-node — Cluster.ts (generated TypeScript definitions for envoy/config/cluster/v3/cluster.proto)
// Original file: deps/envoy-api/envoy/config/cluster/v3/cluster.proto
import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration';
import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value';
import type { HealthCheck as _envoy_config_core_v3_HealthCheck, HealthCheck__Output as _envoy_config_core_v3_HealthCheck__Output } from '../../../../envoy/config/core/v3/HealthCheck';
import type { CircuitBreakers as _envoy_config_cluster_v3_CircuitBreakers, CircuitBreakers__Output as _envoy_config_cluster_v3_CircuitBreakers__Output } from '../../../../envoy/config/cluster/v3/CircuitBreakers';
import type { Http1ProtocolOptions as _envoy_config_core_v3_Http1ProtocolOptions, Http1ProtocolOptions__Output as _envoy_config_core_v3_Http1ProtocolOptions__Output } from '../../../../envoy/config/core/v3/Http1ProtocolOptions';
import type { Http2ProtocolOptions as _envoy_config_core_v3_Http2ProtocolOptions, Http2ProtocolOptions__Output as _envoy_config_core_v3_Http2ProtocolOptions__Output } from '../../../../envoy/config/core/v3/Http2ProtocolOptions';
import type { Address as _envoy_config_core_v3_Address, Address__Output as _envoy_config_core_v3_Address__Output } from '../../../../envoy/config/core/v3/Address';
import type { OutlierDetection as _envoy_config_cluster_v3_OutlierDetection, OutlierDetection__Output as _envoy_config_cluster_v3_OutlierDetection__Output } from '../../../../envoy/config/cluster/v3/OutlierDetection';
import type { BindConfig as _envoy_config_core_v3_BindConfig, BindConfig__Output as _envoy_config_core_v3_BindConfig__Output } from '../../../../envoy/config/core/v3/BindConfig';
import type { TransportSocket as _envoy_config_core_v3_TransportSocket, TransportSocket__Output as _envoy_config_core_v3_TransportSocket__Output } from '../../../../envoy/config/core/v3/TransportSocket';
import type { Metadata as _envoy_config_core_v3_Metadata, Metadata__Output as _envoy_config_core_v3_Metadata__Output } from '../../../../envoy/config/core/v3/Metadata';
import type { HttpProtocolOptions as _envoy_config_core_v3_HttpProtocolOptions, HttpProtocolOptions__Output as _envoy_config_core_v3_HttpProtocolOptions__Output } from '../../../../envoy/config/core/v3/HttpProtocolOptions';
import type { UpstreamConnectionOptions as _envoy_config_cluster_v3_UpstreamConnectionOptions, UpstreamConnectionOptions__Output as _envoy_config_cluster_v3_UpstreamConnectionOptions__Output } from '../../../../envoy/config/cluster/v3/UpstreamConnectionOptions';
import type { ClusterLoadAssignment as _envoy_config_endpoint_v3_ClusterLoadAssignment, ClusterLoadAssignment__Output as _envoy_config_endpoint_v3_ClusterLoadAssignment__Output } from '../../../../envoy/config/endpoint/v3/ClusterLoadAssignment';
import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any';
import type { Filter as _envoy_config_cluster_v3_Filter, Filter__Output as _envoy_config_cluster_v3_Filter__Output } from '../../../../envoy/config/cluster/v3/Filter';
import type { LoadBalancingPolicy as _envoy_config_cluster_v3_LoadBalancingPolicy, LoadBalancingPolicy__Output as _envoy_config_cluster_v3_LoadBalancingPolicy__Output } from '../../../../envoy/config/cluster/v3/LoadBalancingPolicy';
import type { ConfigSource as _envoy_config_core_v3_ConfigSource, ConfigSource__Output as _envoy_config_core_v3_ConfigSource__Output } from '../../../../envoy/config/core/v3/ConfigSource';
import type { UpstreamHttpProtocolOptions as _envoy_config_core_v3_UpstreamHttpProtocolOptions, UpstreamHttpProtocolOptions__Output as _envoy_config_core_v3_UpstreamHttpProtocolOptions__Output } from '../../../../envoy/config/core/v3/UpstreamHttpProtocolOptions';
import type { TypedExtensionConfig as _envoy_config_core_v3_TypedExtensionConfig, TypedExtensionConfig__Output as _envoy_config_core_v3_TypedExtensionConfig__Output } from '../../../../envoy/config/core/v3/TypedExtensionConfig';
import type { TrackClusterStats as _envoy_config_cluster_v3_TrackClusterStats, TrackClusterStats__Output as _envoy_config_cluster_v3_TrackClusterStats__Output } from '../../../../envoy/config/cluster/v3/TrackClusterStats';
import type { DnsResolutionConfig as _envoy_config_core_v3_DnsResolutionConfig, DnsResolutionConfig__Output as _envoy_config_core_v3_DnsResolutionConfig__Output } from '../../../../envoy/config/core/v3/DnsResolutionConfig';
import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue';
import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../../google/protobuf/Struct';
import type { RuntimeDouble as _envoy_config_core_v3_RuntimeDouble, RuntimeDouble__Output as _envoy_config_core_v3_RuntimeDouble__Output } from '../../../../envoy/config/core/v3/RuntimeDouble';
import type { Percent as _envoy_type_v3_Percent, Percent__Output as _envoy_type_v3_Percent__Output } from '../../../../envoy/type/v3/Percent';
import type { UInt64Value as _google_protobuf_UInt64Value, UInt64Value__Output as _google_protobuf_UInt64Value__Output } from '../../../../google/protobuf/UInt64Value';
import type { MetadataKey as _envoy_type_metadata_v3_MetadataKey, MetadataKey__Output as _envoy_type_metadata_v3_MetadataKey__Output } from '../../../../envoy/type/metadata/v3/MetadataKey';
import type { HealthStatusSet as _envoy_config_core_v3_HealthStatusSet, HealthStatusSet__Output as _envoy_config_core_v3_HealthStatusSet__Output } from '../../../../envoy/config/core/v3/HealthStatusSet';
import type { DoubleValue as _google_protobuf_DoubleValue, DoubleValue__Output as _google_protobuf_DoubleValue__Output } from '../../../../google/protobuf/DoubleValue';
// Original file: deps/envoy-api/envoy/config/cluster/v3/cluster.proto

/**
 * Runtime value map for the ClusterProtocolSelection proto enum
 * (proto-loader generated pattern: a frozen object of canonical string
 * names, from which the companion input/output types below are derived).
 */
export const _envoy_config_cluster_v3_Cluster_ClusterProtocolSelection = {
  /**
   * Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2).
   * If :ref:`http2_protocol_options <envoy_v3_api_field_config.cluster.v3.Cluster.http2_protocol_options>` are
   * present, HTTP2 will be used, otherwise HTTP1.1 will be used.
   */
  USE_CONFIGURED_PROTOCOL: 'USE_CONFIGURED_PROTOCOL',
  /**
   * Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection.
   */
  USE_DOWNSTREAM_PROTOCOL: 'USE_DOWNSTREAM_PROTOCOL',
} as const;

/**
 * Input (serialization) type: callers may supply either the string name
 * or the corresponding numeric proto enum value.
 */
export type _envoy_config_cluster_v3_Cluster_ClusterProtocolSelection =
  /**
   * Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2).
   * If :ref:`http2_protocol_options <envoy_v3_api_field_config.cluster.v3.Cluster.http2_protocol_options>` are
   * present, HTTP2 will be used, otherwise HTTP1.1 will be used.
   */
  | 'USE_CONFIGURED_PROTOCOL'
  | 0
  /**
   * Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection.
   */
  | 'USE_DOWNSTREAM_PROTOCOL'
  | 1

/**
 * Output (deserialization) type: always one of the string names from the
 * const map above, never a raw number.
 */
export type _envoy_config_cluster_v3_Cluster_ClusterProtocolSelection__Output = typeof _envoy_config_cluster_v3_Cluster_ClusterProtocolSelection[keyof typeof _envoy_config_cluster_v3_Cluster_ClusterProtocolSelection]
/**
 * Common configuration for all load balancer implementations.
 * [#next-free-field: 9]
 *
 * Input (serialization) form of the message: every field is optional and
 * message-typed fields also accept null. See the `__Output` variant below
 * for the deserialized shape.
 */
export interface _envoy_config_cluster_v3_Cluster_CommonLbConfig {
  /**
   * Configures the :ref:`healthy panic threshold <arch_overview_load_balancing_panic_threshold>`.
   * If not specified, the default is 50%.
   * To disable panic mode, set to 0%.
   *
   * .. note::
   * The specified percent will be truncated to the nearest 1%.
   */
  'healthy_panic_threshold'?: (_envoy_type_v3_Percent | null);
  // Member of the `locality_config_specifier` oneof (see discriminator field below).
  'zone_aware_lb_config'?: (_envoy_config_cluster_v3_Cluster_CommonLbConfig_ZoneAwareLbConfig | null);
  // Member of the `locality_config_specifier` oneof (see discriminator field below).
  'locality_weighted_lb_config'?: (_envoy_config_cluster_v3_Cluster_CommonLbConfig_LocalityWeightedLbConfig | null);
  /**
   * If set, all health check/weight/metadata updates that happen within this duration will be
   * merged and delivered in one shot when the duration expires. The start of the duration is when
   * the first update happens. This is useful for big clusters, with potentially noisy deploys
   * that might trigger excessive CPU usage due to a constant stream of healthcheck state changes
   * or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new
   * cluster). Please always keep in mind that the use of sandbox technologies may change this
   * behavior.
   *
   * If this is not set, we default to a merge window of 1000ms. To disable it, set the merge
   * window to 0.
   *
   * Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is
   * because merging those updates isn't currently safe. See
   * https://github.com/envoyproxy/envoy/pull/3941.
   */
  'update_merge_window'?: (_google_protobuf_Duration | null);
  /**
   * If set to true, Envoy will :ref:`exclude <arch_overview_load_balancing_excluded>` new hosts
   * when computing load balancing weights until they have been health checked for the first time.
   * This will have no effect unless active health checking is also configured.
   */
  'ignore_new_hosts_until_first_hc'?: (boolean);
  /**
   * If set to ``true``, the cluster manager will drain all existing
   * connections to upstream hosts whenever hosts are added or removed from the cluster.
   */
  'close_connections_on_host_set_change'?: (boolean);
  /**
   * Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.)
   */
  'consistent_hashing_lb_config'?: (_envoy_config_cluster_v3_Cluster_CommonLbConfig_ConsistentHashingLbConfig | null);
  /**
   * This controls what hosts are considered valid when using
   * :ref:`host overrides <arch_overview_load_balancing_override_host>`, which is used by some
   * filters to modify the load balancing decision.
   *
   * If this is unset then [UNKNOWN, HEALTHY, DEGRADED] will be applied by default. If this is
   * set with an empty set of statuses then host overrides will be ignored by the load balancing.
   */
  'override_host_status'?: (_envoy_config_core_v3_HealthStatusSet | null);
  // Oneof discriminator: names which of the two locality config fields above is set.
  'locality_config_specifier'?: "zone_aware_lb_config"|"locality_weighted_lb_config";
}
/**
 * Common configuration for all load balancer implementations.
 * [#next-free-field: 9]
 *
 * Output (deserialization) form of the message: scalar fields are always
 * present with concrete values, and message-typed fields are present but
 * null when unset (oneof members remain optional).
 */
export interface _envoy_config_cluster_v3_Cluster_CommonLbConfig__Output {
  /**
   * Configures the :ref:`healthy panic threshold <arch_overview_load_balancing_panic_threshold>`.
   * If not specified, the default is 50%.
   * To disable panic mode, set to 0%.
   *
   * .. note::
   * The specified percent will be truncated to the nearest 1%.
   */
  'healthy_panic_threshold': (_envoy_type_v3_Percent__Output | null);
  // Member of the `locality_config_specifier` oneof (see discriminator field below).
  'zone_aware_lb_config'?: (_envoy_config_cluster_v3_Cluster_CommonLbConfig_ZoneAwareLbConfig__Output | null);
  // Member of the `locality_config_specifier` oneof (see discriminator field below).
  'locality_weighted_lb_config'?: (_envoy_config_cluster_v3_Cluster_CommonLbConfig_LocalityWeightedLbConfig__Output | null);
  /**
   * If set, all health check/weight/metadata updates that happen within this duration will be
   * merged and delivered in one shot when the duration expires. The start of the duration is when
   * the first update happens. This is useful for big clusters, with potentially noisy deploys
   * that might trigger excessive CPU usage due to a constant stream of healthcheck state changes
   * or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new
   * cluster). Please always keep in mind that the use of sandbox technologies may change this
   * behavior.
   *
   * If this is not set, we default to a merge window of 1000ms. To disable it, set the merge
   * window to 0.
   *
   * Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is
   * because merging those updates isn't currently safe. See
   * https://github.com/envoyproxy/envoy/pull/3941.
   */
  'update_merge_window': (_google_protobuf_Duration__Output | null);
  /**
   * If set to true, Envoy will :ref:`exclude <arch_overview_load_balancing_excluded>` new hosts
   * when computing load balancing weights until they have been health checked for the first time.
   * This will have no effect unless active health checking is also configured.
   */
  'ignore_new_hosts_until_first_hc': (boolean);
  /**
   * If set to ``true``, the cluster manager will drain all existing
   * connections to upstream hosts whenever hosts are added or removed from the cluster.
   */
  'close_connections_on_host_set_change': (boolean);
  /**
   * Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.)
   */
  'consistent_hashing_lb_config': (_envoy_config_cluster_v3_Cluster_CommonLbConfig_ConsistentHashingLbConfig__Output | null);
  /**
   * This controls what hosts are considered valid when using
   * :ref:`host overrides <arch_overview_load_balancing_override_host>`, which is used by some
   * filters to modify the load balancing decision.
   *
   * If this is unset then [UNKNOWN, HEALTHY, DEGRADED] will be applied by default. If this is
   * set with an empty set of statuses then host overrides will be ignored by the load balancing.
   */
  'override_host_status': (_envoy_config_core_v3_HealthStatusSet__Output | null);
  // Oneof discriminator: names which of the two locality config fields above is set.
  'locality_config_specifier': "zone_aware_lb_config"|"locality_weighted_lb_config";
}
/**
 * Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.)
 *
 * Input (serialization) form: all fields optional.
 */
export interface _envoy_config_cluster_v3_Cluster_CommonLbConfig_ConsistentHashingLbConfig {
  /**
   * If set to ``true``, the cluster will use hostname instead of the resolved
   * address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address.
   */
  'use_hostname_for_hashing'?: (boolean);
  /**
   * Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150
   * no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster.
   * If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200.
   * Minimum is 100.
   *
   * Applies to both Ring Hash and Maglev load balancers.
   *
   * This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified
   * ``hash_balance_factor``, requests to any upstream host are capped at ``hash_balance_factor/100`` times the average number of requests
   * across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing
   * is used to identify an eligible host. Further, the linear probe is implemented using a random jump in hosts ring/table to identify
   * the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the
   * cascading overflow effect when choosing the next host in the ring/table).
   *
   * If weights are specified on the hosts, they are respected.
   *
   * This is an O(N) algorithm, unlike other load balancers. Using a lower ``hash_balance_factor`` results in more hosts
   * being probed, so use a higher value if you require better performance.
   */
  'hash_balance_factor'?: (_google_protobuf_UInt32Value | null);
}
/**
 * Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.)
 *
 * Output (deserialization) form: fields always present; message fields null when unset.
 */
export interface _envoy_config_cluster_v3_Cluster_CommonLbConfig_ConsistentHashingLbConfig__Output {
  /**
   * If set to ``true``, the cluster will use hostname instead of the resolved
   * address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address.
   */
  'use_hostname_for_hashing': (boolean);
  /**
   * Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150
   * no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster.
   * If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200.
   * Minimum is 100.
   *
   * Applies to both Ring Hash and Maglev load balancers.
   *
   * This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified
   * ``hash_balance_factor``, requests to any upstream host are capped at ``hash_balance_factor/100`` times the average number of requests
   * across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing
   * is used to identify an eligible host. Further, the linear probe is implemented using a random jump in hosts ring/table to identify
   * the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the
   * cascading overflow effect when choosing the next host in the ring/table).
   *
   * If weights are specified on the hosts, they are respected.
   *
   * This is an O(N) algorithm, unlike other load balancers. Using a lower ``hash_balance_factor`` results in more hosts
   * being probed, so use a higher value if you require better performance.
   */
  'hash_balance_factor': (_google_protobuf_UInt32Value__Output | null);
}
/**
 * Extended cluster type.
 *
 * Input (serialization) form: all fields optional.
 */
export interface _envoy_config_cluster_v3_Cluster_CustomClusterType {
  /**
   * The type of the cluster to instantiate. The name must match a supported cluster type.
   */
  'name'?: (string);
  /**
   * Cluster specific configuration which depends on the cluster being instantiated.
   * See the supported cluster for further documentation.
   * [#extension-category: envoy.clusters]
   */
  'typed_config'?: (_google_protobuf_Any | null);
}
/**
 * Extended cluster type.
 *
 * Output (deserialization) form: fields always present; message fields null when unset.
 */
export interface _envoy_config_cluster_v3_Cluster_CustomClusterType__Output {
  /**
   * The type of the cluster to instantiate. The name must match a supported cluster type.
   */
  'name': (string);
  /**
   * Cluster specific configuration which depends on the cluster being instantiated.
   * See the supported cluster for further documentation.
   * [#extension-category: envoy.clusters]
   */
  'typed_config': (_google_protobuf_Any__Output | null);
}
// Original file: deps/envoy-api/envoy/config/cluster/v3/cluster.proto

/**
 * Refer to :ref:`service discovery type <arch_overview_service_discovery_types>`
 * for an explanation on each type.
 *
 * Runtime value map for the DiscoveryType proto enum; the input/output
 * types below are derived from it.
 */
export const _envoy_config_cluster_v3_Cluster_DiscoveryType = {
  /**
   * Refer to the :ref:`static discovery type<arch_overview_service_discovery_types_static>`
   * for an explanation.
   */
  STATIC: 'STATIC',
  /**
   * Refer to the :ref:`strict DNS discovery
   * type<arch_overview_service_discovery_types_strict_dns>`
   * for an explanation.
   */
  STRICT_DNS: 'STRICT_DNS',
  /**
   * Refer to the :ref:`logical DNS discovery
   * type<arch_overview_service_discovery_types_logical_dns>`
   * for an explanation.
   */
  LOGICAL_DNS: 'LOGICAL_DNS',
  /**
   * Refer to the :ref:`service discovery type<arch_overview_service_discovery_types_eds>`
   * for an explanation.
   */
  EDS: 'EDS',
  /**
   * Refer to the :ref:`original destination discovery
   * type<arch_overview_service_discovery_types_original_destination>`
   * for an explanation.
   */
  ORIGINAL_DST: 'ORIGINAL_DST',
} as const;

/**
 * Refer to :ref:`service discovery type <arch_overview_service_discovery_types>`
 * for an explanation on each type.
 *
 * Input (serialization) type: either the string name or the numeric proto
 * enum value is accepted.
 */
export type _envoy_config_cluster_v3_Cluster_DiscoveryType =
  /**
   * Refer to the :ref:`static discovery type<arch_overview_service_discovery_types_static>`
   * for an explanation.
   */
  | 'STATIC'
  | 0
  /**
   * Refer to the :ref:`strict DNS discovery
   * type<arch_overview_service_discovery_types_strict_dns>`
   * for an explanation.
   */
  | 'STRICT_DNS'
  | 1
  /**
   * Refer to the :ref:`logical DNS discovery
   * type<arch_overview_service_discovery_types_logical_dns>`
   * for an explanation.
   */
  | 'LOGICAL_DNS'
  | 2
  /**
   * Refer to the :ref:`service discovery type<arch_overview_service_discovery_types_eds>`
   * for an explanation.
   */
  | 'EDS'
  | 3
  /**
   * Refer to the :ref:`original destination discovery
   * type<arch_overview_service_discovery_types_original_destination>`
   * for an explanation.
   */
  | 'ORIGINAL_DST'
  | 4

/**
 * Refer to :ref:`service discovery type <arch_overview_service_discovery_types>`
 * for an explanation on each type.
 *
 * Output (deserialization) type: always one of the string names.
 */
export type _envoy_config_cluster_v3_Cluster_DiscoveryType__Output = typeof _envoy_config_cluster_v3_Cluster_DiscoveryType[keyof typeof _envoy_config_cluster_v3_Cluster_DiscoveryType]
// Original file: deps/envoy-api/envoy/config/cluster/v3/cluster.proto

/**
 * When V4_ONLY is selected, the DNS resolver will only perform a lookup for
 * addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will
 * only perform a lookup for addresses in the IPv6 family. If AUTO is
 * specified, the DNS resolver will first perform a lookup for addresses in
 * the IPv6 family and fallback to a lookup for addresses in the IPv4 family.
 * This is semantically equivalent to a non-existent V6_PREFERRED option.
 * AUTO is a legacy name that is more opaque than
 * necessary and will be deprecated in favor of V6_PREFERRED in a future major version of the API.
 * If V4_PREFERRED is specified, the DNS resolver will first perform a lookup for addresses in the
 * IPv4 family and fallback to a lookup for addresses in the IPv6 family. i.e., the callback
 * target will only get v6 addresses if there were NO v4 addresses to return.
 * If ALL is specified, the DNS resolver will perform a lookup for both IPv4 and IPv6 families,
 * and return all resolved addresses. When this is used, Happy Eyeballs will be enabled for
 * upstream connections. Refer to :ref:`Happy Eyeballs Support <arch_overview_happy_eyeballs>`
 * for more information.
 * For cluster types other than
 * :ref:`STRICT_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>` and
 * :ref:`LOGICAL_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`,
 * this setting is
 * ignored.
 * [#next-major-version: deprecate AUTO in favor of a V6_PREFERRED option.]
 *
 * Runtime value map for the DnsLookupFamily proto enum; the input/output
 * types below are derived from it.
 */
export const _envoy_config_cluster_v3_Cluster_DnsLookupFamily = {
  AUTO: 'AUTO',
  V4_ONLY: 'V4_ONLY',
  V6_ONLY: 'V6_ONLY',
  V4_PREFERRED: 'V4_PREFERRED',
  ALL: 'ALL',
} as const;

/**
 * When V4_ONLY is selected, the DNS resolver will only perform a lookup for
 * addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will
 * only perform a lookup for addresses in the IPv6 family. If AUTO is
 * specified, the DNS resolver will first perform a lookup for addresses in
 * the IPv6 family and fallback to a lookup for addresses in the IPv4 family.
 * This is semantically equivalent to a non-existent V6_PREFERRED option.
 * AUTO is a legacy name that is more opaque than
 * necessary and will be deprecated in favor of V6_PREFERRED in a future major version of the API.
 * If V4_PREFERRED is specified, the DNS resolver will first perform a lookup for addresses in the
 * IPv4 family and fallback to a lookup for addresses in the IPv6 family. i.e., the callback
 * target will only get v6 addresses if there were NO v4 addresses to return.
 * If ALL is specified, the DNS resolver will perform a lookup for both IPv4 and IPv6 families,
 * and return all resolved addresses. When this is used, Happy Eyeballs will be enabled for
 * upstream connections. Refer to :ref:`Happy Eyeballs Support <arch_overview_happy_eyeballs>`
 * for more information.
 * For cluster types other than
 * :ref:`STRICT_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>` and
 * :ref:`LOGICAL_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`,
 * this setting is
 * ignored.
 * [#next-major-version: deprecate AUTO in favor of a V6_PREFERRED option.]
 *
 * Input (serialization) type: either the string name or the numeric proto
 * enum value (0=AUTO, 1=V4_ONLY, 2=V6_ONLY, 3=V4_PREFERRED, 4=ALL) is accepted.
 */
export type _envoy_config_cluster_v3_Cluster_DnsLookupFamily =
  | 'AUTO'
  | 0
  | 'V4_ONLY'
  | 1
  | 'V6_ONLY'
  | 2
  | 'V4_PREFERRED'
  | 3
  | 'ALL'
  | 4

/**
 * When V4_ONLY is selected, the DNS resolver will only perform a lookup for
 * addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will
 * only perform a lookup for addresses in the IPv6 family. If AUTO is
 * specified, the DNS resolver will first perform a lookup for addresses in
 * the IPv6 family and fallback to a lookup for addresses in the IPv4 family.
 * This is semantically equivalent to a non-existent V6_PREFERRED option.
 * AUTO is a legacy name that is more opaque than
 * necessary and will be deprecated in favor of V6_PREFERRED in a future major version of the API.
 * If V4_PREFERRED is specified, the DNS resolver will first perform a lookup for addresses in the
 * IPv4 family and fallback to a lookup for addresses in the IPv6 family. i.e., the callback
 * target will only get v6 addresses if there were NO v4 addresses to return.
 * If ALL is specified, the DNS resolver will perform a lookup for both IPv4 and IPv6 families,
 * and return all resolved addresses. When this is used, Happy Eyeballs will be enabled for
 * upstream connections. Refer to :ref:`Happy Eyeballs Support <arch_overview_happy_eyeballs>`
 * for more information.
 * For cluster types other than
 * :ref:`STRICT_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>` and
 * :ref:`LOGICAL_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`,
 * this setting is
 * ignored.
 * [#next-major-version: deprecate AUTO in favor of a V6_PREFERRED option.]
 *
 * Output (deserialization) type: always one of the string names.
 */
export type _envoy_config_cluster_v3_Cluster_DnsLookupFamily__Output = typeof _envoy_config_cluster_v3_Cluster_DnsLookupFamily[keyof typeof _envoy_config_cluster_v3_Cluster_DnsLookupFamily]
/**
 * Only valid when discovery type is EDS.
 *
 * Input (serialization) form: all fields optional.
 */
export interface _envoy_config_cluster_v3_Cluster_EdsClusterConfig {
  /**
   * Configuration for the source of EDS updates for this Cluster.
   */
  'eds_config'?: (_envoy_config_core_v3_ConfigSource | null);
  /**
   * Optional alternative to cluster name to present to EDS. This does not
   * have the same restrictions as cluster name, i.e. it may be arbitrary
   * length. This may be a xdstp:// URL.
   */
  'service_name'?: (string);
}
/**
 * Only valid when discovery type is EDS.
 *
 * Output (deserialization) form: fields always present; message fields null when unset.
 */
export interface _envoy_config_cluster_v3_Cluster_EdsClusterConfig__Output {
  /**
   * Configuration for the source of EDS updates for this Cluster.
   */
  'eds_config': (_envoy_config_core_v3_ConfigSource__Output | null);
  /**
   * Optional alternative to cluster name to present to EDS. This does not
   * have the same restrictions as cluster name, i.e. it may be arbitrary
   * length. This may be a xdstp:// URL.
   */
  'service_name': (string);
}
// Original file: deps/envoy-api/envoy/config/cluster/v3/cluster.proto

/**
 * The hash function used to hash hosts onto the ketama ring.
 *
 * Runtime value map for the RingHashLbConfig.HashFunction proto enum;
 * the input/output types below are derived from it.
 */
export const _envoy_config_cluster_v3_Cluster_RingHashLbConfig_HashFunction = {
  /**
   * Use `xxHash <https://github.com/Cyan4973/xxHash>`_, this is the default hash function.
   */
  XX_HASH: 'XX_HASH',
  /**
   * Use `MurmurHash2 <https://sites.google.com/site/murmurhash/>`_, this is compatible with
   * std:hash<string> in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled
   * on Linux and not macOS.
   */
  MURMUR_HASH_2: 'MURMUR_HASH_2',
} as const;

/**
 * The hash function used to hash hosts onto the ketama ring.
 *
 * Input (serialization) type: either the string name or the numeric proto
 * enum value is accepted.
 */
export type _envoy_config_cluster_v3_Cluster_RingHashLbConfig_HashFunction =
  /**
   * Use `xxHash <https://github.com/Cyan4973/xxHash>`_, this is the default hash function.
   */
  | 'XX_HASH'
  | 0
  /**
   * Use `MurmurHash2 <https://sites.google.com/site/murmurhash/>`_, this is compatible with
   * std:hash<string> in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled
   * on Linux and not macOS.
   */
  | 'MURMUR_HASH_2'
  | 1

/**
 * The hash function used to hash hosts onto the ketama ring.
 *
 * Output (deserialization) type: always one of the string names.
 */
export type _envoy_config_cluster_v3_Cluster_RingHashLbConfig_HashFunction__Output = typeof _envoy_config_cluster_v3_Cluster_RingHashLbConfig_HashFunction[keyof typeof _envoy_config_cluster_v3_Cluster_RingHashLbConfig_HashFunction]
// Original file: deps/envoy-api/envoy/config/cluster/v3/cluster.proto

/**
 * Refer to :ref:`load balancer type <arch_overview_load_balancing_types>` architecture
 * overview section for information on each type.
 *
 * Runtime value map for the LbPolicy proto enum; the companion input/output
 * types are derived from it.
 */
export const _envoy_config_cluster_v3_Cluster_LbPolicy = {
  /**
   * Refer to the :ref:`round robin load balancing
   * policy<arch_overview_load_balancing_types_round_robin>`
   * for an explanation.
   */
  ROUND_ROBIN: 'ROUND_ROBIN',
  /**
   * Refer to the :ref:`least request load balancing
   * policy<arch_overview_load_balancing_types_least_request>`
   * for an explanation.
   */
  LEAST_REQUEST: 'LEAST_REQUEST',
  /**
   * Refer to the :ref:`ring hash load balancing
   * policy<arch_overview_load_balancing_types_ring_hash>`
   * for an explanation.
   */
  RING_HASH: 'RING_HASH',
  /**
   * Refer to the :ref:`random load balancing
   * policy<arch_overview_load_balancing_types_random>`
   * for an explanation.
   */
  RANDOM: 'RANDOM',
  /**
   * Refer to the :ref:`Maglev load balancing policy<arch_overview_load_balancing_types_maglev>`
   * for an explanation.
   */
  MAGLEV: 'MAGLEV',
  /**
   * This load balancer type must be specified if the configured cluster provides a cluster
   * specific load balancer. Consult the configured cluster's documentation for whether to set
   * this option or not.
   */
  CLUSTER_PROVIDED: 'CLUSTER_PROVIDED',
  /**
   * Use the new :ref:`load_balancing_policy
   * <envoy_v3_api_field_config.cluster.v3.Cluster.load_balancing_policy>` field to determine the LB policy.
   * This has been deprecated in favor of using the :ref:`load_balancing_policy
   * <envoy_v3_api_field_config.cluster.v3.Cluster.load_balancing_policy>` field without
   * setting any value in :ref:`lb_policy<envoy_v3_api_field_config.cluster.v3.Cluster.lb_policy>`.
   */
  LOAD_BALANCING_POLICY_CONFIG: 'LOAD_BALANCING_POLICY_CONFIG',
} as const;
/**
* Refer to :ref:`load balancer type <arch_overview_load_balancing_types>` architecture
* overview section for information on each type.
*/
export type _envoy_config_cluster_v3_Cluster_LbPolicy =
/**
* Refer to the :ref:`round robin load balancing
* policy<arch_overview_load_balancing_types_round_robin>`
* for an explanation.
*/
| 'ROUND_ROBIN'
| 0
/**
* Refer to the :ref:`least request load balancing
* policy<arch_overview_load_balancing_types_least_request>`
* for an explanation.
*/
| 'LEAST_REQUEST'
| 1
/**
* Refer to the :ref:`ring hash load balancing
* policy<arch_overview_load_balancing_types_ring_hash>`
* for an explanation.
*/
| 'RING_HASH'
| 2
/**
* Refer to the :ref:`random load balancing
* policy<arch_overview_load_balancing_types_random>`
* for an explanation.
*/
| 'RANDOM'
| 3
/**
* Refer to the :ref:`Maglev load balancing policy<arch_overview_load_balancing_types_maglev>`
* for an explanation.
*/
| 'MAGLEV'
| 5
/**
* This load balancer type must be specified if the configured cluster provides a cluster
* specific load balancer. Consult the configured cluster's documentation for whether to set
* this option or not.
*/
| 'CLUSTER_PROVIDED'
| 6
/**
* Use the new :ref:`load_balancing_policy
* <envoy_v3_api_field_config.cluster.v3.Cluster.load_balancing_policy>` field to determine the LB policy.
* This has been deprecated in favor of using the :ref:`load_balancing_policy
* <envoy_v3_api_field_config.cluster.v3.Cluster.load_balancing_policy>` field without
* setting any value in :ref:`lb_policy<envoy_v3_api_field_config.cluster.v3.Cluster.lb_policy>`.
*/
| 'LOAD_BALANCING_POLICY_CONFIG'
| 7
/**
* Refer to :ref:`load balancer type <arch_overview_load_balancing_types>` architecture
* overview section for information on each type.
*/
export type _envoy_config_cluster_v3_Cluster_LbPolicy__Output = typeof _envoy_config_cluster_v3_Cluster_LbPolicy[keyof typeof _envoy_config_cluster_v3_Cluster_LbPolicy]
/**
 * Optionally divide the endpoints in this cluster into subsets defined by
 * endpoint metadata and selected by route and weighted cluster metadata.
 * [#next-free-field: 9]
 */
export interface _envoy_config_cluster_v3_Cluster_LbSubsetConfig {
  /**
   * The behavior used when no endpoint subset matches the selected route's
   * metadata. Defaults to
   * :ref:`NO_FALLBACK<envoy_v3_api_enum_value_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.NO_FALLBACK>`.
   */
  fallback_policy?: _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetFallbackPolicy;
  /**
   * The default subset of endpoints used during fallback if fallback_policy is
   * :ref:`DEFAULT_SUBSET<envoy_v3_api_enum_value_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.DEFAULT_SUBSET>`.
   * Each field here is compared to the matching LbEndpoint.Metadata under the
   * ``envoy.lb`` namespace. It is valid for no hosts to match, in which case
   * the behavior is the same as a fallback_policy of
   * :ref:`NO_FALLBACK<envoy_v3_api_enum_value_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.NO_FALLBACK>`.
   */
  default_subset?: _google_protobuf_Struct | null;
  /**
   * For each entry, LbEndpoint.Metadata's ``envoy.lb`` namespace is traversed
   * and a subset is created for each unique combination of key and value, e.g.:
   *
   * .. code-block:: json
   *
   *   { "subset_selectors": [
   *       { "keys": [ "version" ] },
   *       { "keys": [ "stage", "hardware_type" ] }
   *   ]}
   *
   * A subset is matched when the metadata from the selected route and weighted
   * cluster contains the same keys and values as the subset's metadata. The
   * same host may appear in multiple subsets.
   */
  subset_selectors?: _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetSelector[];
  /**
   * If true, routing to subsets takes the localities and locality weights of
   * the endpoints into account when making the routing decision.
   *
   * Pitfall: the traffic split after applying both a subset match and locality
   * weights may be undesirable. E.g. with a 50/50 split across localities X/Y
   * of 100 hosts each, if the subset LB leaves X with 1 selected host but Y
   * with 100, far more load lands on the single host in X than the EDS load
   * balancing assignment anticipated.
   */
  locality_weight_aware?: boolean;
  /**
   * When used with locality_weight_aware, scales each locality's weight by the
   * ratio of hosts in the subset vs hosts in the original subset, evening out
   * the load on a locality disproportionately affected by the subset predicate.
   */
  scale_locality_weight?: boolean;
  /**
   * If true, when a fallback policy is configured and its corresponding subset
   * fails to find a host, any host may be selected instead. Useful when the
   * default subset (used as the fallback policy) might become empty; the LB
   * will then attempt to select a host from the entire cluster.
   */
  panic_mode_any?: boolean;
  /**
   * If true, metadata for a key matches the corresponding endpoint metadata
   * when it matches the value exactly OR when the endpoint value is a list and
   * any of its elements matches the criteria.
   */
  list_as_any?: boolean;
  /**
   * Fallback mechanism that tries different route metadata until a host is
   * found. If the load balancing process — including all its mechanisms, such as
   * :ref:`fallback_policy<envoy_v3_api_field_config.cluster.v3.Cluster.LbSubsetConfig.fallback_policy>` —
   * fails to select a host, this policy decides if and how the process is
   * repeated using another metadata. Defaults to
   * :ref:`METADATA_NO_FALLBACK<envoy_v3_api_enum_value_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetMetadataFallbackPolicy.METADATA_NO_FALLBACK>`.
   */
  metadata_fallback_policy?: _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy;
}
/**
 * Optionally divide the endpoints in this cluster into subsets defined by
 * endpoint metadata and selected by route and weighted cluster metadata.
 * [#next-free-field: 9]
 */
export interface _envoy_config_cluster_v3_Cluster_LbSubsetConfig__Output {
  /**
   * The behavior used when no endpoint subset matches the selected route's
   * metadata. Defaults to
   * :ref:`NO_FALLBACK<envoy_v3_api_enum_value_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.NO_FALLBACK>`.
   */
  fallback_policy: _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetFallbackPolicy__Output;
  /**
   * The default subset of endpoints used during fallback if fallback_policy is
   * :ref:`DEFAULT_SUBSET<envoy_v3_api_enum_value_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.DEFAULT_SUBSET>`.
   * Each field here is compared to the matching LbEndpoint.Metadata under the
   * ``envoy.lb`` namespace. It is valid for no hosts to match, in which case
   * the behavior is the same as a fallback_policy of
   * :ref:`NO_FALLBACK<envoy_v3_api_enum_value_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.NO_FALLBACK>`.
   */
  default_subset: _google_protobuf_Struct__Output | null;
  /**
   * For each entry, LbEndpoint.Metadata's ``envoy.lb`` namespace is traversed
   * and a subset is created for each unique combination of key and value, e.g.:
   *
   * .. code-block:: json
   *
   *   { "subset_selectors": [
   *       { "keys": [ "version" ] },
   *       { "keys": [ "stage", "hardware_type" ] }
   *   ]}
   *
   * A subset is matched when the metadata from the selected route and weighted
   * cluster contains the same keys and values as the subset's metadata. The
   * same host may appear in multiple subsets.
   */
  subset_selectors: _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetSelector__Output[];
  /**
   * If true, routing to subsets takes the localities and locality weights of
   * the endpoints into account when making the routing decision.
   *
   * Pitfall: the traffic split after applying both a subset match and locality
   * weights may be undesirable. E.g. with a 50/50 split across localities X/Y
   * of 100 hosts each, if the subset LB leaves X with 1 selected host but Y
   * with 100, far more load lands on the single host in X than the EDS load
   * balancing assignment anticipated.
   */
  locality_weight_aware: boolean;
  /**
   * When used with locality_weight_aware, scales each locality's weight by the
   * ratio of hosts in the subset vs hosts in the original subset, evening out
   * the load on a locality disproportionately affected by the subset predicate.
   */
  scale_locality_weight: boolean;
  /**
   * If true, when a fallback policy is configured and its corresponding subset
   * fails to find a host, any host may be selected instead. Useful when the
   * default subset (used as the fallback policy) might become empty; the LB
   * will then attempt to select a host from the entire cluster.
   */
  panic_mode_any: boolean;
  /**
   * If true, metadata for a key matches the corresponding endpoint metadata
   * when it matches the value exactly OR when the endpoint value is a list and
   * any of its elements matches the criteria.
   */
  list_as_any: boolean;
  /**
   * Fallback mechanism that tries different route metadata until a host is
   * found. If the load balancing process — including all its mechanisms, such as
   * :ref:`fallback_policy<envoy_v3_api_field_config.cluster.v3.Cluster.LbSubsetConfig.fallback_policy>` —
   * fails to select a host, this policy decides if and how the process is
   * repeated using another metadata. Defaults to
   * :ref:`METADATA_NO_FALLBACK<envoy_v3_api_enum_value_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetMetadataFallbackPolicy.METADATA_NO_FALLBACK>`.
   */
  metadata_fallback_policy: _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy__Output;
}
// Original file: deps/envoy-api/envoy/config/cluster/v3/cluster.proto
/**
 * If NO_FALLBACK is selected, a result equivalent to no healthy hosts is
 * reported. If ANY_ENDPOINT is selected, any cluster endpoint may be returned
 * (subject to policy, health checks, etc). If DEFAULT_SUBSET is selected,
 * load balancing is performed over the endpoints matching the values from
 * the default_subset field.
 */
export const _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetFallbackPolicy = {
  NO_FALLBACK: 'NO_FALLBACK',
  ANY_ENDPOINT: 'ANY_ENDPOINT',
  DEFAULT_SUBSET: 'DEFAULT_SUBSET',
} as const;

/**
 * If NO_FALLBACK is selected, a result equivalent to no healthy hosts is
 * reported. If ANY_ENDPOINT is selected, any cluster endpoint may be returned
 * (subject to policy, health checks, etc). If DEFAULT_SUBSET is selected,
 * load balancing is performed over the endpoints matching the values from
 * the default_subset field.
 *
 * Input form: accepts either the symbolic enum name or its numeric value.
 */
export type _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetFallbackPolicy =
  | 'NO_FALLBACK' | 0
  | 'ANY_ENDPOINT' | 1
  | 'DEFAULT_SUBSET' | 2;

/**
 * Output form of the enum: always one of the string names declared on the
 * const object above.
 */
export type _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetFallbackPolicy__Output = typeof _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetFallbackPolicy[keyof typeof _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetFallbackPolicy];
// Original file: deps/envoy-api/envoy/config/cluster/v3/cluster.proto
export const _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy = {
  /**
   * No fallback. Route metadata will be used as-is.
   */
  METADATA_NO_FALLBACK: 'METADATA_NO_FALLBACK',
  /**
   * A special metadata key ``fallback_list`` provides variants of metadata to try.
   * The value of the ``fallback_list`` key has to be a list, and every list
   * element has to be a struct; each struct is merged with route metadata,
   * overriding keys that appear in both places. ``fallback_list`` entries are
   * used in order until a host is found. The ``fallback_list`` key itself is
   * removed from metadata before subset load balancing is performed.
   *
   * Example — for metadata:
   *
   * .. code-block:: yaml
   *
   *   version: 1.0
   *   fallback_list:
   *     - version: 2.0
   *       hardware: c64
   *     - hardware: c32
   *     - version: 3.0
   *
   * the following metadata is tried for load balancing, in order, until a host
   * is found:
   *
   * .. code-block:: json
   *
   *   {"version": "2.0", "hardware": "c64"}
   *
   * then
   *
   * .. code-block:: json
   *
   *   {"version": "1.0", "hardware": "c32"}
   *
   * and finally
   *
   * .. code-block:: json
   *
   *   {"version": "3.0"}
   */
  FALLBACK_LIST: 'FALLBACK_LIST',
} as const;

/**
 * Input form: accepts either the symbolic enum name or its numeric value.
 * See the member documentation on the const object above for semantics.
 */
export type _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy =
  /** No fallback. Route metadata will be used as-is. */
  | 'METADATA_NO_FALLBACK' | 0
  /**
   * Try variants of metadata supplied via the special ``fallback_list``
   * metadata key, in order, until a host is found.
   */
  | 'FALLBACK_LIST' | 1;

/**
 * Output form of the enum: always one of the string names declared on the
 * const object above.
 */
export type _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy__Output = typeof _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy[keyof typeof _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy];
/**
 * Specifications for subsets.
 */
export interface _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetSelector {
  /**
   * List of keys to match with the weighted cluster metadata.
   */
  keys?: string[];
  /**
   * Selects a mode of operation in which each subset has only one host. This
   * mode uses the same rules for choosing a host, but updating hosts is
   * faster, especially for large numbers of hosts.
   *
   * If a match is found to a host, that host will be used regardless of
   * priority levels.
   *
   * When enabled, configurations containing more than one host with the same
   * metadata value for the single key in ``keys`` will use only one of those
   * hosts; no requests will be routed to the others. The cluster gauge
   * :ref:`lb_subsets_single_host_per_subset_duplicate<config_cluster_manager_cluster_stats_subset_lb>`
   * indicates how many duplicates are present in the current configuration.
   */
  single_host_per_subset?: boolean;
  /**
   * The behavior used when no endpoint subset matches the selected route's
   * metadata.
   */
  fallback_policy?: _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy;
  /**
   * Subset of
   * :ref:`keys<envoy_v3_api_field_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.keys>`
   * used by the
   * :ref:`KEYS_SUBSET<envoy_v3_api_enum_value_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicy.KEYS_SUBSET>`
   * fallback policy. Must be a non-empty list if the KEYS_SUBSET fallback
   * policy is selected; for any other fallback policy it is unused and should
   * not be set. Only values also present in
   * :ref:`keys<envoy_v3_api_field_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.keys>`
   * are allowed, but ``fallback_keys_subset`` cannot be equal to ``keys``.
   */
  fallback_keys_subset?: string[];
}
/**
* Specifications for subsets.
*/
export interface _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetSelector__Output {