Skip to content

Commit 3934e6e

Browse files
authored
[Bugfix] Add RF Toleration (#860)
1 parent 1cef3cf commit 3934e6e

File tree

7 files changed

+63
-42
lines changed

7 files changed

+63
-42
lines changed

CHANGELOG.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
- Allow to disable ClusterScalingIntegration and add proper Scheduled label to pods
1010
- Add additional timeout parameters and kubernetes batch size
1111
- Limit parallel Backup uploads
12+
- Bugfix - Adjust Cluster Scaling Integration logic
1213

1314
## [1.2.5](https://github.com/arangodb/kube-arangodb/tree/1.2.5) (2021-10-25)
1415
- Split & Unify Lifecycle management functionality

main.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -168,7 +168,7 @@ func init() {
168168
f.DurationVar(&operatorTimeouts.k8s, "timeout.k8s", globals.DefaultKubernetesTimeout, "The request timeout to the kubernetes")
169169
f.DurationVar(&operatorTimeouts.arangoD, "timeout.arangod", globals.DefaultArangoDTimeout, "The request timeout to the ArangoDB")
170170
f.DurationVar(&operatorTimeouts.reconciliation, "timeout.reconciliation", globals.DefaultReconciliationTimeout, "The reconciliation timeout to the ArangoDB CR")
171-
f.BoolVar(&operatorOptions.scalingIntegrationEnabled, "internal.scaling-integration", false, "Enable Scaling Integration")
171+
f.BoolVar(&operatorOptions.scalingIntegrationEnabled, "internal.scaling-integration", true, "Enable Scaling Integration")
172172
f.Int64Var(&operatorKubernetesOptions.maxBatchSize, "kubernetes.max-batch-size", globals.DefaultKubernetesRequestBatchSize, "Size of batch during objects read")
173173
f.IntVar(&operatorBackup.concurrentUploads, "backup-concurrent-uploads", globals.DefaultBackupConcurrentUploads, "Number of concurrent uploads per deployment")
174174
features.Init(&cmdMain)

pkg/apis/deployment/v1/topology_member_status.go

Lines changed: 3 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -22,17 +22,8 @@ package v1
2222

2323
import "k8s.io/apimachinery/pkg/types"
2424

25-
type TopologyMemberStatusInitPhase string
26-
27-
const (
28-
TopologyMemberStatusInitPhaseNone TopologyMemberStatusInitPhase = ""
29-
TopologyMemberStatusInitPhasePending TopologyMemberStatusInitPhase = "pending"
30-
TopologyMemberStatusInitPhaseOK TopologyMemberStatusInitPhase = "ok"
31-
)
32-
3325
// TopologyMemberStatus records the topology placement of a single
// deployment member.
type TopologyMemberStatus struct {
	// ID is the identifier of the topology this member belongs to;
	// it is compared against the deployment-level topology to decide
	// ownership (see IsTopologyOwned usage in pod inspection).
	ID types.UID `json:"id"`
	// Zone is the index of the zone this member is assigned to.
	// Serialized under the JSON key "rack" — presumably a legacy key;
	// TODO confirm before renaming.
	Zone int `json:"rack"`
	// Label is the topology label value recorded for this member, if any.
	// NOTE(review): assumed to come from the node's topology label —
	// verify against the code that assigns it.
	Label string `json:"label,omitempty"`
}

pkg/apis/deployment/v2alpha1/topology_member_status.go

Lines changed: 3 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -22,17 +22,8 @@ package v2alpha1
2222

2323
import "k8s.io/apimachinery/pkg/types"
2424

25-
type TopologyMemberStatusInitPhase string
26-
27-
const (
28-
TopologyMemberStatusInitPhaseNone TopologyMemberStatusInitPhase = ""
29-
TopologyMemberStatusInitPhasePending TopologyMemberStatusInitPhase = "pending"
30-
TopologyMemberStatusInitPhaseOK TopologyMemberStatusInitPhase = "ok"
31-
)
32-
3325
// TopologyMemberStatus records the topology placement of a single
// deployment member.
type TopologyMemberStatus struct {
	// ID is the identifier of the topology this member belongs to;
	// it is compared against the deployment-level topology to decide
	// ownership (see IsTopologyOwned usage in pod inspection).
	ID types.UID `json:"id"`
	// Zone is the index of the zone this member is assigned to.
	// Serialized under the JSON key "rack" — presumably a legacy key;
	// TODO confirm before renaming.
	Zone int `json:"rack"`
	// Label is the topology label value recorded for this member, if any.
	// NOTE(review): assumed to come from the node's topology label —
	// verify against the code that assigns it.
	Label string `json:"label,omitempty"`
}

pkg/deployment/cluster_scaling_integration.go

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -252,8 +252,7 @@ func (ci *clusterScalingIntegration) updateClusterServerCount(ctx context.Contex
252252
var coordinatorCountPtr *int
253253
var dbserverCountPtr *int
254254

255-
coordinatorCount := spec.Coordinators.GetCount()
256-
dbserverCount := spec.DBServers.GetCount()
255+
coordinatorCount, dbserverCount := ci.getNumbersOfServers()
257256

258257
if spec.Coordinators.GetMaxCount() == spec.Coordinators.GetMinCount() {
259258
coordinatorCountPtr = nil
@@ -335,8 +334,11 @@ func (ci *clusterScalingIntegration) EnableScalingCluster(ctx context.Context) e
335334
}
336335

337336
// setNumberOfServers pushes the current member counts (coordinators and
// dbservers, taken from the deployment status) to the cluster via the
// deployment context.
func (ci *clusterScalingIntegration) setNumberOfServers(ctx context.Context) error {
	coordinators, dbservers := ci.getNumbersOfServers()
	return ci.depl.SetNumberOfServers(ctx, &coordinators, &dbservers)
}
340+
341+
// getNumbersOfServers returns the number of coordinator and dbserver
// members currently present in the deployment status, in that order.
// These are actual member counts, not the desired counts from the spec.
func (ci *clusterScalingIntegration) getNumbersOfServers() (int, int) {
	// NOTE(review): the second return value of getStatus is deliberately
	// discarded; if getStatus can fail, a zero-value status would yield
	// (0, 0) here — confirm callers tolerate that.
	status, _ := ci.depl.getStatus()
	return len(status.Members.Coordinators), len(status.Members.DBServers)
}

pkg/deployment/resources/pod_creator.go

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -561,9 +561,6 @@ func (r *Resources) createPodForMember(ctx context.Context, cachedStatus inspect
561561
} else {
562562
m.Conditions.Update(api.ConditionTypeTopologyAware, false, "Topology Aware", "Topology invalid")
563563
}
564-
if m.Topology.InitPhase == api.TopologyMemberStatusInitPhaseNone {
565-
m.Topology.InitPhase = api.TopologyMemberStatusInitPhasePending
566-
}
567564
} else {
568565
m.Conditions.Update(api.ConditionTypeTopologyAware, false, "Topology spec missing", "Topology spec missing")
569566
}

pkg/deployment/resources/pod_inspector.go

Lines changed: 48 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -189,17 +189,39 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter
189189
if k8sutil.IsPodScheduled(pod) {
190190
if _, ok := pod.Labels[k8sutil.LabelKeyArangoScheduled]; !ok {
191191
// Adding scheduled label to the pod
192-
l := pod.Labels
193-
if l == nil {
194-
l = map[string]string{}
195-
}
196-
l[k8sutil.LabelKeyArangoScheduled] = "1"
192+
l := addLabel(pod.Labels, k8sutil.LabelKeyArangoScheduled, "1")
193+
197194
if err := r.context.ApplyPatchOnPod(ctx, pod, patch.ItemReplace(patch.NewPath("metadata", "labels"), l)); err != nil {
198195
log.Error().Err(err).Msgf("Unable to update scheduled labels")
199196
}
200197
}
201198
}
202199

200+
// Topology labels
201+
tv, tok := pod.Labels[k8sutil.LabelKeyArangoTopology]
202+
zv, zok := pod.Labels[k8sutil.LabelKeyArangoZone]
203+
204+
if t, ts := status.Topology, memberStatus.Topology; t.Enabled() && t.IsTopologyOwned(ts) {
205+
if tid, tz := string(t.ID), fmt.Sprintf("%d", ts.Zone); !tok || !zok || tv != tid || zv != tz {
206+
l := addLabel(pod.Labels, k8sutil.LabelKeyArangoTopology, tid)
207+
l = addLabel(l, k8sutil.LabelKeyArangoZone, tz)
208+
209+
if err := r.context.ApplyPatchOnPod(ctx, pod, patch.ItemReplace(patch.NewPath("metadata", "labels"), l)); err != nil {
210+
log.Error().Err(err).Msgf("Unable to update topology labels")
211+
}
212+
}
213+
} else {
214+
if tok || zok {
215+
l := removeLabel(pod.Labels, k8sutil.LabelKeyArangoTopology)
216+
l = removeLabel(l, k8sutil.LabelKeyArangoZone)
217+
218+
if err := r.context.ApplyPatchOnPod(ctx, pod, patch.ItemReplace(patch.NewPath("metadata", "labels"), l)); err != nil {
219+
log.Error().Err(err).Msgf("Unable to remove topology labels")
220+
}
221+
}
222+
}
223+
// End of Topology labels
224+
203225
if k8sutil.IsContainerReady(pod, k8sutil.ServerContainerName) {
204226
// Pod is now ready
205227
if memberStatus.Conditions.Update(api.ConditionTypeReady, true, "Pod Ready", "") {
@@ -215,10 +237,6 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter
215237
memberStatus.Topology.Label = label
216238
}
217239
}
218-
219-
if memberStatus.Topology.InitPhase == api.TopologyMemberStatusInitPhasePending {
220-
memberStatus.Topology.InitPhase = api.TopologyMemberStatusInitPhaseOK
221-
}
222240
}
223241
}
224242

@@ -366,3 +384,24 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter
366384
}
367385
return nextInterval, nil
368386
}
387+
388+
// addLabel sets key to value in labels and returns the resulting map.
// A non-nil input map is mutated in place; a nil input yields a freshly
// allocated single-entry map, so the return value must always be used.
func addLabel(labels map[string]string, key, value string) map[string]string {
	if labels == nil {
		labels = map[string]string{}
	}
	labels[key] = value
	return labels
}
398+
399+
// removeLabel deletes key from labels and returns the resulting map.
// A non-nil input map is mutated in place; a nil input yields a fresh
// empty (non-nil) map, so the return value must always be used.
func removeLabel(labels map[string]string, key string) map[string]string {
	if labels != nil {
		delete(labels, key)
		return labels
	}
	return map[string]string{}
}

0 commit comments

Comments
 (0)