@@ -208,6 +208,27 @@ fn assert_enforce_nodes_cpu_capacity_post_condition(
     );
 }

+// ----------------------------------------------------
+// Phase 3
+// Place unassigned sources.
+//
+// We use a greedy algorithm as a simple heuristic here.
+//
+// We go through the sources in decreasing order of their load,
+// in two passes.
+//
+// In the first pass, we have a look at
+// the nodes with which there is an affinity.
+//
+// If one of them has room for all of the shards, then we assign all
+// of the shards to it.
+//
+// In the second pass, we just put as many shards as possible on the node
+// with the highest available capacity.
+//
+// If this algorithm fails to place all remaining shards, we inflate
+// the node capacities by 20% in the scheduling problem and start from the beginning.
+
 fn attempt_place_unassigned_shards(
     unassigned_shards: &[Source],
     problem: &SchedulingProblem,
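
As a rough illustration of the two-pass greedy heuristic described in the comment above, here is a self-contained sketch. The SketchSource struct, its affinity_nodes field, and the greedy_place function are simplified stand-ins invented for this illustration, not the actual types or signatures behind attempt_place_unassigned_shards.

// A rough sketch of the two-pass greedy placement described above. All names
// here (SketchSource, affinity_nodes, greedy_place) are simplified stand-ins.
#[derive(Clone)]
struct SketchSource {
    load_per_shard: u32,
    num_shards: u32,
    affinity_nodes: Vec<usize>, // indices of the nodes with an affinity for this source
}

// Returns (node, num_shards) assignments, or None when some shards cannot be
// placed, in which case the caller inflates node capacities by 20% and retries.
fn greedy_place(
    mut sources: Vec<SketchSource>,
    mut capacities: Vec<u32>, // available CPU millis per node
) -> Option<Vec<(usize, u32)>> {
    // Visit the sources in decreasing order of their total load.
    sources.sort_by_key(|source| std::cmp::Reverse(source.load_per_shard * source.num_shards));
    let mut assignments: Vec<(usize, u32)> = Vec::new();
    for source in &sources {
        debug_assert!(source.load_per_shard > 0);
        let total_load = source.load_per_shard * source.num_shards;
        // First pass: if an affinity node has room for all of the shards,
        // assign all of them to it.
        if let Some(&node) = source
            .affinity_nodes
            .iter()
            .find(|&&node| capacities[node] >= total_load)
        {
            capacities[node] -= total_load;
            assignments.push((node, source.num_shards));
            continue;
        }
        // Second pass: repeatedly put as many shards as possible on the node
        // with the highest available capacity.
        let mut remaining = source.num_shards;
        while remaining > 0 {
            let (node, capacity) = capacities
                .iter()
                .copied()
                .enumerate()
                .max_by_key(|&(_, capacity)| capacity)?;
            let placable = (capacity / source.load_per_shard).min(remaining);
            if placable == 0 {
                return None; // no node can take even one more shard
            }
            capacities[node] -= placable * source.load_per_shard;
            assignments.push((node, placable));
            remaining -= placable;
        }
    }
    Some(assignments)
}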
@@ -262,26 +283,6 @@ fn place_unassigned_shards_with_affinity(
     }
 }

-// ----------------------------------------------------
-// Phase 3
-// Place unassigned sources.
-//
-// We use a greedy algorithm as a simple heuristic here.
-//
-// We go through the sources in decreasing order of their load,
-// in two passes.
-//
-// In the first pass, we have a look at
-// the nodes with which there is an affinity.
-//
-// If one of them has room for all of the shards, then we assign all
-// of the shards to it.
-//
-// In the second pass, we just put as many shards as possible on the node
-// with the highest available capacity.
-//
-// If this algorithm fails to place all remaining shards, we inflate
-// the node capacities by 20% in the scheduling problem and start from the beginning.
 #[must_use]
 fn place_unassigned_shards_ignoring_affinity(
     mut problem: SchedulingProblem,
@@ -358,10 +359,6 @@ fn place_unassigned_shards_single_source(
         let num_placable_shards = available_capacity.cpu_millis() / source.load_per_shard;
         let num_shards_to_place = num_placable_shards.min(num_shards);
         // Update the solution, the shard load, and the number of shards to place.
-        if num_shards_to_place == 0u32 {
-            // No need to fill indexer_assignments with empty assignments.
-            continue;
-        }
         solution.indexer_assignments[indexer_ord]
             .add_shards(source.source_ord, num_shards_to_place);
         num_shards -= num_shards_to_place;
@@ -781,7 +778,16 @@ mod tests {
     proptest! {
         #[test]
         fn test_proptest_post_conditions((problem, solution) in problem_solution_strategy()) {
-            solve(problem, solution);
+            let solution_1 = solve(problem.clone(), solution);
+            let solution_2 = solve(problem.clone(), solution_1.clone());
+            // TODO: This assert actually fails for some scenarios. We consider it
+            // fine for now, as long as the solution does not change again during
+            // the next resolution:
+            // let has_solution_changed_once = solution_1 != solution_2;
+            // assert!(!has_solution_changed_once, "solution changed for the same problem\nSolution 1: {solution_1:?}\nSolution 2: {solution_2:?}");
+            let solution_3 = solve(problem, solution_2.clone());
+            let has_solution_changed_again = solution_2 != solution_3;
+            assert!(!has_solution_changed_again, "solution unstable!\nSolution 1: {solution_1:?}\nSolution 2: {solution_2:?}\nSolution 3: {solution_3:?}");
         }
     }

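Note that the new proptest body effectively checks that solve converges to a fixed point by the second re-solve: solution_1 may still differ from solution_2, but solution_2 must equal solution_3. Expressed generically (the reaches_fixed_point helper below is hypothetical, not part of this PR):

// Hypothetical helper: repeatedly apply step and report whether the value
// stops changing within max_iterations applications.
fn reaches_fixed_point<T, F>(mut value: T, step: F, max_iterations: usize) -> bool
where
    T: Clone + PartialEq,
    F: Fn(T) -> T,
{
    for _ in 0..max_iterations {
        let next = step(value.clone());
        if next == value {
            return true;
        }
        value = next;
    }
    false
}

Assuming solve is deterministic, the test's assertion is equivalent to requiring reaches_fixed_point(solution_1, |s| solve(problem.clone(), s), 2).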