@@ -735,8 +735,6 @@ void post_init_entity_util_avg(struct sched_entity *se)
 	}
 }
 
-static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
-static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
 #else
 void init_entity_runnable_average(struct sched_entity *se)
 {
@@ -2499,28 +2497,22 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 # ifdef CONFIG_SMP
-static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
-	long tg_weight;
+	long tg_weight, load, shares;
 
 	/*
-	 * Use this CPU's real-time load instead of the last load contribution
-	 * as the updating of the contribution is delayed, and we will use the
-	 * the real-time load to calc the share. See update_tg_load_avg().
+	 * This really should be: cfs_rq->avg.load_avg, but instead we use
+	 * cfs_rq->load.weight, which is its upper bound. This helps ramp up
+	 * the shares for small weight interactive tasks.
 	 */
-	tg_weight = atomic_long_read(&tg->load_avg);
-	tg_weight -= cfs_rq->tg_load_avg_contrib;
-	tg_weight += cfs_rq->load.weight;
+	load = scale_load_down(cfs_rq->load.weight);
 
-	return tg_weight;
-}
-
-static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
-{
-	long tg_weight, load, shares;
+	tg_weight = atomic_long_read(&tg->load_avg);
 
-	tg_weight = calc_tg_weight(tg, cfs_rq);
-	load = cfs_rq->load.weight;
+	/* Ensure tg_weight >= load */
+	tg_weight -= cfs_rq->tg_load_avg_contrib;
+	tg_weight += load;
 
 	shares = (tg->shares * load);
 	if (tg_weight)
@@ -2539,6 +2531,7 @@ static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 	return tg->shares;
 }
 # endif /* CONFIG_SMP */
+
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			    unsigned long weight)
 {
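
The new shares computation above reduces to shares = tg->shares * load / tg_weight, where tg_weight is the group-wide load_avg with this cfs_rq's possibly stale contribution swapped for its current (scaled-down) queue weight, which also guarantees tg_weight >= load. Below is a minimal, standalone sketch of that arithmetic with made-up numbers; sketch_calc_shares() and its parameters are hypothetical stand-ins for the cfs_rq/task_group fields, and the clamping of the result that the real SMP calc_cfs_shares() performs afterwards is not shown in this diff and omitted here.

/*
 * Hypothetical, standalone illustration -- not kernel code. The parameters
 * map onto the fields used by calc_cfs_shares() in the hunk above.
 */
#include <stdio.h>

static long sketch_calc_shares(long tg_shares,   /* tg->shares                           */
			       long tg_load_avg, /* atomic_long_read(&tg->load_avg)      */
			       long contrib,     /* cfs_rq->tg_load_avg_contrib          */
			       long load)        /* scale_load_down(cfs_rq->load.weight) */
{
	long tg_weight, shares;

	/* Swap this cfs_rq's stale contribution for its current weight;
	 * this also ensures tg_weight >= load. */
	tg_weight  = tg_load_avg;
	tg_weight -= contrib;
	tg_weight += load;

	shares = tg_shares * load;
	if (tg_weight)
		shares /= tg_weight;

	return shares;
}

int main(void)
{
	/* Example: group weight 1024; group-wide load_avg 3072, of which this
	 * CPU last contributed 512; current queue weight 1024. */
	printf("shares = %ld\n", sketch_calc_shares(1024, 3072, 512, 1024));
	return 0;	/* prints "shares = 292" (1024 * 1024 / 3584) */
}
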
@@ -4946,19 +4939,24 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 		return wl;
 
 	for_each_sched_entity(se) {
-		long w, W;
+		struct cfs_rq *cfs_rq = se->my_q;
+		long W, w = cfs_rq_load_avg(cfs_rq);
 
-		tg = se->my_q->tg;
+		tg = cfs_rq->tg;
 
 		/*
 		 * W = @wg + \Sum rw_j
 		 */
-		W = wg + calc_tg_weight(tg, se->my_q);
+		W = wg + atomic_long_read(&tg->load_avg);
+
+		/* Ensure \Sum rw_j >= rw_i */
+		W -= cfs_rq->tg_load_avg_contrib;
+		W += w;
 
 		/*
 		 * w = rw_i + @wl
 		 */
-		w = cfs_rq_load_avg(se->my_q) + wl;
+		w += wl;
 
 		/*
 		 * wl = S * s'_i; see (2)
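
The effective_load() hunk above expresses both terms in the same smoothed-load metric: w (rw_i) now comes from cfs_rq_load_avg(), and W substitutes that same value for the cfs_rq's possibly stale tg_load_avg_contrib, instead of mixing in calc_tg_weight()'s instantaneous load.weight. A small, self-contained sketch of the per-iteration arithmetic, with purely illustrative numbers (all names below are local stand-ins, not kernel symbols):

/*
 * Hypothetical, standalone illustration of the W and w terms computed per
 * loop iteration in the new effective_load(); only the arithmetic follows
 * the hunk above.
 */
#include <stdio.h>

int main(void)
{
	long wg = 0;			/* @wg: weight removed elsewhere     */
	long wl = 512;			/* @wl: weight change on this cpu    */

	long tg_load_avg = 4096;	/* atomic_long_read(&tg->load_avg)   */
	long contrib     = 1024;	/* cfs_rq->tg_load_avg_contrib       */
	long rw_i        = 1536;	/* cfs_rq_load_avg(cfs_rq), smoothed */

	/* W = @wg + \Sum rw_j, with this cpu's stale contribution replaced
	 * by its current smoothed load; ensures \Sum rw_j >= rw_i. */
	long W = wg + tg_load_avg;
	W -= contrib;
	W += rw_i;

	/* w = rw_i + @wl */
	long w = rw_i + wl;

	printf("W = %ld, w = %ld\n", W, w);	/* W = 4608, w = 2048 */
	return 0;
}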