Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm… · bsd-unix/linux@369da7f

Commit 369da7f

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Two load-balancing fixes for cgroups-intense workloads"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix calc_cfs_shares() fixed point arithmetics width confusion
  sched/fair: Fix effective_load() to consistently use smoothed load
2 parents 612807f + ea1dc6f commit 369da7f
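A note on what the two fixes have in common, reading only from the hunks below: both keep the inputs to the group-shares calculation in consistent units. A task group's weight on CPU i is apportioned roughly as

    shares_i = tg->shares * \frac{load_i}{tg\_weight}, \qquad tg\_weight = tg->load\_avg - contrib_i + load_i

The calc_cfs_shares() patch puts load_i on the same fixed-point scale as tg->load_avg (via scale_load_down()); the effective_load() patch builds the analogous per-level sums from the smoothed load (cfs_rq_load_avg() and tg->load_avg) throughout, rather than mixing smoothed and instantaneous weights.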

File tree

1 file changed: +20 −22 lines changed

kernel/sched/fair.c

Lines changed: 20 additions & 22 deletions
@@ -735,8 +735,6 @@ void post_init_entity_util_avg(struct sched_entity *se)
         }
 }
 
-static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
-static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
 #else
 void init_entity_runnable_average(struct sched_entity *se)
 {
@@ -2499,28 +2497,22 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 # ifdef CONFIG_SMP
-static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
-        long tg_weight;
+        long tg_weight, load, shares;
 
         /*
-         * Use this CPU's real-time load instead of the last load contribution
-         * as the updating of the contribution is delayed, and we will use the
-         * the real-time load to calc the share. See update_tg_load_avg().
+         * This really should be: cfs_rq->avg.load_avg, but instead we use
+         * cfs_rq->load.weight, which is its upper bound. This helps ramp up
+         * the shares for small weight interactive tasks.
         */
-        tg_weight = atomic_long_read(&tg->load_avg);
-        tg_weight -= cfs_rq->tg_load_avg_contrib;
-        tg_weight += cfs_rq->load.weight;
+        load = scale_load_down(cfs_rq->load.weight);
 
-        return tg_weight;
-}
-
-static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
-{
-        long tg_weight, load, shares;
+        tg_weight = atomic_long_read(&tg->load_avg);
 
-        tg_weight = calc_tg_weight(tg, cfs_rq);
-        load = cfs_rq->load.weight;
+        /* Ensure tg_weight >= load */
+        tg_weight -= cfs_rq->tg_load_avg_contrib;
+        tg_weight += load;
 
         shares = (tg->shares * load);
         if (tg_weight)
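As a side note on the hunk above: the "width confusion" is that, on a 64-bit kernel with the high-resolution load scale enabled, cfs_rq->load.weight carries an extra SCHED_FIXEDPOINT_SHIFT (10) bits of resolution, while tg->load_avg and tg_load_avg_contrib are accumulated from scale_load_down()'d weights, so mixing the two inflates this CPU's apparent load. The userspace sketch below is only an illustration of that mismatch under those assumptions; it is not kernel code, and the numbers are made up.

/*
 * Userspace illustration (NOT kernel code) of the fixed-point width
 * confusion addressed by the calc_cfs_shares() change above.  Assumes a
 * 64-bit kernel where scale_load()/scale_load_down() shift weights by
 * SCHED_FIXEDPOINT_SHIFT = 10; the load figures below are illustrative.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define SCHED_FIXEDPOINT_SHIFT  10
#define scale_load(w)           ((w) << SCHED_FIXEDPOINT_SHIFT)
#define scale_load_down(w)      ((w) >> SCHED_FIXEDPOINT_SHIFT)

/* The mixing done by calc_cfs_shares(), with 'load' supplied by the caller. */
static int64_t shares(int64_t tg_shares, int64_t tg_load_avg,
                      int64_t contrib, int64_t load)
{
        int64_t tg_weight = tg_load_avg - contrib + load;
        int64_t s = tg_shares * load;

        return tg_weight ? s / tg_weight : s;
}

int main(void)
{
        int64_t nice0       = 1024;                     /* one nice-0 task, load_avg units  */
        int64_t weight      = scale_load(nice0);        /* cfs_rq->load.weight on 64-bit    */
        int64_t tg_load_avg = 3 * nice0;                /* group busy on three CPUs total   */
        int64_t contrib     = nice0;                    /* this CPU's load_avg contribution */
        int64_t tg_shares   = scale_load(nice0);        /* default group shares             */

        /* Before: raw load.weight (<< 10) mixed into load_avg-scaled sums. */
        printf("old mixing: %" PRId64 "\n",
               shares(tg_shares, tg_load_avg, contrib, weight));

        /* After: scale_load_down() keeps numerator and denominator consistent. */
        printf("new mixing: %" PRId64 "\n",
               shares(tg_shares, tg_load_avg, contrib, scale_load_down(weight)));

        return 0;
}

With these made-up numbers the old mixing hands this CPU almost the entire group weight even though it runs only one of three equal tasks, while the consistent version lands near tg->shares / 3.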
@@ -2539,6 +2531,7 @@ static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
         return tg->shares;
 }
 # endif /* CONFIG_SMP */
+
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
                             unsigned long weight)
 {
@@ -4946,19 +4939,24 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
                 return wl;
 
         for_each_sched_entity(se) {
-                long w, W;
+                struct cfs_rq *cfs_rq = se->my_q;
+                long W, w = cfs_rq_load_avg(cfs_rq);
 
-                tg = se->my_q->tg;
+                tg = cfs_rq->tg;
 
                 /*
                  * W = @wg + \Sum rw_j
                  */
-                W = wg + calc_tg_weight(tg, se->my_q);
+                W = wg + atomic_long_read(&tg->load_avg);
+
+                /* Ensure \Sum rw_j >= rw_i */
+                W -= cfs_rq->tg_load_avg_contrib;
+                W += w;
 
                 /*
                  * w = rw_i + @wl
                  */
-                w = cfs_rq_load_avg(se->my_q) + wl;
+                w += wl;
 
                 /*
                  * wl = S * s'_i; see (2)
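Restating the arithmetic of the rewritten loop body above, with the comments in the hunk as the only source (the wl = S * s'_i step and the equation (2) it cites continue past the end of the hunk, so the last step is inferred rather than shown): at each level of the hierarchy the loop now computes

    W = wg + \sum_j rw_j, \qquad w = rw_i + wl, \qquad wl \leftarrow S \cdot s'_i \approx S \cdot \frac{w}{W}

where rw_i is now cfs_rq_load_avg(cfs_rq) and \sum_j rw_j is reconstructed from tg->load_avg (adjusted so that \sum_j rw_j >= rw_i), i.e. every term at a given level comes from the same smoothed load signal instead of mixing the smoothed average with the instantaneous runqueue weight.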

0 commit comments