summary refs log tree commit diff
path: root/kernel/sched_fair.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r-- kernel/sched_fair.c | 23
1 files changed, 9 insertions, 14 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index acf16a8d934b..722d392b0dac 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1348,7 +1348,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
*/
static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags)
{
- struct sched_domain *tmp, *sd = NULL;
+ struct sched_domain *tmp, *shares = NULL, *sd = NULL;
int cpu = smp_processor_id();
int prev_cpu = task_cpu(p);
int new_cpu = cpu;
@@ -1387,22 +1387,14 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags)
break;
}
- switch (sd_flag) {
- case SD_BALANCE_WAKE:
- if (!sched_feat(LB_WAKEUP_UPDATE))
- break;
- case SD_BALANCE_FORK:
- case SD_BALANCE_EXEC:
- if (root_task_group_empty())
- break;
- update_shares(tmp);
- default:
- break;
- }
-
if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
+ if (sched_feat(LB_SHARES_UPDATE)) {
+ update_shares(tmp);
+ shares = tmp;
+ }
+
if (wake_affine(tmp, p, sync)) {
new_cpu = cpu;
goto out;
@@ -1417,6 +1409,9 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags)
sd = tmp;
}
+ if (sd && sd != shares && sched_feat(LB_SHARES_UPDATE))
+ update_shares(sd);
+
while (sd) {
struct sched_group *group;
int weight;