| author | Chris Redpath <chris.redpath@arm.com> | 2015-02-03 16:46:56 +0900 |
|---|---|---|
| committer | Seung-Woo Kim <sw0312.kim@samsung.com> | 2016-12-14 13:41:45 +0900 |
| commit | caf11f7dc2d6c3d4d23f84a1b0e37d69e6e44b51 (patch) | |
| tree | 6f3eeacc687beb53eb4df5c07e7ae4258a311c50 /kernel | |
| parent | bbb4f3b399630b7a703a60174383ab499e1ced97 (diff) | |
HMP: Access runqueue task clocks directly.
Avoids accesses through cfs_rq going bad when the cpu_rq doesn't
have a cfs member.
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Liviu Dudau <liviu.dudau@arm.com>
Signed-off-by: Jon Medhurst <tixy@linaro.org>
[k.kozlowski: rebased on 4.1, no signed-off-by of previous committer]
Signed-off-by: Krzysztof Kozlowski <k.kozlowski@samsung.com>
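
The change is the same at all four call sites in the diff below: rather than deriving the task clock through the CPU's cfs runqueue (which requires `cpu_rq(cpu)->cfs` to be usable), the clock is read directly from the runqueue of the first online CPU. Side by side, extracted from the hunks that follow (kernel-internal code, not a standalone program):

```c
/* Before: goes through the cfs runqueue, which goes bad when the
 * cpu_rq doesn't have a cfs member. */
struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
u64 now = cfs_rq_clock_task(cfs_rq);

/* After: read clock_task directly off a runqueue, bypassing cfs_rq.
 * As the in-tree comment says, this is a hack - it always uses the
 * clock of the first online CPU rather than the target CPU's own. */
u64 now = cpu_rq(cpumask_first(cpu_online_mask))->clock_task;
```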
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched/fair.c | 18
1 file changed, 8 insertions, 10 deletions
```diff
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 26965afd6bd7..280b650c583a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5264,9 +5264,8 @@ static inline unsigned int hmp_select_slower_cpu(struct task_struct *tsk,
 
 static inline void hmp_next_up_delay(struct sched_entity *se, int cpu)
 {
-	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
-
-	u64 now = cfs_rq_clock_task(cfs_rq);
+	/* hack - always use clock from first online CPU */
+	u64 now = cpu_rq(cpumask_first(cpu_online_mask))->clock_task;
 	se->avg.hmp_last_up_migration = now;
 	se->avg.hmp_last_down_migration = 0;
 	cpu_rq(cpu)->avg.hmp_last_up_migration = now;
@@ -5275,9 +5274,8 @@ static inline void hmp_next_up_delay(struct sched_entity *se, int cpu)
 
 static inline void hmp_next_down_delay(struct sched_entity *se, int cpu)
 {
-	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
-
-	u64 now = cfs_rq_clock_task(cfs_rq);
+	/* hack - always use clock from first online CPU */
+	u64 now = cpu_rq(cpumask_first(cpu_online_mask))->clock_task;
 	se->avg.hmp_last_down_migration = now;
 	se->avg.hmp_last_up_migration = 0;
 	cpu_rq(cpu)->avg.hmp_last_down_migration = now;
@@ -8556,7 +8554,6 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
 static unsigned int hmp_up_migration(int cpu, int *target_cpu, struct sched_entity *se)
 {
 	struct task_struct *p = task_of(se);
-	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
 	u64 now;
 
 	if (target_cpu)
@@ -8574,7 +8571,8 @@ static unsigned int hmp_up_migration(int cpu, int *target_cpu, struct sched_enti
 		return 0;
 
 	/* Let the task load settle before doing another up migration */
-	now = cfs_rq_clock_task(cfs_rq);
+	/* hack - always use clock from first online CPU */
+	now = cpu_rq(cpumask_first(cpu_online_mask))->clock_task;
 	if (((now - se->avg.hmp_last_up_migration) >> 10)
 					< hmp_next_up_threshold)
 		return 0;
@@ -8597,7 +8595,6 @@ static unsigned int hmp_up_migration(int cpu, int *target_cpu, struct sched_enti
 static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
 {
 	struct task_struct *p = task_of(se);
-	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
 	u64 now;
 
 	if (hmp_cpu_is_slowest(cpu))
@@ -8613,7 +8610,8 @@ static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
 #endif
 
 	/* Let the task load settle before doing another down migration */
-	now = cfs_rq_clock_task(cfs_rq);
+	/* hack - always use clock from first online CPU */
+	now = cpu_rq(cpumask_first(cpu_online_mask))->clock_task;
 	if (((now - se->avg.hmp_last_down_migration) >> 10)
 					< hmp_next_down_threshold)
 		return 0;
```
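
For readers outside the kernel tree, the settle-time test these hunks touch is plain fixed-point arithmetic: `clock_task` advances in nanoseconds, and shifting the delta right by 10 divides it by 1024, giving an approximate microsecond count that is compared against `hmp_next_up_threshold` / `hmp_next_down_threshold`. A minimal userspace mock of that check, assuming an illustrative threshold value and made-up helper names:

```c
#include <stdio.h>
#include <stdint.h>

/* Illustrative threshold, not the kernel's default: require ~4096
 * shifted units (~4.2 ms) between successive up migrations. */
static const uint64_t hmp_next_up_threshold = 4096;

/* Mirrors the kernel test:
 *   ((now - se->avg.hmp_last_up_migration) >> 10) < hmp_next_up_threshold
 * which denies the migration while the task load is still settling. */
static int up_migration_allowed(uint64_t now_ns, uint64_t last_up_ns)
{
	return ((now_ns - last_up_ns) >> 10) >= hmp_next_up_threshold;
}

int main(void)
{
	uint64_t last = 1000000;  /* time of last up migration, in ns */

	/* 2 ms later: 2000000 >> 10 = 1953 < 4096, still settling */
	printf("after 2 ms: %s\n",
	       up_migration_allowed(last + 2000000, last) ? "allowed" : "denied");

	/* 8 ms later: 8000000 >> 10 = 7812 >= 4096, allowed */
	printf("after 8 ms: %s\n",
	       up_migration_allowed(last + 8000000, last) ? "allowed" : "denied");
	return 0;
}
```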