author	Chris Redpath <Chris.Redpath@arm.com>	2015-02-04 14:06:14 +0900
committer	Seung-Woo Kim <sw0312.kim@samsung.com>	2016-12-14 13:41:52 +0900
commit	1b8ae4e8e1c4c1cca6095f225b7fb4992e902e16 (patch)
tree	cf7d371525c3395a7c0a795a72dee6c65d6278dd /kernel
parent	f8d906726c5778e6fbc67d0d92cba0d9845769cc (diff)
sched: hmp: Fix build breakage when not using CONFIG_SCHED_HMP
hmp_variable_scale_convert was used without guards in
__update_entity_runnable_avg. Guard it.

Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Mark Brown <broonie@linaro.org>
Signed-off-by: Jon Medhurst <tixy@linaro.org>
[k.kozlowski: rebased on 4.1, no signed-off-by of previous committer]
Signed-off-by: Krzysztof Kozlowski <k.kozlowski@samsung.com>
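For context, a minimal sketch of the pattern this commit applies. This is not
the kernel source: compute_delta() and the helper body are simplified,
hypothetical stand-ins; only the guard pattern and the HMP names mirror the
diff below.

#include <stdint.h>

typedef uint64_t u64;

#ifdef CONFIG_SCHED_HMP
#define HMP_VARIABLE_SCALE_SHIFT 16ULL

/* Stand-in for the real helper, which is compiled only in HMP builds;
 * the kernel's actual conversion math differs. */
static u64 hmp_variable_scale_convert(u64 delta)
{
	return delta >> HMP_VARIABLE_SCALE_SHIFT;
}
#endif /* CONFIG_SCHED_HMP */

static u64 compute_delta(u64 now, u64 last_runnable_update)
{
	u64 delta = now - last_runnable_update;
#ifdef CONFIG_SCHED_HMP
	/* Guarded as in the patch: non-HMP builds never reference the
	 * helper, so they compile even when it is not built in. */
	delta = hmp_variable_scale_convert(delta);
#endif
	return delta;
}

Without the guard, a build with CONFIG_SCHED_HMP unset would fail on the
undefined hmp_variable_scale_convert, which is the breakage the subject
line describes.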
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/fair.c	5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index cf01753a361e..f0bc3bfb1c2e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2499,6 +2499,7 @@ static u32 __compute_runnable_contrib(u64 n)
 	return contrib + runnable_avg_yN_sum[n];
 }
 
+#ifdef CONFIG_SCHED_HMP
 #define HMP_VARIABLE_SCALE_SHIFT 16ULL
 struct hmp_global_attr {
 	struct attribute attr;
@@ -2580,6 +2581,7 @@ struct cpufreq_extents {
 static struct cpufreq_extents freq_scale[CONFIG_NR_CPUS];
 #endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
+#endif /* CONFIG_SCHED_HMP */
 
 /* We can represent the historical contribution to runnable average as the
  * coefficients of a geometric series. To do this we sub-divide our runnable
@@ -2625,8 +2627,9 @@ static __always_inline int __update_entity_runnable_avg(u64 now, int cpu,
 #endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
 
 	delta = now - sa->last_runnable_update;
-
+#ifdef CONFIG_SCHED_HMP
 	delta = hmp_variable_scale_convert(delta);
+#endif
 	/*
 	 * This should only happen when time goes backwards, which it
 	 * unfortunately does during sched clock init when we swap over to TSC.
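A common alternative idiom, shown here only for contrast (this commit does
not do this), is to keep the call site unconditional and provide an inline
identity stub when the feature is disabled:

#ifdef CONFIG_SCHED_HMP
/* real conversion, compiled only for HMP builds */
u64 hmp_variable_scale_convert(u64 delta);
#else
static inline u64 hmp_variable_scale_convert(u64 delta)
{
	return delta;	/* no scaling when HMP is disabled */
}
#endif

That keeps #ifdefs out of function bodies at the cost of an extra stub; the
patch instead guards the single call site, the smaller change for a backport.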