author		Siddha, Suresh B <suresh.b.siddha@intel.com>	2006-10-03 01:14:08 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-10-03 08:04:06 -0700
commit		1a84887080dc15f048db7c3a643e98f1435790d6 (patch)
tree		7cd335fee247c0b60f8562c82806b49435b5fb9d /kernel/sched.c
parent		74732646431a1bb7e23e6b564127a8881cfef900 (diff)
[PATCH] sched: introduce child field in sched_domain
Introduce the child field in sched_domain struct and use it in
sched_balance_self().

We will also use this field in cleaning up the sched group cpu_power
setup (done in a different patch) code.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
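The core of the change is easier to see outside the diff context. Below is a minimal, self-contained sketch (not the kernel code; the struct and helper names are stand-ins for the real sched_domain machinery) of how a child back-pointer lets the balancing loop step one level down the hierarchy instead of jumping to a nextlevel label:

/*
 * Simplified model of a sched_domain-style hierarchy: each level points
 * up via parent and, after this patch, down via child.
 */
struct domain {
	struct domain *parent;	/* next larger domain, NULL at the top */
	struct domain *child;	/* next smaller domain, NULL at the bottom */
	int flags;
};

/*
 * Walk downward until a level that supports the requested flag is found,
 * mirroring the "sd = sd->child; continue;" pattern in the first hunk below.
 */
static struct domain *lowest_level_with_flag(struct domain *sd, int flag)
{
	while (sd) {
		if (sd->flags & flag)
			return sd;
		sd = sd->child;		/* retry one level down */
	}
	return NULL;
}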
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	40
1 file changed, 30 insertions(+), 10 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6d7bf55ec33..0feeacb9149 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1286,21 +1286,29 @@ static int sched_balance_self(int cpu, int flag)
 	while (sd) {
 		cpumask_t span;
 		struct sched_group *group;
-		int new_cpu;
-		int weight;
+		int new_cpu, weight;
+
+		if (!(sd->flags & flag)) {
+			sd = sd->child;
+			continue;
+		}
 
 		span = sd->span;
 		group = find_idlest_group(sd, t, cpu);
-		if (!group)
-			goto nextlevel;
+		if (!group) {
+			sd = sd->child;
+			continue;
+		}
 
 		new_cpu = find_idlest_cpu(group, t, cpu);
-		if (new_cpu == -1 || new_cpu == cpu)
-			goto nextlevel;
+		if (new_cpu == -1 || new_cpu == cpu) {
+			/* Now try balancing at a lower domain level of cpu */
+			sd = sd->child;
+			continue;
+		}
 
-		/* Now try balancing at a lower domain level */
+		/* Now try balancing at a lower domain level of new_cpu */
 		cpu = new_cpu;
-nextlevel:
 		sd = NULL;
 		weight = cpus_weight(span);
 		for_each_domain(cpu, tmp) {
@@ -5448,12 +5456,18 @@ static void cpu_attach_domain(struct sched_domain *sd, int cpu)
 		struct sched_domain *parent = tmp->parent;
 		if (!parent)
 			break;
-		if (sd_parent_degenerate(tmp, parent))
+		if (sd_parent_degenerate(tmp, parent)) {
 			tmp->parent = parent->parent;
+			if (parent->parent)
+				parent->parent->child = tmp;
+		}
 	}
 
-	if (sd && sd_degenerate(sd))
+	if (sd && sd_degenerate(sd)) {
 		sd = sd->parent;
+		if (sd)
+			sd->child = NULL;
+	}
 
 	sched_domain_debug(sd, cpu);
@@ -6288,6 +6302,8 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 		*sd = SD_NODE_INIT;
 		sd->span = sched_domain_node_span(cpu_to_node(i));
 		sd->parent = p;
+		if (p)
+			p->child = sd;
 		cpus_and(sd->span, sd->span, *cpu_map);
 #endif
@@ -6297,6 +6313,8 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 		*sd = SD_CPU_INIT;
 		sd->span = nodemask;
 		sd->parent = p;
+		if (p)
+			p->child = sd;
 		sd->groups = &sched_group_phys[group];
 
 #ifdef CONFIG_SCHED_MC
@@ -6307,6 +6325,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 		sd->span = cpu_coregroup_map(i);
 		cpus_and(sd->span, sd->span, *cpu_map);
 		sd->parent = p;
+		p->child = sd;
 		sd->groups = &sched_group_core[group];
 #endif
@@ -6318,6 +6337,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 		sd->span = cpu_sibling_map[i];
 		cpus_and(sd->span, sd->span, *cpu_map);
 		sd->parent = p;
+		p->child = sd;
 		sd->groups = &sched_group_cpus[group];
 #endif
 	}
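For the cpu_attach_domain() hunk above, the invariant being maintained is that the parent and child links stay symmetric when a degenerate level is spliced out of the hierarchy. A minimal sketch of that rule, using hypothetical names rather than the real kernel structures:

struct domain {
	struct domain *parent;
	struct domain *child;
};

/*
 * Remove the (degenerate) level above tmp while keeping both link
 * directions consistent, as the patch does for sched domains.
 */
static void splice_out_parent(struct domain *tmp)
{
	struct domain *parent = tmp->parent;

	if (!parent)
		return;
	tmp->parent = parent->parent;		/* skip the removed level */
	if (parent->parent)
		parent->parent->child = tmp;	/* fix the downward link too */
}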