-rw-r--r--   kernel/rcutree.c         | 26
-rw-r--r--   kernel/rcutree_plugin.h  | 15
2 files changed, 36 insertions, 5 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7e59ffb3d0b..ba06207b1dd 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -84,9 +84,32 @@ DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
 static struct rcu_state *rcu_state;
 
+/*
+ * The rcu_scheduler_active variable transitions from zero to one just
+ * before the first task is spawned. So when this variable is zero, RCU
+ * can assume that there is but one task, allowing RCU to (for example)
+ * optimize synchronize_sched() to a simple barrier(). When this variable
+ * is one, RCU must actually do all the hard work required to detect real
+ * grace periods. This variable is also used to suppress boot-time false
+ * positives from lockdep-RCU error checking.
+ */
 int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
+/*
+ * The rcu_scheduler_fully_active variable transitions from zero to one
+ * during the early_initcall() processing, which is after the scheduler
+ * is capable of creating new tasks. So RCU processing (for example,
+ * creating tasks for RCU priority boosting) must be delayed until after
+ * rcu_scheduler_fully_active transitions from zero to one. We also
+ * currently delay invocation of any RCU callbacks until after this point.
+ *
+ * It might later prove better for people registering RCU callbacks during
+ * early boot to take responsibility for these callbacks, but one step at
+ * a time.
+ */
+static int rcu_scheduler_fully_active __read_mostly;
+
 #ifdef CONFIG_RCU_BOOST
 
 /*
@@ -98,7 +121,6 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
-static char rcu_kthreads_spawnable;
 
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
@@ -1467,6 +1489,8 @@ static void rcu_process_callbacks(struct softirq_action *unused)
  */
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
+        if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
+                return;
         if (likely(!rsp->boost)) {
                 rcu_do_batch(rsp, rdp);
                 return;
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 14dc7dd0090..75113cb7c4f 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1532,7 +1532,7 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
         struct sched_param sp;
         struct task_struct *t;
 
-        if (!rcu_kthreads_spawnable ||
+        if (!rcu_scheduler_fully_active ||
             per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
                 return 0;
         t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
@@ -1639,7 +1639,7 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
         struct sched_param sp;
         struct task_struct *t;
 
-        if (!rcu_kthreads_spawnable ||
+        if (!rcu_scheduler_fully_active ||
             rnp->qsmaskinit == 0)
                 return 0;
         if (rnp->node_kthread_task == NULL) {
@@ -1665,7 +1665,7 @@ static int __init rcu_spawn_kthreads(void)
         int cpu;
         struct rcu_node *rnp;
 
-        rcu_kthreads_spawnable = 1;
+        rcu_scheduler_fully_active = 1;
         for_each_possible_cpu(cpu) {
                 per_cpu(rcu_cpu_has_work, cpu) = 0;
                 if (cpu_online(cpu))
@@ -1687,7 +1687,7 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
         struct rcu_node *rnp = rdp->mynode;
 
         /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
-        if (rcu_kthreads_spawnable) {
+        if (rcu_scheduler_fully_active) {
                 (void)rcu_spawn_one_cpu_kthread(cpu);
                 if (rnp->node_kthread_task == NULL)
                         (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
@@ -1726,6 +1726,13 @@ static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
 {
 }
 
+static int __init rcu_scheduler_really_started(void)
+{
+        rcu_scheduler_fully_active = 1;
+        return 0;
+}
+early_initcall(rcu_scheduler_really_started);
+
 static void __cpuinit rcu_prepare_kthreads(int cpu)
 {
 }
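The whole change hinges on one latched flag: rcu_scheduler_fully_active starts at zero, invoke_rcu_callbacks() returns immediately while it is zero, and the flag is set to one only once the scheduler can create tasks (by rcu_spawn_kthreads() or by the new rcu_scheduler_really_started() early_initcall(), depending on configuration). The stand-alone user-space sketch below illustrates that gating pattern only; all names in it (fully_active, queue_callback, invoke_callbacks, scheduler_really_started) are hypothetical and it is not kernel code.

/*
 * Minimal sketch of the patch's gating pattern: queuing work is always
 * allowed, but invoking it is a no-op until a "fully active" flag is
 * flipped by a late-enough initialization hook.
 */
#include <stdio.h>

static volatile int fully_active;   /* stands in for rcu_scheduler_fully_active */
static int pending_callbacks;       /* work registered before we may run it */

/* Registering work is legal even before initialization finishes. */
static void queue_callback(void)
{
        pending_callbacks++;
}

/* Run queued work, but only once the flag says the environment is ready. */
static void invoke_callbacks(void)
{
        if (!fully_active) {        /* mirrors the early return in invoke_rcu_callbacks() */
                printf("invoke_callbacks: deferred (%d pending)\n", pending_callbacks);
                return;
        }
        printf("invoke_callbacks: running %d callback(s)\n", pending_callbacks);
        pending_callbacks = 0;
}

/* Plays the role of the early_initcall(): runs once task creation is possible. */
static int scheduler_really_started(void)
{
        fully_active = 1;
        return 0;
}

int main(void)
{
        queue_callback();           /* "early boot" registration */
        invoke_callbacks();         /* too early: silently deferred */

        scheduler_really_started(); /* the initcall flips the flag */
        invoke_callbacks();         /* now the backlog is processed */
        return 0;
}

Run as an ordinary program, this prints one "deferred" line and then one "running" line. The property it mirrors from the patch is that registering callbacks early stays safe, while actually invoking them is refused until the flag flips, which is exactly why the old rcu_kthreads_spawnable flag could be replaced by the more broadly used rcu_scheduler_fully_active.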