author | Paul E. McKenney <paulmck@kernel.org> | 2020-01-08 20:06:25 -0800
---|---|---
committer | Paul E. McKenney <paulmck@kernel.org> | 2020-02-20 15:58:22 -0800
commit | 2a2ae872ef7aa958f2152b8b24c6e94cf5f1d0df | (patch)
tree | a63de3362e628049c785189d39daebdbf139fe95 | /kernel/rcu
parent | a5b8950180f8e5acb802d1672e0b4d0ceee6126e | (diff)
rcu: Add *_ONCE() to rcu_data ->rcu_forced_tick
The rcu_data structure's ->rcu_forced_tick field is read locklessly, so
this commit adds WRITE_ONCE() to all updates and READ_ONCE() to all
lockless reads.
This data race was reported by KCSAN. This commit is not appropriate for
backporting because the failure it prevents is unlikely.
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
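
The pattern this commit applies generalizes: once any read of a shared field can run concurrently with a write, both sides need marked accesses, or the result is a C-language data race, which is precisely what KCSAN flags. The following is a minimal userspace sketch of that pattern, not kernel code: C11 relaxed atomics stand in for the kernel's READ_ONCE()/WRITE_ONCE() macros, and the forced_tick variable and thread names are hypothetical.

/*
 * Userspace sketch: marked accesses remove the data race between a
 * lockless reader and an updater, without imposing any ordering.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic bool forced_tick; /* analogue of rdp->rcu_forced_tick */

static void *lockless_reader(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		/* Analogue of READ_ONCE(rdp->rcu_forced_tick). */
		bool v = atomic_load_explicit(&forced_tick,
					      memory_order_relaxed);
		(void)v;
	}
	return NULL;
}

static void *updater(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		/* Analogue of WRITE_ONCE(rdp->rcu_forced_tick, ...). */
		atomic_store_explicit(&forced_tick, i & 1,
				      memory_order_relaxed);
	}
	return NULL;
}

int main(void)
{
	pthread_t r, w;

	pthread_create(&r, NULL, lockless_reader, NULL);
	pthread_create(&w, NULL, updater, NULL);
	pthread_join(r, NULL);
	pthread_join(w, NULL);
	puts("all accesses to forced_tick are marked; no data race");
	return 0;
}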
Diffstat (limited to 'kernel/rcu')
-rw-r--r-- | kernel/rcu/tree.c | 7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e851a12920e6..be59a5d7299d 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -818,11 +818,12 @@ static __always_inline void rcu_nmi_enter_common(bool irq)
 		incby = 1;
 	} else if (tick_nohz_full_cpu(rdp->cpu) &&
 		   rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
-		   READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) {
+		   READ_ONCE(rdp->rcu_urgent_qs) &&
+		   !READ_ONCE(rdp->rcu_forced_tick)) {
 		raw_spin_lock_rcu_node(rdp->mynode); // Recheck under lock.
 		if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
-			rdp->rcu_forced_tick = true;
+			WRITE_ONCE(rdp->rcu_forced_tick, true);
 			tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
 		}
 		raw_spin_unlock_rcu_node(rdp->mynode);
 	}
@@ -899,7 +900,7 @@ static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
 	raw_lockdep_assert_held_rcu_node(rdp->mynode);
 	WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
 	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
 		tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
-		rdp->rcu_forced_tick = false;
+		WRITE_ONCE(rdp->rcu_forced_tick, false);
 	}
 }
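
The first hunk also preserves a check-then-recheck idiom: a marked lockless test on the fast path, followed by a recheck under the rcu_node lock before the state is changed. Below is a self-contained userspace sketch of that idiom under the same assumptions as above: maybe_force_tick() and enable_tick() are hypothetical names, a pthread mutex stands in for raw_spin_lock_rcu_node(), and relaxed atomics stand in for the *_ONCE() macros.

/*
 * Userspace sketch of the check/recheck-under-lock idiom, not kernel
 * code: the lockless fast path may observe stale values, so the final
 * decision is serialized by the lock.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic bool urgent_qs = true; /* analogue of rdp->rcu_urgent_qs   */
static _Atomic bool forced_tick;      /* analogue of rdp->rcu_forced_tick */

static void enable_tick(void)
{
	/* Stand-in for tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU). */
	puts("tick dependency set");
}

static void maybe_force_tick(void)
{
	/* Lockless fast path: both reads are marked because they can
	 * race with a concurrent update. */
	if (atomic_load_explicit(&urgent_qs, memory_order_relaxed) &&
	    !atomic_load_explicit(&forced_tick, memory_order_relaxed)) {
		pthread_mutex_lock(&node_lock);
		/* Recheck under the lock: the state may have changed
		 * between the lockless test and lock acquisition. The
		 * kernel can use plain reads here because every update
		 * is made while holding the lock; the atomics below
		 * just keep this sketch uniform. */
		if (atomic_load_explicit(&urgent_qs,
					 memory_order_relaxed) &&
		    !atomic_load_explicit(&forced_tick,
					  memory_order_relaxed)) {
			/* Analogue of WRITE_ONCE(rdp->rcu_forced_tick, true). */
			atomic_store_explicit(&forced_tick, true,
					      memory_order_relaxed);
			enable_tick();
		}
		pthread_mutex_unlock(&node_lock);
	}
}

int main(void)
{
	maybe_force_tick(); /* sets the tick dependency */
	maybe_force_tick(); /* fast path sees forced_tick, skips the lock */
	return 0;
}

The design point is that the lockless test only has to be safe, not authoritative: the lock serializes the final decision, so a stale fast-path read costs at most one unnecessary lock acquisition.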