author		Sebastian Andrzej Siewior <bigeasy@linutronix.de>	2021-10-06 13:18:52 +0200
committer	Peter Zijlstra <peterz@infradead.org>			2021-10-15 11:25:18 +0200
commit		09089db79859cbccccd8df95b034f36f7027efa6 (patch)
tree		52cf18ecadd4750e3a8f319d36ddc8c856f3c656
parent		b4c6f86ec2f648b5e6d4b04564fbc6d5351160a8 (diff)
irq_work: Also rcuwait for !IRQ_WORK_HARD_IRQ on PREEMPT_RT
On PREEMPT_RT most irq_work items are processed as LAZY via softirq context.
Avoid spin-waiting for them because irq_work_sync() could run at a higher
priority and prevent the irq_work from ever completing.

Additionally wait on the rcuwait for !IRQ_WORK_HARD_IRQ irq_work items on
PREEMPT_RT.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20211006111852.1514359-5-bigeasy@linutronix.de
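For context, here is a minimal sketch (not part of the patch) of how lazy and hard irq_work items are set up and flushed. IRQ_WORK_INIT_LAZY(), IRQ_WORK_INIT_HARD(), irq_work_queue() and irq_work_sync() are existing kernel APIs; the names example_cb, example_lazy, example_hard and example_flush are made up for illustration. After this change, irq_work_sync() on a PREEMPT_RT kernel sleeps on the item's rcuwait for anything without IRQ_WORK_HARD_IRQ instead of spin-waiting on the busy flag.

#include <linux/irq_work.h>

static void example_cb(struct irq_work *work)
{
	/*
	 * Runs from hard-irq context for hard items; on PREEMPT_RT,
	 * lazy items run from softirq/per-CPU kthread context instead.
	 */
}

/*
 * Lazy item: on PREEMPT_RT this does not run in hard-irq context, so
 * irq_work_sync() now waits on work->irqwait (sleeping) rather than
 * spinning until IRQ_WORK_BUSY clears.
 */
static struct irq_work example_lazy = IRQ_WORK_INIT_LAZY(example_cb);

/*
 * Hard item: IRQ_WORK_HARD_IRQ is set, the new irq_work_is_hard()
 * helper returns true, and irq_work_sync() keeps the spin-wait path.
 */
static struct irq_work example_hard = IRQ_WORK_INIT_HARD(example_cb);

static void example_flush(void)
{
	irq_work_queue(&example_lazy);
	irq_work_queue(&example_hard);

	/*
	 * irq_work_sync() requires interrupts enabled and may sleep on
	 * PREEMPT_RT for the lazy item, so it must not be called from
	 * atomic context.
	 */
	irq_work_sync(&example_lazy);
	irq_work_sync(&example_hard);
}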
-rw-r--r--	include/linux/irq_work.h	5	+++++
-rw-r--r--	kernel/irq_work.c		6	++++--
2 files changed, 9 insertions(+), 2 deletions(-)
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index b48955e9c920..8cd11a223260 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -49,6 +49,11 @@ static inline bool irq_work_is_busy(struct irq_work *work)
 	return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY;
 }
 
+static inline bool irq_work_is_hard(struct irq_work *work)
+{
+	return atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ;
+}
+
 bool irq_work_queue(struct irq_work *work);
 bool irq_work_queue_on(struct irq_work *work, int cpu);
 
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 90b6b56f92e9..f7df715ec28e 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -217,7 +217,8 @@ void irq_work_single(void *arg)
 	 */
 	(void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);
 
-	if (!arch_irq_work_has_interrupt())
+	if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
+	    !arch_irq_work_has_interrupt())
 		rcuwait_wake_up(&work->irqwait);
 }
 
@@ -277,7 +278,8 @@ void irq_work_sync(struct irq_work *work)
 	lockdep_assert_irqs_enabled();
 	might_sleep();
 
-	if (!arch_irq_work_has_interrupt()) {
+	if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
+	    !arch_irq_work_has_interrupt()) {
 		rcuwait_wait_event(&work->irqwait, !irq_work_is_busy(work),
 				   TASK_UNINTERRUPTIBLE);
 		return;