author		Linus Torvalds <torvalds@linux-foundation.org>	2016-10-03 13:39:00 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-10-03 13:39:00 -0700
commit		af79ad2b1f337a00aa150b993635b10bc68dc842 (patch)
tree		06abe1d9735b27a449443d7d29a9801f690080be /kernel/sched/wait.c
parent		e606d81d2d9596ab2b4fd0dc052eea0485b7e8c2 (diff)
parent		447976ef4fd09b1be88b316d1a81553f1aa7cd07 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes from Ingo Molnar:
"The main changes are:
- irqtime accounting cleanups and enhancements. (Frederic Weisbecker)
- schedstat debugging enhancements, make it more broadly runtime
available. (Josh Poimboeuf)
- More work on asymmetric topology/capacity scheduling. (Morten
Rasmussen)
- sched/wait fixes and cleanups. (Oleg Nesterov)
- PELT (per entity load tracking) improvements. (Peter Zijlstra)
- Rewrite and enhance select_idle_siblings(). (Peter Zijlstra)
- sched/numa enhancements/fixes (Rik van Riel)
- sched/cputime scalability improvements (Stanislaw Gruszka)
- Load calculation arithmetics fixes. (Dietmar Eggemann)
- sched/deadline enhancements (Tommaso Cucinotta)
- Fix utilization accounting when switching to the SCHED_NORMAL
policy. (Vincent Guittot)
- ... plus misc cleanups and enhancements"
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (64 commits)
sched/irqtime: Consolidate irqtime flushing code
sched/irqtime: Consolidate accounting synchronization with u64_stats API
u64_stats: Introduce IRQs disabled helpers
sched/irqtime: Remove needless IRQs disablement on kcpustat update
sched/irqtime: No need for preempt-safe accessors
sched/fair: Fix min_vruntime tracking
sched/debug: Add SCHED_WARN_ON()
sched/core: Fix set_user_nice()
sched/fair: Introduce set_curr_task() helper
sched/core, ia64: Rename set_curr_task()
sched/core: Fix incorrect utilization accounting when switching to fair class
sched/core: Optimize SCHED_SMT
sched/core: Rewrite and improve select_idle_siblings()
sched/core: Replace sd_busy/nr_busy_cpus with sched_domain_shared
sched/core: Introduce 'struct sched_domain_shared'
sched/core: Restructure destroy_sched_domain()
sched/core: Remove unused @cpu argument from destroy_sched_domain*()
sched/wait: Introduce init_wait_entry()
sched/wait: Avoid abort_exclusive_wait() in __wait_on_bit_lock()
sched/wait: Avoid abort_exclusive_wait() in ___wait_event()
...
Diffstat (limited to 'kernel/sched/wait.c')
-rw-r--r--	kernel/sched/wait.c	113
1 file changed, 55 insertions(+), 58 deletions(-)
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index f15d6b6a538a..4f7053579fe3 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -196,27 +196,48 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
 }
 EXPORT_SYMBOL(prepare_to_wait_exclusive);
 
-long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
+void init_wait_entry(wait_queue_t *wait, int flags)
 {
-        unsigned long flags;
-
-        if (signal_pending_state(state, current))
-                return -ERESTARTSYS;
-
+        wait->flags = flags;
         wait->private = current;
         wait->func = autoremove_wake_function;
+        INIT_LIST_HEAD(&wait->task_list);
+}
+EXPORT_SYMBOL(init_wait_entry);
+
+long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
+{
+        unsigned long flags;
+        long ret = 0;
 
         spin_lock_irqsave(&q->lock, flags);
-        if (list_empty(&wait->task_list)) {
-                if (wait->flags & WQ_FLAG_EXCLUSIVE)
-                        __add_wait_queue_tail(q, wait);
-                else
-                        __add_wait_queue(q, wait);
+        if (unlikely(signal_pending_state(state, current))) {
+                /*
+                 * Exclusive waiter must not fail if it was selected by wakeup,
+                 * it should "consume" the condition we were waiting for.
+                 *
+                 * The caller will recheck the condition and return success if
+                 * we were already woken up, we can not miss the event because
+                 * wakeup locks/unlocks the same q->lock.
+                 *
+                 * But we need to ensure that set-condition + wakeup after that
+                 * can't see us, it should wake up another exclusive waiter if
+                 * we fail.
+                 */
+                list_del_init(&wait->task_list);
+                ret = -ERESTARTSYS;
+        } else {
+                if (list_empty(&wait->task_list)) {
+                        if (wait->flags & WQ_FLAG_EXCLUSIVE)
+                                __add_wait_queue_tail(q, wait);
+                        else
+                                __add_wait_queue(q, wait);
+                }
+                set_current_state(state);
         }
-        set_current_state(state);
         spin_unlock_irqrestore(&q->lock, flags);
 
-        return 0;
+        return ret;
 }
 EXPORT_SYMBOL(prepare_to_wait_event);
 
@@ -255,39 +276,6 @@ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
 }
 EXPORT_SYMBOL(finish_wait);
 
-/**
- * abort_exclusive_wait - abort exclusive waiting in a queue
- * @q: waitqueue waited on
- * @wait: wait descriptor
- * @mode: runstate of the waiter to be woken
- * @key: key to identify a wait bit queue or %NULL
- *
- * Sets current thread back to running state and removes
- * the wait descriptor from the given waitqueue if still
- * queued.
- *
- * Wakes up the next waiter if the caller is concurrently
- * woken up through the queue.
- *
- * This prevents waiter starvation where an exclusive waiter
- * aborts and is woken up concurrently and no one wakes up
- * the next waiter.
- */
-void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
-                        unsigned int mode, void *key)
-{
-        unsigned long flags;
-
-        __set_current_state(TASK_RUNNING);
-        spin_lock_irqsave(&q->lock, flags);
-        if (!list_empty(&wait->task_list))
-                list_del_init(&wait->task_list);
-        else if (waitqueue_active(q))
-                __wake_up_locked_key(q, mode, key);
-        spin_unlock_irqrestore(&q->lock, flags);
-}
-EXPORT_SYMBOL(abort_exclusive_wait);
-
 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
 {
         int ret = default_wake_function(wait, mode, sync, key);
@@ -425,20 +413,29 @@ int __sched
 __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
                         wait_bit_action_f *action, unsigned mode)
 {
-        do {
-                int ret;
+        int ret = 0;
 
+        for (;;) {
                 prepare_to_wait_exclusive(wq, &q->wait, mode);
-                if (!test_bit(q->key.bit_nr, q->key.flags))
-                        continue;
-                ret = action(&q->key, mode);
-                if (!ret)
-                        continue;
-                abort_exclusive_wait(wq, &q->wait, mode, &q->key);
-                return ret;
-        } while (test_and_set_bit(q->key.bit_nr, q->key.flags));
-        finish_wait(wq, &q->wait);
-        return 0;
+                if (test_bit(q->key.bit_nr, q->key.flags)) {
+                        ret = action(&q->key, mode);
+                        /*
+                         * See the comment in prepare_to_wait_event().
+                         * finish_wait() does not necessarily takes wq->lock,
+                         * but test_and_set_bit() implies mb() which pairs with
+                         * smp_mb__after_atomic() before wake_up_page().
+                         */
+                        if (ret)
+                                finish_wait(wq, &q->wait);
+                }
+                if (!test_and_set_bit(q->key.bit_nr, q->key.flags)) {
+                        if (!ret)
+                                finish_wait(wq, &q->wait);
+                        return 0;
+                } else if (ret) {
+                        return ret;
+                }
+        }
 }
 EXPORT_SYMBOL(__wait_on_bit_lock);
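
For readers following the wait-queue rework, here is a minimal caller-side sketch, not the in-tree ___wait_event() implementation, of how the reworked init_wait_entry()/prepare_to_wait_event() pair is intended to be driven. The wait_for_condition() and cond() names are hypothetical; only the wait API calls come from the patch above. The point is the ordering: the condition is rechecked after prepare_to_wait_event() returns, so an exclusive waiter that was already chosen by a wakeup reports success even if a signal arrived, while a waiter that does fail with -ERESTARTSYS has already unlinked itself under q->lock, letting the waker pick another exclusive waiter.

/* Hypothetical sketch against the 4.8-era wait API (wait_queue_t naming). */
#include <linux/sched.h>
#include <linux/wait.h>

static int wait_for_condition(wait_queue_head_t *wq, bool (*cond)(void))
{
        wait_queue_t wait;
        long ret;

        init_wait_entry(&wait, WQ_FLAG_EXCLUSIVE);
        for (;;) {
                /*
                 * Queue ourselves (if not already queued) and set the task
                 * state, or unlink from wq and fail if a signal is pending.
                 */
                ret = prepare_to_wait_event(wq, &wait, TASK_INTERRUPTIBLE);
                if (cond())
                        break;          /* woken or raced with the condition: success */
                if (ret)
                        return ret;     /* -ERESTARTSYS: entry already removed above */
                schedule();
        }
        finish_wait(wq, &wait);         /* restore TASK_RUNNING, dequeue if still queued */
        return 0;
}

The design change visible in the diff is that the signal check now happens under q->lock together with list_del_init(), so the "waiter aborts while a concurrent wakeup selected it" starvation case that abort_exclusive_wait() used to handle can no longer occur, and that helper is removed.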