author | Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | 2011-03-16 19:04:23 -0400 |
---|---|---|
committer | Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca> | 2011-03-16 19:04:23 -0400 |
commit | a04d1a471c6e498abaf826de24d01f35296868b3 (patch) | |
tree | e90bf86f9c7e6ac8550c385b845b8048f51ab669 /fs | |
parent | 676f8b11d57da2b63a6ede33f0c9ecd9644ef5c1 (diff) | |
poll-wait-exclusive
Poll: add poll_wait_set_exclusive
Problem description:
In LTTng, all lttd reader threads poll all of the available debugfs files
for data. This is mainly because the number of reader threads is
user-defined, and in typical workloads a single CPU produces most of the
tracing data while all the other CPUs sit idle, available to consume it.
It therefore makes sense not to tie reader threads to specific buffers.
However, as the number of threads grows, we hit a "thundering herd"
problem: at each wakeup many threads are woken up only to be put straight
back to sleep, leaving a single thread doing useful work.
Solution:
This patch adds a poll_wait_set_exclusive() primitive to poll(), so that
the code implementing a poll file operation can specify that only a single
waiter must be woken up at each wakeup. A usage sketch is shown below.
poll_wait_set_exclusive: set poll wait queue to exclusive
Sets up a poll wait queue to use exclusive wakeups. This is useful to
wake up only one waiter at each wakeup, working around the "thundering
herd" problem.
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
CC: William Lee Irwin III <wli@holomorphy.com>
CC: Ingo Molnar <mingo@elte.hu>
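For illustration, here is a minimal sketch of how a driver's poll file operation could opt into exclusive wakeups. Only poll_wait_set_exclusive() itself comes from this patch; the ring_wait queue and the ring_has_data() helper are hypothetical.

```c
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

/* Hypothetical state for the example. */
static DECLARE_WAIT_QUEUE_HEAD(ring_wait);
static int ring_has_data(struct file *filp);

static unsigned int ring_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask = 0;

	/*
	 * Must be called before poll_wait(): it swaps the poll_table's
	 * registration callback so that subsequent poll_wait() calls add
	 * this poller with add_wait_queue_exclusive() instead of
	 * add_wait_queue().
	 */
	poll_wait_set_exclusive(wait);
	poll_wait(filp, &ring_wait, wait);

	if (ring_has_data(filp))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}
```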
Diffstat (limited to 'fs')
-rw-r--r-- | fs/select.c | 41 |
1 file changed, 38 insertions, 3 deletions
```diff
diff --git a/fs/select.c b/fs/select.c
index e56560d2b08..fa13f263924 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -112,6 +112,9 @@ struct poll_table_page {
  */
 static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
 		       poll_table *p);
+static void __pollwait_exclusive(struct file *filp,
+				 wait_queue_head_t *wait_address,
+				 poll_table *p);
 
 void poll_initwait(struct poll_wqueues *pwq)
 {
@@ -152,6 +155,20 @@ void poll_freewait(struct poll_wqueues *pwq)
 }
 EXPORT_SYMBOL(poll_freewait);
 
+/**
+ * poll_wait_set_exclusive - set poll wait queue to exclusive
+ *
+ * Sets up a poll wait queue to use exclusive wakeups. This is useful to
+ * wake up only one waiter at each wakeup. Used to work-around "thundering herd"
+ * problem.
+ */
+void poll_wait_set_exclusive(poll_table *p)
+{
+	if (p)
+		init_poll_funcptr(p, __pollwait_exclusive);
+}
+EXPORT_SYMBOL(poll_wait_set_exclusive);
+
 static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
 {
 	struct poll_table_page *table = p->table;
@@ -213,8 +230,10 @@ static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
 }
 
 /* Add a new entry */
-static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
-		       poll_table *p)
+static void __pollwait_common(struct file *filp,
+			      wait_queue_head_t *wait_address,
+			      poll_table *p,
+			      int exclusive)
 {
 	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
 	struct poll_table_entry *entry = poll_get_entry(pwq);
@@ -226,7 +245,23 @@ static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
 	entry->key = p->key;
 	init_waitqueue_func_entry(&entry->wait, pollwake);
 	entry->wait.private = pwq;
-	add_wait_queue(wait_address, &entry->wait);
+	if (!exclusive)
+		add_wait_queue(wait_address, &entry->wait);
+	else
+		add_wait_queue_exclusive(wait_address, &entry->wait);
+}
+
+static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
+		       poll_table *p)
+{
+	__pollwait_common(filp, wait_address, p, 0);
+}
+
+static void __pollwait_exclusive(struct file *filp,
+				 wait_queue_head_t *wait_address,
+				 poll_table *p)
+{
+	__pollwait_common(filp, wait_address, p, 1);
 }
 
 int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
```
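On the producer side nothing needs to change; a plain wakeup suffices. A sketch, again assuming the hypothetical ring_wait queue from the example above: since every poller registered exclusively, the wakeup scan stops after waking a single one.

```c
#include <linux/wait.h>

/*
 * Illustrative producer: after publishing new data, a normal wakeup on
 * the queue wakes at most one thread blocked in poll(), because each
 * poller's wait entry was added with add_wait_queue_exclusive().
 */
static void ring_publish(void)
{
	/* ... make the new data visible in the buffer ... */
	wake_up_interruptible(&ring_wait);
}
```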