Diffstat (limited to 'mm/pdflush.c'):
 mm/pdflush.c | 30 ++++++++++++++++++++++++------
 1 file changed, 24 insertions(+), 6 deletions(-)
diff --git a/mm/pdflush.c b/mm/pdflush.c
index 118905e3d78..235ac440c44 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -98,7 +98,6 @@ static int __pdflush(struct pdflush_work *my_work)
 	INIT_LIST_HEAD(&my_work->list);
 
 	spin_lock_irq(&pdflush_lock);
-	nr_pdflush_threads++;
 	for ( ; ; ) {
 		struct pdflush_work *pdf;
 
@@ -126,20 +125,26 @@ static int __pdflush(struct pdflush_work *my_work)
 
 		(*my_work->fn)(my_work->arg0);
 
+		spin_lock_irq(&pdflush_lock);
+
 		/*
 		 * Thread creation: For how long have there been zero
 		 * available threads?
+		 *
+		 * To throttle creation, we reset last_empty_jifs.
 		 */
 		if (time_after(jiffies, last_empty_jifs + 1 * HZ)) {
-			/* unlocked list_empty() test is OK here */
 			if (list_empty(&pdflush_list)) {
-				/* unlocked test is OK here */
-				if (nr_pdflush_threads < MAX_PDFLUSH_THREADS)
+				if (nr_pdflush_threads < MAX_PDFLUSH_THREADS) {
+					last_empty_jifs = jiffies;
+					nr_pdflush_threads++;
+					spin_unlock_irq(&pdflush_lock);
 					start_one_pdflush_thread();
+					spin_lock_irq(&pdflush_lock);
+				}
 			}
 		}
 
-		spin_lock_irq(&pdflush_lock);
 		my_work->fn = NULL;
 
 		/*
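
The hunk above changes who accounts for a new thread: nr_pdflush_threads is now bumped by the caller while pdflush_lock is held, and the spinlock is dropped only around start_one_pdflush_thread(), since thread creation can sleep. Reserving the slot under the lock means the MAX_PDFLUSH_THREADS check and the increment happen atomically with respect to other pdflush threads. The same reserve-then-roll-back pattern can be shown in ordinary userspace C; the sketch below is illustrative only, and the names pool_lock, worker_count, spawn_worker and maybe_grow_pool are invented for the example, not part of the patch.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static int worker_count;		/* plays the role of nr_pdflush_threads */

static void *worker(void *arg)
{
	(void)arg;
	/* ... writeback-like work would go here ... */
	return NULL;
}

/*
 * Analogue of start_one_pdflush_thread(): the caller has already
 * counted the new worker, so undo the count if creation fails.
 */
static void spawn_worker(void)
{
	pthread_t tid;

	if (pthread_create(&tid, NULL, worker, NULL) != 0) {
		pthread_mutex_lock(&pool_lock);
		worker_count--;
		pthread_mutex_unlock(&pool_lock);
		return;
	}
	pthread_detach(tid);
}

/*
 * Analogue of the hunk above: check the limit and reserve the slot
 * under the lock, then drop the lock across the creation call,
 * which may block.
 */
static void maybe_grow_pool(int max_workers)
{
	pthread_mutex_lock(&pool_lock);
	if (worker_count < max_workers) {
		worker_count++;
		pthread_mutex_unlock(&pool_lock);
		spawn_worker();
		return;
	}
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	maybe_grow_pool(8);
	pthread_mutex_lock(&pool_lock);
	printf("workers counted: %d\n", worker_count);
	pthread_mutex_unlock(&pool_lock);
	return 0;
}
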
@@ -236,13 +241,26 @@ int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0)
 
 static void start_one_pdflush_thread(void)
 {
-	kthread_run(pdflush, NULL, "pdflush");
+	struct task_struct *k;
+
+	k = kthread_run(pdflush, NULL, "pdflush");
+	if (unlikely(IS_ERR(k))) {
+		spin_lock_irq(&pdflush_lock);
+		nr_pdflush_threads--;
+		spin_unlock_irq(&pdflush_lock);
+	}
 }
 
 static int __init pdflush_init(void)
 {
 	int i;
 
+	/*
+	 * Pre-set nr_pdflush_threads... If we fail to create,
+	 * the count will be decremented.
+	 */
+	nr_pdflush_threads = MIN_PDFLUSH_THREADS;
+
 	for (i = 0; i < MIN_PDFLUSH_THREADS; i++)
 		start_one_pdflush_thread();
 	return 0;
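
kthread_run() reports failure through an error code encoded in the returned pointer (an ERR_PTR) rather than by returning NULL, which is why the new start_one_pdflush_thread() tests the result with IS_ERR() before undoing the count. Below is a minimal userspace imitation of that encoding, for illustration only; the real helpers live in include/linux/err.h, and fake_kthread_run is an invented stand-in, not a kernel API.

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095

/* Userspace copies of the kernel's ERR_PTR helpers, for illustration. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for kthread_run(): returns an ERR_PTR on failure, never NULL. */
static void *fake_kthread_run(int fail)
{
	static int dummy_task;

	if (fail)
		return ERR_PTR(-ENOMEM);
	return &dummy_task;
}

int main(void)
{
	void *k = fake_kthread_run(1);

	if (IS_ERR(k))
		printf("creation failed: %ld\n", PTR_ERR(k));
	return 0;
}
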