author    Andreas Gruenbacher <agruenba@redhat.com>  2020-06-10 18:31:56 +0200
committer Andreas Gruenbacher <agruenba@redhat.com>  2020-06-30 13:04:45 +0200
commit    34244d711dea568f4a42c5b0d6b3d620f8cb6971 (patch)
tree      137ff7207b0d91d4d6988d92605cfb2124bf7f62 /fs/gfs2
parent    58e08e8d83ab03a1ca25d53420bd0b87f2dfe458 (diff)
gfs2: Don't sleep during glock hash walk
In flush_delete_work, instead of flushing each individual pending delayed work item, cancel and re-queue them for immediate execution. The waiting isn't needed here because we're already waiting for all queued work items to complete in gfs2_flush_delete_work.

This makes the code more efficient, but more importantly, it avoids sleeping during a rhashtable walk, inside rcu_read_lock().

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Diffstat (limited to 'fs/gfs2')
-rw-r--r--  fs/gfs2/glock.c  5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 2299dcc417ea..8545024a1401 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1899,7 +1899,10 @@ bool gfs2_delete_work_queued(const struct gfs2_glock *gl)
 static void flush_delete_work(struct gfs2_glock *gl)
 {
-	flush_delayed_work(&gl->gl_delete);
+	if (cancel_delayed_work(&gl->gl_delete)) {
+		queue_delayed_work(gfs2_delete_workqueue,
+				   &gl->gl_delete, 0);
+	}
 	gfs2_glock_queue_work(gl, 0);
 }
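
Why the per-item wait is redundant: cancel_delayed_work() returns true only if the work item was still pending, and re-queueing it with a delay of 0 makes it run as soon as a worker is free, so no delete work is lost, it simply runs earlier. The single sleep then happens in the caller, outside the hash walk. Below is a minimal sketch of that caller, reconstructed from the commit message rather than taken from this page; the glock_hash_walk() helper and the exact gfs2_flush_delete_work() signature are assumptions.

/* Sketch only -- reconstructed from the commit message, not part of this patch.
 * glock_hash_walk() is assumed to iterate the glock rhashtable under
 * rcu_read_lock() and call the examiner (here flush_delete_work) on each
 * glock, which is why the examiner itself must not sleep.
 */
void gfs2_flush_delete_work(struct gfs2_sbd *sdp)
{
	/* Re-queue any pending gl_delete work for immediate execution. */
	glock_hash_walk(flush_delete_work, sdp);

	/* A single sleep here, outside the RCU-protected walk, waits for
	 * all of the re-queued work items at once. */
	flush_workqueue(gfs2_delete_workqueue);
}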