author    Steven Whitehouse <swhiteho@redhat.com>  2011-03-09 10:58:04 +0000
committer Steven Whitehouse <swhiteho@redhat.com>  2011-03-09 10:58:04 +0000
commit    fc0e38dae645f65424d1fb5d2a938aab8ce48a58 (patch)
tree      8b0a89a0de01a0504a002a184a290a53545fce82 /fs/gfs2/glock.c
parent    662e3a551b468c7338f5291d7a00389fe85885e2 (diff)
GFS2: Fix glock deallocation race
This patch fixes a race in deallocating glocks which was introduced
by the RCU glock patch. We need to ensure that the glock count is
kept correct even in the case that there is a race to add a new
glock into the hash table. Also, to avoid having to wait for an RCU
grace period, the glock counter can be decremented before call_rcu()
is called.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
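Why decrement before the RCU callback runs? A minimal sketch of the pattern the patch adopts, with hypothetical names (sb_info, obj, obj_cachep, disposal, disposal_wait) standing in for the real GFS2 structures:

#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/atomic.h>

struct sb_info {
	atomic_t disposal;			/* objects allocated but not yet freed */
	wait_queue_head_t disposal_wait;	/* teardown sleeps here until zero */
};

struct obj {
	struct rcu_head o_rcu;
	struct sb_info *o_sb;
};

static struct kmem_cache *obj_cachep;

/* RCU callback: runs after a grace period and only frees the memory */
static void obj_dealloc(struct rcu_head *rcu)
{
	struct obj *o = container_of(rcu, struct obj, o_rcu);

	kmem_cache_free(obj_cachep, o);
}

void obj_free(struct obj *o)
{
	struct sb_info *sb = o->o_sb;	/* read before handing the object to RCU */

	call_rcu(&o->o_rcu, obj_dealloc);
	/* Drop the count now rather than in the callback: waiters see it
	 * fall immediately instead of sitting out a grace period, while
	 * the memory itself is still freed safely after all readers. */
	if (atomic_dec_and_test(&sb->disposal))
		wake_up(&sb->disposal_wait);
}

Had the decrement lived in obj_dealloc(), a thread waiting for the count to drain would stall for a grace period per object, which is exactly the delay the patch removes.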
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--  fs/gfs2/glock.c | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index ddc3e1e3faa..3f45a14009b 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -103,16 +103,21 @@ static inline void spin_unlock_bucket(unsigned int hash)
 	__bit_spin_unlock(0, (unsigned long *)bl);
 }
 
-void gfs2_glock_free(struct rcu_head *rcu)
+static void gfs2_glock_dealloc(struct rcu_head *rcu)
 {
 	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
-	struct gfs2_sbd *sdp = gl->gl_sbd;
 
 	if (gl->gl_ops->go_flags & GLOF_ASPACE)
 		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
 	else
 		kmem_cache_free(gfs2_glock_cachep, gl);
+}
+
+void gfs2_glock_free(struct gfs2_glock *gl)
+{
+	struct gfs2_sbd *sdp = gl->gl_sbd;
 
+	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
 	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
 		wake_up(&sdp->sd_glock_wait);
 }
@@ -760,6 +765,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	if (tmp) {
 		spin_unlock_bucket(hash);
 		kmem_cache_free(cachep, gl);
+		atomic_dec(&sdp->sd_glock_disposal);
 		gl = tmp;
 	} else {
 		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
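The second hunk closes the accounting side of the race: two concurrent gfs2_glock_get() callers can each allocate a glock for the same name, and each allocation has already bumped sd_glock_disposal. The loser of the hash-insertion race frees its unpublished copy directly (no RCU deferral needed, since no reader can hold it) and must give its count back too. The count has to be exact because teardown waits for it to drain; a hedged sketch of such a waiter (the actual wait site lives elsewhere in GFS2):

	wait_event(sdp->sd_glock_wait,
		   atomic_read(&sdp->sd_glock_disposal) == 0);

Without the atomic_dec() added above, a lost race would leave the count permanently non-zero and that wait would hang at unmount.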