From 90ae40d243d46f2268a5334fc8faa2dd67f875f5 Mon Sep 17 00:00:00 2001
From: Jan Kara
Date: Tue, 12 Jul 2022 12:54:25 +0200
Subject: ext2: factor out freeing of xattr block reference

Freeing of the xattr block reference is open coded in two places. Factor
it out into a separate function and use it.

Signed-off-by: Jan Kara
Link: https://lore.kernel.org/r/20220712105436.32204-6-jack@suse.cz
Signed-off-by: Theodore Ts'o
---
 fs/ext2/xattr.c | 90 ++++++++++++++++++++++++---------------------------------
 1 file changed, 38 insertions(+), 52 deletions(-)

(limited to 'fs/ext2')

diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 841fa6d9d744..9885294993ef 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -651,6 +651,42 @@ cleanup:
 return error;
 }

+static void ext2_xattr_release_block(struct inode *inode,
+ struct buffer_head *bh)
+{
+ struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
+
+ lock_buffer(bh);
+ if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
+ __u32 hash = le32_to_cpu(HDR(bh)->h_hash);
+
+ /*
+ * This must happen under buffer lock for
+ * ext2_xattr_set2() to reliably detect freed block
+ */
+ mb_cache_entry_delete(ea_block_cache, hash,
+ bh->b_blocknr);
+ /* Free the old block. */
+ ea_bdebug(bh, "freeing");
+ ext2_free_blocks(inode, bh->b_blocknr, 1);
+ /* We let our caller release bh, so we
+ * need to duplicate the buffer before. */
+ get_bh(bh);
+ bforget(bh);
+ unlock_buffer(bh);
+ } else {
+ /* Decrement the refcount only. */
+ le32_add_cpu(&HDR(bh)->h_refcount, -1);
+ dquot_free_block(inode, 1);
+ mark_buffer_dirty(bh);
+ unlock_buffer(bh);
+ ea_bdebug(bh, "refcount now=%d",
+ le32_to_cpu(HDR(bh)->h_refcount));
+ if (IS_SYNC(inode))
+ sync_dirty_buffer(bh);
+ }
+}
+
 /*
  * Second half of ext2_xattr_set(): Update the file system.
  */
@@ -747,34 +783,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
 * If there was an old block and we are no longer using it,
 * release the old block.
 */
- lock_buffer(old_bh);
- if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) {
- __u32 hash = le32_to_cpu(HDR(old_bh)->h_hash);
-
- /*
- * This must happen under buffer lock for
- * ext2_xattr_set2() to reliably detect freed block
- */
- mb_cache_entry_delete(ea_block_cache, hash,
- old_bh->b_blocknr);
- /* Free the old block. */
- ea_bdebug(old_bh, "freeing");
- ext2_free_blocks(inode, old_bh->b_blocknr, 1);
- mark_inode_dirty(inode);
- /* We let our caller release old_bh, so we
- * need to duplicate the buffer before. */
- get_bh(old_bh);
- bforget(old_bh);
- } else {
- /* Decrement the refcount only. */
- le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
- dquot_free_block_nodirty(inode, 1);
- mark_inode_dirty(inode);
- mark_buffer_dirty(old_bh);
- ea_bdebug(old_bh, "refcount now=%d",
- le32_to_cpu(HDR(old_bh)->h_refcount));
- }
- unlock_buffer(old_bh);
+ ext2_xattr_release_block(inode, old_bh);
 }

 cleanup:
@@ -828,30 +837,7 @@ ext2_xattr_delete_inode(struct inode *inode)
 EXT2_I(inode)->i_file_acl);
 goto cleanup;
 }
- lock_buffer(bh);
- if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
- __u32 hash = le32_to_cpu(HDR(bh)->h_hash);
-
- /*
- * This must happen under buffer lock for ext2_xattr_set2() to
- * reliably detect freed block
- */
- mb_cache_entry_delete(EA_BLOCK_CACHE(inode), hash,
- bh->b_blocknr);
- ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
- get_bh(bh);
- bforget(bh);
- unlock_buffer(bh);
- } else {
- le32_add_cpu(&HDR(bh)->h_refcount, -1);
- ea_bdebug(bh, "refcount now=%d",
- le32_to_cpu(HDR(bh)->h_refcount));
- unlock_buffer(bh);
- mark_buffer_dirty(bh);
- if (IS_SYNC(inode))
- sync_dirty_buffer(bh);
- dquot_free_block_nodirty(inode, 1);
- }
+ ext2_xattr_release_block(inode, bh);
 EXT2_I(inode)->i_file_acl = 0;

 cleanup:

-- cgit v1.2.3

From b67798d55185ba4471f70b67b3f7c2866f216499 Mon Sep 17 00:00:00 2001
From: Jan Kara
Date: Tue, 12 Jul 2022 12:54:26 +0200
Subject: ext2: unindent codeblock in ext2_xattr_set()

Replace one else in ext2_xattr_set() with a goto. This makes following
code changes simpler to follow. No functional changes.

Signed-off-by: Jan Kara
Reviewed-by: Ritesh Harjani (IBM)
Link: https://lore.kernel.org/r/20220712105436.32204-7-jack@suse.cz
Signed-off-by: Theodore Ts'o
---
 fs/ext2/xattr.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

(limited to 'fs/ext2')

diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 9885294993ef..37ce495eb279 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -517,7 +517,8 @@ bad_block:

 /* Here we know that we can set the new attribute. */
 if (header) {
- /* assert(header == HDR(bh)); */
+ int offset;
+
 lock_buffer(bh);
 if (header->h_refcount == cpu_to_le32(1)) {
 __u32 hash = le32_to_cpu(header->h_hash);
@@ -531,22 +532,20 @@ bad_block:
 bh->b_blocknr);

 /* keep the buffer locked while modifying it. */
- } else {
- int offset;
-
- unlock_buffer(bh);
- ea_bdebug(bh, "cloning");
- header = kmemdup(HDR(bh), bh->b_size, GFP_KERNEL);
- error = -ENOMEM;
- if (header == NULL)
- goto cleanup;
- header->h_refcount = cpu_to_le32(1);
-
- offset = (char *)here - bh->b_data;
- here = ENTRY((char *)header + offset);
- offset = (char *)last - bh->b_data;
- last = ENTRY((char *)header + offset);
+ goto update_block;
 }
+ unlock_buffer(bh);
+ ea_bdebug(bh, "cloning");
+ header = kmemdup(HDR(bh), bh->b_size, GFP_KERNEL);
+ error = -ENOMEM;
+ if (header == NULL)
+ goto cleanup;
+ header->h_refcount = cpu_to_le32(1);
+
+ offset = (char *)here - bh->b_data;
+ here = ENTRY((char *)header + offset);
+ offset = (char *)last - bh->b_data;
+ last = ENTRY((char *)header + offset);
 } else {
 /* Allocate a buffer where we construct the new block. */
 header = kzalloc(sb->s_blocksize, GFP_KERNEL);
@@ -559,6 +558,7 @@ bad_block:
 last = here = ENTRY(header+1);
 }

+update_block:
 /* Iff we are modifying the block in-place, bh is locked here. */

 if (not_found) {

-- cgit v1.2.3

From 1189d8ec5105f7628fb0e3a6988cbf0d0cbfedeb Mon Sep 17 00:00:00 2001
From: Jan Kara
Date: Tue, 12 Jul 2022 12:54:27 +0200
Subject: ext2: avoid deleting xattr block that is being reused

Currently, when we decide to reuse an xattr block, we detect the case
where the last reference to the xattr block is being dropped at the same
time and cancel the reuse attempt. Convert ext2 to a new scheme where,
as soon as a matching mbcache entry is found, we wait with dropping the
last xattr block reference until the mbcache entry reference is dropped
(meaning either the xattr block reference is increased or we decided not
to reuse the block).

Signed-off-by: Jan Kara
Link: https://lore.kernel.org/r/20220712105436.32204-8-jack@suse.cz
Signed-off-by: Theodore Ts'o
---
 fs/ext2/xattr.c | 58 ++++++++++++++++++++++++++++-----------------------------
 1 file changed, 29 insertions(+), 29 deletions(-)

(limited to 'fs/ext2')

diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 37ce495eb279..641abfa4b718 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -522,17 +522,18 @@ bad_block:
 lock_buffer(bh);
 if (header->h_refcount == cpu_to_le32(1)) {
 __u32 hash = le32_to_cpu(header->h_hash);
+ struct mb_cache_entry *oe;

- ea_bdebug(bh, "modifying in-place");
+ oe = mb_cache_entry_delete_or_get(EA_BLOCK_CACHE(inode),
+ hash, bh->b_blocknr);
+ if (!oe) {
+ ea_bdebug(bh, "modifying in-place");
+ goto update_block;
+ }
 /*
- * This must happen under buffer lock for
- * ext2_xattr_set2() to reliably detect modified block
+ * Someone is trying to reuse the block, leave it alone
 */
- mb_cache_entry_delete(EA_BLOCK_CACHE(inode), hash,
- bh->b_blocknr);
-
- /* keep the buffer locked while modifying it. */
- goto update_block;
+ mb_cache_entry_put(EA_BLOCK_CACHE(inode), oe);
 }
 unlock_buffer(bh);
 ea_bdebug(bh, "cloning");
@@ -656,16 +657,29 @@ static void ext2_xattr_release_block(struct inode *inode,
 {
 struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

+retry_ref:
 lock_buffer(bh);
 if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
 __u32 hash = le32_to_cpu(HDR(bh)->h_hash);
+ struct mb_cache_entry *oe;

 /*
- * This must happen under buffer lock for
- * ext2_xattr_set2() to reliably detect freed block
+ * This must happen under buffer lock to properly
+ * serialize with ext2_xattr_set() reusing the block.
 */
- mb_cache_entry_delete(ea_block_cache, hash,
- bh->b_blocknr);
+ oe = mb_cache_entry_delete_or_get(ea_block_cache, hash,
+ bh->b_blocknr);
+ if (oe) {
+ /*
+ * Someone is trying to reuse the block. Wait
+ * and retry.
+ */
+ unlock_buffer(bh);
+ mb_cache_entry_wait_unused(oe);
+ mb_cache_entry_put(ea_block_cache, oe);
+ goto retry_ref;
+ }
+
 /* Free the old block. */
 ea_bdebug(bh, "freeing");
 ext2_free_blocks(inode, bh->b_blocknr, 1);
@@ -929,7 +943,7 @@ ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
 if (!header->h_hash)
 return NULL; /* never share */
 ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
-again:
+
 ce = mb_cache_entry_find_first(ea_block_cache, hash);
 while (ce) {
 struct buffer_head *bh;
@@ -941,22 +955,8 @@ again:
 inode->i_ino, (unsigned long) ce->e_value);
 } else {
 lock_buffer(bh);
- /*
- * We have to be careful about races with freeing or
- * rehashing of xattr block. Once we hold buffer lock
- * xattr block's state is stable so we can check
- * whether the block got freed / rehashed or not.
- * Since we unhash mbcache entry under buffer lock when
- * freeing / rehashing xattr block, checking whether
- * entry is still hashed is reliable.
- */
- if (hlist_bl_unhashed(&ce->e_hash_list)) {
- mb_cache_entry_put(ea_block_cache, ce);
- unlock_buffer(bh);
- brelse(bh);
- goto again;
- } else if (le32_to_cpu(HDR(bh)->h_refcount) >
- EXT2_XATTR_REFCOUNT_MAX) {
+ if (le32_to_cpu(HDR(bh)->h_refcount) >
+ EXT2_XATTR_REFCOUNT_MAX) {
 ea_idebug(inode, "block %ld refcount %d>%d",
 (unsigned long) ce->e_value,
 le32_to_cpu(HDR(bh)->h_refcount),

-- cgit v1.2.3
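
A side note for readers following the series: the scheme in the last patch boils down to "if somebody is still inspecting the cache entry, pin it and wait until it is unused before freeing the block; otherwise delete the entry and free immediately". Below is a small, self-contained userspace sketch of that hand-off using POSIX threads. It is not the kernel mbcache API: the struct toy_entry type, the toy_* helpers and the refcount convention (1 means only the cache holds the entry) are invented here purely to illustrate the pattern; only the control flow mirrors mb_cache_entry_delete_or_get() / mb_cache_entry_wait_unused() / mb_cache_entry_put() as used in the patch above.

#include <pthread.h>
#include <stdio.h>

/* Toy analogue of an mbcache entry (illustrative only). */
struct toy_entry {
    pthread_mutex_t lock;
    pthread_cond_t unused;
    int refcnt;   /* 1 == only the cache references the entry */
    int deleted;
};

/*
 * Rough analogue of mb_cache_entry_delete_or_get(): if nobody but the
 * cache holds the entry, mark it deleted and return NULL (the caller may
 * free the block). Otherwise return the entry with an extra reference so
 * the caller can wait for it to become unused.
 */
static struct toy_entry *toy_delete_or_get(struct toy_entry *e)
{
    struct toy_entry *ret = NULL;

    pthread_mutex_lock(&e->lock);
    if (e->refcnt == 1) {
        e->deleted = 1;
    } else {
        e->refcnt++;   /* pin the entry for the waiter */
        ret = e;
    }
    pthread_mutex_unlock(&e->lock);
    return ret;
}

/* Rough analogue of mb_cache_entry_wait_unused(): wait until only the
 * cache and the waiter itself still reference the entry. */
static void toy_wait_unused(struct toy_entry *e)
{
    pthread_mutex_lock(&e->lock);
    while (e->refcnt > 2)
        pthread_cond_wait(&e->unused, &e->lock);
    pthread_mutex_unlock(&e->lock);
}

/* Rough analogue of mb_cache_entry_put(): drop one reference, wake waiters. */
static void toy_put(struct toy_entry *e)
{
    pthread_mutex_lock(&e->lock);
    e->refcnt--;
    pthread_cond_broadcast(&e->unused);
    pthread_mutex_unlock(&e->lock);
}

int main(void)
{
    struct toy_entry e = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .unused = PTHREAD_COND_INITIALIZER,
        .refcnt = 2,   /* the cache plus one reuser currently inspecting it */
    };
    struct toy_entry *oe;

    /*
     * Dropping the last block reference, shaped like the retry_ref: loop
     * in ext2_xattr_release_block(). Single-threaded for brevity: the
     * toy_put(&e) below stands in for the concurrent reuser dropping its
     * entry reference from another thread.
     */
    for (;;) {
        oe = toy_delete_or_get(&e);
        if (!oe)
            break;         /* entry deleted, safe to free the block */
        toy_put(&e);       /* what the reuser thread would eventually do */
        toy_wait_unused(oe);
        toy_put(oe);       /* drop our pin and retry */
    }
    printf("deleted=%d refcnt=%d\n", e.deleted, e.refcnt);
    return 0;
}

The design choice this illustrates: the releasing side never frees a block while a prospective reuser still holds the mbcache entry, and the reuser no longer has to re-validate the block after taking its reference; the wait/retry loop replaces the old "check whether the entry got unhashed under the buffer lock" dance removed from ext2_xattr_cache_find().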