author     Chao Yu <yuchao0@huawei.com>        2020-06-18 14:36:23 +0800
committer  Jaegeuk Kim <jaegeuk@kernel.org>    2020-07-07 21:51:44 -0700
commit     79963d967b492876fa17c8c2c2c17b7438683d9b (patch)
tree       6567a2612ff20d21ce355035717b31813e8697b7 /fs/f2fs
parent     fa6795552ad20509ffb1cc9ed1246b6b337f5da5 (diff)
f2fs: shrink node_write lock coverage
- To avoid the race between checkpoint and quota file writeback, it is enough
  to hold the read lock of node_write in the writeback path.
- The node_write lock used to cover all LFS data write paths; that is not
  necessary, we only need to hold node_write in the write path of quota files.

This refactors commit ca7f76e68074 ("f2fs: fix wrong discard space").

Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
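For orientation, here is a minimal sketch of the locking relationship this change relies on. It is illustrative only, not the kernel code itself; the two function names are hypothetical, and it assumes the checkpoint path takes sbi->node_write exclusively, as the comments in the diff below imply. With the read lock on the writeback side, quota writes still cannot allocate blocks while a checkpoint is in flight, but they no longer serialize every other LFS data write.

/*
 * Illustrative sketch (hypothetical helpers): quota writeback takes
 * node_write shared, checkpoint takes it exclusive, so the two still
 * exclude each other while concurrent data writes stay unserialized.
 */
static void quota_writeback_sketch(struct f2fs_sb_info *sbi)
{
	down_read(&sbi->node_write);	/* shared: only excludes checkpoint */
	/* ... allocate blocks and write back quota pages ... */
	up_read(&sbi->node_write);
}

static void checkpoint_sketch(struct f2fs_sb_info *sbi)
{
	down_write(&sbi->node_write);	/* exclusive: blocks quota writeback */
	/* ... flush node pages and commit the checkpoint ... */
	up_write(&sbi->node_write);
}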
Diffstat (limited to 'fs/f2fs')
-rw-r--r--    fs/f2fs/compress.c    18
-rw-r--r--    fs/f2fs/data.c        12
-rw-r--r--    fs/f2fs/segment.c     11
3 files changed, 27 insertions, 14 deletions
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 7dbd56abe936..5643aa2b8377 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -1096,8 +1096,16 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
loff_t psize;
int i, err;
- if (!IS_NOQUOTA(inode) && !f2fs_trylock_op(sbi))
+ if (IS_NOQUOTA(inode)) {
+ /*
+ * We need to wait for node_write to avoid block allocation during
+ * checkpoint. This can only happen to quota writes which can cause
+ * the below discard race condition.
+ */
+ down_read(&sbi->node_write);
+ } else if (!f2fs_trylock_op(sbi)) {
return -EAGAIN;
+ }
set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
@@ -1203,7 +1211,9 @@ unlock_continue:
set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
f2fs_put_dnode(&dn);
- if (!IS_NOQUOTA(inode))
+ if (IS_NOQUOTA(inode))
+ up_read(&sbi->node_write);
+ else
f2fs_unlock_op(sbi);
spin_lock(&fi->i_size_lock);
@@ -1230,7 +1240,9 @@ out_put_cic:
out_put_dnode:
f2fs_put_dnode(&dn);
out_unlock_op:
- if (!IS_NOQUOTA(inode))
+ if (IS_NOQUOTA(inode))
+ up_read(&sbi->node_write);
+ else
f2fs_unlock_op(sbi);
return -EAGAIN;
}
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index c78ce08f6400..cbdf062d3562 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2719,8 +2719,20 @@ write:
/* Dentry/quota blocks are controlled by checkpoint */
if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
+ /*
+ * We need to wait for node_write to avoid block allocation during
+ * checkpoint. This can only happen to quota writes which can cause
+ * the below discard race condition.
+ */
+ if (IS_NOQUOTA(inode))
+ down_read(&sbi->node_write);
+
fio.need_lock = LOCK_DONE;
err = f2fs_do_write_data_page(&fio);
+
+ if (IS_NOQUOTA(inode))
+ up_read(&sbi->node_write);
+
goto done;
}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index cf693a018bec..2e3098f9118c 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -3133,14 +3133,6 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
type = CURSEG_COLD_DATA;
}
- /*
- * We need to wait for node_write to avoid block allocation during
- * checkpoint. This can only happen to quota writes which can cause
- * the below discard race condition.
- */
- if (IS_DATASEG(type))
- down_write(&sbi->node_write);
-
down_read(&SM_I(sbi)->curseg_lock);
mutex_lock(&curseg->curseg_mutex);
@@ -3206,9 +3198,6 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
up_read(&SM_I(sbi)->curseg_lock);
- if (IS_DATASEG(type))
- up_write(&sbi->node_write);
-
if (put_pin_sem)
up_read(&sbi->pin_sem);
}