author     Trond Myklebust <Trond.Myklebust@netapp.com>    2006-12-05 00:35:42 -0500
committer  Trond Myklebust <Trond.Myklebust@netapp.com>    2006-12-06 10:46:40 -0500
commit     61822ab5e3ed09fcfc49e37227b655202adf6130 (patch)
tree       224a470689463469672ed38b0da3d9f47bc8a245 /fs/nfs
parent     e261f51f25b98c213e0b3d7f2109b117d714f69d (diff)
NFS: Ensure we only call set_page_writeback() under the page lock
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
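
The change moves set_page_writeback() out of the RPC send paths (nfs_flush_multi()/nfs_flush_one(), where the page lock is not held) and into nfs_page_mark_flush(), which runs in the ->writepage()/->writepages() path with the page still locked. Requests that must be retried are re-dirtied through the new nfs_redirty_request(), which also clears PG_FLUSHING so the page can be marked for flushing again; nfs_wb_all() and nfs_sync_mapping_range() now push dirty pages through generic_writepages() before waiting, and nfs_wb_page_priority() is made non-static so nfs_invalidate_page() can call it. A minimal sketch of the invariant follows, with hypothetical helper names (sketch_mark_flush(), sketch_redirty()) standing in for the real nfs_page_mark_flush()/nfs_redirty_request(), and assuming a 2.6.19-era tree that already defines PG_FLUSHING:

#include <linux/kernel.h>     /* WARN_ON() */
#include <linux/mm.h>         /* __set_page_dirty_nobuffers() */
#include <linux/page-flags.h> /* PageLocked(), set_page_writeback() */
#include <linux/nfs_page.h>   /* struct nfs_page, PG_FLUSHING */

/* Sketch: called from the writepage path, where the caller holds the page lock. */
static void sketch_mark_flush(struct nfs_page *req, struct page *page)
{
	WARN_ON(!PageLocked(page));	/* writeback state is only set under the page lock */
	if (test_and_set_bit(PG_FLUSHING, &req->wb_flags) == 0)
		set_page_writeback(page);	/* page now reported as under writeback */
}

/* Sketch: undo a failed or rescheduled flush - drop PG_FLUSHING and re-dirty the page. */
static void sketch_redirty(struct nfs_page *req)
{
	clear_bit(PG_FLUSHING, &req->wb_flags);
	__set_page_dirty_nobuffers(req->wb_page);
}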
Diffstat (limited to 'fs/nfs')
-rw-r--r--  fs/nfs/file.c   |  6
-rw-r--r--  fs/nfs/write.c  | 38
2 files changed, 29 insertions(+), 15 deletions(-)
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index c2fe3bd83ab..238fb6641aa 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -307,14 +307,10 @@ static int nfs_commit_write(struct file *file, struct page *page, unsigned offse
static void nfs_invalidate_page(struct page *page, unsigned long offset)
{
- loff_t range_start, range_end;
-
if (offset != 0)
return;
/* Cancel any unstarted writes on this page */
- range_start = page_offset(page);
- range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
- nfs_sync_mapping_range(page->mapping, range_start, range_end, FLUSH_INVALIDATE);
+ nfs_wb_page_priority(page->mapping->host, page, FLUSH_INVALIDATE);
}
static int nfs_release_page(struct page *page, gfp_t gfp)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 130528d09a2..bd4dff9dbd6 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -81,7 +81,6 @@ static void nfs_mark_request_dirty(struct nfs_page *req);
static int nfs_wait_on_write_congestion(struct address_space *, int);
static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);
static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how);
-static int nfs_wb_page_priority(struct inode *inode, struct page *page, int how);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;
@@ -280,8 +279,10 @@ static int nfs_page_mark_flush(struct page *page)
spin_lock(req_lock);
}
spin_unlock(req_lock);
- if (test_and_set_bit(PG_FLUSHING, &req->wb_flags) == 0)
+ if (test_and_set_bit(PG_FLUSHING, &req->wb_flags) == 0) {
nfs_mark_request_dirty(req);
+ set_page_writeback(page);
+ }
ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
nfs_unlock_request(req);
return ret;
@@ -443,6 +444,13 @@ nfs_mark_request_dirty(struct nfs_page *req)
mark_inode_dirty(inode);
}
+static void
+nfs_redirty_request(struct nfs_page *req)
+{
+ clear_bit(PG_FLUSHING, &req->wb_flags);
+ __set_page_dirty_nobuffers(req->wb_page);
+}
+
/*
* Check if a request is dirty
*/
@@ -777,7 +785,7 @@ static void nfs_writepage_release(struct nfs_page *req)
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
if (!PageError(req->wb_page)) {
if (NFS_NEED_RESCHED(req)) {
- nfs_mark_request_dirty(req);
+ nfs_redirty_request(req);
goto out;
} else if (NFS_NEED_COMMIT(req)) {
nfs_mark_request_commit(req);
@@ -893,7 +901,6 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how)
atomic_set(&req->wb_complete, requests);
ClearPageError(page);
- set_page_writeback(page);
offset = 0;
nbytes = req->wb_bytes;
do {
@@ -923,7 +930,7 @@ out_bad:
list_del(&data->pages);
nfs_writedata_release(data);
}
- nfs_mark_request_dirty(req);
+ nfs_redirty_request(req);
nfs_clear_page_writeback(req);
return -ENOMEM;
}
@@ -954,7 +961,6 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
nfs_list_remove_request(req);
nfs_list_add_request(req, &data->pages);
ClearPageError(req->wb_page);
- set_page_writeback(req->wb_page);
*pages++ = req->wb_page;
count += req->wb_bytes;
}
@@ -969,7 +975,7 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
while (!list_empty(head)) {
struct nfs_page *req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
- nfs_mark_request_dirty(req);
+ nfs_redirty_request(req);
nfs_clear_page_writeback(req);
}
return -ENOMEM;
@@ -1004,7 +1010,7 @@ out_err:
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
- nfs_mark_request_dirty(req);
+ nfs_redirty_request(req);
nfs_clear_page_writeback(req);
}
return error;
@@ -1320,7 +1326,7 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
}
/* We have a mismatch. Write the page again */
dprintk(" mismatch\n");
- nfs_mark_request_dirty(req);
+ nfs_redirty_request(req);
next:
nfs_clear_page_writeback(req);
}
@@ -1451,13 +1457,18 @@ int nfs_wb_all(struct inode *inode)
.bdi = mapping->backing_dev_info,
.sync_mode = WB_SYNC_ALL,
.nr_to_write = LONG_MAX,
+ .for_writepages = 1,
.range_cyclic = 1,
};
int ret;
+ ret = generic_writepages(mapping, &wbc);
+ if (ret < 0)
+ goto out;
ret = nfs_sync_mapping_wait(mapping, &wbc, 0);
if (ret >= 0)
return 0;
+out:
return ret;
}
@@ -1469,16 +1480,23 @@ int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, lo
.nr_to_write = LONG_MAX,
.range_start = range_start,
.range_end = range_end,
+ .for_writepages = 1,
};
int ret;
+ if (!(how & FLUSH_NOWRITEPAGE)) {
+ ret = generic_writepages(mapping, &wbc);
+ if (ret < 0)
+ goto out;
+ }
ret = nfs_sync_mapping_wait(mapping, &wbc, how);
if (ret >= 0)
return 0;
+out:
return ret;
}
-static int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
+int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
{
loff_t range_start = page_offset(page);
loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);