author     Trond Myklebust <Trond.Myklebust@netapp.com>  2010-05-13 12:51:01 -0400
committer  Trond Myklebust <Trond.Myklebust@netapp.com>  2010-05-14 15:09:33 -0400
commit     8535b2be5181fc3019e4150567ef53210fe3b04f
tree       ba366f5304f12876f1e45d3c2b423d12f9ec0c90  /fs/nfs/nfs4state.c
parent     712a4338669d7d57f952244abb608e6ac07e39da
NFSv4: Don't use GFP_KERNEL allocations in state recovery
We do not want the state recovery thread to kick off memory reclaim and then
wait on it, since that may deadlock: the writebacks triggered by reclaim can
themselves end up waiting for the state recovery thread to complete.
The safe thing is therefore to use GFP_NOFS in all open, close,
delegation return, lock, etc. operations that may be called by the
state recovery thread.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
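
To illustrate the allocation-context pattern the patch applies, here is a
minimal, self-contained sketch. It is illustrative only: the demo_* names are
hypothetical and do not appear in nfs4state.c, but the shape matches the
patch, where the caller supplies the gfp_t and reclaim-sensitive paths pass
GFP_NOFS.

/*
 * Sketch of the pattern (hypothetical demo_* names): callers that may
 * run on the writeback/reclaim path pass GFP_NOFS so the allocator
 * cannot recurse into filesystem reclaim; callers known to be in safe
 * process context may still pass GFP_KERNEL.
 */
#include <linux/slab.h>
#include <linux/gfp.h>

struct demo_state {
	int count;
};

/* The caller chooses the allocation context, as nfs_alloc_seqid() now does. */
static struct demo_state *demo_alloc_state(gfp_t gfp_mask)
{
	return kzalloc(sizeof(struct demo_state), gfp_mask);
}

static struct demo_state *demo_alloc_from_recovery(void)
{
	/*
	 * State recovery may be exactly what writeback is waiting on,
	 * so it must not trigger writeback itself: use GFP_NOFS.
	 */
	return demo_alloc_state(GFP_NOFS);
}

static struct demo_state *demo_alloc_from_process_context(void)
{
	/* An ordinary syscall path can safely block on reclaim. */
	return demo_alloc_state(GFP_KERNEL);
}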
Diffstat (limited to 'fs/nfs/nfs4state.c')
-rw-r--r--  fs/nfs/nfs4state.c  21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index cd2d90400d46..34acf5926fdc 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -366,7 +366,7 @@ nfs4_alloc_state_owner(void)
 {
 	struct nfs4_state_owner *sp;
 
-	sp = kzalloc(sizeof(*sp),GFP_KERNEL);
+	sp = kzalloc(sizeof(*sp),GFP_NOFS);
 	if (!sp)
 		return NULL;
 	spin_lock_init(&sp->so_lock);
@@ -440,7 +440,7 @@ nfs4_alloc_open_state(void)
 {
 	struct nfs4_state *state;
 
-	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	state = kzalloc(sizeof(*state), GFP_NOFS);
 	if (!state)
 		return NULL;
 	atomic_set(&state->count, 1);
@@ -542,7 +542,8 @@ void nfs4_put_open_state(struct nfs4_state *state)
 /*
  * Close the current file.
  */
-static void __nfs4_close(struct path *path, struct nfs4_state *state, fmode_t fmode, int wait)
+static void __nfs4_close(struct path *path, struct nfs4_state *state,
+		fmode_t fmode, gfp_t gfp_mask, int wait)
 {
 	struct nfs4_state_owner *owner = state->owner;
 	int call_close = 0;
@@ -583,17 +584,17 @@ static void __nfs4_close(struct path *path, struct nfs4_state *state, fmode_t fm
 		nfs4_put_open_state(state);
 		nfs4_put_state_owner(owner);
 	} else
-		nfs4_do_close(path, state, wait);
+		nfs4_do_close(path, state, gfp_mask, wait);
 }
 
 void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode)
 {
-	__nfs4_close(path, state, fmode, 0);
+	__nfs4_close(path, state, fmode, GFP_NOFS, 0);
 }
 
 void nfs4_close_sync(struct path *path, struct nfs4_state *state, fmode_t fmode)
 {
-	__nfs4_close(path, state, fmode, 1);
+	__nfs4_close(path, state, fmode, GFP_KERNEL, 1);
 }
 
 /*
@@ -623,7 +624,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
 	struct nfs4_lock_state *lsp;
 	struct nfs_client *clp = state->owner->so_client;
 
-	lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
+	lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
 	if (lsp == NULL)
 		return NULL;
 	rpc_init_wait_queue(&lsp->ls_sequence.wait, "lock_seqid_waitqueue");
@@ -759,11 +760,11 @@ void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t f
 	nfs4_put_lock_state(lsp);
 }
 
-struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
+struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask)
 {
 	struct nfs_seqid *new;
 
-	new = kmalloc(sizeof(*new), GFP_KERNEL);
+	new = kmalloc(sizeof(*new), gfp_mask);
 	if (new != NULL) {
 		new->sequence = counter;
 		INIT_LIST_HEAD(&new->list);
@@ -1352,7 +1353,7 @@ static int nfs4_recall_slot(struct nfs_client *clp)
 
 	nfs4_begin_drain_session(clp);
 	new = kmalloc(fc_tbl->target_max_slots * sizeof(struct nfs4_slot),
-		GFP_KERNEL);
+		GFP_NOFS);
 	if (!new)
 		return -ENOMEM;
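
Note the asymmetry the diff introduces in the two __nfs4_close() wrappers:
nfs4_close_state() passes GFP_NOFS, while nfs4_close_sync() keeps GFP_KERNEL.
Presumably this is because the synchronous close already blocks waiting for
the CLOSE to complete and is only called from contexts where sleeping on
reclaim is safe, whereas the asynchronous close can be reached from
reclaim-sensitive paths and must not recurse into filesystem writeback.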