author    Suresh Jayaraman <sjayaraman@suse.de>  2010-10-18 23:29:37 +0530
committer Steve French <sfrench@us.ibm.com>      2010-10-21 13:14:27 +0000
commit    3f9bcca7820a6711307b6499952b13cfcfc31dd6
tree      6c380f5877562778335d6794e1e4a297f8970d77 /fs/cifs/cifs_debug.c
parent    3e24e132878c83910b61eb7704511a6d96a0389f
cifs: convert cifs_tcp_ses_lock from a rwlock to a spinlock
cifs_tcp_ses_lock is a rwlock which protects the cifs_tcp_ses_list, server->smb_ses_list and the ses->tcon_list. It also protects a few ref counters in server, ses and tcon. In most cases the critical section doesn't seem to be large; in the few cases where it is slightly larger, there seems to be no real benefit from concurrent access. I briefly considered the RCU mechanism, but it appears to me that there is no real need.

Replace it with a spinlock and get rid of the last rwlock in the cifs code.

Signed-off-by: Suresh Jayaraman <sjayaraman@suse.de>
Signed-off-by: Steve French <sfrench@us.ibm.com>
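For context, a minimal sketch of the rwlock-to-spinlock conversion pattern this change applies (illustrative kernel C only, not taken from this file's hunks; the example_rwlock/example_spinlock/example_list names are hypothetical, and the declaration/initialization of cifs_tcp_ses_lock itself lives in other files of the series):

    #include <linux/spinlock.h>
    #include <linux/list.h>

    /* Before: a rwlock allowed concurrent readers of the shared list. */
    static DEFINE_RWLOCK(example_rwlock);

    /* After: a plain spinlock; every critical section is exclusive. */
    static DEFINE_SPINLOCK(example_spinlock);

    static LIST_HEAD(example_list);

    static void example_walk_before(void)
    {
            struct list_head *tmp;

            read_lock(&example_rwlock);
            list_for_each(tmp, &example_list) {
                    /* read-only traversal, as in cifs_debug_data_proc_show() */
            }
            read_unlock(&example_rwlock);
    }

    static void example_walk_after(void)
    {
            struct list_head *tmp;

            spin_lock(&example_spinlock);
            list_for_each(tmp, &example_list) {
                    /* same traversal; the critical section is short, so
                     * exclusive locking costs little and the lock stays simple */
            }
            spin_unlock(&example_spinlock);
    }

The diff below is the mechanical part of that conversion for fs/cifs/cifs_debug.c: each read_lock/read_unlock pair around the list walks becomes spin_lock/spin_unlock.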
Diffstat (limited to 'fs/cifs/cifs_debug.c')
 -rw-r--r--  fs/cifs/cifs_debug.c  12
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index eb1ba493489..103ab8b605b 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -148,7 +148,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_printf(m, "Servers:");
i = 0;
- read_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp1, &cifs_tcp_ses_list) {
server = list_entry(tmp1, struct TCP_Server_Info,
tcp_ses_list);
@@ -230,7 +230,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
spin_unlock(&GlobalMid_Lock);
}
}
- read_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
seq_putc(m, '\n');
/* BB add code to dump additional info such as TCP session info now */
@@ -270,7 +270,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
atomic_set(&totBufAllocCount, 0);
atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */
- read_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp1, &cifs_tcp_ses_list) {
server = list_entry(tmp1, struct TCP_Server_Info,
tcp_ses_list);
@@ -303,7 +303,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
}
}
}
- read_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
}
return count;
@@ -343,7 +343,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
GlobalCurrentXid, GlobalMaxActiveXid);
i = 0;
- read_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp1, &cifs_tcp_ses_list) {
server = list_entry(tmp1, struct TCP_Server_Info,
tcp_ses_list);
@@ -397,7 +397,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
}
}
}
- read_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
seq_putc(m, '\n');
return 0;