author     Ingo Molnar <mingo@elte.hu>              2005-09-10 00:25:56 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>    2005-09-10 10:06:21 -0700
commit     fb1c8f93d869b34cacb8b8932e2b83d96a19d720 (patch)
tree       a006d078aa02e421a7dc4793c335308204859d36 /arch/sparc/lib
parent     4327edf6b8a7ac7dce144313947995538842d8fd (diff)
[PATCH] spinlock consolidation
This patch (written by me and also containing many suggestions of Arjan
van de Ven) does a major cleanup of the spinlock code.  It does the
following things:

 - consolidates and enhances the spinlock/rwlock debugging code

 - simplifies the asm/spinlock.h files

 - encapsulates the raw spinlock type and moves generic spinlock
   features (such as ->break_lock) into the generic code.

 - cleans up the spinlock code hierarchy to get rid of the spaghetti.

Most notably there's now only a single variant of the debugging code,
located in lib/spinlock_debug.c.  (Previously we had one SMP debugging
variant per architecture, plus a separate generic one for UP builds.)

Also, I've enhanced the rwlock debugging facility; it will now track
write-owners.  There is new spinlock-owner/CPU-tracking on SMP builds
too.  All locks have lockup detection now, which will work for both
soft and hard spin/rwlock lockups.

The arch-level include files now only contain the minimally necessary
subset of the spinlock code - all the rest that can be generalized now
lives in the generic headers:

 include/asm-i386/spinlock_types.h      |   16
 include/asm-x86_64/spinlock_types.h    |   16

I have also split up the various spinlock variants into separate files,
making it easier to see which does what.  The new layout is:

   SMP                         |  UP
   ----------------------------|-----------------------------------
   asm/spinlock_types_smp.h    |  linux/spinlock_types_up.h
   linux/spinlock_types.h      |  linux/spinlock_types.h
   asm/spinlock_smp.h          |  linux/spinlock_up.h
   linux/spinlock_api_smp.h    |  linux/spinlock_api_up.h
   linux/spinlock.h            |  linux/spinlock.h

/*
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the __raw_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the __raw_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

All SMP and UP architectures are converted by this patch.

arm, i386, ia64, ppc, ppc64, s390/s390x, x64 were build-tested via
crosscompilers.

m32r, mips, sh, sparc have not been tested yet, but should be mostly
fine.

From: Grant Grundler <grundler@parisc-linux.org>

  Booted and lightly tested on a500-44 (64-bit, SMP kernel, dual CPU).
  Builds 32-bit SMP kernel (not booted or tested).  I did not try to
  build non-SMP kernels.  That should be trivial to fix up later if
  necessary.

  I converted bit ops atomic_hash lock to raw_spinlock_t.  Doing so
  avoids some ugly nesting of linux/*.h and asm/*.h files.  Those
  particular locks are well tested and contained entirely inside arch
  specific code.  I do NOT expect any new issues to arise with them.

  If someone does ever need to use debug/metrics with them, then they
  will need to unravel this hairball between spinlocks, atomic ops, and
  bit ops that exist only because parisc has exactly one atomic
  instruction: LDCW (load and clear word).

From: "Luck, Tony" <tony.luck@intel.com>

  ia64 fix

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjanv@infradead.org>
Signed-off-by: Grant Grundler <grundler@parisc-linux.org>
Cc: Matthew Wilcox <willy@debian.org>
Signed-off-by: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Mikael Pettersson <mikpe@csd.uu.se>
Signed-off-by: Benoit Boissinot <benoit.boissinot@ens-lyon.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
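To make the "encapsulates the raw spinlock type" point concrete: after this
consolidation the arch header supplies only the raw type, and the generic
linux/spinlock_types.h wraps it together with the optional ->break_lock and
debugging fields.  The following is a rough sketch from memory of the
2.6.14-era headers, not a verbatim copy of the patch; exact field names and
ifdef conditions may differ slightly:

	/* asm-i386/spinlock_types.h: arch-provided raw type (sketch) */
	typedef struct {
		volatile unsigned int slock;
	} raw_spinlock_t;

	/* linux/spinlock_types.h: generic wrapper built on top of it (sketch) */
	typedef struct {
		raw_spinlock_t raw_lock;
	#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
		unsigned int break_lock;	/* generic ->break_lock, no longer per-arch */
	#endif
	#ifdef CONFIG_DEBUG_SPINLOCK
		unsigned int magic, owner_cpu;	/* used by the single debug variant in lib/spinlock_debug.c */
		void *owner;
	#endif
	} spinlock_t;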
Diffstat (limited to 'arch/sparc/lib')
-rw-r--r--  arch/sparc/lib/Makefile        2
-rw-r--r--  arch/sparc/lib/debuglocks.c  202
2 files changed, 0 insertions, 204 deletions
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 2296ff9dc47..fa500694606 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -9,5 +9,3 @@ lib-y := mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \
strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \
copy_user.o locks.o atomic.o atomic32.o bitops.o \
lshrdi3.o ashldi3.o rwsem.o muldi3.o bitext.o
-
-lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o
diff --git a/arch/sparc/lib/debuglocks.c b/arch/sparc/lib/debuglocks.c
deleted file mode 100644
index fb182352782..00000000000
--- a/arch/sparc/lib/debuglocks.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/* $Id: debuglocks.c,v 1.11 2001/09/20 00:35:31 davem Exp $
- * debuglocks.c: Debugging versions of SMP locking primitives.
- *
- * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1998-99 Anton Blanchard (anton@progsoc.uts.edu.au)
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/threads.h> /* For NR_CPUS */
-#include <linux/spinlock.h>
-#include <asm/psr.h>
-#include <asm/system.h>
-
-#ifdef CONFIG_SMP
-
-/* Some notes on how these debugging routines work. When a lock is acquired
- * an extra debugging member lock->owner_pc is set to the caller of the lock
- * acquisition routine. Right before releasing a lock, the debugging program
- * counter is cleared to zero.
- *
- * Furthermore, since PC's are 4 byte aligned on Sparc, we stuff the CPU
- * number of the owner in the lowest two bits.
- */
-
-#define STORE_CALLER(A) __asm__ __volatile__("mov %%i7, %0" : "=r" (A));
-
-static inline void show(char *str, spinlock_t *lock, unsigned long caller)
-{
- int cpu = smp_processor_id();
-
- printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)\n",str,
- lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3);
-}
-
-static inline void show_read(char *str, rwlock_t *lock, unsigned long caller)
-{
- int cpu = smp_processor_id();
-
- printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)\n", str,
- lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3);
-}
-
-static inline void show_write(char *str, rwlock_t *lock, unsigned long caller)
-{
- int cpu = smp_processor_id();
- int i;
-
- printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)", str,
- lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3);
-
- for(i = 0; i < NR_CPUS; i++)
- printk(" reader[%d]=%08lx", i, lock->reader_pc[i]);
-
- printk("\n");
-}
-
-#undef INIT_STUCK
-#define INIT_STUCK 100000000
-
-void _do_spin_lock(spinlock_t *lock, char *str)
-{
- unsigned long caller;
- unsigned long val;
- int cpu = smp_processor_id();
- int stuck = INIT_STUCK;
-
- STORE_CALLER(caller);
-
-again:
- __asm__ __volatile__("ldstub [%1], %0" : "=r" (val) : "r" (&(lock->lock)));
- if(val) {
- while(lock->lock) {
- if (!--stuck) {
- show(str, lock, caller);
- stuck = INIT_STUCK;
- }
- barrier();
- }
- goto again;
- }
- lock->owner_pc = (cpu & 3) | (caller & ~3);
-}
-
-int _spin_trylock(spinlock_t *lock)
-{
- unsigned long val;
- unsigned long caller;
- int cpu = smp_processor_id();
-
- STORE_CALLER(caller);
-
- __asm__ __volatile__("ldstub [%1], %0" : "=r" (val) : "r" (&(lock->lock)));
- if(!val) {
- /* We got it, record our identity for debugging. */
- lock->owner_pc = (cpu & 3) | (caller & ~3);
- }
- return val == 0;
-}
-
-void _do_spin_unlock(spinlock_t *lock)
-{
- lock->owner_pc = 0;
- barrier();
- lock->lock = 0;
-}
-
-void _do_read_lock(rwlock_t *rw, char *str)
-{
- unsigned long caller;
- unsigned long val;
- int cpu = smp_processor_id();
- int stuck = INIT_STUCK;
-
- STORE_CALLER(caller);
-
-wlock_again:
- __asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock)));
- if(val) {
- while(rw->lock & 0xff) {
- if (!--stuck) {
- show_read(str, rw, caller);
- stuck = INIT_STUCK;
- }
- barrier();
- }
- goto wlock_again;
- }
-
- rw->reader_pc[cpu] = caller;
- barrier();
- rw->lock++;
-}
-
-void _do_read_unlock(rwlock_t *rw, char *str)
-{
- unsigned long caller;
- unsigned long val;
- int cpu = smp_processor_id();
- int stuck = INIT_STUCK;
-
- STORE_CALLER(caller);
-
-wlock_again:
- __asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock)));
- if(val) {
- while(rw->lock & 0xff) {
- if (!--stuck) {
- show_read(str, rw, caller);
- stuck = INIT_STUCK;
- }
- barrier();
- }
- goto wlock_again;
- }
-
- rw->reader_pc[cpu] = 0;
- barrier();
- rw->lock -= 0x1ff;
-}
-
-void _do_write_lock(rwlock_t *rw, char *str)
-{
- unsigned long caller;
- unsigned long val;
- int cpu = smp_processor_id();
- int stuck = INIT_STUCK;
-
- STORE_CALLER(caller);
-
-wlock_again:
- __asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock)));
- if(val) {
-wlock_wait:
- while(rw->lock) {
- if (!--stuck) {
- show_write(str, rw, caller);
- stuck = INIT_STUCK;
- }
- barrier();
- }
- goto wlock_again;
- }
-
- if (rw->lock & ~0xff) {
- *(((unsigned char *)&rw->lock)+3) = 0;
- barrier();
- goto wlock_wait;
- }
-
- barrier();
- rw->owner_pc = (cpu & 3) | (caller & ~3);
-}
-
-void _do_write_unlock(rwlock_t *rw)
-{
- rw->owner_pc = 0;
- barrier();
- rw->lock = 0;
-}
-
-#endif /* SMP */
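
A side note on the owner_pc trick used by the removed file: because program
counters are 4-byte aligned on sparc, the owning CPU number fits into the two
low bits of the stored caller address, exactly as the comment at the top of
debuglocks.c describes.  A small stand-alone sketch of that packing (the
pack_owner helper and the test values are hypothetical, for illustration
only; they are not kernel code):

	#include <stdio.h>

	/* Pack the caller PC and owning CPU the same way the deleted
	 * debuglocks.c set lock->owner_pc: CPU number in the low two bits,
	 * 4-byte-aligned caller PC in the remaining bits of the word. */
	static unsigned long pack_owner(unsigned long caller_pc, unsigned int cpu)
	{
		return (cpu & 3) | (caller_pc & ~3UL);
	}

	int main(void)
	{
		unsigned long owner = pack_owner(0x00401234UL, 2);

		/* Unpack the word the same way show()/show_read() did when
		 * reporting a stuck lock. */
		printf("owner PC %#lx, CPU %lu\n", owner & ~3UL, owner & 3UL);
		return 0;
	}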