author		Jeff Dike <jdike@addtoit.com>	2007-05-08 00:35:02 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-08 11:15:20 -0700
commit		a436ed9c5106b41606cbb55ab3b28389fe8ae04f
tree		b8df0bde6d7eb2808c37da815d8857396ee1eaf1 /include/asm-x86_64/cmpxchg.h
parent		5dc12ddee93d63d7107cbbf70db23476d7b30e43
x86: create asm/cmpxchg.h
i386:
Rearrange the cmpxchg code to allow atomic.h to get it without needing to
include system.h. This kills warnings in the UML build from atomic.h about
implicit declarations of cmpxchg symbols. The i386 build presumably isn't
seeing this because a separate inclusion of system.h is covering it over.
The cmpxchg stuff is moved to asm-i386/cmpxchg.h, with an include left in
system.h for the benefit of generic code which expects cmpxchg there.
Meanwhile, atomic.h includes cmpxchg.h.
This causes no noticeable damage to the i386 build.
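For reference, the include arrangement described above works out to roughly the following; the exact lines in the i386 headers are a sketch based on the description, not quoted from this patch:

    /* asm-i386/system.h: forwarding include kept for generic code */
    #include <asm/cmpxchg.h>

    /* asm-i386/atomic.h: now gets cmpxchg() without pulling in system.h */
    #include <asm/cmpxchg.h>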
x86_64:
Move cmpxchg into its own header. atomic.h already included system.h, so
this is changed to include cmpxchg.h.
This is purely cleanup - it isn't fixing any warnings - so if the x86_64
system.h isn't considered as cleanup-worthy as i386's, then this can be
dropped.
It causes no noticeable damage to the x86_64 build.
uml:
The i386 and x86_64 cmpxchg patches require an asm-um/cmpxchg.h for the
UML build.
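A forwarding header of roughly the following shape would satisfy that requirement; the guard name and include path here are assumptions for illustration, not the contents of the actual asm-um/cmpxchg.h in this series:

    #ifndef __UM_CMPXCHG_H
    #define __UM_CMPXCHG_H

    /* Reuse the underlying architecture's xchg()/cmpxchg() implementation. */
    #include "asm/arch/cmpxchg.h"

    #endif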
Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/asm-x86_64/cmpxchg.h')
-rw-r--r--	include/asm-x86_64/cmpxchg.h	134
1 file changed, 134 insertions, 0 deletions
diff --git a/include/asm-x86_64/cmpxchg.h b/include/asm-x86_64/cmpxchg.h
new file mode 100644
index 00000000000..09a6b6b6b74
--- /dev/null
+++ b/include/asm-x86_64/cmpxchg.h
@@ -0,0 +1,134 @@
+#ifndef __ASM_CMPXCHG_H
+#define __ASM_CMPXCHG_H
+
+#include <asm/alternative.h> /* Provides LOCK_PREFIX */
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+
+#define __xg(x) ((volatile long *)(x))
+
+static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
+{
+        *ptr = val;
+}
+
+#define _set_64bit set_64bit
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
+ *        but generally the primitive is invalid, *ptr is output argument. --ANK
+ */
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+        switch (size) {
+                case 1:
+                        __asm__ __volatile__("xchgb %b0,%1"
+                                :"=q" (x)
+                                :"m" (*__xg(ptr)), "0" (x)
+                                :"memory");
+                        break;
+                case 2:
+                        __asm__ __volatile__("xchgw %w0,%1"
+                                :"=r" (x)
+                                :"m" (*__xg(ptr)), "0" (x)
+                                :"memory");
+                        break;
+                case 4:
+                        __asm__ __volatile__("xchgl %k0,%1"
+                                :"=r" (x)
+                                :"m" (*__xg(ptr)), "0" (x)
+                                :"memory");
+                        break;
+                case 8:
+                        __asm__ __volatile__("xchgq %0,%1"
+                                :"=r" (x)
+                                :"m" (*__xg(ptr)), "0" (x)
+                                :"memory");
+                        break;
+        }
+        return x;
+}
+
+/*
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+                                      unsigned long new, int size)
+{
+        unsigned long prev;
+        switch (size) {
+        case 1:
+                __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+                                     : "=a"(prev)
+                                     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+                                     : "memory");
+                return prev;
+        case 2:
+                __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+                                     : "=a"(prev)
+                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+                                     : "memory");
+                return prev;
+        case 4:
+                __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
+                                     : "=a"(prev)
+                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+                                     : "memory");
+                return prev;
+        case 8:
+                __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
+                                     : "=a"(prev)
+                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+                                     : "memory");
+                return prev;
+        }
+        return old;
+}
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+                        unsigned long old, unsigned long new, int size)
+{
+        unsigned long prev;
+        switch (size) {
+        case 1:
+                __asm__ __volatile__("cmpxchgb %b1,%2"
+                                     : "=a"(prev)
+                                     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+                                     : "memory");
+                return prev;
+        case 2:
+                __asm__ __volatile__("cmpxchgw %w1,%2"
+                                     : "=a"(prev)
+                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+                                     : "memory");
+                return prev;
+        case 4:
+                __asm__ __volatile__("cmpxchgl %k1,%2"
+                                     : "=a"(prev)
+                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+                                     : "memory");
+                return prev;
+        case 8:
+                __asm__ __volatile__("cmpxchgq %1,%2"
+                                     : "=a"(prev)
+                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+                                     : "memory");
+                return prev;
+        }
+        return old;
+}
+
+#define cmpxchg(ptr,o,n)\
+        ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+                                        (unsigned long)(n),sizeof(*(ptr))))
+#define cmpxchg_local(ptr,o,n)\
+        ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+                                        (unsigned long)(n),sizeof(*(ptr))))
+
+#endif
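The cmpxchg() defined above is normally used in a read-modify-retry loop. Below is a minimal userspace analogue of that pattern, using GCC's __sync_val_compare_and_swap builtin as a stand-in for the kernel macro; this is an illustration only, not part of this commit:

    #include <stdio.h>

    static long counter;

    /* Add 'delta' to '*ptr' atomically via a compare-and-swap retry loop,
     * the same pattern kernel code builds on top of cmpxchg(). */
    static void atomic_add_long(long *ptr, long delta)
    {
            long old, new;

            do {
                    old = *ptr;          /* snapshot the current value */
                    new = old + delta;   /* compute the desired value  */
                    /* Install 'new' only if '*ptr' still holds 'old';
                     * otherwise another thread raced us, so retry. */
            } while (__sync_val_compare_and_swap(ptr, old, new) != old);
    }

    int main(void)
    {
            atomic_add_long(&counter, 5);
            printf("counter = %ld\n", counter);   /* prints 5 */
            return 0;
    }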