 tools/arch/sparc/include/asm/barrier.h    |  8 ++++++++
 tools/arch/sparc/include/asm/barrier_32.h |  6 ++++++
 tools/arch/sparc/include/asm/barrier_64.h | 42 ++++++++++++++++++++++++++++++
 tools/include/asm/barrier.h               |  2 ++
 tools/perf/MANIFEST                       |  3 +++
 tools/perf/perf-sys.h                     |  9 ---------
 6 files changed, 61 insertions(+), 9 deletions(-)
diff --git a/tools/arch/sparc/include/asm/barrier.h b/tools/arch/sparc/include/asm/barrier.h
new file mode 100644
index 000000000000..8c017b3b1391
--- /dev/null
+++ b/tools/arch/sparc/include/asm/barrier.h
@@ -0,0 +1,8 @@
+#ifndef ___TOOLS_LINUX_ASM_SPARC_BARRIER_H
+#define ___TOOLS_LINUX_ASM_SPARC_BARRIER_H
+#if defined(__sparc__) && defined(__arch64__)
+#include "barrier_64.h"
+#else
+#include "barrier_32.h"
+#endif
+#endif
diff --git a/tools/arch/sparc/include/asm/barrier_32.h b/tools/arch/sparc/include/asm/barrier_32.h
new file mode 100644
index 000000000000..c5eadd0a7233
--- /dev/null
+++ b/tools/arch/sparc/include/asm/barrier_32.h
@@ -0,0 +1,6 @@
+#ifndef __TOOLS_PERF_SPARC_BARRIER_H
+#define __TOOLS_PERF_SPARC_BARRIER_H
+
+#include <asm-generic/barrier.h>
+
+#endif /* !(__TOOLS_PERF_SPARC_BARRIER_H) */
diff --git a/tools/arch/sparc/include/asm/barrier_64.h b/tools/arch/sparc/include/asm/barrier_64.h
new file mode 100644
index 000000000000..9a7d7322c3f7
--- /dev/null
+++ b/tools/arch/sparc/include/asm/barrier_64.h
@@ -0,0 +1,42 @@
+#ifndef __TOOLS_LINUX_SPARC64_BARRIER_H
+#define __TOOLS_LINUX_SPARC64_BARRIER_H
+
+/* Copied from the kernel sources to tools/:
+ *
+ * These are here in an effort to more fully work around Spitfire Errata
+ * #51. Essentially, if a memory barrier occurs soon after a mispredicted
+ * branch, the chip can stop executing instructions until a trap occurs.
+ * Therefore, if interrupts are disabled, the chip can hang forever.
+ *
+ * It used to be believed that the memory barrier had to be right in the
+ * delay slot, but a case has been traced recently wherein the memory barrier
+ * was one instruction after the branch delay slot and the chip still hung.
+ * The offending sequence was the following in sym_wakeup_done() of the
+ * sym53c8xx_2 driver:
+ *
+ *	call	sym_ccb_from_dsa, 0
+ *	 movge	%icc, 0, %l0
+ *	brz,pn	%o0, .LL1303
+ *	 mov	%o0, %l2
+ *	membar	#LoadLoad
+ *
+ * The branch has to be mispredicted for the bug to occur.  Therefore, we put
+ * the memory barrier explicitly into a "branch always, predicted taken"
+ * delay slot to avoid the problem case.
+ */
+#define membar_safe(type) \
+do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
+			     " membar	" type "\n" \
+			     "1:\n" \
+			     : : : "memory"); \
+} while (0)
+
+/* The kernel always executes in TSO memory model these days,
+ * and furthermore most sparc64 chips implement more stringent
+ * memory ordering than required by the specifications.
+ */
+#define mb()	membar_safe("#StoreLoad")
+#define rmb()	__asm__ __volatile__("":::"memory")
+#define wmb()	__asm__ __volatile__("":::"memory")
+
+#endif /* !(__TOOLS_LINUX_SPARC64_BARRIER_H) */
diff --git a/tools/include/asm/barrier.h b/tools/include/asm/barrier.h
index 1338c8a404ef..87f6309ef97d 100644
--- a/tools/include/asm/barrier.h
+++ b/tools/include/asm/barrier.h
@@ -6,4 +6,6 @@
 #include "../../arch/s390/include/asm/barrier.h"
 #elif defined(__sh__)
 #include "../../arch/sh/include/asm/barrier.h"
+#elif defined(__sparc__)
+#include "../../arch/sparc/include/asm/barrier.h"
 #endif
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 2ceb1b262729..ee692408f8dd 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -2,6 +2,9 @@ tools/perf
 tools/arch/powerpc/include/asm/barrier.h
 tools/arch/s390/include/asm/barrier.h
 tools/arch/sh/include/asm/barrier.h
+tools/arch/sparc/include/asm/barrier.h
+tools/arch/sparc/include/asm/barrier_32.h
+tools/arch/sparc/include/asm/barrier_64.h
 tools/arch/x86/include/asm/barrier.h
 tools/scripts
 tools/build
diff --git a/tools/perf/perf-sys.h b/tools/perf/perf-sys.h
index 7ef5e821d418..b3e911afa1de 100644
--- a/tools/perf/perf-sys.h
+++ b/tools/perf/perf-sys.h
@@ -57,15 +57,6 @@
 #endif
 
 #ifdef __sparc__
-#ifdef __LP64__
-#define mb()		asm volatile("ba,pt %%xcc, 1f\n"	\
-				     "membar #StoreLoad\n"	\
-				     "1:\n":::"memory")
-#else
-#define mb()		asm volatile("":::"memory")
-#endif
-#define wmb()		asm volatile("":::"memory")
-#define rmb()		asm volatile("":::"memory")
 #define CPUINFO_PROC	{"cpu"}
 #endif
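
For context, the consumer of these barriers in tools/perf is the lock-free
mmap ring buffer shared with the kernel: the reader must order its load of
data_head before it reads any records, and must order all record reads
before the data_tail store that hands the space back to the kernel.
A minimal sketch of that pattern follows, assuming tools/include is on the
include path so that <asm/barrier.h> resolves to the per-arch header added
above; the ring_buffer_* function names are illustrative, not the exact
perf helpers.

#include <linux/perf_event.h>	/* struct perf_event_mmap_page */
#include <linux/types.h>	/* __u64 */
#include <asm/barrier.h>	/* mb()/rmb(), resolved per-arch by tools/ */

/* Load the kernel's producer position.  The rmb() keeps the data_head
 * load ordered before any subsequent loads from the ring buffer, so the
 * reader never sees a record slot the kernel has not finished writing. */
static inline __u64 ring_buffer_read_head(struct perf_event_mmap_page *pc)
{
	__u64 head = pc->data_head;

	rmb();
	return head;
}

/* Publish the consumer position.  The mb() orders all prior record loads
 * before the data_tail store, so the kernel cannot overwrite records the
 * reader has not finished consuming. */
static inline void ring_buffer_write_tail(struct perf_event_mmap_page *pc,
					  __u64 tail)
{
	mb();
	pc->data_tail = tail;
}

On sparc64 the mb() here now expands to membar_safe("#StoreLoad") from
barrier_64.h above, while rmb() is a plain compiler barrier, matching the
kernel's TSO assumptions; before this patch the same definitions lived as
open-coded asm in perf-sys.h.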