| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-01-06 17:59:33 -0800 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-01-06 17:59:33 -0800 |
| commit | c77417132c12af338a7d37956809b2b98d20413c (patch) | |
| tree | 02cb0ef1f8dfa1af8ce0965883dd449adf33eb2c /arch/m68k/include/asm/uaccess_mm.h | |
| parent | e4e88f31bcb5f05f24b9ae518d4ecb44e1a7774d (diff) | |
| parent | 1f7034b9616e6f14dc7b6aa280210421428f31af (diff) | |
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/gerg/m68knommu
* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/gerg/m68knommu: (56 commits)
m68k: allow ColdFire 547x and 548x CPUs to be built with MMU enabled
m68k/Kconfig: Separate classic m68k and coldfire early
m68k: add ColdFire with MMU enabled support to the m68k mem init code
m68k: do not use m68k startup or interrupt code for ColdFire CPUs
m68k: add ColdFire FPU support for the V4e ColdFire CPUs
m68k: adjustments to stack frame for ColdFire with MMU enabled
m68k: use non-MMU linker script for ColdFire MMU builds
m68k: ColdFire with MMU enabled uses same clocking code as non-MMU
m68k: add code to setup a ColdFire 54xx platform when MMU enabled
m68k: use non-MMU entry.S code when compiling for ColdFire CPU
m68k: create ColdFire MMU pgalloc code
m68k: compile appropriate mm arch files for ColdFire MMU support
m68k: ColdFire V4e MMU paging init code and miss handler
m68k: use ColdFire MMU read/write bit flags when ioremapping
m68k: modify cache push and clear code for ColdFire with MMU enable
m68k: use tracehook_report_syscall_entry/exit for ColdFire MMU ptrace path
m68k: ColdFire V4e MMU context support code
m68k: MMU enabled ColdFire needs 8k ELF alignment
m68k: set ColdFire MMU page size
m68k: define PAGE_OFFSET_RAW for ColdFire CPU with MMU enabled
...
Diffstat (limited to 'arch/m68k/include/asm/uaccess_mm.h')

| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | arch/m68k/include/asm/uaccess_mm.h | 42 |

1 file changed, 29 insertions, 13 deletions
```diff
diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h
index 7107f3fbdbb..9c80cd515b2 100644
--- a/arch/m68k/include/asm/uaccess_mm.h
+++ b/arch/m68k/include/asm/uaccess_mm.h
@@ -21,6 +21,22 @@ static inline int access_ok(int type, const void __user *addr,
 }
 
 /*
+ * Not all variants of the 68k family support the notion of address spaces.
+ * The traditional 680x0 parts do, and they use the sfc/dfc registers and
+ * the "moves" instruction to access user space from kernel space. Other
+ * family members like ColdFire don't support this, and only have a single
+ * address space, and use the usual "move" instruction for user space access.
+ *
+ * Outside of this difference the user space access functions are the same.
+ * So let's keep the code simple and just define what we need to use.
+ */
+#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
+#define MOVES	"moves"
+#else
+#define MOVES	"move"
+#endif
+
+/*
  * The exception table consists of pairs of addresses: the first is the
  * address of an instruction that is allowed to fault, and the second is
  * the address at which the program should continue. No registers are
@@ -43,7 +59,7 @@ extern int __get_user_bad(void);
 
 #define __put_user_asm(res, x, ptr, bwl, reg, err)	\
 asm volatile ("\n"					\
-	"1:	moves."#bwl"	%2,%1\n"		\
+	"1:	"MOVES"."#bwl"	%2,%1\n"		\
 	"2:\n"						\
 	"	.section .fixup,\"ax\"\n"		\
 	"	.even\n"				\
@@ -83,8 +99,8 @@ asm volatile ("\n"				\
 	    {							\
 		const void __user *__pu_ptr = (ptr);		\
 		asm volatile ("\n"				\
-			"1:	moves.l	%2,(%1)+\n"		\
-			"2:	moves.l	%R2,(%1)\n"		\
+			"1:	"MOVES".l	%2,(%1)+\n"	\
+			"2:	"MOVES".l	%R2,(%1)\n"	\
 			"3:\n"					\
 			"	.section .fixup,\"ax\"\n"	\
 			"	.even\n"			\
@@ -115,12 +131,12 @@ asm volatile ("\n"			\
 #define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({	\
 	type __gu_val;						\
 	asm volatile ("\n"					\
-		"1:	moves."#bwl"	%2,%1\n"		\
+		"1:	"MOVES"."#bwl"	%2,%1\n"		\
 		"2:\n"						\
 		"	.section .fixup,\"ax\"\n"		\
 		"	.even\n"				\
 		"10:	move.l	%3,%0\n"			\
-		"	sub."#bwl"	%1,%1\n"		\
+		"	sub.l	%1,%1\n"			\
 		"	jra	2b\n"				\
 		"	.previous\n"				\
 		"\n"						\
@@ -152,8 +168,8 @@ asm volatile ("\n"			\
 		const void *__gu_ptr = (ptr);			\
 		u64 __gu_val;					\
 		asm volatile ("\n"				\
-			"1:	moves.l	(%2)+,%1\n"		\
-			"2:	moves.l	(%2),%R1\n"		\
+			"1:	"MOVES".l	(%2)+,%1\n"	\
+			"2:	"MOVES".l	(%2),%R1\n"	\
 			"3:\n"					\
 			"	.section .fixup,\"ax\"\n"	\
 			"	.even\n"			\
@@ -188,12 +204,12 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned
 
 #define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\
 	asm volatile ("\n"						\
-		"1:	moves."#s1"	(%2)+,%3\n"			\
+		"1:	"MOVES"."#s1"	(%2)+,%3\n"			\
 		"	move."#s1"	%3,(%1)+\n"			\
-		"2:	moves."#s2"	(%2)+,%3\n"			\
+		"2:	"MOVES"."#s2"	(%2)+,%3\n"			\
 		"	move."#s2"	%3,(%1)+\n"			\
 		"	.ifnc	\""#s3"\",\"\"\n"			\
-		"3:	moves."#s3"	(%2)+,%3\n"			\
+		"3:	"MOVES"."#s3"	(%2)+,%3\n"			\
 		"	move."#s3"	%3,(%1)+\n"			\
 		"	.endif\n"					\
 		"4:\n"							\
@@ -269,13 +285,13 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
 #define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3)	\
 	asm volatile ("\n"						\
 		"	move."#s1"	(%2)+,%3\n"			\
-		"11:	moves."#s1"	%3,(%1)+\n"			\
+		"11:	"MOVES"."#s1"	%3,(%1)+\n"			\
 		"12:	move."#s2"	(%2)+,%3\n"			\
-		"21:	moves."#s2"	%3,(%1)+\n"			\
+		"21:	"MOVES"."#s2"	%3,(%1)+\n"			\
 		"22:\n"							\
 		"	.ifnc	\""#s3"\",\"\"\n"			\
 		"	move."#s3"	(%2)+,%3\n"			\
-		"31:	moves."#s3"	%3,(%1)+\n"			\
+		"31:	"MOVES"."#s3"	%3,(%1)+\n"			\
 		"32:\n"							\
 		"	.endif\n"					\
 		"4:\n"							\
```
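The whole conversion hinges on C's adjacent-string-literal concatenation: the asm templates are glued together at compile time, so switching between `moves` and `move` needs only the one `MOVES` #define. Below is a minimal, stand-alone sketch of that mechanism (not kernel code; the config macro is forced by hand here so the example compiles anywhere):

```c
#include <stdio.h>

/* Assumption for the example: force the classic-680x0 case by hand.
 * In a real kernel build this comes from Kconfig. */
#define CONFIG_CPU_HAS_ADDRESS_SPACES

#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
#define MOVES "moves"	/* 680x0: sfc/dfc-steered user-space access */
#else
#define MOVES "move"	/* ColdFire: single address space, plain move */
#endif

/* Mirrors the kernel macros above: #bwl stringizes the size suffix
 * (b, w or l), and the adjacent string literals concatenate, so this
 * expands to "1:\tmoves.l\t%2,%1\n" (or "move.l" on ColdFire) before
 * the template ever reaches the assembler. */
#define ASM_TEMPLATE(bwl) "1:\t"MOVES"."#bwl"\t%2,%1\n"

int main(void)
{
	/* Prints the template the compiler would hand to the assembler:
	 *   1:	moves.l	%2,%1 */
	fputs(ASM_TEMPLATE(l), stdout);
	return 0;
}
```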

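The exception table that the context comment in the first hunk describes is what makes the numbered labels (`1:`, `2:`, `11:`, `21:`, ...) in these macros work: every user-space access that may fault gets an entry pairing its address with fixup code that stores the error (or remaining byte count) and resumes execution past the access. A sketch of the entry layout, matching the two-word form m68k used at the time:

```c
/* Each entry pairs the address of an instruction that is allowed to
 * fault (the MOVES/move at the numbered labels) with the address the
 * fault handler transfers control to; the fixup code in .fixup then
 * jumps back past the faulting access. */
struct exception_table_entry {
	unsigned long insn;	/* address of the faulting user access */
	unsigned long fixup;	/* address to continue execution at */
};
```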