From c755238d2ce0960ced9ffccc2ce14de2cd01b647 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 11 Jul 2021 23:31:43 +0100 Subject: ARM: 9099/1: crypto: rename 'mod_init' & 'mod_exit' functions to be module-specific Rename module_init & module_exit functions that are named "mod_init" and "mod_exit" so that they are unique in both the System.map file and in initcall_debug output instead of showing up as almost anonymous "mod_init". This is helpful for debugging and in determining how long certain module_init calls take to execute. Signed-off-by: Randy Dunlap Cc: Jason A. Donenfeld Cc: linux-arm-kernel@lists.infradead.org Cc: patches@armlinux.org.uk Signed-off-by: Russell King (Oracle) --- arch/arm/crypto/curve25519-glue.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm/crypto/curve25519-glue.c b/arch/arm/crypto/curve25519-glue.c index 31eb75b6002f..9bdafd57888c 100644 --- a/arch/arm/crypto/curve25519-glue.c +++ b/arch/arm/crypto/curve25519-glue.c @@ -112,7 +112,7 @@ static struct kpp_alg curve25519_alg = { .max_size = curve25519_max_size, }; -static int __init mod_init(void) +static int __init arm_curve25519_init(void) { if (elf_hwcap & HWCAP_NEON) { static_branch_enable(&have_neon); @@ -122,14 +122,14 @@ static int __init mod_init(void) return 0; } -static void __exit mod_exit(void) +static void __exit arm_curve25519_exit(void) { if (IS_REACHABLE(CONFIG_CRYPTO_KPP) && elf_hwcap & HWCAP_NEON) crypto_unregister_kpp(&curve25519_alg); } -module_init(mod_init); -module_exit(mod_exit); +module_init(arm_curve25519_init); +module_exit(arm_curve25519_exit); MODULE_ALIAS_CRYPTO("curve25519"); MODULE_ALIAS_CRYPTO("curve25519-neon"); -- cgit v1.2.3 From d7bcc5e22967c96685d03dbbd167e1a1ddf9b910 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 29 Jul 2021 15:03:51 +0100 Subject: ARM: 9102/1: move theinstall rules to arch/arm/Makefile Currently, the (z/u)install targets in arch/arm/Makefile descend into arch/arm/boot/Makefile to invoke the shell script, but there is no good reason to do so. arch/arm/Makefile can run the shell script directly. 
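The rename in ARM: 9099/1 above follows a pattern that applies to any module: give the initcall a module-specific name so it shows up usefully in System.map and initcall_debug. A minimal sketch for a hypothetical "foo" driver (all names here are invented for illustration, not taken from the patch):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

/* Hypothetical "foo" driver: the initcall has a module-specific name, so
 * System.map and initcall_debug show foo_driver_init instead of an
 * almost anonymous mod_init. */
static int __init foo_driver_init(void)
{
	pr_info("foo: loaded\n");
	return 0;
}

static void __exit foo_driver_exit(void)
{
	pr_info("foo: unloaded\n");
}

module_init(foo_driver_init);
module_exit(foo_driver_exit);

MODULE_DESCRIPTION("Example of module-specific init/exit naming");
MODULE_LICENSE("GPL");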
Signed-off-by: Masahiro Yamada Signed-off-by: Russell King (Oracle) --- arch/arm/Makefile | 3 ++- arch/arm/boot/Makefile | 14 +------------- 2 files changed, 3 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 173da685a52e..847c31e7c368 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -308,7 +308,8 @@ $(BOOT_TARGETS): vmlinux @$(kecho) ' Kernel: $(boot)/$@ is ready' $(INSTALL_TARGETS): - $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@ + $(CONFIG_SHELL) $(srctree)/$(boot)/install.sh "$(KERNELRELEASE)" \ + $(boot)/$(patsubst %install,%Image,$@) System.map "$(INSTALL_PATH)" PHONY += vdso_install vdso_install: diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile index 0b3cd7a33a26..54a09f9464fb 100644 --- a/arch/arm/boot/Makefile +++ b/arch/arm/boot/Makefile @@ -96,23 +96,11 @@ $(obj)/bootp/bootp: $(obj)/zImage initrd FORCE $(obj)/bootpImage: $(obj)/bootp/bootp FORCE $(call if_changed,objcopy) -PHONY += initrd install zinstall uinstall +PHONY += initrd initrd: @test "$(INITRD_PHYS)" != "" || \ (echo This machine does not support INITRD; exit -1) @test "$(INITRD)" != "" || \ (echo You must specify INITRD; exit -1) -install: - $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \ - $(obj)/Image System.map "$(INSTALL_PATH)" - -zinstall: - $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \ - $(obj)/zImage System.map "$(INSTALL_PATH)" - -uinstall: - $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \ - $(obj)/uImage System.map "$(INSTALL_PATH)" - subdir- := bootp compressed dts -- cgit v1.2.3 From 6fec92d9b2bfd2fb1a2a4295dc859d9bab16c8fc Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Fri, 30 Jul 2021 10:43:02 +0100 Subject: ARM: 9103/1: Drop ARCH_NR_GPIOS definition The conditional by the generic header is the same, hence drop unnecessary duplication. Link: https://lore.kernel.org/r/20210510114107.43006-1-andriy.shevchenko@linux.intel.com Signed-off-by: Andy Shevchenko Reviewed-by: Linus Walleij Signed-off-by: Linus Walleij Signed-off-by: Russell King (Oracle) --- arch/arm/include/asm/gpio.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/gpio.h b/arch/arm/include/asm/gpio.h index c50e383358c4..f3bb8a2bf788 100644 --- a/arch/arm/include/asm/gpio.h +++ b/arch/arm/include/asm/gpio.h @@ -2,10 +2,6 @@ #ifndef _ARCH_ARM_GPIO_H #define _ARCH_ARM_GPIO_H -#if CONFIG_ARCH_NR_GPIO > 0 -#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO -#endif - /* Note: this may rely upon the value of ARCH_NR_GPIOS set in mach/gpio.h */ #include -- cgit v1.2.3 From b30d0289de72c62516df03fdad8d53f552c69839 Mon Sep 17 00:00:00 2001 From: David Heidelberg Date: Mon, 9 Aug 2021 19:07:30 +0100 Subject: ARM: 9105/1: atags_to_fdt: don't warn about stack size The merge_fdt_bootargs() function by definition consumes more than 1024 bytes of stack because it has a 1024 byte command line on the stack, meaning that we always get a warning when building this file: arch/arm/boot/compressed/atags_to_fdt.c: In function 'merge_fdt_bootargs': arch/arm/boot/compressed/atags_to_fdt.c:98:1: warning: the frame size of 1032 bytes is larger than 1024 bytes [-Wframe-larger-than=] However, as this is the decompressor and we know that it has a very shallow call chain, and we do not actually risk overflowing the kernel stack at runtime here. This just shuts up the warning by disabling the warning flag for this file. Tested on Nexus 7 2012 builds. 
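To see why the 1024-byte frame limit can never be met here, consider a simplified sketch of a merge function of this shape (an assumed outline, not the actual decompressor code): the merged command line lives in a local buffer of COMMAND_LINE_SIZE bytes, so the frame is already 1024 bytes before saved registers and other locals push it to the reported 1032.

#define COMMAND_LINE_SIZE 1024	/* matches the decompressor's buffer size */

/* Sketch: the local cmdline[] alone fills the 1024-byte budget, so the
 * full frame (buffer plus saved registers and locals) inevitably exceeds
 * the -Wframe-larger-than=1024 limit. */
void merge_bootargs_sketch(void *fdt, const char *atag_cmdline)
{
	char cmdline[COMMAND_LINE_SIZE];

	/* ... read /chosen bootargs from fdt, append atag_cmdline,
	 * write the result back to the device tree ... */
	(void)fdt;
	(void)atag_cmdline;
	(void)cmdline;
}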
Acked-by: Nicolas Pitre Signed-off-by: David Heidelberg Signed-off-by: Arnd Bergmann Cc: Signed-off-by: Russell King (Oracle) --- arch/arm/boot/compressed/Makefile | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 9d91ae1091b0..91265e7ff672 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -85,6 +85,8 @@ compress-$(CONFIG_KERNEL_LZ4) = lz4 libfdt_objs := fdt_rw.o fdt_ro.o fdt_wip.o fdt.o ifeq ($(CONFIG_ARM_ATAG_DTB_COMPAT),y) +CFLAGS_REMOVE_atags_to_fdt.o += -Wframe-larger-than=${CONFIG_FRAME_WARN} +CFLAGS_atags_to_fdt.o += -Wframe-larger-than=1280 OBJS += $(libfdt_objs) atags_to_fdt.o endif ifeq ($(CONFIG_USE_OF),y) -- cgit v1.2.3 From 344179fc7ef427910de438affbf3703fed51fe5a Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 11 Aug 2021 08:30:19 +0100 Subject: ARM: 9106/1: traps: use get_kernel_nofault instead of set_fs() ARM uses set_fs() and __get_user() to allow the stack dumping code to access possibly invalid pointers carefully. These can be changed to the simpler get_kernel_nofault(), and allow the eventual removal of set_fs(). dump_instr() will print either kernel or user space pointers, depending on how it was called. For dump_mem(), I assume we are only interested in kernel pointers, and the only time that this is called with user_mode(regs)==true is when the regs themselves are unreliable as a result of the condition that caused the trap. Reviewed-by: Christoph Hellwig Reviewed-by: Linus Walleij Signed-off-by: Arnd Bergmann Signed-off-by: Russell King (Oracle) --- arch/arm/kernel/traps.c | 47 ++++++++++++++++------------------------------- 1 file changed, 16 insertions(+), 31 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 64308e3a5d0c..10dd3ef1f398 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -122,17 +122,8 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom, unsigned long top) { unsigned long first; - mm_segment_t fs; int i; - /* - * We need to switch to kernel mode so that we can use __get_user - * to safely read from kernel space. Note that we now dump the - * code first, just in case the backtrace kills us. 
- */ - fs = get_fs(); - set_fs(KERNEL_DS); - printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top); for (first = bottom & ~31; first < top; first += 32) { @@ -145,7 +136,7 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom, for (p = first, i = 0; i < 8 && p < top; i++, p += 4) { if (p >= bottom && p < top) { unsigned long val; - if (__get_user(val, (unsigned long *)p) == 0) + if (!get_kernel_nofault(val, (unsigned long *)p)) sprintf(str + i * 9, " %08lx", val); else sprintf(str + i * 9, " ????????"); @@ -153,11 +144,9 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom, } printk("%s%04lx:%s\n", lvl, first & 0xffff, str); } - - set_fs(fs); } -static void __dump_instr(const char *lvl, struct pt_regs *regs) +static void dump_instr(const char *lvl, struct pt_regs *regs) { unsigned long addr = instruction_pointer(regs); const int thumb = thumb_mode(regs); @@ -173,10 +162,20 @@ static void __dump_instr(const char *lvl, struct pt_regs *regs) for (i = -4; i < 1 + !!thumb; i++) { unsigned int val, bad; - if (thumb) - bad = get_user(val, &((u16 *)addr)[i]); - else - bad = get_user(val, &((u32 *)addr)[i]); + if (!user_mode(regs)) { + if (thumb) { + u16 val16; + bad = get_kernel_nofault(val16, &((u16 *)addr)[i]); + val = val16; + } else { + bad = get_kernel_nofault(val, &((u32 *)addr)[i]); + } + } else { + if (thumb) + bad = get_user(val, &((u16 *)addr)[i]); + else + bad = get_user(val, &((u32 *)addr)[i]); + } if (!bad) p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ", @@ -189,20 +188,6 @@ static void __dump_instr(const char *lvl, struct pt_regs *regs) printk("%sCode: %s\n", lvl, str); } -static void dump_instr(const char *lvl, struct pt_regs *regs) -{ - mm_segment_t fs; - - if (!user_mode(regs)) { - fs = get_fs(); - set_fs(KERNEL_DS); - __dump_instr(lvl, regs); - set_fs(fs); - } else { - __dump_instr(lvl, regs); - } -} - #ifdef CONFIG_ARM_UNWIND static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, const char *loglvl) -- cgit v1.2.3 From b6e47f3c11c17965acb2a12001af3b1cd5658f37 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 11 Aug 2021 08:30:20 +0100 Subject: ARM: 9109/1: oabi-compat: add epoll_pwait handler The epoll_wait() syscall has a special version for OABI compat mode to convert the arguments to the EABI structure layout of the kernel. However, the later epoll_pwait() syscall was added in arch/arm in linux-2.6.32 without this conversion. Use the same kind of handler for both.
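A caller-side sketch of the idiom introduced by ARM: 9106/1 above (an assumed example, not kernel code): get_kernel_nofault() replaces the set_fs(KERNEL_DS) plus __get_user() sequence and returns non-zero when the kernel address faults, so the caller simply branches on its result instead of switching address limits.

#include <linux/types.h>
#include <linux/uaccess.h>

/* Read a word from a possibly-invalid kernel pointer; fills *val and
 * returns true on success, false if the access faulted. */
static bool read_kernel_word(const unsigned long *kaddr, unsigned long *val)
{
	if (get_kernel_nofault(*val, kaddr))
		return false;		/* faulted */

	return true;
}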
Fixes: 369842658a36 ("ARM: 5677/1: ARM support for TIF_RESTORE_SIGMASK/pselect6/ppoll/epoll_pwait") Cc: stable@vger.kernel.org Reviewed-by: Christoph Hellwig Signed-off-by: Arnd Bergmann Signed-off-by: Russell King (Oracle) --- arch/arm/kernel/sys_oabi-compat.c | 38 +++++++++++++++++++++++++++++++++++--- arch/arm/tools/syscall.tbl | 2 +- 2 files changed, 36 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index 075a2e0ed2c1..443203fafb6b 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -265,9 +265,8 @@ asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd, return do_epoll_ctl(epfd, op, fd, &kernel, false); } -asmlinkage long sys_oabi_epoll_wait(int epfd, - struct oabi_epoll_event __user *events, - int maxevents, int timeout) +static long do_oabi_epoll_wait(int epfd, struct oabi_epoll_event __user *events, + int maxevents, int timeout) { struct epoll_event *kbuf; struct oabi_epoll_event e; @@ -314,6 +313,39 @@ asmlinkage long sys_oabi_epoll_wait(int epfd, } #endif +SYSCALL_DEFINE4(oabi_epoll_wait, int, epfd, + struct oabi_epoll_event __user *, events, + int, maxevents, int, timeout) +{ + return do_oabi_epoll_wait(epfd, events, maxevents, timeout); +} + +/* + * Implement the event wait interface for the eventpoll file. It is the kernel + * part of the user space epoll_pwait(2). + */ +SYSCALL_DEFINE6(oabi_epoll_pwait, int, epfd, + struct oabi_epoll_event __user *, events, int, maxevents, + int, timeout, const sigset_t __user *, sigmask, + size_t, sigsetsize) +{ + int error; + + /* + * If the caller wants a certain signal mask to be set during the wait, + * we apply it here. + */ + error = set_user_sigmask(sigmask, sigsetsize); + if (error) + return error; + + error = do_oabi_epoll_wait(epfd, events, maxevents, timeout); + restore_saved_sigmask_unless(error == -EINTR); + + return error; +} +#endif + struct oabi_sembuf { unsigned short sem_num; short sem_op; diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl index c5df1179fc5d..11d0b960b2c2 100644 --- a/arch/arm/tools/syscall.tbl +++ b/arch/arm/tools/syscall.tbl @@ -360,7 +360,7 @@ 343 common vmsplice sys_vmsplice 344 common move_pages sys_move_pages 345 common getcpu sys_getcpu -346 common epoll_pwait sys_epoll_pwait +346 common epoll_pwait sys_epoll_pwait sys_oabi_epoll_pwait 347 common kexec_load sys_kexec_load 348 common utimensat sys_utimensat_time32 349 common signalfd sys_signalfd -- cgit v1.2.3 From 4e57a4ddf6b0d9cce1cf2ffd153df1ad3c2c9cc2 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 11 Aug 2021 08:30:21 +0100 Subject: ARM: 9107/1: syscall: always store thread_info->abi_syscall The system call number is used in a couple of places, in particular ptrace, seccomp and /proc/<pid>/syscall. The last one apparently never worked reliably on ARM for tasks that are not currently getting traced. Storing the syscall number in the normal entry path makes it work, as well as allowing us to see if the current system call is for OABI compat mode, which is the next thing I want to hook into. Since the thread_info->syscall field is not just the number any more, it is now renamed to abi_syscall. In kernels that enable both OABI and EABI, the upper bits of this field encode 0x900000 (__NR_OABI_SYSCALL_BASE) for OABI tasks, while normal EABI tasks do not set the upper bits. This makes it possible to implement the in_oabi_syscall() helper later.
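A worked example of that encoding, using the constants added by this patch (the helper names below are invented for illustration): an OABI task that issued open(), syscall 5, ends up with abi_syscall == 0x900005, while an EABI task stores plain 5; masking recovers the number, and the high bits identify the ABI.

#define __NR_OABI_SYSCALL_BASE	0x900000
#define __NR_SYSCALL_MASK	0x0fffff

/* abi_syscall = 0x900005 (OABI open) or 0x000005 (EABI open) */
static inline unsigned int sketch_syscall_nr(unsigned int abi_syscall)
{
	return abi_syscall & __NR_SYSCALL_MASK;		/* 5 in both cases */
}

static inline int sketch_in_oabi_syscall(unsigned int abi_syscall)
{
	return (abi_syscall & __NR_OABI_SYSCALL_BASE) != 0;	/* true only for OABI */
}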
All other users of thread_info->syscall go through the syscall_get_nr() helper, which in turn filters out the ABI bits. Note that the ABI information is lost with PTRACE_SET_SYSCALL, so one cannot set the internal number to a particular version, but this was already the case. We could change it to let gdb encode the ABI type along with the syscall in a CONFIG_OABI_COMPAT-enabled kernel, but that itself would be a (backwards-compatible) ABI change, so I don't do it here. Acked-by: Christoph Hellwig Signed-off-by: Arnd Bergmann Signed-off-by: Russell King (Oracle) --- arch/arm/include/asm/syscall.h | 5 ++++- arch/arm/include/asm/thread_info.h | 2 +- arch/arm/include/uapi/asm/unistd.h | 1 + arch/arm/kernel/asm-offsets.c | 1 + arch/arm/kernel/entry-common.S | 8 ++++++-- arch/arm/kernel/ptrace.c | 14 ++++++++------ 6 files changed, 21 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h index fd02761ba06c..f055e846a5cc 100644 --- a/arch/arm/include/asm/syscall.h +++ b/arch/arm/include/asm/syscall.h @@ -22,7 +22,10 @@ extern const unsigned long sys_call_table[]; static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { - return task_thread_info(task)->syscall; + if (IS_ENABLED(CONFIG_AEABI) && !IS_ENABLED(CONFIG_OABI_COMPAT)) + return task_thread_info(task)->abi_syscall; + + return task_thread_info(task)->abi_syscall & __NR_SYSCALL_MASK; } static inline void syscall_rollback(struct task_struct *task, diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 70d4cbc49ae1..17c56051747b 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -62,7 +62,7 @@ struct thread_info { unsigned long stack_canary; #endif struct cpu_context_save cpu_context; /* cpu context */ - __u32 syscall; /* syscall number */ + __u32 abi_syscall; /* ABI type and syscall nr */ __u8 used_cp[16]; /* thread used copro */ unsigned long tp_value[2]; /* TLS registers */ #ifdef CONFIG_CRUNCH diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h index ae7749e15726..a1149911464c 100644 --- a/arch/arm/include/uapi/asm/unistd.h +++ b/arch/arm/include/uapi/asm/unistd.h @@ -15,6 +15,7 @@ #define _UAPI__ASM_ARM_UNISTD_H #define __NR_OABI_SYSCALL_BASE 0x900000 +#define __NR_SYSCALL_MASK 0x0fffff #if defined(__thumb__) || defined(__ARM_EABI__) #define __NR_SYSCALL_BASE 0 diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 70993af22d80..a0945b898ca3 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -48,6 +48,7 @@ int main(void) DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); DEFINE(TI_CPU_DOMAIN, offsetof(struct thread_info, cpu_domain)); DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context)); + DEFINE(TI_ABI_SYSCALL, offsetof(struct thread_info, abi_syscall)); DEFINE(TI_USED_CP, offsetof(struct thread_info, used_cp)); DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value)); DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate)); diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 7f0b7aba1498..e837af90cd44 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -226,6 +226,7 @@ ENTRY(vector_swi) /* saved_psr and saved_pc are now dead */ uaccess_disable tbl + get_thread_info tsk adr tbl, sys_call_table @ load syscall table pointer @@ -237,13 +238,17 @@ ENTRY(vector_swi) * get the old ABI syscall table 
address. */ bics r10, r10, #0xff000000 + strne r10, [tsk, #TI_ABI_SYSCALL] + streq scno, [tsk, #TI_ABI_SYSCALL] eorne scno, r10, #__NR_OABI_SYSCALL_BASE ldrne tbl, =sys_oabi_call_table #elif !defined(CONFIG_AEABI) bic scno, scno, #0xff000000 @ mask off SWI op-code + str scno, [tsk, #TI_ABI_SYSCALL] eor scno, scno, #__NR_SYSCALL_BASE @ check OS number +#else + str scno, [tsk, #TI_ABI_SYSCALL] #endif - get_thread_info tsk /* * Reload the registers that may have been corrupted on entry to * the syscall assembly (by tracing or context tracking.) @@ -288,7 +293,6 @@ ENDPROC(vector_swi) * context switches, and waiting for our parent to respond. */ __sys_trace: - mov r1, scno add r0, sp, #S_OFF bl syscall_trace_enter mov scno, r0 diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 2771e682220b..d886ea8910cb 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -25,6 +25,7 @@ #include #include +#include #include #define CREATE_TRACE_POINTS @@ -811,7 +812,8 @@ long arch_ptrace(struct task_struct *child, long request, break; case PTRACE_SET_SYSCALL: - task_thread_info(child)->syscall = data; + task_thread_info(child)->abi_syscall = data & + __NR_SYSCALL_MASK; ret = 0; break; @@ -880,14 +882,14 @@ static void tracehook_report_syscall(struct pt_regs *regs, if (dir == PTRACE_SYSCALL_EXIT) tracehook_report_syscall_exit(regs, 0); else if (tracehook_report_syscall_entry(regs)) - current_thread_info()->syscall = -1; + current_thread_info()->abi_syscall = -1; regs->ARM_ip = ip; } -asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno) +asmlinkage int syscall_trace_enter(struct pt_regs *regs) { - current_thread_info()->syscall = scno; + int scno; if (test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER); @@ -898,11 +900,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno) return -1; #else /* XXX: remove this once OABI gets fixed */ - secure_computing_strict(current_thread_info()->syscall); + secure_computing_strict(syscall_get_nr(current, regs)); #endif /* Tracer or seccomp may have changed syscall. */ - scno = current_thread_info()->syscall; + scno = syscall_get_nr(current, regs); if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) trace_sys_enter(regs, scno); -- cgit v1.2.3 From 249dbe74d3c4b568a623fb55c56cddf19fdf0b89 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 11 Aug 2021 08:30:22 +0100 Subject: ARM: 9108/1: oabi-compat: rework epoll_wait/epoll_pwait emulation The epoll_wait() system call wrapper is one of the remaining users of the set_fs() infrastructure for Arm. Changing it to not require set_fs() is rather complex unfortunately. The approach I'm taking here is to allow architectures to override the code that copies the output to user space, and let the oabi-compat implementation check whether it is getting called from an EABI or OABI system call based on the thread_info->syscall value. The in_oabi_syscall() check here mirrors the in_compat_syscall() and in_x32_syscall() helpers for 32-bit compat implementations on other architectures. Overall, the amount of code goes down, at least with the newly added sys_oabi_epoll_pwait() helper getting removed again. The downside is added complexity in the source code for the native implementation. There should be no difference in runtime performance except for Arm kernels with CONFIG_OABI_COMPAT enabled that now have to go through an external function call to check which of the two variants to use.
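The layout mismatch behind this rework can be sketched as follows (offsets follow from the EABI alignment rules; the struct names are illustrative only): the EABI epoll_event pads before its 64-bit data member while the OABI one is packed, so common code cannot fill both layouts and a per-ABI writer such as epoll_put_uevent() is needed.

#include <linux/types.h>

/* EABI layout: the u64 member is 8-byte aligned, so 4 bytes of padding
 * follow events and the structure is 16 bytes. */
struct eabi_epoll_event_sketch {
	__u32 events;		/* offset 0 */
				/* 4 bytes of implicit padding */
	__u64 data;		/* offset 8 */
};

/* OABI layout: packed and only 4-byte aligned, 12 bytes total, data at
 * offset 4 -- incompatible with the EABI layout above. */
struct oabi_epoll_event_sketch {
	__u32 events;		/* offset 0 */
	__u64 data;		/* offset 4 */
} __attribute__((packed, aligned(4)));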
Acked-by: Christoph Hellwig Signed-off-by: Arnd Bergmann Signed-off-by: Russell King (Oracle) --- arch/arm/include/asm/syscall.h | 11 ++++++ arch/arm/kernel/sys_oabi-compat.c | 83 ++++++++------------------------------- arch/arm/tools/syscall.tbl | 4 +- fs/eventpoll.c | 5 +-- include/linux/eventpoll.h | 18 +++++++++ 5 files changed, 49 insertions(+), 72 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h index f055e846a5cc..24c19d63ff0a 100644 --- a/arch/arm/include/asm/syscall.h +++ b/arch/arm/include/asm/syscall.h @@ -28,6 +28,17 @@ static inline int syscall_get_nr(struct task_struct *task, return task_thread_info(task)->abi_syscall & __NR_SYSCALL_MASK; } +static inline bool __in_oabi_syscall(struct task_struct *task) +{ + return IS_ENABLED(CONFIG_OABI_COMPAT) && + (task_thread_info(task)->abi_syscall & __NR_OABI_SYSCALL_BASE); +} + +static inline bool in_oabi_syscall(void) +{ + return __in_oabi_syscall(current); +} + static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs) { diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index 443203fafb6b..1f6a433200f1 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -83,6 +83,8 @@ #include #include +#include + struct oldabi_stat64 { unsigned long long st_dev; unsigned int __pad1; @@ -264,87 +266,34 @@ asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd, return do_epoll_ctl(epfd, op, fd, &kernel, false); } - -static long do_oabi_epoll_wait(int epfd, struct oabi_epoll_event __user *events, - int maxevents, int timeout) -{ - struct epoll_event *kbuf; - struct oabi_epoll_event e; - mm_segment_t fs; - long ret, err, i; - - if (maxevents <= 0 || - maxevents > (INT_MAX/sizeof(*kbuf)) || - maxevents > (INT_MAX/sizeof(*events))) - return -EINVAL; - if (!access_ok(events, sizeof(*events) * maxevents)) - return -EFAULT; - kbuf = kmalloc_array(maxevents, sizeof(*kbuf), GFP_KERNEL); - if (!kbuf) - return -ENOMEM; - fs = get_fs(); - set_fs(KERNEL_DS); - ret = sys_epoll_wait(epfd, kbuf, maxevents, timeout); - set_fs(fs); - err = 0; - for (i = 0; i < ret; i++) { - e.events = kbuf[i].events; - e.data = kbuf[i].data; - err = __copy_to_user(events, &e, sizeof(e)); - if (err) - break; - events++; - } - kfree(kbuf); - return err ? -EFAULT : ret; -} #else asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd, struct oabi_epoll_event __user *event) { return -EINVAL; } - -asmlinkage long sys_oabi_epoll_wait(int epfd, - struct oabi_epoll_event __user *events, - int maxevents, int timeout) -{ - return -EINVAL; -} #endif -SYSCALL_DEFINE4(oabi_epoll_wait, int, epfd, - struct oabi_epoll_event __user *, events, - int, maxevents, int, timeout) +struct epoll_event __user * +epoll_put_uevent(__poll_t revents, __u64 data, + struct epoll_event __user *uevent) { - return do_oabi_epoll_wait(epfd, events, maxevents, timeout); -} + if (in_oabi_syscall()) { + struct oabi_epoll_event __user *oevent = (void __user *)uevent; -/* - * Implement the event wait interface for the eventpoll file. It is the kernel - * part of the user space epoll_pwait(2). 
- */ -SYSCALL_DEFINE6(oabi_epoll_pwait, int, epfd, - struct oabi_epoll_event __user *, events, int, maxevents, - int, timeout, const sigset_t __user *, sigmask, - size_t, sigsetsize) -{ - int error; + if (__put_user(revents, &oevent->events) || + __put_user(data, &oevent->data)) + return NULL; - /* - * If the caller wants a certain signal mask to be set during the wait, - * we apply it here. - */ - error = set_user_sigmask(sigmask, sigsetsize); - if (error) - return error; + return (void __user *)(oevent+1); + } - error = do_oabi_epoll_wait(epfd, events, maxevents, timeout); - restore_saved_sigmask_unless(error == -EINTR); + if (__put_user(revents, &uevent->events) || + __put_user(data, &uevent->data)) + return NULL; - return error; + return uevent+1; } -#endif struct oabi_sembuf { unsigned short sem_num; diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl index 11d0b960b2c2..344424a9611f 100644 --- a/arch/arm/tools/syscall.tbl +++ b/arch/arm/tools/syscall.tbl @@ -266,7 +266,7 @@ 249 common lookup_dcookie sys_lookup_dcookie 250 common epoll_create sys_epoll_create 251 common epoll_ctl sys_epoll_ctl sys_oabi_epoll_ctl -252 common epoll_wait sys_epoll_wait sys_oabi_epoll_wait +252 common epoll_wait sys_epoll_wait 253 common remap_file_pages sys_remap_file_pages # 254 for set_thread_area # 255 for get_thread_area @@ -360,7 +360,7 @@ 343 common vmsplice sys_vmsplice 344 common move_pages sys_move_pages 345 common getcpu sys_getcpu -346 common epoll_pwait sys_epoll_pwait sys_oabi_epoll_pwait +346 common epoll_pwait sys_epoll_pwait 347 common kexec_load sys_kexec_load 348 common utimensat sys_utimensat_time32 349 common signalfd sys_signalfd diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 1e596e1d0bba..c90c4352325e 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1684,8 +1684,8 @@ static int ep_send_events(struct eventpoll *ep, if (!revents) continue; - if (__put_user(revents, &events->events) || - __put_user(epi->event.data, &events->data)) { + events = epoll_put_uevent(revents, epi->event.data, events); + if (!events) { list_add(&epi->rdllink, &txlist); ep_pm_stay_awake(epi); if (!res) @@ -1693,7 +1693,6 @@ static int ep_send_events(struct eventpoll *ep, break; } res++; - events++; if (epi->event.events & EPOLLONESHOT) epi->event.events &= EP_PRIVATE_BITS; else if (!(epi->event.events & EPOLLET)) { diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h index 593322c946e6..3337745d81bd 100644 --- a/include/linux/eventpoll.h +++ b/include/linux/eventpoll.h @@ -68,4 +68,22 @@ static inline void eventpoll_release(struct file *file) {} #endif +#if defined(CONFIG_ARM) && defined(CONFIG_OABI_COMPAT) +/* ARM OABI has an incompatible struct layout and needs a special handler */ +extern struct epoll_event __user * +epoll_put_uevent(__poll_t revents, __u64 data, + struct epoll_event __user *uevent); +#else +static inline struct epoll_event __user * +epoll_put_uevent(__poll_t revents, __u64 data, + struct epoll_event __user *uevent) +{ + if (__put_user(revents, &uevent->events) || + __put_user(data, &uevent->data)) + return NULL; + + return uevent+1; +} +#endif + #endif /* #ifndef _LINUX_EVENTPOLL_H */ -- cgit v1.2.3 From bdec0145286f7e6be9b3134aa35f0f335fa27c38 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 11 Aug 2021 08:30:23 +0100 Subject: ARM: 9114/1: oabi-compat: rework sys_semtimedop emulation sys_oabi_semtimedop() is one of the last users of set_fs() on Arm. 
To remove this one, expose the internal code of the actual implementation that operates on a kernel pointer and call it directly after copying. There should be no measurable impact on the normal execution of this function, and it makes the overly long function a little shorter, which may help readability. While reworking the oabi version, make it behave a little more like the native one, using kvmalloc_array() and restructure the code flow in a similar way. The naming of __do_semtimedop() is not very good, I hope someone can come up with a better name. One regression was spotted by kernel test robot and fixed before the first mailing list submission. Acked-by: Christoph Hellwig Signed-off-by: Arnd Bergmann Signed-off-by: Russell King (Oracle) --- arch/arm/kernel/sys_oabi-compat.c | 60 ++++++++++++++++++++-------- include/linux/syscalls.h | 3 ++ ipc/sem.c | 84 ++++++++++++++++++++++++--------------- 3 files changed, 99 insertions(+), 48 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index 1f6a433200f1..5ea365c35ca5 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -80,6 +80,7 @@ #include #include #include +#include #include #include @@ -302,46 +303,52 @@ struct oabi_sembuf { unsigned short __pad; }; +#define sc_semopm sem_ctls[2] + +#ifdef CONFIG_SYSVIPC asmlinkage long sys_oabi_semtimedop(int semid, struct oabi_sembuf __user *tsops, unsigned nsops, const struct old_timespec32 __user *timeout) { + struct ipc_namespace *ns; struct sembuf *sops; - struct old_timespec32 local_timeout; long err; int i; + ns = current->nsproxy->ipc_ns; + if (nsops > ns->sc_semopm) + return -E2BIG; if (nsops < 1 || nsops > SEMOPM) return -EINVAL; - if (!access_ok(tsops, sizeof(*tsops) * nsops)) - return -EFAULT; - sops = kmalloc_array(nsops, sizeof(*sops), GFP_KERNEL); + sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL); if (!sops) return -ENOMEM; err = 0; for (i = 0; i < nsops; i++) { struct oabi_sembuf osb; - err |= __copy_from_user(&osb, tsops, sizeof(osb)); + err |= copy_from_user(&osb, tsops, sizeof(osb)); sops[i].sem_num = osb.sem_num; sops[i].sem_op = osb.sem_op; sops[i].sem_flg = osb.sem_flg; tsops++; } - if (timeout) { - /* copy this as well before changing domain protection */ - err |= copy_from_user(&local_timeout, timeout, sizeof(*timeout)); - timeout = &local_timeout; - } if (err) { err = -EFAULT; - } else { - mm_segment_t fs = get_fs(); - set_fs(KERNEL_DS); - err = sys_semtimedop_time32(semid, sops, nsops, timeout); - set_fs(fs); + goto out; + } + + if (timeout) { + struct timespec64 ts; + err = get_old_timespec32(&ts, timeout); + if (err) + goto out; + err = __do_semtimedop(semid, sops, nsops, &ts, ns); + goto out; } - kfree(sops); + err = __do_semtimedop(semid, sops, nsops, NULL, ns); +out: + kvfree(sops); return err; } @@ -368,6 +375,27 @@ asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third, return sys_ipc(call, first, second, third, ptr, fifth); } } +#else +asmlinkage long sys_oabi_semtimedop(int semid, + struct oabi_sembuf __user *tsops, + unsigned nsops, + const struct old_timespec32 __user *timeout) +{ + return -ENOSYS; +} + +asmlinkage long sys_oabi_semop(int semid, struct oabi_sembuf __user *tsops, + unsigned nsops) +{ + return -ENOSYS; +} + +asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third, + void __user *ptr, long fifth) +{ + return -ENOSYS; +} +#endif asmlinkage long sys_oabi_bind(int fd, struct sockaddr __user *addr, int addrlen) { 
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 69c9a7010081..6c6fc3fd5b72 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1373,6 +1373,9 @@ long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems, unsigned int nsops, const struct old_timespec32 __user *timeout); +long __do_semtimedop(int semid, struct sembuf *tsems, unsigned int nsops, + const struct timespec64 *timeout, + struct ipc_namespace *ns); int __sys_getsockopt(int fd, int level, int optname, char __user *optval, int __user *optlen); diff --git a/ipc/sem.c b/ipc/sem.c index 971e75d28364..ae8d9104b0a0 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -1984,46 +1984,34 @@ out: return un; } -static long do_semtimedop(int semid, struct sembuf __user *tsops, - unsigned nsops, const struct timespec64 *timeout) +long __do_semtimedop(int semid, struct sembuf *sops, + unsigned nsops, const struct timespec64 *timeout, + struct ipc_namespace *ns) { int error = -EINVAL; struct sem_array *sma; - struct sembuf fast_sops[SEMOPM_FAST]; - struct sembuf *sops = fast_sops, *sop; + struct sembuf *sop; struct sem_undo *un; int max, locknum; bool undos = false, alter = false, dupsop = false; struct sem_queue queue; unsigned long dup = 0, jiffies_left = 0; - struct ipc_namespace *ns; - - ns = current->nsproxy->ipc_ns; if (nsops < 1 || semid < 0) return -EINVAL; if (nsops > ns->sc_semopm) return -E2BIG; - if (nsops > SEMOPM_FAST) { - sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL); - if (sops == NULL) - return -ENOMEM; - } - - if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) { - error = -EFAULT; - goto out_free; - } if (timeout) { if (timeout->tv_sec < 0 || timeout->tv_nsec < 0 || timeout->tv_nsec >= 1000000000L) { error = -EINVAL; - goto out_free; + goto out; } jiffies_left = timespec64_to_jiffies(timeout); } + max = 0; for (sop = sops; sop < sops + nsops; sop++) { unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG); @@ -2052,7 +2040,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops, un = find_alloc_undo(ns, semid); if (IS_ERR(un)) { error = PTR_ERR(un); - goto out_free; + goto out; } } else { un = NULL; @@ -2063,25 +2051,25 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops, if (IS_ERR(sma)) { rcu_read_unlock(); error = PTR_ERR(sma); - goto out_free; + goto out; } error = -EFBIG; if (max >= sma->sem_nsems) { rcu_read_unlock(); - goto out_free; + goto out; } error = -EACCES; if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) { rcu_read_unlock(); - goto out_free; + goto out; } error = security_sem_semop(&sma->sem_perm, sops, nsops, alter); if (error) { rcu_read_unlock(); - goto out_free; + goto out; } error = -EIDRM; @@ -2095,7 +2083,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops, * entangled here and why it's RMID race safe on comments at sem_lock() */ if (!ipc_valid_object(&sma->sem_perm)) - goto out_unlock_free; + goto out_unlock; /* * semid identifiers are not unique - find_alloc_undo may have * allocated an undo structure, it was invalidated by an RMID @@ -2104,7 +2092,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops, * "un" itself is guaranteed by rcu. 
*/ if (un && un->semid == -1) - goto out_unlock_free; + goto out_unlock; queue.sops = sops; queue.nsops = nsops; @@ -2130,10 +2118,10 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops, rcu_read_unlock(); wake_up_q(&wake_q); - goto out_free; + goto out; } if (error < 0) /* non-blocking error path */ - goto out_unlock_free; + goto out_unlock; /* * We need to sleep on this operation, so we put the current @@ -2198,14 +2186,14 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops, if (error != -EINTR) { /* see SEM_BARRIER_2 for purpose/pairing */ smp_acquire__after_ctrl_dep(); - goto out_free; + goto out; } rcu_read_lock(); locknum = sem_lock(sma, sops, nsops); if (!ipc_valid_object(&sma->sem_perm)) - goto out_unlock_free; + goto out_unlock; /* * No necessity for any barrier: We are protect by sem_lock() @@ -2217,7 +2205,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops, * Leave without unlink_queue(), but with sem_unlock(). */ if (error != -EINTR) - goto out_unlock_free; + goto out_unlock; /* * If an interrupt occurred we have to clean up the queue. @@ -2228,13 +2216,45 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops, unlink_queue(sma, &queue); -out_unlock_free: +out_unlock: sem_unlock(sma, locknum); rcu_read_unlock(); +out: + return error; +} + +static long do_semtimedop(int semid, struct sembuf __user *tsops, + unsigned nsops, const struct timespec64 *timeout) +{ + struct sembuf fast_sops[SEMOPM_FAST]; + struct sembuf *sops = fast_sops; + struct ipc_namespace *ns; + int ret; + + ns = current->nsproxy->ipc_ns; + if (nsops > ns->sc_semopm) + return -E2BIG; + if (nsops < 1) + return -EINVAL; + + if (nsops > SEMOPM_FAST) { + sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL); + if (sops == NULL) + return -ENOMEM; + } + + if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) { + ret = -EFAULT; + goto out_free; + } + + ret = __do_semtimedop(semid, sops, nsops, timeout, ns); + out_free: if (sops != fast_sops) kvfree(sops); - return error; + + return ret; } long ksys_semtimedop(int semid, struct sembuf __user *tsops, -- cgit v1.2.3 From 7e2d8c29ecdd86afbcedb9d9a977bab8af527add Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 11 Aug 2021 08:30:24 +0100 Subject: ARM: 9111/1: oabi-compat: rework fcntl64() emulation This is one of the last users of get_fs(), and this is fairly easy to change, since the infrastructure for it is already there. The replacement here is essentially a copy of the existing fcntl64() syscall entry function. 
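The fcntl64() rework above has the same shape as the other conversions in this series: copy the OABI structure in, call an internal helper that takes kernel pointers, copy the result back out. A generic, hedged sketch of that shape, with every name invented for illustration:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct oabi_thing {			/* packed OABI user layout */
	__u32 a;
	__u64 b;
} __attribute__((packed, aligned(4)));

struct native_thing {			/* native kernel layout */
	__u32 a;
	__u64 b;
};

long do_native_thing(struct native_thing *arg);	/* internal helper */

long sys_oabi_thing_sketch(struct oabi_thing __user *uptr)
{
	struct oabi_thing user;
	struct native_thing kernel;
	long err;

	if (copy_from_user(&user, uptr, sizeof(user)))
		return -EFAULT;

	kernel.a = user.a;			/* explicit field conversion */
	kernel.b = user.b;

	err = do_native_thing(&kernel);		/* no set_fs(KERNEL_DS) needed */

	return err;
}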
Acked-by: Christoph Hellwig Signed-off-by: Arnd Bergmann Signed-off-by: Russell King (Oracle) --- arch/arm/kernel/sys_oabi-compat.c | 93 +++++++++++++++++++++++++-------------- 1 file changed, 60 insertions(+), 33 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index 5ea365c35ca5..223ee46b6e75 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -194,56 +194,83 @@ struct oabi_flock64 { pid_t l_pid; } __attribute__ ((packed,aligned(4))); -static long do_locks(unsigned int fd, unsigned int cmd, - unsigned long arg) +static int get_oabi_flock(struct flock64 *kernel, struct oabi_flock64 __user *arg) { - struct flock64 kernel; struct oabi_flock64 user; - mm_segment_t fs; - long ret; if (copy_from_user(&user, (struct oabi_flock64 __user *)arg, sizeof(user))) return -EFAULT; - kernel.l_type = user.l_type; - kernel.l_whence = user.l_whence; - kernel.l_start = user.l_start; - kernel.l_len = user.l_len; - kernel.l_pid = user.l_pid; - - fs = get_fs(); - set_fs(KERNEL_DS); - ret = sys_fcntl64(fd, cmd, (unsigned long)&kernel); - set_fs(fs); - - if (!ret && (cmd == F_GETLK64 || cmd == F_OFD_GETLK)) { - user.l_type = kernel.l_type; - user.l_whence = kernel.l_whence; - user.l_start = kernel.l_start; - user.l_len = kernel.l_len; - user.l_pid = kernel.l_pid; - if (copy_to_user((struct oabi_flock64 __user *)arg, - &user, sizeof(user))) - ret = -EFAULT; - } - return ret; + + kernel->l_type = user.l_type; + kernel->l_whence = user.l_whence; + kernel->l_start = user.l_start; + kernel->l_len = user.l_len; + kernel->l_pid = user.l_pid; + + return 0; +} + +static int put_oabi_flock(struct flock64 *kernel, struct oabi_flock64 __user *arg) +{ + struct oabi_flock64 user; + + user.l_type = kernel->l_type; + user.l_whence = kernel->l_whence; + user.l_start = kernel->l_start; + user.l_len = kernel->l_len; + user.l_pid = kernel->l_pid; + + if (copy_to_user((struct oabi_flock64 __user *)arg, + &user, sizeof(user))) + return -EFAULT; + + return 0; } asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg) { + void __user *argp = (void __user *)arg; + struct fd f = fdget_raw(fd); + struct flock64 flock; + long err = -EBADF; + + if (!f.file) + goto out; + switch (cmd) { - case F_OFD_GETLK: - case F_OFD_SETLK: - case F_OFD_SETLKW: case F_GETLK64: + case F_OFD_GETLK: + err = security_file_fcntl(f.file, cmd, arg); + if (err) + break; + err = get_oabi_flock(&flock, argp); + if (err) + break; + err = fcntl_getlk64(f.file, cmd, &flock); + if (!err) + err = put_oabi_flock(&flock, argp); + break; case F_SETLK64: case F_SETLKW64: - return do_locks(fd, cmd, arg); - + case F_OFD_SETLK: + case F_OFD_SETLKW: + err = security_file_fcntl(f.file, cmd, arg); + if (err) + break; + err = get_oabi_flock(&flock, argp); + if (err) + break; + err = fcntl_setlk64(fd, f.file, cmd, &flock); + break; default: - return sys_fcntl64(fd, cmd, arg); + err = sys_fcntl64(fd, cmd, arg); + break; } + fdput(f); +out: + return err; } struct oabi_epoll_event { -- cgit v1.2.3 From 2df4c9a741a0b5e2883cc356c1b724d1f739fb55 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 11 Aug 2021 08:30:25 +0100 Subject: ARM: 9112/1: uaccess: add __{get,put}_kernel_nofault These mimic the behavior of get_user and put_user, except for domain switching, address limit checking and handling of mismatched sizes, none of which are relevant here. 
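For context, a simplified sketch of how such hooks are consumed (modeled on the generic kernel-access copy loop, reduced here to byte granularity): the err_label argument lets the macro jump straight to the caller's error path when the access faults.

#include <linux/errno.h>
#include <linux/types.h>

/* Simplified copy_from_kernel_nofault()-style loop built on the new
 * __get_kernel_nofault() hook; the real generic code also copies
 * u64/u32/u16-sized chunks. */
static long copy_from_kernel_nofault_sketch(void *dst, const void *src,
					    size_t size)
{
	u8 *d = dst;
	const u8 *s = src;

	while (size--) {
		__get_kernel_nofault(d, s, u8, Efault);
		d++;
		s++;
	}
	return 0;

Efault:
	return -EFAULT;
}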
To work with pre-Armv6 kernels, this has to avoid TUSER() inside of the new macros, the new approach passes the "t" string along with the opcode, which is a bit uglier but avoids duplicating more code. As there is no __get_user_asm_dword(), I work around it by copying 32 bit at a time, which is possible because the output size is known. Acked-by: Christoph Hellwig Signed-off-by: Arnd Bergmann Signed-off-by: Russell King (Oracle) --- arch/arm/include/asm/uaccess.h | 123 +++++++++++++++++++++++++++-------------- 1 file changed, 83 insertions(+), 40 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index a13d90206472..4f60638755c4 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -308,11 +308,11 @@ static inline void set_fs(mm_segment_t fs) #define __get_user(x, ptr) \ ({ \ long __gu_err = 0; \ - __get_user_err((x), (ptr), __gu_err); \ + __get_user_err((x), (ptr), __gu_err, TUSER()); \ __gu_err; \ }) -#define __get_user_err(x, ptr, err) \ +#define __get_user_err(x, ptr, err, __t) \ do { \ unsigned long __gu_addr = (unsigned long)(ptr); \ unsigned long __gu_val; \ @@ -321,18 +321,19 @@ do { \ might_fault(); \ __ua_flags = uaccess_save_and_enable(); \ switch (sizeof(*(ptr))) { \ - case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \ - case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \ - case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \ + case 1: __get_user_asm_byte(__gu_val, __gu_addr, err, __t); break; \ + case 2: __get_user_asm_half(__gu_val, __gu_addr, err, __t); break; \ + case 4: __get_user_asm_word(__gu_val, __gu_addr, err, __t); break; \ default: (__gu_val) = __get_user_bad(); \ } \ uaccess_restore(__ua_flags); \ (x) = (__typeof__(*(ptr)))__gu_val; \ } while (0) +#endif #define __get_user_asm(x, addr, err, instr) \ __asm__ __volatile__( \ - "1: " TUSER(instr) " %1, [%2], #0\n" \ + "1: " instr " %1, [%2], #0\n" \ "2:\n" \ " .pushsection .text.fixup,\"ax\"\n" \ " .align 2\n" \ @@ -348,40 +349,38 @@ do { \ : "r" (addr), "i" (-EFAULT) \ : "cc") -#define __get_user_asm_byte(x, addr, err) \ - __get_user_asm(x, addr, err, ldrb) +#define __get_user_asm_byte(x, addr, err, __t) \ + __get_user_asm(x, addr, err, "ldrb" __t) #if __LINUX_ARM_ARCH__ >= 6 -#define __get_user_asm_half(x, addr, err) \ - __get_user_asm(x, addr, err, ldrh) +#define __get_user_asm_half(x, addr, err, __t) \ + __get_user_asm(x, addr, err, "ldrh" __t) #else #ifndef __ARMEB__ -#define __get_user_asm_half(x, __gu_addr, err) \ +#define __get_user_asm_half(x, __gu_addr, err, __t) \ ({ \ unsigned long __b1, __b2; \ - __get_user_asm_byte(__b1, __gu_addr, err); \ - __get_user_asm_byte(__b2, __gu_addr + 1, err); \ + __get_user_asm_byte(__b1, __gu_addr, err, __t); \ + __get_user_asm_byte(__b2, __gu_addr + 1, err, __t); \ (x) = __b1 | (__b2 << 8); \ }) #else -#define __get_user_asm_half(x, __gu_addr, err) \ +#define __get_user_asm_half(x, __gu_addr, err, __t) \ ({ \ unsigned long __b1, __b2; \ - __get_user_asm_byte(__b1, __gu_addr, err); \ - __get_user_asm_byte(__b2, __gu_addr + 1, err); \ + __get_user_asm_byte(__b1, __gu_addr, err, __t); \ + __get_user_asm_byte(__b2, __gu_addr + 1, err, __t); \ (x) = (__b1 << 8) | __b2; \ }) #endif #endif /* __LINUX_ARM_ARCH__ >= 6 */ -#define __get_user_asm_word(x, addr, err) \ - __get_user_asm(x, addr, err, ldr) -#endif - +#define __get_user_asm_word(x, addr, err, __t) \ + __get_user_asm(x, addr, err, "ldr" __t) #define __put_user_switch(x, ptr, __err, __fn) \ do { \ @@ 
-425,7 +424,7 @@ do { \ #define __put_user_nocheck(x, __pu_ptr, __err, __size) \ do { \ unsigned long __pu_addr = (unsigned long)__pu_ptr; \ - __put_user_nocheck_##__size(x, __pu_addr, __err); \ + __put_user_nocheck_##__size(x, __pu_addr, __err, TUSER());\ } while (0) #define __put_user_nocheck_1 __put_user_asm_byte @@ -433,9 +432,11 @@ do { \ #define __put_user_nocheck_4 __put_user_asm_word #define __put_user_nocheck_8 __put_user_asm_dword +#endif /* !CONFIG_CPU_SPECTRE */ + #define __put_user_asm(x, __pu_addr, err, instr) \ __asm__ __volatile__( \ - "1: " TUSER(instr) " %1, [%2], #0\n" \ + "1: " instr " %1, [%2], #0\n" \ "2:\n" \ " .pushsection .text.fixup,\"ax\"\n" \ " .align 2\n" \ @@ -450,36 +451,36 @@ do { \ : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ : "cc") -#define __put_user_asm_byte(x, __pu_addr, err) \ - __put_user_asm(x, __pu_addr, err, strb) +#define __put_user_asm_byte(x, __pu_addr, err, __t) \ + __put_user_asm(x, __pu_addr, err, "strb" __t) #if __LINUX_ARM_ARCH__ >= 6 -#define __put_user_asm_half(x, __pu_addr, err) \ - __put_user_asm(x, __pu_addr, err, strh) +#define __put_user_asm_half(x, __pu_addr, err, __t) \ + __put_user_asm(x, __pu_addr, err, "strh" __t) #else #ifndef __ARMEB__ -#define __put_user_asm_half(x, __pu_addr, err) \ +#define __put_user_asm_half(x, __pu_addr, err, __t) \ ({ \ unsigned long __temp = (__force unsigned long)(x); \ - __put_user_asm_byte(__temp, __pu_addr, err); \ - __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \ + __put_user_asm_byte(__temp, __pu_addr, err, __t); \ + __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err, __t);\ }) #else -#define __put_user_asm_half(x, __pu_addr, err) \ +#define __put_user_asm_half(x, __pu_addr, err, __t) \ ({ \ unsigned long __temp = (__force unsigned long)(x); \ - __put_user_asm_byte(__temp >> 8, __pu_addr, err); \ - __put_user_asm_byte(__temp, __pu_addr + 1, err); \ + __put_user_asm_byte(__temp >> 8, __pu_addr, err, __t); \ + __put_user_asm_byte(__temp, __pu_addr + 1, err, __t); \ }) #endif #endif /* __LINUX_ARM_ARCH__ >= 6 */ -#define __put_user_asm_word(x, __pu_addr, err) \ - __put_user_asm(x, __pu_addr, err, str) +#define __put_user_asm_word(x, __pu_addr, err, __t) \ + __put_user_asm(x, __pu_addr, err, "str" __t) #ifndef __ARMEB__ #define __reg_oper0 "%R2" @@ -489,12 +490,12 @@ do { \ #define __reg_oper1 "%R2" #endif -#define __put_user_asm_dword(x, __pu_addr, err) \ +#define __put_user_asm_dword(x, __pu_addr, err, __t) \ __asm__ __volatile__( \ - ARM( "1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \ - ARM( "2: " TUSER(str) " " __reg_oper0 ", [%1]\n" ) \ - THUMB( "1: " TUSER(str) " " __reg_oper1 ", [%1]\n" ) \ - THUMB( "2: " TUSER(str) " " __reg_oper0 ", [%1, #4]\n" ) \ + ARM( "1: str" __t " " __reg_oper1 ", [%1], #4\n" ) \ + ARM( "2: str" __t " " __reg_oper0 ", [%1]\n" ) \ + THUMB( "1: str" __t " " __reg_oper1 ", [%1]\n" ) \ + THUMB( "2: str" __t " " __reg_oper0 ", [%1, #4]\n" ) \ "3:\n" \ " .pushsection .text.fixup,\"ax\"\n" \ " .align 2\n" \ @@ -510,7 +511,49 @@ do { \ : "r" (x), "i" (-EFAULT) \ : "cc") -#endif /* !CONFIG_CPU_SPECTRE */ +#define HAVE_GET_KERNEL_NOFAULT + +#define __get_kernel_nofault(dst, src, type, err_label) \ +do { \ + const type *__pk_ptr = (src); \ + unsigned long __src = (unsigned long)(__pk_ptr); \ + type __val; \ + int __err = 0; \ + switch (sizeof(type)) { \ + case 1: __get_user_asm_byte(__val, __src, __err, ""); break; \ + case 2: __get_user_asm_half(__val, __src, __err, ""); break; \ + case 4: __get_user_asm_word(__val, __src, __err, ""); break; \ + case 8: { \ + u32 
*__v32 = (u32*)&__val; \ + __get_user_asm_word(__v32[0], __src, __err, ""); \ + if (__err) \ + break; \ + __get_user_asm_word(__v32[1], __src+4, __err, ""); \ + break; \ + } \ + default: __err = __get_user_bad(); break; \ + } \ + *(type *)(dst) = __val; \ + if (__err) \ + goto err_label; \ +} while (0) + +#define __put_kernel_nofault(dst, src, type, err_label) \ +do { \ + const type *__pk_ptr = (dst); \ + unsigned long __dst = (unsigned long)__pk_ptr; \ + int __err = 0; \ + type __val = *(type *)src; \ + switch (sizeof(type)) { \ + case 1: __put_user_asm_byte(__val, __dst, __err, ""); break; \ + case 2: __put_user_asm_half(__val, __dst, __err, ""); break; \ + case 4: __put_user_asm_word(__val, __dst, __err, ""); break; \ + case 8: __put_user_asm_dword(__val, __dst, __err, ""); break; \ + default: __err = __put_user_bad(); break; \ + } \ + if (__err) \ + goto err_label; \ +} while (0) #ifdef CONFIG_MMU extern unsigned long __must_check -- cgit v1.2.3 From 8ac6f5d7f84bf362e67591708bcb9788cdc42c50 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 11 Aug 2021 08:30:26 +0100 Subject: ARM: 9113/1: uaccess: remove set_fs() implementation There are no remaining callers of set_fs(), so just remove it along with all associated code that operates on thread_info->addr_limit. There are still further optimizations that can be done: - In get_user(), the address check could be moved entirely into the out of line code, rather than passing a constant as an argument, - I assume the DACR handling can be simplified as we now only change it during user access when CONFIG_CPU_SW_DOMAIN_PAN is set, but not during set_fs(). Acked-by: Christoph Hellwig Signed-off-by: Arnd Bergmann Signed-off-by: Russell King (Oracle) --- arch/arm/Kconfig | 1 - arch/arm/include/asm/ptrace.h | 1 - arch/arm/include/asm/thread_info.h | 4 ---- arch/arm/include/asm/uaccess-asm.h | 6 ----- arch/arm/include/asm/uaccess.h | 46 ++++---------------------------------- arch/arm/kernel/asm-offsets.c | 2 -- arch/arm/kernel/entry-common.S | 12 ---------- arch/arm/kernel/process.c | 7 +----- arch/arm/kernel/signal.c | 8 ------- arch/arm/lib/copy_from_user.S | 3 +-- arch/arm/lib/copy_to_user.S | 3 +-- 11 files changed, 7 insertions(+), 86 deletions(-) (limited to 'arch') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 3ea1c417339f..51897e4cc413 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -126,7 +126,6 @@ config ARM select PCI_SYSCALL if PCI select PERF_USE_VMALLOC select RTC_LIB - select SET_FS select SYS_SUPPORTS_APM_EMULATION # Above selects are sorted alphabetically; please add new ones # according to that. Thanks. 
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 91d6b7856be4..93051e2f402c 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -19,7 +19,6 @@ struct pt_regs { struct svc_pt_regs { struct pt_regs regs; u32 dacr; - u32 addr_limit; }; #define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs) diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 17c56051747b..d89931aed59f 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -31,8 +31,6 @@ struct task_struct; #include -typedef unsigned long mm_segment_t; - struct cpu_context_save { __u32 r4; __u32 r5; @@ -54,7 +52,6 @@ struct cpu_context_save { struct thread_info { unsigned long flags; /* low level flags */ int preempt_count; /* 0 => preemptable, <0 => bug */ - mm_segment_t addr_limit; /* address limit */ struct task_struct *task; /* main task structure */ __u32 cpu; /* cpu */ __u32 cpu_domain; /* cpu domain */ @@ -80,7 +77,6 @@ struct thread_info { .task = &tsk, \ .flags = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .addr_limit = KERNEL_DS, \ } /* diff --git a/arch/arm/include/asm/uaccess-asm.h b/arch/arm/include/asm/uaccess-asm.h index e6eb7a2aaf1e..6451a433912c 100644 --- a/arch/arm/include/asm/uaccess-asm.h +++ b/arch/arm/include/asm/uaccess-asm.h @@ -84,12 +84,8 @@ * if \disable is set. */ .macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable - ldr \tmp1, [\tsk, #TI_ADDR_LIMIT] - ldr \tmp2, =TASK_SIZE - str \tmp2, [\tsk, #TI_ADDR_LIMIT] DACR( mrc p15, 0, \tmp0, c3, c0, 0) DACR( str \tmp0, [sp, #SVC_DACR]) - str \tmp1, [sp, #SVC_ADDR_LIMIT] .if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN) /* kernel=client, user=no access */ mov \tmp2, #DACR_UACCESS_DISABLE @@ -106,9 +102,7 @@ /* Restore the user access state previously saved by uaccess_entry */ .macro uaccess_exit, tsk, tmp0, tmp1 - ldr \tmp1, [sp, #SVC_ADDR_LIMIT] DACR( ldr \tmp0, [sp, #SVC_DACR]) - str \tmp1, [\tsk, #TI_ADDR_LIMIT] DACR( mcr p15, 0, \tmp0, c3, c0, 0) .endm diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 4f60638755c4..084d1c07c2d0 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -52,32 +52,8 @@ static __always_inline void uaccess_restore(unsigned int flags) extern int __get_user_bad(void); extern int __put_user_bad(void); -/* - * Note that this is actually 0x1,0000,0000 - */ -#define KERNEL_DS 0x00000000 - #ifdef CONFIG_MMU -#define USER_DS TASK_SIZE -#define get_fs() (current_thread_info()->addr_limit) - -static inline void set_fs(mm_segment_t fs) -{ - current_thread_info()->addr_limit = fs; - - /* - * Prevent a mispredicted conditional call to set_fs from forwarding - * the wrong address limit to access_ok under speculation. - */ - dsb(nsh); - isb(); - - modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); -} - -#define uaccess_kernel() (get_fs() == KERNEL_DS) - /* * We use 33-bit arithmetic here. Success returns zero, failure returns * addr_limit. 
We take advantage that addr_limit will be zero for KERNEL_DS, @@ -89,7 +65,7 @@ static inline void set_fs(mm_segment_t fs) __asm__(".syntax unified\n" \ "adds %1, %2, %3; sbcscc %1, %1, %0; movcc %0, #0" \ : "=&r" (flag), "=&r" (roksum) \ - : "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \ + : "r" (addr), "Ir" (size), "0" (TASK_SIZE) \ : "cc"); \ flag; }) @@ -120,7 +96,7 @@ static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr, " subshs %1, %1, %2\n" " movlo %0, #0\n" : "+r" (safe_ptr), "=&r" (tmp) - : "r" (size), "r" (current_thread_info()->addr_limit) + : "r" (size), "r" (TASK_SIZE) : "cc"); csdb(); @@ -194,7 +170,7 @@ extern int __get_user_64t_4(void *); #define __get_user_check(x, p) \ ({ \ - unsigned long __limit = current_thread_info()->addr_limit - 1; \ + unsigned long __limit = TASK_SIZE - 1; \ register typeof(*(p)) __user *__p asm("r0") = (p); \ register __inttype(x) __r2 asm("r2"); \ register unsigned long __l asm("r1") = __limit; \ @@ -245,7 +221,7 @@ extern int __put_user_8(void *, unsigned long long); #define __put_user_check(__pu_val, __ptr, __err, __s) \ ({ \ - unsigned long __limit = current_thread_info()->addr_limit - 1; \ + unsigned long __limit = TASK_SIZE - 1; \ register typeof(__pu_val) __r2 asm("r2") = __pu_val; \ register const void __user *__p asm("r0") = __ptr; \ register unsigned long __l asm("r1") = __limit; \ @@ -262,19 +238,8 @@ extern int __put_user_8(void *, unsigned long long); #else /* CONFIG_MMU */ -/* - * uClinux has only one addr space, so has simplified address limits. - */ -#define USER_DS KERNEL_DS - -#define uaccess_kernel() (true) #define __addr_ok(addr) ((void)(addr), 1) #define __range_ok(addr, size) ((void)(addr), 0) -#define get_fs() (KERNEL_DS) - -static inline void set_fs(mm_segment_t fs) -{ -} #define get_user(x, p) __get_user(x, p) #define __put_user_check __put_user_nocheck @@ -283,9 +248,6 @@ static inline void set_fs(mm_segment_t fs) #define access_ok(addr, size) (__range_ok(addr, size) == 0) -#define user_addr_max() \ - (uaccess_kernel() ? 
~0UL : get_fs()) - #ifdef CONFIG_CPU_SPECTRE /* * When mitigating Spectre variant 1, it is not worth fixing the non- diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index a0945b898ca3..c60fefabc868 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -43,7 +43,6 @@ int main(void) BLANK(); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); - DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); DEFINE(TI_CPU_DOMAIN, offsetof(struct thread_info, cpu_domain)); @@ -92,7 +91,6 @@ int main(void) DEFINE(S_OLD_R0, offsetof(struct pt_regs, ARM_ORIG_r0)); DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs)); DEFINE(SVC_DACR, offsetof(struct svc_pt_regs, dacr)); - DEFINE(SVC_ADDR_LIMIT, offsetof(struct svc_pt_regs, addr_limit)); DEFINE(SVC_REGS_SIZE, sizeof(struct svc_pt_regs)); BLANK(); DEFINE(SIGFRAME_RC3_OFFSET, offsetof(struct sigframe, retcode[3])); diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index e837af90cd44..d9c99db50243 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -49,10 +49,6 @@ __ret_fast_syscall: UNWIND(.fnstart ) UNWIND(.cantunwind ) disable_irq_notrace @ disable interrupts - ldr r2, [tsk, #TI_ADDR_LIMIT] - ldr r1, =TASK_SIZE - cmp r2, r1 - blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing movs r1, r1, lsl #16 bne fast_work_pending @@ -87,10 +83,6 @@ __ret_fast_syscall: bl do_rseq_syscall #endif disable_irq_notrace @ disable interrupts - ldr r2, [tsk, #TI_ADDR_LIMIT] - ldr r1, =TASK_SIZE - cmp r2, r1 - blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing movs r1, r1, lsl #16 beq no_work_pending @@ -129,10 +121,6 @@ ret_slow_syscall: #endif disable_irq_notrace @ disable interrupts ENTRY(ret_to_user_from_irq) - ldr r2, [tsk, #TI_ADDR_LIMIT] - ldr r1, =TASK_SIZE - cmp r2, r1 - blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] movs r1, r1, lsl #16 bne slow_work_pending diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index fc9e8b37eaa8..581542fbf5bf 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -108,7 +108,7 @@ void __show_regs(struct pt_regs *regs) unsigned long flags; char buf[64]; #ifndef CONFIG_CPU_V7M - unsigned int domain, fs; + unsigned int domain; #ifdef CONFIG_CPU_SW_DOMAIN_PAN /* * Get the domain register for the parent context. 
In user @@ -117,14 +117,11 @@ */ if (user_mode(regs)) { domain = DACR_UACCESS_ENABLE; - fs = get_fs(); } else { domain = to_svc_pt_regs(regs)->dacr; - fs = to_svc_pt_regs(regs)->addr_limit; } #else domain = get_domain(); - fs = get_fs(); #endif #endif @@ -160,8 +157,6 @@ void __show_regs(struct pt_regs *regs) if ((domain & domain_mask(DOMAIN_USER)) == domain_val(DOMAIN_USER, DOMAIN_NOACCESS)) segment = "none"; - else if (fs == KERNEL_DS) - segment = "kernel"; else segment = "user"; diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index a3a38d0a4c85..3e7ddac086c2 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -711,14 +711,6 @@ struct page *get_signal_page(void) return page; } -/* Defer to generic check */ -asmlinkage void addr_limit_check_failed(void) -{ -#ifdef CONFIG_MMU - addr_limit_user_check(); -#endif -} - #ifdef CONFIG_DEBUG_RSEQ asmlinkage void do_rseq_syscall(struct pt_regs *regs) { diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S index f8016e3db65d..480a20766137 100644 --- a/arch/arm/lib/copy_from_user.S +++ b/arch/arm/lib/copy_from_user.S @@ -109,8 +109,7 @@ ENTRY(arm_copy_from_user) #ifdef CONFIG_CPU_SPECTRE - get_thread_info r3 - ldr r3, [r3, #TI_ADDR_LIMIT] + ldr r3, =TASK_SIZE uaccess_mask_range_ptr r1, r2, r3, ip #endif diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S index ebfe4cb3d912..842ea5ede485 100644 --- a/arch/arm/lib/copy_to_user.S +++ b/arch/arm/lib/copy_to_user.S @@ -109,8 +109,7 @@ ENTRY(__copy_to_user_std) WEAK(arm_copy_to_user) #ifdef CONFIG_CPU_SPECTRE - get_thread_info r3 - ldr r3, [r3, #TI_ADDR_LIMIT] + ldr r3, =TASK_SIZE uaccess_mask_range_ptr r0, r2, r3, ip #endif -- cgit v1.2.3 From da0b9ee43c152401f711e522c19b1468c84907bf Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 11 Aug 2021 08:30:27 +0100 Subject: ARM: 9110/1: oabi-compat: fix oabi epoll sparse warning As my patches change the oabi epoll definition, I received a report from the kernel test robot about a pre-existing issue with a mismatched __poll_t type. The OABI code was correct when it was initially added in linux-2.6.16, but a later (also correct) change to the generic __poll_t triggered a type mismatch warning from sparse. As __poll_t is always 32 bits wide and otherwise compatible, using this instead of __u32 in the oabi_epoll_event definition is a valid workaround.
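A minimal illustration of the sparse mechanics behind this fix (an assumed example, not taken from the patch): __poll_t is a __bitwise type, so implicitly storing it into a plain __u32 field draws a restricted-type warning even though both are 32 bits wide; an explicit __force cast, or matching the field type as the patch does, keeps sparse quiet.

#include <linux/compiler.h>
#include <linux/types.h>

/* Converting the restricted __poll_t to a plain integer needs an explicit
 * __force cast under sparse; using __poll_t for the field, as the patch
 * does, avoids the conversion entirely. */
static __u32 poll_bits_to_u32(__poll_t revents)
{
	return (__force __u32)revents;
}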
Reported-by: kernel test robot Fixes: 8ced390c2b18 ("define __poll_t, annotate constants") Fixes: ee219b946e4b ("uapi: turn __poll_t sparse checks on by default") Fixes: 687ad0191488 ("[ARM] 3109/1: old ABI compat: syscall wrappers for ABI impedance matching") Acked-by: Christoph Hellwig Signed-off-by: Arnd Bergmann Signed-off-by: Russell King (Oracle) --- arch/arm/kernel/sys_oabi-compat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index 223ee46b6e75..68112c172025 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -274,7 +274,7 @@ out: } struct oabi_epoll_event { - __u32 events; + __poll_t events; __u64 data; } __attribute__ ((packed,aligned(4))); -- cgit v1.2.3 From 88210317eec69d8c06cac6d6751528160e153ffd Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 11 Aug 2021 10:27:03 +0100 Subject: ARM: 9116/1: unified: Remove check for gcc < 4 Since commit cafa0010cd51fb71 ("Raise the minimum required gcc version to 4.6"), the kernel can no longer be compiled using gcc-3. Hence this condition is never true, and the check can thus be removed. Signed-off-by: Geert Uytterhoeven Signed-off-by: Russell King (Oracle) --- arch/arm/include/asm/unified.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h index 1e2c3eb04353..ce9689118dbb 100644 --- a/arch/arm/include/asm/unified.h +++ b/arch/arm/include/asm/unified.h @@ -24,10 +24,6 @@ __asm__(".syntax unified"); #ifdef CONFIG_THUMB2_KERNEL -#if __GNUC__ < 4 -#error Thumb-2 kernel requires gcc >= 4 -#endif - /* The CPSR bit describing the instruction set (Thumb) */ #define PSR_ISETSTATE PSR_T_BIT -- cgit v1.2.3 From 6c974e79d37608bc8fd5cf6b61b4233b82b60428 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 11 Aug 2021 10:27:02 +0100 Subject: ARM: 9118/1: div64: Remove always-true __div64_const32_is_OK() duplicate Since commit cafa0010cd51fb71 ("Raise the minimum required gcc version to 4.6"), the kernel can no longer be compiled using gcc-3. Hence __div64_const32_is_OK() is always true. Moreover, __div64_const32_is_OK() is defined in the same way in include/asm-generic/div64.h, so the ARM-specific definition can be removed regardless. Signed-off-by: Geert Uytterhoeven Signed-off-by: Russell King (Oracle) --- arch/arm/include/asm/div64.h | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h index 595e538f5bfb..4b69cf850451 100644 --- a/arch/arm/include/asm/div64.h +++ b/arch/arm/include/asm/div64.h @@ -52,17 +52,6 @@ static inline uint32_t __div64_32(uint64_t *n, uint32_t base) #else -/* - * gcc versions earlier than 4.0 are simply too problematic for the - * __div64_const32() code in asm-generic/div64.h. First there is - * gcc PR 15089 that tend to trig on more complex constructs, spurious - * .global __udivsi3 are inserted even if none of those symbols are - * referenced in the generated code, and those gcc versions are not able - * to do constant propagation on long long values anyway. - */ - -#define __div64_const32_is_OK (__GNUC__ >= 4) - static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias) { unsigned long long res; -- cgit v1.2.3