author     Andy Lutomirski <luto@kernel.org>    2016-05-27 12:57:02 -0700
committer  Kees Cook <keescook@chromium.org>    2016-06-14 10:54:39 -0700
commit     2f275de5d1ed7269913ef9b4c64a13952c0a38e8 (patch)
tree       0151774ac6f2d8d8e89cc3402fc57ab8918bf610 /arch
parent     58d0a862f573c3354fa912603ef5a4db188774e7 (diff)
seccomp: Add a seccomp_data parameter to secure_computing()
Currently, if arch code wants to supply seccomp_data directly to seccomp (which is generally much faster than having seccomp do it using the syscall_get_xyz() API), it has to use the two-phase seccomp hooks. Add it to the easy hooks, too.

Cc: linux-arch@vger.kernel.org
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
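For reference, after this change both hooks take a pointer to struct seccomp_data; callers that have not already built one simply pass NULL, as every hunk below does. A rough sketch of the changed prototypes (a simplified approximation, not a verbatim copy of include/linux/seccomp.h):

struct seccomp_data;	/* defined in include/uapi/linux/seccomp.h */

/* New parameter on both hooks; the callers touched by this patch pass NULL. */
int secure_computing(const struct seccomp_data *sd);
int __secure_computing(const struct seccomp_data *sd);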
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/kernel/ptrace.c               | 2
-rw-r--r--  arch/arm64/kernel/ptrace.c             | 2
-rw-r--r--  arch/mips/kernel/ptrace.c              | 2
-rw-r--r--  arch/parisc/kernel/ptrace.c            | 2
-rw-r--r--  arch/powerpc/kernel/ptrace.c           | 2
-rw-r--r--  arch/s390/kernel/ptrace.c              | 2
-rw-r--r--  arch/tile/kernel/ptrace.c              | 2
-rw-r--r--  arch/um/kernel/skas/syscall.c          | 2
-rw-r--r--  arch/x86/entry/vsyscall/vsyscall_64.c  | 2
9 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 4d9375814b53..1027d3b54541 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -934,7 +934,7 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
/* Do the secure computing check first; failures should be fast. */
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
- if (secure_computing() == -1)
+ if (secure_computing(NULL) == -1)
return -1;
#else
/* XXX: remove this once OABI gets fixed */
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 3f6cd5c5234f..6e2cf046615d 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1247,7 +1247,7 @@ static void tracehook_report_syscall(struct pt_regs *regs,
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
/* Do the secure computing check first; failures should be fast. */
- if (secure_computing() == -1)
+ if (secure_computing(NULL) == -1)
return -1;
if (test_thread_flag(TIF_SYSCALL_TRACE))
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 0dcf69194473..c50af846ecf9 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -893,7 +893,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
current_thread_info()->syscall = syscall;
- if (secure_computing() == -1)
+ if (secure_computing(NULL) == -1)
return -1;
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index b5458b37fc5b..8edc47c0b98e 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -312,7 +312,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
long do_syscall_trace_enter(struct pt_regs *regs)
{
/* Do the secure computing check first. */
- if (secure_computing() == -1)
+ if (secure_computing(NULL) == -1)
return -1;
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 30a03c03fe73..ed799e994773 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1783,7 +1783,7 @@ static int do_seccomp(struct pt_regs *regs)
* have already loaded -ENOSYS into r3, or seccomp has put
* something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
*/
- if (__secure_computing())
+ if (__secure_computing(NULL))
return -1;
/*
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 49b1c13bf6c9..c238e9958c2a 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -824,7 +824,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
long ret = 0;
/* Do the secure computing check first. */
- if (secure_computing()) {
+ if (secure_computing(NULL)) {
/* seccomp failures shouldn't expose any additional code. */
ret = -1;
goto out;
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index 54e7b723db99..8c6d2f2fefa3 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -255,7 +255,7 @@ int do_syscall_trace_enter(struct pt_regs *regs)
{
u32 work = ACCESS_ONCE(current_thread_info()->flags);
- if (secure_computing() == -1)
+ if (secure_computing(NULL) == -1)
return -1;
if (work & _TIF_SYSCALL_TRACE) {
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index 48b0dcbd87be..9c5570f0f397 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -21,7 +21,7 @@ void handle_syscall(struct uml_pt_regs *r)
PT_REGS_SET_SYSCALL_RETURN(regs, -ENOSYS);
/* Do the secure computing check first; failures should be fast. */
- if (secure_computing() == -1)
+ if (secure_computing(NULL) == -1)
return;
if (syscall_trace_enter(regs))
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 174c2549939d..85acde5fa442 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -207,7 +207,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
*/
regs->orig_ax = syscall_nr;
regs->ax = -ENOSYS;
- tmp = secure_computing();
+ tmp = secure_computing(NULL);
if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
warn_bad_vsyscall(KERN_DEBUG, regs,
"seccomp tried to change syscall nr or ip");