author		Linus Torvalds <torvalds@linux-foundation.org>	2014-12-09 13:12:47 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-09 13:12:47 -0800
commit		b64bb1d758163814687eb3b84d74e56f04d0c9d1 (patch)
tree		59f1db8b718e98d13c6cf9d3486221cfff6e7eef /include/asm-generic
parent		50569687e9c688a8688982805be6d8e3c8879042 (diff)
parent		eb8a653137b7e74f7cdc01f814eb9d094a65aed9 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
 "Here's the usual mixed bag of arm64 updates, also including some
  related EFI changes (Acked by Matt) and the MMU gather range cleanup
  (Acked by you).

  Changes include:

   - support for alternative instruction patching from Andre

   - seccomp from Akashi

   - some AArch32 instruction emulation, required by the Android folks

   - optimisations for exception entry/exit code, cmpxchg, pcpu atomics

   - mmu_gather range calculations moved into core code

   - EFI updates from Ard, including long-awaited SMBIOS support

   - /proc/cpuinfo fixes to align with the format used by arch/arm/

   - a few non-critical fixes across the architecture"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (70 commits)
  arm64: remove the unnecessary arm64_swiotlb_init()
  arm64: add module support for alternatives fixups
  arm64: perf: Prevent wraparound during overflow
  arm64/include/asm: Fixed a warning about 'struct pt_regs'
  arm64: Provide a namespace to NCAPS
  arm64: bpf: lift restriction on last instruction
  arm64: Implement support for read-mostly sections
  arm64: compat: align cacheflush syscall with arch/arm
  arm64: add seccomp support
  arm64: add SIGSYS siginfo for compat task
  arm64: add seccomp syscall for compat task
  asm-generic: add generic seccomp.h for secure computing mode 1
  arm64: ptrace: allow tracer to skip a system call
  arm64: ptrace: add NT_ARM_SYSTEM_CALL regset
  arm64: Move some head.text functions to executable section
  arm64: jump labels: NOP out NOP -> NOP replacement
  arm64: add support to dump the kernel page tables
  arm64: Add FIX_HOLE to permanent fixed addresses
  arm64: alternatives: fix pr_fmt string for consistency
  arm64: vmlinux.lds.S: don't discard .exit.* sections at link-time
  ...
Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/seccomp.h  30
-rw-r--r--  include/asm-generic/tlb.h      57
2 files changed, 77 insertions(+), 10 deletions(-)
diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h
new file mode 100644
index 000000000000..9fa1f653ed3b
--- /dev/null
+++ b/include/asm-generic/seccomp.h
@@ -0,0 +1,30 @@
+/*
+ * include/asm-generic/seccomp.h
+ *
+ * Copyright (C) 2014 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_GENERIC_SECCOMP_H
+#define _ASM_GENERIC_SECCOMP_H
+
+#include <linux/unistd.h>
+
+#if defined(CONFIG_COMPAT) && !defined(__NR_seccomp_read_32)
+#define __NR_seccomp_read_32 __NR_read
+#define __NR_seccomp_write_32 __NR_write
+#define __NR_seccomp_exit_32 __NR_exit
+#define __NR_seccomp_sigreturn_32 __NR_rt_sigreturn
+#endif /* CONFIG_COMPAT && ! already defined */
+
+#define __NR_seccomp_read __NR_read
+#define __NR_seccomp_write __NR_write
+#define __NR_seccomp_exit __NR_exit
+#ifndef __NR_seccomp_sigreturn
+#define __NR_seccomp_sigreturn __NR_rt_sigreturn
+#endif
+
+#endif /* _ASM_GENERIC_SECCOMP_H */
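
For context on how this new header is consumed (a sketch paraphrased from kernel/seccomp.c, not part of this diff; strict_mode_allows is a hypothetical name used only for illustration): secure computing mode 1 permits exactly the four syscalls named above, so the core code keeps a zero-terminated table of their numbers and walks it on every syscall entry, substituting the __NR_seccomp_*_32 values for compat tasks.

/* Sketch only -- mirrors the mode-1 whitelist walk in kernel/seccomp.c. */
#include <linux/unistd.h>
#include <asm/seccomp.h>	/* pulls in asm-generic/seccomp.h above */

static const int mode1_syscalls[] = {
	__NR_seccomp_read, __NR_seccomp_write,
	__NR_seccomp_exit, __NR_seccomp_sigreturn,
	0,	/* null terminated */
};

/* Hypothetical helper: nonzero if a mode-1 task may issue this_syscall. */
static int strict_mode_allows(int this_syscall)
{
	const int *allowed = mode1_syscalls;	/* compat tasks get a _32 table */

	do {
		if (*allowed == this_syscall)
			return 1;
	} while (*++allowed);

	return 0;	/* the real kernel kills the task at this point */
}

A compat task is pointed at an equivalent table built from the __NR_seccomp_*_32 definitions. The fallback in the header simply reuses the native numbers, which suits architectures whose compat numbering matches; arm64 (in this same series) overrides the _32 values with its compat syscall numbers before including this file.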
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 5672d7ea1fa0..08848050922e 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -96,10 +96,9 @@ struct mmu_gather {
 #endif
 	unsigned long		start;
 	unsigned long		end;
-	unsigned int		need_flush : 1,	/* Did free PTEs */
 	/* we are in the middle of an operation to clear
 	 * a full mm and can make some optimizations */
-				fullmm : 1,
+	unsigned int		fullmm : 1,
 	/* we have performed an operation which
 	 * requires a complete flush of the tlb */
 				need_flush_all : 1;
@@ -128,16 +127,54 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	tlb_flush_mmu(tlb);
 }
 
+static inline void __tlb_adjust_range(struct mmu_gather *tlb,
+				      unsigned long address)
+{
+	tlb->start = min(tlb->start, address);
+	tlb->end = max(tlb->end, address + PAGE_SIZE);
+}
+
+static inline void __tlb_reset_range(struct mmu_gather *tlb)
+{
+	tlb->start = TASK_SIZE;
+	tlb->end = 0;
+}
+
+/*
+ * In the case of tlb vma handling, we can optimise these away in the
+ * case where we're doing a full MM flush.  When we're doing a munmap,
+ * the vmas are adjusted to only cover the region to be torn down.
+ */
+#ifndef tlb_start_vma
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#endif
+
+#define __tlb_end_vma(tlb, vma)					\
+	do {							\
+		if (!tlb->fullmm && tlb->end) {			\
+			tlb_flush(tlb);				\
+			__tlb_reset_range(tlb);			\
+		}						\
+	} while (0)
+
+#ifndef tlb_end_vma
+#define tlb_end_vma	__tlb_end_vma
+#endif
+
+#ifndef __tlb_remove_tlb_entry
+#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+#endif
+
 /**
  * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
  *
- * Record the fact that pte's were really umapped in ->need_flush, so we can
- * later optimise away the tlb invalidate.   This helps when userspace is
- * unmapping already-unmapped pages, which happens quite a lot.
+ * Record the fact that pte's were really unmapped by updating the range,
+ * so we can later optimise away the tlb invalidate.  This helps when
+ * userspace is unmapping already-unmapped pages, which happens quite a lot.
  */
 #define tlb_remove_tlb_entry(tlb, ptep, address)		\
 	do {							\
-		tlb->need_flush = 1;				\
+		__tlb_adjust_range(tlb, address);		\
 		__tlb_remove_tlb_entry(tlb, ptep, address);	\
 	} while (0)
 
@@ -151,27 +188,27 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 
 #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)		\
 	do {							\
-		tlb->need_flush = 1;				\
+		__tlb_adjust_range(tlb, address);		\
 		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);	\
 	} while (0)
 
 #define pte_free_tlb(tlb, ptep, address)			\
 	do {							\
-		tlb->need_flush = 1;				\
+		__tlb_adjust_range(tlb, address);		\
 		__pte_free_tlb(tlb, ptep, address);		\
 	} while (0)
 
 #ifndef __ARCH_HAS_4LEVEL_HACK
 #define pud_free_tlb(tlb, pudp, address)			\
 	do {							\
-		tlb->need_flush = 1;				\
+		__tlb_adjust_range(tlb, address);		\
 		__pud_free_tlb(tlb, pudp, address);		\
 	} while (0)
 #endif
 
 #define pmd_free_tlb(tlb, pmdp, address)			\
 	do {							\
-		tlb->need_flush = 1;				\
+		__tlb_adjust_range(tlb, address);		\
 		__pmd_free_tlb(tlb, pmdp, address);		\
 	} while (0)
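
Taken together, the tlb.h changes replace the old need_flush bit with a tracked [start, end) virtual-address range. __tlb_reset_range() parks start at TASK_SIZE and end at 0, so a non-zero end doubles as "flush pending"; every tlb_remove_tlb_entry() or *_free_tlb() call then widens the window via min/max, and __tlb_end_vma() flushes only that window (or nothing at all) instead of the whole address space. A stand-alone toy illustrating the invariant (hypothetical user-space code, not from this merge; the real mmu_gather carries more state):

/* Toy model of the range tracking added above -- illustration only. */
#include <stdio.h>

#define TOY_PAGE_SIZE	4096UL
#define TOY_TASK_SIZE	(1UL << 47)	/* stand-in for the user VA limit */

struct toy_gather {
	unsigned long start;
	unsigned long end;
};

static void toy_reset_range(struct toy_gather *tlb)
{
	tlb->start = TOY_TASK_SIZE;	/* min() below can only lower this */
	tlb->end = 0;			/* 0 == nothing pending */
}

static void toy_adjust_range(struct toy_gather *tlb, unsigned long address)
{
	if (address < tlb->start)
		tlb->start = address;
	if (address + TOY_PAGE_SIZE > tlb->end)
		tlb->end = address + TOY_PAGE_SIZE;
}

int main(void)
{
	struct toy_gather tlb;

	toy_reset_range(&tlb);
	toy_adjust_range(&tlb, 0x2000);	/* unmap one page */
	toy_adjust_range(&tlb, 0x8000);	/* unmap another, further away */

	if (tlb.end)	/* the same test __tlb_end_vma() performs */
		printf("flush [%#lx, %#lx) only\n", tlb.start, tlb.end);
	return 0;
}

Run standalone, this prints "flush [0x2000, 0x9000) only": the invalidate covers just the touched pages, and if nothing was unmapped at all (end still 0) the flush is skipped entirely, which is the optimisation the removed need_flush bit could only approximate.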