Diffstat (limited to 'arch/arm64/include/asm')
35 files changed, 440 insertions, 210 deletions
diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
index ffb1a40d5475..09e43272ccb0 100644
--- a/arch/arm64/include/asm/archrandom.h
+++ b/arch/arm64/include/asm/archrandom.h
@@ -4,10 +4,26 @@

 #ifdef CONFIG_ARCH_RANDOM

+#include <linux/arm-smccc.h>
 #include <linux/bug.h>
 #include <linux/kernel.h>
 #include <asm/cpufeature.h>

+#define ARM_SMCCC_TRNG_MIN_VERSION	0x10000UL
+
+extern bool smccc_trng_available;
+
+static inline bool __init smccc_probe_trng(void)
+{
+	struct arm_smccc_res res;
+
+	arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_VERSION, &res);
+	if ((s32)res.a0 < 0)
+		return false;
+
+	return res.a0 >= ARM_SMCCC_TRNG_MIN_VERSION;
+}
+
 static inline bool __arm64_rndr(unsigned long *v)
 {
 	bool ok;
@@ -38,26 +54,55 @@ static inline bool __must_check arch_get_random_int(unsigned int *v)

 static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 {
+	struct arm_smccc_res res;
+
+	/*
+	 * We prefer the SMCCC call, since its semantics (return actual
+	 * hardware backed entropy) is closer to the idea behind this
+	 * function here than what even the RNDRSS register provides
+	 * (the output of a pseudo RNG freshly seeded by a TRNG).
+	 */
+	if (smccc_trng_available) {
+		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 64, &res);
+		if ((int)res.a0 >= 0) {
+			*v = res.a3;
+			return true;
+		}
+	}
+
 	/*
 	 * Only support the generic interface after we have detected
 	 * the system wide capability, avoiding complexity with the
 	 * cpufeature code and with potential scheduling between CPUs
 	 * with and without the feature.
 	 */
-	if (!cpus_have_const_cap(ARM64_HAS_RNG))
-		return false;
+	if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndr(v))
+		return true;

-	return __arm64_rndr(v);
+	return false;
 }

-
 static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
 {
+	struct arm_smccc_res res;
 	unsigned long val;
-	bool ok = arch_get_random_seed_long(&val);

-	*v = val;
-	return ok;
+	if (smccc_trng_available) {
+		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 32, &res);
+		if ((int)res.a0 >= 0) {
+			*v = res.a3 & GENMASK(31, 0);
+			return true;
+		}
+	}
+
+	if (cpus_have_const_cap(ARM64_HAS_RNG)) {
+		if (__arm64_rndr(&val)) {
+			*v = val;
+			return true;
+		}
+	}
+
+	return false;
 }

 static inline bool __init __early_cpu_has_rndr(void)
@@ -72,12 +117,29 @@ arch_get_random_seed_long_early(unsigned long *v)
 {
 	WARN_ON(system_state != SYSTEM_BOOTING);

-	if (!__early_cpu_has_rndr())
-		return false;
+	if (smccc_trng_available) {
+		struct arm_smccc_res res;
+
+		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 64, &res);
+		if ((int)res.a0 >= 0) {
+			*v = res.a3;
+			return true;
+		}
+	}
+
+	if (__early_cpu_has_rndr() && __arm64_rndr(v))
+		return true;

-	return __arm64_rndr(v);
+	return false;
 }
 #define arch_get_random_seed_long_early arch_get_random_seed_long_early

+#else /* !CONFIG_ARCH_RANDOM */
+
+static inline bool __init smccc_probe_trng(void)
+{
+	return false;
+}
+
 #endif /* CONFIG_ARCH_RANDOM */
 #endif /* _ASM_ARCHRANDOM_H */
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index 9990059be106..ccedf548dac9 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -15,10 +15,10 @@
 	.macro	__uaccess_ttbr0_disable, tmp1
 	mrs	\tmp1, ttbr1_el1			// swapper_pg_dir
 	bic	\tmp1, \tmp1, #TTBR_ASID_MASK
-	sub	\tmp1, \tmp1, #PAGE_SIZE		// reserved_pg_dir just before swapper_pg_dir
+	sub	\tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET	// reserved_pg_dir
 	msr	ttbr0_el1, \tmp1			// set reserved TTBR0_EL1
 	isb
-	add	\tmp1, \tmp1, #PAGE_SIZE
+	add	\tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET
 	msr	ttbr1_el1, \tmp1		// set reserved ASID
 	isb
 	.endm
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index bf125c591116..ca31594d3d6c 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -676,6 +676,23 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
 	.endm

 /*
+ * Set SCTLR_EL1 to the passed value, and invalidate the local icache
+ * in the process. This is called when setting the MMU on.
+ */
+.macro set_sctlr_el1, reg
+	msr	sctlr_el1, \reg
+	isb
+	/*
+	 * Invalidate the local I-cache so that any instructions fetched
+	 * speculatively from the PoC are discarded, since they may have
+	 * been dynamically patched at the PoU.
+	 */
+	ic	iallu
+	dsb	nsh
+	isb
+.endm
+
+/*
  * Check whether to yield to another runnable task from kernel mode NEON code
  * (which runs with preemption disabled).
  *
@@ -745,6 +762,22 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
 .Lyield_out_\@ :
 	.endm

+	/*
+	 * Check whether preempt-disabled code should yield as soon as it
+	 * is able. This is the case if re-enabling preemption a single
+	 * time results in a preempt count of zero, and the TIF_NEED_RESCHED
+	 * flag is set. (Note that the latter is stored negated in the
+	 * top word of the thread_info::preempt_count field)
+	 */
+	.macro		cond_yield, lbl:req, tmp:req
+#ifdef CONFIG_PREEMPTION
+	get_current_task \tmp
+	ldr		\tmp, [\tmp, #TSK_TI_PREEMPT]
+	sub		\tmp, \tmp, #PREEMPT_DISABLE_OFFSET
+	cbz		\tmp, \lbl
+#endif
+	.endm
+
 /*
  * This macro emits a program property note section identifying
  * architecture features which require special handling, mainly for
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 77cbbe3625f2..a074459f8f2f 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -6,7 +6,6 @@
 #define __ASM_CACHE_H

 #include <asm/cputype.h>
-#include <asm/mte-kasan.h>

 #define CTR_L1IP_SHIFT		14
 #define CTR_L1IP_MASK		3
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 45217f21f1fe..52e5c1623224 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -30,11 +30,6 @@
  *	the implementation assumes non-aliasing VIPT D-cache and (aliasing)
  *	VIPT I-cache.
  *
- *	flush_cache_mm(mm)
- *
- *		Clean and invalidate all user space cache entries
- *		before a change of page tables.
- *
  *	flush_icache_range(start, end)
  *
  *		Ensure coherency between the I-cache and the D-cache in the
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 9a555809b89c..61177bac49fa 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -63,6 +63,11 @@ struct arm64_ftr_bits {
 	s64		safe_val; /* safe value for FTR_EXACT features */
 };

+struct arm64_ftr_override {
+	u64		val;
+	u64		mask;
+};
+
 /*
  * @arm64_ftr_reg - Feature register
  * @strict_mask		Bits which should match across all CPUs for sanity.
@@ -74,6 +79,7 @@ struct arm64_ftr_reg {
 	u64				user_mask;
 	u64				sys_val;
 	u64				user_val;
+	struct arm64_ftr_override	*override;
 	const struct arm64_ftr_bits	*ftr_bits;
 };

@@ -600,6 +606,7 @@ void __init setup_cpu_features(void);
 void check_local_cpu_capabilities(void);

 u64 read_sanitised_ftr_reg(u32 id);
+u64 __read_sysreg_by_encoding(u32 sys_id);

 static inline bool cpu_supports_mixed_endian_el0(void)
 {
@@ -811,6 +818,10 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
 	return 8;
 }

+extern struct arm64_ftr_override id_aa64mmfr1_override;
+extern struct arm64_ftr_override id_aa64pfr1_override;
+extern struct arm64_ftr_override id_aa64isar1_override;
+
 u32 get_kvm_ipa_limit(void);
 void dump_cpu_features(void);

diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index a7f5a1bbc8ac..d77d358f9395 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -32,46 +32,39 @@
 * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
 * EL2.
 */
-.macro __init_el2_timers mode
-.ifeqs "\mode", "nvhe"
+.macro __init_el2_timers
	mrs	x0, cnthctl_el2
	orr	x0, x0, #3			// Enable EL1 physical timers
	msr	cnthctl_el2, x0
-.endif
	msr	cntvoff_el2, xzr		// Clear virtual offset
 .endm

-.macro __init_el2_debug mode
+.macro __init_el2_debug
	mrs	x1, id_aa64dfr0_el1
	sbfx	x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
	cmp	x0, #1
-	b.lt	1f				// Skip if no PMU present
+	b.lt	.Lskip_pmu_\@			// Skip if no PMU present
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
-1:
+.Lskip_pmu_\@:
	csel	x2, xzr, x0, lt			// all PMU counters from EL1

	/* Statistical profiling */
	ubfx	x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
-	cbz	x0, 3f				// Skip if SPE not present
+	cbz	x0, .Lskip_spe_\@		// Skip if SPE not present

-.ifeqs "\mode", "nvhe"
	mrs_s	x0, SYS_PMBIDR_EL1		// If SPE available at EL2,
	and	x0, x0, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
-	cbnz	x0, 2f				// then permit sampling of physical
+	cbnz	x0, .Lskip_spe_el2_\@		// then permit sampling of physical
	mov	x0, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
		      1 << SYS_PMSCR_EL2_PA_SHIFT)
	msr_s	SYS_PMSCR_EL2, x0		// addresses and physical counter
-2:
+.Lskip_spe_el2_\@:
	mov	x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
	orr	x2, x2, x0			// If we don't have VHE, then
						// use EL1&0 translation.
-.else
-	orr	x2, x2, #MDCR_EL2_TPMS		// For VHE, use EL2 translation
-						// and disable access from EL1
-.endif
-3:
+.Lskip_spe_\@:
	msr	mdcr_el2, x2			// Configure debug traps
 .endm

@@ -79,9 +72,9 @@
 .macro __init_el2_lor
	mrs	x1, id_aa64mmfr1_el1
	ubfx	x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4
-	cbz	x0, 1f
+	cbz	x0, .Lskip_lor_\@
	msr_s	SYS_LORC_EL1, xzr
-1:
+.Lskip_lor_\@:
 .endm

 /* Stage-2 translation */
@@ -93,7 +86,7 @@
 .macro __init_el2_gicv3
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
-	cbz	x0, 1f
+	cbz	x0, .Lskip_gicv3_\@

	mrs_s	x0, SYS_ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
@@ -103,7 +96,7 @@
	mrs_s	x0, SYS_ICC_SRE_EL2		// Read SRE back,
	tbz	x0, #0, 1f			// and check that it sticks
	msr_s	SYS_ICH_HCR_EL2, xzr		// Reset ICC_HCR_EL2 to defaults
-1:
+.Lskip_gicv3_\@:
 .endm

 .macro __init_el2_hstr
@@ -128,14 +121,14 @@
 .macro __init_el2_nvhe_sve
	mrs	x1, id_aa64pfr0_el1
	ubfx	x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
-	cbz	x1, 1f
+	cbz	x1, .Lskip_sve_\@

	bic	x0, x0, #CPTR_EL2_TZ		// Also disable SVE traps
	msr	cptr_el2, x0			// Disable copro. traps to EL2
	isb
	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
	msr_s	SYS_ZCR_EL2, x1			// length for EL1.
-1:
+.Lskip_sve_\@:
 .endm

 .macro __init_el2_nvhe_prepare_eret
@@ -145,37 +138,24 @@

 /**
 * Initialize EL2 registers to sane values. This should be called early on all
- * cores that were booted in EL2.
+ * cores that were booted in EL2. Note that everything gets initialised as
+ * if VHE was not evailable. The kernel context will be upgraded to VHE
+ * if possible later on in the boot process
 *
 * Regs: x0, x1 and x2 are clobbered.
 */
-.macro init_el2_state mode
-.ifnes "\mode", "vhe"
-.ifnes "\mode", "nvhe"
-.error "Invalid 'mode' argument"
-.endif
-.endif
-
+.macro init_el2_state
	__init_el2_sctlr
-	__init_el2_timers \mode
-	__init_el2_debug \mode
+	__init_el2_timers
+	__init_el2_debug
	__init_el2_lor
	__init_el2_stage2
	__init_el2_gicv3
	__init_el2_hstr
-
-	/*
-	 * When VHE is not in use, early init of EL2 needs to be done here.
-	 * When VHE _is_ in use, EL1 will not be used in the host and
-	 * requires no configuration, and all non-hyp-specific EL2 setup
-	 * will be done via the _EL1 system register aliases in __cpu_setup.
-	 */
-.ifeqs "\mode", "nvhe"
	__init_el2_nvhe_idregs
	__init_el2_nvhe_cptr
	__init_el2_nvhe_sve
	__init_el2_nvhe_prepare_eret
-.endif
 .endm

 #endif /* __ARM_KVM_INIT_H__ */
diff --git a/arch/arm64/include/asm/hyp_image.h b/arch/arm64/include/asm/hyp_image.h
index daa1a1da539e..737ded6b6d0d 100644
--- a/arch/arm64/include/asm/hyp_image.h
+++ b/arch/arm64/include/asm/hyp_image.h
@@ -7,6 +7,9 @@
 #ifndef __ARM64_HYP_IMAGE_H__
 #define __ARM64_HYP_IMAGE_H__

+#define __HYP_CONCAT(a, b)	a ## b
+#define HYP_CONCAT(a, b)	__HYP_CONCAT(a, b)
+
 /*
  * KVM nVHE code has its own symbol namespace prefixed with __kvm_nvhe_,
  * to separate it from the kernel proper.
@@ -21,9 +24,31 @@
  */
 #define HYP_SECTION_NAME(NAME)	.hyp##NAME

+/* Symbol defined at the beginning of each hyp section. */
+#define HYP_SECTION_SYMBOL_NAME(NAME) \
+	HYP_CONCAT(__hyp_section_, HYP_SECTION_NAME(NAME))
+
+/*
+ * Helper to generate linker script statements starting a hyp section.
+ *
+ * A symbol with a well-known name is defined at the first byte. This
+ * is used as a base for hyp relocations (see gen-hyprel.c). It must
+ * be defined inside the section so the linker of `vmlinux` cannot
+ * separate it from the section data.
+ */
+#define BEGIN_HYP_SECTION(NAME)				\
+	HYP_SECTION_NAME(NAME) : {			\
+		HYP_SECTION_SYMBOL_NAME(NAME) = .;

+/* Helper to generate linker script statements ending a hyp section. */
+#define END_HYP_SECTION					\
+	}
+
 /* Defines an ELF hyp section from input section @NAME and its subsections. */
-#define HYP_SECTION(NAME) \
-	HYP_SECTION_NAME(NAME) : { *(NAME NAME##.*) }
+#define HYP_SECTION(NAME)			\
+	BEGIN_HYP_SECTION(NAME)			\
+		*(NAME NAME##.*)		\
+	END_HYP_SECTION

 /*
  * Defines a linker script alias of a kernel-proper symbol referenced by
diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h
index 0aaf9044cd6a..12d5f47f7dbe 100644
--- a/arch/arm64/include/asm/kasan.h
+++ b/arch/arm64/include/asm/kasan.h
@@ -6,6 +6,7 @@

 #include <linux/linkage.h>
 #include <asm/memory.h>
+#include <asm/mte-kasan.h>
 #include <asm/pgtable-types.h>

 #define arch_kasan_set_tag(addr, tag)	__tag_set(addr, tag)
diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
index d24b527e8c00..9befcd87e9a8 100644
--- a/arch/arm64/include/asm/kexec.h
+++ b/arch/arm64/include/asm/kexec.h
@@ -90,18 +90,19 @@ static inline void crash_prepare_suspend(void) {}
 static inline void crash_post_resume(void) {}
 #endif

-#ifdef CONFIG_KEXEC_FILE
 #define ARCH_HAS_KIMAGE_ARCH

 struct kimage_arch {
 	void *dtb;
-	unsigned long dtb_mem;
+	phys_addr_t dtb_mem;
+	phys_addr_t kern_reloc;
 	/* Core ELF header buffer */
 	void *elf_headers;
 	unsigned long elf_headers_mem;
 	unsigned long elf_headers_sz;
 };

+#ifdef CONFIG_KEXEC_FILE
 extern const struct kexec_file_ops kexec_image_ops;

 struct kimage;
diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
new file mode 100644
index 000000000000..d061176d57ea
--- /dev/null
+++ b/arch/arm64/include/asm/kfence.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arm64 KFENCE support.
+ *
+ * Copyright (C) 2020, Google LLC.
+ */
+
+#ifndef __ASM_KFENCE_H
+#define __ASM_KFENCE_H
+
+#include <asm/cacheflush.h>
+
+static inline bool arch_kfence_init_pool(void) { return true; }
+
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+	set_memory_valid(addr, 1, !protect);
+
+	return true;
+}
+
+#endif /* __ASM_KFENCE_H */
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 8a33d83ea843..22d933e9b59e 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -199,32 +199,6 @@ extern void __vgic_v3_init_lrs(void);

 extern u32 __kvm_get_mdcr_el2(void);

-#if defined(GCC_VERSION) && GCC_VERSION < 50000
-#define SYM_CONSTRAINT	"i"
-#else
-#define SYM_CONSTRAINT	"S"
-#endif
-
-/*
- * Obtain the PC-relative address of a kernel symbol
- * s: symbol
- *
- * The goal of this macro is to return a symbol's address based on a
- * PC-relative computation, as opposed to a loading the VA from a
- * constant pool or something similar. This works well for HYP, as an
- * absolute VA is guaranteed to be wrong. Only use this if trying to
- * obtain the address of a symbol (i.e. not something you obtained by
- * following a pointer).
- */
-#define hyp_symbol_addr(s)						\
-	({								\
-		typeof(s) *addr;					\
-		asm("adrp	%0, %1\n"				\
-		    "add	%0, %0, :lo12:%1\n"			\
-		    : "=r" (addr) : SYM_CONSTRAINT (&s));		\
-		addr;							\
-	})
-
 #define __KVM_EXTABLE(from, to)						\
	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
	"	.align		3\n"					\
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 8fcfab0c2567..3d10e6527f7d 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -30,7 +30,6 @@

 #define __KVM_HAVE_ARCH_INTC_INITIALIZED

-#define KVM_USER_MEM_SLOTS 512
 #define KVM_HALT_POLL_NS_DEFAULT 500000

 #include <kvm/arm_vgic.h>
@@ -771,4 +770,6 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
 #define kvm_vcpu_has_pmu(vcpu)					\
	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

+int kvm_trng_call(struct kvm_vcpu *vcpu);
+
 #endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index e52d82aeadca..90873851f677 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -73,49 +73,39 @@ alternative_cb_end
 .endm

 /*
- * Convert a kernel image address to a PA
- * reg: kernel address to be converted in place
+ * Convert a hypervisor VA to a PA
+ * reg: hypervisor address to be converted in place
 * tmp: temporary register
- *
- * The actual code generation takes place in kvm_get_kimage_voffset, and
- * the instructions below are only there to reserve the space and
- * perform the register allocation (kvm_get_kimage_voffset uses the
- * specific registers encoded in the instructions).
 */
-.macro kimg_pa reg, tmp
-alternative_cb kvm_get_kimage_voffset
-	movz	\tmp, #0
-	movk	\tmp, #0, lsl #16
-	movk	\tmp, #0, lsl #32
-	movk	\tmp, #0, lsl #48
-alternative_cb_end
-
-	/* reg = __pa(reg) */
-	sub	\reg, \reg, \tmp
+.macro hyp_pa reg, tmp
+	ldr_l	\tmp, hyp_physvirt_offset
+	add	\reg, \reg, \tmp
 .endm

 /*
- * Convert a kernel image address to a hyp VA
- * reg: kernel address to be converted in place
+ * Convert a hypervisor VA to a kernel image address
+ * reg: hypervisor address to be converted in place
 * tmp: temporary register
 *
 * The actual code generation takes place in kvm_get_kimage_voffset, and
 * the instructions below are only there to reserve the space and
- * perform the register allocation (kvm_update_kimg_phys_offset uses the
+ * perform the register allocation (kvm_get_kimage_voffset uses the
 * specific registers encoded in the instructions).
 */
-.macro kimg_hyp_va reg, tmp
-alternative_cb kvm_update_kimg_phys_offset
+.macro hyp_kimg_va reg, tmp
+	/* Convert hyp VA -> PA. */
+	hyp_pa	\reg, \tmp
+
+	/* Load kimage_voffset. */
+alternative_cb kvm_get_kimage_voffset
	movz	\tmp, #0
	movk	\tmp, #0, lsl #16
	movk	\tmp, #0, lsl #32
	movk	\tmp, #0, lsl #48
 alternative_cb_end

-	sub	\reg, \reg, \tmp
-	mov_q	\tmp, PAGE_OFFSET
-	orr	\reg, \reg, \tmp
-	kern_hyp_va	\reg
+	/* Convert PA -> kimg VA. */
+	add	\reg, \reg, \tmp
 .endm

 #else
@@ -129,6 +119,7 @@ alternative_cb_end
 void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
 void kvm_compute_layout(void);
+void kvm_apply_hyp_relocations(void);

 static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 {
@@ -144,24 +135,6 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)

 #define kern_hyp_va(v) 	((typeof(v))(__kern_hyp_va((unsigned long)(v))))

-static __always_inline unsigned long __kimg_hyp_va(unsigned long v)
-{
-	unsigned long offset;
-
-	asm volatile(ALTERNATIVE_CB("movz %0, #0\n"
-				    "movk %0, #0, lsl #16\n"
-				    "movk %0, #0, lsl #32\n"
-				    "movk %0, #0, lsl #48\n",
-				    kvm_update_kimg_phys_offset)
-		     : "=r" (offset));
-
-	return __kern_hyp_va((v - offset) | PAGE_OFFSET);
-}
-
-#define kimg_fn_hyp_va(v)	((typeof(*v))(__kimg_hyp_va((unsigned long)(v))))
-
-#define kimg_fn_ptr(x)	(typeof(x) **)(x)
-
 /*
  * We currently support using a VM-specified IPA size. For backward
  * compatibility, the default IPA size is fixed to 40bits.
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 52ab38db04c7..8886d43cfb11 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -157,6 +157,11 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
+ * Note that the update of a valid leaf PTE in this function will be aborted,
+ * if it's trying to recreate the exact same mapping or only change the access
+ * permissions. Instead, the vCPU will exit one more time from guest if still
+ * needed and then go through the path of relaxing permissions.
+ *
 * Note that this function will both coalesce existing table entries and split
 * existing block mappings, relying on page-faults to fault back areas outside
 * of the new mapping lazily.
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 18fce223b67b..c759faf7a1ff 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -159,6 +159,18 @@
 #define IOREMAP_MAX_ORDER	(PMD_SHIFT)
 #endif

+/*
+ * Open-coded (swapper_pg_dir - reserved_pg_dir) as this cannot be calculated
+ * until link time.
+ */
+#define RESERVED_SWAPPER_OFFSET	(PAGE_SIZE)
+
+/*
+ * Open-coded (swapper_pg_dir - tramp_pg_dir) as this cannot be calculated
+ * until link time.
+ */
+#define TRAMP_SWAPPER_OFFSET	(2 * PAGE_SIZE)
+
 #ifndef __ASSEMBLY__

 #include <linux/bitops.h>
@@ -232,6 +244,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)

 #ifdef CONFIG_KASAN_HW_TAGS
 #define arch_enable_tagging()			mte_enable_kernel()
+#define arch_set_tagging_report_once(state)	mte_set_report_once(state)
 #define arch_init_tags(max_tag)			mte_init_tags(max_tag)
 #define arch_get_random_tag()			mte_get_random_tag()
 #define arch_get_mem_tag(addr)			mte_get_mem_tag(addr)
@@ -247,11 +260,13 @@ static inline const void *__tag_set(const void *addr, u8 tag)

 /*
- * The linear kernel range starts at the bottom of the virtual address space.
+ * Check whether an arbitrary address is within the linear map, which
+ * lives in the [PAGE_OFFSET, PAGE_END) interval at the bottom of the
+ * kernel's TTBR1 address range.
 */
-#define __is_lm_address(addr)	(((u64)(addr) & ~PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))
+#define __is_lm_address(addr)	(((u64)(addr) - PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))

-#define __lm_to_phys(addr)	(((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
+#define __lm_to_phys(addr)	(((addr) - PAGE_OFFSET) + PHYS_OFFSET)
 #define __kimg_to_phys(addr)	((addr) - kimage_voffset)

 #define __virt_to_phys_nodebug(x) ({					\
@@ -330,7 +345,7 @@ static inline void *phys_to_virt(phys_addr_t x)
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */

 #define virt_addr_valid(addr)	({					\
-	__typeof__(addr) __addr = addr;					\
+	__typeof__(addr) __addr = __tag_reset(addr);			\
	__is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr));	\
 })

diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 0b3079fd28eb..70ce8c1d2b07 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -81,16 +81,15 @@ static inline bool __cpu_uses_extended_idmap_level(void)
 }

 /*
- * Set TCR.T0SZ to its default value (based on VA_BITS)
+ * Ensure TCR.T0SZ is set to the provided value.
 */
 static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
 {
-	unsigned long tcr;
+	unsigned long tcr = read_sysreg(tcr_el1);

-	if (!__cpu_uses_extended_idmap())
+	if ((tcr & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET == t0sz)
		return;

-	tcr = read_sysreg(tcr_el1);
	tcr &= ~TCR_T0SZ_MASK;
	tcr |= t0sz << TCR_T0SZ_OFFSET;
	write_sysreg(tcr, tcr_el1);
diff --git a/arch/arm64/include/asm/module.lds.h b/arch/arm64/include/asm/module.lds.h
index 691f15af788e..810045628c66 100644
--- a/arch/arm64/include/asm/module.lds.h
+++ b/arch/arm64/include/asm/module.lds.h
@@ -1,7 +1,7 @@
 #ifdef CONFIG_ARM64_MODULE_PLTS
 SECTIONS {
-	.plt (NOLOAD) : { BYTE(0) }
-	.init.plt (NOLOAD) : { BYTE(0) }
-	.text.ftrace_trampoline (NOLOAD) : { BYTE(0) }
+	.plt 0 (NOLOAD) : { BYTE(0) }
+	.init.plt 0 (NOLOAD) : { BYTE(0) }
+	.text.ftrace_trampoline 0 (NOLOAD) : { BYTE(0) }
 }
 #endif
diff --git a/arch/arm64/include/asm/mte-def.h b/arch/arm64/include/asm/mte-def.h
index 2d73a1612f09..cf241b0f0a42 100644
--- a/arch/arm64/include/asm/mte-def.h
+++ b/arch/arm64/include/asm/mte-def.h
@@ -11,4 +11,6 @@
 #define MTE_TAG_SIZE		4
 #define MTE_TAG_MASK		GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)

+#define __MTE_PREAMBLE		ARM64_ASM_PREAMBLE ".arch_extension memtag\n"
+
 #endif /* __ASM_MTE_DEF_H  */
diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
index 26349a4b5e2e..7ab500e2ad17 100644
--- a/arch/arm64/include/asm/mte-kasan.h
+++ b/arch/arm64/include/asm/mte-kasan.h
@@ -11,11 +11,14 @@

 #include <linux/types.h>

+#ifdef CONFIG_ARM64_MTE
+
 /*
- * The functions below are meant to be used only for the
- * KASAN_HW_TAGS interface defined in asm/memory.h.
+ * These functions are meant to be only used from KASAN runtime through
+ * the arch_*() interface defined in asm/memory.h.
+ * These functions don't include system_supports_mte() checks,
+ * as KASAN only calls them when MTE is supported and enabled.
 */
-#ifdef CONFIG_ARM64_MTE

 static inline u8 mte_get_ptr_tag(void *ptr)
 {
@@ -25,13 +28,61 @@ static inline u8 mte_get_ptr_tag(void *ptr)
	return tag;
 }

-u8 mte_get_mem_tag(void *addr);
-u8 mte_get_random_tag(void);
-void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag);
+/* Get allocation tag for the address. */
+static inline u8 mte_get_mem_tag(void *addr)
+{
+	asm(__MTE_PREAMBLE "ldg %0, [%0]"
+		: "+r" (addr));
+
+	return mte_get_ptr_tag(addr);
+}
+
+/* Generate a random tag. */
+static inline u8 mte_get_random_tag(void)
+{
+	void *addr;
+
+	asm(__MTE_PREAMBLE "irg %0, %0"
+		: "=r" (addr));
+
+	return mte_get_ptr_tag(addr);
+}
+
+/*
+ * Assign allocation tags for a region of memory based on the pointer tag.
+ * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
+ * size must be non-zero and MTE_GRANULE_SIZE aligned.
+ */
+static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+{
+	u64 curr, end;
+
+	if (!size)
+		return;
+
+	curr = (u64)__tag_set(addr, tag);
+	end = curr + size;
+
+	do {
+		/*
+		 * 'asm volatile' is required to prevent the compiler to move
+		 * the statement outside of the loop.
+		 */
+		asm volatile(__MTE_PREAMBLE "stg %0, [%0]"
+			     :
+			     : "r" (curr)
+			     : "memory");
+
+		curr += MTE_GRANULE_SIZE;
+	} while (curr != end);
+}

 void mte_enable_kernel(void);
 void mte_init_tags(u64 max_tag);

+void mte_set_report_once(bool state);
+bool mte_report_once(void);
+
 #else /* CONFIG_ARM64_MTE */

 static inline u8 mte_get_ptr_tag(void *ptr)
@@ -43,13 +94,14 @@ static inline u8 mte_get_mem_tag(void *addr)
 {
	return 0xFF;
 }
+
 static inline u8 mte_get_random_tag(void)
 {
	return 0xFF;
 }
-static inline void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+
+static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
 {
-	return addr;
 }

 static inline void mte_enable_kernel(void)
@@ -60,6 +112,15 @@ static inline void mte_init_tags(u64 max_tag)
 {
 }

+static inline void mte_set_report_once(bool state)
+{
+}
+
+static inline bool mte_report_once(void)
+{
+	return false;
+}
+
 #endif /* CONFIG_ARM64_MTE */

 #endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index d02aff9f493d..9b557a457f24 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -8,8 +8,6 @@
 #include <asm/compiler.h>
 #include <asm/mte-def.h>

-#define __MTE_PREAMBLE		ARM64_ASM_PREAMBLE ".arch_extension memtag\n"
-
 #ifndef __ASSEMBLY__

 #include <linux/bitfield.h>
diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h
index dd870390d639..8c8cf4297cc3 100644
--- a/arch/arm64/include/asm/numa.h
+++ b/arch/arm64/include/asm/numa.h
@@ -3,52 +3,6 @@
 #define __ASM_NUMA_H

 #include <asm/topology.h>
-
-#ifdef CONFIG_NUMA
-
-#define NR_NODE_MEMBLKS		(MAX_NUMNODES * 2)
-
-int __node_distance(int from, int to);
-#define node_distance(a, b) __node_distance(a, b)
-
-extern nodemask_t numa_nodes_parsed __initdata;
-
-extern bool numa_off;
-
-/* Mappings between node number and cpus on that node. */
-extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
-void numa_clear_node(unsigned int cpu);
-
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-const struct cpumask *cpumask_of_node(int node);
-#else
-/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
-static inline const struct cpumask *cpumask_of_node(int node)
-{
-	if (node == NUMA_NO_NODE)
-		return cpu_all_mask;
-
-	return node_to_cpumask_map[node];
-}
-#endif
-
-void __init arm64_numa_init(void);
-int __init numa_add_memblk(int nodeid, u64 start, u64 end);
-void __init numa_set_distance(int from, int to, int distance);
-void __init numa_free_distance(void);
-void __init early_map_cpu_to_node(unsigned int cpu, int nid);
-void numa_store_cpu_info(unsigned int cpu);
-void numa_add_cpu(unsigned int cpu);
-void numa_remove_cpu(unsigned int cpu);
-
-#else	/* CONFIG_NUMA */
-
-static inline void numa_store_cpu_info(unsigned int cpu) { }
-static inline void numa_add_cpu(unsigned int cpu) { }
-static inline void numa_remove_cpu(unsigned int cpu) { }
-static inline void arm64_numa_init(void) { }
-static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { }
-
-#endif	/* CONFIG_NUMA */
+#include <asm-generic/numa.h>

 #endif	/* __ASM_NUMA_H */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 501562793ce2..e17b96d0e4b5 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -980,7 +980,17 @@ static inline bool arch_faults_on_old_pte(void)

	return !cpu_has_hw_af();
 }
-#define arch_faults_on_old_pte arch_faults_on_old_pte
+#define arch_faults_on_old_pte		arch_faults_on_old_pte
+
+/*
+ * Experimentally, it's cheap to set the access flag in hardware and we
+ * benefit from prefaulting mappings as 'old' to start with.
+ */
+static inline bool arch_wants_old_prefaulted_pte(void)
+{
+	return !arch_faults_on_old_pte();
+}
+#define arch_wants_old_prefaulted_pte	arch_wants_old_prefaulted_pte

 #endif /* !__ASSEMBLY__ */

diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index c6b4f0603024..b112a11e9302 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -76,6 +76,15 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
	return ptrauth_clear_pac(ptr);
 }

+static __always_inline void ptrauth_enable(void)
+{
+	if (!system_supports_address_auth())
+		return;
+	sysreg_clear_set(sctlr_el1, 0, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
+					SCTLR_ELx_ENDA | SCTLR_ELx_ENDB));
+	isb();
+}
+
 #define ptrauth_thread_init_user(tsk)					\
	ptrauth_keys_init_user(&(tsk)->thread.keys_user)
 #define ptrauth_thread_init_kernel(tsk)					\
@@ -84,6 +93,7 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
	ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel)

 #else /* CONFIG_ARM64_PTR_AUTH */
+#define ptrauth_enable()
 #define ptrauth_prctl_reset_keys(tsk, arg)	(-EINVAL)
 #define ptrauth_strip_insn_pac(lr)	(lr)
 #define ptrauth_thread_init_user(tsk)
diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
index 8ff579361731..2f36b16a5b5d 100644
--- a/arch/arm64/include/asm/sections.h
+++ b/arch/arm64/include/asm/sections.h
@@ -11,7 +11,8 @@ extern char __alt_instructions[], __alt_instructions_end[];
 extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
 extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
 extern char __hyp_text_start[], __hyp_text_end[];
-extern char __hyp_data_ro_after_init_start[], __hyp_data_ro_after_init_end[];
+extern char __hyp_rodata_start[], __hyp_rodata_end[];
+extern char __hyp_reloc_begin[], __hyp_reloc_end[];
 extern char __idmap_text_start[], __idmap_text_end[];
 extern char __initdata_begin[], __initdata_end[];
 extern char __inittext_begin[], __inittext_end[];
diff --git a/arch/arm64/include/asm/setup.h b/arch/arm64/include/asm/setup.h
new file mode 100644
index 000000000000..d3320618ed14
--- /dev/null
+++ b/arch/arm64/include/asm/setup.h
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef __ARM64_ASM_SETUP_H
+#define __ARM64_ASM_SETUP_H
+
+#include <uapi/asm/setup.h>
+
+void *get_early_fdt_ptr(void);
+void early_fdt_map(u64 dt_phys);
+
+#endif
diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h
index 1f43fcc79738..eb4a75d720ed 100644
--- a/arch/arm64/include/asm/sparsemem.h
+++ b/arch/arm64/include/asm/sparsemem.h
@@ -7,7 +7,26 @@

 #ifdef CONFIG_SPARSEMEM
 #define MAX_PHYSMEM_BITS	CONFIG_ARM64_PA_BITS
-#define SECTION_SIZE_BITS	30
-#endif
+
+/*
+ * Section size must be at least 512MB for 64K base
+ * page size config. Otherwise it will be less than
+ * (MAX_ORDER - 1) and the build process will fail.
+ */
+#ifdef CONFIG_ARM64_64K_PAGES
+#define SECTION_SIZE_BITS 29
+
+#else
+
+/*
+ * Section size must be at least 128MB for 4K base
+ * page size config. Otherwise PMD based huge page
+ * entries could not be created for vmemmap mappings.
+ * 16K follows 4K for simplicity.
+ */
+#define SECTION_SIZE_BITS 27
+#endif /* CONFIG_ARM64_64K_PAGES */
+
+#endif /* CONFIG_SPARSEMEM*/

 #endif
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 9083d6992603..0525c0b089ed 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -5,8 +5,8 @@
 #ifndef __ASM_SPINLOCK_H
 #define __ASM_SPINLOCK_H

-#include <asm/qrwlock.h>
 #include <asm/qspinlock.h>
+#include <asm/qrwlock.h>

 /* See include/linux/spinlock.h */
 #define smp_mb__after_spinlock()	smp_mb()
diff --git a/arch/arm64/include/asm/stackprotector.h b/arch/arm64/include/asm/stackprotector.h
index 7263e0bac680..33f1bb453150 100644
--- a/arch/arm64/include/asm/stackprotector.h
+++ b/arch/arm64/include/asm/stackprotector.h
@@ -41,6 +41,7 @@ static __always_inline void boot_init_stack_canary(void)
 #endif
	ptrauth_thread_init_kernel(current);
	ptrauth_thread_switch_kernel(current);
+	ptrauth_enable();
 }

 #endif	/* _ASM_STACKPROTECTOR_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 8b5e7e5c3cc8..dfd4edbfe360 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -191,6 +191,7 @@
 #define SYS_GCR_EL1			sys_reg(3, 0, 1, 0, 6)

 #define SYS_ZCR_EL1			sys_reg(3, 0, 1, 2, 0)
+#define SYS_TRFCR_EL1			sys_reg(3, 0, 1, 2, 1)

 #define SYS_TTBR0_EL1			sys_reg(3, 0, 2, 0, 0)
 #define SYS_TTBR1_EL1			sys_reg(3, 0, 2, 0, 1)
@@ -291,7 +292,11 @@
 #define SYS_PMSFCR_EL1_ST_SHIFT		18

 #define SYS_PMSEVFR_EL1			sys_reg(3, 0, 9, 9, 5)
-#define SYS_PMSEVFR_EL1_RES0		0x0000ffff00ff0f55UL
+#define SYS_PMSEVFR_EL1_RES0_8_2	\
+	(GENMASK_ULL(47, 32) | GENMASK_ULL(23, 16) | GENMASK_ULL(11, 8) |\
+	 BIT_ULL(6) | BIT_ULL(4) | BIT_ULL(2) | BIT_ULL(0))
+#define SYS_PMSEVFR_EL1_RES0_8_3	\
+	(SYS_PMSEVFR_EL1_RES0_8_2 & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11)))

 #define SYS_PMSLATFR_EL1		sys_reg(3, 0, 9, 9, 6)
 #define SYS_PMSLATFR_EL1_MINLAT_SHIFT	0
@@ -471,6 +476,7 @@

 #define SYS_SCTLR_EL2			sys_reg(3, 4, 1, 0, 0)
 #define SYS_ZCR_EL2			sys_reg(3, 4, 1, 2, 0)
+#define SYS_TRFCR_EL2			sys_reg(3, 4, 1, 2, 1)
 #define SYS_DACR32_EL2			sys_reg(3, 4, 3, 0, 0)
 #define SYS_SPSR_EL2			sys_reg(3, 4, 4, 0, 0)
 #define SYS_ELR_EL2			sys_reg(3, 4, 4, 0, 1)
@@ -829,6 +835,7 @@
 #define ID_AA64MMFR2_CNP_SHIFT		0

 /* id_aa64dfr0 */
+#define ID_AA64DFR0_TRACE_FILT_SHIFT	40
 #define ID_AA64DFR0_DOUBLELOCK_SHIFT	36
 #define ID_AA64DFR0_PMSVER_SHIFT	32
 #define ID_AA64DFR0_CTX_CMPS_SHIFT	28
@@ -844,9 +851,15 @@
 #define ID_AA64DFR0_PMUVER_8_5		0x6
 #define ID_AA64DFR0_PMUVER_IMP_DEF	0xf

+#define ID_AA64DFR0_PMSVER_8_2		0x1
+#define ID_AA64DFR0_PMSVER_8_3		0x2
+
 #define ID_DFR0_PERFMON_SHIFT		24

+#define ID_DFR0_PERFMON_8_0		0x3
 #define ID_DFR0_PERFMON_8_1		0x4
+#define ID_DFR0_PERFMON_8_4		0x5
+#define ID_DFR0_PERFMON_8_5		0x6

 #define ID_ISAR4_SWP_FRAC_SHIFT		28
 #define ID_ISAR4_PSR_M_SHIFT		24
@@ -1003,6 +1016,14 @@
 /* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
 #define SYS_MPIDR_SAFE_VAL	(BIT(31))

+#define TRFCR_ELx_TS_SHIFT		5
+#define TRFCR_ELx_TS_VIRTUAL		((0x1UL) << TRFCR_ELx_TS_SHIFT)
+#define TRFCR_ELx_TS_GUEST_PHYSICAL	((0x2UL) << TRFCR_ELx_TS_SHIFT)
+#define TRFCR_ELx_TS_PHYSICAL		((0x3UL) << TRFCR_ELx_TS_SHIFT)
+#define TRFCR_EL2_CX			BIT(3)
+#define TRFCR_ELx_ExTRE			BIT(1)
+#define TRFCR_ELx_E0TRE			BIT(0)
+
 #ifdef __ASSEMBLY__

	.irp	num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
diff --git a/arch/arm64/include/asm/trans_pgd.h b/arch/arm64/include/asm/trans_pgd.h
new file mode 100644
index 000000000000..5d08e5adf3d5
--- /dev/null
+++ b/arch/arm64/include/asm/trans_pgd.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2020, Microsoft Corporation.
+ * Pavel Tatashin <pasha.tatashin@soleen.com>
+ */
+
+#ifndef _ASM_TRANS_TABLE_H
+#define _ASM_TRANS_TABLE_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+#include <asm/pgtable-types.h>
+
+/*
+ * trans_alloc_page
+ *	- Allocator that should return exactly one zeroed page, if this
+ *	  allocator fails, trans_pgd_create_copy() and trans_pgd_map_page()
+ *	  return -ENOMEM error.
+ *
+ * trans_alloc_arg
+ *	- Passed to trans_alloc_page as an argument
+ */
+
+struct trans_pgd_info {
+	void * (*trans_alloc_page)(void *arg);
+	void *trans_alloc_arg;
+};
+
+int trans_pgd_create_copy(struct trans_pgd_info *info, pgd_t **trans_pgd,
+			  unsigned long start, unsigned long end);
+
+int trans_pgd_map_page(struct trans_pgd_info *info, pgd_t *trans_pgd,
+		       void *page, unsigned long dst_addr, pgprot_t pgprot);
+
+int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0,
+			 unsigned long *t0sz, void *page);
+
+#endif /* _ASM_TRANS_TABLE_H */
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index f0fe0cc6abe0..0deb88467111 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -87,7 +87,7 @@ static inline void __uaccess_ttbr0_disable(void)
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_pg_dir placed before swapper_pg_dir */
-	write_sysreg(ttbr - PAGE_SIZE, ttbr0_el1);
+	write_sysreg(ttbr - RESERVED_SWAPPER_OFFSET, ttbr0_el1);
	isb();
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 86a9d7b3eabe..949788f5ba40 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -38,7 +38,7 @@
 #define __ARM_NR_compat_set_tls		(__ARM_NR_COMPAT_BASE + 5)
 #define __ARM_NR_COMPAT_END		(__ARM_NR_COMPAT_BASE + 0x800)

-#define __NR_compat_syscalls		442
+#define __NR_compat_syscalls		443
 #endif

 #define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index cccfbbefbf95..3d874f624056 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -891,6 +891,8 @@ __SYSCALL(__NR_faccessat2, sys_faccessat2)
 __SYSCALL(__NR_process_madvise, sys_process_madvise)
 #define __NR_epoll_pwait2 441
 __SYSCALL(__NR_epoll_pwait2, compat_sys_epoll_pwait2)
+#define __NR_mount_setattr 442
+__SYSCALL(__NR_mount_setattr, sys_mount_setattr)

 /*
  * Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index ee6a48df89d9..7379f35ae2c6 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -35,8 +35,13 @@
 */
 #define HVC_RESET_VECTORS 2

+/*
+ * HVC_VHE_RESTART - Upgrade the CPU from EL1 to EL2, if possible
+ */
+#define HVC_VHE_RESTART	3
+
 /* Max number of HYP stub hypercalls */
-#define HVC_STUB_HCALL_NR 3
+#define HVC_STUB_HCALL_NR 4

 /* Error returned when an invalid stub number is passed into x0 */
 #define HVC_STUB_ERR	0xbadca11
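
As a reading aid for the archrandom.h hunks above, the arch_get_random_seed_long() flow after this change can be summarised by the sketch below. It is reconstructed from the diff itself (not a verbatim copy of the resulting header): the SMCCC TRNG firmware call is tried first, and the RNDR-based path is only used as a fallback once the system-wide ARM64_HAS_RNG capability has been established.

static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
{
	struct arm_smccc_res res;

	/* Prefer firmware-backed entropy from the SMCCC TRNG service. */
	if (smccc_trng_available) {
		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 64, &res);
		if ((int)res.a0 >= 0) {
			*v = res.a3;	/* 64 random bits returned in a3 */
			return true;
		}
	}

	/* Fall back to the RNDR register once the capability is known. */
	if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndr(v))
		return true;

	return false;
}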