Diffstat (limited to 'arch')
26 files changed, 135 insertions, 116 deletions
diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts
index 1466580be295..70b1943a86b1 100644
--- a/arch/arm/boot/dts/armada-370-db.dts
+++ b/arch/arm/boot/dts/armada-370-db.dts
@@ -203,27 +203,3 @@
 		compatible = "linux,spdif-dir";
 	};
 };
-
-&pinctrl {
-	/*
-	 * These pins might be muxed as I2S by
-	 * the bootloader, but it conflicts
-	 * with the real I2S pins that are
-	 * muxed using i2s_pins. We must mux
-	 * those pins to a function other than
-	 * I2S.
-	 */
-	pinctrl-0 = <&hog_pins1 &hog_pins2>;
-	pinctrl-names = "default";
-
-	hog_pins1: hog-pins1 {
-		marvell,pins = "mpp6", "mpp8", "mpp10",
-			       "mpp12", "mpp13";
-		marvell,function = "gpio";
-	};
-
-	hog_pins2: hog-pins2 {
-		marvell,pins = "mpp5", "mpp7", "mpp9";
-		marvell,function = "gpo";
-	};
-};
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 2328fe752e9c..bc393b7e5ece 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -338,6 +338,7 @@ CONFIG_USB=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_XHCI_MVEBU=y
 CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_EXYNOS=y
 CONFIG_USB_EHCI_TEGRA=y
 CONFIG_USB_EHCI_HCD_STI=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index f9c863911038..715ae19bc7c8 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1046,6 +1046,15 @@ static int c_show(struct seq_file *m, void *v)
 		seq_printf(m, "model name\t: %s rev %d (%s)\n",
 			   cpu_name, cpuid & 15, elf_platform);
 
+#if defined(CONFIG_SMP)
+		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
+			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
+#else
+		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+			   loops_per_jiffy / (500000/HZ),
+			   (loops_per_jiffy / (5000/HZ)) % 100);
+#endif
 		/* dump out the processor features */
 		seq_puts(m, "Features\t: ");
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5e6052e18850..86ef244c5a24 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -387,6 +387,18 @@ asmlinkage void secondary_start_kernel(void)
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
+	int cpu;
+	unsigned long bogosum = 0;
+
+	for_each_online_cpu(cpu)
+		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
+
+	printk(KERN_INFO "SMP: Total of %d processors activated "
+	       "(%lu.%02lu BogoMIPS).\n",
+	       num_online_cpus(),
+	       bogosum / (500000/HZ),
+	       (bogosum / (5000/HZ)) % 100);
+
 	hyp_mode_check();
 }
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index dd301be89ecc..5376d908eabe 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,6 +1,7 @@
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
 CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -13,14 +14,12 @@ CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RESOURCE_COUNTERS=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
 CONFIG_MEMCG_KMEM=y
 CONFIG_CGROUP_HUGETLB=y
 # CONFIG_UTS_NS is not set
 # CONFIG_IPC_NS is not set
-# CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
@@ -92,7 +91,6 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_VIRTIO_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
-# CONFIG_HMC_DRV is not set
 CONFIG_SPI=y
 CONFIG_SPI_PL022=y
 CONFIG_GPIO_PL061=y
@@ -133,6 +131,8 @@ CONFIG_EXT3_FS=y
 CONFIG_EXT4_FS=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_AUTOFS4_FS=y
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=y
 CONFIG_VFAT_FS=y
@@ -152,14 +152,15 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_LOCKUP_DETECTOR=y
 # CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
 # CONFIG_FTRACE is not set
+CONFIG_KEYS=y
 CONFIG_SECURITY=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
 CONFIG_CRYPTO_SHA2_ARM64_CE=y
 CONFIG_CRYPTO_GHASH_ARM64_CE=y
-CONFIG_CRYPTO_AES_ARM64_CE=y
 CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
 CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
 CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index d34189bceff7..9ce3e680ae1c 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -52,13 +52,14 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
 	dev->archdata.dma_ops = ops;
 }
 
-static inline int set_arch_dma_coherent_ops(struct device *dev)
+static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+				      struct iommu_ops *iommu, bool coherent)
 {
-	dev->archdata.dma_coherent = true;
-	set_dma_ops(dev, &coherent_swiotlb_dma_ops);
-	return 0;
+	dev->archdata.dma_coherent = coherent;
+	if (coherent)
+		set_dma_ops(dev, &coherent_swiotlb_dma_ops);
 }
-#define set_arch_dma_coherent_ops	set_arch_dma_coherent_ops
+#define arch_setup_dma_ops	arch_setup_dma_ops
 
 /* do not use this function in a driver */
 static inline bool is_device_dma_coherent(struct device *dev)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index df22314f57cf..210d632aa5ad 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -298,7 +298,6 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
 #define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
 #define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
 
-#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
 #define pud_write(pud)		pte_write(pud_pte(pud))
 #define pud_pfn(pud)		(((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
 
@@ -401,7 +400,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
 }
 
-#define pud_page(pud)		pmd_page(pud_pmd(pud))
+#define pud_page(pud)		pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
 
 #endif	/* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
 
@@ -437,6 +436,8 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
 	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
 }
 
+#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))
+
 #endif	/* CONFIG_ARM64_PGTABLE_LEVELS > 3 */
 
 #define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 3771b72b6569..2d6b6065fe7f 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -5,6 +5,7 @@
 #include <asm/debug-monitors.h>
 #include <asm/pgtable.h>
 #include <asm/memory.h>
+#include <asm/mmu_context.h>
 #include <asm/smp_plat.h>
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
@@ -98,7 +99,18 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	 */
 	ret = __cpu_suspend_enter(arg, fn);
 	if (ret == 0) {
-		cpu_switch_mm(mm->pgd, mm);
+		/*
+		 * We are resuming from reset with TTBR0_EL1 set to the
+		 * idmap to enable the MMU; restore the active_mm mappings in
+		 * TTBR0_EL1 unless the active_mm == &init_mm, in which case
+		 * the thread entered __cpu_suspend with TTBR0_EL1 set to
+		 * reserved TTBR0 page tables and should be restored as such.
+		 */
+		if (mm == &init_mm)
+			cpu_set_reserved_ttbr0();
+		else
+			cpu_switch_mm(mm->pgd, mm);
+
 		flush_tlb_all();
 
 		/*
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index f3b51b57740a..95c39b95e97e 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -11,7 +11,7 @@
 
 
 
-#define NR_syscalls			318 /* length of syscall table */
+#define NR_syscalls			319 /* length of syscall table */
 
 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 4c2240c1b0cb..461079560c78 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -331,5 +331,6 @@
 #define __NR_getrandom			1339
 #define __NR_memfd_create		1340
 #define __NR_bpf			1341
+#define __NR_execveat			1342
 
 #endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index f5e96dffc63c..fcf8b8cbca0b 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1779,6 +1779,7 @@ sys_call_table:
 	data8 sys_getrandom
 	data8 sys_memfd_create			// 1340
 	data8 sys_bpf
+	data8 sys_execveat
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/nios2/kernel/cpuinfo.c b/arch/nios2/kernel/cpuinfo.c
index 51d5bb90d3e5..a223691dff4f 100644
--- a/arch/nios2/kernel/cpuinfo.c
+++ b/arch/nios2/kernel/cpuinfo.c
@@ -72,6 +72,7 @@ void __init setup_cpuinfo(void)
 	cpuinfo.has_div = fcpu_has(cpu, "altr,has-div");
 	cpuinfo.has_mul = fcpu_has(cpu, "altr,has-mul");
 	cpuinfo.has_mulx = fcpu_has(cpu, "altr,has-mulx");
+	cpuinfo.mmu = fcpu_has(cpu, "altr,has-mmu");
 
 	if (IS_ENABLED(CONFIG_NIOS2_HW_DIV_SUPPORT) && !cpuinfo.has_div)
 		err_cpu("DIV");
diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S
index 83bca17d1008..0bdfd13ff98b 100644
--- a/arch/nios2/kernel/entry.S
+++ b/arch/nios2/kernel/entry.S
@@ -365,30 +365,14 @@ ENTRY(ret_from_interrupt)
 	GET_THREAD_INFO	r1
 	ldw	r4, TI_PREEMPT_COUNT(r1)
 	bne	r4, r0, restore_all
-
-need_resched:
 	ldw	r4, TI_FLAGS(r1)	/* ? Need resched set */
 	BTBZ	r10, r4, TIF_NEED_RESCHED, restore_all
 	ldw	r4, PT_ESTATUS(sp)	/* ? Interrupts off */
 	andi	r10, r4, ESTATUS_EPIE
 	beq	r10, r0, restore_all
-	movia	r4, PREEMPT_ACTIVE
-	stw	r4, TI_PREEMPT_COUNT(r1)
-	rdctl	r10, status		/* enable intrs again */
-	ori	r10, r10 ,STATUS_PIE
-	wrctl	status, r10
-	PUSH	r1
-	call	schedule
-	POP	r1
-	mov	r4, r0
-	stw	r4, TI_PREEMPT_COUNT(r1)
-	rdctl	r10, status		/* disable intrs */
-	andi	r10, r10, %lo(~STATUS_PIE)
-	wrctl	status, r10
-	br	need_resched
-#else
-	br	restore_all
+	call	preempt_schedule_irq
 #endif
+	br	restore_all
 
 /***********************************************************************
  * A few syscall wrappers
diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
index d2d11b7055ba..8121aa6db2ff 100644
--- a/arch/parisc/include/asm/ldcw.h
+++ b/arch/parisc/include/asm/ldcw.h
@@ -33,11 +33,18 @@
 
 #endif /*!CONFIG_PA20*/
 
-/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
+   We don't explicitly expose that "*a" may be written as reload
+   fails to find a register in class R1_REGS when "a" needs to be
+   reloaded when generating 64-bit PIC code.  Instead, we clobber
+   memory to indicate to the compiler that the assembly code reads
+   or writes to items other than those listed in the input and output
+   operands.  This may pessimize the code somewhat but __ldcw is
+   usually used within code blocks surrounded by memory barriors.  */
 
 #define __ldcw(a) ({					\
 	unsigned __ret;					\
-	__asm__ __volatile__(__LDCW " 0(%2),%0"		\
-		: "=r" (__ret), "+m" (*(a)) : "r" (a));	\
+	__asm__ __volatile__(__LDCW " 0(%1),%0"		\
+		: "=r" (__ret) : "r" (a) : "memory");	\
 	__ret;						\
 })
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 19c36cba37c4..a46f5f45570c 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -86,6 +86,11 @@ extern int overlaps_crashkernel(unsigned long start, unsigned long size);
 extern void reserve_crashkernel(void);
 extern void machine_kexec_mask_interrupts(void);
 
+static inline bool kdump_in_progress(void)
+{
+	return crashing_cpu >= 0;
+}
+
 #else /* !CONFIG_KEXEC */
 static inline void crash_kexec_secondary(struct pt_regs *regs) { }
 
@@ -106,6 +111,11 @@ static inline int crash_shutdown_unregister(crash_shutdown_t handler)
 	return 0;
 }
 
+static inline bool kdump_in_progress(void)
+{
+	return false;
+}
+
 #endif /* CONFIG_KEXEC */
 #endif /* ! __ASSEMBLY__ */
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index ce9577d693be..91062eef582f 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -366,3 +366,4 @@ SYSCALL_SPU(seccomp)
SYSCALL_SPU(getrandom)
SYSCALL_SPU(memfd_create)
SYSCALL_SPU(bpf)
+COMPAT_SYS(execveat)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index e0da021caa00..36b79c31eedd 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls		362
+#define __NR_syscalls		363
 
 #define __NR__exit __NR_exit
 #define NR_syscalls	__NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index f55351f2e66e..ef5b5b1f3123 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -384,5 +384,6 @@
 #define __NR_getrandom		359
 #define __NR_memfd_create	360
 #define __NR_bpf		361
+#define __NR_execveat		362
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 879b3aacac32..f96d1ec24189 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -330,7 +330,7 @@ void default_machine_kexec(struct kimage *image)
 	 * using debugger IPI.
 	 */
 
-	if (crashing_cpu == -1)
+	if (!kdump_in_progress())
 		kexec_prepare_cpus();
 
 	pr_debug("kexec: Starting switchover sequence.\n");
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8ec017cb4446..8b2d2dc8ef10 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -700,6 +700,7 @@ void start_secondary(void *unused)
 	smp_store_cpu_info(cpu);
 	set_dec(tb_ticks_per_jiffy);
 	preempt_disable();
+	cpu_callin_map[cpu] = 1;
 
 	if (smp_ops->setup_cpu)
 		smp_ops->setup_cpu(cpu);
@@ -738,14 +739,6 @@ void start_secondary(void *unused)
 	notify_cpu_starting(cpu);
 	set_cpu_online(cpu, true);
 
-	/*
-	 * CPU must be marked active and online before we signal back to the
-	 * master, because the scheduler needs to see the cpu_online and
-	 * cpu_active bits set.
-	 */
-	smp_wmb();
-	cpu_callin_map[cpu] = 1;
-
 	local_irq_enable();
 
 	cpu_startup_entry(CPUHP_ONLINE);
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 469751d92004..b5682fd6c984 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -43,6 +43,7 @@
 #include <asm/trace.h>
 #include <asm/firmware.h>
 #include <asm/plpar_wrappers.h>
+#include <asm/kexec.h>
 #include <asm/fadump.h>
 
 #include "pseries.h"
@@ -267,8 +268,13 @@ static void pSeries_lpar_hptab_clear(void)
 	 * out to the user, but at least this will stop us from
 	 * continuing on further and creating an even more
	 * difficult to debug situation.
+	 *
+	 * There is a known problem when kdump'ing, if cpus are offline
+	 * the above call will fail. Rather than panicking again, keep
+	 * going and hope the kdump kernel is also little endian, which
+	 * it usually is.
 	 */
-	if (rc)
+	if (rc && !kdump_in_progress())
 		panic("Could not enable big endian exceptions");
 }
 #endif
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
index 87bc86821bc9..d195a87ca542 100644
--- a/arch/um/Kconfig.common
+++ b/arch/um/Kconfig.common
@@ -3,6 +3,7 @@ config UML
 	default y
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_UID16
+	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select GENERIC_IRQ_SHOW
 	select GENERIC_CPU_DEVICES
 	select GENERIC_IO
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 10fbed126b11..f83fc6c5e0ba 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4448,7 +4448,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
 	 * zap all shadow pages.
 	 */
 	if (unlikely(kvm_current_mmio_generation(kvm) == 0)) {
-		printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n");
+		printk_ratelimited(KERN_DEBUG "kvm: zapping shadow pages for mmio generation wraparound\n");
 		kvm_mmu_invalidate_zap_all_pages(kvm);
 	}
 }
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index feb852b04598..d4c58d884838 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5840,53 +5840,10 @@ static __init int hardware_setup(void)
 	memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
 	memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
 
-	vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
-	vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
-	vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
-	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
-	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
-	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
-	vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
-
-	memcpy(vmx_msr_bitmap_legacy_x2apic,
-			vmx_msr_bitmap_legacy, PAGE_SIZE);
-	memcpy(vmx_msr_bitmap_longmode_x2apic,
-			vmx_msr_bitmap_longmode, PAGE_SIZE);
-
-	if (enable_apicv) {
-		for (msr = 0x800; msr <= 0x8ff; msr++)
-			vmx_disable_intercept_msr_read_x2apic(msr);
-
-		/* According SDM, in x2apic mode, the whole id reg is used.
-		 * But in KVM, it only use the highest eight bits. Need to
-		 * intercept it */
-		vmx_enable_intercept_msr_read_x2apic(0x802);
-		/* TMCCT */
-		vmx_enable_intercept_msr_read_x2apic(0x839);
-		/* TPR */
-		vmx_disable_intercept_msr_write_x2apic(0x808);
-		/* EOI */
-		vmx_disable_intercept_msr_write_x2apic(0x80b);
-		/* SELF-IPI */
-		vmx_disable_intercept_msr_write_x2apic(0x83f);
-	}
-
-	if (enable_ept) {
-		kvm_mmu_set_mask_ptes(0ull,
-			(enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
-			(enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
-			0ull, VMX_EPT_EXECUTABLE_MASK);
-		ept_set_mmio_spte_mask();
-		kvm_enable_tdp();
-	} else
-		kvm_disable_tdp();
-
-	update_ple_window_actual_max();
-
 	if (setup_vmcs_config(&vmcs_config) < 0) {
 		r = -EIO;
 		goto out7;
-       }
+	}
 
 	if (boot_cpu_has(X86_FEATURE_NX))
 		kvm_enable_efer_bits(EFER_NX);
@@ -5945,6 +5902,49 @@ static __init int hardware_setup(void)
 	if (nested)
 		nested_vmx_setup_ctls_msrs();
 
+	vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
+	vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
+	vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
+	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
+	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
+	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
+	vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
+
+	memcpy(vmx_msr_bitmap_legacy_x2apic,
+			vmx_msr_bitmap_legacy, PAGE_SIZE);
+	memcpy(vmx_msr_bitmap_longmode_x2apic,
+			vmx_msr_bitmap_longmode, PAGE_SIZE);
+
+	if (enable_apicv) {
+		for (msr = 0x800; msr <= 0x8ff; msr++)
+			vmx_disable_intercept_msr_read_x2apic(msr);
+
+		/* According SDM, in x2apic mode, the whole id reg is used.
+		 * But in KVM, it only use the highest eight bits. Need to
+		 * intercept it */
+		vmx_enable_intercept_msr_read_x2apic(0x802);
+		/* TMCCT */
+		vmx_enable_intercept_msr_read_x2apic(0x839);
+		/* TPR */
+		vmx_disable_intercept_msr_write_x2apic(0x808);
+		/* EOI */
+		vmx_disable_intercept_msr_write_x2apic(0x80b);
+		/* SELF-IPI */
+		vmx_disable_intercept_msr_write_x2apic(0x83f);
+	}
+
+	if (enable_ept) {
+		kvm_mmu_set_mask_ptes(0ull,
+			(enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
+			(enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
+			0ull, VMX_EPT_EXECUTABLE_MASK);
+		ept_set_mmio_spte_mask();
+		kvm_enable_tdp();
+	} else
+		kvm_disable_tdp();
+
+	update_ple_window_actual_max();
+
 	return alloc_kvm_area();
 
 out7:
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
index 531d4269e2e3..bd16d6c370ec 100644
--- a/arch/x86/um/sys_call_table_32.c
+++ b/arch/x86/um/sys_call_table_32.c
@@ -34,7 +34,7 @@ typedef asmlinkage void (*sys_call_ptr_t)(void);
 
 extern asmlinkage void sys_ni_syscall(void);
 
-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
+const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
 	/*
 	 * Smells like a compiler bug -- it doesn't work
 	 * when the & below is removed.
diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c
index 20c3649d0691..5cdfa9db2217 100644
--- a/arch/x86/um/sys_call_table_64.c
+++ b/arch/x86/um/sys_call_table_64.c
@@ -47,7 +47,7 @@ typedef void (*sys_call_ptr_t)(void);
 
 extern void sys_ni_syscall(void);
 
-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
+const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
 	/*
 	 * Smells like a compiler bug -- it doesn't work
 	 * when the & below is removed.