Diffstat (limited to 'arch'): 97 files changed, 1021 insertions(+), 655 deletions(-)
diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi index 1e12aeff403b..aa537ed13f0a 100644 --- a/arch/arm/boot/dts/bcm2835.dtsi +++ b/arch/arm/boot/dts/bcm2835.dtsi @@ -85,6 +85,8 @@ reg = <0x7e205000 0x1000>; interrupts = <2 21>; clocks = <&clk_i2c>; + #address-cells = <1>; + #size-cells = <0>; status = "disabled"; }; @@ -93,6 +95,8 @@ reg = <0x7e804000 0x1000>; interrupts = <2 21>; clocks = <&clk_i2c>; + #address-cells = <1>; + #size-cells = <0>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/cros5250-common.dtsi b/arch/arm/boot/dts/cros5250-common.dtsi index dc259e8b8a73..9b186ac06c8b 100644 --- a/arch/arm/boot/dts/cros5250-common.dtsi +++ b/arch/arm/boot/dts/cros5250-common.dtsi @@ -27,6 +27,13 @@ i2c2_bus: i2c2-bus { samsung,pin-pud = <0>; }; + + max77686_irq: max77686-irq { + samsung,pins = "gpx3-2"; + samsung,pin-function = <0>; + samsung,pin-pud = <0>; + samsung,pin-drv = <0>; + }; }; i2c@12C60000 { @@ -35,6 +42,11 @@ max77686@09 { compatible = "maxim,max77686"; + interrupt-parent = <&gpx3>; + interrupts = <2 0>; + pinctrl-names = "default"; + pinctrl-0 = <&max77686_irq>; + wakeup-source; reg = <0x09>; voltage-regulators { diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index 59154dc15fe4..fb28b2ecb1db 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi @@ -161,7 +161,7 @@ clocks = <&clks 197>, <&clks 3>, <&clks 197>, <&clks 107>, <&clks 0>, <&clks 118>, - <&clks 62>, <&clks 139>, + <&clks 0>, <&clks 139>, <&clks 0>; clock-names = "core", "rxtx0", "rxtx1", "rxtx2", diff --git a/arch/arm/boot/dts/omap-zoom-common.dtsi b/arch/arm/boot/dts/omap-zoom-common.dtsi index b0ee342598f0..68221fab978d 100644 --- a/arch/arm/boot/dts/omap-zoom-common.dtsi +++ b/arch/arm/boot/dts/omap-zoom-common.dtsi @@ -13,7 +13,7 @@ * they probably share the same GPIO IRQ * REVISIT: Add timing support from slls644g.pdf */ - 8250@3,0 { + uart@3,0 { compatible = "ns16550a"; reg = <3 0 0x100>; bank-width = <2>; diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi index a2bfcde858a6..d0c5b37e248c 100644 --- a/arch/arm/boot/dts/omap2.dtsi +++ b/arch/arm/boot/dts/omap2.dtsi @@ -9,6 +9,7 @@ */ #include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/interrupt-controller/irq.h> #include <dt-bindings/pinctrl/omap.h> #include "skeleton.dtsi" @@ -21,6 +22,8 @@ serial0 = &uart1; serial1 = &uart2; serial2 = &uart3; + i2c0 = &i2c1; + i2c1 = &i2c2; }; cpus { @@ -53,6 +56,28 @@ ranges; ti,hwmods = "l3_main"; + aes: aes@480a6000 { + compatible = "ti,omap2-aes"; + ti,hwmods = "aes"; + reg = <0x480a6000 0x50>; + dmas = <&sdma 9 &sdma 10>; + dma-names = "tx", "rx"; + }; + + hdq1w: 1w@480b2000 { + compatible = "ti,omap2420-1w"; + ti,hwmods = "hdq1w"; + reg = <0x480b2000 0x1000>; + interrupts = <58>; + }; + + mailbox: mailbox@48094000 { + compatible = "ti,omap2-mailbox"; + ti,hwmods = "mailbox"; + reg = <0x48094000 0x200>; + interrupts = <26>; + }; + intc: interrupt-controller@1 { compatible = "ti,omap2-intc"; interrupt-controller; @@ -63,6 +88,7 @@ sdma: dma-controller@48056000 { compatible = "ti,omap2430-sdma", "ti,omap2420-sdma"; + ti,hwmods = "dma"; reg = <0x48056000 0x1000>; interrupts = <12>, <13>, @@ -73,21 +99,91 @@ #dma-requests = <64>; }; + i2c1: i2c@48070000 { + compatible = "ti,omap2-i2c"; + ti,hwmods = "i2c1"; + reg = <0x48070000 0x80>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <56>; + dmas = <&sdma 27 &sdma 28>; + dma-names = "tx", "rx"; + }; + + i2c2: i2c@48072000 { + compatible = 
"ti,omap2-i2c"; + ti,hwmods = "i2c2"; + reg = <0x48072000 0x80>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <57>; + dmas = <&sdma 29 &sdma 30>; + dma-names = "tx", "rx"; + }; + + mcspi1: mcspi@48098000 { + compatible = "ti,omap2-mcspi"; + ti,hwmods = "mcspi1"; + reg = <0x48098000 0x100>; + interrupts = <65>; + dmas = <&sdma 35 &sdma 36 &sdma 37 &sdma 38 + &sdma 39 &sdma 40 &sdma 41 &sdma 42>; + dma-names = "tx0", "rx0", "tx1", "rx1", + "tx2", "rx2", "tx3", "rx3"; + }; + + mcspi2: mcspi@4809a000 { + compatible = "ti,omap2-mcspi"; + ti,hwmods = "mcspi2"; + reg = <0x4809a000 0x100>; + interrupts = <66>; + dmas = <&sdma 43 &sdma 44 &sdma 45 &sdma 46>; + dma-names = "tx0", "rx0", "tx1", "rx1"; + }; + + rng: rng@480a0000 { + compatible = "ti,omap2-rng"; + ti,hwmods = "rng"; + reg = <0x480a0000 0x50>; + interrupts = <36>; + }; + + sham: sham@480a4000 { + compatible = "ti,omap2-sham"; + ti,hwmods = "sham"; + reg = <0x480a4000 0x64>; + interrupts = <51>; + dmas = <&sdma 13>; + dma-names = "rx"; + }; + uart1: serial@4806a000 { compatible = "ti,omap2-uart"; ti,hwmods = "uart1"; + reg = <0x4806a000 0x2000>; + interrupts = <72>; + dmas = <&sdma 49 &sdma 50>; + dma-names = "tx", "rx"; clock-frequency = <48000000>; }; uart2: serial@4806c000 { compatible = "ti,omap2-uart"; ti,hwmods = "uart2"; + reg = <0x4806c000 0x400>; + interrupts = <73>; + dmas = <&sdma 51 &sdma 52>; + dma-names = "tx", "rx"; clock-frequency = <48000000>; }; uart3: serial@4806e000 { compatible = "ti,omap2-uart"; ti,hwmods = "uart3"; + reg = <0x4806e000 0x400>; + interrupts = <74>; + dmas = <&sdma 53 &sdma 54>; + dma-names = "tx", "rx"; clock-frequency = <48000000>; }; diff --git a/arch/arm/boot/dts/omap2420.dtsi b/arch/arm/boot/dts/omap2420.dtsi index c8f9c55169ea..60c605de22dd 100644 --- a/arch/arm/boot/dts/omap2420.dtsi +++ b/arch/arm/boot/dts/omap2420.dtsi @@ -114,6 +114,15 @@ dma-names = "tx", "rx"; }; + msdi1: mmc@4809c000 { + compatible = "ti,omap2420-mmc"; + ti,hwmods = "msdi1"; + reg = <0x4809c000 0x80>; + interrupts = <83>; + dmas = <&sdma 61 &sdma 62>; + dma-names = "tx", "rx"; + }; + timer1: timer@48028000 { compatible = "ti,omap2420-timer"; reg = <0x48028000 0x400>; @@ -121,5 +130,19 @@ ti,hwmods = "timer1"; ti,timer-alwon; }; + + wd_timer2: wdt@48022000 { + compatible = "ti,omap2-wdt"; + ti,hwmods = "wd_timer2"; + reg = <0x48022000 0x80>; + }; }; }; + +&i2c1 { + compatible = "ti,omap2420-i2c"; +}; + +&i2c2 { + compatible = "ti,omap2420-i2c"; +}; diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi index c535a5a2b27f..d624345666f5 100644 --- a/arch/arm/boot/dts/omap2430.dtsi +++ b/arch/arm/boot/dts/omap2430.dtsi @@ -175,6 +175,25 @@ dma-names = "tx", "rx"; }; + mmc1: mmc@4809c000 { + compatible = "ti,omap2-hsmmc"; + reg = <0x4809c000 0x200>; + interrupts = <83>; + ti,hwmods = "mmc1"; + ti,dual-volt; + dmas = <&sdma 61>, <&sdma 62>; + dma-names = "tx", "rx"; + }; + + mmc2: mmc@480b4000 { + compatible = "ti,omap2-hsmmc"; + reg = <0x480b4000 0x200>; + interrupts = <86>; + ti,hwmods = "mmc2"; + dmas = <&sdma 47>, <&sdma 48>; + dma-names = "tx", "rx"; + }; + timer1: timer@49018000 { compatible = "ti,omap2420-timer"; reg = <0x49018000 0x400>; @@ -182,5 +201,35 @@ ti,hwmods = "timer1"; ti,timer-alwon; }; + + mcspi3: mcspi@480b8000 { + compatible = "ti,omap2-mcspi"; + ti,hwmods = "mcspi3"; + reg = <0x480b8000 0x100>; + interrupts = <91>; + dmas = <&sdma 15 &sdma 16 &sdma 23 &sdma 24>; + dma-names = "tx0", "rx0", "tx1", "rx1"; + }; + + usb_otg_hs: usb_otg_hs@480ac000 { + compatible = 
"ti,omap2-musb"; + ti,hwmods = "usb_otg_hs"; + reg = <0x480ac000 0x1000>; + interrupts = <93>; + }; + + wd_timer2: wdt@49016000 { + compatible = "ti,omap2-wdt"; + ti,hwmods = "wd_timer2"; + reg = <0x49016000 0x80>; + }; }; }; + +&i2c1 { + compatible = "ti,omap2430-i2c"; +}; + +&i2c2 { + compatible = "ti,omap2430-i2c"; +}; diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 8a6f6db14ee4..098f7dd6d564 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -225,4 +225,7 @@ static inline int kvm_arch_dev_ioctl_check_extension(long ext) int kvm_perf_init(void); int kvm_perf_teardown(void); +u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid); +int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value); + #endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h index c498b60c0505..ef0c8785ba16 100644 --- a/arch/arm/include/uapi/asm/kvm.h +++ b/arch/arm/include/uapi/asm/kvm.h @@ -119,6 +119,26 @@ struct kvm_arch_memory_slot { #define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800 #define KVM_REG_ARM_32_CRN_SHIFT 11 +#define ARM_CP15_REG_SHIFT_MASK(x,n) \ + (((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK) + +#define __ARM_CP15_REG(op1,crn,crm,op2) \ + (KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT) | \ + ARM_CP15_REG_SHIFT_MASK(op1, OPC1) | \ + ARM_CP15_REG_SHIFT_MASK(crn, 32_CRN) | \ + ARM_CP15_REG_SHIFT_MASK(crm, CRM) | \ + ARM_CP15_REG_SHIFT_MASK(op2, 32_OPC2)) + +#define ARM_CP15_REG32(...) (__ARM_CP15_REG(__VA_ARGS__) | KVM_REG_SIZE_U32) + +#define __ARM_CP15_REG64(op1,crm) \ + (__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64) +#define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__) + +#define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1) +#define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14) +#define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14) + /* Normal registers are mapped as coprocessor 16. */ #define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT) #define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4) @@ -143,6 +163,14 @@ struct kvm_arch_memory_slot { #define KVM_REG_ARM_VFP_FPINST 0x1009 #define KVM_REG_ARM_VFP_FPINST2 0x100A +/* Device Control API: ARM VGIC */ +#define KVM_DEV_ARM_VGIC_GRP_ADDR 0 +#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 +#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2 +#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32 +#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) +#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0 +#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) /* KVM_IRQ_LINE irq field index values */ #define KVM_ARM_IRQ_TYPE_SHIFT 24 diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 151eb9160482..2d4b4a8068c8 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -137,6 +137,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) if (ret) goto out_free_stage2_pgd; + kvm_timer_init(kvm); + /* Mark the initial VMID generation invalid */ kvm->arch.vmid_gen = 0; @@ -188,6 +190,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_IRQCHIP: r = vgic_present; break; + case KVM_CAP_DEVICE_CTRL: case KVM_CAP_USER_MEMORY: case KVM_CAP_SYNC_MMU: case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: @@ -339,6 +342,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { + /* + * The arch-generic KVM code expects the cpu field of a vcpu to be -1 + * if the vcpu is no longer assigned to a cpu. 
This is used for the + * optimized make_all_cpus_request path. + */ + vcpu->cpu = -1; + kvm_arm_set_running_vcpu(NULL); } @@ -462,6 +472,8 @@ static void update_vttbr(struct kvm *kvm) static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) { + int ret; + if (likely(vcpu->arch.has_run_once)) return 0; @@ -471,9 +483,8 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) * Initialize the VGIC before running a vcpu the first time on * this VM. */ - if (irqchip_in_kernel(vcpu->kvm) && - unlikely(!vgic_initialized(vcpu->kvm))) { - int ret = kvm_vgic_init(vcpu->kvm); + if (unlikely(!vgic_initialized(vcpu->kvm))) { + ret = kvm_vgic_init(vcpu->kvm); if (ret) return ret; } @@ -780,7 +791,7 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm, case KVM_ARM_DEVICE_VGIC_V2: if (!vgic_present) return -ENXIO; - return kvm_vgic_set_addr(kvm, type, dev_addr->addr); + return kvm_vgic_addr(kvm, type, &dev_addr->addr, true); default: return -ENODEV; } diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c index 20f8d97904af..2786eae10c0d 100644 --- a/arch/arm/kvm/guest.c +++ b/arch/arm/kvm/guest.c @@ -109,6 +109,83 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) return -EINVAL; } +#ifndef CONFIG_KVM_ARM_TIMER + +#define NUM_TIMER_REGS 0 + +static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) +{ + return 0; +} + +static bool is_timer_reg(u64 index) +{ + return false; +} + +int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) +{ + return 0; +} + +u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid) +{ + return 0; +} + +#else + +#define NUM_TIMER_REGS 3 + +static bool is_timer_reg(u64 index) +{ + switch (index) { + case KVM_REG_ARM_TIMER_CTL: + case KVM_REG_ARM_TIMER_CNT: + case KVM_REG_ARM_TIMER_CVAL: + return true; + } + return false; +} + +static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) +{ + if (put_user(KVM_REG_ARM_TIMER_CTL, uindices)) + return -EFAULT; + uindices++; + if (put_user(KVM_REG_ARM_TIMER_CNT, uindices)) + return -EFAULT; + uindices++; + if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices)) + return -EFAULT; + + return 0; +} + +#endif + +static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + void __user *uaddr = (void __user *)(long)reg->addr; + u64 val; + int ret; + + ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)); + if (ret != 0) + return ret; + + return kvm_arm_timer_set_reg(vcpu, reg->id, val); +} + +static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + void __user *uaddr = (void __user *)(long)reg->addr; + u64 val; + + val = kvm_arm_timer_get_reg(vcpu, reg->id); + return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)); +} + static unsigned long num_core_regs(void) { return sizeof(struct kvm_regs) / sizeof(u32); @@ -121,7 +198,8 @@ static unsigned long num_core_regs(void) */ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) { - return num_core_regs() + kvm_arm_num_coproc_regs(vcpu); + return num_core_regs() + kvm_arm_num_coproc_regs(vcpu) + + NUM_TIMER_REGS; } /** @@ -133,6 +211,7 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) { unsigned int i; const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE; + int ret; for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) { if (put_user(core_reg | i, uindices)) @@ -140,6 +219,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) uindices++; } + ret = copy_timer_indices(vcpu, uindices); 
+ if (ret) + return ret; + uindices += NUM_TIMER_REGS; + return kvm_arm_copy_coproc_indices(vcpu, uindices); } @@ -153,6 +237,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) return get_core_reg(vcpu, reg); + if (is_timer_reg(reg->id)) + return get_timer_reg(vcpu, reg); + return kvm_arm_coproc_get_reg(vcpu, reg); } @@ -166,6 +253,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) return set_core_reg(vcpu, reg); + if (is_timer_reg(reg->id)) + return set_timer_reg(vcpu, reg); + return kvm_arm_coproc_set_reg(vcpu, reg); } diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 1f25f3e99c05..adcef406ff0a 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -19,11 +19,11 @@ secure-common = omap-smc.o omap-secure.o obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common) obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) $(secure-common) -obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common) $(secure-common) +obj-$(CONFIG_ARCH_OMAP4) += $(hwmod-common) $(secure-common) obj-$(CONFIG_SOC_AM33XX) += irq.o $(hwmod-common) -obj-$(CONFIG_SOC_OMAP5) += prm44xx.o $(hwmod-common) $(secure-common) +obj-$(CONFIG_SOC_OMAP5) += $(hwmod-common) $(secure-common) obj-$(CONFIG_SOC_AM43XX) += $(hwmod-common) $(secure-common) -obj-$(CONFIG_SOC_DRA7XX) += prm44xx.o $(hwmod-common) $(secure-common) +obj-$(CONFIG_SOC_DRA7XX) += $(hwmod-common) $(secure-common) ifneq ($(CONFIG_SND_OMAP_SOC_MCBSP),) obj-y += mcbsp.o diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index f7644febee81..e30ef6797c63 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h @@ -299,7 +299,6 @@ struct omap_sdrc_params; extern void omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0, struct omap_sdrc_params *sdrc_cs1); struct omap2_hsmmc_info; -extern int omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers); extern void omap_reserve(void); struct omap_hwmod; diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index a4e536b11ec9..58347bb874a0 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c @@ -32,7 +32,6 @@ #include "soc.h" #include "iomap.h" -#include "mux.h" #include "control.h" #include "display.h" #include "prm.h" @@ -102,90 +101,13 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initconst = { { "dss_hdmi", "omapdss_hdmi", -1 }, }; -static void __init omap4_tpd12s015_mux_pads(void) -{ - omap_mux_init_signal("hdmi_cec", - OMAP_PIN_INPUT_PULLUP); - omap_mux_init_signal("hdmi_ddc_scl", - OMAP_PIN_INPUT_PULLUP); - omap_mux_init_signal("hdmi_ddc_sda", - OMAP_PIN_INPUT_PULLUP); -} - -static void __init omap4_hdmi_mux_pads(enum omap_hdmi_flags flags) -{ - u32 reg; - u16 control_i2c_1; - - /* - * CONTROL_I2C_1: HDMI_DDC_SDA_PULLUPRESX (bit 28) and - * HDMI_DDC_SCL_PULLUPRESX (bit 24) are set to disable - * internal pull up resistor. 
- */ - if (flags & OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP) { - control_i2c_1 = OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_I2C_1; - reg = omap4_ctrl_pad_readl(control_i2c_1); - reg |= (OMAP4_HDMI_DDC_SDA_PULLUPRESX_MASK | - OMAP4_HDMI_DDC_SCL_PULLUPRESX_MASK); - omap4_ctrl_pad_writel(reg, control_i2c_1); - } -} - -static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) -{ - u32 enable_mask, enable_shift; - u32 pipd_mask, pipd_shift; - u32 reg; - - if (dsi_id == 0) { - enable_mask = OMAP4_DSI1_LANEENABLE_MASK; - enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT; - pipd_mask = OMAP4_DSI1_PIPD_MASK; - pipd_shift = OMAP4_DSI1_PIPD_SHIFT; - } else if (dsi_id == 1) { - enable_mask = OMAP4_DSI2_LANEENABLE_MASK; - enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT; - pipd_mask = OMAP4_DSI2_PIPD_MASK; - pipd_shift = OMAP4_DSI2_PIPD_SHIFT; - } else { - return -ENODEV; - } - - reg = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY); - - reg &= ~enable_mask; - reg &= ~pipd_mask; - - reg |= (lanes << enable_shift) & enable_mask; - reg |= (lanes << pipd_shift) & pipd_mask; - - omap4_ctrl_pad_writel(reg, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY); - - return 0; -} - -int __init omap_hdmi_init(enum omap_hdmi_flags flags) -{ - if (cpu_is_omap44xx()) { - omap4_hdmi_mux_pads(flags); - omap4_tpd12s015_mux_pads(); - } - - return 0; -} - static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask) { - if (cpu_is_omap44xx()) - return omap4_dsi_mux_pads(dsi_id, lane_mask); - return 0; } static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask) { - if (cpu_is_omap44xx()) - omap4_dsi_mux_pads(dsi_id, 0); } static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput) diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c index 81de56251955..d24926e6340f 100644 --- a/arch/arm/mach-omap2/gpmc.c +++ b/arch/arm/mach-omap2/gpmc.c @@ -1502,6 +1502,22 @@ static int gpmc_probe_generic_child(struct platform_device *pdev, } /* + * For some GPMC devices we still need to rely on the bootloader + * timings because the devices can be connected via FPGA. So far + * the list is smc91x on the omap2 SDP boards, and 8250 on zooms. + * REVISIT: Add timing support from slls644g.pdf and from the + * lan91c96 manual. + */ + if (of_device_is_compatible(child, "ns16550a") || + of_device_is_compatible(child, "smsc,lan91c94") || + of_device_is_compatible(child, "smsc,lan91c111")) { + dev_warn(&pdev->dev, + "%s using bootloader timings on CS%d\n", + child->name, cs); + goto no_timings; + } + + /* * FIXME: gpmc_cs_request() will map the CS to an arbitary * location in the gpmc address space. 
When booting with * device-tree we want the NOR flash to be mapped to the @@ -1529,6 +1545,7 @@ static int gpmc_probe_generic_child(struct platform_device *pdev, gpmc_read_timings_dt(child, &gpmc_t); gpmc_cs_set_timings(cs, &gpmc_t); +no_timings: if (of_platform_device_create(child, NULL, &pdev->dev)) return 0; @@ -1541,42 +1558,6 @@ err: return ret; } -/* - * REVISIT: Add timing support from slls644g.pdf - */ -static int gpmc_probe_8250(struct platform_device *pdev, - struct device_node *child) -{ - struct resource res; - unsigned long base; - int ret, cs; - - if (of_property_read_u32(child, "reg", &cs) < 0) { - dev_err(&pdev->dev, "%s has no 'reg' property\n", - child->full_name); - return -ENODEV; - } - - if (of_address_to_resource(child, 0, &res) < 0) { - dev_err(&pdev->dev, "%s has malformed 'reg' property\n", - child->full_name); - return -ENODEV; - } - - ret = gpmc_cs_request(cs, resource_size(&res), &base); - if (ret < 0) { - dev_err(&pdev->dev, "cannot request GPMC CS %d\n", cs); - return ret; - } - - if (of_platform_device_create(child, NULL, &pdev->dev)) - return 0; - - dev_err(&pdev->dev, "failed to create gpmc child %s\n", child->name); - - return -ENODEV; -} - static int gpmc_probe_dt(struct platform_device *pdev) { int ret; @@ -1618,10 +1599,9 @@ static int gpmc_probe_dt(struct platform_device *pdev) else if (of_node_cmp(child->name, "onenand") == 0) ret = gpmc_probe_onenand_child(pdev, child); else if (of_node_cmp(child->name, "ethernet") == 0 || - of_node_cmp(child->name, "nor") == 0) + of_node_cmp(child->name, "nor") == 0 || + of_node_cmp(child->name, "uart") == 0) ret = gpmc_probe_generic_child(pdev, child); - else if (of_node_cmp(child->name, "8250") == 0) - ret = gpmc_probe_8250(pdev, child); if (WARN(ret < 0, "%s: probing gpmc child %s failed\n", __func__, child->full_name)) diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h index 8cc7d331437d..3e97c6c8ecf1 100644 --- a/arch/arm/mach-omap2/omap-secure.h +++ b/arch/arm/mach-omap2/omap-secure.h @@ -76,6 +76,13 @@ static inline void omap_barrier_reserve_memblock(void) { } #endif +#ifdef CONFIG_SOC_HAS_REALTIME_COUNTER void set_cntfreq(void); +#else +static inline void set_cntfreq(void) +{ +} +#endif + #endif /* __ASSEMBLER__ */ #endif /* OMAP_ARCH_OMAP_SECURE_H */ diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c index 57911430324e..b39efd46abf9 100644 --- a/arch/arm/mach-omap2/omap4-common.c +++ b/arch/arm/mach-omap2/omap4-common.c @@ -35,7 +35,6 @@ #include "iomap.h" #include "common.h" #include "mmc.h" -#include "hsmmc.h" #include "prminst44xx.h" #include "prcm_mpu44xx.h" #include "omap4-sar-layout.h" @@ -284,59 +283,3 @@ skip_errata_init: omap_wakeupgen_init(); irqchip_init(); } - -#if defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE) -static int omap4_twl6030_hsmmc_late_init(struct device *dev) -{ - int irq = 0; - struct platform_device *pdev = container_of(dev, - struct platform_device, dev); - struct omap_mmc_platform_data *pdata = dev->platform_data; - - /* Setting MMC1 Card detect Irq */ - if (pdev->id == 0) { - irq = twl6030_mmc_card_detect_config(); - if (irq < 0) { - dev_err(dev, "%s: Error card detect config(%d)\n", - __func__, irq); - return irq; - } - pdata->slots[0].card_detect_irq = irq; - pdata->slots[0].card_detect = twl6030_mmc_card_detect; - } - return 0; -} - -static __init void omap4_twl6030_hsmmc_set_late_init(struct device *dev) -{ - struct omap_mmc_platform_data *pdata; - - /* dev can be null if CONFIG_MMC_OMAP_HS 
is not set */
-	if (!dev) {
-		pr_err("Failed %s\n", __func__);
-		return;
-	}
-	pdata = dev->platform_data;
-	pdata->init = omap4_twl6030_hsmmc_late_init;
-}
-
-int __init omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers)
-{
-	struct omap2_hsmmc_info *c;
-
-	omap_hsmmc_init(controllers);
-	for (c = controllers; c->mmc; c++) {
-		/* pdev can be null if CONFIG_MMC_OMAP_HS is not set */
-		if (!c->pdev)
-			continue;
-		omap4_twl6030_hsmmc_set_late_init(&c->pdev->dev);
-	}
-
-	return 0;
-}
-#else
-int __init omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers)
-{
-	return 0;
-}
-#endif
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 93b80e5da8d4..1f3770a8a728 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -120,7 +120,7 @@ static void omap3_save_secure_ram_context(void)
 	 * will hang the system.
 	 */
 	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
-	ret = _omap_save_secure_sram((u32 *)
+	ret = _omap_save_secure_sram((u32 *)(unsigned long)
 				__pa(omap3_secure_ram_storage));
 	pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
 	/* Following is for error tracking, it should not happen */
diff --git a/arch/arm/mach-omap2/prm44xx_54xx.h b/arch/arm/mach-omap2/prm44xx_54xx.h
index 7a976065e138..8d95aa543ef5 100644
--- a/arch/arm/mach-omap2/prm44xx_54xx.h
+++ b/arch/arm/mach-omap2/prm44xx_54xx.h
@@ -43,7 +43,7 @@ extern void omap4_prm_vcvp_write(u32 val, u8 offset);
 extern u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset);
 
 #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
-	defined(CONFIG_SOC_DRA7XX)
+	defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM43XX)
 void omap44xx_prm_reconfigure_io_chain(void);
 #else
 static inline void omap44xx_prm_reconfigure_io_chain(void)
diff --git a/arch/arm/mach-tegra/fuse.c b/arch/arm/mach-tegra/fuse.c
index d4639c506622..9a4e910c3796 100644
--- a/arch/arm/mach-tegra/fuse.c
+++ b/arch/arm/mach-tegra/fuse.c
@@ -209,13 +209,3 @@ void __init tegra_init_fuse(void)
 		tegra_sku_id, tegra_cpu_process_id,
 		tegra_core_process_id);
 }
-
-unsigned long long tegra_chip_uid(void)
-{
-	unsigned long long lo, hi;
-
-	lo = tegra_fuse_readl(FUSE_UID_LOW);
-	hi = tegra_fuse_readl(FUSE_UID_HIGH);
-	return (hi << 32ull) | lo;
-}
-EXPORT_SYMBOL(tegra_chip_uid);
diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
index 033d34dcbd3f..c26ef5b92ca7 100644
--- a/arch/arm/mach-vexpress/spc.c
+++ b/arch/arm/mach-vexpress/spc.c
@@ -53,6 +53,11 @@
 #define A15_BX_ADDR0		0x68
 #define A7_BX_ADDR0		0x78
 
+/* SPC CPU/cluster reset status */
+#define STANDBYWFI_STAT		0x3c
+#define STANDBYWFI_STAT_A15_CPU_MASK(cpu)	(1 << (cpu))
+#define STANDBYWFI_STAT_A7_CPU_MASK(cpu)	(1 << (3 + (cpu)))
+
 /* SPC system config interface registers */
 #define SYSCFG_WDATA		0x70
 #define SYSCFG_RDATA		0x74
@@ -213,6 +218,41 @@ void ve_spc_powerdown(u32 cluster, bool enable)
 	writel_relaxed(enable, info->baseaddr + pwdrn_reg);
 }
 
+static u32 standbywfi_cpu_mask(u32 cpu, u32 cluster)
+{
+	return cluster_is_a15(cluster) ?
+		  STANDBYWFI_STAT_A15_CPU_MASK(cpu)
+		: STANDBYWFI_STAT_A7_CPU_MASK(cpu);
+}
+
+/**
+ * ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
+ *
+ * @cpu: mpidr[7:0] bitfield describing CPU affinity level within cluster
+ * @cluster: mpidr[15:8] bitfield describing cluster affinity level
+ *
+ * @return: non-zero if and only if the specified CPU is in WFI
+ *
+ * Take care when interpreting the result of this function: a CPU might
+ * be in WFI temporarily due to idle, and is not necessarily safely
+ * parked.
+ */ +int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster) +{ + int ret; + u32 mask = standbywfi_cpu_mask(cpu, cluster); + + if (cluster >= MAX_CLUSTERS) + return 1; + + ret = readl_relaxed(info->baseaddr + STANDBYWFI_STAT); + + pr_debug("%s: PCFGREG[0x%X] = 0x%08X, mask = 0x%X\n", + __func__, STANDBYWFI_STAT, ret, mask); + + return ret & mask; +} + static int ve_spc_get_performance(int cluster, u32 *freq) { struct ve_spc_opp *opps = info->opps[cluster]; diff --git a/arch/arm/mach-vexpress/spc.h b/arch/arm/mach-vexpress/spc.h index dbd44c3720f9..793d065243b9 100644 --- a/arch/arm/mach-vexpress/spc.h +++ b/arch/arm/mach-vexpress/spc.h @@ -20,5 +20,6 @@ void ve_spc_global_wakeup_irq(bool set); void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set); void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr); void ve_spc_powerdown(u32 cluster, bool enable); +int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster); #endif diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c index 05a364c5077a..29e7785a54bc 100644 --- a/arch/arm/mach-vexpress/tc2_pm.c +++ b/arch/arm/mach-vexpress/tc2_pm.c @@ -12,6 +12,7 @@ * published by the Free Software Foundation. */ +#include <linux/delay.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> @@ -32,11 +33,17 @@ #include "spc.h" /* SCC conf registers */ +#define RESET_CTRL 0x018 +#define RESET_A15_NCORERESET(cpu) (1 << (2 + (cpu))) +#define RESET_A7_NCORERESET(cpu) (1 << (16 + (cpu))) + #define A15_CONF 0x400 #define A7_CONF 0x500 #define SYS_INFO 0x700 #define SPC_BASE 0xb00 +static void __iomem *scc; + /* * We can't use regular spinlocks. In the switcher case, it is possible * for an outbound CPU to call power_down() after its inbound counterpart @@ -190,6 +197,55 @@ static void tc2_pm_power_down(void) tc2_pm_down(0); } +static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster) +{ + u32 mask = cluster ? + RESET_A7_NCORERESET(cpu) + : RESET_A15_NCORERESET(cpu); + + return !(readl_relaxed(scc + RESET_CTRL) & mask); +} + +#define POLL_MSEC 10 +#define TIMEOUT_MSEC 1000 + +static int tc2_pm_power_down_finish(unsigned int cpu, unsigned int cluster) +{ + unsigned tries; + + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); + BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER); + + for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) { + /* + * Only examine the hardware state if the target CPU has + * caught up at least as far as tc2_pm_down(): + */ + if (ACCESS_ONCE(tc2_pm_use_count[cpu][cluster]) == 0) { + pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n", + __func__, cpu, cluster, + readl_relaxed(scc + RESET_CTRL)); + + /* + * We need the CPU to reach WFI, but the power + * controller may put the cluster in reset and + * power it off as soon as that happens, before + * we have a chance to see STANDBYWFI. 
+ * + * So we need to check for both conditions: + */ + if (tc2_core_in_reset(cpu, cluster) || + ve_spc_cpu_in_wfi(cpu, cluster)) + return 0; /* success: the CPU is halted */ + } + + /* Otherwise, wait and retry: */ + msleep(POLL_MSEC); + } + + return -ETIMEDOUT; /* timeout */ +} + static void tc2_pm_suspend(u64 residency) { unsigned int mpidr, cpu, cluster; @@ -232,10 +288,11 @@ static void tc2_pm_powered_up(void) } static const struct mcpm_platform_ops tc2_pm_power_ops = { - .power_up = tc2_pm_power_up, - .power_down = tc2_pm_power_down, - .suspend = tc2_pm_suspend, - .powered_up = tc2_pm_powered_up, + .power_up = tc2_pm_power_up, + .power_down = tc2_pm_power_down, + .power_down_finish = tc2_pm_power_down_finish, + .suspend = tc2_pm_suspend, + .powered_up = tc2_pm_powered_up, }; static bool __init tc2_pm_usage_count_init(void) @@ -269,7 +326,6 @@ static void __naked tc2_pm_power_up_setup(unsigned int affinity_level) static int __init tc2_pm_init(void) { int ret, irq; - void __iomem *scc; u32 a15_cluster_id, a7_cluster_id, sys_info; struct device_node *np; diff --git a/arch/arm64/boot/dts/foundation-v8.dts b/arch/arm64/boot/dts/foundation-v8.dts index 84fcc5018284..519c4b2c0687 100644 --- a/arch/arm64/boot/dts/foundation-v8.dts +++ b/arch/arm64/boot/dts/foundation-v8.dts @@ -6,6 +6,8 @@ /dts-v1/; +/memreserve/ 0x80000000 0x00010000; + / { model = "Foundation-v8A"; compatible = "arm,foundation-aarch64", "arm,vexpress"; diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h index aa11943b8502..b2fcfbc51ecc 100644 --- a/arch/arm64/include/asm/irqflags.h +++ b/arch/arm64/include/asm/irqflags.h @@ -56,6 +56,9 @@ static inline void arch_local_irq_disable(void) #define local_fiq_enable() asm("msr daifclr, #1" : : : "memory") #define local_fiq_disable() asm("msr daifset, #1" : : : "memory") +#define local_async_enable() asm("msr daifclr, #4" : : : "memory") +#define local_async_disable() asm("msr daifset, #4" : : : "memory") + /* * Save the current interrupt enable state. */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 5d85a02d1231..0a1d69751562 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -26,7 +26,12 @@ #include <asm/kvm_asm.h> #include <asm/kvm_mmio.h> -#define KVM_MAX_VCPUS 4 +#if defined(CONFIG_KVM_ARM_MAX_VCPUS) +#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS +#else +#define KVM_MAX_VCPUS 0 +#endif + #define KVM_USER_MEM_SLOTS 32 #define KVM_PRIVATE_MEM_SLOTS 4 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 17bd3af0a117..7f2b60affbb4 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -25,10 +25,11 @@ * Software defined PTE bits definition. */ #define PTE_VALID (_AT(pteval_t, 1) << 0) -#define PTE_PROT_NONE (_AT(pteval_t, 1) << 2) /* only when !PTE_VALID */ -#define PTE_FILE (_AT(pteval_t, 1) << 3) /* only when !pte_present() */ +#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */ #define PTE_DIRTY (_AT(pteval_t, 1) << 55) #define PTE_SPECIAL (_AT(pteval_t, 1) << 56) + /* bit 57 for PMD_SECT_SPLITTING */ +#define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */ /* * VMALLOC and SPARSEMEM_VMEMMAP ranges. 
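The pgtable.h hunk above moves the arm64 software PTE bits around: PTE_FILE drops to bit 2 and PTE_PROT_NONE moves up to bit 58, which is what lets the reworked swap- and file-entry encodings in the hunks below widen their fields. As a quick illustration of the new packing, here is a stand-alone C sketch (illustrative only, not kernel code; the constant names and test values are ours, the field widths come from the hunks below):

/*
 * arm64 swap-entry layout after this patch:
 *   bits 0-1:  must stay zero so the entry never looks present
 *   bit  2:    PTE_FILE
 *   bits 3-8:  swap type
 *   bits 9-57: swap offset (bit 58 stays free for PTE_PROT_NONE)
 */
#include <assert.h>
#include <stdint.h>

#define SWP_TYPE_SHIFT		3
#define SWP_TYPE_BITS		6
#define SWP_OFFSET_BITS		49
#define SWP_TYPE_MASK		((1ULL << SWP_TYPE_BITS) - 1)
#define SWP_OFFSET_SHIFT	(SWP_TYPE_SHIFT + SWP_TYPE_BITS)
#define SWP_OFFSET_MASK		((1ULL << SWP_OFFSET_BITS) - 1)

static uint64_t swp_entry(uint64_t type, uint64_t offset)
{
	/* Mirrors __swp_entry(): pack type and offset into one word. */
	return (type << SWP_TYPE_SHIFT) | (offset << SWP_OFFSET_SHIFT);
}

int main(void)
{
	uint64_t e = swp_entry(5, 0x12345);

	/* Mirrors __swp_type() and __swp_offset(): unpack and check. */
	assert(((e >> SWP_TYPE_SHIFT) & SWP_TYPE_MASK) == 5);
	assert(((e >> SWP_OFFSET_SHIFT) & SWP_OFFSET_MASK) == 0x12345);
	assert((e & 0x3) == 0);	/* valid/present bits remain clear */
	return 0;
}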
@@ -254,7 +255,7 @@ static inline int has_transparent_hugepage(void) #define pgprot_noncached(prot) \ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE)) #define pgprot_writecombine(prot) \ - __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE)) + __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC)) #define pgprot_dmacoherent(prot) \ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC)) #define __HAVE_PHYS_MEM_ACCESS_PROT @@ -357,18 +358,20 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; /* * Encode and decode a swap entry: - * bits 0, 2: present (must both be zero) - * bit 3: PTE_FILE - * bits 4-8: swap type - * bits 9-63: swap offset + * bits 0-1: present (must be zero) + * bit 2: PTE_FILE + * bits 3-8: swap type + * bits 9-57: swap offset */ -#define __SWP_TYPE_SHIFT 4 +#define __SWP_TYPE_SHIFT 3 #define __SWP_TYPE_BITS 6 +#define __SWP_OFFSET_BITS 49 #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) +#define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1) #define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) -#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT) +#define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK) #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) @@ -382,15 +385,15 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; /* * Encode and decode a file entry: - * bits 0, 2: present (must both be zero) - * bit 3: PTE_FILE - * bits 4-63: file offset / PAGE_SIZE + * bits 0-1: present (must be zero) + * bit 2: PTE_FILE + * bits 3-57: file offset / PAGE_SIZE */ #define pte_file(pte) (pte_val(pte) & PTE_FILE) -#define pte_to_pgoff(x) (pte_val(x) >> 4) -#define pgoff_to_pte(x) __pte(((x) << 4) | PTE_FILE) +#define pte_to_pgoff(x) (pte_val(x) >> 3) +#define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE) -#define PTE_FILE_MAX_BITS 60 +#define PTE_FILE_MAX_BITS 55 extern int kern_addr_valid(unsigned long addr); diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 5031f4263937..495ab6f84a61 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -55,8 +55,9 @@ struct kvm_regs { #define KVM_ARM_TARGET_AEM_V8 0 #define KVM_ARM_TARGET_FOUNDATION_V8 1 #define KVM_ARM_TARGET_CORTEX_A57 2 +#define KVM_ARM_TARGET_XGENE_POTENZA 3 -#define KVM_ARM_NUM_TARGETS 3 +#define KVM_ARM_NUM_TARGETS 4 /* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */ #define KVM_ARM_DEVICE_TYPE_SHIFT 0 @@ -129,6 +130,24 @@ struct kvm_arch_memory_slot { #define KVM_REG_ARM64_SYSREG_OP2_MASK 0x0000000000000007 #define KVM_REG_ARM64_SYSREG_OP2_SHIFT 0 +#define ARM64_SYS_REG_SHIFT_MASK(x,n) \ + (((x) << KVM_REG_ARM64_SYSREG_ ## n ## _SHIFT) & \ + KVM_REG_ARM64_SYSREG_ ## n ## _MASK) + +#define __ARM64_SYS_REG(op0,op1,crn,crm,op2) \ + (KVM_REG_ARM64 | KVM_REG_ARM64_SYSREG | \ + ARM64_SYS_REG_SHIFT_MASK(op0, OP0) | \ + ARM64_SYS_REG_SHIFT_MASK(op1, OP1) | \ + ARM64_SYS_REG_SHIFT_MASK(crn, CRN) | \ + ARM64_SYS_REG_SHIFT_MASK(crm, CRM) | \ + ARM64_SYS_REG_SHIFT_MASK(op2, OP2)) + +#define ARM64_SYS_REG(...) 
(__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64) + +#define KVM_REG_ARM_TIMER_CTL ARM64_SYS_REG(3, 3, 14, 3, 1) +#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) +#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) + /* KVM_IRQ_LINE irq field index values */ #define KVM_ARM_IRQ_TYPE_SHIFT 24 #define KVM_ARM_IRQ_TYPE_MASK 0xff diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 6a0a9b132d7a..4ae68579031d 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -248,7 +248,8 @@ static int brk_handler(unsigned long addr, unsigned int esr, int aarch32_break_handler(struct pt_regs *regs) { siginfo_t info; - unsigned int instr; + u32 arm_instr; + u16 thumb_instr; bool bp = false; void __user *pc = (void __user *)instruction_pointer(regs); @@ -257,18 +258,21 @@ int aarch32_break_handler(struct pt_regs *regs) if (compat_thumb_mode(regs)) { /* get 16-bit Thumb instruction */ - get_user(instr, (u16 __user *)pc); - if (instr == AARCH32_BREAK_THUMB2_LO) { + get_user(thumb_instr, (u16 __user *)pc); + thumb_instr = le16_to_cpu(thumb_instr); + if (thumb_instr == AARCH32_BREAK_THUMB2_LO) { /* get second half of 32-bit Thumb-2 instruction */ - get_user(instr, (u16 __user *)(pc + 2)); - bp = instr == AARCH32_BREAK_THUMB2_HI; + get_user(thumb_instr, (u16 __user *)(pc + 2)); + thumb_instr = le16_to_cpu(thumb_instr); + bp = thumb_instr == AARCH32_BREAK_THUMB2_HI; } else { - bp = instr == AARCH32_BREAK_THUMB; + bp = thumb_instr == AARCH32_BREAK_THUMB; } } else { /* 32-bit ARM instruction */ - get_user(instr, (u32 __user *)pc); - bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM; + get_user(arm_instr, (u32 __user *)pc); + arm_instr = le32_to_cpu(arm_instr); + bp = (arm_instr & ~0xf0000000) == AARCH32_BREAK_ARM; } if (!bp) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index e1166145ca29..4d2c6f3f0c41 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -309,15 +309,12 @@ el1_irq: #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off #endif -#ifdef CONFIG_PREEMPT - get_thread_info tsk - ldr w24, [tsk, #TI_PREEMPT] // get preempt count - add w0, w24, #1 // increment it - str w0, [tsk, #TI_PREEMPT] -#endif + irq_handler + #ifdef CONFIG_PREEMPT - str w24, [tsk, #TI_PREEMPT] // restore preempt count + get_thread_info tsk + ldr w24, [tsk, #TI_PREEMPT] // restore preempt count cbnz w24, 1f // preempt count != 0 ldr x0, [tsk, #TI_FLAGS] // get flags tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? 
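The two entry.S hunks here (el1_irq above, el0_irq below) stop open-coding the preempt-count increment and decrement around the interrupt handler and leave that to the generic irq-entry code; after irq_handler returns, the kernel still reschedules only when the preempt count is zero and TIF_NEED_RESCHED is set. A rough C rendering of that return-path check (a sketch for illustration only; the struct layout and names are ours, the real logic is the assembly in these hunks):

/*
 * Sketch of the el1_irq return-path decision: preempt only when the
 * preempt count has dropped to zero and a reschedule was requested.
 */
struct thread_info_sketch {
	int preempt_count;	/* what TI_PREEMPT points at */
	unsigned long flags;	/* what TI_FLAGS points at */
};

#define TIF_NEED_RESCHED 1	/* bit number, as tested by the tbz above */

static int should_preempt(const struct thread_info_sketch *ti)
{
	if (ti->preempt_count != 0)	/* cbnz w24, 1f */
		return 0;
	/* tbz x0, #TIF_NEED_RESCHED, 1f */
	return !!(ti->flags & (1UL << TIF_NEED_RESCHED));
}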
@@ -507,22 +504,10 @@ el0_irq_naked:
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
 #endif
-	get_thread_info tsk
-#ifdef CONFIG_PREEMPT
-	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
-	add	w23, w24, #1			// increment it
-	str	w23, [tsk, #TI_PREEMPT]
-#endif
+
 	irq_handler
-#ifdef CONFIG_PREEMPT
-	ldr	w0, [tsk, #TI_PREEMPT]
-	str	w24, [tsk, #TI_PREEMPT]
-	cmp	w0, w23
-	b.eq	1f
-	mov	x1, #0
-	str	x1, [x1]			// BUG
-1:
-#endif
+
+	get_thread_info tsk
+
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_on
 #endif
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index fecdbf7de82e..6777a2192b83 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -636,28 +636,27 @@ static int compat_gpr_get(struct task_struct *target,
 	for (i = 0; i < num_regs; ++i) {
 		unsigned int idx = start + i;
-		void *reg;
+		compat_ulong_t reg;
 
 		switch (idx) {
 		case 15:
-			reg = (void *)&task_pt_regs(target)->pc;
+			reg = task_pt_regs(target)->pc;
 			break;
 		case 16:
-			reg = (void *)&task_pt_regs(target)->pstate;
+			reg = task_pt_regs(target)->pstate;
 			break;
 		case 17:
-			reg = (void *)&task_pt_regs(target)->orig_x0;
+			reg = task_pt_regs(target)->orig_x0;
 			break;
 		default:
-			reg = (void *)&task_pt_regs(target)->regs[idx];
+			reg = task_pt_regs(target)->regs[idx];
 		}
 
-		ret = copy_to_user(ubuf, reg, sizeof(compat_ulong_t));
-
+		ret = copy_to_user(ubuf, &reg, sizeof(reg));
 		if (ret)
 			break;
-		else
-			ubuf += sizeof(compat_ulong_t);
+
+		ubuf += sizeof(reg);
 	}
 
 	return ret;
@@ -685,28 +684,28 @@ static int compat_gpr_set(struct task_struct *target,
 	for (i = 0; i < num_regs; ++i) {
 		unsigned int idx = start + i;
-		void *reg;
+		compat_ulong_t reg;
+
+		ret = copy_from_user(&reg, ubuf, sizeof(reg));
+		if (ret)
+			return ret;
+
+		ubuf += sizeof(reg);
 
 		switch (idx) {
 		case 15:
-			reg = (void *)&newregs.pc;
+			newregs.pc = reg;
 			break;
 		case 16:
-			reg = (void *)&newregs.pstate;
+			newregs.pstate = reg;
 			break;
 		case 17:
-			reg = (void *)&newregs.orig_x0;
+			newregs.orig_x0 = reg;
 			break;
 		default:
-			reg = (void *)&newregs.regs[idx];
+			newregs.regs[idx] = reg;
 		}
-
-		ret = copy_from_user(reg, ubuf, sizeof(compat_ulong_t));
-
-		if (ret)
-			goto out;
-		else
-			ubuf += sizeof(compat_ulong_t);
 	}
 
 	if (valid_user_regs(&newregs.user_regs))
@@ -714,7 +713,6 @@ static int compat_gpr_set(struct task_struct *target,
 	else
 		ret = -EINVAL;
 
-out:
 	return ret;
 }
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 0bc5e4cbc017..bd9bbd0e44ed 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -205,6 +205,11 @@ u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
 
 void __init setup_arch(char **cmdline_p)
 {
+	/*
+	 * Unmask asynchronous aborts early to catch possible system errors.
+	 */
+	local_async_enable();
+
 	setup_processor();
 
 	setup_machine_fdt(__fdt_pointer);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index a5aeefab03c3..a0c2ca602cf8 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -160,6 +160,7 @@ asmlinkage void secondary_start_kernel(void)
 
 	local_irq_enable();
 	local_fiq_enable();
+	local_async_enable();
 
 	/*
 	 * OK, it's off to the idle thread for us
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 4480ab339a00..8ba85e9ea388 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -36,6 +36,17 @@ config KVM_ARM_HOST
 	---help---
 	  Provides host support for ARM processors.
+config KVM_ARM_MAX_VCPUS + int "Number maximum supported virtual CPUs per VM" + depends on KVM_ARM_HOST + default 4 + help + Static number of max supported virtual CPUs per VM. + + If you choose a high number, the vcpu structures will be quite + large, so only choose a reasonable number that you expect to + actually use. + config KVM_ARM_VGIC bool depends on KVM_ARM_HOST && OF diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 3f0731e53274..08745578d54d 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -207,20 +207,26 @@ int __attribute_const__ kvm_target_cpu(void) unsigned long implementor = read_cpuid_implementor(); unsigned long part_number = read_cpuid_part_number(); - if (implementor != ARM_CPU_IMP_ARM) - return -EINVAL; + switch (implementor) { + case ARM_CPU_IMP_ARM: + switch (part_number) { + case ARM_CPU_PART_AEM_V8: + return KVM_ARM_TARGET_AEM_V8; + case ARM_CPU_PART_FOUNDATION: + return KVM_ARM_TARGET_FOUNDATION_V8; + case ARM_CPU_PART_CORTEX_A57: + return KVM_ARM_TARGET_CORTEX_A57; + }; + break; + case ARM_CPU_IMP_APM: + switch (part_number) { + case APM_CPU_PART_POTENZA: + return KVM_ARM_TARGET_XGENE_POTENZA; + }; + break; + }; - switch (part_number) { - case ARM_CPU_PART_AEM_V8: - return KVM_ARM_TARGET_AEM_V8; - case ARM_CPU_PART_FOUNDATION: - return KVM_ARM_TARGET_FOUNDATION_V8; - case ARM_CPU_PART_CORTEX_A57: - /* Currently handled by the generic backend */ - return KVM_ARM_TARGET_CORTEX_A57; - default: - return -EINVAL; - } + return -EINVAL; } int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 8da56067c304..df84d7bcc7df 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -39,9 +39,6 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) { - if (kvm_psci_call(vcpu)) - return 1; - kvm_inject_undefined(vcpu); return 1; } diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c index 4268ab9356b1..8fe6f76b0edc 100644 --- a/arch/arm64/kvm/sys_regs_generic_v8.c +++ b/arch/arm64/kvm/sys_regs_generic_v8.c @@ -90,6 +90,9 @@ static int __init sys_reg_genericv8_init(void) &genericv8_target_table); kvm_register_target_sys_reg_table(KVM_ARM_TARGET_CORTEX_A57, &genericv8_target_table); + kvm_register_target_sys_reg_table(KVM_ARM_TARGET_XGENE_POTENZA, + &genericv8_target_table); + return 0; } late_initcall(sys_reg_genericv8_init); diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 985bf80c622e..53f44bee9ebb 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -702,7 +702,7 @@ again: out: srcu_read_unlock(&vcpu->kvm->srcu, idx); if (r > 0) { - kvm_resched(vcpu); + cond_resched(); idx = srcu_read_lock(&vcpu->kvm->srcu); goto again; } diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 8a2463670a5b..0f4344e6fbca 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -75,8 +75,10 @@ LDEMULATION := lppc GNUTARGET := powerpcle MULTIPLEWORD := -mno-multiple else +ifeq ($(call cc-option-yn,-mbig-endian),y) override CC += -mbig-endian override AS += -mbig-endian +endif override LD += -EB LDEMULATION := ppc GNUTARGET := powerpc @@ -128,7 +130,12 @@ CFLAGS-$(CONFIG_POWER5_CPU) += $(call cc-option,-mcpu=power5) CFLAGS-$(CONFIG_POWER6_CPU) += $(call cc-option,-mcpu=power6) CFLAGS-$(CONFIG_POWER7_CPU) += $(call cc-option,-mcpu=power7) +# Altivec option not allowed with e500mc64 in GCC. 
+ifeq ($(CONFIG_ALTIVEC),y) +E5500_CPU := -mcpu=powerpc64 +else E5500_CPU := $(call cc-option,-mcpu=e500mc64,-mcpu=powerpc64) +endif CFLAGS-$(CONFIG_E5500_CPU) += $(E5500_CPU) CFLAGS-$(CONFIG_E6500_CPU) += $(call cc-option,-mcpu=e6500,$(E5500_CPU)) diff --git a/arch/powerpc/boot/dts/xcalibur1501.dts b/arch/powerpc/boot/dts/xcalibur1501.dts index cc00f4ddd9a7..c409cbafb126 100644 --- a/arch/powerpc/boot/dts/xcalibur1501.dts +++ b/arch/powerpc/boot/dts/xcalibur1501.dts @@ -637,14 +637,14 @@ tlu@2f000 { compatible = "fsl,mpc8572-tlu", "fsl_tlu"; reg = <0x2f000 0x1000>; - interupts = <61 2 >; + interrupts = <61 2>; interrupt-parent = <&mpic>; }; tlu@15000 { compatible = "fsl,mpc8572-tlu", "fsl_tlu"; reg = <0x15000 0x1000>; - interupts = <75 2>; + interrupts = <75 2>; interrupt-parent = <&mpic>; }; }; diff --git a/arch/powerpc/boot/dts/xpedite5301.dts b/arch/powerpc/boot/dts/xpedite5301.dts index 53c1c6a9752f..04cb410da48b 100644 --- a/arch/powerpc/boot/dts/xpedite5301.dts +++ b/arch/powerpc/boot/dts/xpedite5301.dts @@ -547,14 +547,14 @@ tlu@2f000 { compatible = "fsl,mpc8572-tlu", "fsl_tlu"; reg = <0x2f000 0x1000>; - interupts = <61 2 >; + interrupts = <61 2>; interrupt-parent = <&mpic>; }; tlu@15000 { compatible = "fsl,mpc8572-tlu", "fsl_tlu"; reg = <0x15000 0x1000>; - interupts = <75 2>; + interrupts = <75 2>; interrupt-parent = <&mpic>; }; }; diff --git a/arch/powerpc/boot/dts/xpedite5330.dts b/arch/powerpc/boot/dts/xpedite5330.dts index 215225983150..73f8620f1ce7 100644 --- a/arch/powerpc/boot/dts/xpedite5330.dts +++ b/arch/powerpc/boot/dts/xpedite5330.dts @@ -583,14 +583,14 @@ tlu@2f000 { compatible = "fsl,mpc8572-tlu", "fsl_tlu"; reg = <0x2f000 0x1000>; - interupts = <61 2 >; + interrupts = <61 2>; interrupt-parent = <&mpic>; }; tlu@15000 { compatible = "fsl,mpc8572-tlu", "fsl_tlu"; reg = <0x15000 0x1000>; - interupts = <75 2>; + interrupts = <75 2>; interrupt-parent = <&mpic>; }; }; diff --git a/arch/powerpc/boot/dts/xpedite5370.dts b/arch/powerpc/boot/dts/xpedite5370.dts index 11dbda10d756..cd0ea2b99362 100644 --- a/arch/powerpc/boot/dts/xpedite5370.dts +++ b/arch/powerpc/boot/dts/xpedite5370.dts @@ -545,14 +545,14 @@ tlu@2f000 { compatible = "fsl,mpc8572-tlu", "fsl_tlu"; reg = <0x2f000 0x1000>; - interupts = <61 2 >; + interrupts = <61 2>; interrupt-parent = <&mpic>; }; tlu@15000 { compatible = "fsl,mpc8572-tlu", "fsl_tlu"; reg = <0x15000 0x1000>; - interupts = <75 2>; + interrupts = <75 2>; interrupt-parent = <&mpic>; }; }; diff --git a/arch/powerpc/boot/util.S b/arch/powerpc/boot/util.S index 5143228e3e5f..6636b1d7821b 100644 --- a/arch/powerpc/boot/util.S +++ b/arch/powerpc/boot/util.S @@ -71,18 +71,32 @@ udelay: add r4,r4,r5 addi r4,r4,-1 divw r4,r4,r5 /* BUS ticks */ +#ifdef CONFIG_8xx +1: mftbu r5 + mftb r6 + mftbu r7 +#else 1: mfspr r5, SPRN_TBRU mfspr r6, SPRN_TBRL mfspr r7, SPRN_TBRU +#endif cmpw 0,r5,r7 bne 1b /* Get [synced] base time */ addc r9,r6,r4 /* Compute end time */ addze r8,r5 +#ifdef CONFIG_8xx +2: mftbu r5 +#else 2: mfspr r5, SPRN_TBRU +#endif cmpw 0,r5,r8 blt 2b bgt 3f +#ifdef CONFIG_8xx + mftb r6 +#else mfspr r6, SPRN_TBRL +#endif cmpw 0,r6,r9 blt 2b 3: blr diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h index 16cb92d215d2..694012877bf7 100644 --- a/arch/powerpc/include/asm/pgalloc-64.h +++ b/arch/powerpc/include/asm/pgalloc-64.h @@ -16,6 +16,7 @@ struct vmemmap_backing { unsigned long phys; unsigned long virt_addr; }; +extern struct vmemmap_backing *vmemmap_list; /* * Functions that deal with pagetables that could be 
at any level of diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 3c1acc31a092..f595b98079ee 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -366,6 +366,8 @@ BEGIN_FTR_SECTION_NESTED(96); \ cmpwi dest,0; \ beq- 90b; \ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) +#elif defined(CONFIG_8xx) +#define MFTB(dest) mftb dest #else #define MFTB(dest) mfspr dest, SPRN_TBRL #endif diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 5c45787d551e..fa8388ed94c5 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -1174,12 +1174,19 @@ #else /* __powerpc64__ */ +#if defined(CONFIG_8xx) +#define mftbl() ({unsigned long rval; \ + asm volatile("mftbl %0" : "=r" (rval)); rval;}) +#define mftbu() ({unsigned long rval; \ + asm volatile("mftbu %0" : "=r" (rval)); rval;}) +#else #define mftbl() ({unsigned long rval; \ asm volatile("mfspr %0, %1" : "=r" (rval) : \ "i" (SPRN_TBRL)); rval;}) #define mftbu() ({unsigned long rval; \ asm volatile("mfspr %0, %1" : "=r" (rval) : \ "i" (SPRN_TBRU)); rval;}) +#endif #endif /* !__powerpc64__ */ #define mttbl(v) asm volatile("mttbl %0":: "r"(v)) diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h index 18908caa1f3b..2cf846edb3fc 100644 --- a/arch/powerpc/include/asm/timex.h +++ b/arch/powerpc/include/asm/timex.h @@ -29,7 +29,11 @@ static inline cycles_t get_cycles(void) ret = 0; __asm__ __volatile__( +#ifdef CONFIG_8xx + "97: mftb %0\n" +#else "97: mfspr %0, %2\n" +#endif "99:\n" ".section __ftr_fixup,\"a\"\n" ".align 2\n" @@ -41,7 +45,11 @@ static inline cycles_t get_cycles(void) " .long 0\n" " .long 0\n" ".previous" +#ifdef CONFIG_8xx + : "=r" (ret) : "i" (CPU_FTR_601)); +#else : "=r" (ret) : "i" (CPU_FTR_601), "i" (SPRN_TBRL)); +#endif return ret; #endif } diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index e1ec57e87b3b..88a7fb458dfd 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -18,6 +18,7 @@ #include <linux/ftrace.h> #include <asm/machdep.h> +#include <asm/pgalloc.h> #include <asm/prom.h> #include <asm/sections.h> @@ -75,6 +76,17 @@ void arch_crash_save_vmcoreinfo(void) #ifndef CONFIG_NEED_MULTIPLE_NODES VMCOREINFO_SYMBOL(contig_page_data); #endif +#if defined(CONFIG_PPC64) && defined(CONFIG_SPARSEMEM_VMEMMAP) + VMCOREINFO_SYMBOL(vmemmap_list); + VMCOREINFO_SYMBOL(mmu_vmemmap_psize); + VMCOREINFO_SYMBOL(mmu_psize_defs); + VMCOREINFO_STRUCT_SIZE(vmemmap_backing); + VMCOREINFO_OFFSET(vmemmap_backing, list); + VMCOREINFO_OFFSET(vmemmap_backing, phys); + VMCOREINFO_OFFSET(vmemmap_backing, virt_addr); + VMCOREINFO_STRUCT_SIZE(mmu_psize_def); + VMCOREINFO_OFFSET(mmu_psize_def, shift); +#endif } /* diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index fd82c289ab1c..28b898e68185 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -210,7 +210,7 @@ static void __init nvram_print_partitions(char * label) printk(KERN_WARNING "--------%s---------\n", label); printk(KERN_WARNING "indx\t\tsig\tchks\tlen\tname\n"); list_for_each_entry(tmp_part, &nvram_partitions, partition) { - printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%12s\n", + printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%12.12s\n", tmp_part->index, tmp_part->header.signature, tmp_part->header.checksum, tmp_part->header.length, tmp_part->header.name); diff --git 
a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 1844298f5ea4..68027bfa5f8e 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -445,6 +445,12 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, #endif /* CONFIG_ALTIVEC */ if (copy_fpr_to_user(&frame->mc_fregs, current)) return 1; + + /* + * Clear the MSR VSX bit to indicate there is no valid state attached + * to this context, except in the specific case below where we set it. + */ + msr &= ~MSR_VSX; #ifdef CONFIG_VSX /* * Copy VSR 0-31 upper half from thread_struct to local @@ -457,15 +463,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, if (copy_vsx_to_user(&frame->mc_vsregs, current)) return 1; msr |= MSR_VSX; - } else if (!ctx_has_vsx_region) - /* - * With a small context structure we can't hold the VSX - * registers, hence clear the MSR value to indicate the state - * was not saved. - */ - msr &= ~MSR_VSX; - - + } #endif /* CONFIG_VSX */ #ifdef CONFIG_SPE /* save spe registers */ diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index e66f67b8b9e6..42991045349f 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -122,6 +122,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, flush_fp_to_thread(current); /* copy fpr regs and fpscr */ err |= copy_fpr_to_user(&sc->fp_regs, current); + + /* + * Clear the MSR VSX bit to indicate there is no valid state attached + * to this context, except in the specific case below where we set it. + */ + msr &= ~MSR_VSX; #ifdef CONFIG_VSX /* * Copy VSX low doubleword to local buffer for formatting, diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S index 6b1f2a6d5517..6b2b69616e77 100644 --- a/arch/powerpc/kernel/vdso32/gettimeofday.S +++ b/arch/powerpc/kernel/vdso32/gettimeofday.S @@ -232,9 +232,15 @@ __do_get_tspec: lwz r6,(CFG_TB_ORIG_STAMP+4)(r9) /* Get a stable TB value */ +#ifdef CONFIG_8xx +2: mftbu r3 + mftbl r4 + mftbu r0 +#else 2: mfspr r3, SPRN_TBRU mfspr r4, SPRN_TBRL mfspr r0, SPRN_TBRU +#endif cmplw cr0,r3,r0 bne- 2b diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 072287f1c3bc..3fa99b20894f 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1348,7 +1348,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc) kvm_guest_exit(); preempt_enable(); - kvm_resched(vcpu); + cond_resched(); spin_lock(&vc->lock); now = get_tb(); diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c index 3bc700655fc8..74551b5e41e5 100644 --- a/arch/powerpc/mm/hugetlbpage-book3e.c +++ b/arch/powerpc/mm/hugetlbpage-book3e.c @@ -117,6 +117,5 @@ void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) struct hstate *hstate = hstate_file(vma->vm_file); unsigned long tsize = huge_page_shift(hstate) - 10; - __flush_tlb_page(vma ? 
vma->vm_mm : NULL, vmaddr, tsize, 0);
-
+	__flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0);
 }
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 41cd68dee681..358d74303138 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -305,7 +305,7 @@ void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
 #ifdef CONFIG_HUGETLB_PAGE
-	if (is_vm_hugetlb_page(vma))
+	if (vma && is_vm_hugetlb_page(vma))
 		flush_hugetlb_page(vma, vmaddr);
 #endif
 
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 132f8726a257..bca2465a9c34 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -404,13 +404,27 @@ config PPC_DOORBELL
 
 endmenu
 
-config CPU_LITTLE_ENDIAN
-	bool "Build little endian kernel"
-	default n
+choice
+	prompt "Endianness selection"
+	default CPU_BIG_ENDIAN
 	help
 	  This option selects whether a big endian or little endian kernel
 	  will be built.
 
+config CPU_BIG_ENDIAN
+	bool "Build big endian kernel"
+	help
+	  Build a big endian kernel.
+
+	  If unsure, select this option.
+
+config CPU_LITTLE_ENDIAN
+	bool "Build little endian kernel"
+	help
+	  Build a little endian kernel.
+
 	  Note that if cross compiling a little endian kernel, CROSS_COMPILE
 	  must point to a toolchain capable of targeting little endian
 	  powerpc.
+
+endchoice
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 314fced4fc14..5877e71901b3 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -101,7 +101,7 @@ config S390
 	select GENERIC_CPU_DEVICES if !SMP
 	select GENERIC_FIND_FIRST_BIT
 	select GENERIC_SMP_IDLE_THREAD
-	select GENERIC_TIME_VSYSCALL_OLD
+	select GENERIC_TIME_VSYSCALL
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
 	select HAVE_ARCH_SECCOMP_FILTER
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 46cae138ece2..4363528dc8fd 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -35,7 +35,6 @@ static u8 *ctrblk;
 static char keylen_flag;
 
 struct s390_aes_ctx {
-	u8 iv[AES_BLOCK_SIZE];
 	u8 key[AES_MAX_KEY_SIZE];
 	long enc;
 	long dec;
@@ -441,30 +440,36 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 	return aes_set_key(tfm, in_key, key_len);
 }
 
-static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
+static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
 			 struct blkcipher_walk *walk)
 {
+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
 	int ret = blkcipher_walk_virt(desc, walk);
 	unsigned int nbytes = walk->nbytes;
+	struct {
+		u8 iv[AES_BLOCK_SIZE];
+		u8 key[AES_MAX_KEY_SIZE];
+	} param;
 
 	if (!nbytes)
 		goto out;
 
-	memcpy(param, walk->iv, AES_BLOCK_SIZE);
+	memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
+	memcpy(param.key, sctx->key, sctx->key_len);
 	do {
 		/* only use complete blocks */
 		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
 		u8 *out = walk->dst.virt.addr;
 		u8 *in = walk->src.virt.addr;
 
-		ret = crypt_s390_kmc(func, param, out, in, n);
+		ret = crypt_s390_kmc(func, &param, out, in, n);
 		if (ret < 0 || ret != n)
 			return -EIO;
 
 		nbytes &= AES_BLOCK_SIZE - 1;
 		ret = blkcipher_walk_done(desc, walk, nbytes);
 	} while ((nbytes = walk->nbytes));
 
-	memcpy(walk->iv, param, AES_BLOCK_SIZE);
+	memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
 
 out:
 	return ret;
@@ -481,7 +486,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
 		return fallback_blk_enc(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk); + return cbc_aes_crypt(desc, sctx->enc, &walk); } static int cbc_aes_decrypt(struct blkcipher_desc *desc, @@ -495,7 +500,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc, return fallback_blk_dec(desc, dst, src, nbytes); blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk); + return cbc_aes_crypt(desc, sctx->dec, &walk); } static struct crypto_alg cbc_aes_alg = { diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 316c8503a3b4..114258eeaacd 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -48,33 +48,21 @@ static inline void clear_page(void *page) : "memory", "cc"); } +/* + * copy_page uses the mvcl instruction with 0xb0 padding byte in order to + * bypass caches when copying a page. Especially when copying huge pages + * this keeps L1 and L2 data caches alive. + */ static inline void copy_page(void *to, void *from) { - if (MACHINE_HAS_MVPG) { - register unsigned long reg0 asm ("0") = 0; - asm volatile( - " mvpg %0,%1" - : : "a" (to), "a" (from), "d" (reg0) - : "memory", "cc"); - } else - asm volatile( - " mvc 0(256,%0),0(%1)\n" - " mvc 256(256,%0),256(%1)\n" - " mvc 512(256,%0),512(%1)\n" - " mvc 768(256,%0),768(%1)\n" - " mvc 1024(256,%0),1024(%1)\n" - " mvc 1280(256,%0),1280(%1)\n" - " mvc 1536(256,%0),1536(%1)\n" - " mvc 1792(256,%0),1792(%1)\n" - " mvc 2048(256,%0),2048(%1)\n" - " mvc 2304(256,%0),2304(%1)\n" - " mvc 2560(256,%0),2560(%1)\n" - " mvc 2816(256,%0),2816(%1)\n" - " mvc 3072(256,%0),3072(%1)\n" - " mvc 3328(256,%0),3328(%1)\n" - " mvc 3584(256,%0),3584(%1)\n" - " mvc 3840(256,%0),3840(%1)\n" - : : "a" (to), "a" (from) : "memory"); + register void *reg2 asm ("2") = to; + register unsigned long reg3 asm ("3") = 0x1000; + register void *reg4 asm ("4") = from; + register unsigned long reg5 asm ("5") = 0xb0001000; + asm volatile( + " mvcl 2,4" + : "+d" (reg2), "+d" (reg3), "+d" (reg4), "+d" (reg5) + : : "memory", "cc"); } #define clear_user_page(page, vaddr, pg) clear_page(page) diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h index 5a87d16d3e7c..d091aa1aaf11 100644 --- a/arch/s390/include/asm/sigp.h +++ b/arch/s390/include/asm/sigp.h @@ -5,6 +5,7 @@ #define SIGP_SENSE 1 #define SIGP_EXTERNAL_CALL 2 #define SIGP_EMERGENCY_SIGNAL 3 +#define SIGP_START 4 #define SIGP_STOP 5 #define SIGP_RESTART 6 #define SIGP_STOP_AND_STORE_STATUS 9 @@ -12,6 +13,7 @@ #define SIGP_SET_PREFIX 13 #define SIGP_STORE_STATUS_AT_ADDRESS 14 #define SIGP_SET_ARCHITECTURE 18 +#define SIGP_COND_EMERGENCY_SIGNAL 19 #define SIGP_SENSE_RUNNING 21 /* SIGP condition codes */ diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h index a73eb2e1e918..bc9746a7d47c 100644 --- a/arch/s390/include/asm/vdso.h +++ b/arch/s390/include/asm/vdso.h @@ -26,8 +26,9 @@ struct vdso_data { __u64 wtom_clock_nsec; /* 0x28 */ __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */ __u32 tz_dsttime; /* Type of dst correction 0x34 */ - __u32 ectg_available; - __u32 ntp_mult; /* NTP adjusted multiplier 0x3C */ + __u32 ectg_available; /* ECTG instruction present 0x38 */ + __u32 tk_mult; /* Mult. 
used for xtime_nsec 0x3c */ + __u32 tk_shift; /* Shift used for xtime_nsec 0x40 */ }; struct vdso_per_cpu_data { diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 2416138ebd3e..496116cd65ec 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -65,7 +65,8 @@ int main(void) DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available)); - DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult)); + DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult)); + DEFINE(__VDSO_TK_SHIFT, offsetof(struct vdso_data, tk_shift)); DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base)); DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time)); /* constants used by the vdso */ diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 6e2442978409..95e7ba0fbb7e 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -194,7 +194,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) return -EINVAL; /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */ - regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | + regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) | (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 | (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 | (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE); diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S index 4a460c44e17e..813ec7260878 100644 --- a/arch/s390/kernel/pgm_check.S +++ b/arch/s390/kernel/pgm_check.S @@ -78,7 +78,7 @@ PGM_CHECK_DEFAULT /* 34 */ PGM_CHECK_DEFAULT /* 35 */ PGM_CHECK_DEFAULT /* 36 */ PGM_CHECK_DEFAULT /* 37 */ -PGM_CHECK_DEFAULT /* 38 */ +PGM_CHECK_64BIT(do_dat_exception) /* 38 */ PGM_CHECK_64BIT(do_dat_exception) /* 39 */ PGM_CHECK_64BIT(do_dat_exception) /* 3a */ PGM_CHECK_64BIT(do_dat_exception) /* 3b */ diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index fb535874a246..d8fd508ccd1e 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -94,7 +94,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) return -EINVAL; /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */ - regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | + regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) | (user_sregs.regs.psw.mask & (PSW_MASK_USER | PSW_MASK_RI)); /* Check for invalid user address space control. 
*/ if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME) diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 064c3082ab33..dd95f1631621 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -108,20 +108,10 @@ static void fixup_clock_comparator(unsigned long long delta) set_clock_comparator(S390_lowcore.clock_comparator); } -static int s390_next_ktime(ktime_t expires, +static int s390_next_event(unsigned long delta, struct clock_event_device *evt) { - struct timespec ts; - u64 nsecs; - - ts.tv_sec = ts.tv_nsec = 0; - monotonic_to_bootbased(&ts); - nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires)); - do_div(nsecs, 125); - S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9); - /* Program the maximum value if we have an overflow (== year 2042) */ - if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc)) - S390_lowcore.clock_comparator = -1ULL; + S390_lowcore.clock_comparator = get_tod_clock() + delta; set_clock_comparator(S390_lowcore.clock_comparator); return 0; } @@ -146,15 +136,14 @@ void init_cpu_timer(void) cpu = smp_processor_id(); cd = &per_cpu(comparators, cpu); cd->name = "comparator"; - cd->features = CLOCK_EVT_FEAT_ONESHOT | - CLOCK_EVT_FEAT_KTIME; + cd->features = CLOCK_EVT_FEAT_ONESHOT; cd->mult = 16777; cd->shift = 12; cd->min_delta_ns = 1; cd->max_delta_ns = LONG_MAX; cd->rating = 400; cd->cpumask = cpumask_of(cpu); - cd->set_next_ktime = s390_next_ktime; + cd->set_next_event = s390_next_event; cd->set_mode = s390_set_mode; clockevents_register_device(cd); @@ -221,21 +210,30 @@ struct clocksource * __init clocksource_default_clock(void) return &clocksource_tod; } -void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm, - struct clocksource *clock, u32 mult) +void update_vsyscall(struct timekeeper *tk) { - if (clock != &clocksource_tod) + u64 nsecps; + + if (tk->clock != &clocksource_tod) return; /* Make userspace gettimeofday spin until we're done. 
*/ ++vdso_data->tb_update_count; smp_wmb(); - vdso_data->xtime_tod_stamp = clock->cycle_last; - vdso_data->xtime_clock_sec = wall_time->tv_sec; - vdso_data->xtime_clock_nsec = wall_time->tv_nsec; - vdso_data->wtom_clock_sec = wtm->tv_sec; - vdso_data->wtom_clock_nsec = wtm->tv_nsec; - vdso_data->ntp_mult = mult; + vdso_data->xtime_tod_stamp = tk->clock->cycle_last; + vdso_data->xtime_clock_sec = tk->xtime_sec; + vdso_data->xtime_clock_nsec = tk->xtime_nsec; + vdso_data->wtom_clock_sec = + tk->xtime_sec + tk->wall_to_monotonic.tv_sec; + vdso_data->wtom_clock_nsec = tk->xtime_nsec + + + (tk->wall_to_monotonic.tv_nsec << tk->shift); + nsecps = (u64) NSEC_PER_SEC << tk->shift; + while (vdso_data->wtom_clock_nsec >= nsecps) { + vdso_data->wtom_clock_nsec -= nsecps; + vdso_data->wtom_clock_sec++; + } + vdso_data->tk_mult = tk->mult; + vdso_data->tk_shift = tk->shift; smp_wmb(); ++vdso_data->tb_update_count; } diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S index b2224e0b974c..5be8e472f57d 100644 --- a/arch/s390/kernel/vdso32/clock_gettime.S +++ b/arch/s390/kernel/vdso32/clock_gettime.S @@ -38,25 +38,26 @@ __kernel_clock_gettime: sl %r1,__VDSO_XTIME_STAMP+4(%r5) brc 3,2f ahi %r0,-1 -2: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */ +2: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */ lr %r2,%r0 - l %r0,__VDSO_NTP_MULT(%r5) + l %r0,__VDSO_TK_MULT(%r5) ltr %r1,%r1 mr %r0,%r0 jnm 3f - a %r0,__VDSO_NTP_MULT(%r5) + a %r0,__VDSO_TK_MULT(%r5) 3: alr %r0,%r2 - srdl %r0,12 - al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ + al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ al %r1,__VDSO_XTIME_NSEC+4(%r5) brc 12,4f ahi %r0,1 -4: l %r2,__VDSO_XTIME_SEC+4(%r5) - al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */ +4: al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic.nsec */ al %r1,__VDSO_WTOM_NSEC+4(%r5) brc 12,5f ahi %r0,1 -5: al %r2,__VDSO_WTOM_SEC+4(%r5) +5: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ + srdl %r0,0(%r2) /* >> tk->shift */ + l %r2,__VDSO_XTIME_SEC+4(%r5) + al %r2,__VDSO_WTOM_SEC+4(%r5) cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ jne 1b basr %r5,0 @@ -86,20 +87,21 @@ __kernel_clock_gettime: sl %r1,__VDSO_XTIME_STAMP+4(%r5) brc 3,12f ahi %r0,-1 -12: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */ +12: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */ lr %r2,%r0 - l %r0,__VDSO_NTP_MULT(%r5) + l %r0,__VDSO_TK_MULT(%r5) ltr %r1,%r1 mr %r0,%r0 jnm 13f - a %r0,__VDSO_NTP_MULT(%r5) + a %r0,__VDSO_TK_MULT(%r5) 13: alr %r0,%r2 - srdl %r0,12 - al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ + al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ al %r1,__VDSO_XTIME_NSEC+4(%r5) brc 12,14f ahi %r0,1 -14: l %r2,__VDSO_XTIME_SEC+4(%r5) +14: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ + srdl %r0,0(%r2) /* >> tk->shift */ + l %r2,__VDSO_XTIME_SEC+4(%r5) cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ jne 11b basr %r5,0 diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S index 2d3633175e3b..fd621a950f7c 100644 --- a/arch/s390/kernel/vdso32/gettimeofday.S +++ b/arch/s390/kernel/vdso32/gettimeofday.S @@ -35,15 +35,14 @@ __kernel_gettimeofday: sl %r1,__VDSO_XTIME_STAMP+4(%r5) brc 3,3f ahi %r0,-1 -3: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */ +3: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */ st %r0,24(%r15) - l %r0,__VDSO_NTP_MULT(%r5) + l %r0,__VDSO_TK_MULT(%r5) ltr %r1,%r1 mr %r0,%r0 jnm 4f - a %r0,__VDSO_NTP_MULT(%r5) + a %r0,__VDSO_TK_MULT(%r5) 4: al 
%r0,24(%r15) - srdl %r0,12 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ al %r1,__VDSO_XTIME_NSEC+4(%r5) brc 12,5f @@ -51,6 +50,8 @@ __kernel_gettimeofday: 5: mvc 24(4,%r15),__VDSO_XTIME_SEC+4(%r5) cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ jne 1b + l %r4,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ + srdl %r0,0(%r4) /* >> tk->shift */ l %r4,24(%r15) /* get tv_sec from stack */ basr %r5,0 6: ltr %r0,%r0 diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S index d46c95ed5f19..0add1072ba30 100644 --- a/arch/s390/kernel/vdso64/clock_gettime.S +++ b/arch/s390/kernel/vdso64/clock_gettime.S @@ -34,14 +34,15 @@ __kernel_clock_gettime: tmll %r4,0x0001 /* pending update ? loop */ jnz 0b stck 48(%r15) /* Store TOD clock */ + lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ + lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */ + alg %r0,__VDSO_WTOM_SEC(%r5) /* + wall_to_monotonic.sec */ lg %r1,48(%r15) sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ - msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ - srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ - alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ - lg %r0,__VDSO_XTIME_SEC(%r5) - alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */ - alg %r0,__VDSO_WTOM_SEC(%r5) + msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ + alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ + alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic.nsec */ + srlg %r1,%r1,0(%r2) /* >> tk->shift */ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ jne 0b larl %r5,13f @@ -62,12 +63,13 @@ __kernel_clock_gettime: tmll %r4,0x0001 /* pending update ? loop */ jnz 5b stck 48(%r15) /* Store TOD clock */ + lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ lg %r1,48(%r15) sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ - msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ - srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ - alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ - lg %r0,__VDSO_XTIME_SEC(%r5) + msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ + alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ + srlg %r1,%r1,0(%r2) /* >> tk->shift */ + lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ jne 5b larl %r5,13f diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S index 36ee674722ec..d0860d1d0ccc 100644 --- a/arch/s390/kernel/vdso64/gettimeofday.S +++ b/arch/s390/kernel/vdso64/gettimeofday.S @@ -31,12 +31,13 @@ __kernel_gettimeofday: stck 48(%r15) /* Store TOD clock */ lg %r1,48(%r15) sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ - msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ - srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ - alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */ - lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */ + msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ + alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ + lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ jne 0b + lgf %r5,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ + srlg %r1,%r1,0(%r5) /* >> tk->shift */ larl %r5,5f 2: clg %r1,0(%r5) jl 3f diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 5ff29be7d87a..8216c0e0b2e2 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c @@ -121,7 +121,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu) * - gpr 4 contains the index on the bus (optionally) */ ret = kvm_io_bus_write_cookie(vcpu->kvm, 
KVM_VIRTIO_CCW_NOTIFY_BUS, - vcpu->run->s.regs.gprs[2], + vcpu->run->s.regs.gprs[2] & 0xffffffff, 8, &vcpu->run->s.regs.gprs[3], vcpu->run->s.regs.gprs[4]); diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index c3700585b4bb..87c2b3a3bd3e 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -1,7 +1,7 @@ /* * handling interprocessor communication * - * Copyright IBM Corp. 2008, 2009 + * Copyright IBM Corp. 2008, 2013 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (version 2 only) @@ -89,6 +89,37 @@ unlock: return rc; } +static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr, + u16 asn, u64 *reg) +{ + struct kvm_vcpu *dst_vcpu = NULL; + const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT; + u16 p_asn, s_asn; + psw_t *psw; + u32 flags; + + if (cpu_addr < KVM_MAX_VCPUS) + dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); + if (!dst_vcpu) + return SIGP_CC_NOT_OPERATIONAL; + flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags); + psw = &dst_vcpu->arch.sie_block->gpsw; + p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */ + s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */ + + /* Deliver the emergency signal? */ + if (!(flags & CPUSTAT_STOPPED) + || (psw->mask & psw_int_mask) != psw_int_mask + || ((flags & CPUSTAT_WAIT) && psw->addr != 0) + || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) { + return __sigp_emergency(vcpu, cpu_addr); + } else { + *reg &= 0xffffffff00000000UL; + *reg |= SIGP_STATUS_INCORRECT_STATE; + return SIGP_CC_STATUS_STORED; + } +} + static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr) { struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; @@ -332,7 +363,8 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr, return rc; } -static int __sigp_restart(struct kvm_vcpu *vcpu, u16 cpu_addr) +/* Test whether the destination CPU is available and not busy */ +static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr) { struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; struct kvm_s390_local_interrupt *li; @@ -351,9 +383,6 @@ static int __sigp_restart(struct kvm_vcpu *vcpu, u16 cpu_addr) spin_lock_bh(&li->lock); if (li->action_bits & ACTION_STOP_ON_STOP) rc = SIGP_CC_BUSY; - else - VCPU_EVENT(vcpu, 4, "sigp restart %x to handle userspace", - cpu_addr); spin_unlock_bh(&li->lock); out: spin_unlock(&fi->lock); @@ -417,17 +446,31 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) rc = __sigp_set_prefix(vcpu, cpu_addr, parameter, &vcpu->run->s.regs.gprs[r1]); break; + case SIGP_COND_EMERGENCY_SIGNAL: + rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter, + &vcpu->run->s.regs.gprs[r1]); + break; case SIGP_SENSE_RUNNING: vcpu->stat.instruction_sigp_sense_running++; rc = __sigp_sense_running(vcpu, cpu_addr, &vcpu->run->s.regs.gprs[r1]); break; + case SIGP_START: + rc = sigp_check_callable(vcpu, cpu_addr); + if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) + rc = -EOPNOTSUPP; /* Handle START in user space */ + break; case SIGP_RESTART: vcpu->stat.instruction_sigp_restart++; - rc = __sigp_restart(vcpu, cpu_addr); - if (rc == SIGP_CC_BUSY) - break; - /* user space must know about restart */ + rc = sigp_check_callable(vcpu, cpu_addr); + if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) { + VCPU_EVENT(vcpu, 4, + "sigp restart %x to handle userspace", + cpu_addr); + /* user space must know about restart */ + rc = -EOPNOTSUPP; + } + break; default: return -EOPNOTSUPP; } @@ 
-435,7 +478,6 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) if (rc < 0) return rc; - vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); - vcpu->arch.sie_block->gpsw.mask |= (rc & 3ul) << 44; + kvm_s390_set_psw_cc(vcpu, rc); return 0; } diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index 97e03caf7825..dbdab3e7a1a6 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -78,11 +78,14 @@ static size_t copy_in_kernel(size_t count, void __user *to, * contains the (negative) exception code. */ #ifdef CONFIG_64BIT + static unsigned long follow_table(struct mm_struct *mm, unsigned long address, int write) { unsigned long *table = (unsigned long *)__pa(mm->pgd); + if (unlikely(address > mm->context.asce_limit - 1)) + return -0x38UL; switch (mm->context.asce_bits & _ASCE_TYPE_MASK) { case _ASCE_TYPE_REGION1: table = table + ((address >> 53) & 0x7ff); diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 7d6ba9db1be9..e0fc24db234a 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -3,8 +3,9 @@ # avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no) +avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\ + $(comma)4)$(comma)%ymm2,yes,no) -obj-$(CONFIG_CRYPTO_ABLK_HELPER_X86) += ablk_helper.o obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o diff --git a/arch/x86/crypto/ablk_helper.c b/arch/x86/crypto/ablk_helper.c deleted file mode 100644 index 43282fe04a8b..000000000000 --- a/arch/x86/crypto/ablk_helper.c +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Shared async block cipher helpers - * - * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> - * - * Based on aesni-intel_glue.c by: - * Copyright (C) 2008, Intel Corp. - * Author: Huang Ying <ying.huang@intel.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA - * - */ - -#include <linux/kernel.h> -#include <linux/crypto.h> -#include <linux/init.h> -#include <linux/module.h> -#include <crypto/algapi.h> -#include <crypto/cryptd.h> -#include <asm/i387.h> -#include <asm/crypto/ablk_helper.h> - -int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, - unsigned int key_len) -{ - struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); - struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base; - int err; - - crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm) - & CRYPTO_TFM_REQ_MASK); - err = crypto_ablkcipher_setkey(child, key, key_len); - crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child) - & CRYPTO_TFM_RES_MASK); - return err; -} -EXPORT_SYMBOL_GPL(ablk_set_key); - -int __ablk_encrypt(struct ablkcipher_request *req) -{ - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); - struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); - struct blkcipher_desc desc; - - desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); - desc.info = req->info; - desc.flags = 0; - - return crypto_blkcipher_crt(desc.tfm)->encrypt( - &desc, req->dst, req->src, req->nbytes); -} -EXPORT_SYMBOL_GPL(__ablk_encrypt); - -int ablk_encrypt(struct ablkcipher_request *req) -{ - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); - struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); - - if (!irq_fpu_usable()) { - struct ablkcipher_request *cryptd_req = - ablkcipher_request_ctx(req); - - memcpy(cryptd_req, req, sizeof(*req)); - ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); - - return crypto_ablkcipher_encrypt(cryptd_req); - } else { - return __ablk_encrypt(req); - } -} -EXPORT_SYMBOL_GPL(ablk_encrypt); - -int ablk_decrypt(struct ablkcipher_request *req) -{ - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); - struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); - - if (!irq_fpu_usable()) { - struct ablkcipher_request *cryptd_req = - ablkcipher_request_ctx(req); - - memcpy(cryptd_req, req, sizeof(*req)); - ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); - - return crypto_ablkcipher_decrypt(cryptd_req); - } else { - struct blkcipher_desc desc; - - desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); - desc.info = req->info; - desc.flags = 0; - - return crypto_blkcipher_crt(desc.tfm)->decrypt( - &desc, req->dst, req->src, req->nbytes); - } -} -EXPORT_SYMBOL_GPL(ablk_decrypt); - -void ablk_exit(struct crypto_tfm *tfm) -{ - struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm); - - cryptd_free_ablkcipher(ctx->cryptd_tfm); -} -EXPORT_SYMBOL_GPL(ablk_exit); - -int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name) -{ - struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm); - struct cryptd_ablkcipher *cryptd_tfm; - - cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, 0, 0); - if (IS_ERR(cryptd_tfm)) - return PTR_ERR(cryptd_tfm); - - ctx->cryptd_tfm = cryptd_tfm; - tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) + - crypto_ablkcipher_reqsize(&cryptd_tfm->base); - - return 0; -} -EXPORT_SYMBOL_GPL(ablk_init_common); - -int ablk_init(struct crypto_tfm *tfm) -{ - char drv_name[CRYPTO_MAX_ALG_NAME]; - - snprintf(drv_name, sizeof(drv_name), "__driver-%s", - 
crypto_tfm_alg_driver_name(tfm)); - - return ablk_init_common(tfm, drv_name); -} -EXPORT_SYMBOL_GPL(ablk_init); - -MODULE_LICENSE("GPL"); diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index f80e668785c0..835488b745ee 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -34,7 +34,7 @@ #include <asm/cpu_device_id.h> #include <asm/i387.h> #include <asm/crypto/aes.h> -#include <asm/crypto/ablk_helper.h> +#include <crypto/ablk_helper.h> #include <crypto/scatterwalk.h> #include <crypto/internal/aead.h> #include <linux/workqueue.h> diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c index 414fe5d7946b..4209a76fcdaa 100644 --- a/arch/x86/crypto/camellia_aesni_avx2_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c @@ -14,6 +14,7 @@ #include <linux/types.h> #include <linux/crypto.h> #include <linux/err.h> +#include <crypto/ablk_helper.h> #include <crypto/algapi.h> #include <crypto/ctr.h> #include <crypto/lrw.h> @@ -21,7 +22,6 @@ #include <asm/xcr.h> #include <asm/xsave.h> #include <asm/crypto/camellia.h> -#include <asm/crypto/ablk_helper.h> #include <asm/crypto/glue_helper.h> #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c index 37fd0c0a81ea..87a041a10f4a 100644 --- a/arch/x86/crypto/camellia_aesni_avx_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx_glue.c @@ -14,6 +14,7 @@ #include <linux/types.h> #include <linux/crypto.h> #include <linux/err.h> +#include <crypto/ablk_helper.h> #include <crypto/algapi.h> #include <crypto/ctr.h> #include <crypto/lrw.h> @@ -21,7 +22,6 @@ #include <asm/xcr.h> #include <asm/xsave.h> #include <asm/crypto/camellia.h> -#include <asm/crypto/ablk_helper.h> #include <asm/crypto/glue_helper.h> #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c index c6631813dc11..e6a3700489b9 100644 --- a/arch/x86/crypto/cast5_avx_glue.c +++ b/arch/x86/crypto/cast5_avx_glue.c @@ -26,13 +26,13 @@ #include <linux/types.h> #include <linux/crypto.h> #include <linux/err.h> +#include <crypto/ablk_helper.h> #include <crypto/algapi.h> #include <crypto/cast5.h> #include <crypto/cryptd.h> #include <crypto/ctr.h> #include <asm/xcr.h> #include <asm/xsave.h> -#include <asm/crypto/ablk_helper.h> #include <asm/crypto/glue_helper.h> #define CAST5_PARALLEL_BLOCKS 16 diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c index 8d0dfb86a559..09f3677393e4 100644 --- a/arch/x86/crypto/cast6_avx_glue.c +++ b/arch/x86/crypto/cast6_avx_glue.c @@ -28,6 +28,7 @@ #include <linux/types.h> #include <linux/crypto.h> #include <linux/err.h> +#include <crypto/ablk_helper.h> #include <crypto/algapi.h> #include <crypto/cast6.h> #include <crypto/cryptd.h> @@ -37,7 +38,6 @@ #include <crypto/xts.h> #include <asm/xcr.h> #include <asm/xsave.h> -#include <asm/crypto/ablk_helper.h> #include <asm/crypto/glue_helper.h> #define CAST6_PARALLEL_BLOCKS 8 diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c index 23aabc6c20a5..2fae489b1524 100644 --- a/arch/x86/crypto/serpent_avx2_glue.c +++ b/arch/x86/crypto/serpent_avx2_glue.c @@ -14,6 +14,7 @@ #include <linux/types.h> #include <linux/crypto.h> #include <linux/err.h> +#include <crypto/ablk_helper.h> #include <crypto/algapi.h> #include <crypto/ctr.h> #include <crypto/lrw.h> @@ -22,7 +23,6 @@ #include <asm/xcr.h> #include <asm/xsave.h> 
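/*
 * (Aside, not part of the patch: these x86 crypto glue modules now get
 * the async-block-cipher helper from the shared <crypto/ablk_helper.h>
 * rather than the arch-private copy deleted above; the logic is the one
 * visible in the removed ablk_encrypt().  Its core dispatch, sketched
 * with hypothetical my_*() names; irq_fpu_usable() is the real check the
 * deleted code used, since SIMD registers may only be touched where the
 * FPU state can be saved, and everywhere else the request is bounced to
 * a cryptd worker thread.)
 */
#include <stdbool.h>

struct my_req;				/* stand-in for ablkcipher_request */

bool irq_fpu_usable(void);		/* may we touch FPU/SIMD state here? */
int my_queue_to_cryptd(struct my_req *req);	/* hypothetical async path */
int my_simd_encrypt(struct my_req *req);	/* hypothetical direct path */

static inline int my_ablk_encrypt(struct my_req *req)
{
	if (!irq_fpu_usable())		/* e.g. called from interrupt context */
		return my_queue_to_cryptd(req);
	return my_simd_encrypt(req);
}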
#include <asm/crypto/serpent-avx.h> -#include <asm/crypto/ablk_helper.h> #include <asm/crypto/glue_helper.h> #define SERPENT_AVX2_PARALLEL_BLOCKS 16 diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c index 9ae83cf8d21e..ff4870870972 100644 --- a/arch/x86/crypto/serpent_avx_glue.c +++ b/arch/x86/crypto/serpent_avx_glue.c @@ -28,6 +28,7 @@ #include <linux/types.h> #include <linux/crypto.h> #include <linux/err.h> +#include <crypto/ablk_helper.h> #include <crypto/algapi.h> #include <crypto/serpent.h> #include <crypto/cryptd.h> @@ -38,7 +39,6 @@ #include <asm/xcr.h> #include <asm/xsave.h> #include <asm/crypto/serpent-avx.h> -#include <asm/crypto/ablk_helper.h> #include <asm/crypto/glue_helper.h> /* 8-way parallel cipher functions */ diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c index 97a356ece24d..8c95f8637306 100644 --- a/arch/x86/crypto/serpent_sse2_glue.c +++ b/arch/x86/crypto/serpent_sse2_glue.c @@ -34,6 +34,7 @@ #include <linux/types.h> #include <linux/crypto.h> #include <linux/err.h> +#include <crypto/ablk_helper.h> #include <crypto/algapi.h> #include <crypto/serpent.h> #include <crypto/cryptd.h> @@ -42,7 +43,6 @@ #include <crypto/lrw.h> #include <crypto/xts.h> #include <asm/crypto/serpent-sse2.h> -#include <asm/crypto/ablk_helper.h> #include <asm/crypto/glue_helper.h> static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src) diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c index 50226c4b86ed..f248546da1ca 100644 --- a/arch/x86/crypto/sha256_ssse3_glue.c +++ b/arch/x86/crypto/sha256_ssse3_glue.c @@ -281,7 +281,7 @@ static int __init sha256_ssse3_mod_init(void) /* allow AVX to override SSSE3, it's a little faster */ if (avx_usable()) { #ifdef CONFIG_AS_AVX2 - if (boot_cpu_has(X86_FEATURE_AVX2)) + if (boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_BMI2)) sha256_transform_asm = sha256_transform_rorx; else #endif @@ -319,4 +319,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated"); MODULE_ALIAS("sha256"); -MODULE_ALIAS("sha384"); +MODULE_ALIAS("sha224"); diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c index a62ba541884e..4e3c665be129 100644 --- a/arch/x86/crypto/twofish_avx_glue.c +++ b/arch/x86/crypto/twofish_avx_glue.c @@ -28,6 +28,7 @@ #include <linux/types.h> #include <linux/crypto.h> #include <linux/err.h> +#include <crypto/ablk_helper.h> #include <crypto/algapi.h> #include <crypto/twofish.h> #include <crypto/cryptd.h> @@ -39,7 +40,6 @@ #include <asm/xcr.h> #include <asm/xsave.h> #include <asm/crypto/twofish.h> -#include <asm/crypto/ablk_helper.h> #include <asm/crypto/glue_helper.h> #include <crypto/scatterwalk.h> #include <linux/workqueue.h> diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 89270b4318db..e099f9502ace 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -216,6 +216,7 @@ #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */ #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */ #define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */ +#define X86_FEATURE_MPX (9*32+14) /* Memory Protection Extension */ #define X86_FEATURE_RDSEED (9*32+18) /* The RDSEED instruction */ #define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */ #define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */ diff 
--git a/arch/x86/include/asm/crypto/ablk_helper.h b/arch/x86/include/asm/crypto/ablk_helper.h deleted file mode 100644 index 4f93df50c23e..000000000000 --- a/arch/x86/include/asm/crypto/ablk_helper.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Shared async block cipher helpers - */ - -#ifndef _CRYPTO_ABLK_HELPER_H -#define _CRYPTO_ABLK_HELPER_H - -#include <linux/crypto.h> -#include <linux/kernel.h> -#include <crypto/cryptd.h> - -struct async_helper_ctx { - struct cryptd_ablkcipher *cryptd_tfm; -}; - -extern int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, - unsigned int key_len); - -extern int __ablk_encrypt(struct ablkcipher_request *req); - -extern int ablk_encrypt(struct ablkcipher_request *req); - -extern int ablk_decrypt(struct ablkcipher_request *req); - -extern void ablk_exit(struct crypto_tfm *tfm); - -extern int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name); - -extern int ablk_init(struct crypto_tfm *tfm); - -#endif /* _CRYPTO_ABLK_HELPER_H */ diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 7b034a4057f9..b7845a126792 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -370,6 +370,26 @@ struct ymmh_struct { u32 ymmh_space[64]; }; +struct lwp_struct { + u64 lwpcb_addr; + u32 flags; + u32 buf_head_offset; + u64 buf_base; + u32 buf_size; + u32 filters; + u64 saved_event_record[4]; + u32 event_counter[16]; +}; + +struct bndregs_struct { + u64 bndregs[8]; +} __packed; + +struct bndcsr_struct { + u64 cfg_reg_u; + u64 status_reg; +} __packed; + struct xsave_hdr_struct { u64 xstate_bv; u64 reserved1[2]; @@ -380,6 +400,9 @@ struct xsave_struct { struct i387_fxsave_struct i387; struct xsave_hdr_struct xsave_hdr; struct ymmh_struct ymmh; + struct lwp_struct lwp; + struct bndregs_struct bndregs; + struct bndcsr_struct bndcsr; /* new processor state extensions will go here */ } __attribute__ ((packed, aligned (64))); diff --git a/arch/x86/include/asm/simd.h b/arch/x86/include/asm/simd.h new file mode 100644 index 000000000000..ee80b92f0096 --- /dev/null +++ b/arch/x86/include/asm/simd.h @@ -0,0 +1,11 @@ + +#include <asm/i387.h> + +/* + * may_use_simd - whether it is allowable at this time to issue SIMD + * instructions or access the SIMD register file + */ +static __must_check inline bool may_use_simd(void) +{ + return irq_fpu_usable(); +} diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 966502d4682e..2067264fb7f5 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -100,6 +100,7 @@ #define VMX_MISC_PREEMPTION_TIMER_RATE_MASK 0x0000001f #define VMX_MISC_SAVE_EFER_LMA 0x00000020 +#define VMX_MISC_ACTIVITY_HLT 0x00000040 /* VMCS Encodings */ enum vmcs_field { diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h index 0415cdabb5a6..554738963b28 100644 --- a/arch/x86/include/asm/xsave.h +++ b/arch/x86/include/asm/xsave.h @@ -9,6 +9,8 @@ #define XSTATE_FP 0x1 #define XSTATE_SSE 0x2 #define XSTATE_YMM 0x4 +#define XSTATE_BNDREGS 0x8 +#define XSTATE_BNDCSR 0x10 #define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE) @@ -20,10 +22,14 @@ #define XSAVE_YMM_SIZE 256 #define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET) -/* - * These are the features that the OS can handle currently. 
- */ -#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) +/* Supported features which support lazy state saving */ +#define XSTATE_LAZY (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) + +/* Supported features which require eager state saving */ +#define XSTATE_EAGER (XSTATE_BNDREGS | XSTATE_BNDCSR) + +/* All currently supported features */ +#define XCNTXT_MASK (XSTATE_LAZY | XSTATE_EAGER) #ifdef CONFIG_X86_64 #define REX_PREFIX "0x48, " diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index 422fd8223470..a4b451c6addf 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c @@ -562,6 +562,16 @@ static void __init xstate_enable_boot_cpu(void) if (cpu_has_xsaveopt && eagerfpu != DISABLE) eagerfpu = ENABLE; + if (pcntxt_mask & XSTATE_EAGER) { + if (eagerfpu == DISABLE) { + pr_err("eagerfpu not present, disabling some xstate features: 0x%llx\n", + pcntxt_mask & XSTATE_EAGER); + pcntxt_mask &= ~XSTATE_EAGER; + } else { + eagerfpu = ENABLE; + } + } + pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x\n", pcntxt_mask, xstate_size); } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 5439117d5c4c..206715bc603d 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -432,7 +432,7 @@ static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu) u8 val; if (pv_eoi_get_user(vcpu, &val) < 0) apic_debug("Can't read EOI MSR value: 0x%llx\n", - (unsigned long long)vcpi->arch.pv_eoi.msr_val); + (unsigned long long)vcpu->arch.pv_eoi.msr_val); return val & 0x1; } @@ -440,7 +440,7 @@ static void pv_eoi_set_pending(struct kvm_vcpu *vcpu) { if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) { apic_debug("Can't set EOI MSR value: 0x%llx\n", - (unsigned long long)vcpi->arch.pv_eoi.msr_val); + (unsigned long long)vcpu->arch.pv_eoi.msr_val); return; } __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention); @@ -450,7 +450,7 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu) { if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) { apic_debug("Can't clear EOI MSR value: 0x%llx\n", - (unsigned long long)vcpi->arch.pv_eoi.msr_val); + (unsigned long long)vcpu->arch.pv_eoi.msr_val); return; } __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 40772ef0f2b1..31a570287fcc 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2659,6 +2659,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, int emulate = 0; gfn_t pseudo_gfn; + if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + return 0; + for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { if (iterator.level == level) { mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index b2fe1c252f35..7661eb171936 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -418,6 +418,8 @@ struct vcpu_vmx { u64 msr_host_kernel_gs_base; u64 msr_guest_kernel_gs_base; #endif + u32 vm_entry_controls_shadow; + u32 vm_exit_controls_shadow; /* * loaded_vmcs points to the VMCS currently used in this vcpu. For a * non-nested (L1) guest, it always points to vmcs01. 
For a nested @@ -1326,6 +1328,62 @@ static void vmcs_set_bits(unsigned long field, u32 mask) vmcs_writel(field, vmcs_readl(field) | mask); } +static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val) +{ + vmcs_write32(VM_ENTRY_CONTROLS, val); + vmx->vm_entry_controls_shadow = val; +} + +static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val) +{ + if (vmx->vm_entry_controls_shadow != val) + vm_entry_controls_init(vmx, val); +} + +static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx) +{ + return vmx->vm_entry_controls_shadow; +} + + +static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val) +{ + vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val); +} + +static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val) +{ + vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val); +} + +static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val) +{ + vmcs_write32(VM_EXIT_CONTROLS, val); + vmx->vm_exit_controls_shadow = val; +} + +static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val) +{ + if (vmx->vm_exit_controls_shadow != val) + vm_exit_controls_init(vmx, val); +} + +static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx) +{ + return vmx->vm_exit_controls_shadow; +} + + +static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val) +{ + vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val); +} + +static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val) +{ + vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val); +} + static void vmx_segment_cache_clear(struct vcpu_vmx *vmx) { vmx->segment_cache.bitmask = 0; @@ -1410,11 +1468,11 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu) vmcs_write32(EXCEPTION_BITMAP, eb); } -static void clear_atomic_switch_msr_special(unsigned long entry, - unsigned long exit) +static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, + unsigned long entry, unsigned long exit) { - vmcs_clear_bits(VM_ENTRY_CONTROLS, entry); - vmcs_clear_bits(VM_EXIT_CONTROLS, exit); + vm_entry_controls_clearbit(vmx, entry); + vm_exit_controls_clearbit(vmx, exit); } static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) @@ -1425,14 +1483,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) switch (msr) { case MSR_EFER: if (cpu_has_load_ia32_efer) { - clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER, + clear_atomic_switch_msr_special(vmx, + VM_ENTRY_LOAD_IA32_EFER, VM_EXIT_LOAD_IA32_EFER); return; } break; case MSR_CORE_PERF_GLOBAL_CTRL: if (cpu_has_load_perf_global_ctrl) { - clear_atomic_switch_msr_special( + clear_atomic_switch_msr_special(vmx, VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL); return; @@ -1453,14 +1512,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr); } -static void add_atomic_switch_msr_special(unsigned long entry, - unsigned long exit, unsigned long guest_val_vmcs, - unsigned long host_val_vmcs, u64 guest_val, u64 host_val) +static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, + unsigned long entry, unsigned long exit, + unsigned long guest_val_vmcs, unsigned long host_val_vmcs, + u64 guest_val, u64 host_val) { vmcs_write64(guest_val_vmcs, guest_val); vmcs_write64(host_val_vmcs, host_val); - vmcs_set_bits(VM_ENTRY_CONTROLS, entry); - vmcs_set_bits(VM_EXIT_CONTROLS, exit); + vm_entry_controls_setbit(vmx, entry); + 
vm_exit_controls_setbit(vmx, exit); } static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, @@ -1472,7 +1532,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, switch (msr) { case MSR_EFER: if (cpu_has_load_ia32_efer) { - add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER, + add_atomic_switch_msr_special(vmx, + VM_ENTRY_LOAD_IA32_EFER, VM_EXIT_LOAD_IA32_EFER, GUEST_IA32_EFER, HOST_IA32_EFER, @@ -1482,7 +1543,7 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, break; case MSR_CORE_PERF_GLOBAL_CTRL: if (cpu_has_load_perf_global_ctrl) { - add_atomic_switch_msr_special( + add_atomic_switch_msr_special(vmx, VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, GUEST_IA32_PERF_GLOBAL_CTRL, @@ -2279,6 +2340,7 @@ static __init void nested_vmx_setup_ctls_msrs(void) rdmsr(MSR_IA32_VMX_MISC, nested_vmx_misc_low, nested_vmx_misc_high); nested_vmx_misc_low &= VMX_MISC_PREEMPTION_TIMER_RATE_MASK | VMX_MISC_SAVE_EFER_LMA; + nested_vmx_misc_low |= VMX_MISC_ACTIVITY_HLT; nested_vmx_misc_high = 0; } @@ -3182,14 +3244,10 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) vmx_load_host_state(to_vmx(vcpu)); vcpu->arch.efer = efer; if (efer & EFER_LMA) { - vmcs_write32(VM_ENTRY_CONTROLS, - vmcs_read32(VM_ENTRY_CONTROLS) | - VM_ENTRY_IA32E_MODE); + vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); msr->data = efer; } else { - vmcs_write32(VM_ENTRY_CONTROLS, - vmcs_read32(VM_ENTRY_CONTROLS) & - ~VM_ENTRY_IA32E_MODE); + vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); msr->data = efer & ~EFER_LME; } @@ -3217,9 +3275,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu) static void exit_lmode(struct kvm_vcpu *vcpu) { - vmcs_write32(VM_ENTRY_CONTROLS, - vmcs_read32(VM_ENTRY_CONTROLS) - & ~VM_ENTRY_IA32E_MODE); + vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA); } @@ -4346,10 +4402,11 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) ++vmx->nmsrs; } - vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl); + + vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl); /* 22.2.1, 20.8.1 */ - vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl); + vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl); vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL); set_cr4_guest_host_mask(vmx); @@ -5080,10 +5137,14 @@ static int handle_dr(struct kvm_vcpu *vcpu) reg = DEBUG_REG_ACCESS_REG(exit_qualification); if (exit_qualification & TYPE_MOV_FROM_DR) { unsigned long val; - if (!kvm_get_dr(vcpu, dr, &val)) - kvm_register_write(vcpu, reg, val); + + if (kvm_get_dr(vcpu, dr, &val)) + return 1; + kvm_register_write(vcpu, reg, val); } else - kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]); + if (kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg])) + return 1; + skip_emulated_instruction(vcpu); return 1; } @@ -6460,11 +6521,8 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, int size; u8 b; - if (nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING)) - return 1; - if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) - return 0; + return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING); exit_qualification = vmcs_readl(EXIT_QUALIFICATION); @@ -7332,8 +7390,8 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu) struct vcpu_vmx *vmx = to_vmx(vcpu); free_vpid(vmx); - free_nested(vmx); free_loaded_vmcs(vmx->loaded_vmcs); + free_nested(vmx); kfree(vmx->guest_msrs); kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, vmx); @@ -7706,6 +7764,11 @@ static void 
prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) else vmcs_write64(APIC_ACCESS_ADDR, page_to_phys(vmx->nested.apic_access_page)); + } else if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) { + exec_control |= + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; + vmcs_write64(APIC_ACCESS_ADDR, + page_to_phys(vcpu->kvm->arch.apic_access_page)); } vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); @@ -7759,12 +7822,12 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) exit_control = vmcs_config.vmexit_ctrl; if (vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER) exit_control |= VM_EXIT_SAVE_VMX_PREEMPTION_TIMER; - vmcs_write32(VM_EXIT_CONTROLS, exit_control); + vm_exit_controls_init(vmx, exit_control); /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are * emulated by vmx_set_efer(), below. */ - vmcs_write32(VM_ENTRY_CONTROLS, + vm_entry_controls_init(vmx, (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER & ~VM_ENTRY_IA32E_MODE) | (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE)); @@ -7882,7 +7945,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) return 1; } - if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE) { + if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && + vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) { nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); return 1; } @@ -8011,6 +8075,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) prepare_vmcs02(vcpu, vmcs12); + if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) + return kvm_emulate_halt(vcpu); + /* * Note no nested_vmx_succeed or nested_vmx_fail here. At this point * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet @@ -8186,7 +8253,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) vmcs12->vm_entry_controls = (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | - (vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE); + (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); /* TODO: These cannot have changed unless we have MSR bitmaps and * the relevant bit asks not to trap the change */ @@ -8390,6 +8457,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu) vcpu->cpu = cpu; put_cpu(); + vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS)); + vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS)); vmx_segment_cache_clear(vmx); /* if no vmcs02 cache requested, remove the one we used */ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 21ef1ba184ae..1dc0359e2095 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5865,6 +5865,11 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) kvm_apic_update_tmr(vcpu, tmr); } +/* + * Returns 1 to let __vcpu_run() continue the guest execution loop without + * exiting to the userspace. Otherwise, the value will be returned to the + * userspace. + */ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) { int r; @@ -6125,7 +6130,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) } if (need_resched()) { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - kvm_resched(vcpu); + cond_resched(); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); } }
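The vmx.c changes above all follow one pattern: instead of a vmcs_read32()/vmcs_write32() round trip for every bit flip, the current value of the VM entry/exit controls is kept in a shadow field in struct vcpu_vmx, and the hardware field is written only when the value actually changes; nested_vmx_vmexit() re-seeds the shadows via vmcs_read32(), presumably because the switch from vmcs02 back to vmcs01 leaves the cached values stale. A minimal self-contained sketch of that caching idiom in plain C (hw_write() and CTL_FIELD are illustrative stand-ins for vmcs_write32() and a VMCS field encoding, not kernel API):

	#include <stdint.h>
	#include <stdio.h>

	#define CTL_FIELD 0x4012u	/* illustrative field id */

	static unsigned int hw_writes;	/* counts "expensive" writes */

	static void hw_write(uint32_t field, uint32_t val)
	{
		(void)field; (void)val;
		hw_writes++;		/* stands in for a costly VMWRITE */
	}

	struct ctl_shadow { uint32_t cached; };

	static void ctl_init(struct ctl_shadow *s, uint32_t val)
	{
		hw_write(CTL_FIELD, val);	/* unconditionally hit hardware */
		s->cached = val;
	}

	static void ctl_set(struct ctl_shadow *s, uint32_t val)
	{
		if (s->cached != val)		/* skip redundant writes */
			ctl_init(s, val);
	}

	static void ctl_setbit(struct ctl_shadow *s, uint32_t bit)
	{
		ctl_set(s, s->cached | bit);
	}

	static void ctl_clearbit(struct ctl_shadow *s, uint32_t bit)
	{
		ctl_set(s, s->cached & ~bit);
	}

	int main(void)
	{
		struct ctl_shadow entry;

		ctl_init(&entry, 0x11ff);	/* 1st hardware write */
		ctl_setbit(&entry, 0x200);	/* 2nd hardware write */
		ctl_setbit(&entry, 0x200);	/* cached, no write */
		ctl_clearbit(&entry, 0x200);	/* 3rd hardware write */
		printf("hardware writes: %u\n", hw_writes);	/* prints 3 */
		return 0;
	}

The demo prints "hardware writes: 3"; the second ctl_setbit() is absorbed by the cache, which is exactly the saving vmx_set_efer() and exit_lmode() now get by flipping VM_ENTRY_IA32E_MODE through the shadow helpers instead of read-modify-writing the VMCS field. (Also recurring above: the kvm_resched() calls in book3s_hv.c and x86.c become plain cond_resched(), which is effectively all kvm_resched() did.)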