author     Stephen Rothwell <sfr@canb.auug.org.au>  2017-02-09 14:25:02 +1100
committer  Stephen Rothwell <sfr@canb.auug.org.au>  2017-02-09 14:25:02 +1100
commit     8fd5bcd52b56746829b5df72a76a50377ff66688 (patch)
tree       c47ec8c4cd0fc9ab58e40941d339894e76b6b488 /arch
parent     f494e0835d2db55f49ba1c97ce7f85b1fccacb45 (diff)
parent     7b6b46311a8562fb3a9e035ed6ffab6d49c28886 (diff)
Merge remote-tracking branch 'kvm-arm/next'
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/kvm_host.h      3
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h      12
-rw-r--r--  arch/arm/include/uapi/asm/kvm.h     13
-rw-r--r--  arch/arm/kvm/Makefile                5
-rw-r--r--  arch/arm/kvm/arm.c                   4
-rw-r--r--  arch/arm/kvm/mmu.c                  20
-rw-r--r--  arch/arm/kvm/reset.c                 9
-rw-r--r--  arch/arm/kvm/vgic-v3-coproc.c       35
-rw-r--r--  arch/arm64/include/asm/kvm_host.h    3
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h     6
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h   13
-rw-r--r--  arch/arm64/kvm/Makefile              4
-rw-r--r--  arch/arm64/kvm/reset.c               9
-rw-r--r--  arch/arm64/kvm/sys_regs.c           92
-rw-r--r--  arch/arm64/kvm/sys_regs.h            4
-rw-r--r--  arch/arm64/kvm/vgic-sys-reg-v3.c   346
16 files changed, 524 insertions(+), 54 deletions(-)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index d5423ab15ed5..cc495d799c67 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -60,9 +60,6 @@ struct kvm_arch {
/* The last vcpu id that ran on each physical CPU */
int __percpu *last_vcpu_ran;
- /* Timer */
- struct arch_timer_kvm timer;
-
/*
* Anything that is not used directly from assembly code goes
* here.
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 74a44727f8e1..95f38dcd611d 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -129,8 +129,7 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
kvm_pfn_t pfn,
- unsigned long size,
- bool ipa_uncached)
+ unsigned long size)
{
/*
* If we are going to insert an instruction page and the icache is
@@ -150,18 +149,12 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
* and iterate over the range.
*/
- bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
-
VM_BUG_ON(size & ~PAGE_MASK);
- if (!need_flush && !icache_is_pipt())
- goto vipt_cache;
-
while (size) {
void *va = kmap_atomic_pfn(pfn);
- if (need_flush)
- kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+ kvm_flush_dcache_to_poc(va, PAGE_SIZE);
if (icache_is_pipt())
__cpuc_coherent_user_range((unsigned long)va,
@@ -173,7 +166,6 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
kunmap_atomic(va);
}
-vipt_cache:
if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
/* any kind of VIPT cache */
__flush_icache_all();
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index af05f8e0903e..6ebd3e6a1fd1 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -181,10 +181,23 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2
#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32
#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
+#define KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT 32
+#define KVM_DEV_ARM_VGIC_V3_MPIDR_MASK \
+ (0xffffffffULL << KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT)
#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
+#define KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK (0xffff)
#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3
#define KVM_DEV_ARM_VGIC_GRP_CTRL 4
+#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
+#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
+#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7
+#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
+#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
+ (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
+#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff
+#define VGIC_LEVEL_INFO_LINE_LEVEL 0
+
#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
/* KVM_IRQ_LINE irq field index values */
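The new KVM_DEV_ARM_VGIC_V3_MPIDR_* masks and the KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS group above define how userspace addresses an individual vcpu's GICv3 CPU interface registers through the device-attribute API. A minimal userspace sketch, not part of the patch: vgic_v3_get_cpu_sysreg is a hypothetical helper name; only the KVM_DEV_ARM_VGIC_* macros and the KVM_GET_DEVICE_ATTR ioctl come from the kernel headers.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Hypothetical helper: read one GICv3 CPU interface register of the vcpu
 * whose packed MPIDR affinity fields are 'mpidr'. The attr carries the
 * MPIDR in bits [63:32] and the 16-bit system-register instruction
 * encoding in the low bits, per the masks defined above.
 */
static int vgic_v3_get_cpu_sysreg(int vgic_fd, uint64_t mpidr,
				  uint64_t sysreg_instr, uint64_t *val)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS,
		.attr  = ((mpidr << KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT) &
			  KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) |
			 (sysreg_instr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK),
		.addr  = (uint64_t)(unsigned long)val,
	};

	return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
}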
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index d571243ab4d1..7b3670c2ae7b 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -7,7 +7,7 @@ ifeq ($(plus_virt),+virt)
plus_virt_def := -DREQUIRES_VIRT=1
endif
-ccflags-y += -Iarch/arm/kvm
+ccflags-y += -Iarch/arm/kvm -Ivirt/kvm/arm/vgic
CFLAGS_arm.o := -I. $(plus_virt_def)
CFLAGS_mmu.o := -I.
@@ -20,7 +20,7 @@ kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vf
obj-$(CONFIG_KVM_ARM_HOST) += hyp/
obj-y += kvm-arm.o init.o interrupts.o
obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
-obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
+obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o vgic-v3-coproc.o
obj-y += $(KVM)/arm/aarch32.o
obj-y += $(KVM)/arm/vgic/vgic.o
@@ -33,5 +33,6 @@ obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
obj-y += $(KVM)/arm/vgic/vgic-mmio-v3.o
obj-y += $(KVM)/arm/vgic/vgic-kvm-device.o
obj-y += $(KVM)/arm/vgic/vgic-its.o
+obj-y += $(KVM)/arm/vgic/vgic-debug.o
obj-y += $(KVM)/irqchip.o
obj-y += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 9d7446456e0c..21c493a9e5c9 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -135,7 +135,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
goto out_free_stage2_pgd;
kvm_vgic_early_init(kvm);
- kvm_timer_init(kvm);
/* Mark the initial VMID generation invalid */
kvm->arch.vmid_gen = 0;
@@ -301,7 +300,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
- return kvm_timer_should_fire(vcpu);
+ return kvm_timer_should_fire(vcpu_vtimer(vcpu)) ||
+ kvm_timer_should_fire(vcpu_ptimer(vcpu));
}
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index a5265edbeeab..962616fd4ddd 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1232,9 +1232,9 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
}
static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
- unsigned long size, bool uncached)
+ unsigned long size)
{
- __coherent_cache_guest_page(vcpu, pfn, size, uncached);
+ __coherent_cache_guest_page(vcpu, pfn, size);
}
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
@@ -1250,7 +1250,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct vm_area_struct *vma;
kvm_pfn_t pfn;
pgprot_t mem_type = PAGE_S2;
- bool fault_ipa_uncached;
bool logging_active = memslot_is_logging(memslot);
unsigned long flags = 0;
@@ -1337,8 +1336,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (!hugetlb && !force_pte)
hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
- fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT;
-
if (hugetlb) {
pmd_t new_pmd = pfn_pmd(pfn, mem_type);
new_pmd = pmd_mkhuge(new_pmd);
@@ -1346,7 +1343,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
new_pmd = kvm_s2pmd_mkwrite(new_pmd);
kvm_set_pfn_dirty(pfn);
}
- coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
+ coherent_cache_guest_page(vcpu, pfn, PMD_SIZE);
ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
} else {
pte_t new_pte = pfn_pte(pfn, mem_type);
@@ -1356,7 +1353,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
kvm_set_pfn_dirty(pfn);
mark_page_dirty(kvm, gfn);
}
- coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
+ coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE);
ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
}
@@ -1879,15 +1876,6 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
unsigned long npages)
{
- /*
- * Readonly memslots are not incoherent with the caches by definition,
- * but in practice, they are used mostly to emulate ROMs or NOR flashes
- * that the guest may consider devices and hence map as uncached.
- * To prevent incoherency issues in these cases, tag all readonly
- * regions as incoherent.
- */
- if (slot->flags & KVM_MEM_READONLY)
- slot->flags |= KVM_MEMSLOT_INCOHERENT;
return 0;
}
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index 4b5e802e57d1..1da8b2d14550 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -37,6 +37,11 @@ static struct kvm_regs cortexa_regs_reset = {
.usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
};
+static const struct kvm_irq_level cortexa_ptimer_irq = {
+ { .irq = 30 },
+ .level = 1,
+};
+
static const struct kvm_irq_level cortexa_vtimer_irq = {
{ .irq = 27 },
.level = 1,
@@ -58,6 +63,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
struct kvm_regs *reset_regs;
const struct kvm_irq_level *cpu_vtimer_irq;
+ const struct kvm_irq_level *cpu_ptimer_irq;
switch (vcpu->arch.target) {
case KVM_ARM_TARGET_CORTEX_A7:
@@ -65,6 +71,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
reset_regs = &cortexa_regs_reset;
vcpu->arch.midr = read_cpuid_id();
cpu_vtimer_irq = &cortexa_vtimer_irq;
+ cpu_ptimer_irq = &cortexa_ptimer_irq;
break;
default:
return -ENODEV;
@@ -77,5 +84,5 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
kvm_reset_coprocs(vcpu);
/* Reset arch_timer context */
- return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
+ return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq, cpu_ptimer_irq);
}
diff --git a/arch/arm/kvm/vgic-v3-coproc.c b/arch/arm/kvm/vgic-v3-coproc.c
new file mode 100644
index 000000000000..f41abf76366f
--- /dev/null
+++ b/arch/arm/kvm/vgic-v3-coproc.c
@@ -0,0 +1,35 @@
+/*
+ * VGIC system registers handling functions for AArch32 mode
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include "vgic.h"
+
+int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
+ u64 *reg)
+{
+ /*
+ * TODO: Implement for AArch32
+ */
+ return -ENXIO;
+}
+
+int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write, u64 id,
+ u64 *reg)
+{
+ /*
+ * TODO: Implement for AArch32
+ */
+ return -ENXIO;
+}
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 443b387021f2..f21fd3894370 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -70,9 +70,6 @@ struct kvm_arch {
/* Interrupt controller */
struct vgic_dist vgic;
-
- /* Timer */
- struct arch_timer_kvm timer;
};
#define KVM_NR_MEM_OBJS 40
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 55772c13a375..ed1246014901 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -236,13 +236,11 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
kvm_pfn_t pfn,
- unsigned long size,
- bool ipa_uncached)
+ unsigned long size)
{
void *va = page_address(pfn_to_page(pfn));
- if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
- kvm_flush_dcache_to_poc(va, size);
+ kvm_flush_dcache_to_poc(va, size);
if (!icache_is_aliasing()) { /* PIPT */
flush_icache_range((unsigned long)va,
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 3051f86a9b5f..c2860358ae3e 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -201,10 +201,23 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2
#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32
#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
+#define KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT 32
+#define KVM_DEV_ARM_VGIC_V3_MPIDR_MASK \
+ (0xffffffffULL << KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT)
#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
+#define KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK (0xffff)
#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3
#define KVM_DEV_ARM_VGIC_GRP_CTRL 4
+#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
+#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
+#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7
+#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
+#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
+ (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
+#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff
+#define VGIC_LEVEL_INFO_LINE_LEVEL 0
+
#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
/* Device Control API on vcpu fd */
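The arm64 header gains the same definitions. The new KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO group lets userspace save and restore the state of level-triggered interrupts 32 lines at a time; judging from the masks above, the attr packs the MPIDR into bits [63:32], the info type (VGIC_LEVEL_INFO_LINE_LEVEL) into bits [31:10], and the base vINTID into bits [9:0]. A hedged sketch under those assumptions, reusing the includes from the earlier sketch (get_line_levels is a hypothetical name):

/*
 * Hypothetical helper: fetch the line-level bits for the 32 interrupts
 * starting at 'first_intid' on the vcpu identified by 'mpidr'. The field
 * layout is an assumption derived from the masks defined above.
 */
static int get_line_levels(int vgic_fd, uint64_t mpidr,
			   uint32_t first_intid, uint32_t *levels)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
		.attr  = ((mpidr << KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT) &
			  KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) |
			 ((uint64_t)VGIC_LEVEL_INFO_LINE_LEVEL <<
			  KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) |
			 (first_intid & KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK),
		.addr  = (uint64_t)(unsigned long)levels,
	};

	return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
}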
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index d50a82a16ff6..afd51bebb9c5 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -2,7 +2,7 @@
# Makefile for Kernel-based Virtual Machine module
#
-ccflags-y += -Iarch/arm64/kvm
+ccflags-y += -Iarch/arm64/kvm -Ivirt/kvm/arm/vgic
CFLAGS_arm.o := -I.
CFLAGS_mmu.o := -I.
@@ -19,6 +19,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o
kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o
+kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/aarch32.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o
@@ -31,6 +32,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v2.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v3.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-kvm-device.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-its.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-debug.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/irqchip.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
kvm-$(CONFIG_KVM_ARM_PMU) += $(KVM)/arm/pmu.o
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index e95d4f68bf54..d9e9697de1b2 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -46,6 +46,11 @@ static const struct kvm_regs default_regs_reset32 = {
COMPAT_PSR_I_BIT | COMPAT_PSR_F_BIT),
};
+static const struct kvm_irq_level default_ptimer_irq = {
+ .irq = 30,
+ .level = 1,
+};
+
static const struct kvm_irq_level default_vtimer_irq = {
.irq = 27,
.level = 1,
@@ -104,6 +109,7 @@ int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
const struct kvm_irq_level *cpu_vtimer_irq;
+ const struct kvm_irq_level *cpu_ptimer_irq;
const struct kvm_regs *cpu_reset;
switch (vcpu->arch.target) {
@@ -117,6 +123,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
}
cpu_vtimer_irq = &default_vtimer_irq;
+ cpu_ptimer_irq = &default_ptimer_irq;
break;
}
@@ -130,5 +137,5 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
kvm_pmu_vcpu_reset(vcpu);
/* Reset timer */
- return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
+ return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq, cpu_ptimer_irq);
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 87e7e6608cd8..0e26f8c2b56f 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -820,6 +820,61 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
+static bool access_cntp_tval(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+ u64 now = kvm_phys_timer_read();
+
+ if (p->is_write)
+ ptimer->cnt_cval = p->regval + now;
+ else
+ p->regval = ptimer->cnt_cval - now;
+
+ return true;
+}
+
+static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+
+ if (p->is_write) {
+ /* ISTATUS bit is read-only */
+ ptimer->cnt_ctl = p->regval & ~ARCH_TIMER_CTRL_IT_STAT;
+ } else {
+ u64 now = kvm_phys_timer_read();
+
+ p->regval = ptimer->cnt_ctl;
+ /*
+ * Set ISTATUS bit if it's expired.
+ * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is
+ * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
+ * regardless of ENABLE bit for our implementation convenience.
+ */
+ if (ptimer->cnt_cval <= now)
+ p->regval |= ARCH_TIMER_CTRL_IT_STAT;
+ }
+
+ return true;
+}
+
+static bool access_cntp_cval(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+
+ if (p->is_write)
+ ptimer->cnt_cval = p->regval;
+ else
+ p->regval = ptimer->cnt_cval;
+
+ return true;
+}
+
/*
* Architected system registers.
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
@@ -1029,6 +1084,16 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
NULL, reset_unknown, TPIDRRO_EL0 },
+ /* CNTP_TVAL_EL0 */
+ { Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b0010), Op2(0b000),
+ access_cntp_tval },
+ /* CNTP_CTL_EL0 */
+ { Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b0010), Op2(0b001),
+ access_cntp_ctl },
+ /* CNTP_CVAL_EL0 */
+ { Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b0010), Op2(0b010),
+ access_cntp_cval },
+
/* PMEVCNTRn_EL0 */
PMU_PMEVCNTR_EL0(0),
PMU_PMEVCNTR_EL0(1),
@@ -1795,6 +1860,17 @@ static bool index_to_params(u64 id, struct sys_reg_params *params)
}
}
+const struct sys_reg_desc *find_reg_by_id(u64 id,
+ struct sys_reg_params *params,
+ const struct sys_reg_desc table[],
+ unsigned int num)
+{
+ if (!index_to_params(id, params))
+ return NULL;
+
+ return find_reg(params, table, num);
+}
+
/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
u64 id)
@@ -1807,11 +1883,8 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
return NULL;
- if (!index_to_params(id, &params))
- return NULL;
-
table = get_target_table(vcpu->arch.target, true, &num);
- r = find_reg(&params, table, num);
+ r = find_reg_by_id(id, &params, table, num);
if (!r)
r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
@@ -1918,10 +1991,8 @@ static int get_invariant_sys_reg(u64 id, void __user *uaddr)
struct sys_reg_params params;
const struct sys_reg_desc *r;
- if (!index_to_params(id, &params))
- return -ENOENT;
-
- r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
+ r = find_reg_by_id(id, &params, invariant_sys_regs,
+ ARRAY_SIZE(invariant_sys_regs));
if (!r)
return -ENOENT;
@@ -1935,9 +2006,8 @@ static int set_invariant_sys_reg(u64 id, void __user *uaddr)
int err;
u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
- if (!index_to_params(id, &params))
- return -ENOENT;
- r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
+ r = find_reg_by_id(id, &params, invariant_sys_regs,
+ ARRAY_SIZE(invariant_sys_regs));
if (!r)
return -ENOENT;
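The access_cntp_* handlers added above emulate the EL1 physical timer by trapping register accesses into the vcpu's ptimer context. CNTP_TVAL is just a relative view of the absolute compare value CNTP_CVAL: a TVAL write arms CVAL at now + tval, and a TVAL read reports cval - now. A standalone illustration of that invariant, not part of the patch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t now = 1000;		/* stand-in for kvm_phys_timer_read() */
	uint64_t tval_written = 100;

	uint64_t cval = tval_written + now;	/* write path: cnt_cval = regval + now */
	assert(cval == 1100);

	now += 40;				/* the counter advances */
	assert(cval - now == 60);		/* read path: 60 ticks remain */

	now += 100;				/* past the compare value */
	assert(cval <= now);			/* access_cntp_ctl would set ISTATUS */
	return 0;
}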
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index dbbb01cfbee9..9c6ffd0f0196 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -136,6 +136,10 @@ static inline int cmp_sys_reg(const struct sys_reg_desc *i1,
return i1->Op2 - i2->Op2;
}
+const struct sys_reg_desc *find_reg_by_id(u64 id,
+ struct sys_reg_params *params,
+ const struct sys_reg_desc table[],
+ unsigned int num);
#define Op0(_x) .Op0 = _x
#define Op1(_x) .Op1 = _x
diff --git a/arch/arm64/kvm/vgic-sys-reg-v3.c b/arch/arm64/kvm/vgic-sys-reg-v3.c
new file mode 100644
index 000000000000..79f37e37d367
--- /dev/null
+++ b/arch/arm64/kvm/vgic-sys-reg-v3.c
@@ -0,0 +1,346 @@
+/*
+ * VGIC system registers handling functions for AArch64 mode
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include "vgic.h"
+#include "sys_regs.h"
+
+static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u32 host_pri_bits, host_id_bits, host_seis, host_a3v, seis, a3v;
+ struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
+ struct vgic_vmcr vmcr;
+ u64 val;
+
+ vgic_get_vmcr(vcpu, &vmcr);
+ if (p->is_write) {
+ val = p->regval;
+
+ /*
+ * Disallow restoring VM state if not supported by this
+ * hardware.
+ */
+ host_pri_bits = ((val & ICC_CTLR_EL1_PRI_BITS_MASK) >>
+ ICC_CTLR_EL1_PRI_BITS_SHIFT) + 1;
+ if (host_pri_bits > vgic_v3_cpu->num_pri_bits)
+ return false;
+
+ vgic_v3_cpu->num_pri_bits = host_pri_bits;
+
+ host_id_bits = (val & ICC_CTLR_EL1_ID_BITS_MASK) >>
+ ICC_CTLR_EL1_ID_BITS_SHIFT;
+ if (host_id_bits > vgic_v3_cpu->num_id_bits)
+ return false;
+
+ vgic_v3_cpu->num_id_bits = host_id_bits;
+
+ host_seis = ((kvm_vgic_global_state.ich_vtr_el2 &
+ ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT);
+ seis = (val & ICC_CTLR_EL1_SEIS_MASK) >>
+ ICC_CTLR_EL1_SEIS_SHIFT;
+ if (host_seis != seis)
+ return false;
+
+ host_a3v = ((kvm_vgic_global_state.ich_vtr_el2 &
+ ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT);
+ a3v = (val & ICC_CTLR_EL1_A3V_MASK) >> ICC_CTLR_EL1_A3V_SHIFT;
+ if (host_a3v != a3v)
+ return false;
+
+ /*
+ * Set VMCR.CTLR in the ICC_CTLR_EL1 layout;
+ * vgic_set_vmcr() will convert it to the ICH_VMCR layout.
+ */
+ vmcr.ctlr = val & ICC_CTLR_EL1_CBPR_MASK;
+ vmcr.ctlr |= val & ICC_CTLR_EL1_EOImode_MASK;
+ vgic_set_vmcr(vcpu, &vmcr);
+ } else {
+ val = 0;
+ val |= (vgic_v3_cpu->num_pri_bits - 1) <<
+ ICC_CTLR_EL1_PRI_BITS_SHIFT;
+ val |= vgic_v3_cpu->num_id_bits << ICC_CTLR_EL1_ID_BITS_SHIFT;
+ val |= ((kvm_vgic_global_state.ich_vtr_el2 &
+ ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT) <<
+ ICC_CTLR_EL1_SEIS_SHIFT;
+ val |= ((kvm_vgic_global_state.ich_vtr_el2 &
+ ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT) <<
+ ICC_CTLR_EL1_A3V_SHIFT;
+ /*
+ * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
+ * Extract it directly using ICC_CTLR_EL1 reg definitions.
+ */
+ val |= vmcr.ctlr & ICC_CTLR_EL1_CBPR_MASK;
+ val |= vmcr.ctlr & ICC_CTLR_EL1_EOImode_MASK;
+
+ p->regval = val;
+ }
+
+ return true;
+}
+
+static bool access_gic_pmr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ struct vgic_vmcr vmcr;
+
+ vgic_get_vmcr(vcpu, &vmcr);
+ if (p->is_write) {
+ vmcr.pmr = (p->regval & ICC_PMR_EL1_MASK) >> ICC_PMR_EL1_SHIFT;
+ vgic_set_vmcr(vcpu, &vmcr);
+ } else {
+ p->regval = (vmcr.pmr << ICC_PMR_EL1_SHIFT) & ICC_PMR_EL1_MASK;
+ }
+
+ return true;
+}
+
+static bool access_gic_bpr0(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ struct vgic_vmcr vmcr;
+
+ vgic_get_vmcr(vcpu, &vmcr);
+ if (p->is_write) {
+ vmcr.bpr = (p->regval & ICC_BPR0_EL1_MASK) >>
+ ICC_BPR0_EL1_SHIFT;
+ vgic_set_vmcr(vcpu, &vmcr);
+ } else {
+ p->regval = (vmcr.bpr << ICC_BPR0_EL1_SHIFT) &
+ ICC_BPR0_EL1_MASK;
+ }
+
+ return true;
+}
+
+static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ struct vgic_vmcr vmcr;
+
+ if (!p->is_write)
+ p->regval = 0;
+
+ vgic_get_vmcr(vcpu, &vmcr);
+ if (!((vmcr.ctlr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT)) {
+ if (p->is_write) {
+ vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
+ ICC_BPR1_EL1_SHIFT;
+ vgic_set_vmcr(vcpu, &vmcr);
+ } else {
+ p->regval = (vmcr.abpr << ICC_BPR1_EL1_SHIFT) &
+ ICC_BPR1_EL1_MASK;
+ }
+ } else {
+ if (!p->is_write)
+ p->regval = min((vmcr.bpr + 1), 7U);
+ }
+
+ return true;
+}
+
+static bool access_gic_grpen0(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ struct vgic_vmcr vmcr;
+
+ vgic_get_vmcr(vcpu, &vmcr);
+ if (p->is_write) {
+ vmcr.grpen0 = (p->regval & ICC_IGRPEN0_EL1_MASK) >>
+ ICC_IGRPEN0_EL1_SHIFT;
+ vgic_set_vmcr(vcpu, &vmcr);
+ } else {
+ p->regval = (vmcr.grpen0 << ICC_IGRPEN0_EL1_SHIFT) &
+ ICC_IGRPEN0_EL1_MASK;
+ }
+
+ return true;
+}
+
+static bool access_gic_grpen1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ struct vgic_vmcr vmcr;
+
+ vgic_get_vmcr(vcpu, &vmcr);
+ if (p->is_write) {
+ vmcr.grpen1 = (p->regval & ICC_IGRPEN1_EL1_MASK) >>
+ ICC_IGRPEN1_EL1_SHIFT;
+ vgic_set_vmcr(vcpu, &vmcr);
+ } else {
+ p->regval = (vmcr.grpen1 << ICC_IGRPEN1_EL1_SHIFT) &
+ ICC_IGRPEN1_EL1_MASK;
+ }
+
+ return true;
+}
+
+static void vgic_v3_access_apr_reg(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p, u8 apr, u8 idx)
+{
+ struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+ u32 *ap_reg;
+
+ if (apr)
+ ap_reg = &vgicv3->vgic_ap1r[idx];
+ else
+ ap_reg = &vgicv3->vgic_ap0r[idx];
+
+ if (p->is_write)
+ *ap_reg = p->regval;
+ else
+ p->regval = *ap_reg;
+}
+
+static bool access_gic_aprn(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r, u8 apr)
+{
+ struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
+ u8 idx = r->Op2 & 3;
+
+ /*
+ * num_pri_bits is initialized with the HW-supported value, so we
+ * can safely rely on it even if the VM has not restored
+ * ICC_CTLR_EL1 before restoring the APnR registers.
+ */
+ switch (vgic_v3_cpu->num_pri_bits) {
+ case 7:
+ vgic_v3_access_apr_reg(vcpu, p, apr, idx);
+ break;
+ case 6:
+ if (idx > 1)
+ goto err;
+ vgic_v3_access_apr_reg(vcpu, p, apr, idx);
+ break;
+ default:
+ if (idx > 0)
+ goto err;
+ vgic_v3_access_apr_reg(vcpu, p, apr, idx);
+ }
+
+ return true;
+err:
+ if (!p->is_write)
+ p->regval = 0;
+
+ return false;
+}
+
+static bool access_gic_ap0r(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ return access_gic_aprn(vcpu, p, r, 0);
+}
+
+static bool access_gic_ap1r(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ return access_gic_aprn(vcpu, p, r, 1);
+}
+
+static bool access_gic_sre(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+ /* Validate SRE bit */
+ if (p->is_write) {
+ if (!(p->regval & ICC_SRE_EL1_SRE))
+ return false;
+ } else {
+ p->regval = vgicv3->vgic_sre;
+ }
+
+ return true;
+}
+
+static const struct sys_reg_desc gic_v3_icc_reg_descs[] = {
+ /* ICC_PMR_EL1 */
+ { Op0(3), Op1(0), CRn(4), CRm(6), Op2(0), access_gic_pmr },
+ /* ICC_BPR0_EL1 */
+ { Op0(3), Op1(0), CRn(12), CRm(8), Op2(3), access_gic_bpr0 },
+ /* ICC_AP0R0_EL1 */
+ { Op0(3), Op1(0), CRn(12), CRm(8), Op2(4), access_gic_ap0r },
+ /* ICC_AP0R1_EL1 */
+ { Op0(3), Op1(0), CRn(12), CRm(8), Op2(5), access_gic_ap0r },
+ /* ICC_AP0R2_EL1 */
+ { Op0(3), Op1(0), CRn(12), CRm(8), Op2(6), access_gic_ap0r },
+ /* ICC_AP0R3_EL1 */
+ { Op0(3), Op1(0), CRn(12), CRm(8), Op2(7), access_gic_ap0r },
+ /* ICC_AP1R0_EL1 */
+ { Op0(3), Op1(0), CRn(12), CRm(9), Op2(0), access_gic_ap1r },
+ /* ICC_AP1R1_EL1 */
+ { Op0(3), Op1(0), CRn(12), CRm(9), Op2(1), access_gic_ap1r },
+ /* ICC_AP1R2_EL1 */
+ { Op0(3), Op1(0), CRn(12), CRm(9), Op2(2), access_gic_ap1r },
+ /* ICC_AP1R3_EL1 */
+ { Op0(3), Op1(0), CRn(12), CRm(9), Op2(3), access_gic_ap1r },
+ /* ICC_BPR1_EL1 */
+ { Op0(3), Op1(0), CRn(12), CRm(12), Op2(3), access_gic_bpr1 },
+ /* ICC_CTLR_EL1 */
+ { Op0(3), Op1(0), CRn(12), CRm(12), Op2(4), access_gic_ctlr },
+ /* ICC_SRE_EL1 */
+ { Op0(3), Op1(0), CRn(12), CRm(12), Op2(5), access_gic_sre },
+ /* ICC_IGRPEN0_EL1 */
+ { Op0(3), Op1(0), CRn(12), CRm(12), Op2(6), access_gic_grpen0 },
+ /* ICC_IGRPEN1_EL1 */
+ { Op0(3), Op1(0), CRn(12), CRm(12), Op2(7), access_gic_grpen1 },
+};
+
+int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
+ u64 *reg)
+{
+ struct sys_reg_params params;
+ u64 sysreg = (id & KVM_DEV_ARM_VGIC_SYSREG_MASK) | KVM_REG_SIZE_U64;
+
+ params.regval = *reg;
+ params.is_write = is_write;
+ params.is_aarch32 = false;
+ params.is_32bit = false;
+
+ if (find_reg_by_id(sysreg, &params, gic_v3_icc_reg_descs,
+ ARRAY_SIZE(gic_v3_icc_reg_descs)))
+ return 0;
+
+ return -ENXIO;
+}
+
+int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write, u64 id,
+ u64 *reg)
+{
+ struct sys_reg_params params;
+ const struct sys_reg_desc *r;
+ u64 sysreg = (id & KVM_DEV_ARM_VGIC_SYSREG_MASK) | KVM_REG_SIZE_U64;
+
+ if (is_write)
+ params.regval = *reg;
+ params.is_write = is_write;
+ params.is_aarch32 = false;
+ params.is_32bit = false;
+
+ r = find_reg_by_id(sysreg, &params, gic_v3_icc_reg_descs,
+ ARRAY_SIZE(gic_v3_icc_reg_descs));
+ if (!r)
+ return -ENXIO;
+
+ if (!r->access(vcpu, &params, r))
+ return -EINVAL;
+
+ if (!is_write)
+ *reg = params.regval;
+
+ return 0;
+}
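find_reg_by_id() matches the attr's low bits against the Op0/Op1/CRn/CRm/Op2 fields of gic_v3_icc_reg_descs. Assuming the field layout of the existing KVM_REG_ARM64_SYSREG_*_SHIFT definitions in arch/arm64/include/uapi/asm/kvm.h (Op0 at bit 14, Op1 at 11, CRn at 7, CRm at 3, Op2 at 0), the 16-bit instruction encoding kept by KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK can be sketched as follows; SYSREG_ENC and ICC_PMR_EL1_ENC are hypothetical names:

/*
 * Sketch only: compose the instruction encoding that selects an entry in
 * gic_v3_icc_reg_descs. Shift values mirror KVM_REG_ARM64_SYSREG_*_SHIFT.
 */
#define SYSREG_ENC(op0, op1, crn, crm, op2)			\
	(((op0) << 14) | ((op1) << 11) | ((crn) << 7) |		\
	 ((crm) << 3) | (op2))

/* First table entry, ICC_PMR_EL1 = Op0(3), Op1(0), CRn(4), CRm(6), Op2(0): */
#define ICC_PMR_EL1_ENC		SYSREG_ENC(3, 0, 4, 6, 0)	/* 0xc230 */

Passed as the sysreg_instr argument of the earlier userspace sketch, this value would route the access to access_gic_pmr().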