From 7775cbaa11153ec5489cfa31de95aa1b5f29310b Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Thu, 12 Sep 2019 09:02:50 +0200 Subject: KVM: s390: Remove unused parameter from __inject_sigp_restart() It's not required, so drop it to make it clear that this interrupt does not have any extra parameters. Signed-off-by: Thomas Huth Link: https://lore.kernel.org/kvm/20190912070250.15131-1-thuth@redhat.com Reviewed-by: Janosch Frank Reviewed-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/interrupt.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index d1ccc168c071..165dea4c7f19 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -1477,8 +1477,7 @@ static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) return 0; } -static int __inject_sigp_restart(struct kvm_vcpu *vcpu, - struct kvm_s390_irq *irq) +static int __inject_sigp_restart(struct kvm_vcpu *vcpu) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; @@ -2007,7 +2006,7 @@ static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) rc = __inject_sigp_stop(vcpu, irq); break; case KVM_S390_RESTART: - rc = __inject_sigp_restart(vcpu, irq); + rc = __inject_sigp_restart(vcpu); break; case KVM_S390_INT_CLOCK_COMP: rc = __inject_ckc(vcpu); -- cgit v1.2.3 From f76f6371643b563a7168a6ba5713ce93caa36ecc Mon Sep 17 00:00:00 2001 From: Janosch Frank Date: Wed, 2 Oct 2019 03:56:27 -0400 Subject: KVM: s390: Cleanup kvm_arch_init error path Both kvm_s390_gib_destroy and debug_unregister test if the needed pointers are not NULL and hence can be called unconditionally. Signed-off-by: Janosch Frank Link: https://lore.kernel.org/kvm/20191002075627.3582-1-frankja@linux.ibm.com Reviewed-by: David Hildenbrand Reviewed-by: Thomas Huth Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f6db0f1bc867..40af442b2e15 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -453,16 +453,14 @@ static void kvm_s390_cpu_feat_init(void) int kvm_arch_init(void *opaque) { - int rc; + int rc = -ENOMEM; kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long)); if (!kvm_s390_dbf) return -ENOMEM; - if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) { - rc = -ENOMEM; - goto out_debug_unreg; - } + if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) + goto out; kvm_s390_cpu_feat_init(); @@ -470,19 +468,17 @@ int kvm_arch_init(void *opaque) rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC); if (rc) { pr_err("A FLIC registration call failed with rc=%d\n", rc); - goto out_debug_unreg; + goto out; } rc = kvm_s390_gib_init(GAL_ISC); if (rc) - goto out_gib_destroy; + goto out; return 0; -out_gib_destroy: - kvm_s390_gib_destroy(); -out_debug_unreg: - debug_unregister(kvm_s390_dbf); +out: + kvm_arch_exit(); return rc; } -- cgit v1.2.3 From efec8d219fb1bc2d7ab4f1c582e7beed44e309f4 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Thu, 10 Oct 2019 09:37:29 +0200 Subject: selftests: kvm: make syncregs more reliable on s390 similar to commit 2c57da356800 ("selftests: kvm: fix sync_regs_test with newer gccs") and commit 204c91eff798a ("KVM: selftests: do not blindly clobber registers in guest asm") we better do not rely on gcc leaving r11 untouched. 
We can write the simple ucall inline and have the guest code completely as small assembler function. Signed-off-by: Christian Borntraeger Suggested-by: Paolo Bonzini Reviewed-by: Thomas Huth --- tools/testing/selftests/kvm/s390x/sync_regs_test.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/kvm/s390x/sync_regs_test.c b/tools/testing/selftests/kvm/s390x/sync_regs_test.c index d5290b4ad636..b705637ca14b 100644 --- a/tools/testing/selftests/kvm/s390x/sync_regs_test.c +++ b/tools/testing/selftests/kvm/s390x/sync_regs_test.c @@ -25,12 +25,15 @@ static void guest_code(void) { - register u64 stage asm("11") = 0; - - for (;;) { - GUEST_SYNC(0); - asm volatile ("ahi %0,1" : : "r"(stage)); - } + /* + * We embed diag 501 here instead of doing a ucall to avoid that + * the compiler has messed with r11 at the time of the ucall. + */ + asm volatile ( + "0: diag 0,0,0x501\n" + " ahi 11,1\n" + " j 0b\n" + ); } #define REG_COMPARE(reg) \ -- cgit v1.2.3 From 8474e5cac07e7f21dd8c0b3620640db30115db56 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Fri, 15 Feb 2019 13:47:20 +0100 Subject: KVM: s390: count invalid yields To analyze some performance issues with lock contention and scheduling it is nice to know when diag9c did not result in any action or when no action was tried. Signed-off-by: Christian Borntraeger Reviewed-by: David Hildenbrand Reviewed-by: Cornelia Huck --- arch/s390/include/asm/kvm_host.h | 1 + arch/s390/kvm/diag.c | 18 ++++++++++++++---- arch/s390/kvm/kvm-s390.c | 1 + 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index abe60268335d..02f4c21c57f6 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -392,6 +392,7 @@ struct kvm_vcpu_stat { u64 diagnose_10; u64 diagnose_44; u64 diagnose_9c; + u64 diagnose_9c_ignored; u64 diagnose_258; u64 diagnose_308; u64 diagnose_500; diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 45634b3d2e0a..609c55df3ce8 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c @@ -158,14 +158,24 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu) tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; vcpu->stat.diagnose_9c++; - VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid); + /* yield to self */ if (tid == vcpu->vcpu_id) - return 0; + goto no_yield; + /* yield to invalid */ tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid); - if (tcpu) - kvm_vcpu_yield_to(tcpu); + if (!tcpu) + goto no_yield; + + if (kvm_vcpu_yield_to(tcpu) <= 0) + goto no_yield; + + VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: done", tid); + return 0; +no_yield: + VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: ignored", tid); + vcpu->stat.diagnose_9c_ignored++; return 0; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 40af442b2e15..3b5ebf48f802 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -155,6 +155,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "instruction_diag_10", VCPU_STAT(diagnose_10) }, { "instruction_diag_44", VCPU_STAT(diagnose_44) }, { "instruction_diag_9c", VCPU_STAT(diagnose_9c) }, + { "diag_9c_ignored", VCPU_STAT(diagnose_9c_ignored) }, { "instruction_diag_258", VCPU_STAT(diagnose_258) }, { "instruction_diag_308", VCPU_STAT(diagnose_308) }, { "instruction_diag_500", VCPU_STAT(diagnose_500) }, -- cgit v1.2.3 From c7b7de63124645089ccf9900b9e5ea08059ccae0 Mon Sep 
17 00:00:00 2001 From: Christian Borntraeger Date: Fri, 15 Feb 2019 13:47:20 +0100 Subject: KVM: s390: Do not yield when target is already running If the target is already running we do not need to yield. Signed-off-by: Christian Borntraeger Reviewed-by: David Hildenbrand Reviewed-by: Cornelia Huck --- arch/s390/kvm/diag.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 609c55df3ce8..3fb54ec2cf3e 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c @@ -168,6 +168,10 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu) if (!tcpu) goto no_yield; + /* target already running */ + if (READ_ONCE(tcpu->cpu) >= 0) + goto no_yield; + if (kvm_vcpu_yield_to(tcpu) <= 0) goto no_yield; -- cgit v1.2.3 From 1a9167a214f560a23c5050ce6dfebae489528f0d Mon Sep 17 00:00:00 2001 From: Fabiano Rosas Date: Wed, 19 Jun 2019 13:01:27 -0300 Subject: KVM: PPC: Report single stepping capability When calling the KVM_SET_GUEST_DEBUG ioctl, userspace might request the next instruction to be single stepped via the KVM_GUESTDBG_SINGLESTEP control bit of the kvm_guest_debug structure. This patch adds the KVM_CAP_PPC_GUEST_DEBUG_SSTEP capability in order to inform userspace about the state of single stepping support. We currently don't have support for guest single stepping implemented in Book3S HV so the capability is only present for Book3S PR and BookE. Signed-off-by: Fabiano Rosas Signed-off-by: Paul Mackerras --- Documentation/virt/kvm/api.txt | 3 +++ arch/powerpc/kvm/powerpc.c | 2 ++ include/uapi/linux/kvm.h | 1 + 3 files changed, 6 insertions(+) diff --git a/Documentation/virt/kvm/api.txt b/Documentation/virt/kvm/api.txt index 4833904d32a5..f94d06a12d20 100644 --- a/Documentation/virt/kvm/api.txt +++ b/Documentation/virt/kvm/api.txt @@ -2982,6 +2982,9 @@ can be determined by querying the KVM_CAP_GUEST_DEBUG_HW_BPS and KVM_CAP_GUEST_DEBUG_HW_WPS capabilities which return a positive number indicating the number of supported registers. +For ppc, the KVM_CAP_PPC_GUEST_DEBUG_SSTEP capability indicates whether +the single-step debug event (KVM_GUESTDBG_SINGLESTEP) is supported. + When debug events exit the main run loop with the reason KVM_EXIT_DEBUG with the kvm_debug_exit_arch part of the kvm_run structure containing architecture specific debug information. 
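One possible way for userspace to consume the new capability (illustrative sketch only, not part of this patch; it assumes vm_fd and vcpu_fd are already set up and that <linux/kvm.h> comes from a kernel containing this change):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int enable_single_step(int vm_fd, int vcpu_fd)
{
        struct kvm_guest_debug dbg = {
                .control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
        };

        /* A return value <= 0 means single stepping is not supported
         * (for example on Book3S HV), so don't even try to enable it. */
        if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_GUEST_DEBUG_SSTEP) <= 0)
                return -1;

        return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}
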
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 3a77bb643452..9e085e931d74 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -522,6 +522,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_IMMEDIATE_EXIT: r = 1; break; + case KVM_CAP_PPC_GUEST_DEBUG_SSTEP: + /* fall through */ case KVM_CAP_PPC_PAIRED_SINGLES: case KVM_CAP_PPC_OSI: case KVM_CAP_PPC_GET_PVINFO: diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 52641d8ca9e8..ce8cfcc51aec 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1000,6 +1000,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_PMU_EVENT_FILTER 173 #define KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 174 #define KVM_CAP_HYPERV_DIRECT_TLBFLUSH 175 +#define KVM_CAP_PPC_GUEST_DEBUG_SSTEP 176 #ifdef KVM_CAP_IRQ_ROUTING -- cgit v1.2.3 From 258ed7d02843052d127df2264c8b342276ced18a Mon Sep 17 00:00:00 2001 From: Leonardo Bras Date: Mon, 23 Sep 2019 18:30:23 -0300 Subject: KVM: PPC: Reduce calls to get current->mm by storing the value locally Reduces the number of calls to get_current() in order to get the value of current->mm by doing it once and storing the value, since it is not supposed to change inside the same process). Signed-off-by: Leonardo Bras Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_64_mmu_hv.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 9a75f0e1933b..f2b9aea43216 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -508,6 +508,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, struct vm_area_struct *vma; unsigned long rcbits; long mmio_update; + struct mm_struct *mm; if (kvm_is_radix(kvm)) return kvmppc_book3s_radix_page_fault(run, vcpu, ea, dsisr); @@ -584,6 +585,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, is_ci = false; pfn = 0; page = NULL; + mm = current->mm; pte_size = PAGE_SIZE; writing = (dsisr & DSISR_ISSTORE) != 0; /* If writing != 0, then the HPTE must allow writing, if we get here */ @@ -592,8 +594,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, npages = get_user_pages_fast(hva, 1, writing ? FOLL_WRITE : 0, pages); if (npages < 1) { /* Check if it's an I/O mapping */ - down_read(¤t->mm->mmap_sem); - vma = find_vma(current->mm, hva); + down_read(&mm->mmap_sem); + vma = find_vma(mm, hva); if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end && (vma->vm_flags & VM_PFNMAP)) { pfn = vma->vm_pgoff + @@ -602,7 +604,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, is_ci = pte_ci(__pte((pgprot_val(vma->vm_page_prot)))); write_ok = vma->vm_flags & VM_WRITE; } - up_read(¤t->mm->mmap_sem); + up_read(&mm->mmap_sem); if (!pfn) goto out_put; } else { @@ -621,8 +623,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, * hugepage split and collapse. 
*/ local_irq_save(flags); - ptep = find_current_mm_pte(current->mm->pgd, - hva, NULL, NULL); + ptep = find_current_mm_pte(mm->pgd, hva, NULL, NULL); if (ptep) { pte = kvmppc_read_update_linux_pte(ptep, 1); if (__pte_write(pte)) -- cgit v1.2.3 From f41c4989c8de1fa70aafe950abaf80c56a8b8712 Mon Sep 17 00:00:00 2001 From: Leonardo Bras Date: Mon, 23 Sep 2019 18:24:08 -0300 Subject: KVM: PPC: E500: Replace current->mm by kvm->mm Given that in kvm_create_vm() there is: kvm->mm = current->mm; And that on every kvm_*_ioctl we have: if (kvm->mm != current->mm) return -EIO; I see no reason to keep using current->mm instead of kvm->mm. By doing so, we would reduce the use of 'global' variables on code, relying more in the contents of kvm struct. Signed-off-by: Leonardo Bras Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/e500_mmu_host.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index 321db0fdb9db..425d13806645 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -355,9 +355,9 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, if (tlbsel == 1) { struct vm_area_struct *vma; - down_read(¤t->mm->mmap_sem); + down_read(&kvm->mm->mmap_sem); - vma = find_vma(current->mm, hva); + vma = find_vma(kvm->mm, hva); if (vma && hva >= vma->vm_start && (vma->vm_flags & VM_PFNMAP)) { /* @@ -441,7 +441,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); } - up_read(¤t->mm->mmap_sem); + up_read(&kvm->mm->mmap_sem); } if (likely(!pfnmap)) { -- cgit v1.2.3 From e7d71c943040c23f2fd042033d319f56e84f845b Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Fri, 27 Sep 2019 13:53:38 +0200 Subject: KVM: PPC: Book3S HV: XIVE: Set kvm->arch.xive when VPs are allocated MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If we cannot allocate the XIVE VPs in OPAL, the creation of a XIVE or XICS-on-XIVE device is aborted as expected, but we leave kvm->arch.xive set forever since the release method isn't called in this case. Any subsequent tentative to create a XIVE or XICS-on-XIVE for this VM will thus always fail (DoS). This is a problem for QEMU since it destroys and re-creates these devices when the VM is reset: the VM would be restricted to using the much slower emulated XIVE or XICS forever. As an alternative to adding rollback, do not assign kvm->arch.xive before making sure the XIVE VPs are allocated in OPAL. Cc: stable@vger.kernel.org # v5.2 Fixes: 5422e95103cf ("KVM: PPC: Book3S HV: XIVE: Replace the 'destroy' method by a 'release' method") Signed-off-by: Greg Kurz Reviewed-by: Cédric Le Goater Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_xive.c | 11 +++++------ arch/powerpc/kvm/book3s_xive_native.c | 2 +- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index a3f9c665bb5b..baa740815b3c 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -2005,6 +2005,10 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type) pr_devel("Creating xive for partition\n"); + /* Already there ? 
*/ + if (kvm->arch.xive) + return -EEXIST; + xive = kvmppc_xive_get_device(kvm, type); if (!xive) return -ENOMEM; @@ -2014,12 +2018,6 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type) xive->kvm = kvm; mutex_init(&xive->lock); - /* Already there ? */ - if (kvm->arch.xive) - ret = -EEXIST; - else - kvm->arch.xive = xive; - /* We use the default queue size set by the host */ xive->q_order = xive_native_default_eq_shift(); if (xive->q_order < PAGE_SHIFT) @@ -2039,6 +2037,7 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type) if (ret) return ret; + kvm->arch.xive = xive; return 0; } diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 78b906ffa0d2..ebb4215baf45 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -1081,7 +1081,6 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type) dev->private = xive; xive->dev = dev; xive->kvm = kvm; - kvm->arch.xive = xive; mutex_init(&xive->mapping_lock); mutex_init(&xive->lock); @@ -1102,6 +1101,7 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type) if (ret) return ret; + kvm->arch.xive = xive; return 0; } -- cgit v1.2.3 From 8a4e7597ba1e41030189b73cd7261f4383588d1d Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Fri, 27 Sep 2019 13:53:49 +0200 Subject: KVM: PPC: Book3S HV: XIVE: Show VP id in debugfs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Print out the VP id of each connected vCPU, this allow to see: - the VP block base in which OPAL encodes information that may be useful when debugging - the packed vCPU id which may differ from the raw vCPU id if the latter is >= KVM_MAX_VCPUS (2048) Signed-off-by: Greg Kurz Reviewed-by: Cédric Le Goater Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_xive.c | 4 ++-- arch/powerpc/kvm/book3s_xive_native.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index baa740815b3c..0b7859e40f66 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -2107,9 +2107,9 @@ static int xive_debug_show(struct seq_file *m, void *private) if (!xc) continue; - seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x" + seq_printf(m, "cpu server %#x VP:%#x CPPR:%#x HWCPPR:%#x" " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n", - xc->server_num, xc->cppr, xc->hw_cppr, + xc->server_num, xc->vp_id, xc->cppr, xc->hw_cppr, xc->mfrr, xc->pending, xc->stat_rm_h_xirr, xc->stat_vm_h_xirr); diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index ebb4215baf45..43a86858390a 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -1204,8 +1204,8 @@ static int xive_native_debug_show(struct seq_file *m, void *private) if (!xc) continue; - seq_printf(m, "cpu server %#x NSR=%02x CPPR=%02x IBP=%02x PIPR=%02x w01=%016llx w2=%08x\n", - xc->server_num, + seq_printf(m, "cpu server %#x VP=%#x NSR=%02x CPPR=%02x IBP=%02x PIPR=%02x w01=%016llx w2=%08x\n", + xc->server_num, xc->vp_id, vcpu->arch.xive_saved_state.nsr, vcpu->arch.xive_saved_state.cppr, vcpu->arch.xive_saved_state.ipb, -- cgit v1.2.3 From 8db29ea2391cc6f5b73cc9c04b2dee4409b9fc05 Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Fri, 27 Sep 2019 13:53:55 +0200 Subject: KVM: PPC: Book3S HV: XIVE: Compute the VP id in a common helper Reduce code duplication by consolidating the checking of vCPU ids and VP ids to 
a common helper used by both legacy and native XIVE KVM devices. And explain the magic with a comment. Signed-off-by: Greg Kurz Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_xive.c | 42 +++++++++++++++++++++++++++-------- arch/powerpc/kvm/book3s_xive.h | 1 + arch/powerpc/kvm/book3s_xive_native.c | 11 ++------- 3 files changed, 36 insertions(+), 18 deletions(-) diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 0b7859e40f66..d84da9f6ee88 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -1211,6 +1211,37 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) vcpu->arch.xive_vcpu = NULL; } +static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu) +{ + /* We have a block of KVM_MAX_VCPUS VPs. We just need to check + * raw vCPU ids are below the expected limit for this guest's + * core stride ; kvmppc_pack_vcpu_id() will pack them down to an + * index that can be safely used to compute a VP id that belongs + * to the VP block. + */ + return cpu < KVM_MAX_VCPUS * xive->kvm->arch.emul_smt_mode; +} + +int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp) +{ + u32 vp_id; + + if (!kvmppc_xive_vcpu_id_valid(xive, cpu)) { + pr_devel("Out of bounds !\n"); + return -EINVAL; + } + + vp_id = kvmppc_xive_vp(xive, cpu); + if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) { + pr_devel("Duplicate !\n"); + return -EEXIST; + } + + *vp = vp_id; + + return 0; +} + int kvmppc_xive_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu, u32 cpu) { @@ -1229,20 +1260,13 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev, return -EPERM; if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT) return -EBUSY; - if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) { - pr_devel("Out of bounds !\n"); - return -EINVAL; - } /* We need to synchronize with queue provisioning */ mutex_lock(&xive->lock); - vp_id = kvmppc_xive_vp(xive, cpu); - if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) { - pr_devel("Duplicate !\n"); - r = -EEXIST; + r = kvmppc_xive_compute_vp_id(xive, cpu, &vp_id); + if (r) goto bail; - } xc = kzalloc(sizeof(*xc), GFP_KERNEL); if (!xc) { diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h index fe3ed50e0818..90cf6ec35a68 100644 --- a/arch/powerpc/kvm/book3s_xive.h +++ b/arch/powerpc/kvm/book3s_xive.h @@ -296,6 +296,7 @@ int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio, struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type); void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu, struct kvmppc_xive_vcpu *xc, int irq); +int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp); #endif /* CONFIG_KVM_XICS */ #endif /* _KVM_PPC_BOOK3S_XICS_H */ diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 43a86858390a..5bb480b2aafd 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -118,19 +118,12 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev, return -EPERM; if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT) return -EBUSY; - if (server_num >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) { - pr_devel("Out of bounds !\n"); - return -EINVAL; - } mutex_lock(&xive->lock); - vp_id = kvmppc_xive_vp(xive, server_num); - if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) { - pr_devel("Duplicate !\n"); - rc = -EEXIST; + rc = kvmppc_xive_compute_vp_id(xive, server_num, &vp_id); + if (rc) goto bail; - } xc = kzalloc(sizeof(*xc), GFP_KERNEL); if (!xc) 
{ -- cgit v1.2.3 From 062cfab7069fcb55d77ad5552f29e24178728fa2 Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Fri, 27 Sep 2019 13:54:01 +0200 Subject: KVM: PPC: Book3S HV: XIVE: Make VP block size configurable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The XIVE VP is an internal structure which allow the XIVE interrupt controller to maintain the interrupt context state of vCPUs non dispatched on HW threads. When a guest is started, the XIVE KVM device allocates a block of XIVE VPs in OPAL, enough to accommodate the highest possible vCPU id KVM_MAX_VCPU_ID (16384) packed down to KVM_MAX_VCPUS (2048). With a guest's core stride of 8 and a threading mode of 1 (QEMU's default), a VM must run at least 256 vCPUs to actually need such a range of VPs. A POWER9 system has a limited XIVE VP space : 512k and KVM is currently wasting this HW resource with large VP allocations, especially since a typical VM likely runs with a lot less vCPUs. Make the size of the VP block configurable. Add an nr_servers field to the XIVE structure and a function to set it for this purpose. Split VP allocation out of the device create function. Since the VP block isn't used before the first vCPU connects to the XIVE KVM device, allocation is now performed by kvmppc_xive_connect_vcpu(). This gives the opportunity to set nr_servers in between: kvmppc_xive_create() / kvmppc_xive_native_create() . . kvmppc_xive_set_nr_servers() . . kvmppc_xive_connect_vcpu() / kvmppc_xive_native_connect_vcpu() The connect_vcpu() functions check that the vCPU id is below nr_servers and if it is the first vCPU they allocate the VP block. This is protected against a concurrent update of nr_servers by kvmppc_xive_set_nr_servers() with the xive->lock mutex. Also, the block is allocated once for the device lifetime: nr_servers should stay constant otherwise connect_vcpu() could generate a boggus VP id and likely crash OPAL. It is thus forbidden to update nr_servers once the block is allocated. If the VP allocation fail, return ENOSPC which seems more appropriate to report the depletion of system wide HW resource than ENOMEM or ENXIO. A VM using a stride of 8 and 1 thread per core with 32 vCPUs would hence only need 256 VPs instead of 2048. If the stride is set to match the number of threads per core, this goes further down to 32. This will be exposed to userspace by a subsequent patch. Signed-off-by: Greg Kurz Reviewed-by: Cédric Le Goater Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_xive.c | 65 ++++++++++++++++++++++++++++------- arch/powerpc/kvm/book3s_xive.h | 4 +++ arch/powerpc/kvm/book3s_xive_native.c | 18 +++------- 3 files changed, 62 insertions(+), 25 deletions(-) diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index d84da9f6ee88..6c35b3d95986 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -1213,13 +1213,13 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu) { - /* We have a block of KVM_MAX_VCPUS VPs. We just need to check + /* We have a block of xive->nr_servers VPs. We just need to check * raw vCPU ids are below the expected limit for this guest's * core stride ; kvmppc_pack_vcpu_id() will pack them down to an * index that can be safely used to compute a VP id that belongs * to the VP block. 
*/ - return cpu < KVM_MAX_VCPUS * xive->kvm->arch.emul_smt_mode; + return cpu < xive->nr_servers * xive->kvm->arch.emul_smt_mode; } int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp) @@ -1231,6 +1231,14 @@ int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp) return -EINVAL; } + if (xive->vp_base == XIVE_INVALID_VP) { + xive->vp_base = xive_native_alloc_vp_block(xive->nr_servers); + pr_devel("VP_Base=%x nr_servers=%d\n", xive->vp_base, xive->nr_servers); + + if (xive->vp_base == XIVE_INVALID_VP) + return -ENOSPC; + } + vp_id = kvmppc_xive_vp(xive, cpu); if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) { pr_devel("Duplicate !\n"); @@ -1858,6 +1866,43 @@ int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, return 0; } +int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr) +{ + u32 __user *ubufp = (u32 __user *) addr; + u32 nr_servers; + int rc = 0; + + if (get_user(nr_servers, ubufp)) + return -EFAULT; + + pr_devel("%s nr_servers=%u\n", __func__, nr_servers); + + if (!nr_servers || nr_servers > KVM_MAX_VCPU_ID) + return -EINVAL; + + mutex_lock(&xive->lock); + if (xive->vp_base != XIVE_INVALID_VP) + /* The VP block is allocated once and freed when the device + * is released. Better not allow to change its size since its + * used by connect_vcpu to validate vCPU ids are valid (eg, + * setting it back to a higher value could allow connect_vcpu + * to come up with a VP id that goes beyond the VP block, which + * is likely to cause a crash in OPAL). + */ + rc = -EBUSY; + else if (nr_servers > KVM_MAX_VCPUS) + /* We don't need more servers. Higher vCPU ids get packed + * down below KVM_MAX_VCPUS by kvmppc_pack_vcpu_id(). + */ + xive->nr_servers = KVM_MAX_VCPUS; + else + xive->nr_servers = nr_servers; + + mutex_unlock(&xive->lock); + + return rc; +} + static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { struct kvmppc_xive *xive = dev->private; @@ -2025,7 +2070,6 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type) { struct kvmppc_xive *xive; struct kvm *kvm = dev->kvm; - int ret = 0; pr_devel("Creating xive for partition\n"); @@ -2049,18 +2093,15 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type) else xive->q_page_order = xive->q_order - PAGE_SHIFT; - /* Allocate a bunch of VPs */ - xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS); - pr_devel("VP_Base=%x\n", xive->vp_base); - - if (xive->vp_base == XIVE_INVALID_VP) - ret = -ENOMEM; + /* VP allocation is delayed to the first call to connect_vcpu */ + xive->vp_base = XIVE_INVALID_VP; + /* KVM_MAX_VCPUS limits the number of VMs to roughly 64 per sockets + * on a POWER9 system. 
+ */ + xive->nr_servers = KVM_MAX_VCPUS; xive->single_escalation = xive_native_has_single_escalation(); - if (ret) - return ret; - kvm->arch.xive = xive; return 0; } diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h index 90cf6ec35a68..382e3a56e789 100644 --- a/arch/powerpc/kvm/book3s_xive.h +++ b/arch/powerpc/kvm/book3s_xive.h @@ -135,6 +135,9 @@ struct kvmppc_xive { /* Flags */ u8 single_escalation; + /* Number of entries in the VP block */ + u32 nr_servers; + struct kvmppc_xive_ops *ops; struct address_space *mapping; struct mutex mapping_lock; @@ -297,6 +300,7 @@ struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type); void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu, struct kvmppc_xive_vcpu *xc, int irq); int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp); +int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr); #endif /* CONFIG_KVM_XICS */ #endif /* _KVM_PPC_BOOK3S_XICS_H */ diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 5bb480b2aafd..8ab333eabeef 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -1060,7 +1060,6 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type) { struct kvmppc_xive *xive; struct kvm *kvm = dev->kvm; - int ret = 0; pr_devel("Creating xive native device\n"); @@ -1077,23 +1076,16 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type) mutex_init(&xive->mapping_lock); mutex_init(&xive->lock); - /* - * Allocate a bunch of VPs. KVM_MAX_VCPUS is a large value for - * a default. Getting the max number of CPUs the VM was - * configured with would improve our usage of the XIVE VP space. + /* VP allocation is delayed to the first call to connect_vcpu */ + xive->vp_base = XIVE_INVALID_VP; + /* KVM_MAX_VCPUS limits the number of VMs to roughly 64 per sockets + * on a POWER9 system. */ - xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS); - pr_devel("VP_Base=%x\n", xive->vp_base); - - if (xive->vp_base == XIVE_INVALID_VP) - ret = -ENXIO; + xive->nr_servers = KVM_MAX_VCPUS; xive->single_escalation = xive_native_has_single_escalation(); xive->ops = &kvmppc_xive_native_ops; - if (ret) - return ret; - kvm->arch.xive = xive; return 0; } -- cgit v1.2.3 From efe5ddcae496b7c7307805d31815df23ba69bf7c Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Fri, 27 Sep 2019 13:54:07 +0200 Subject: KVM: PPC: Book3S HV: XIVE: Allow userspace to set the # of VPs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a new attribute to both XIVE and XICS-on-XIVE KVM devices so that userspace can tell how many interrupt servers it needs. If a VM needs less than the current default of KVM_MAX_VCPUS (2048), we can allocate less VPs in OPAL. Combined with a core stride (VSMT) that matches the number of guest threads per core, this may substantially increases the number of VMs that can run concurrently with an in-kernel XIVE device. Since the legacy XIVE KVM device is exposed to userspace through the XICS KVM API, a new attribute group is added to it for this purpose. While here, fix the syntax of the existing KVM_DEV_XICS_GRP_SOURCES in the XICS documentation. 
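For illustration, a VMM could size the VP block with the new control roughly as follows (sketch only, not taken from this series; it assumes xive_dev_fd was returned by KVM_CREATE_DEVICE and that no vCPU has connected yet):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int xive_set_nr_servers(int xive_dev_fd, __u32 nr_servers)
{
        struct kvm_device_attr attr = {
                .group = KVM_DEV_XIVE_GRP_CTRL,
                .attr  = KVM_DEV_XIVE_NR_SERVERS,
                .addr  = (__u64)(unsigned long)&nr_servers,
        };

        /* Fails with EBUSY once a vCPU has already connected to the device. */
        return ioctl(xive_dev_fd, KVM_SET_DEVICE_ATTR, &attr);
}

The XICS-on-XIVE device accepts the same value through KVM_DEV_XICS_GRP_CTRL / KVM_DEV_XICS_NR_SERVERS.
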
Signed-off-by: Greg Kurz Reviewed-by: Cédric Le Goater Signed-off-by: Paul Mackerras --- Documentation/virt/kvm/devices/xics.txt | 14 ++++++++++++-- Documentation/virt/kvm/devices/xive.txt | 8 ++++++++ arch/powerpc/include/uapi/asm/kvm.h | 3 +++ arch/powerpc/kvm/book3s_xive.c | 10 ++++++++++ arch/powerpc/kvm/book3s_xive_native.c | 3 +++ 5 files changed, 36 insertions(+), 2 deletions(-) diff --git a/Documentation/virt/kvm/devices/xics.txt b/Documentation/virt/kvm/devices/xics.txt index 42864935ac5d..423332dda7bc 100644 --- a/Documentation/virt/kvm/devices/xics.txt +++ b/Documentation/virt/kvm/devices/xics.txt @@ -3,9 +3,19 @@ XICS interrupt controller Device type supported: KVM_DEV_TYPE_XICS Groups: - KVM_DEV_XICS_SOURCES + 1. KVM_DEV_XICS_GRP_SOURCES Attributes: One per interrupt source, indexed by the source number. + 2. KVM_DEV_XICS_GRP_CTRL + Attributes: + 2.1 KVM_DEV_XICS_NR_SERVERS (write only) + The kvm_device_attr.addr points to a __u32 value which is the number of + interrupt server numbers (ie, highest possible vcpu id plus one). + Errors: + -EINVAL: Value greater than KVM_MAX_VCPU_ID. + -EFAULT: Invalid user pointer for attr->addr. + -EBUSY: A vcpu is already connected to the device. + This device emulates the XICS (eXternal Interrupt Controller Specification) defined in PAPR. The XICS has a set of interrupt sources, each identified by a 20-bit source number, and a set of @@ -38,7 +48,7 @@ least-significant end of the word: Each source has 64 bits of state that can be read and written using the KVM_GET_DEVICE_ATTR and KVM_SET_DEVICE_ATTR ioctls, specifying the -KVM_DEV_XICS_SOURCES attribute group, with the attribute number being +KVM_DEV_XICS_GRP_SOURCES attribute group, with the attribute number being the interrupt source number. The 64 bit state word has the following bitfields, starting from the least-significant end of the word: diff --git a/Documentation/virt/kvm/devices/xive.txt b/Documentation/virt/kvm/devices/xive.txt index 9a24a4525253..f5d1d6b5af61 100644 --- a/Documentation/virt/kvm/devices/xive.txt +++ b/Documentation/virt/kvm/devices/xive.txt @@ -78,6 +78,14 @@ the legacy interrupt mode, referred as XICS (POWER7/8). migrating the VM. Errors: none + 1.3 KVM_DEV_XIVE_NR_SERVERS (write only) + The kvm_device_attr.addr points to a __u32 value which is the number of + interrupt server numbers (ie, highest possible vcpu id plus one). + Errors: + -EINVAL: Value greater than KVM_MAX_VCPU_ID. + -EFAULT: Invalid user pointer for attr->addr. + -EBUSY: A vCPU is already connected to the device. + 2. KVM_DEV_XIVE_GRP_SOURCE (write only) Initializes a new source in the XIVE device and mask it. 
Attributes: diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index b0f72dea8b11..264e266a85bf 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -667,6 +667,8 @@ struct kvm_ppc_cpu_char { /* PPC64 eXternal Interrupt Controller Specification */ #define KVM_DEV_XICS_GRP_SOURCES 1 /* 64-bit source attributes */ +#define KVM_DEV_XICS_GRP_CTRL 2 +#define KVM_DEV_XICS_NR_SERVERS 1 /* Layout of 64-bit source attribute values */ #define KVM_XICS_DESTINATION_SHIFT 0 @@ -683,6 +685,7 @@ struct kvm_ppc_cpu_char { #define KVM_DEV_XIVE_GRP_CTRL 1 #define KVM_DEV_XIVE_RESET 1 #define KVM_DEV_XIVE_EQ_SYNC 2 +#define KVM_DEV_XIVE_NR_SERVERS 3 #define KVM_DEV_XIVE_GRP_SOURCE 2 /* 64-bit source identifier */ #define KVM_DEV_XIVE_GRP_SOURCE_CONFIG 3 /* 64-bit source identifier */ #define KVM_DEV_XIVE_GRP_EQ_CONFIG 4 /* 64-bit EQ identifier */ diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 6c35b3d95986..66858b7d3c6b 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -1911,6 +1911,11 @@ static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) switch (attr->group) { case KVM_DEV_XICS_GRP_SOURCES: return xive_set_source(xive, attr->attr, attr->addr); + case KVM_DEV_XICS_GRP_CTRL: + switch (attr->attr) { + case KVM_DEV_XICS_NR_SERVERS: + return kvmppc_xive_set_nr_servers(xive, attr->addr); + } } return -ENXIO; } @@ -1936,6 +1941,11 @@ static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) attr->attr < KVMPPC_XICS_NR_IRQS) return 0; break; + case KVM_DEV_XICS_GRP_CTRL: + switch (attr->attr) { + case KVM_DEV_XICS_NR_SERVERS: + return 0; + } } return -ENXIO; } diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 8ab333eabeef..34bd123fa024 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -921,6 +921,8 @@ static int kvmppc_xive_native_set_attr(struct kvm_device *dev, return kvmppc_xive_reset(xive); case KVM_DEV_XIVE_EQ_SYNC: return kvmppc_xive_native_eq_sync(xive); + case KVM_DEV_XIVE_NR_SERVERS: + return kvmppc_xive_set_nr_servers(xive, attr->addr); } break; case KVM_DEV_XIVE_GRP_SOURCE: @@ -960,6 +962,7 @@ static int kvmppc_xive_native_has_attr(struct kvm_device *dev, switch (attr->attr) { case KVM_DEV_XIVE_RESET: case KVM_DEV_XIVE_EQ_SYNC: + case KVM_DEV_XIVE_NR_SERVERS: return 0; } break; -- cgit v1.2.3 From 9ee6471eb9d43114ba4f0de3e0f483bf6fb2a906 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 2 Oct 2019 16:00:21 +1000 Subject: KVM: PPC: Book3S: Define and use SRR1_MSR_BITS Acked-by: Paul Mackerras Signed-off-by: Nicholas Piggin Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/reg.h | 12 ++++++++++++ arch/powerpc/kvm/book3s.c | 2 +- arch/powerpc/kvm/book3s_hv_nested.c | 2 +- 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index b3cbb1136bce..75c7e95a321b 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -748,6 +748,18 @@ #define SPRN_USPRG7 0x107 /* SPRG7 userspace read */ #define SPRN_SRR0 0x01A /* Save/Restore Register 0 */ #define SPRN_SRR1 0x01B /* Save/Restore Register 1 */ + +#ifdef CONFIG_PPC_BOOK3S +/* + * Bits loaded from MSR upon interrupt. + * PPC (64-bit) bits 33-36,42-47 are interrupt dependent, the others are + * loaded from MSR. 
The exception is that SRESET and MCE do not always load + * bit 62 (RI) from MSR. Don't use PPC_BITMASK for this because 32-bit uses + * it. + */ +#define SRR1_MSR_BITS (~0x783f0000UL) +#endif + #define SRR1_ISI_NOPT 0x40000000 /* ISI: Not found in hash */ #define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */ #define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */ diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index ec2547cc5ecb..a2336c452905 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -136,7 +136,7 @@ void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) { kvmppc_unfixup_split_real(vcpu); kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu)); - kvmppc_set_srr1(vcpu, (kvmppc_get_msr(vcpu) & ~0x783f0000ul) | flags); + kvmppc_set_srr1(vcpu, (kvmppc_get_msr(vcpu) & SRR1_MSR_BITS) | flags); kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec); vcpu->arch.mmu.reset_msr(vcpu); } diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index cdf30c6eaf54..dc97e5be76f6 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -1186,7 +1186,7 @@ static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu, forward_to_l1: vcpu->arch.fault_dsisr = flags; if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) { - vcpu->arch.shregs.msr &= ~0x783f0000ul; + vcpu->arch.shregs.msr &= SRR1_MSR_BITS; vcpu->arch.shregs.msr |= flags; } return RESUME_HOST; -- cgit v1.2.3 From 87a45e07a5abfec4d6b0e8356718f8919d0a3c20 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 2 Oct 2019 16:00:22 +1000 Subject: KVM: PPC: Book3S: Replace reset_msr mmu op with inject_interrupt arch op reset_msr sets the MSR for interrupt injection, but it's cleaner and more flexible to provide a single op to set both MSR and PC for the interrupt. 
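A quick sanity check of the mask used by these injection paths (illustrative arithmetic only, using IBM bit numbering where bit b of a 64-bit register is 1UL << (63 - b)):

/*
 * bits 33-36 -> (1UL<<30)|(1UL<<29)|(1UL<<28)|(1UL<<27) = 0x78000000
 * bits 42-47 -> (1UL<<21)|...|(1UL<<16)                 = 0x003f0000
 * together                                              = 0x783f0000
 *
 * so SRR1_MSR_BITS == ~0x783f0000UL preserves every MSR bit except the
 * interrupt-dependent ones, matching the open-coded ~0x783f0000ul that
 * the earlier hunks replace.
 */
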
Signed-off-by: Nicholas Piggin Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_host.h | 1 - arch/powerpc/include/asm/kvm_ppc.h | 1 + arch/powerpc/kvm/book3s.c | 27 +------------------------ arch/powerpc/kvm/book3s_32_mmu.c | 6 ------ arch/powerpc/kvm/book3s_64_mmu.c | 15 -------------- arch/powerpc/kvm/book3s_64_mmu_hv.c | 13 ------------ arch/powerpc/kvm/book3s_hv.c | 22 ++++++++++++++++++++ arch/powerpc/kvm/book3s_pr.c | 40 ++++++++++++++++++++++++++++++++++++- 8 files changed, 63 insertions(+), 62 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 6fe6ad64cba5..4273e799203d 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -401,7 +401,6 @@ struct kvmppc_mmu { u32 (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum); int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data, bool iswrite); - void (*reset_msr)(struct kvm_vcpu *vcpu); void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large); int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid); u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data); diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index ee62776e5433..d63f649fe713 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -271,6 +271,7 @@ struct kvmppc_ops { union kvmppc_one_reg *val); void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); void (*vcpu_put)(struct kvm_vcpu *vcpu); + void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags); void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr); int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu); struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id); diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index a2336c452905..58a59ee998e2 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -74,27 +74,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { NULL } }; -void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu) -{ - if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) { - ulong pc = kvmppc_get_pc(vcpu); - ulong lr = kvmppc_get_lr(vcpu); - if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS) - kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK); - if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS) - kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK); - vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK; - } -} -EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real); - -static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu) -{ - if (!is_kvmppc_hv_enabled(vcpu->kvm)) - return to_book3s(vcpu)->hior; - return 0; -} - static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu, unsigned long pending_now, unsigned long old_pending) { @@ -134,11 +113,7 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) { - kvmppc_unfixup_split_real(vcpu); - kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu)); - kvmppc_set_srr1(vcpu, (kvmppc_get_msr(vcpu) & SRR1_MSR_BITS) | flags); - kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec); - vcpu->arch.mmu.reset_msr(vcpu); + vcpu->kvm->arch.kvm_ops->inject_interrupt(vcpu, vec, flags); } static int kvmppc_book3s_vec2irqprio(unsigned int vec) diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c index 18f244aad7aa..f21e73492ce3 100644 --- a/arch/powerpc/kvm/book3s_32_mmu.c +++ b/arch/powerpc/kvm/book3s_32_mmu.c @@ -90,11 +90,6 
@@ static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16); } -static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu) -{ - kvmppc_set_msr(vcpu, 0); -} - static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvm_vcpu *vcpu, u32 sre, gva_t eaddr, bool primary) @@ -406,7 +401,6 @@ void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu) mmu->mtsrin = kvmppc_mmu_book3s_32_mtsrin; mmu->mfsrin = kvmppc_mmu_book3s_32_mfsrin; mmu->xlate = kvmppc_mmu_book3s_32_xlate; - mmu->reset_msr = kvmppc_mmu_book3s_32_reset_msr; mmu->tlbie = kvmppc_mmu_book3s_32_tlbie; mmu->esid_to_vsid = kvmppc_mmu_book3s_32_esid_to_vsid; mmu->ea_to_vp = kvmppc_mmu_book3s_32_ea_to_vp; diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index 5f63a5f7f24f..599133256a95 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c @@ -24,20 +24,6 @@ #define dprintk(X...) do { } while(0) #endif -static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu) -{ - unsigned long msr = vcpu->arch.intr_msr; - unsigned long cur_msr = kvmppc_get_msr(vcpu); - - /* If transactional, change to suspend mode on IRQ delivery */ - if (MSR_TM_TRANSACTIONAL(cur_msr)) - msr |= MSR_TS_S; - else - msr |= cur_msr & MSR_TS_MASK; - - kvmppc_set_msr(vcpu, msr); -} - static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe( struct kvm_vcpu *vcpu, gva_t eaddr) @@ -676,7 +662,6 @@ void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu) mmu->slbie = kvmppc_mmu_book3s_64_slbie; mmu->slbia = kvmppc_mmu_book3s_64_slbia; mmu->xlate = kvmppc_mmu_book3s_64_xlate; - mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr; mmu->tlbie = kvmppc_mmu_book3s_64_tlbie; mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid; mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp; diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index f2b9aea43216..4c37e97c75a1 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -275,18 +275,6 @@ int kvmppc_mmu_hv_init(void) return 0; } -static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu) -{ - unsigned long msr = vcpu->arch.intr_msr; - - /* If transactional, change to suspend mode on IRQ delivery */ - if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr)) - msr |= MSR_TS_S; - else - msr |= vcpu->arch.shregs.msr & MSR_TS_MASK; - kvmppc_set_msr(vcpu, msr); -} - static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags, long pte_index, unsigned long pteh, unsigned long ptel, unsigned long *pte_idx_ret) @@ -2162,7 +2150,6 @@ void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu) vcpu->arch.slb_nr = 32; /* POWER7/POWER8 */ mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate; - mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr; vcpu->arch.hflags |= BOOK3S_HFLAG_SLB; } diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 709cf1fd4cf4..94a0a9911b27 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -338,6 +338,27 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); } +static void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags) +{ + unsigned long msr, pc, new_msr, new_pc; + + msr = kvmppc_get_msr(vcpu); + pc = kvmppc_get_pc(vcpu); + new_msr = vcpu->arch.intr_msr; + new_pc = vec; + + /* If transactional, change to suspend mode on IRQ delivery */ + if (MSR_TM_TRANSACTIONAL(msr)) + new_msr |= 
MSR_TS_S; + else + new_msr |= msr & MSR_TS_MASK; + + kvmppc_set_srr0(vcpu, pc); + kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags); + kvmppc_set_pc(vcpu, new_pc); + kvmppc_set_msr(vcpu, new_msr); +} + static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) { /* @@ -5401,6 +5422,7 @@ static struct kvmppc_ops kvm_ops_hv = { .set_one_reg = kvmppc_set_one_reg_hv, .vcpu_load = kvmppc_core_vcpu_load_hv, .vcpu_put = kvmppc_core_vcpu_put_hv, + .inject_interrupt = kvmppc_inject_interrupt_hv, .set_msr = kvmppc_set_msr_hv, .vcpu_run = kvmppc_vcpu_run_hv, .vcpu_create = kvmppc_core_vcpu_create_hv, diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index cc65af8fe6f7..ce4fcf76e53e 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -90,7 +90,43 @@ static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu) kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS); } -void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu); +static void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu) +{ + if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) { + ulong pc = kvmppc_get_pc(vcpu); + ulong lr = kvmppc_get_lr(vcpu); + if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS) + kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK); + if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS) + kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK); + vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK; + } +} + +static void kvmppc_inject_interrupt_pr(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags) +{ + unsigned long msr, pc, new_msr, new_pc; + + kvmppc_unfixup_split_real(vcpu); + + msr = kvmppc_get_msr(vcpu); + pc = kvmppc_get_pc(vcpu); + new_msr = vcpu->arch.intr_msr; + new_pc = to_book3s(vcpu)->hior + vec; + +#ifdef CONFIG_PPC_BOOK3S_64 + /* If transactional, change to suspend mode on IRQ delivery */ + if (MSR_TM_TRANSACTIONAL(msr)) + new_msr |= MSR_TS_S; + else + new_msr |= msr & MSR_TS_MASK; +#endif + + kvmppc_set_srr0(vcpu, pc); + kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags); + kvmppc_set_pc(vcpu, new_pc); + kvmppc_set_msr(vcpu, new_msr); +} static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) { @@ -1761,6 +1797,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm, #else /* default to book3s_32 (750) */ vcpu->arch.pvr = 0x84202; + vcpu->arch.intr_msr = 0; #endif kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr); vcpu->arch.slb_nr = 64; @@ -2058,6 +2095,7 @@ static struct kvmppc_ops kvm_ops_pr = { .set_one_reg = kvmppc_set_one_reg_pr, .vcpu_load = kvmppc_core_vcpu_load_pr, .vcpu_put = kvmppc_core_vcpu_put_pr, + .inject_interrupt = kvmppc_inject_interrupt_pr, .set_msr = kvmppc_set_msr_pr, .vcpu_run = kvmppc_vcpu_run_pr, .vcpu_create = kvmppc_core_vcpu_create_pr, -- cgit v1.2.3 From 268f4ef9954cec198cd6772caadf453bcaed3e5a Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 2 Oct 2019 16:00:23 +1000 Subject: KVM: PPC: Book3S HV: Reuse kvmppc_inject_interrupt for async guest delivery This consolidates the HV interrupt delivery logic into one place. 
Signed-off-by: Nicholas Piggin Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s.h | 3 ++ arch/powerpc/kvm/book3s_hv.c | 43 ----------------------- arch/powerpc/kvm/book3s_hv_builtin.c | 67 ++++++++++++++++++++++++++++-------- 3 files changed, 56 insertions(+), 57 deletions(-) diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h index 2ef1311a2a13..3a4613985949 100644 --- a/arch/powerpc/kvm/book3s.h +++ b/arch/powerpc/kvm/book3s.h @@ -32,4 +32,7 @@ extern void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val); static inline void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val) {} #endif +extern void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr); +extern void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags); + #endif diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 94a0a9911b27..c340d416dce3 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -133,7 +133,6 @@ static inline bool nesting_enabled(struct kvm *kvm) /* If set, the threads on each CPU core have to be in the same MMU mode */ static bool no_mixing_hpt_and_radix; -static void kvmppc_end_cede(struct kvm_vcpu *vcpu); static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); /* @@ -338,39 +337,6 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); } -static void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags) -{ - unsigned long msr, pc, new_msr, new_pc; - - msr = kvmppc_get_msr(vcpu); - pc = kvmppc_get_pc(vcpu); - new_msr = vcpu->arch.intr_msr; - new_pc = vec; - - /* If transactional, change to suspend mode on IRQ delivery */ - if (MSR_TM_TRANSACTIONAL(msr)) - new_msr |= MSR_TS_S; - else - new_msr |= msr & MSR_TS_MASK; - - kvmppc_set_srr0(vcpu, pc); - kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags); - kvmppc_set_pc(vcpu, new_pc); - kvmppc_set_msr(vcpu, new_msr); -} - -static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) -{ - /* - * Check for illegal transactional state bit combination - * and if we find it, force the TS field to a safe state. - */ - if ((msr & MSR_TS_MASK) == MSR_TS_MASK) - msr &= ~MSR_TS_MASK; - vcpu->arch.shregs.msr = msr; - kvmppc_end_cede(vcpu); -} - static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr) { vcpu->arch.pvr = pvr; @@ -2475,15 +2441,6 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu) vcpu->arch.timer_running = 1; } -static void kvmppc_end_cede(struct kvm_vcpu *vcpu) -{ - vcpu->arch.ceded = 0; - if (vcpu->arch.timer_running) { - hrtimer_try_to_cancel(&vcpu->arch.dec_timer); - vcpu->arch.timer_running = 0; - } -} - extern int __kvmppc_vcore_entry(void); static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index 7c1909657b55..068bee941a71 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -755,6 +755,56 @@ void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip) local_paca->kvm_hstate.kvm_split_mode = NULL; } +static void kvmppc_end_cede(struct kvm_vcpu *vcpu) +{ + vcpu->arch.ceded = 0; + if (vcpu->arch.timer_running) { + hrtimer_try_to_cancel(&vcpu->arch.dec_timer); + vcpu->arch.timer_running = 0; + } +} + +void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) +{ + /* + * Check for illegal transactional state bit combination + * and if we find it, force the TS field to a safe state. 
+ */ + if ((msr & MSR_TS_MASK) == MSR_TS_MASK) + msr &= ~MSR_TS_MASK; + vcpu->arch.shregs.msr = msr; + kvmppc_end_cede(vcpu); +} +EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv); + +static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags) +{ + unsigned long msr, pc, new_msr, new_pc; + + msr = kvmppc_get_msr(vcpu); + pc = kvmppc_get_pc(vcpu); + new_msr = vcpu->arch.intr_msr; + new_pc = vec; + + /* If transactional, change to suspend mode on IRQ delivery */ + if (MSR_TM_TRANSACTIONAL(msr)) + new_msr |= MSR_TS_S; + else + new_msr |= msr & MSR_TS_MASK; + + kvmppc_set_srr0(vcpu, pc); + kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags); + kvmppc_set_pc(vcpu, new_pc); + vcpu->arch.shregs.msr = new_msr; +} + +void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags) +{ + inject_interrupt(vcpu, vec, srr1_flags); + kvmppc_end_cede(vcpu); +} +EXPORT_SYMBOL_GPL(kvmppc_inject_interrupt_hv); + /* * Is there a PRIV_DOORBELL pending for the guest (on POWER9)? * Can we inject a Decrementer or a External interrupt? @@ -762,7 +812,6 @@ void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip) void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu) { int ext; - unsigned long vec = 0; unsigned long lpcr; /* Insert EXTERNAL bit into LPCR at the MER bit position */ @@ -774,26 +823,16 @@ void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu) if (vcpu->arch.shregs.msr & MSR_EE) { if (ext) { - vec = BOOK3S_INTERRUPT_EXTERNAL; + inject_interrupt(vcpu, BOOK3S_INTERRUPT_EXTERNAL, 0); } else { long int dec = mfspr(SPRN_DEC); if (!(lpcr & LPCR_LD)) dec = (int) dec; if (dec < 0) - vec = BOOK3S_INTERRUPT_DECREMENTER; + inject_interrupt(vcpu, + BOOK3S_INTERRUPT_DECREMENTER, 0); } } - if (vec) { - unsigned long msr, old_msr = vcpu->arch.shregs.msr; - - kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu)); - kvmppc_set_srr1(vcpu, old_msr); - kvmppc_set_pc(vcpu, vec); - msr = vcpu->arch.intr_msr; - if (MSR_TM_ACTIVE(old_msr)) - msr |= MSR_TS_S; - vcpu->arch.shregs.msr = msr; - } if (vcpu->arch.doorbell_request) { mtspr(SPRN_DPDES, 1); -- cgit v1.2.3 From 6a13cb0c376abb436d060b989018257963656d0c Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 2 Oct 2019 16:00:24 +1000 Subject: KVM: PPC: Book3S HV: Implement LPCR[AIL]=3 mode for injected interrupts kvmppc_inject_interrupt does not implement LPCR[AIL]!=0 modes, which can result in the guest receiving interrupts as if LPCR[AIL]=0 contrary to the ISA. In practice, Linux guests cope with this deviation, but it should be fixed. Signed-off-by: Nicholas Piggin Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv_builtin.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index 068bee941a71..7cd3cf3d366b 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -792,6 +792,21 @@ static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags) else new_msr |= msr & MSR_TS_MASK; + /* + * Perform MSR and PC adjustment for LPCR[AIL]=3 if it is set and + * applicable. AIL=2 is not supported. + * + * AIL does not apply to SRESET, MCE, or HMI (which is never + * delivered to the guest), and does not apply if IR=0 or DR=0. 
+ */ + if (vec != BOOK3S_INTERRUPT_SYSTEM_RESET && + vec != BOOK3S_INTERRUPT_MACHINE_CHECK && + (vcpu->arch.vcore->lpcr & LPCR_AIL) == LPCR_AIL_3 && + (msr & (MSR_IR|MSR_DR)) == (MSR_IR|MSR_DR) ) { + new_msr |= MSR_IR | MSR_DR; + new_pc += 0xC000000000004000ULL; + } + kvmppc_set_srr0(vcpu, pc); kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags); kvmppc_set_pc(vcpu, new_pc); -- cgit v1.2.3 From 55d7004299eb917767761f01a208d50afad4f535 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 2 Oct 2019 16:00:25 +1000 Subject: KVM: PPC: Book3S HV: Reject mflags=2 (LPCR[AIL]=2) ADDR_TRANS_MODE mode AIL=2 mode has no known users, so is not well tested or supported. Disallow guests from selecting this mode because it may become deprecated in future versions of the architecture. This policy decision is not left to QEMU because KVM support is required for AIL=2 (when injecting interrupts). Signed-off-by: Nicholas Piggin Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index c340d416dce3..ec5c0379296a 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -779,6 +779,11 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags, vcpu->arch.dawr = value1; vcpu->arch.dawrx = value2; return H_SUCCESS; + case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE: + /* KVM does not support mflags=2 (AIL=2) */ + if (mflags != 0 && mflags != 3) + return H_UNSUPPORTED_FLAG_START; + return H_TOO_HARD; default: return H_TOO_HARD; } -- cgit v1.2.3 From e3b9a9e147dbe1a8fb9d8398a2faa47d8a6f50de Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Thu, 3 Oct 2019 21:17:43 +0000 Subject: KVM: SVM: Serialize access to the SEV ASID bitmap The SEV ASID bitmap currently is not protected against parallel SEV guest startups. This can result in an SEV guest failing to start because another SEV guest could have been assigned the same ASID value. Use a mutex to serialize access to the SEV ASID bitmap. 
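For illustration only, the race and the fix can be modeled in ordinary user-space C (build with cc -pthread): a pthread mutex and a single unsigned long stand in for sev_bitmap_lock and the SEV ASID bitmap, and every name below is an illustrative stand-in rather than the kernel code.

  #include <pthread.h>
  #include <stdio.h>

  #define MAX_ASID 16                    /* stand-in for max_sev_asid */
  #define MIN_ASID 1                     /* stand-in for min_sev_asid */

  static unsigned long asid_bitmap;      /* bit i set => ASID i+1 is in use */
  static pthread_mutex_t bitmap_lock = PTHREAD_MUTEX_INITIALIZER;

  /* Returns an ASID in [MIN_ASID, MAX_ASID], or -1 if none are free. */
  static int asid_new(void)
  {
          int pos, ret = -1;

          pthread_mutex_lock(&bitmap_lock);       /* find + set must be one atomic step */
          for (pos = MIN_ASID - 1; pos < MAX_ASID; pos++) {
                  if (!(asid_bitmap & (1UL << pos))) {
                          asid_bitmap |= 1UL << pos;
                          ret = pos + 1;
                          break;
                  }
          }
          pthread_mutex_unlock(&bitmap_lock);
          return ret;
  }

  static void asid_free(int asid)
  {
          pthread_mutex_lock(&bitmap_lock);
          asid_bitmap &= ~(1UL << (asid - 1));
          pthread_mutex_unlock(&bitmap_lock);
  }

  int main(void)
  {
          int a = asid_new(), b = asid_new();

          printf("a=%d b=%d\n", a, b);            /* always distinct ASIDs */
          asid_free(a);
          asid_free(b);
          return 0;
  }

Without the lock, two threads starting guests concurrently can both observe the same clear bit and hand out the same ASID; holding the lock across the find-and-set removes that window.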
Fixes: 1654efcbc431 ("KVM: SVM: Add KVM_SEV_INIT command") Tested-by: David Rientjes Signed-off-by: Tom Lendacky Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm.c | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index f8ecb6df5106..d371007ab109 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -418,6 +418,7 @@ enum { #define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL +static DEFINE_MUTEX(sev_bitmap_lock); static unsigned int max_sev_asid; static unsigned int min_sev_asid; static unsigned long *sev_asid_bitmap; @@ -1723,25 +1724,22 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu) return 0; } -static void __sev_asid_free(int asid) +static void sev_asid_free(int asid) { struct svm_cpu_data *sd; int cpu, pos; + mutex_lock(&sev_bitmap_lock); + pos = asid - 1; - clear_bit(pos, sev_asid_bitmap); + __clear_bit(pos, sev_asid_bitmap); for_each_possible_cpu(cpu) { sd = per_cpu(svm_data, cpu); sd->sev_vmcbs[pos] = NULL; } -} - -static void sev_asid_free(struct kvm *kvm) -{ - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; - __sev_asid_free(sev->asid); + mutex_unlock(&sev_bitmap_lock); } static void sev_unbind_asid(struct kvm *kvm, unsigned int handle) @@ -1910,7 +1908,7 @@ static void sev_vm_destroy(struct kvm *kvm) mutex_unlock(&kvm->lock); sev_unbind_asid(kvm, sev->handle); - sev_asid_free(kvm); + sev_asid_free(sev->asid); } static void avic_vm_destroy(struct kvm *kvm) @@ -6268,14 +6266,21 @@ static int sev_asid_new(void) { int pos; + mutex_lock(&sev_bitmap_lock); + /* * SEV-enabled guest must use asid from min_sev_asid to max_sev_asid. */ pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1); - if (pos >= max_sev_asid) + if (pos >= max_sev_asid) { + mutex_unlock(&sev_bitmap_lock); return -EBUSY; + } + + __set_bit(pos, sev_asid_bitmap); + + mutex_unlock(&sev_bitmap_lock); - set_bit(pos, sev_asid_bitmap); return pos + 1; } @@ -6303,7 +6308,7 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) return 0; e_free: - __sev_asid_free(asid); + sev_asid_free(asid); return ret; } -- cgit v1.2.3 From 83af5e65a89547633bab7278564219ca8e68b968 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Thu, 3 Oct 2019 21:17:45 +0000 Subject: KVM: SVM: Guard against DEACTIVATE when performing WBINVD/DF_FLUSH The SEV firmware DEACTIVATE command disassociates an SEV guest from an ASID, clears the WBINVD indicator on all threads and indicates that the SEV firmware DF_FLUSH command must be issued before the ASID can be re-used. The SEV firmware DF_FLUSH command will return an error if a WBINVD has not been performed on every thread before it has been invoked. A window exists between the WBINVD and the invocation of the DF_FLUSH command where an SEV firmware DEACTIVATE command could be invoked on another thread, clearing the WBINVD indicator. This will cause the subsequent SEV firmware DF_FLUSH command to fail which, in turn, results in the SEV firmware ACTIVATE command failing for the reclaimed ASID. This results in the SEV guest failing to start. Use a mutex to close the WBINVD/DF_FLUSH window by obtaining the mutex before the DEACTIVATE and releasing it after the DF_FLUSH. This ensures that any DEACTIVATE cannot run before a DF_FLUSH has completed. 
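The window being closed can be sketched the same way, again as a hedged user-space model rather than the SVM code: one lock is held across the complete firmware sequence on both paths, so a DEACTIVATE can never land between the WBINVD and the DF_FLUSH (helper names below are made up; build with cc -pthread).

  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>

  static pthread_mutex_t deactivate_lock = PTHREAD_MUTEX_INITIALIZER;
  static bool wbinvd_indicator;            /* firmware-side flag cleared by DEACTIVATE */

  static void wbinvd_all(void)  { wbinvd_indicator = true; }
  static int  df_flush(void)    { return wbinvd_indicator ? 0 : -1; }

  /* Guest teardown: its DEACTIVATE must not slip into another thread's window. */
  static void unbind_asid(void)
  {
          pthread_mutex_lock(&deactivate_lock);
          wbinvd_indicator = false;        /* the DEACTIVATE step clears the indicator */
          wbinvd_all();
          (void)df_flush();                /* cannot fail here: the window is closed */
          pthread_mutex_unlock(&deactivate_lock);
  }

  /* ASID (re)activation path: the same lock guards its WBINVD + DF_FLUSH pair. */
  static int bind_asid(void)
  {
          int rc;

          pthread_mutex_lock(&deactivate_lock);
          wbinvd_all();
          rc = df_flush();
          pthread_mutex_unlock(&deactivate_lock);
          return rc;
  }

  int main(void)
  {
          unbind_asid();
          printf("bind rc=%d\n", bind_asid());
          return 0;
  }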
Fixes: 59414c989220 ("KVM: SVM: Add support for KVM_SEV_LAUNCH_START command") Tested-by: David Rientjes Signed-off-by: Tom Lendacky Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index d371007ab109..1d217680cf83 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -418,6 +418,7 @@ enum { #define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL +static DEFINE_MUTEX(sev_deactivate_lock); static DEFINE_MUTEX(sev_bitmap_lock); static unsigned int max_sev_asid; static unsigned int min_sev_asid; @@ -1756,10 +1757,20 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle) /* deactivate handle */ data->handle = handle; + + /* + * Guard against a parallel DEACTIVATE command before the DF_FLUSH + * command has completed. + */ + mutex_lock(&sev_deactivate_lock); + sev_guest_deactivate(data, NULL); wbinvd_on_all_cpus(); sev_guest_df_flush(NULL); + + mutex_unlock(&sev_deactivate_lock); + kfree(data); decommission = kzalloc(sizeof(*decommission), GFP_KERNEL); @@ -6318,9 +6329,18 @@ static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error) int asid = sev_get_asid(kvm); int ret; + /* + * Guard against a DEACTIVATE command before the DF_FLUSH command + * has completed. + */ + mutex_lock(&sev_deactivate_lock); + wbinvd_on_all_cpus(); ret = sev_guest_df_flush(error); + + mutex_unlock(&sev_deactivate_lock); + if (ret) return ret; -- cgit v1.2.3 From 04f11ef45810da5ae2542dd78cc353f3761bd2cb Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 27 Sep 2019 14:45:16 -0700 Subject: KVM: nVMX: Always write vmcs02.GUEST_CR3 during nested VM-Enter Write the desired L2 CR3 into vmcs02.GUEST_CR3 during nested VM-Enter instead of deferring the VMWRITE until vmx_set_cr3(). If the VMWRITE is deferred, then KVM can consume a stale vmcs02.GUEST_CR3 when it refreshes vmcs12->guest_cr3 during nested_vmx_vmexit() if the emulated VM-Exit occurs without actually entering L2, e.g. if the nested run is squashed because nested VM-Enter (from L1) is putting L2 into HLT. Note, the above scenario can occur regardless of whether L1 is intercepting HLT, e.g. L1 can intercept HLT and then re-enter L2 with vmcs.GUEST_ACTIVITY_STATE=HALTED. But practically speaking, a VMM will likely put a guest into HALTED if and only if it's not intercepting HLT. In an ideal world where EPT *requires* unrestricted guest (and vice versa), VMX could handle CR3 similar to how it handles RSP and RIP, e.g. mark CR3 dirty and conditionally load it at vmx_vcpu_run(). But the unrestricted guest silliness complicates the dirty tracking logic to the point that explicitly handling vmcs02.GUEST_CR3 during nested VM-Enter is a simpler overall implementation. Cc: stable@vger.kernel.org Reported-and-tested-by: Reto Buerki Tested-by: Vitaly Kuznetsov Reviewed-by: Liran Alon Signed-off-by: Sean Christopherson Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 10 ++++++++++ arch/x86/kvm/vmx/vmx.c | 10 +++++++--- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index e76eb4f07f6c..d93ddc79a595 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -2418,6 +2418,16 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, entry_failure_code)) return -EINVAL; + /* + * Immediately write vmcs02.GUEST_CR3. 
It will be propagated to vmcs12 + * on nested VM-Exit, which can occur without actually running L2 and + * thus without hitting vmx_set_cr3(), e.g. if L1 is entering L2 with + * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the + * transition to HLT instead of running L2. + */ + if (enable_ept) + vmcs_writel(GUEST_CR3, vmcs12->guest_cr3); + /* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */ if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) { diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 8f01019295a1..04603f53ca36 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2989,6 +2989,7 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa) void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) { struct kvm *kvm = vcpu->kvm; + bool update_guest_cr3 = true; unsigned long guest_cr3; u64 eptp; @@ -3005,15 +3006,18 @@ void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock); } - if (enable_unrestricted_guest || is_paging(vcpu) || - is_guest_mode(vcpu)) + /* Loading vmcs02.GUEST_CR3 is handled by nested VM-Enter. */ + if (is_guest_mode(vcpu)) + update_guest_cr3 = false; + else if (enable_unrestricted_guest || is_paging(vcpu)) guest_cr3 = kvm_read_cr3(vcpu); else guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr; ept_load_pdptrs(vcpu); } - vmcs_writel(GUEST_CR3, guest_cr3); + if (update_guest_cr3) + vmcs_writel(GUEST_CR3, guest_cr3); } int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) -- cgit v1.2.3 From 0fc5deae03a2724a4b18373b2e6a3b585019de1e Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Thu, 3 Oct 2019 21:17:46 +0000 Subject: KVM: SVM: Remove unneeded WBINVD and DF_FLUSH when starting SEV guests Performing a WBINVD and DF_FLUSH are expensive operations. The SEV support currently performs this WBINVD/DF_FLUSH combination when an SEV guest is terminated, so there is no need for it to be done before LAUNCH. However, when the SEV firmware transitions the platform from UNINIT state to INIT state, all ASIDs will be marked invalid across all threads. Therefore, as part of transitioning the platform to INIT state, perform a WBINVD/DF_FLUSH after a successful INIT in the PSP/SEV device driver. Since the PSP/SEV device driver is x86 only, it can reference and use the WBINVD related functions directly. Cc: Gary Hook Cc: Herbert Xu Cc: "David S. Miller" Tested-by: David Rientjes Signed-off-by: Tom Lendacky Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm.c | 15 --------------- drivers/crypto/ccp/psp-dev.c | 9 +++++++++ 2 files changed, 9 insertions(+), 15 deletions(-) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 1d217680cf83..389dfd7594eb 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -6329,21 +6329,6 @@ static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error) int asid = sev_get_asid(kvm); int ret; - /* - * Guard against a DEACTIVATE command before the DF_FLUSH command - * has completed. 
- */ - mutex_lock(&sev_deactivate_lock); - - wbinvd_on_all_cpus(); - - ret = sev_guest_df_flush(error); - - mutex_unlock(&sev_deactivate_lock); - - if (ret) - return ret; - data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); if (!data) return -ENOMEM; diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 6b17d179ef8a..39fdd0641637 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -21,6 +21,8 @@ #include #include +#include + #include "sp-dev.h" #include "psp-dev.h" @@ -235,6 +237,13 @@ static int __sev_platform_init_locked(int *error) return rc; psp->sev_state = SEV_STATE_INIT; + + /* Prepare for first SEV guest launch after INIT */ + wbinvd_on_all_cpus(); + rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error); + if (rc) + return rc; + dev_dbg(psp->dev, "SEV firmware initialized\n"); return rc; -- cgit v1.2.3 From 33af3a7ef9e6fb6fa5f0168c3c67f51776dafc54 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Thu, 3 Oct 2019 21:17:48 +0000 Subject: KVM: SVM: Reduce WBINVD/DF_FLUSH invocations Performing a WBINVD and DF_FLUSH are expensive operations. Currently, a WBINVD/DF_FLUSH is performed every time an SEV guest terminates. However, the WBINVD/DF_FLUSH is only required when an ASID is being re-allocated to a new SEV guest. Also, a single WBINVD/DF_FLUSH can enable all ASIDs that have been disassociated from guests through DEACTIVATE. To reduce the number of WBINVD/DF_FLUSH invocations, introduce a new ASID bitmap to track ASIDs that need to be reclaimed. When an SEV guest is terminated, add its ASID to the reclaim bitmap instead of clearing the bitmap in the existing SEV ASID bitmap. This delays the need to perform a WBINVD/DF_FLUSH invocation when an SEV guest terminates until all of the available SEV ASIDs have been used. At that point, the WBINVD/DF_FLUSH invocation can be performed and all ASIDs in the reclaim bitmap moved to the available ASIDs bitmap. The semaphore around DEACTIVATE can be changed to a read semaphore with the semaphore taken in write mode before performing the WBINVD/DF_FLUSH. 
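A minimal user-space model of the deferred-reclaim scheme might look as follows; the pthread primitives and helpers stand in for sev_bitmap_lock, the read/write semaphore and the firmware flush, and clearing the used bits here has the same effect as the bitmap_xor()/bitmap_zero() pair in the patch (build with cc -pthread).

  #include <pthread.h>
  #include <stdio.h>

  #define NR_ASIDS 8

  static unsigned long used_bitmap;        /* ASIDs handed out to guests */
  static unsigned long reclaim_bitmap;     /* freed by teardown, flush still pending */
  static pthread_mutex_t bitmap_lock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_rwlock_t deactivate_lock = PTHREAD_RWLOCK_INITIALIZER;

  static int flush_asids(void)             /* stand-in for WBINVD + DF_FLUSH */
  {
          pthread_rwlock_wrlock(&deactivate_lock);  /* excludes concurrent DEACTIVATE readers */
          /* the expensive flush would run here */
          pthread_rwlock_unlock(&deactivate_lock);
          return 0;
  }

  /* Called with bitmap_lock held: move reclaimable ASIDs back to the free pool. */
  static int recycle_asids(void)
  {
          if (!reclaim_bitmap || flush_asids())
                  return 0;
          used_bitmap &= ~reclaim_bitmap;  /* reclaimed bits are a subset of used bits */
          reclaim_bitmap = 0;
          return 1;
  }

  static int asid_new(void)
  {
          int pos, retry = 1, ret = -1;

          pthread_mutex_lock(&bitmap_lock);
  again:
          for (pos = 0; pos < NR_ASIDS; pos++) {
                  if (!(used_bitmap & (1UL << pos))) {
                          used_bitmap |= 1UL << pos;
                          ret = pos + 1;
                          goto out;
                  }
          }
          if (retry-- && recycle_asids())  /* flush once, only on exhaustion */
                  goto again;
  out:
          pthread_mutex_unlock(&bitmap_lock);
          return ret;
  }

  static void asid_free(int asid)          /* guest teardown: defer the flush */
  {
          pthread_mutex_lock(&bitmap_lock);
          reclaim_bitmap |= 1UL << (asid - 1);
          pthread_mutex_unlock(&bitmap_lock);
  }

  int main(void)
  {
          for (int i = 0; i < NR_ASIDS; i++)
                  asid_free(asid_new());   /* exhaust the pool without flushing */
          printf("after recycle: %d\n", asid_new());  /* prints 1, not -1 */
          return 0;
  }

The net effect is that the expensive flush runs at most once per pool exhaustion instead of once per guest teardown.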
Tested-by: David Rientjes Signed-off-by: Tom Lendacky Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm.c | 81 ++++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 66 insertions(+), 15 deletions(-) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 389dfd7594eb..62b0938b62ef 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include @@ -418,11 +419,13 @@ enum { #define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL -static DEFINE_MUTEX(sev_deactivate_lock); +static int sev_flush_asids(void); +static DECLARE_RWSEM(sev_deactivate_lock); static DEFINE_MUTEX(sev_bitmap_lock); static unsigned int max_sev_asid; static unsigned int min_sev_asid; static unsigned long *sev_asid_bitmap; +static unsigned long *sev_reclaim_asid_bitmap; #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT) struct enc_region { @@ -1231,11 +1234,15 @@ static __init int sev_hardware_setup(void) /* Minimum ASID value that should be used for SEV guest */ min_sev_asid = cpuid_edx(0x8000001F); - /* Initialize SEV ASID bitmap */ + /* Initialize SEV ASID bitmaps */ sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL); if (!sev_asid_bitmap) return 1; + sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL); + if (!sev_reclaim_asid_bitmap) + return 1; + status = kmalloc(sizeof(*status), GFP_KERNEL); if (!status) return 1; @@ -1414,8 +1421,12 @@ static __exit void svm_hardware_unsetup(void) { int cpu; - if (svm_sev_enabled()) + if (svm_sev_enabled()) { bitmap_free(sev_asid_bitmap); + bitmap_free(sev_reclaim_asid_bitmap); + + sev_flush_asids(); + } for_each_possible_cpu(cpu) svm_cpu_uninit(cpu); @@ -1733,7 +1744,7 @@ static void sev_asid_free(int asid) mutex_lock(&sev_bitmap_lock); pos = asid - 1; - __clear_bit(pos, sev_asid_bitmap); + __set_bit(pos, sev_reclaim_asid_bitmap); for_each_possible_cpu(cpu) { sd = per_cpu(svm_data, cpu); @@ -1758,18 +1769,10 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle) /* deactivate handle */ data->handle = handle; - /* - * Guard against a parallel DEACTIVATE command before the DF_FLUSH - * command has completed. - */ - mutex_lock(&sev_deactivate_lock); - + /* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */ + down_read(&sev_deactivate_lock); sev_guest_deactivate(data, NULL); - - wbinvd_on_all_cpus(); - sev_guest_df_flush(NULL); - - mutex_unlock(&sev_deactivate_lock); + up_read(&sev_deactivate_lock); kfree(data); @@ -6273,8 +6276,51 @@ static int enable_smi_window(struct kvm_vcpu *vcpu) return 0; } +static int sev_flush_asids(void) +{ + int ret, error; + + /* + * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail, + * so it must be guarded. 
+ */ + down_write(&sev_deactivate_lock); + + wbinvd_on_all_cpus(); + ret = sev_guest_df_flush(&error); + + up_write(&sev_deactivate_lock); + + if (ret) + pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error); + + return ret; +} + +/* Must be called with the sev_bitmap_lock held */ +static bool __sev_recycle_asids(void) +{ + int pos; + + /* Check if there are any ASIDs to reclaim before performing a flush */ + pos = find_next_bit(sev_reclaim_asid_bitmap, + max_sev_asid, min_sev_asid - 1); + if (pos >= max_sev_asid) + return false; + + if (sev_flush_asids()) + return false; + + bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap, + max_sev_asid); + bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid); + + return true; +} + static int sev_asid_new(void) { + bool retry = true; int pos; mutex_lock(&sev_bitmap_lock); @@ -6282,8 +6328,13 @@ static int sev_asid_new(void) /* * SEV-enabled guest must use asid from min_sev_asid to max_sev_asid. */ +again: pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1); if (pos >= max_sev_asid) { + if (retry && __sev_recycle_asids()) { + retry = false; + goto again; + } mutex_unlock(&sev_bitmap_lock); return -EBUSY; } -- cgit v1.2.3 From b17b7436f2f0c4984f98a0b317b8362fd365700d Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 27 Sep 2019 14:45:17 -0700 Subject: KVM: VMX: Skip GUEST_CR3 VMREAD+VMWRITE if the VMCS is up-to-date Skip the VMWRITE to update GUEST_CR3 if CR3 is not available, i.e. has not been read from the VMCS since the last VM-Enter. If vcpu->arch.cr3 is stale, kvm_read_cr3(vcpu) will refresh vcpu->arch.cr3 from the VMCS, meaning KVM will do a VMREAD and then VMWRITE the value it just pulled from the VMCS. Note, this is a purely theoretical change, no instances of skipping the VMREAD+VMWRITE have been observed with this change. Tested-by: Reto Buerki Tested-by: Vitaly Kuznetsov Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 04603f53ca36..71c7a174bdaa 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -3009,10 +3009,12 @@ void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) /* Loading vmcs02.GUEST_CR3 is handled by nested VM-Enter. */ if (is_guest_mode(vcpu)) update_guest_cr3 = false; - else if (enable_unrestricted_guest || is_paging(vcpu)) - guest_cr3 = kvm_read_cr3(vcpu); - else + else if (!enable_unrestricted_guest && !is_paging(vcpu)) guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr; + else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) + guest_cr3 = vcpu->arch.cr3; + else /* vmcs01.GUEST_CR3 is already up-to-date. */ + update_guest_cr3 = false; ept_load_pdptrs(vcpu); } -- cgit v1.2.3 From e7bddc52582d5961dfb782b40a94f54c9e6673a0 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 27 Sep 2019 14:45:18 -0700 Subject: KVM: VMX: Consolidate to_vmx() usage in RFLAGS accessors Capture struct vcpu_vmx in a local variable to improve the readability of vmx_{g,s}et_rflags(). No functional change intended. 
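The cleanup is simply hoisting the repeated container-pointer conversion into a local; a tiny standalone sketch with illustrative types, where vcpu is the first member so a cast stands in for to_vmx().

  #include <stdio.h>

  struct vcpu     { int id; };
  struct vcpu_vmx { struct vcpu vcpu; unsigned long rflags; };

  static struct vcpu_vmx *to_vmx(struct vcpu *vcpu)
  {
          return (struct vcpu_vmx *)vcpu;  /* valid: vcpu is the first member */
  }

  /* After the cleanup: convert once, then use the cached pointer everywhere. */
  static unsigned long get_rflags(struct vcpu *vcpu)
  {
          struct vcpu_vmx *vmx = to_vmx(vcpu);

          return vmx->rflags;
  }

  int main(void)
  {
          struct vcpu_vmx v = { .vcpu = { .id = 0 }, .rflags = 0x2 };

          printf("rflags=%#lx\n", get_rflags(&v.vcpu));
          return 0;
  }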
Signed-off-by: Sean Christopherson Reviewed-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 71c7a174bdaa..cc83abc93f6d 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1412,35 +1412,37 @@ static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu); unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) { + struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long rflags, save_rflags; if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) { __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); rflags = vmcs_readl(GUEST_RFLAGS); - if (to_vmx(vcpu)->rmode.vm86_active) { + if (vmx->rmode.vm86_active) { rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS; - save_rflags = to_vmx(vcpu)->rmode.save_rflags; + save_rflags = vmx->rmode.save_rflags; rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; } - to_vmx(vcpu)->rflags = rflags; + vmx->rflags = rflags; } - return to_vmx(vcpu)->rflags; + return vmx->rflags; } void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { + struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long old_rflags = vmx_get_rflags(vcpu); __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); - to_vmx(vcpu)->rflags = rflags; - if (to_vmx(vcpu)->rmode.vm86_active) { - to_vmx(vcpu)->rmode.save_rflags = rflags; + vmx->rflags = rflags; + if (vmx->rmode.vm86_active) { + vmx->rmode.save_rflags = rflags; rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; } vmcs_writel(GUEST_RFLAGS, rflags); - if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM) - to_vmx(vcpu)->emulation_required = emulation_required(vcpu); + if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM) + vmx->emulation_required = emulation_required(vcpu); } u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From 491c1ad1ac8d891aa440eb0216d023af6c038346 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 27 Sep 2019 14:45:19 -0700 Subject: KVM: VMX: Optimize vmx_set_rflags() for unrestricted guest Rework vmx_set_rflags() to avoid the extra code need to handle emulation of real mode and invalid state when unrestricted guest is disabled. The primary reason for doing so is to avoid the call to vmx_get_rflags(), which will incur a VMREAD when RFLAGS is not already available. When running nested VMs, the majority of calls to vmx_set_rflags() will occur without an associated vmx_get_rflags(), i.e. when stuffing GUEST_RFLAGS during transitions between vmcs01 and vmcs02. Note, vmx_get_rflags() guarantees RFLAGS is marked available. Signed-off-by: Sean Christopherson [Replace "else" with early "return" in the unrestricted guest branch. 
- Paolo] Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index cc83abc93f6d..9eb35e6cbc3f 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1431,9 +1431,16 @@ unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { struct vcpu_vmx *vmx = to_vmx(vcpu); - unsigned long old_rflags = vmx_get_rflags(vcpu); + unsigned long old_rflags; - __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); + if (enable_unrestricted_guest) { + __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); + vmx->rflags = rflags; + vmcs_writel(GUEST_RFLAGS, rflags); + return; + } + + old_rflags = vmx_get_rflags(vcpu); vmx->rflags = rflags; if (vmx->rmode.vm86_active) { vmx->rmode.save_rflags = rflags; -- cgit v1.2.3 From 489cbcf01d1c9e1bf09b7e371d0f312b3a1f3ef2 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 27 Sep 2019 14:45:20 -0700 Subject: KVM: x86: Add WARNs to detect out-of-bounds register indices Add WARN_ON_ONCE() checks in kvm_register_{read,write}() to detect reg values that would cause KVM to overflow vcpu->arch.regs. Change the reg param to an 'int' to make it clear that the reg index is unverified. Regarding the overhead of WARN_ON_ONCE(), now that all fixed GPR reads and writes use dedicated accessors, e.g. kvm_rax_read(), the overhead is limited to flows where the reg index is generated at runtime. And there is at least one historical bug where KVM has generated an out-of- bounds access to arch.regs (see commit b68f3cc7d9789, "KVM: x86: Always use 32-bit SMRAM save state for 32-bit kernels"). Adding the WARN_ON_ONCE() protection paves the way for additional cleanup related to kvm_reg and kvm_reg_ex. 
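The guarded accessor pattern can be shown against an ordinary array, with a warn-once helper standing in for WARN_ON_ONCE(); everything here is illustrative, not the kvm_cache_regs.h code.

  #include <stdio.h>

  #define NR_REGS 16

  static unsigned long regs[NR_REGS];

  /* Report only the first violation, like WARN_ON_ONCE(). */
  static int warn_once(int cond, const char *what)
  {
          static int warned;

          if (cond && !warned) {
                  warned = 1;
                  fprintf(stderr, "warning: %s\n", what);
          }
          return cond;
  }

  static unsigned long register_read(int reg)
  {
          /* the unsigned cast also catches negative indices */
          if (warn_once((unsigned int)reg >= NR_REGS, "read out of bounds"))
                  return 0;                /* fail closed instead of overflowing regs[] */
          return regs[reg];
  }

  static void register_write(int reg, unsigned long val)
  {
          if (warn_once((unsigned int)reg >= NR_REGS, "write out of bounds"))
                  return;                  /* drop the write rather than corrupt memory */
          regs[reg] = val;
  }

  int main(void)
  {
          register_write(3, 42);
          register_write(99, 42);                     /* warned once, then ignored */
          printf("%lu %lu\n", register_read(3), register_read(99));  /* 42 0 */
          return 0;
  }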
Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/kvm_cache_regs.h | 12 ++++++++---- arch/x86/kvm/x86.h | 6 ++---- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index 1cc6c47dc77e..807c12c122c0 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -37,19 +37,23 @@ BUILD_KVM_GPR_ACCESSORS(r14, R14) BUILD_KVM_GPR_ACCESSORS(r15, R15) #endif -static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, - enum kvm_reg reg) +static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg) { + if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS)) + return 0; + if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail)) kvm_x86_ops->cache_reg(vcpu, reg); return vcpu->arch.regs[reg]; } -static inline void kvm_register_write(struct kvm_vcpu *vcpu, - enum kvm_reg reg, +static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg, unsigned long val) { + if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS)) + return; + vcpu->arch.regs[reg] = val; __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty); __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index dbf7442a822b..45d82b8277e5 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -238,8 +238,7 @@ static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) return false; } -static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu, - enum kvm_reg reg) +static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu, int reg) { unsigned long val = kvm_register_read(vcpu, reg); @@ -247,8 +246,7 @@ static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu, } static inline void kvm_register_writel(struct kvm_vcpu *vcpu, - enum kvm_reg reg, - unsigned long val) + int reg, unsigned long val) { if (!is_64_bit_mode(vcpu)) val = (u32)val; -- cgit v1.2.3 From f8845541e93c5b41618405de6735edd6f0cc8984 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 27 Sep 2019 14:45:21 -0700 Subject: KVM: x86: Fold 'enum kvm_ex_reg' definitions into 'enum kvm_reg' Now that indexing into arch.regs is either protected by WARN_ON_ONCE or done with hardcoded enums, combine all definitions for registers that are tracked by regs_avail and regs_dirty into 'enum kvm_reg'. Having a single enum type will simplify additional cleanup related to regs_avail and regs_dirty. 
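A compact sketch of the folded layout with illustrative register names: the count marker doubles as the first extended index, so extended registers get avail/dirty bitmap positions without ever being valid regs[] indices.

  #include <stdio.h>

  enum reg {
          REG_RAX,
          REG_RCX,
          REG_RIP,
          NR_REGS,                  /* size of the regs[] array */

          EXREG_PDPTR = NR_REGS,    /* tracked via avail/dirty bits, not stored in regs[] */
          EXREG_CR3,
          EXREG_RFLAGS,
  };

  int main(void)
  {
          printf("NR_REGS=%d EXREG_CR3=%d\n", NR_REGS, EXREG_CR3);   /* 3 4 */
          return 0;
  }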
Signed-off-by: Sean Christopherson Reviewed-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 4 +--- arch/x86/kvm/kvm_cache_regs.h | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 50eb430b0ad8..c86c95a499af 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -156,10 +156,8 @@ enum kvm_reg { VCPU_REGS_R15 = __VCPU_REGS_R15, #endif VCPU_REGS_RIP, - NR_VCPU_REGS -}; + NR_VCPU_REGS, -enum kvm_reg_ex { VCPU_EXREG_PDPTR = NR_VCPU_REGS, VCPU_EXREG_CR3, VCPU_EXREG_RFLAGS, diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index 807c12c122c0..728f8e19be64 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -85,7 +85,7 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index) if (!test_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_avail)) - kvm_x86_ops->cache_reg(vcpu, (enum kvm_reg)VCPU_EXREG_PDPTR); + kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR); return vcpu->arch.walk_mmu->pdptrs[index]; } -- cgit v1.2.3 From cb3c1e2f3e8d0a77824c05c7c38f03d2cbdeaf9e Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 27 Sep 2019 14:45:22 -0700 Subject: KVM: x86: Add helpers to test/mark reg availability and dirtiness Add helpers to prettify code that tests and/or marks whether or not a register is available and/or dirty. Suggested-by: Vitaly Kuznetsov Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/kvm_cache_regs.h | 35 +++++++++++++++++++++++++++++------ arch/x86/kvm/vmx/nested.c | 4 ++-- arch/x86/kvm/vmx/vmx.c | 29 +++++++++++++---------------- arch/x86/kvm/x86.c | 13 +++++-------- 4 files changed, 49 insertions(+), 32 deletions(-) diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index 728f8e19be64..e85b5ed22371 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -37,12 +37,37 @@ BUILD_KVM_GPR_ACCESSORS(r14, R14) BUILD_KVM_GPR_ACCESSORS(r15, R15) #endif +static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu, + enum kvm_reg reg) +{ + return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); +} + +static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu, + enum kvm_reg reg) +{ + return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty); +} + +static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu, + enum kvm_reg reg) +{ + __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); +} + +static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu, + enum kvm_reg reg) +{ + __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); + __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty); +} + static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg) { if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS)) return 0; - if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail)) + if (!kvm_register_is_available(vcpu, reg)) kvm_x86_ops->cache_reg(vcpu, reg); return vcpu->arch.regs[reg]; @@ -55,8 +80,7 @@ static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg, return; vcpu->arch.regs[reg] = val; - __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty); - __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); + kvm_register_mark_dirty(vcpu, reg); } static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu) @@ -83,8 +107,7 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index) { 
might_sleep(); /* on svm */ - if (!test_bit(VCPU_EXREG_PDPTR, - (unsigned long *)&vcpu->arch.regs_avail)) + if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR)) kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR); return vcpu->arch.walk_mmu->pdptrs[index]; @@ -113,7 +136,7 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask) static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu) { - if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) + if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3)) kvm_x86_ops->decache_cr3(vcpu); return vcpu->arch.cr3; } diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index d93ddc79a595..5e231da00310 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -1012,7 +1012,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne kvm_mmu_new_cr3(vcpu, cr3, false); vcpu->arch.cr3 = cr3; - __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); + kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); kvm_init_mmu(vcpu, false); @@ -3986,7 +3986,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu) nested_ept_uninit_mmu_context(vcpu); vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); - __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); + kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); /* * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 9eb35e6cbc3f..48a41abe016b 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -726,8 +726,8 @@ static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg, bool ret; u32 mask = 1 << (seg * SEG_FIELD_NR + field); - if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) { - vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS); + if (!kvm_register_is_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS)) { + kvm_register_mark_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS); vmx->segment_cache.bitmask = 0; } ret = vmx->segment_cache.bitmask & mask; @@ -1415,8 +1415,8 @@ unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long rflags, save_rflags; - if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) { - __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); + if (!kvm_register_is_available(vcpu, VCPU_EXREG_RFLAGS)) { + kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS); rflags = vmcs_readl(GUEST_RFLAGS); if (vmx->rmode.vm86_active) { rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS; @@ -1434,7 +1434,7 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) unsigned long old_rflags; if (enable_unrestricted_guest) { - __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); + kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS); vmx->rflags = rflags; vmcs_writel(GUEST_RFLAGS, rflags); return; @@ -2179,7 +2179,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) { - __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); + kvm_register_mark_available(vcpu, reg); + switch (reg) { case VCPU_REGS_RSP: vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); @@ -2866,7 +2867,7 @@ static void vmx_decache_cr3(struct kvm_vcpu *vcpu) { if (enable_unrestricted_guest || (enable_ept && is_paging(vcpu))) vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); - __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); + kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); } static void 
vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) @@ -2881,8 +2882,7 @@ static void ept_load_pdptrs(struct kvm_vcpu *vcpu) { struct kvm_mmu *mmu = vcpu->arch.walk_mmu; - if (!test_bit(VCPU_EXREG_PDPTR, - (unsigned long *)&vcpu->arch.regs_dirty)) + if (!kvm_register_is_dirty(vcpu, VCPU_EXREG_PDPTR)) return; if (is_pae_paging(vcpu)) { @@ -2904,10 +2904,7 @@ void ept_save_pdptrs(struct kvm_vcpu *vcpu) mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3); } - __set_bit(VCPU_EXREG_PDPTR, - (unsigned long *)&vcpu->arch.regs_avail); - __set_bit(VCPU_EXREG_PDPTR, - (unsigned long *)&vcpu->arch.regs_dirty); + kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); } static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, @@ -2916,7 +2913,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, { struct vcpu_vmx *vmx = to_vmx(vcpu); - if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) + if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3)) vmx_decache_cr3(vcpu); if (!(cr0 & X86_CR0_PG)) { /* From paging/starting to nonpaging */ @@ -6520,9 +6517,9 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) if (vmx->nested.need_vmcs12_to_shadow_sync) nested_sync_vmcs12_to_shadow(vcpu); - if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) + if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP)) vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); - if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) + if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP)) vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); cr3 = __get_current_cr3_fast(); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 5863c38108d9..968f09e029e5 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -708,10 +708,8 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) ret = 1; memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); - __set_bit(VCPU_EXREG_PDPTR, - (unsigned long *)&vcpu->arch.regs_avail); - __set_bit(VCPU_EXREG_PDPTR, - (unsigned long *)&vcpu->arch.regs_dirty); + kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); + out: return ret; @@ -729,8 +727,7 @@ bool pdptrs_changed(struct kvm_vcpu *vcpu) if (!is_pae_paging(vcpu)) return false; - if (!test_bit(VCPU_EXREG_PDPTR, - (unsigned long *)&vcpu->arch.regs_avail)) + if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR)) return true; gfn = (kvm_read_cr3(vcpu) & 0xffffffe0ul) >> PAGE_SHIFT; @@ -983,7 +980,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) kvm_mmu_new_cr3(vcpu, cr3, skip_tlb_flush); vcpu->arch.cr3 = cr3; - __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); + kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); return 0; } @@ -8763,7 +8760,7 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) vcpu->arch.cr2 = sregs->cr2; mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; vcpu->arch.cr3 = sregs->cr3; - __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); + kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); kvm_set_cr8(vcpu, sregs->cr8); -- cgit v1.2.3 From 34059c2570102870df8d8a31bd42f8d9c19cce87 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 27 Sep 2019 14:45:23 -0700 Subject: KVM: x86: Fold decache_cr3() into cache_reg() Handle caching CR3 (from VMX's VMCS) into struct kvm_vcpu via the common cache_reg() callback and drop the dedicated decache_cr3(). 
The name decache_cr3() is somewhat confusing as the caching behavior of CR3 follows that of GPRs, RFLAGS and PDPTRs, (handled via cache_reg()), and has nothing in common with the caching behavior of CR0/CR4 (whose decache_cr{0,4}_guest_bits() likely provided the 'decache' verbiage). This would effectivel adds a BUG() if KVM attempts to cache CR3 on SVM. Change it to a WARN_ON_ONCE() -- if the cache never requires filling, the value is already in the right place -- and opportunistically add one in VMX to provide an equivalent check. Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 - arch/x86/kvm/kvm_cache_regs.h | 2 +- arch/x86/kvm/svm.c | 7 +------ arch/x86/kvm/vmx/vmx.c | 15 ++++++--------- 4 files changed, 8 insertions(+), 17 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c86c95a499af..cdde7488430d 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1033,7 +1033,6 @@ struct kvm_x86_ops { struct kvm_segment *var, int seg); void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu); - void (*decache_cr3)(struct kvm_vcpu *vcpu); void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu); void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index e85b5ed22371..58767020de41 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -137,7 +137,7 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask) static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu) { if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3)) - kvm_x86_ops->decache_cr3(vcpu); + kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_CR3); return vcpu->arch.cr3; } diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 62b0938b62ef..80711b6e3a59 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -2376,7 +2376,7 @@ static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); break; default: - BUG(); + WARN_ON_ONCE(1); } } @@ -2529,10 +2529,6 @@ static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) { } -static void svm_decache_cr3(struct kvm_vcpu *vcpu) -{ -} - static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) { } @@ -7269,7 +7265,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .get_cpl = svm_get_cpl, .get_cs_db_l_bits = kvm_get_cs_db_l_bits, .decache_cr0_guest_bits = svm_decache_cr0_guest_bits, - .decache_cr3 = svm_decache_cr3, .decache_cr4_guest_bits = svm_decache_cr4_guest_bits, .set_cr0 = svm_set_cr0, .set_cr3 = svm_set_cr3, diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 48a41abe016b..1b022db081cf 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2192,7 +2192,12 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) if (enable_ept) ept_save_pdptrs(vcpu); break; + case VCPU_EXREG_CR3: + if (enable_unrestricted_guest || (enable_ept && is_paging(vcpu))) + vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); + break; default: + WARN_ON_ONCE(1); break; } } @@ -2863,13 +2868,6 @@ static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits; } -static void vmx_decache_cr3(struct kvm_vcpu *vcpu) -{ - if (enable_unrestricted_guest || (enable_ept && 
is_paging(vcpu))) - vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); - kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); -} - static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) { ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; @@ -2914,7 +2912,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, struct vcpu_vmx *vmx = to_vmx(vcpu); if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3)) - vmx_decache_cr3(vcpu); + vmx_cache_reg(vcpu, VCPU_EXREG_CR3); if (!(cr0 & X86_CR0_PG)) { /* From paging/starting to nonpaging */ exec_controls_setbit(vmx, CPU_BASED_CR3_LOAD_EXITING | @@ -7784,7 +7782,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .get_cpl = vmx_get_cpl, .get_cs_db_l_bits = vmx_get_cs_db_l_bits, .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits, - .decache_cr3 = vmx_decache_cr3, .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits, .set_cr0 = vmx_set_cr0, .set_cr3 = vmx_set_cr3, -- cgit v1.2.3 From 2cf9af0b566823de418eb2ff357a2f8233c718e9 Mon Sep 17 00:00:00 2001 From: "Suthikulpanit, Suravee" Date: Fri, 13 Sep 2019 19:00:49 +0000 Subject: kvm: x86: Modify kvm_x86_ops.get_enable_apicv() to use struct kvm parameter Generally, APICv for all vcpus in the VM are enable/disable in the same manner. So, get_enable_apicv() should represent APICv status of the VM instead of each VCPU. Modify kvm_x86_ops.get_enable_apicv() to take struct kvm as parameter instead of struct kvm_vcpu. Reviewed-by: Vitaly Kuznetsov Signed-off-by: Suravee Suthikulpanit Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/svm.c | 4 ++-- arch/x86/kvm/vmx/vmx.c | 2 +- arch/x86/kvm/x86.c | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index cdde7488430d..5d8056ff7390 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1081,7 +1081,7 @@ struct kvm_x86_ops { void (*enable_nmi_window)(struct kvm_vcpu *vcpu); void (*enable_irq_window)(struct kvm_vcpu *vcpu); void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr); - bool (*get_enable_apicv)(struct kvm_vcpu *vcpu); + bool (*get_enable_apicv)(struct kvm *kvm); void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu); void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr); void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr); diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 80711b6e3a59..e479ea9bc9da 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -5112,9 +5112,9 @@ static void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu) return; } -static bool svm_get_enable_apicv(struct kvm_vcpu *vcpu) +static bool svm_get_enable_apicv(struct kvm *kvm) { - return avic && irqchip_split(vcpu->kvm); + return avic && irqchip_split(kvm); } static void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 1b022db081cf..e660e28e9ae0 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -3757,7 +3757,7 @@ void pt_update_intercept_for_msr(struct vcpu_vmx *vmx) } } -static bool vmx_get_enable_apicv(struct kvm_vcpu *vcpu) +static bool vmx_get_enable_apicv(struct kvm *kvm) { return enable_apicv; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 968f09e029e5..368a76648b70 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9340,7 +9340,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) goto fail_free_pio_data; if (irqchip_in_kernel(vcpu->kvm)) { - 
vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu); + vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu->kvm); r = kvm_create_lapic(vcpu, lapic_timer_advance_ns); if (r < 0) goto fail_mmu_destroy; -- cgit v1.2.3 From 30ce89acdfe91eb7a88cc5805d2774f11e1eccb4 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Mon, 21 Oct 2019 10:52:56 +0800 Subject: KVM: remove redundant code in kvm_arch_vm_ioctl If we reach here with r = 0, we will reassign r = 0 unnecesarry, then do the label set_irqchip_out work. If we reach here with r != 0, then we will do the label work directly. So this if statement and r = 0 assignment is redundant. Signed-off-by: Miaohe Lin Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 368a76648b70..38131c834091 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4910,9 +4910,6 @@ set_identity_unlock: if (!irqchip_kernel(kvm)) goto set_irqchip_out; r = kvm_vm_ioctl_set_irqchip(kvm, chip); - if (r) - goto set_irqchip_out; - r = 0; set_irqchip_out: kfree(chip); break; -- cgit v1.2.3 From 4be946728f65c10c9bb1a1580ec47a316f5ee6ac Mon Sep 17 00:00:00 2001 From: Like Xu Date: Mon, 21 Oct 2019 18:55:04 +0800 Subject: KVM: x86/vPMU: Declare kvm_pmu->reprogram_pmi field using DECLARE_BITMAP Replace the explicit declaration of "u64 reprogram_pmi" with the generic macro DECLARE_BITMAP for all possible appropriate number of bits. Suggested-by: Paolo Bonzini Signed-off-by: Like Xu Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/pmu.c | 15 +++++---------- 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 5d8056ff7390..62f32a61c250 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -467,7 +467,7 @@ struct kvm_pmu { struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC]; struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED]; struct irq_work irq_work; - u64 reprogram_pmi; + DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX); }; struct kvm_pmu_ops; diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 46875bbd0419..75e8f9fae031 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -62,8 +62,7 @@ static void kvm_perf_overflow(struct perf_event *perf_event, struct kvm_pmc *pmc = perf_event->overflow_handler_context; struct kvm_pmu *pmu = pmc_to_pmu(pmc); - if (!test_and_set_bit(pmc->idx, - (unsigned long *)&pmu->reprogram_pmi)) { + if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) { __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); kvm_make_request(KVM_REQ_PMU, pmc->vcpu); } @@ -76,8 +75,7 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event, struct kvm_pmc *pmc = perf_event->overflow_handler_context; struct kvm_pmu *pmu = pmc_to_pmu(pmc); - if (!test_and_set_bit(pmc->idx, - (unsigned long *)&pmu->reprogram_pmi)) { + if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) { __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); kvm_make_request(KVM_REQ_PMU, pmc->vcpu); @@ -137,7 +135,7 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, } pmc->perf_event = event; - clear_bit(pmc->idx, (unsigned long*)&pmc_to_pmu(pmc)->reprogram_pmi); + clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi); } void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) @@ -253,16 +251,13 @@ EXPORT_SYMBOL_GPL(reprogram_counter); void kvm_pmu_handle_event(struct kvm_vcpu *vcpu) { struct kvm_pmu 
*pmu = vcpu_to_pmu(vcpu); - u64 bitmask; int bit; - bitmask = pmu->reprogram_pmi; - - for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) { + for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) { struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit); if (unlikely(!pmc || !pmc->perf_event)) { - clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi); + clear_bit(bit, pmu->reprogram_pmi); continue; } -- cgit v1.2.3 From 35fbe0d4ef9abb05a8c591481d0196edcb056bcc Mon Sep 17 00:00:00 2001 From: Xiaoyao Li Date: Sun, 20 Oct 2019 17:10:58 +0800 Subject: KVM: VMX: Write VPID to vmcs when creating vcpu Move the code that writes vmx->vpid to vmcs from vmx_vcpu_reset() to vmx_vcpu_setup(), because vmx->vpid is allocated when creating vcpu and never changed. So we don't need to update the vmcs.vpid when resetting vcpu. Signed-off-by: Xiaoyao Li Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index e660e28e9ae0..279f855d892b 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4252,6 +4252,9 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx) set_cr4_guest_host_mask(vmx); + if (vmx->vpid != 0) + vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); + if (vmx_xsaves_supported()) vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP); @@ -4354,9 +4357,6 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); - if (vmx->vpid != 0) - vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); - cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; vmx->vcpu.arch.cr0 = cr0; vmx_set_cr0(vcpu, cr0); /* enter rmode */ -- cgit v1.2.3 From 3c0f4be1f33b25bd28be1672ecb53627577c0899 Mon Sep 17 00:00:00 2001 From: Xiaoyao Li Date: Sun, 20 Oct 2019 17:10:59 +0800 Subject: KVM: VMX: Remove vmx->hv_deadline_tsc initialization from vmx_vcpu_setup() ... It can be removed here because the same code is called later in vmx_vcpu_reset() as the flow: kvm_arch_vcpu_setup() -> kvm_vcpu_reset() -> vmx_vcpu_reset() Signed-off-by: Xiaoyao Li Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 279f855d892b..ec7c42f57b65 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4178,7 +4178,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx) /* Control */ pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx)); - vmx->hv_deadline_tsc = -1; exec_controls_set(vmx, vmx_exec_control(vmx)); -- cgit v1.2.3 From 4be5341026246870818e28b53202b001426a5aec Mon Sep 17 00:00:00 2001 From: Xiaoyao Li Date: Sun, 20 Oct 2019 17:11:00 +0800 Subject: KVM: VMX: Initialize vmx->guest_msrs[] right after allocation Move the initialization of vmx->guest_msrs[] from vmx_vcpu_setup() to vmx_create_vcpu(), and put it right after its allocation. This also is the preperation for next patch. 
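The shape of the change, probing a candidate table and compacting the supported entries into a freshly allocated array, can be sketched in standalone C; probe() stands in for the rdmsr_safe()/wrmsr_safe() checks and the candidate values are made up.

  #include <stdio.h>
  #include <stdlib.h>

  struct msr_entry { unsigned int index; unsigned long data, mask; };

  static const unsigned int candidates[] = { 0x10, 0x174, 0x1a0 };   /* illustrative */

  static int probe(unsigned int msr)
  {
          return msr != 0x10;       /* pretend one candidate is unsupported */
  }

  int main(void)
  {
          size_t n = sizeof(candidates) / sizeof(candidates[0]);
          struct msr_entry *msrs = calloc(n, sizeof(*msrs));
          size_t nmsrs = 0;

          if (!msrs)
                  return 1;

          /* Fill the table immediately after allocating it, as the patch does. */
          for (size_t i = 0; i < n; i++) {
                  if (!probe(candidates[i]))
                          continue;
                  msrs[nmsrs].index = i;   /* the table position, not the raw MSR number */
                  msrs[nmsrs].data  = 0;
                  msrs[nmsrs].mask  = ~0UL;
                  nmsrs++;
          }

          printf("kept %zu of %zu candidate MSRs\n", nmsrs, n);
          free(msrs);
          return 0;
  }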
Signed-off-by: Xiaoyao Li Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 34 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index ec7c42f57b65..84c32395d887 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4166,8 +4166,6 @@ static void ept_set_mmio_spte_mask(void) */ static void vmx_vcpu_setup(struct vcpu_vmx *vmx) { - int i; - if (nested) nested_vmx_vcpu_setup(); @@ -4226,21 +4224,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx) if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); - for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) { - u32 index = vmx_msr_index[i]; - u32 data_low, data_high; - int j = vmx->nmsrs; - - if (rdmsr_safe(index, &data_low, &data_high) < 0) - continue; - if (wrmsr_safe(index, data_low, data_high) < 0) - continue; - vmx->guest_msrs[j].index = i; - vmx->guest_msrs[j].data = 0; - vmx->guest_msrs[j].mask = -1ull; - ++vmx->nmsrs; - } - vm_exit_controls_set(vmx, vmx_vmexit_ctrl()); /* 22.2.1, 20.8.1 */ @@ -6700,7 +6683,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) int err; struct vcpu_vmx *vmx; unsigned long *msr_bitmap; - int cpu; + int i, cpu; BUILD_BUG_ON_MSG(offsetof(struct vcpu_vmx, vcpu) != 0, "struct kvm_vcpu must be at offset 0 for arch usercopy region"); @@ -6752,6 +6735,21 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) if (!vmx->guest_msrs) goto free_pml; + for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) { + u32 index = vmx_msr_index[i]; + u32 data_low, data_high; + int j = vmx->nmsrs; + + if (rdmsr_safe(index, &data_low, &data_high) < 0) + continue; + if (wrmsr_safe(index, data_low, data_high) < 0) + continue; + vmx->guest_msrs[j].index = i; + vmx->guest_msrs[j].data = 0; + vmx->guest_msrs[j].mask = -1ull; + ++vmx->nmsrs; + } + err = alloc_loaded_vmcs(&vmx->vmcs01); if (err < 0) goto free_msrs; -- cgit v1.2.3 From 1b84292bea00c042afc2f950c61b2c027bd36ff7 Mon Sep 17 00:00:00 2001 From: Xiaoyao Li Date: Sun, 20 Oct 2019 17:11:01 +0800 Subject: KVM: VMX: Rename {vmx,nested_vmx}_vcpu_setup() Rename {vmx,nested_vmx}_vcpu_setup() to match what they really do. 
Signed-off-by: Xiaoyao Li Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 2 +- arch/x86/kvm/vmx/nested.h | 2 +- arch/x86/kvm/vmx/vmx.c | 9 +++++---- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 5e231da00310..55c5791ac52b 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -5768,7 +5768,7 @@ error_guest_mode: return ret; } -void nested_vmx_vcpu_setup(void) +void nested_vmx_set_vmcs_shadowing_bitmap(void) { if (enable_shadow_vmcs) { vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap)); diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h index 187d39bf0bf1..4cf1d40da15f 100644 --- a/arch/x86/kvm/vmx/nested.h +++ b/arch/x86/kvm/vmx/nested.h @@ -11,7 +11,7 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps, bool apicv); void nested_vmx_hardware_unsetup(void); __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)); -void nested_vmx_vcpu_setup(void); +void nested_vmx_set_vmcs_shadowing_bitmap(void); void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu); int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry); bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 84c32395d887..4211f72a1a01 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4162,12 +4162,13 @@ static void ept_set_mmio_spte_mask(void) #define VMX_XSS_EXIT_BITMAP 0 /* - * Sets up the vmcs for emulated real mode. + * Noting that the initialization of Guest-state Area of VMCS is in + * vmx_vcpu_reset(). */ -static void vmx_vcpu_setup(struct vcpu_vmx *vmx) +static void init_vmcs(struct vcpu_vmx *vmx) { if (nested) - nested_vmx_vcpu_setup(); + nested_vmx_set_vmcs_shadowing_bitmap(); if (cpu_has_vmx_msr_bitmap()) vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap)); @@ -6774,7 +6775,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) cpu = get_cpu(); vmx_vcpu_load(&vmx->vcpu, cpu); vmx->vcpu.cpu = cpu; - vmx_vcpu_setup(vmx); + init_vmcs(vmx); vmx_vcpu_put(&vmx->vcpu); put_cpu(); if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) { -- cgit v1.2.3 From 7204160eb7809345d10c983d9d1dfbd98060a56d Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Mon, 21 Oct 2019 16:30:20 -0700 Subject: KVM: x86: Introduce vcpu->arch.xsaves_enabled Cache whether XSAVES is enabled in the guest by adding xsaves_enabled to vcpu->arch. 
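A minimal sketch of caching such a derived capability bit, recomputed whenever the guest CPUID configuration changes; the field names are illustrative and the exact predicate differs between the VMX and SVM paths in the patch.

  #include <stdbool.h>
  #include <stdio.h>

  struct vcpu_state {
          bool guest_has_xsave;     /* guest CPUID bits (illustrative) */
          bool guest_has_xsaves;
          bool xsaves_enabled;      /* the cached result, the new vcpu->arch field */
  };

  /* Run on every CPUID update so the cached bit never goes stale. */
  static void cpuid_update(struct vcpu_state *v)
  {
          v->xsaves_enabled = v->guest_has_xsave && v->guest_has_xsaves;
  }

  int main(void)
  {
          struct vcpu_state v = { .guest_has_xsave = true, .guest_has_xsaves = true };

          cpuid_update(&v);
          printf("xsaves_enabled=%d\n", v.xsaves_enabled);
          return 0;
  }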
Reviewed-by: Jim Mattson Signed-off-by: Aaron Lewis Change-Id: If4638e0901c28a4494dad2e103e2c075e8ab5d68 Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/svm.c | 3 +++ arch/x86/kvm/vmx/vmx.c | 5 +++++ 3 files changed, 9 insertions(+) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 62f32a61c250..6f6b8886a8eb 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -560,6 +560,7 @@ struct kvm_vcpu_arch { u64 smbase; u64 smi_count; bool tpr_access_reporting; + bool xsaves_enabled; u64 ia32_xss; u64 microcode_version; u64 arch_capabilities; diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index e479ea9bc9da..cf224963e7d1 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -5895,6 +5895,9 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); + vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && + boot_cpu_has(X86_FEATURE_XSAVES); + /* Update nrips enabled cache */ svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 4211f72a1a01..751765532305 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4050,6 +4050,8 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx) guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && guest_cpuid_has(vcpu, X86_FEATURE_XSAVES); + vcpu->arch.xsaves_enabled = xsaves_enabled; + if (!xsaves_enabled) exec_control &= ~SECONDARY_EXEC_XSAVES; @@ -7089,6 +7091,9 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); + /* xsaves_enabled is recomputed in vmx_compute_secondary_exec_control(). */ + vcpu->arch.xsaves_enabled = false; + if (cpu_has_secondary_exec_ctrls()) { vmx_compute_secondary_exec_control(vmx); vmcs_set_secondary_exec_control(vmx); -- cgit v1.2.3 From c034f2aa8622e1e436563eb34c0f78ba8aa32329 Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Mon, 21 Oct 2019 16:30:21 -0700 Subject: KVM: VMX: Fix conditions for guest IA32_XSS support Volume 4 of the SDM says that IA32_XSS is supported if CPUID(EAX=0DH,ECX=1):EAX.XSS[bit 3] is set, so only the X86_FEATURE_XSAVES check is necessary (X86_FEATURE_XSAVES is the Linux name for CPUID(EAX=0DH,ECX=1):EAX.XSS[bit 3]). 
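The corrected gate reduces to a single CPUID check plus the usual host_initiated escape hatch; a standalone sketch with an illustrative helper, not the vmx_get_msr()/vmx_set_msr() code itself.

  #include <stdbool.h>
  #include <stdio.h>

  /* Return 1 if an IA32_XSS-style access should be allowed. */
  static int xss_access_ok(bool host_initiated, bool guest_has_xsaves)
  {
          if (host_initiated)
                  return 1;                 /* userspace (e.g. migration) may always access */
          return guest_has_xsaves;          /* only the XSAVES CPUID bit gates the guest */
  }

  int main(void)
  {
          printf("%d %d %d\n",
                 xss_access_ok(true,  false),    /* 1 */
                 xss_access_ok(false, true),     /* 1 */
                 xss_access_ok(false, false));   /* 0 */
          return 0;
  }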
Fixes: 4d763b168e9c5 ("KVM: VMX: check CPUID before allowing read/write of IA32_XSS") Reviewed-by: Jim Mattson Signed-off-by: Aaron Lewis Change-Id: I9059b9f2e3595e4b09a4cdcf14b933b22ebad419 Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 751765532305..e5b09c75e529 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1830,10 +1830,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index, &msr_info->data); case MSR_IA32_XSS: - if (!vmx_xsaves_supported() || - (!msr_info->host_initiated && - !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && - guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)))) + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) return 1; msr_info->data = vcpu->arch.ia32_xss; break; @@ -2073,10 +2071,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; return vmx_set_vmx_msr(vcpu, msr_index, data); case MSR_IA32_XSS: - if (!vmx_xsaves_supported() || - (!msr_info->host_initiated && - !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && - guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)))) + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) return 1; /* * The only supported bit as of Skylake is bit 8, but @@ -2085,11 +2081,13 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (data != 0) return 1; vcpu->arch.ia32_xss = data; - if (vcpu->arch.ia32_xss != host_xss) - add_atomic_switch_msr(vmx, MSR_IA32_XSS, - vcpu->arch.ia32_xss, host_xss, false); - else - clear_atomic_switch_msr(vmx, MSR_IA32_XSS); + if (vcpu->arch.xsaves_enabled) { + if (vcpu->arch.ia32_xss != host_xss) + add_atomic_switch_msr(vmx, MSR_IA32_XSS, + vcpu->arch.ia32_xss, host_xss, false); + else + clear_atomic_switch_msr(vmx, MSR_IA32_XSS); + } break; case MSR_IA32_RTIT_CTL: if ((pt_mode != PT_MODE_HOST_GUEST) || -- cgit v1.2.3 From 78958563d8023db0c6d03a2fe2a64d79b47b4349 Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Mon, 21 Oct 2019 16:30:22 -0700 Subject: KVM: x86: Remove unneeded kvm_vcpu variable, guest_xcr0_loaded The kvm_vcpu variable, guest_xcr0_loaded, is a waste of an 'int' and a conditional branch. VMX and SVM are the only users, and both unconditionally pair kvm_load_guest_xcr0() with kvm_put_guest_xcr0() making this check unnecessary. Without this variable, the predicates in kvm_load_guest_xcr0 and kvm_put_guest_xcr0 should match. 
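The idea generalizes: once the load and put paths test the very same condition, no "loaded" flag is needed to keep them in sync. A self-contained sketch of that shape, with invented names standing in for the XCR0 handling (an illustration only, not KVM code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Pretend hardware register and host/guest values (illustrative only). */
    static unsigned long hw_reg, host_val = 0x1, guest_val = 0x3;
    static bool guest_enabled = true;

    /* Both paths use the identical predicate, so no "loaded" flag is kept. */
    static void load_guest(void)
    {
        if (guest_enabled && guest_val != host_val)
            hw_reg = guest_val;
    }

    static void put_guest(void)
    {
        if (guest_enabled && guest_val != host_val)
            hw_reg = host_val;
    }

    int main(void)
    {
        hw_reg = host_val;
        load_guest();
        printf("after load: %#lx\n", hw_reg);
        put_guest();
        printf("after put:  %#lx\n", hw_reg);
        return 0;
    }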
Suggested-by: Sean Christopherson Reviewed-by: Jim Mattson Signed-off-by: Aaron Lewis Change-Id: I7b1eb9b62969d7bbb2850f27e42f863421641b23 Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 16 +++++----------- include/linux/kvm_host.h | 1 - 2 files changed, 5 insertions(+), 12 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 38131c834091..7141f81141a2 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -811,22 +811,16 @@ EXPORT_SYMBOL_GPL(kvm_lmsw); void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) { if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && - !vcpu->guest_xcr0_loaded) { - /* kvm_set_xcr() also depends on this */ - if (vcpu->arch.xcr0 != host_xcr0) - xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); - vcpu->guest_xcr0_loaded = 1; - } + vcpu->arch.xcr0 != host_xcr0) + xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); } EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0); void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) { - if (vcpu->guest_xcr0_loaded) { - if (vcpu->arch.xcr0 != host_xcr0) - xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); - vcpu->guest_xcr0_loaded = 0; - } + if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && + vcpu->arch.xcr0 != host_xcr0) + xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); } EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 719fc3e15ea4..d2017302996c 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -278,7 +278,6 @@ struct kvm_vcpu { struct mutex mutex; struct kvm_run *run; - int guest_xcr0_loaded; struct swait_queue_head wq; struct pid __rcu *pid; int sigset_active; -- cgit v1.2.3 From 312a1c87798e6b43ff533393167b3cba33645ead Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Mon, 21 Oct 2019 16:30:23 -0700 Subject: KVM: SVM: Use wrmsr for switching between guest and host IA32_XSS on AMD When the guest can execute the XSAVES/XRSTORS instructions, set the hardware IA32_XSS MSR to guest/host values on VM-entry/VM-exit. Note that vcpu->arch.ia32_xss is currently guaranteed to be 0 on AMD, since there is no way to change it. 
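The host-side value restored on VM-exit is whatever IA32_XSS the host booted with, captured with rdmsrl at hardware setup. To look at that value from userspace, a hedged sketch using the msr driver follows; it assumes root privileges, a loaded msr module, and that IA32_XSS sits at MSR address 0xda0 (the MSR_IA32_XSS constant in current headers — treat the address as an assumption):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define IA32_XSS_ADDR 0xda0    /* MSR_IA32_XSS; assumption noted above */

    int main(void)
    {
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0) {
            perror("open /dev/cpu/0/msr (is the msr module loaded?)");
            return 1;
        }
        /* The msr interface reads 8 bytes at the MSR address as the offset. */
        if (pread(fd, &val, sizeof(val), IA32_XSS_ADDR) != sizeof(val)) {
            perror("pread IA32_XSS");
            close(fd);
            return 1;
        }
        printf("host IA32_XSS = %#llx\n", (unsigned long long)val);
        close(fd);
        return 0;
    }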
Suggested-by: Sean Christopherson Reviewed-by: Jim Mattson Signed-off-by: Aaron Lewis Change-Id: Id51a782462086e6d7a3ab621838e200f1c005afd Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index cf224963e7d1..fa29125193fe 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -116,6 +116,8 @@ MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id); static bool erratum_383_found __read_mostly; +static u64 __read_mostly host_xss; + static const u32 host_save_user_msrs[] = { #ifdef CONFIG_X86_64 MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE, @@ -1409,6 +1411,9 @@ static __init int svm_hardware_setup(void) pr_info("Virtual GIF supported\n"); } + if (boot_cpu_has(X86_FEATURE_XSAVES)) + rdmsrl(MSR_IA32_XSS, host_xss); + return 0; err: @@ -5598,6 +5603,22 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu) svm_complete_interrupts(svm); } +static void svm_load_guest_xss(struct kvm_vcpu *vcpu) +{ + if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && + vcpu->arch.xsaves_enabled && + vcpu->arch.ia32_xss != host_xss) + wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss); +} + +static void svm_load_host_xss(struct kvm_vcpu *vcpu) +{ + if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && + vcpu->arch.xsaves_enabled && + vcpu->arch.ia32_xss != host_xss) + wrmsrl(MSR_IA32_XSS, host_xss); +} + static void svm_vcpu_run(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -5637,6 +5658,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) clgi(); kvm_load_guest_xcr0(vcpu); + svm_load_guest_xss(vcpu); if (lapic_in_kernel(vcpu) && vcpu->arch.apic->lapic_timer.timer_advance_ns) @@ -5786,6 +5808,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) kvm_before_interrupt(&svm->vcpu); + svm_load_host_xss(vcpu); kvm_put_guest_xcr0(vcpu); stgi(); -- cgit v1.2.3 From 9753d68865c5662eee94eb8808b5ad5eb766f5ea Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Mon, 21 Oct 2019 16:30:24 -0700 Subject: KVM: VMX: Use wrmsr for switching between guest and host IA32_XSS on Intel When the guest can execute the XSAVES/XRSTORS instructions, use wrmsr to set the hardware IA32_XSS MSR to guest/host values on VM-entry/VM-exit, rather than the MSR-load areas. By using the same approach as AMD, we will be able to use a common implementation for both (in the next patch). 
Reviewed-by: Jim Mattson Signed-off-by: Aaron Lewis Change-Id: I9447d104b2615c04e39e4af0c911e1e7309bf464 Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index e5b09c75e529..aee6f1a9d252 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2081,13 +2081,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (data != 0) return 1; vcpu->arch.ia32_xss = data; - if (vcpu->arch.xsaves_enabled) { - if (vcpu->arch.ia32_xss != host_xss) - add_atomic_switch_msr(vmx, MSR_IA32_XSS, - vcpu->arch.ia32_xss, host_xss, false); - else - clear_atomic_switch_msr(vmx, MSR_IA32_XSS); - } break; case MSR_IA32_RTIT_CTL: if ((pt_mode != PT_MODE_HOST_GUEST) || @@ -6473,6 +6466,22 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp) } } +static void vmx_load_guest_xss(struct kvm_vcpu *vcpu) +{ + if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && + vcpu->arch.xsaves_enabled && + vcpu->arch.ia32_xss != host_xss) + wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss); +} + +static void vmx_load_host_xss(struct kvm_vcpu *vcpu) +{ + if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && + vcpu->arch.xsaves_enabled && + vcpu->arch.ia32_xss != host_xss) + wrmsrl(MSR_IA32_XSS, host_xss); +} + bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched); static void vmx_vcpu_run(struct kvm_vcpu *vcpu) @@ -6524,6 +6533,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) vmx_set_interrupt_shadow(vcpu, 0); kvm_load_guest_xcr0(vcpu); + vmx_load_guest_xss(vcpu); if (static_cpu_has(X86_FEATURE_PKU) && kvm_read_cr4_bits(vcpu, X86_CR4_PKE) && @@ -6630,6 +6640,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) __write_pkru(vmx->host_pkru); } + vmx_load_host_xss(vcpu); kvm_put_guest_xcr0(vcpu); vmx->nested.nested_run_pending = 0; -- cgit v1.2.3 From 139a12cfe1a040fd881338a7cc042bd37159ea9a Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Mon, 21 Oct 2019 16:30:25 -0700 Subject: KVM: x86: Move IA32_XSS-swapping on VM-entry/VM-exit to common x86 code Hoist the vendor-specific code related to loading the hardware IA32_XSS MSR with guest/host values on VM-entry/VM-exit to common x86 code. 
Reviewed-by: Jim Mattson Signed-off-by: Aaron Lewis Change-Id: Ic6e3430833955b98eb9b79ae6715cf2a3fdd6d82 Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm.c | 27 ++------------------------- arch/x86/kvm/vmx/vmx.c | 27 ++------------------------- arch/x86/kvm/x86.c | 38 ++++++++++++++++++++++++++++---------- arch/x86/kvm/x86.h | 4 ++-- 4 files changed, 34 insertions(+), 62 deletions(-) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index fa29125193fe..77429fa38748 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -116,8 +116,6 @@ MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id); static bool erratum_383_found __read_mostly; -static u64 __read_mostly host_xss; - static const u32 host_save_user_msrs[] = { #ifdef CONFIG_X86_64 MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE, @@ -1411,9 +1409,6 @@ static __init int svm_hardware_setup(void) pr_info("Virtual GIF supported\n"); } - if (boot_cpu_has(X86_FEATURE_XSAVES)) - rdmsrl(MSR_IA32_XSS, host_xss); - return 0; err: @@ -5603,22 +5598,6 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu) svm_complete_interrupts(svm); } -static void svm_load_guest_xss(struct kvm_vcpu *vcpu) -{ - if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && - vcpu->arch.xsaves_enabled && - vcpu->arch.ia32_xss != host_xss) - wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss); -} - -static void svm_load_host_xss(struct kvm_vcpu *vcpu) -{ - if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && - vcpu->arch.xsaves_enabled && - vcpu->arch.ia32_xss != host_xss) - wrmsrl(MSR_IA32_XSS, host_xss); -} - static void svm_vcpu_run(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -5657,8 +5636,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) svm->vmcb->save.cr2 = vcpu->arch.cr2; clgi(); - kvm_load_guest_xcr0(vcpu); - svm_load_guest_xss(vcpu); + kvm_load_guest_xsave_state(vcpu); if (lapic_in_kernel(vcpu) && vcpu->arch.apic->lapic_timer.timer_advance_ns) @@ -5808,8 +5786,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) kvm_before_interrupt(&svm->vcpu); - svm_load_host_xss(vcpu); - kvm_put_guest_xcr0(vcpu); + kvm_load_host_xsave_state(vcpu); stgi(); /* Any pending NMI will happen here */ diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index aee6f1a9d252..d06140bdb3ad 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -106,8 +106,6 @@ module_param(enable_apicv, bool, S_IRUGO); static bool __read_mostly nested = 1; module_param(nested, bool, S_IRUGO); -static u64 __read_mostly host_xss; - bool __read_mostly enable_pml = 1; module_param_named(pml, enable_pml, bool, S_IRUGO); @@ -6466,22 +6464,6 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp) } } -static void vmx_load_guest_xss(struct kvm_vcpu *vcpu) -{ - if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && - vcpu->arch.xsaves_enabled && - vcpu->arch.ia32_xss != host_xss) - wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss); -} - -static void vmx_load_host_xss(struct kvm_vcpu *vcpu) -{ - if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && - vcpu->arch.xsaves_enabled && - vcpu->arch.ia32_xss != host_xss) - wrmsrl(MSR_IA32_XSS, host_xss); -} - bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched); static void vmx_vcpu_run(struct kvm_vcpu *vcpu) @@ -6532,8 +6514,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) vmx_set_interrupt_shadow(vcpu, 0); - kvm_load_guest_xcr0(vcpu); - vmx_load_guest_xss(vcpu); + kvm_load_guest_xsave_state(vcpu); 
if (static_cpu_has(X86_FEATURE_PKU) && kvm_read_cr4_bits(vcpu, X86_CR4_PKE) && @@ -6640,8 +6621,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) __write_pkru(vmx->host_pkru); } - vmx_load_host_xss(vcpu); - kvm_put_guest_xcr0(vcpu); + kvm_load_host_xsave_state(vcpu); vmx->nested.nested_run_pending = 0; vmx->idt_vectoring_info = 0; @@ -7611,9 +7591,6 @@ static __init int hardware_setup(void) WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost"); } - if (boot_cpu_has(X86_FEATURE_XSAVES)) - rdmsrl(MSR_IA32_XSS, host_xss); - if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() || !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global())) enable_vpid = 0; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 7141f81141a2..1ebe13493241 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -176,6 +176,8 @@ struct kvm_shared_msrs { static struct kvm_shared_msrs_global __read_mostly shared_msrs_global; static struct kvm_shared_msrs __percpu *shared_msrs; +static u64 __read_mostly host_xss; + struct kvm_stats_debugfs_item debugfs_entries[] = { { "pf_fixed", VCPU_STAT(pf_fixed) }, { "pf_guest", VCPU_STAT(pf_guest) }, @@ -808,21 +810,34 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) } EXPORT_SYMBOL_GPL(kvm_lmsw); -void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) +void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu) { - if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && - vcpu->arch.xcr0 != host_xcr0) - xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); + if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) { + + if (vcpu->arch.xcr0 != host_xcr0) + xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); + + if (vcpu->arch.xsaves_enabled && + vcpu->arch.ia32_xss != host_xss) + wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss); + } } -EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0); +EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state); -void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) +void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu) { - if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && - vcpu->arch.xcr0 != host_xcr0) - xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); + if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) { + + if (vcpu->arch.xcr0 != host_xcr0) + xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); + + if (vcpu->arch.xsaves_enabled && + vcpu->arch.ia32_xss != host_xss) + wrmsrl(MSR_IA32_XSS, host_xss); + } + } -EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0); +EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state); static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { @@ -9278,6 +9293,9 @@ int kvm_arch_hardware_setup(void) kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits; } + if (boot_cpu_has(X86_FEATURE_XSAVES)) + rdmsrl(MSR_IA32_XSS, host_xss); + kvm_init_msr_list(); return 0; } diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 45d82b8277e5..2b0805012e3c 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -364,7 +364,7 @@ static inline bool kvm_pat_valid(u64 data) return (data | ((data & 0x0202020202020202ull) << 1)) == data; } -void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu); -void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu); +void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu); +void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu); #endif -- cgit v1.2.3 From 864e2ab2b46db1ac266c46a7c9cefe6cc893029d Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Mon, 21 Oct 2019 16:30:26 -0700 Subject: kvm: x86: Move IA32_XSS to kvm_{get,set}_msr_common Hoist support for RDMSR/WRMSR of IA32_XSS from vmx into common code so that it can be used for svm as well. 
Right now, kvm only allows the guest IA32_XSS to be zero, so the guest's usage of XSAVES will be exactly the same as XSAVEC. Reviewed-by: Jim Mattson Signed-off-by: Aaron Lewis Change-Id: Ie4b0f777d71e428fbee6e82071ac2d7618e9bb40 Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 18 ------------------ arch/x86/kvm/x86.c | 20 ++++++++++++++++++++ 2 files changed, 20 insertions(+), 18 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index d06140bdb3ad..31ce6bc2c371 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1827,12 +1827,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index, &msr_info->data); - case MSR_IA32_XSS: - if (!msr_info->host_initiated && - !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) - return 1; - msr_info->data = vcpu->arch.ia32_xss; - break; case MSR_IA32_RTIT_CTL: if (pt_mode != PT_MODE_HOST_GUEST) return 1; @@ -2068,18 +2062,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (!nested_vmx_allowed(vcpu)) return 1; return vmx_set_vmx_msr(vcpu, msr_index, data); - case MSR_IA32_XSS: - if (!msr_info->host_initiated && - !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) - return 1; - /* - * The only supported bit as of Skylake is bit 8, but - * it is not supported on KVM. - */ - if (data != 0) - return 1; - vcpu->arch.ia32_xss = data; - break; case MSR_IA32_RTIT_CTL: if ((pt_mode != PT_MODE_HOST_GUEST) || vmx_rtit_ctl_check(vcpu, data) || diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1ebe13493241..19a0dc96beca 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2694,6 +2694,20 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_TSC: kvm_write_tsc(vcpu, msr_info); break; + case MSR_IA32_XSS: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) + return 1; + /* + * We do support PT if kvm_x86_ops->pt_supported(), but we do + * not support IA32_XSS[bit 8]. Guests will have to use + * RDMSR/WRMSR rather than XSAVES/XRSTORS to save/restore PT + * MSRs. + */ + if (data != 0) + return 1; + vcpu->arch.ia32_xss = data; + break; case MSR_SMI_COUNT: if (!msr_info->host_initiated) return 1; @@ -3021,6 +3035,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: return get_msr_mce(vcpu, msr_info->index, &msr_info->data, msr_info->host_initiated); + case MSR_IA32_XSS: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) + return 1; + msr_info->data = vcpu->arch.ia32_xss; + break; case MSR_K7_CLK_CTL: /* * Provide expected ramp-up count for K7. All other -- cgit v1.2.3 From 52297436199dde85be557ee6bc779f5b96082f74 Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Mon, 21 Oct 2019 16:30:27 -0700 Subject: kvm: svm: Update svm_xsaves_supported AMD CPUs now support XSAVES in a limited fashion (they require IA32_XSS to be zero). AMD has no equivalent of Intel's "Enable XSAVES/XRSTORS" VM-execution control. Instead, XSAVES is always available to the guest when supported on the host. 
Reviewed-by: Jim Mattson Signed-off-by: Aaron Lewis Change-Id: I40dc2c682eb0d38c2208d95d5eb7bbb6c47f6317 Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 77429fa38748..4153ca8cddb7 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -5973,7 +5973,7 @@ static bool svm_mpx_supported(void) static bool svm_xsaves_supported(void) { - return false; + return boot_cpu_has(X86_FEATURE_XSAVES); } static bool svm_umip_emulated(void) -- cgit v1.2.3 From c90992bfb0804907402ab175b25b8a37cc3c31f2 Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Mon, 21 Oct 2019 16:30:28 -0700 Subject: kvm: tests: Add test to verify MSR_IA32_XSS Ensure that IA32_XSS appears in KVM_GET_MSR_INDEX_LIST if it can be set to a non-zero value. Reviewed-by: Jim Mattson Signed-off-by: Aaron Lewis Change-Id: Ia2d644f69e2d6d8c27d7e0a7a45c2bf9c42bf5ff Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/.gitignore | 1 + tools/testing/selftests/kvm/Makefile | 1 + .../selftests/kvm/include/x86_64/processor.h | 7 +- tools/testing/selftests/kvm/lib/x86_64/processor.c | 72 +++++++++++++++++--- tools/testing/selftests/kvm/x86_64/xss_msr_test.c | 76 ++++++++++++++++++++++ 5 files changed, 147 insertions(+), 10 deletions(-) create mode 100644 tools/testing/selftests/kvm/x86_64/xss_msr_test.c diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index 409c1fa75e03..30072c3f52fb 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -13,6 +13,7 @@ /x86_64/vmx_dirty_log_test /x86_64/vmx_set_nested_state_test /x86_64/vmx_tsc_adjust_test +/x86_64/xss_msr_test /clear_dirty_log_test /dirty_log_test /kvm_create_max_vcpus diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index c5ec868fa1e5..3138a916574a 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -25,6 +25,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test +TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test TEST_GEN_PROGS_x86_64 += clear_dirty_log_test TEST_GEN_PROGS_x86_64 += dirty_log_test TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index ff234018219c..635ee6c33ad2 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -308,6 +308,8 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid); void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *state); +struct kvm_msr_list *kvm_get_msr_index_list(void); + struct kvm_cpuid2 *kvm_get_supported_cpuid(void); void vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_cpuid2 *cpuid); @@ -322,10 +324,13 @@ kvm_get_supported_cpuid_entry(uint32_t function) } uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index); +int _vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index, + uint64_t msr_value); void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index, uint64_t msr_value); -uint32_t kvm_get_cpuid_max(void); +uint32_t kvm_get_cpuid_max_basic(void); +uint32_t 
kvm_get_cpuid_max_extended(void); void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits); /* diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index 6698cb741e10..683d3bdb8f6a 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -869,7 +869,7 @@ uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index) return buffer.entry.data; } -/* VCPU Set MSR +/* _VCPU Set MSR * * Input Args: * vm - Virtual Machine @@ -879,12 +879,12 @@ uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index) * * Output Args: None * - * Return: On success, nothing. On failure a TEST_ASSERT is produced. + * Return: The result of KVM_SET_MSRS. * - * Set value of MSR for VCPU. + * Sets the value of an MSR for the given VCPU. */ -void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index, - uint64_t msr_value) +int _vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index, + uint64_t msr_value) { struct vcpu *vcpu = vcpu_find(vm, vcpuid); struct { @@ -899,6 +899,29 @@ void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index, buffer.entry.index = msr_index; buffer.entry.data = msr_value; r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header); + return r; +} + +/* VCPU Set MSR + * + * Input Args: + * vm - Virtual Machine + * vcpuid - VCPU ID + * msr_index - Index of MSR + * msr_value - New value of MSR + * + * Output Args: None + * + * Return: On success, nothing. On failure a TEST_ASSERT is produced. + * + * Set value of MSR for VCPU. + */ +void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index, + uint64_t msr_value) +{ + int r; + + r = _vcpu_set_msr(vm, vcpuid, msr_index, msr_value); TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n" " rc: %i errno: %i", r, errno); } @@ -1000,19 +1023,45 @@ struct kvm_x86_state { struct kvm_msrs msrs; }; -static int kvm_get_num_msrs(struct kvm_vm *vm) +static int kvm_get_num_msrs_fd(int kvm_fd) { struct kvm_msr_list nmsrs; int r; nmsrs.nmsrs = 0; - r = ioctl(vm->kvm_fd, KVM_GET_MSR_INDEX_LIST, &nmsrs); + r = ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &nmsrs); TEST_ASSERT(r == -1 && errno == E2BIG, "Unexpected result from KVM_GET_MSR_INDEX_LIST probe, r: %i", r); return nmsrs.nmsrs; } +static int kvm_get_num_msrs(struct kvm_vm *vm) +{ + return kvm_get_num_msrs_fd(vm->kvm_fd); +} + +struct kvm_msr_list *kvm_get_msr_index_list(void) +{ + struct kvm_msr_list *list; + int nmsrs, r, kvm_fd; + + kvm_fd = open(KVM_DEV_PATH, O_RDONLY); + if (kvm_fd < 0) + exit(KSFT_SKIP); + + nmsrs = kvm_get_num_msrs_fd(kvm_fd); + list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0])); + list->nmsrs = nmsrs; + r = ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list); + close(kvm_fd); + + TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_MSR_INDEX_LIST, r: %i", + r); + + return list; +} + struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid) { struct vcpu *vcpu = vcpu_find(vm, vcpuid); @@ -1158,7 +1207,12 @@ bool is_intel_cpu(void) return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]); } -uint32_t kvm_get_cpuid_max(void) +uint32_t kvm_get_cpuid_max_basic(void) +{ + return kvm_get_supported_cpuid_entry(0)->eax; +} + +uint32_t kvm_get_cpuid_max_extended(void) { return kvm_get_supported_cpuid_entry(0x80000000)->eax; } @@ -1169,7 +1223,7 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits) bool pae; /* SDM 4.1.4 */ - 
if (kvm_get_cpuid_max() < 0x80000008) { + if (kvm_get_cpuid_max_extended() < 0x80000008) { pae = kvm_get_supported_cpuid_entry(1)->edx & (1 << 6); *pa_bits = pae ? 36 : 32; *va_bits = 32; diff --git a/tools/testing/selftests/kvm/x86_64/xss_msr_test.c b/tools/testing/selftests/kvm/x86_64/xss_msr_test.c new file mode 100644 index 000000000000..851ea81b9d9f --- /dev/null +++ b/tools/testing/selftests/kvm/x86_64/xss_msr_test.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019, Google LLC. + * + * Tests for the IA32_XSS MSR. + */ + +#define _GNU_SOURCE /* for program_invocation_short_name */ +#include + +#include "test_util.h" +#include "kvm_util.h" +#include "vmx.h" + +#define VCPU_ID 1 +#define MSR_BITS 64 + +#define X86_FEATURE_XSAVES (1<<3) + +bool is_supported_msr(u32 msr_index) +{ + struct kvm_msr_list *list; + bool found = false; + int i; + + list = kvm_get_msr_index_list(); + for (i = 0; i < list->nmsrs; ++i) { + if (list->indices[i] == msr_index) { + found = true; + break; + } + } + + free(list); + return found; +} + +int main(int argc, char *argv[]) +{ + struct kvm_cpuid_entry2 *entry; + bool xss_supported = false; + struct kvm_vm *vm; + uint64_t xss_val; + int i, r; + + /* Create VM */ + vm = vm_create_default(VCPU_ID, 0, 0); + + if (kvm_get_cpuid_max_basic() >= 0xd) { + entry = kvm_get_supported_cpuid_index(0xd, 1); + xss_supported = entry && !!(entry->eax & X86_FEATURE_XSAVES); + } + if (!xss_supported) { + printf("IA32_XSS is not supported by the vCPU.\n"); + exit(KSFT_SKIP); + } + + xss_val = vcpu_get_msr(vm, VCPU_ID, MSR_IA32_XSS); + TEST_ASSERT(xss_val == 0, + "MSR_IA32_XSS should be initialized to zero\n"); + + vcpu_set_msr(vm, VCPU_ID, MSR_IA32_XSS, xss_val); + /* + * At present, KVM only supports a guest IA32_XSS value of 0. Verify + * that trying to set the guest IA32_XSS to an unsupported value fails. + * Also, in the future when a non-zero value succeeds check that + * IA32_XSS is in the KVM_GET_MSR_INDEX_LIST. + */ + for (i = 0; i < MSR_BITS; ++i) { + r = _vcpu_set_msr(vm, VCPU_ID, MSR_IA32_XSS, 1ull << i); + TEST_ASSERT(r == 0 || is_supported_msr(MSR_IA32_XSS), + "IA32_XSS was able to be set, but was not found in KVM_GET_MSR_INDEX_LIST.\n"); + } + + kvm_vm_free(vm); +} -- cgit v1.2.3 From 149487bdacde32f5a9a344a49533ae0772fb9db7 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 21 Oct 2019 15:58:42 -0700 Subject: KVM: Add separate helper for putting borrowed reference to kvm Add a new helper, kvm_put_kvm_no_destroy(), to handle putting a borrowed reference[*] to the VM when installing a new file descriptor fails. KVM expects the refcount to remain valid in this case, as the in-progress ioctl() has an explicit reference to the VM. The primary motiviation for the helper is to document that the 'kvm' pointer is still valid after putting the borrowed reference, e.g. to document that doing mutex(&kvm->lock) immediately after putting a ref to kvm isn't broken. [*] When exposing a new object to userspace via a file descriptor, e.g. a new vcpu, KVM grabs a reference to itself (the VM) prior to making the object visible to userspace to avoid prematurely freeing the VM in the scenario where userspace immediately closes file descriptor. 
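The borrowed-reference rule can be pictured with an ordinary reference count: the no-destroy variant only decrements and treats "reached zero" as a bug, because the in-progress ioctl still holds its own reference. A userspace sketch of that contract (names invented for illustration, not the kernel API):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int users = 1;    /* the ioctl caller's own reference */

    static void get_ref(void) { atomic_fetch_add(&users, 1); }

    static void put_ref(void)
    {
        if (atomic_fetch_sub(&users, 1) == 1)
            puts("last reference dropped: destroy the object");
    }

    /* Drop a reference taken on behalf of a new fd, never destroying. */
    static void put_ref_no_destroy(void)
    {
        /* Hitting zero here would mean the caller's reference vanished. */
        assert(atomic_fetch_sub(&users, 1) > 1);
    }

    int main(void)
    {
        get_ref();              /* ref for the fd we are about to install */
        /* ... installing the file descriptor fails ... */
        put_ref_no_destroy();   /* object stays alive for the caller */
        put_ref();              /* caller's final put destroys it */
        return 0;
    }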
Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/powerpc/kvm/book3s_64_mmu_hv.c | 2 +- arch/powerpc/kvm/book3s_64_vio.c | 2 +- include/linux/kvm_host.h | 1 + virt/kvm/kvm_main.c | 16 ++++++++++++++-- 4 files changed, 17 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 9a75f0e1933b..68678e31c84c 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -2000,7 +2000,7 @@ int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf) ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag | O_CLOEXEC); if (ret < 0) { kfree(ctx); - kvm_put_kvm(kvm); + kvm_put_kvm_no_destroy(kvm); return ret; } diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 5834db0a54c6..883a66e76638 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -317,7 +317,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, if (ret >= 0) list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables); else - kvm_put_kvm(kvm); + kvm_put_kvm_no_destroy(kvm); mutex_unlock(&kvm->lock); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index d2017302996c..a817e446c9aa 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -621,6 +621,7 @@ void kvm_exit(void); void kvm_get_kvm(struct kvm *kvm); void kvm_put_kvm(struct kvm *kvm); +void kvm_put_kvm_no_destroy(struct kvm *kvm); static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id) { diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 67ef3f2e19e8..b8534c6b8cf6 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -772,6 +772,18 @@ void kvm_put_kvm(struct kvm *kvm) } EXPORT_SYMBOL_GPL(kvm_put_kvm); +/* + * Used to put a reference that was taken on behalf of an object associated + * with a user-visible file descriptor, e.g. a vcpu or device, if installation + * of the new file descriptor fails and the reference cannot be transferred to + * its final owner. In such cases, the caller is still actively using @kvm and + * will fail miserably if the refcount unexpectedly hits zero. + */ +void kvm_put_kvm_no_destroy(struct kvm *kvm) +{ + WARN_ON(refcount_dec_and_test(&kvm->users_count)); +} +EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy); static int kvm_vm_release(struct inode *inode, struct file *filp) { @@ -2679,7 +2691,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) kvm_get_kvm(kvm); r = create_vcpu_fd(vcpu); if (r < 0) { - kvm_put_kvm(kvm); + kvm_put_kvm_no_destroy(kvm); goto unlock_vcpu_destroy; } @@ -3117,7 +3129,7 @@ static int kvm_ioctl_create_device(struct kvm *kvm, kvm_get_kvm(kvm); ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); if (ret < 0) { - kvm_put_kvm(kvm); + kvm_put_kvm_no_destroy(kvm); mutex_lock(&kvm->lock); list_del(&dev->vm_node); mutex_unlock(&kvm->lock); -- cgit v1.2.3 From 19308a412ec52c0de92d296842be237778753d9b Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Thu, 10 Oct 2019 14:37:25 +0800 Subject: x86/kvm: Fix -Wmissing-prototypes warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We get two warning when build kernel with W=1: arch/x86/kernel/kvm.c:872:6: warning: no previous prototype for ‘arch_haltpoll_enable’ [-Wmissing-prototypes] arch/x86/kernel/kvm.c:885:6: warning: no previous prototype for ‘arch_haltpoll_disable’ [-Wmissing-prototypes] Including the missing head file can fix this. 
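For readers unfamiliar with this warning class, it is easy to reproduce outside the kernel: any non-static function defined without a visible declaration trips -Wmissing-prototypes, and pulling in the header that declares it is the fix. An illustrative sketch (file and function names are made up):

    /* foo.c -- build with: cc -Wmissing-prototypes -c foo.c */

    /*
     * Without the declaration below (normally provided by the header that
     * owns the interface), the compiler reports:
     *   warning: no previous prototype for 'foo_enable' [-Wmissing-prototypes]
     * Including that header, i.e. making a prototype visible, silences it.
     */
    void foo_enable(unsigned int cpu);    /* stands in for the header */

    void foo_enable(unsigned int cpu)
    {
        (void)cpu;
    }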
Signed-off-by: Yi Wang Signed-off-by: Paolo Bonzini --- arch/x86/kernel/kvm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index e820568ed4d5..32ef1ee733b7 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -33,6 +33,7 @@ #include #include #include +#include static int kvmapf = 1; -- cgit v1.2.3 From 53fafdbb8b21fa99dfd8376ca056bffde8cafc11 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Mon, 28 Oct 2019 12:36:22 -0200 Subject: KVM: x86: switch KVMCLOCK base to monotonic raw clock Commit 0bc48bea36d1 ("KVM: x86: update master clock before computing kvmclock_offset") switches the order of operations to avoid the conversion TSC (without frequency correction) -> system_timestamp (with frequency correction), which might cause a time jump. However, it leaves any other masterclock update unsafe, which includes, at the moment: * HV_X64_MSR_REFERENCE_TSC MSR write. * TSC writes. * Host suspend/resume. Avoid the time jump issue by using the frequency-uncorrected CLOCK_MONOTONIC_RAW clock. It is the responsibility of the guest's timekeeping software to track and correct a reference clock such as UTC. This fixes a forward time jump (which can result in failure to bring up a vCPU) during vCPU hotplug: Oct 11 14:48:33 storage kernel: CPU2 has been hot-added Oct 11 14:48:34 storage kernel: CPU3 has been hot-added Oct 11 14:49:22 storage kernel: smpboot: Booting Node 0 Processor 2 APIC 0x2 <-- time jump of almost 1 minute Oct 11 14:49:22 storage kernel: smpboot: do_boot_cpu failed(-1) to wakeup CPU#2 Oct 11 14:49:23 storage kernel: smpboot: Booting Node 0 Processor 3 APIC 0x3 Oct 11 14:49:23 storage kernel: kvm-clock: cpu 3, msr 0:7ff640c1, secondary cpu clock Which happens because: /* * Wait 10s total for a response from AP */ boot_error = -1; timeout = jiffies + 10*HZ; while (time_before(jiffies, timeout)) { ... 
} Analyzed-by: Igor Mammedov Signed-off-by: Marcelo Tosatti Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 59 ++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 37 insertions(+), 22 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 19a0dc96beca..89621025577a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1526,20 +1526,25 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) } #ifdef CONFIG_X86_64 +struct pvclock_clock { + int vclock_mode; + u64 cycle_last; + u64 mask; + u32 mult; + u32 shift; +}; + struct pvclock_gtod_data { seqcount_t seq; - struct { /* extract of a clocksource struct */ - int vclock_mode; - u64 cycle_last; - u64 mask; - u32 mult; - u32 shift; - } clock; + struct pvclock_clock clock; /* extract of a clocksource struct */ + struct pvclock_clock raw_clock; /* extract of a clocksource struct */ + u64 boot_ns_raw; u64 boot_ns; u64 nsec_base; u64 wall_time_sec; + u64 monotonic_raw_nsec; }; static struct pvclock_gtod_data pvclock_gtod_data; @@ -1547,9 +1552,10 @@ static struct pvclock_gtod_data pvclock_gtod_data; static void update_pvclock_gtod(struct timekeeper *tk) { struct pvclock_gtod_data *vdata = &pvclock_gtod_data; - u64 boot_ns; + u64 boot_ns, boot_ns_raw; boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot)); + boot_ns_raw = ktime_to_ns(ktime_add(tk->tkr_raw.base, tk->offs_boot)); write_seqcount_begin(&vdata->seq); @@ -1560,11 +1566,20 @@ static void update_pvclock_gtod(struct timekeeper *tk) vdata->clock.mult = tk->tkr_mono.mult; vdata->clock.shift = tk->tkr_mono.shift; + vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->archdata.vclock_mode; + vdata->raw_clock.cycle_last = tk->tkr_raw.cycle_last; + vdata->raw_clock.mask = tk->tkr_raw.mask; + vdata->raw_clock.mult = tk->tkr_raw.mult; + vdata->raw_clock.shift = tk->tkr_raw.shift; + vdata->boot_ns = boot_ns; vdata->nsec_base = tk->tkr_mono.xtime_nsec; vdata->wall_time_sec = tk->xtime_sec; + vdata->boot_ns_raw = boot_ns_raw; + vdata->monotonic_raw_nsec = tk->tkr_raw.xtime_nsec; + write_seqcount_end(&vdata->seq); } #endif @@ -1988,21 +2003,21 @@ static u64 read_tsc(void) return last; } -static inline u64 vgettsc(u64 *tsc_timestamp, int *mode) +static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp, + int *mode) { long v; - struct pvclock_gtod_data *gtod = &pvclock_gtod_data; u64 tsc_pg_val; - switch (gtod->clock.vclock_mode) { + switch (clock->vclock_mode) { case VCLOCK_HVCLOCK: tsc_pg_val = hv_read_tsc_page_tsc(hv_get_tsc_page(), tsc_timestamp); if (tsc_pg_val != U64_MAX) { /* TSC page valid */ *mode = VCLOCK_HVCLOCK; - v = (tsc_pg_val - gtod->clock.cycle_last) & - gtod->clock.mask; + v = (tsc_pg_val - clock->cycle_last) & + clock->mask; } else { /* TSC page invalid */ *mode = VCLOCK_NONE; @@ -2011,8 +2026,8 @@ static inline u64 vgettsc(u64 *tsc_timestamp, int *mode) case VCLOCK_TSC: *mode = VCLOCK_TSC; *tsc_timestamp = read_tsc(); - v = (*tsc_timestamp - gtod->clock.cycle_last) & - gtod->clock.mask; + v = (*tsc_timestamp - clock->cycle_last) & + clock->mask; break; default: *mode = VCLOCK_NONE; @@ -2021,10 +2036,10 @@ static inline u64 vgettsc(u64 *tsc_timestamp, int *mode) if (*mode == VCLOCK_NONE) *tsc_timestamp = v = 0; - return v * gtod->clock.mult; + return v * clock->mult; } -static int do_monotonic_boot(s64 *t, u64 *tsc_timestamp) +static int do_monotonic_raw(s64 *t, u64 *tsc_timestamp) { struct pvclock_gtod_data *gtod = &pvclock_gtod_data; unsigned long seq; @@ -2033,10 +2048,10 @@ static int 
do_monotonic_boot(s64 *t, u64 *tsc_timestamp) do { seq = read_seqcount_begin(>od->seq); - ns = gtod->nsec_base; - ns += vgettsc(tsc_timestamp, &mode); + ns = gtod->monotonic_raw_nsec; + ns += vgettsc(>od->raw_clock, tsc_timestamp, &mode); ns >>= gtod->clock.shift; - ns += gtod->boot_ns; + ns += gtod->boot_ns_raw; } while (unlikely(read_seqcount_retry(>od->seq, seq))); *t = ns; @@ -2054,7 +2069,7 @@ static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp) seq = read_seqcount_begin(>od->seq); ts->tv_sec = gtod->wall_time_sec; ns = gtod->nsec_base; - ns += vgettsc(tsc_timestamp, &mode); + ns += vgettsc(>od->clock, tsc_timestamp, &mode); ns >>= gtod->clock.shift; } while (unlikely(read_seqcount_retry(>od->seq, seq))); @@ -2071,7 +2086,7 @@ static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp) if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode)) return false; - return gtod_is_based_on_tsc(do_monotonic_boot(kernel_ns, + return gtod_is_based_on_tsc(do_monotonic_raw(kernel_ns, tsc_timestamp)); } -- cgit v1.2.3 From 7f7f0d9c0bcbed864551012e4eb88a631fd376f9 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Fri, 25 Oct 2019 18:54:34 +0800 Subject: KVM: x86: get rid of odd out jump label in pdptrs_changed The odd out jump label is really not needed. Get rid of it by return true directly while r < 0 as suggested by Paolo. This further lead to var changed being unused. Remove it too. Signed-off-by: Miaohe Lin Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 89621025577a..8b3dcaa7985a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -721,7 +721,6 @@ EXPORT_SYMBOL_GPL(load_pdptrs); bool pdptrs_changed(struct kvm_vcpu *vcpu) { u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)]; - bool changed = true; int offset; gfn_t gfn; int r; @@ -737,11 +736,9 @@ bool pdptrs_changed(struct kvm_vcpu *vcpu) r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte), PFERR_USER_MASK | PFERR_WRITE_MASK); if (r < 0) - goto out; - changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0; -out: + return true; - return changed; + return memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0; } EXPORT_SYMBOL_GPL(pdptrs_changed); -- cgit v1.2.3 From f399e60c45f6b6e6ad6dfcedff1dd6386e086b0b Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Mon, 4 Nov 2019 17:59:58 -0500 Subject: KVM: x86: optimize more exit handlers in vmx.c Eliminate wasteful call/ret non RETPOLINE case and unnecessary fentry dynamic tracing hooking points. 
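The wrapper removal is the classic "point the table at the helper" refactor: when a table entry's signature already matches the common helper, a one-line forwarding function only adds a call/ret (and, in the kernel, an extra fentry patch site). A self-contained sketch with invented names:

    #include <stdio.h>

    struct ctx { int value; };

    /* Common helper, analogous to kvm_emulate_cpuid() and friends. */
    static int emulate_thing(struct ctx *c)
    {
        return c->value + 1;
    }

    /*
     * Before: a trivial wrapper whose only job is to forward the call.
     *
     *   static int handle_thing(struct ctx *c) { return emulate_thing(c); }
     *   static int (*handlers[])(struct ctx *) = { handle_thing };
     *
     * After: point the table straight at the helper and drop the wrapper.
     */
    static int (*handlers[])(struct ctx *) = { emulate_thing };

    int main(void)
    {
        struct ctx c = { .value = 41 };

        printf("%d\n", handlers[0](&c));
        return 0;
    }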
Signed-off-by: Andrea Arcangeli Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 30 +++++------------------------- 1 file changed, 5 insertions(+), 25 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 31ce6bc2c371..e8c21e330449 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4667,7 +4667,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu) return 0; } -static int handle_external_interrupt(struct kvm_vcpu *vcpu) +static __always_inline int handle_external_interrupt(struct kvm_vcpu *vcpu) { ++vcpu->stat.irq_exits; return 1; @@ -4939,21 +4939,6 @@ static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) vmcs_writel(GUEST_DR7, val); } -static int handle_cpuid(struct kvm_vcpu *vcpu) -{ - return kvm_emulate_cpuid(vcpu); -} - -static int handle_rdmsr(struct kvm_vcpu *vcpu) -{ - return kvm_emulate_rdmsr(vcpu); -} - -static int handle_wrmsr(struct kvm_vcpu *vcpu) -{ - return kvm_emulate_wrmsr(vcpu); -} - static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu) { kvm_apic_update_ppr(vcpu); @@ -4970,11 +4955,6 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu) return 1; } -static int handle_halt(struct kvm_vcpu *vcpu) -{ - return kvm_emulate_halt(vcpu); -} - static int handle_vmcall(struct kvm_vcpu *vcpu) { return kvm_emulate_hypercall(vcpu); @@ -5522,11 +5502,11 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_IO_INSTRUCTION] = handle_io, [EXIT_REASON_CR_ACCESS] = handle_cr, [EXIT_REASON_DR_ACCESS] = handle_dr, - [EXIT_REASON_CPUID] = handle_cpuid, - [EXIT_REASON_MSR_READ] = handle_rdmsr, - [EXIT_REASON_MSR_WRITE] = handle_wrmsr, + [EXIT_REASON_CPUID] = kvm_emulate_cpuid, + [EXIT_REASON_MSR_READ] = kvm_emulate_rdmsr, + [EXIT_REASON_MSR_WRITE] = kvm_emulate_wrmsr, [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window, - [EXIT_REASON_HLT] = handle_halt, + [EXIT_REASON_HLT] = kvm_emulate_halt, [EXIT_REASON_INVD] = handle_invd, [EXIT_REASON_INVLPG] = handle_invlpg, [EXIT_REASON_RDPMC] = handle_rdpmc, -- cgit v1.2.3 From 4289d2728664fc1fb49cfc76a6a7d96d913b921f Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Mon, 4 Nov 2019 17:59:59 -0500 Subject: KVM: retpolines: x86: eliminate retpoline from vmx.c exit handlers It's enough to check the exit value and issue a direct call to avoid the retpoline for all the common vmexit reasons. Of course CONFIG_RETPOLINE already forbids gcc to use indirect jumps while compiling all switch() statements, however switch() would still allow the compiler to bisect the case value. It's more efficient to prioritize the most frequent vmexits instead. The halt may be slow paths from the point of the guest, but not necessarily so from the point of the host if the host runs at full CPU capacity and no host CPU is ever left idle. 
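The dispatch pattern itself can be sketched outside the kernel: test the hottest exit reasons first and call their handlers directly, keeping the indirect (retpolined) table lookup as the fallback. Everything below is illustrative; the DEMO_RETPOLINE macro merely stands in for CONFIG_RETPOLINE:

    #include <stdio.h>

    #define DEMO_RETPOLINE 1    /* stands in for CONFIG_RETPOLINE */

    enum exit_reason { EXIT_HLT, EXIT_MSR_WRITE, EXIT_IO, EXIT_MAX };

    static int handle_hlt(void)       { return 1; }
    static int handle_msr_write(void) { return 2; }
    static int handle_io(void)        { return 3; }

    static int (*handlers[EXIT_MAX])(void) = {
        [EXIT_HLT]       = handle_hlt,
        [EXIT_MSR_WRITE] = handle_msr_write,
        [EXIT_IO]        = handle_io,
    };

    static int dispatch(enum exit_reason reason)
    {
    #if DEMO_RETPOLINE
        /* Most frequent exits first: direct calls, no indirect branch. */
        if (reason == EXIT_MSR_WRITE)
            return handle_msr_write();
        if (reason == EXIT_HLT)
            return handle_hlt();
    #endif
        return handlers[reason](); /* rare cases: indirect (retpolined) call */
    }

    int main(void)
    {
        printf("%d %d %d\n", dispatch(EXIT_MSR_WRITE), dispatch(EXIT_HLT),
               dispatch(EXIT_IO));
        return 0;
    }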
Signed-off-by: Andrea Arcangeli Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index e8c21e330449..55f73d1c1765 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -5879,9 +5879,23 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) } if (exit_reason < kvm_vmx_max_exit_handlers - && kvm_vmx_exit_handlers[exit_reason]) + && kvm_vmx_exit_handlers[exit_reason]) { +#ifdef CONFIG_RETPOLINE + if (exit_reason == EXIT_REASON_MSR_WRITE) + return kvm_emulate_wrmsr(vcpu); + else if (exit_reason == EXIT_REASON_PREEMPTION_TIMER) + return handle_preemption_timer(vcpu); + else if (exit_reason == EXIT_REASON_PENDING_INTERRUPT) + return handle_interrupt_window(vcpu); + else if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT) + return handle_external_interrupt(vcpu); + else if (exit_reason == EXIT_REASON_HLT) + return kvm_emulate_halt(vcpu); + else if (exit_reason == EXIT_REASON_EPT_MISCONFIG) + return handle_ept_misconfig(vcpu); +#endif return kvm_vmx_exit_handlers[exit_reason](vcpu); - else { + } else { vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", exit_reason); dump_vmcs(); -- cgit v1.2.3 From 3dcb2a3fa5a0c903fd754bfba2b8defb9f191974 Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Mon, 4 Nov 2019 18:00:00 -0500 Subject: KVM: retpolines: x86: eliminate retpoline from svm.c exit handlers It's enough to check the exit value and issue a direct call to avoid the retpoline for all the common vmexit reasons. After this commit is applied, here the most common retpolines executed under a high resolution timer workload in the guest on a SVM host: [..] @[ trace_retpoline+1 __trace_retpoline+30 __x86_indirect_thunk_rax+33 ktime_get_update_offsets_now+70 hrtimer_interrupt+131 smp_apic_timer_interrupt+106 apic_timer_interrupt+15 start_sw_timer+359 restart_apic_timer+85 kvm_set_msr_common+1497 msr_interception+142 vcpu_enter_guest+684 kvm_arch_vcpu_ioctl_run+261 kvm_vcpu_ioctl+559 do_vfs_ioctl+164 ksys_ioctl+96 __x64_sys_ioctl+22 do_syscall_64+89 entry_SYSCALL_64_after_hwframe+68 ]: 1940 @[ trace_retpoline+1 __trace_retpoline+30 __x86_indirect_thunk_r12+33 force_qs_rnp+217 rcu_gp_kthread+1270 kthread+268 ret_from_fork+34 ]: 4644 @[]: 25095 @[ trace_retpoline+1 __trace_retpoline+30 __x86_indirect_thunk_rax+33 lapic_next_event+28 clockevents_program_event+148 hrtimer_start_range_ns+528 start_sw_timer+356 restart_apic_timer+85 kvm_set_msr_common+1497 msr_interception+142 vcpu_enter_guest+684 kvm_arch_vcpu_ioctl_run+261 kvm_vcpu_ioctl+559 do_vfs_ioctl+164 ksys_ioctl+96 __x64_sys_ioctl+22 do_syscall_64+89 entry_SYSCALL_64_after_hwframe+68 ]: 41474 @[ trace_retpoline+1 __trace_retpoline+30 __x86_indirect_thunk_rax+33 clockevents_program_event+148 hrtimer_start_range_ns+528 start_sw_timer+356 restart_apic_timer+85 kvm_set_msr_common+1497 msr_interception+142 vcpu_enter_guest+684 kvm_arch_vcpu_ioctl_run+261 kvm_vcpu_ioctl+559 do_vfs_ioctl+164 ksys_ioctl+96 __x64_sys_ioctl+22 do_syscall_64+89 entry_SYSCALL_64_after_hwframe+68 ]: 41474 @[ trace_retpoline+1 __trace_retpoline+30 __x86_indirect_thunk_rax+33 ktime_get+58 clockevents_program_event+84 hrtimer_start_range_ns+528 start_sw_timer+356 restart_apic_timer+85 kvm_set_msr_common+1497 msr_interception+142 vcpu_enter_guest+684 kvm_arch_vcpu_ioctl_run+261 kvm_vcpu_ioctl+559 do_vfs_ioctl+164 ksys_ioctl+96 __x64_sys_ioctl+22 do_syscall_64+89 entry_SYSCALL_64_after_hwframe+68 ]: 41887 @[ trace_retpoline+1 
__trace_retpoline+30 __x86_indirect_thunk_rax+33 lapic_next_event+28 clockevents_program_event+148 hrtimer_try_to_cancel+168 hrtimer_cancel+21 kvm_set_lapic_tscdeadline_msr+43 kvm_set_msr_common+1497 msr_interception+142 vcpu_enter_guest+684 kvm_arch_vcpu_ioctl_run+261 kvm_vcpu_ioctl+559 do_vfs_ioctl+164 ksys_ioctl+96 __x64_sys_ioctl+22 do_syscall_64+89 entry_SYSCALL_64_after_hwframe+68 ]: 42723 @[ trace_retpoline+1 __trace_retpoline+30 __x86_indirect_thunk_rax+33 clockevents_program_event+148 hrtimer_try_to_cancel+168 hrtimer_cancel+21 kvm_set_lapic_tscdeadline_msr+43 kvm_set_msr_common+1497 msr_interception+142 vcpu_enter_guest+684 kvm_arch_vcpu_ioctl_run+261 kvm_vcpu_ioctl+559 do_vfs_ioctl+164 ksys_ioctl+96 __x64_sys_ioctl+22 do_syscall_64+89 entry_SYSCALL_64_after_hwframe+68 ]: 42766 @[ trace_retpoline+1 __trace_retpoline+30 __x86_indirect_thunk_rax+33 ktime_get+58 clockevents_program_event+84 hrtimer_try_to_cancel+168 hrtimer_cancel+21 kvm_set_lapic_tscdeadline_msr+43 kvm_set_msr_common+1497 msr_interception+142 vcpu_enter_guest+684 kvm_arch_vcpu_ioctl_run+261 kvm_vcpu_ioctl+559 do_vfs_ioctl+164 ksys_ioctl+96 __x64_sys_ioctl+22 do_syscall_64+89 entry_SYSCALL_64_after_hwframe+68 ]: 42848 @[ trace_retpoline+1 __trace_retpoline+30 __x86_indirect_thunk_rax+33 ktime_get+58 start_sw_timer+279 restart_apic_timer+85 kvm_set_msr_common+1497 msr_interception+142 vcpu_enter_guest+684 kvm_arch_vcpu_ioctl_run+261 kvm_vcpu_ioctl+559 do_vfs_ioctl+164 ksys_ioctl+96 __x64_sys_ioctl+22 do_syscall_64+89 entry_SYSCALL_64_after_hwframe+68 ]: 499845 @total: 1780243 SVM has no TSC based programmable preemption timer so it is invoking ktime_get() frequently. Signed-off-by: Andrea Arcangeli Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 4153ca8cddb7..a7b358f20aca 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -4999,6 +4999,18 @@ static int handle_exit(struct kvm_vcpu *vcpu) return 0; } +#ifdef CONFIG_RETPOLINE + if (exit_code == SVM_EXIT_MSR) + return msr_interception(svm); + else if (exit_code == SVM_EXIT_VINTR) + return interrupt_window_interception(svm); + else if (exit_code == SVM_EXIT_INTR) + return intr_interception(svm); + else if (exit_code == SVM_EXIT_HLT) + return halt_interception(svm); + else if (exit_code == SVM_EXIT_NPF) + return npf_interception(svm); +#endif return svm_exit_handlers[exit_code](svm); } -- cgit v1.2.3 From 74c504a6d70ab29b2c28bee62f5f39e3dd847ea2 Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Mon, 4 Nov 2019 18:00:01 -0500 Subject: x86: retpolines: eliminate retpoline from msr event handlers It's enough to check the value and issue the direct call. After this commit is applied, here the most common retpolines executed under a high resolution timer workload in the guest on a VMX host: [..] @[ trace_retpoline+1 __trace_retpoline+30 __x86_indirect_thunk_rax+33 do_syscall_64+89 entry_SYSCALL_64_after_hwframe+68 ]: 267 @[]: 2256 @[ trace_retpoline+1 __trace_retpoline+30 __x86_indirect_thunk_rax+33 __kvm_wait_lapic_expire+284 vmx_vcpu_run.part.97+1091 vcpu_enter_guest+377 kvm_arch_vcpu_ioctl_run+261 kvm_vcpu_ioctl+559 do_vfs_ioctl+164 ksys_ioctl+96 __x64_sys_ioctl+22 do_syscall_64+89 entry_SYSCALL_64_after_hwframe+68 ]: 2390 @[]: 33410 @total: 315707 Note the highest hit above is __delay so probably not worth optimizing even if it would be more frequent than 2k hits per sec. 
Signed-off-by: Andrea Arcangeli Signed-off-by: Paolo Bonzini --- arch/x86/events/intel/core.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index fcef678c3423..937363b803c1 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3323,8 +3323,19 @@ static int intel_pmu_hw_config(struct perf_event *event) return 0; } +#ifdef CONFIG_RETPOLINE +static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr); +static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr); +#endif + struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr) { +#ifdef CONFIG_RETPOLINE + if (x86_pmu.guest_get_msrs == intel_guest_get_msrs) + return intel_guest_get_msrs(nr); + else if (x86_pmu.guest_get_msrs == core_guest_get_msrs) + return core_guest_get_msrs(nr); +#endif if (x86_pmu.guest_get_msrs) return x86_pmu.guest_get_msrs(nr); *nr = 0; -- cgit v1.2.3 From ff90afa75573502f3ac05acd5a282d6e3d4cef34 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Mon, 11 Nov 2019 11:16:39 +0200 Subject: KVM: x86: Evaluate latched_init in KVM_SET_VCPU_EVENTS when vCPU not in SMM Commit 4b9852f4f389 ("KVM: x86: Fix INIT signal handling in various CPU states") fixed KVM to also latch pending LAPIC INIT event when vCPU is in VMX operation. However, current API of KVM_SET_VCPU_EVENTS defines this field as part of SMM state and only set pending LAPIC INIT event if vCPU is specified to be in SMM mode (events->smi.smm is set). Change KVM_SET_VCPU_EVENTS handler to set pending LAPIC INIT event by latched_init field regardless of if vCPU is in SMM mode or not. Fixes: 4b9852f4f389 ("KVM: x86: Fix INIT signal handling in various CPU states") Reviewed-by: Mihai Carabas Signed-off-by: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8b3dcaa7985a..c5886eed3d57 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3830,12 +3830,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; else vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; - if (lapic_in_kernel(vcpu)) { - if (events->smi.latched_init) - set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); - else - clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); - } + } + + if (lapic_in_kernel(vcpu)) { + if (events->smi.latched_init) + set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); + else + clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); } } -- cgit v1.2.3 From 27cbe7d61898a1d1d39be32e5acff7d4be6e9d87 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Mon, 11 Nov 2019 11:16:40 +0200 Subject: KVM: x86: Prevent set vCPU into INIT/SIPI_RECEIVED state when INIT are latched Commit 4b9852f4f389 ("KVM: x86: Fix INIT signal handling in various CPU states") fixed KVM to also latch pending LAPIC INIT event when vCPU is in VMX operation. However, current API of KVM_SET_MP_STATE allows userspace to put vCPU into KVM_MP_STATE_SIPI_RECEIVED or KVM_MP_STATE_INIT_RECEIVED even when vCPU is in VMX operation. Fix this by introducing a util method to check if vCPU state latch INIT signals and use it in KVM_SET_MP_STATE handler. 
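From userspace these states are driven through KVM_GET_MP_STATE/KVM_SET_MP_STATE on the vCPU fd. A hedged sketch that creates a bare VM with an in-kernel irqchip and then requests KVM_MP_STATE_INIT_RECEIVED: on a plain, freshly created vCPU the request is normally accepted, while with this fix it is rejected with EINVAL whenever INITs are latched (e.g. the vCPU is in SMM or VMX root operation). Error handling is kept minimal on purpose:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        struct kvm_mp_state mp;
        int kvm, vm, vcpu;

        kvm = open("/dev/kvm", O_RDWR);
        vm = ioctl(kvm, KVM_CREATE_VM, 0);
        ioctl(vm, KVM_CREATE_IRQCHIP, 0);   /* in-kernel LAPIC, before vCPUs */
        vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
        if (kvm < 0 || vm < 0 || vcpu < 0) {
            perror("kvm setup");
            return 1;
        }

        if (ioctl(vcpu, KVM_GET_MP_STATE, &mp) == 0)
            printf("current mp_state: %u\n", mp.mp_state);

        mp.mp_state = KVM_MP_STATE_INIT_RECEIVED;
        if (ioctl(vcpu, KVM_SET_MP_STATE, &mp) < 0)
            perror("KVM_SET_MP_STATE(INIT_RECEIVED)");
        else
            puts("KVM_SET_MP_STATE(INIT_RECEIVED) accepted");
        return 0;
    }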
Fixes: 4b9852f4f389 ("KVM: x86: Fix INIT signal handling in various CPU states") Reported-by: Sean Christopherson Reviewed-by: Mihai Carabas Signed-off-by: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/kvm/lapic.c | 2 +- arch/x86/kvm/x86.c | 8 ++++++-- arch/x86/kvm/x86.h | 5 +++++ 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 87b0fcc23ef8..cacfe14717d6 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2714,7 +2714,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) * KVM_MP_STATE_INIT_RECEIVED state), just eat SIPIs * and leave the INIT pending. */ - if (is_smm(vcpu) || kvm_x86_ops->apic_init_signal_blocked(vcpu)) { + if (kvm_vcpu_latch_init(vcpu)) { WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED); if (test_bit(KVM_APIC_SIPI, &apic->pending_events)) clear_bit(KVM_APIC_SIPI, &apic->pending_events); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c5886eed3d57..34d9048c881e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8706,8 +8706,12 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, mp_state->mp_state != KVM_MP_STATE_RUNNABLE) goto out; - /* INITs are latched while in SMM */ - if ((is_smm(vcpu) || vcpu->arch.smi_pending) && + /* + * KVM_MP_STATE_INIT_RECEIVED means the processor is in + * INIT state; latched init should be reported using + * KVM_SET_VCPU_EVENTS, so reject it here. + */ + if ((kvm_vcpu_latch_init(vcpu) || vcpu->arch.smi_pending) && (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED || mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED)) goto out; diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 2b0805012e3c..29391af8871d 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -258,6 +258,11 @@ static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk) return !(kvm->arch.disabled_quirks & quirk); } +static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu) +{ + return is_smm(vcpu) || kvm_x86_ops->apic_init_signal_blocked(vcpu); +} + void kvm_set_pending_timer(struct kvm_vcpu *vcpu); void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); -- cgit v1.2.3 From e64a8508234afb17a15d1aa98e8c1434fc207755 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Mon, 11 Nov 2019 14:16:05 +0200 Subject: KVM: VMX: Consume pending LAPIC INIT event when exit on INIT_SIGNAL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Intel SDM section 25.2 OTHER CAUSES OF VM EXITS specifies the following on INIT signals: "Such exits do not modify register state or clear pending events as they would outside of VMX operation." When commit 4b9852f4f389 ("KVM: x86: Fix INIT signal handling in various CPU states") was applied, I interepted above Intel SDM statement such that INIT_SIGNAL exit don’t consume the LAPIC INIT pending event. However, when Nadav Amit run matching kvm-unit-test on a bare-metal machine, it turned out my interpetation was wrong. i.e. INIT_SIGNAL exit does consume the LAPIC INIT pending event. (See: https://www.spinics.net/lists/kvm/msg196757.html) Therefore, fix KVM code to behave as observed on bare-metal. 
Fixes: 4b9852f4f389 ("KVM: x86: Fix INIT signal handling in various CPU states") Reported-by: Nadav Amit Reviewed-by: Mihai Carabas Reviewed-by: Joao Martins Signed-off-by: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 55c5791ac52b..cd8d0b040daa 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -3463,6 +3463,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) test_bit(KVM_APIC_INIT, &apic->pending_events)) { if (block_nested_events) return -EBUSY; + clear_bit(KVM_APIC_INIT, &apic->pending_events); nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0); return 0; } -- cgit v1.2.3 From b139b5a24774ee28e3e4d22942e2bede28d48202 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 9 Nov 2019 16:08:20 +0800 Subject: KVM: MMIO: get rid of odd out_err label in kvm_coalesced_mmio_init The out_err label and var ret is unnecessary, clean them up. Signed-off-by: Miaohe Lin Signed-off-by: Paolo Bonzini --- virt/kvm/coalesced_mmio.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c index 8ffd07e2a160..00c747dbc82e 100644 --- a/virt/kvm/coalesced_mmio.c +++ b/virt/kvm/coalesced_mmio.c @@ -110,14 +110,11 @@ static const struct kvm_io_device_ops coalesced_mmio_ops = { int kvm_coalesced_mmio_init(struct kvm *kvm) { struct page *page; - int ret; - ret = -ENOMEM; page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) - goto out_err; + return -ENOMEM; - ret = 0; kvm->coalesced_mmio_ring = page_address(page); /* @@ -128,8 +125,7 @@ int kvm_coalesced_mmio_init(struct kvm *kvm) spin_lock_init(&kvm->ring_lock); INIT_LIST_HEAD(&kvm->coalesced_zones); -out_err: - return ret; + return 0; } void kvm_coalesced_mmio_free(struct kvm *kvm) -- cgit v1.2.3 From 5b4ce93a8fe759e2d6b2ee05765cd5a3b4b6a2f1 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 9 Nov 2019 16:58:54 +0800 Subject: KVM: X86: avoid unused setup_syscalls_segments call when SYSCALL check failed When SYSCALL/SYSENTER ability check failed, cs and ss is inited but remain not used. Delay initializing cs and ss until SYSCALL/SYSENTER ability check passed. 
Signed-off-by: Miaohe Lin Signed-off-by: Paolo Bonzini --- arch/x86/kvm/emulate.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 698efb8c3897..952d1a4f4d7e 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2770,11 +2770,10 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt) return emulate_ud(ctxt); ops->get_msr(ctxt, MSR_EFER, &efer); - setup_syscalls_segments(ctxt, &cs, &ss); - if (!(efer & EFER_SCE)) return emulate_ud(ctxt); + setup_syscalls_segments(ctxt, &cs, &ss); ops->get_msr(ctxt, MSR_STAR, &msr_data); msr_data >>= 32; cs_sel = (u16)(msr_data & 0xfffc); @@ -2838,12 +2837,11 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt) if (ctxt->mode == X86EMUL_MODE_PROT64) return X86EMUL_UNHANDLEABLE; - setup_syscalls_segments(ctxt, &cs, &ss); - ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); if ((msr_data & 0xfffc) == 0x0) return emulate_gp(ctxt, 0); + setup_syscalls_segments(ctxt, &cs, &ss); ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK; ss_sel = cs_sel + 8; -- cgit v1.2.3 From 1a686237d94b8a4bab9ce16ffd3e2208370d7695 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 9 Nov 2019 17:46:49 +0800 Subject: KVM: APIC: add helper func to remove duplicate code in kvm_pv_send_ipi There are some duplicate code in kvm_pv_send_ipi when deal with ipi bitmap. Add helper func to remove it, and eliminate odd out label, get rid of unnecessary kvm_lapic_irq field init and so on. Signed-off-by: Miaohe Lin Signed-off-by: Paolo Bonzini --- arch/x86/kvm/lapic.c | 65 +++++++++++++++++++++++----------------------------- 1 file changed, 29 insertions(+), 36 deletions(-) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index cacfe14717d6..60fb21fe7f42 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -562,60 +562,53 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq, irq->level, irq->trig_mode, dest_map); } +static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map, + struct kvm_lapic_irq *irq, u32 min) +{ + int i, count = 0; + struct kvm_vcpu *vcpu; + + if (min > map->max_apic_id) + return 0; + + for_each_set_bit(i, ipi_bitmap, + min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) { + if (map->phys_map[min + i]) { + vcpu = map->phys_map[min + i]->vcpu; + count += kvm_apic_set_irq(vcpu, irq, NULL); + } + } + + return count; +} + int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, unsigned long ipi_bitmap_high, u32 min, unsigned long icr, int op_64_bit) { - int i; struct kvm_apic_map *map; - struct kvm_vcpu *vcpu; struct kvm_lapic_irq irq = {0}; int cluster_size = op_64_bit ? 64 : 32; - int count = 0; + int count; + + if (icr & (APIC_DEST_MASK | APIC_SHORT_MASK)) + return -KVM_EINVAL; irq.vector = icr & APIC_VECTOR_MASK; irq.delivery_mode = icr & APIC_MODE_MASK; irq.level = (icr & APIC_INT_ASSERT) != 0; irq.trig_mode = icr & APIC_INT_LEVELTRIG; - if (icr & APIC_DEST_MASK) - return -KVM_EINVAL; - if (icr & APIC_SHORT_MASK) - return -KVM_EINVAL; - rcu_read_lock(); map = rcu_dereference(kvm->arch.apic_map); - if (unlikely(!map)) { - count = -EOPNOTSUPP; - goto out; + count = -EOPNOTSUPP; + if (likely(map)) { + count = __pv_send_ipi(&ipi_bitmap_low, map, &irq, min); + min += cluster_size; + count += __pv_send_ipi(&ipi_bitmap_high, map, &irq, min); } - if (min > map->max_apic_id) - goto out; - /* Bits above cluster_size are masked in the caller. 
*/ - for_each_set_bit(i, &ipi_bitmap_low, - min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) { - if (map->phys_map[min + i]) { - vcpu = map->phys_map[min + i]->vcpu; - count += kvm_apic_set_irq(vcpu, &irq, NULL); - } - } - - min += cluster_size; - - if (min > map->max_apic_id) - goto out; - - for_each_set_bit(i, &ipi_bitmap_high, - min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) { - if (map->phys_map[min + i]) { - vcpu = map->phys_map[min + i]->vcpu; - count += kvm_apic_set_irq(vcpu, &irq, NULL); - } - } - -out: rcu_read_unlock(); return count; } -- cgit v1.2.3 From 49d654d85f857d9ca34fe2b4ac6d6cf34677e6c1 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Mon, 11 Nov 2019 14:26:21 +0200 Subject: KVM: SVM: Remove check if APICv enabled in SVM update_cr8_intercept() handler This check is unnecessary as x86 update_cr8_intercept() which calls this VMX/SVM specific callback already performs this check. Reviewed-by: Joao Martins Signed-off-by: Liran Alon Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index a7b358f20aca..d02a73a48461 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -5106,8 +5106,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) { struct vcpu_svm *svm = to_svm(vcpu); - if (svm_nested_virtualize_tpr(vcpu) || - kvm_vcpu_apicv_active(vcpu)) + if (svm_nested_virtualize_tpr(vcpu)) return; clr_cr_intercept(svm, INTERCEPT_CR8_WRITE); -- cgit v1.2.3 From 132f4f7e39fd270c5e3c9c577939081cfd499b16 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Mon, 11 Nov 2019 14:30:54 +0200 Subject: KVM: VMX: Refactor update_cr8_intercept() No functional changes. Reviewed-by: Joao Martins Signed-off-by: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 55f73d1c1765..2a64bf3c62b9 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -5981,17 +5981,14 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu) static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + int tpr_threshold; if (is_guest_mode(vcpu) && nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) return; - if (irr == -1 || tpr < irr) { - vmcs_write32(TPR_THRESHOLD, 0); - return; - } - - vmcs_write32(TPR_THRESHOLD, irr); + tpr_threshold = (irr == -1 || tpr < irr) ? 0 : irr; + vmcs_write32(TPR_THRESHOLD, tpr_threshold); } void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From 02d496cfb88a4856b9d67ade32317077c510aebc Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Mon, 11 Nov 2019 14:30:55 +0200 Subject: KVM: nVMX: Update vmcs01 TPR_THRESHOLD if L2 changed L1 TPR When L1 don't use TPR-Shadow to run L2, L0 configures vmcs02 without TPR-Shadow and install intercepts on CR8 access (load and store). If L1 do not intercept L2 CR8 access, L0 intercepts on those accesses will emulate load/store on L1's LAPIC TPR. If in this case L2 lowers TPR such that there is now an injectable interrupt to L1, apic_update_ppr() will request a KVM_REQ_EVENT which will trigger a call to update_cr8_intercept() to update TPR-Threshold to highest pending IRR priority. However, this update to TPR-Threshold is done while active vmcs is vmcs02 instead of vmcs01. 
Thus, when later at some point L0 will emulate an exit from L2 to L1, L1 will still run with high TPR-Threshold. This will result in every VMEntry to L1 to immediately exit on TPR_BELOW_THRESHOLD and continue to do so infinitely until some condition will cause KVM_REQ_EVENT to be set. (Note that TPR_BELOW_THRESHOLD exit handler do not set KVM_REQ_EVENT until apic_update_ppr() will notice a new injectable interrupt for PPR) To fix this issue, change update_cr8_intercept() such that if L2 lowers L1's TPR in a way that requires to lower L1's TPR-Threshold, save update to TPR-Threshold and apply it to vmcs01 when L0 emulates an exit from L2 to L1. Reviewed-by: Joao Martins Signed-off-by: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 3 +++ arch/x86/kvm/vmx/vmx.c | 5 ++++- arch/x86/kvm/vmx/vmx.h | 3 +++ 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index cd8d0b040daa..bdb9b3028250 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -2073,6 +2073,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) exec_control &= ~CPU_BASED_TPR_SHADOW; exec_control |= vmcs12->cpu_based_vm_exec_control; + vmx->nested.l1_tpr_threshold = -1; if (exec_control & CPU_BASED_TPR_SHADOW) vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); #ifdef CONFIG_X86_64 @@ -4115,6 +4116,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); + if (vmx->nested.l1_tpr_threshold != -1) + vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold); if (kvm_has_tsc_control) decache_tsc_multiplier(vmx); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 2a64bf3c62b9..765086756177 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -5988,7 +5988,10 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) return; tpr_threshold = (irr == -1 || tpr < irr) ? 0 : irr; - vmcs_write32(TPR_THRESHOLD, tpr_threshold); + if (is_guest_mode(vcpu)) + to_vmx(vcpu)->nested.l1_tpr_threshold = tpr_threshold; + else + vmcs_write32(TPR_THRESHOLD, tpr_threshold); } void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index bee16687dc0b..43331dfafffe 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -167,6 +167,9 @@ struct nested_vmx { u64 vmcs01_debugctl; u64 vmcs01_guest_bndcfgs; + /* to migrate it to L1 if L2 writes to L1's CR8 directly */ + int l1_tpr_threshold; + u16 vpid02; u16 last_vpid; -- cgit v1.2.3 From 3ca270fc9edb258d5bfa271bcf851614e9e6e7d4 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Sun, 27 Oct 2019 18:52:38 +0800 Subject: perf/core: Provide a kernel-internal interface to recalibrate event period Currently, perf_event_period() is used by user tools via ioctl. Based on naming convention, exporting perf_event_period() for kernel users (such as KVM) who may recalibrate the event period for their assigned counter according to their requirements. The perf_event_period() is an external accessor, just like the perf_event_{en,dis}able() and should thus use perf_event_ctx_lock(). 
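As a rough sketch of how an in-kernel user such as KVM might call the new accessor (the helper below is hypothetical; it assumes an event created with perf_event_create_kernel_counter() and relies only on the perf_event_period() exported here):

    #include <linux/perf_event.h>

    /*
     * Hypothetical helper: reprogram @event so it overflows once the guest
     * counter, currently at @counter with width mask @mask, wraps around.
     */
    static int set_remaining_period(struct perf_event *event, u64 counter, u64 mask)
    {
            u64 period = (-counter) & mask; /* increments left until overflow */

            /* perf_event_period() takes the ctx lock and validates the value. */
            return perf_event_period(event, period);
    }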
Suggested-by: Kan Liang Signed-off-by: Like Xu Acked-by: Peter Zijlstra Signed-off-by: Paolo Bonzini --- include/linux/perf_event.h | 5 +++++ kernel/events/core.c | 28 +++++++++++++++++++++------- 2 files changed, 26 insertions(+), 7 deletions(-) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 61448c19a132..d601df36e671 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1336,6 +1336,7 @@ extern void perf_event_disable_local(struct perf_event *event); extern void perf_event_disable_inatomic(struct perf_event *event); extern void perf_event_task_tick(void); extern int perf_event_account_interrupt(struct perf_event *event); +extern int perf_event_period(struct perf_event *event, u64 value); #else /* !CONFIG_PERF_EVENTS: */ static inline void * perf_aux_output_begin(struct perf_output_handle *handle, @@ -1415,6 +1416,10 @@ static inline void perf_event_disable(struct perf_event *event) { } static inline int __perf_event_disable(void *info) { return -1; } static inline void perf_event_task_tick(void) { } static inline int perf_event_release_kernel(struct perf_event *event) { return 0; } +static inline int perf_event_period(struct perf_event *event, u64 value) +{ + return -EINVAL; +} #endif #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL) diff --git a/kernel/events/core.c b/kernel/events/core.c index 9ec0b0bfddbd..e1b83d2731da 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -5106,16 +5106,11 @@ static int perf_event_check_period(struct perf_event *event, u64 value) return event->pmu->check_period(event, value); } -static int perf_event_period(struct perf_event *event, u64 __user *arg) +static int _perf_event_period(struct perf_event *event, u64 value) { - u64 value; - if (!is_sampling_event(event)) return -EINVAL; - if (copy_from_user(&value, arg, sizeof(value))) - return -EFAULT; - if (!value) return -EINVAL; @@ -5133,6 +5128,19 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg) return 0; } +int perf_event_period(struct perf_event *event, u64 value) +{ + struct perf_event_context *ctx; + int ret; + + ctx = perf_event_ctx_lock(event); + ret = _perf_event_period(event, value); + perf_event_ctx_unlock(event, ctx); + + return ret; +} +EXPORT_SYMBOL_GPL(perf_event_period); + static const struct file_operations perf_fops; static inline int perf_fget_light(int fd, struct fd *p) @@ -5176,8 +5184,14 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon return _perf_event_refresh(event, arg); case PERF_EVENT_IOC_PERIOD: - return perf_event_period(event, (u64 __user *)arg); + { + u64 value; + + if (copy_from_user(&value, (u64 __user *)arg, sizeof(value))) + return -EFAULT; + return _perf_event_period(event, value); + } case PERF_EVENT_IOC_ID: { u64 id = primary_event_id(event); -- cgit v1.2.3 From 52ba4b0b99770e892f43da1238f437155acb8b58 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Sun, 27 Oct 2019 18:52:39 +0800 Subject: perf/core: Provide a kernel-internal interface to pause perf_event Exporting perf_event_pause() as an external accessor for kernel users (such as KVM) who may do both disable perf_event and read count with just one time to hold perf_event_ctx_lock. Also the value could be reset optionally. 
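A short usage sketch (hypothetical caller; the only new API assumed is the perf_event_pause() added by this patch):

    #include <linux/perf_event.h>

    /*
     * Hypothetical helper: stop @event and read its count in one locked
     * operation, zeroing the count so later accumulation starts clean.
     * The event must not have attr.inherit set (the API WARNs on that).
     */
    static u64 snapshot_and_reset(struct perf_event *event)
    {
            return perf_event_pause(event, true);
    }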
Suggested-by: Peter Zijlstra Signed-off-by: Like Xu Acked-by: Peter Zijlstra Signed-off-by: Paolo Bonzini --- include/linux/perf_event.h | 5 +++++ kernel/events/core.c | 18 ++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index d601df36e671..e9768bfc76f6 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1337,6 +1337,7 @@ extern void perf_event_disable_inatomic(struct perf_event *event); extern void perf_event_task_tick(void); extern int perf_event_account_interrupt(struct perf_event *event); extern int perf_event_period(struct perf_event *event, u64 value); +extern u64 perf_event_pause(struct perf_event *event, bool reset); #else /* !CONFIG_PERF_EVENTS: */ static inline void * perf_aux_output_begin(struct perf_output_handle *handle, @@ -1420,6 +1421,10 @@ static inline int perf_event_period(struct perf_event *event, u64 value) { return -EINVAL; } +static inline u64 perf_event_pause(struct perf_event *event, bool reset) +{ + return 0; +} #endif #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL) diff --git a/kernel/events/core.c b/kernel/events/core.c index e1b83d2731da..fc9f5ebf4849 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -5029,6 +5029,24 @@ static void _perf_event_reset(struct perf_event *event) perf_event_update_userpage(event); } +/* Assume it's not an event with inherit set. */ +u64 perf_event_pause(struct perf_event *event, bool reset) +{ + struct perf_event_context *ctx; + u64 count; + + ctx = perf_event_ctx_lock(event); + WARN_ON_ONCE(event->attr.inherit); + _perf_event_disable(event); + count = local64_read(&event->count); + if (reset) + local64_set(&event->count, 0); + perf_event_ctx_unlock(event, ctx); + + return count; +} +EXPORT_SYMBOL_GPL(perf_event_pause); + /* * Holding the top-level event's child_mutex means that any * descendant process that has inherited this event will block -- cgit v1.2.3 From 98ff80f5b788c1818464022cc61924ef5630d99d Mon Sep 17 00:00:00 2001 From: Like Xu Date: Sun, 27 Oct 2019 18:52:40 +0800 Subject: KVM: x86/vPMU: Rename pmu_ops callbacks from msr_idx to rdpmc_ecx The leagcy pmu_ops->msr_idx_to_pmc is only called in kvm_pmu_rdpmc, so this function actually receives the contents of ECX before RDPMC, and translates it to a kvm_pmc. Let's clarify its semantic by renaming the existing msr_idx_to_pmc to rdpmc_ecx_to_pmc, and is_valid_msr_idx to is_valid_rdpmc_ecx; likewise for the wrapper kvm_pmu_is_valid_msr_idx. 
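For context, a small sketch of the ECX layout that the renamed callbacks decode (illustrative only; the bit assignments follow the Intel implementation shown below, and no new API is introduced):

    #include <linux/types.h>

    /*
     * RDPMC ECX layout as handled by the Intel callback: bit 30 selects the
     * fixed-counter space, the low bits index a counter within that space.
     * The in-tree helpers additionally apply a validity mask.
     */
    static void decode_rdpmc_ecx(unsigned int ecx, bool *fixed, unsigned int *index)
    {
            *fixed = ecx & (1u << 30);
            *index = ecx & ~(3u << 30);
    }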
Suggested-by: Paolo Bonzini Reviewed-by: Jim Mattson Signed-off-by: Like Xu Signed-off-by: Paolo Bonzini --- arch/x86/kvm/pmu.c | 6 +++--- arch/x86/kvm/pmu.h | 8 ++++---- arch/x86/kvm/pmu_amd.c | 9 +++++---- arch/x86/kvm/vmx/pmu_intel.c | 10 +++++----- arch/x86/kvm/x86.c | 2 +- 5 files changed, 18 insertions(+), 17 deletions(-) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 75e8f9fae031..33f6fe1b5c56 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -266,9 +266,9 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu) } /* check if idx is a valid index to access PMU */ -int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx) +int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx) { - return kvm_x86_ops->pmu_ops->is_valid_msr_idx(vcpu, idx); + return kvm_x86_ops->pmu_ops->is_valid_rdpmc_ecx(vcpu, idx); } bool is_vmware_backdoor_pmc(u32 pmc_idx) @@ -318,7 +318,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data) if (is_vmware_backdoor_pmc(idx)) return kvm_pmu_rdpmc_vmware(vcpu, idx, data); - pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx, &mask); + pmc = kvm_x86_ops->pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask); if (!pmc) return 1; diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index 58265f761c3b..c4a80fe285a5 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -25,9 +25,9 @@ struct kvm_pmu_ops { unsigned (*find_fixed_event)(int idx); bool (*pmc_is_enabled)(struct kvm_pmc *pmc); struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx); - struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx, - u64 *mask); - int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx); + struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu, + unsigned int idx, u64 *mask); + int (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx); bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr); int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data); int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info); @@ -110,7 +110,7 @@ void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx); void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu); void kvm_pmu_handle_event(struct kvm_vcpu *vcpu); int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data); -int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx); +int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx); bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr); int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data); int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c index c8388389a3b0..a4a6d8a09f70 100644 --- a/arch/x86/kvm/pmu_amd.c +++ b/arch/x86/kvm/pmu_amd.c @@ -174,7 +174,7 @@ static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx) } /* returns 0 if idx's corresponding MSR exists; otherwise returns 1. 
*/ -static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx) +static int amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); @@ -184,7 +184,8 @@ static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx) } /* idx is the ECX register of RDPMC instruction */ -static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *mask) +static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu, + unsigned int idx, u64 *mask) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); struct kvm_pmc *counters; @@ -306,8 +307,8 @@ struct kvm_pmu_ops amd_pmu_ops = { .find_fixed_event = amd_find_fixed_event, .pmc_is_enabled = amd_pmc_is_enabled, .pmc_idx_to_pmc = amd_pmc_idx_to_pmc, - .msr_idx_to_pmc = amd_msr_idx_to_pmc, - .is_valid_msr_idx = amd_is_valid_msr_idx, + .rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc, + .is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx, .is_valid_msr = amd_is_valid_msr, .get_msr = amd_pmu_get_msr, .set_msr = amd_pmu_set_msr, diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 3e9c059099e9..7a8067ec19bb 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -111,7 +111,7 @@ static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx) } /* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */ -static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx) +static int intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); bool fixed = idx & (1u << 30); @@ -122,8 +122,8 @@ static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx) (fixed && idx >= pmu->nr_arch_fixed_counters); } -static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, - unsigned idx, u64 *mask) +static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu, + unsigned int idx, u64 *mask) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); bool fixed = idx & (1u << 30); @@ -366,8 +366,8 @@ struct kvm_pmu_ops intel_pmu_ops = { .find_fixed_event = intel_find_fixed_event, .pmc_is_enabled = intel_pmc_is_enabled, .pmc_idx_to_pmc = intel_pmc_idx_to_pmc, - .msr_idx_to_pmc = intel_msr_idx_to_pmc, - .is_valid_msr_idx = intel_is_valid_msr_idx, + .rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc, + .is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx, .is_valid_msr = intel_is_valid_msr, .get_msr = intel_pmu_get_msr, .set_msr = intel_pmu_set_msr, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 34d9048c881e..72189160bb81 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6144,7 +6144,7 @@ static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase) static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt, u32 pmc) { - return kvm_pmu_is_valid_msr_idx(emul_to_vcpu(ctxt), pmc); + return kvm_pmu_is_valid_rdpmc_ecx(emul_to_vcpu(ctxt), pmc); } static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, -- cgit v1.2.3 From c900c156c518302058a48d2efe3ca44e465cad22 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Sun, 27 Oct 2019 18:52:41 +0800 Subject: KVM: x86/vPMU: Introduce a new kvm_pmu_ops->msr_idx_to_pmc callback Introduce a new callback msr_idx_to_pmc that returns a struct kvm_pmc*, and change kvm_pmu_is_valid_msr to return ".msr_idx_to_pmc(vcpu, msr) || .is_valid_msr(vcpu, msr)" and AMD just returns false from .is_valid_msr. 
Suggested-by: Paolo Bonzini Reported-by: kbuild test robot Signed-off-by: Like Xu Signed-off-by: Paolo Bonzini --- arch/x86/kvm/pmu.c | 3 ++- arch/x86/kvm/pmu.h | 1 + arch/x86/kvm/pmu_amd.c | 15 +++++++++++---- arch/x86/kvm/vmx/pmu_intel.c | 13 +++++++++++++ 4 files changed, 27 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 33f6fe1b5c56..472b69b3b6c3 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -334,7 +334,8 @@ void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu) bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) { - return kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr); + return kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, msr) || + kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr); } int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data) diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index c4a80fe285a5..b253dd5e56cf 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -27,6 +27,7 @@ struct kvm_pmu_ops { struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx); struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu, unsigned int idx, u64 *mask); + struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr); int (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx); bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr); int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data); diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c index a4a6d8a09f70..e8609ccd0b62 100644 --- a/arch/x86/kvm/pmu_amd.c +++ b/arch/x86/kvm/pmu_amd.c @@ -199,14 +199,20 @@ static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu, } static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) +{ + /* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */ + return false; +} + +static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); - int ret = false; + struct kvm_pmc *pmc; - ret = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER) || - get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL); + pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER); + pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL); - return ret; + return pmc; } static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data) @@ -308,6 +314,7 @@ struct kvm_pmu_ops amd_pmu_ops = { .pmc_is_enabled = amd_pmc_is_enabled, .pmc_idx_to_pmc = amd_pmc_idx_to_pmc, .rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc, + .msr_idx_to_pmc = amd_msr_idx_to_pmc, .is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx, .is_valid_msr = amd_is_valid_msr, .get_msr = amd_pmu_get_msr, diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 7a8067ec19bb..dcde142327ca 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -162,6 +162,18 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) return ret; } +static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr) +{ + struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); + struct kvm_pmc *pmc; + + pmc = get_fixed_pmc(pmu, msr); + pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0); + pmc = pmc ? 
pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0); + + return pmc; +} + static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); @@ -367,6 +379,7 @@ struct kvm_pmu_ops intel_pmu_ops = { .pmc_is_enabled = intel_pmc_is_enabled, .pmc_idx_to_pmc = intel_pmc_idx_to_pmc, .rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc, + .msr_idx_to_pmc = intel_msr_idx_to_pmc, .is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx, .is_valid_msr = intel_is_valid_msr, .get_msr = intel_pmu_get_msr, -- cgit v1.2.3 From a6da0d77e98e94fa66187a5ce3cf7e11fbf95503 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Sun, 27 Oct 2019 18:52:42 +0800 Subject: KVM: x86/vPMU: Reuse perf_event to avoid unnecessary pmc_reprogram_counter The perf_event_create_kernel_counter() call in pmc_reprogram_counter() is a heavyweight and high-frequency operation, especially when the host disables the watchdog (maximum 21000000 ns), which leads to an unacceptable latency in the guest NMI handler. This limits the use of vPMUs in the guest. When a vPMC is fully enabled, the legacy reprogram_*_counter() would stop and release its existing perf_event (if any) every time, even though in most cases an almost identical perf_event will be created and configured again. For each vPMC, if the requested config ('u64 eventsel' for gp and 'u8 ctrl' for fixed) is the same as its current config AND a new sample period based on pmc->counter is accepted by the host perf interface, the current event can be reused safely, just as a newly created one would be. Otherwise, release the unwanted perf_event and reprogram a new one as usual. Calling pmc_pause_counter (disable, read and reset the event) and pmc_resume_counter (recalibrate the period and re-enable the event) as the guest expects is lightweight compared to releasing and re-creating the event on every condition. Rather than relying on the filterable event->attr or hw.config, a new 'u64 current_config' field is added to save the last originally programmed config for each vPMC. Based on this implementation, the number of calls to pmc_reprogram_counter is reduced by ~82.5% for a gp sampling event and ~99.9% for a fixed event. When multiplexing perf sampling mode is in use, the average latency of the guest NMI handler is reduced from 104923 ns to 48393 ns (~2.16x speedup). If the host disables the watchdog, the minimum latency of the guest NMI handler is improved by ~3413x (from 20407603 ns to 5979 ns) and by ~786x on average. Suggested-by: Kan Liang Signed-off-by: Like Xu Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 5 +++++ arch/x86/kvm/pmu.c | 45 +++++++++++++++++++++++++++++++++++++++-- arch/x86/kvm/pmu.h | 12 +++++++++-- arch/x86/kvm/pmu_amd.c | 1 + arch/x86/kvm/vmx/pmu_intel.c | 2 ++ 5 files changed, 61 insertions(+), 4 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 6f6b8886a8eb..a87a6c98adee 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -449,6 +449,11 @@ struct kvm_pmc { u64 eventsel; struct perf_event *perf_event; struct kvm_vcpu *vcpu; + /* + * eventsel value for general purpose counters, + * ctrl value for fixed counters.
+ */ + u64 current_config; }; struct kvm_pmu { diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 472b69b3b6c3..99565de5410a 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -138,6 +138,35 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi); } +static void pmc_pause_counter(struct kvm_pmc *pmc) +{ + u64 counter = pmc->counter; + + if (!pmc->perf_event) + return; + + /* update counter, reset event value to avoid redundant accumulation */ + counter += perf_event_pause(pmc->perf_event, true); + pmc->counter = counter & pmc_bitmask(pmc); +} + +static bool pmc_resume_counter(struct kvm_pmc *pmc) +{ + if (!pmc->perf_event) + return false; + + /* recalibrate sample period and check if it's accepted by perf core */ + if (perf_event_period(pmc->perf_event, + (-pmc->counter) & pmc_bitmask(pmc))) + return false; + + /* reuse perf_event to serve as pmc_reprogram_counter() does*/ + perf_event_enable(pmc->perf_event); + + clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi); + return true; +} + void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) { unsigned config, type = PERF_TYPE_RAW; @@ -152,7 +181,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) pmc->eventsel = eventsel; - pmc_stop_counter(pmc); + pmc_pause_counter(pmc); if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc)) return; @@ -191,6 +220,12 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) if (type == PERF_TYPE_RAW) config = eventsel & X86_RAW_EVENT_MASK; + if (pmc->current_config == eventsel && pmc_resume_counter(pmc)) + return; + + pmc_release_perf_event(pmc); + + pmc->current_config = eventsel; pmc_reprogram_counter(pmc, type, config, !(eventsel & ARCH_PERFMON_EVENTSEL_USR), !(eventsel & ARCH_PERFMON_EVENTSEL_OS), @@ -207,7 +242,7 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx) struct kvm_pmu_event_filter *filter; struct kvm *kvm = pmc->vcpu->kvm; - pmc_stop_counter(pmc); + pmc_pause_counter(pmc); if (!en_field || !pmc_is_enabled(pmc)) return; @@ -222,6 +257,12 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx) return; } + if (pmc->current_config == (u64)ctrl && pmc_resume_counter(pmc)) + return; + + pmc_release_perf_event(pmc); + + pmc->current_config = (u64)ctrl; pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE, kvm_x86_ops->pmu_ops->find_fixed_event(idx), !(en_field & 0x2), /* exclude user */ diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index b253dd5e56cf..7eba298587dc 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -56,12 +56,20 @@ static inline u64 pmc_read_counter(struct kvm_pmc *pmc) return counter & pmc_bitmask(pmc); } -static inline void pmc_stop_counter(struct kvm_pmc *pmc) +static inline void pmc_release_perf_event(struct kvm_pmc *pmc) { if (pmc->perf_event) { - pmc->counter = pmc_read_counter(pmc); perf_event_release_kernel(pmc->perf_event); pmc->perf_event = NULL; + pmc->current_config = 0; + } +} + +static inline void pmc_stop_counter(struct kvm_pmc *pmc) +{ + if (pmc->perf_event) { + pmc->counter = pmc_read_counter(pmc); + pmc_release_perf_event(pmc); } } diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c index e8609ccd0b62..e87d34136047 100644 --- a/arch/x86/kvm/pmu_amd.c +++ b/arch/x86/kvm/pmu_amd.c @@ -292,6 +292,7 @@ static void amd_pmu_init(struct kvm_vcpu *vcpu) pmu->gp_counters[i].type = KVM_PMC_GP; pmu->gp_counters[i].vcpu = vcpu; pmu->gp_counters[i].idx = i; + 
pmu->gp_counters[i].current_config = 0; } } diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index dcde142327ca..9b1ddc42f604 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -340,12 +340,14 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu) pmu->gp_counters[i].type = KVM_PMC_GP; pmu->gp_counters[i].vcpu = vcpu; pmu->gp_counters[i].idx = i; + pmu->gp_counters[i].current_config = 0; } for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) { pmu->fixed_counters[i].type = KVM_PMC_FIXED; pmu->fixed_counters[i].vcpu = vcpu; pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED; + pmu->fixed_counters[i].current_config = 0; } } -- cgit v1.2.3 From b35e5548b41131eb06de041af2f5fb0890d96f96 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Sun, 27 Oct 2019 18:52:43 +0800 Subject: KVM: x86/vPMU: Add lazy mechanism to release perf_event per vPMC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, a host perf_event is created for a vPMC functionality emulation. It’s unpredictable to determine if a disabled perf_event will be reused. If they are disabled and are not reused for a considerable period of time, those obsolete perf_events would increase host context switch overhead that could have been avoided. If the guest doesn't WRMSR any of the vPMC's MSRs during an entire vcpu sched time slice, and its independent enable bit of the vPMC isn't set, we can predict that the guest has finished the use of this vPMC, and then do request KVM_REQ_PMU in kvm_arch_sched_in and release those perf_events in the first call of kvm_pmu_handle_event() after the vcpu is scheduled in. This lazy mechanism delays the event release time to the beginning of the next scheduled time slice if vPMC's MSRs aren't changed during this time slice. If guest comes back to use this vPMC in next time slice, a new perf event would be re-created via perf_event_create_kernel_counter() as usual. Suggested-by: Wei Wang Suggested-by: Paolo Bonzini Signed-off-by: Like Xu Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 14 +++++++++++ arch/x86/kvm/pmu.c | 55 +++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/pmu.h | 2 ++ arch/x86/kvm/pmu_amd.c | 1 + arch/x86/kvm/vmx/pmu_intel.c | 6 +++++ arch/x86/kvm/x86.c | 6 +++++ 6 files changed, 84 insertions(+) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index a87a6c98adee..20bb2fc0883a 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -473,6 +473,20 @@ struct kvm_pmu { struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED]; struct irq_work irq_work; DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX); + DECLARE_BITMAP(all_valid_pmc_idx, X86_PMC_IDX_MAX); + DECLARE_BITMAP(pmc_in_use, X86_PMC_IDX_MAX); + + /* + * The gate to release perf_events not marked in + * pmc_in_use only once in a vcpu time slice. + */ + bool need_cleanup; + + /* + * The total number of programmed perf_events and it helps to avoid + * redundant check before cleanup if guest don't use vPMU at all. 
+ */ + u8 event_count; }; struct kvm_pmu_ops; diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 99565de5410a..d5e6d5b3f06f 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -135,6 +135,7 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, } pmc->perf_event = event; + pmc_to_pmu(pmc)->event_count++; clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi); } @@ -304,6 +305,14 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu) reprogram_counter(pmu, bit); } + + /* + * Unused perf_events are only released if the corresponding MSRs + * weren't accessed during the last vCPU time slice. kvm_arch_sched_in + * triggers KVM_REQ_PMU if cleanup is needed. + */ + if (unlikely(pmu->need_cleanup)) + kvm_pmu_cleanup(vcpu); } /* check if idx is a valid index to access PMU */ @@ -379,6 +388,15 @@ bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr); } +static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr) +{ + struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); + struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, msr); + + if (pmc) + __set_bit(pmc->idx, pmu->pmc_in_use); +} + int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data) { return kvm_x86_ops->pmu_ops->get_msr(vcpu, msr, data); @@ -386,6 +404,7 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data) int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { + kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index); return kvm_x86_ops->pmu_ops->set_msr(vcpu, msr_info); } @@ -413,9 +432,45 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu) memset(pmu, 0, sizeof(*pmu)); kvm_x86_ops->pmu_ops->init(vcpu); init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn); + pmu->event_count = 0; + pmu->need_cleanup = false; kvm_pmu_refresh(vcpu); } +static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc) +{ + struct kvm_pmu *pmu = pmc_to_pmu(pmc); + + if (pmc_is_fixed(pmc)) + return fixed_ctrl_field(pmu->fixed_ctr_ctrl, + pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3; + + return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE; +} + +/* Release perf_events for vPMCs that have been unused for a full time slice. 
*/ +void kvm_pmu_cleanup(struct kvm_vcpu *vcpu) +{ + struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); + struct kvm_pmc *pmc = NULL; + DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX); + int i; + + pmu->need_cleanup = false; + + bitmap_andnot(bitmask, pmu->all_valid_pmc_idx, + pmu->pmc_in_use, X86_PMC_IDX_MAX); + + for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) { + pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, i); + + if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc)) + pmc_stop_counter(pmc); + } + + bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX); +} + void kvm_pmu_destroy(struct kvm_vcpu *vcpu) { kvm_pmu_reset(vcpu); diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index 7eba298587dc..b7a625874203 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -62,6 +62,7 @@ static inline void pmc_release_perf_event(struct kvm_pmc *pmc) perf_event_release_kernel(pmc->perf_event); pmc->perf_event = NULL; pmc->current_config = 0; + pmc_to_pmu(pmc)->event_count--; } } @@ -126,6 +127,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); void kvm_pmu_refresh(struct kvm_vcpu *vcpu); void kvm_pmu_reset(struct kvm_vcpu *vcpu); void kvm_pmu_init(struct kvm_vcpu *vcpu); +void kvm_pmu_cleanup(struct kvm_vcpu *vcpu); void kvm_pmu_destroy(struct kvm_vcpu *vcpu); int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp); diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c index e87d34136047..ce0b10fe5e2b 100644 --- a/arch/x86/kvm/pmu_amd.c +++ b/arch/x86/kvm/pmu_amd.c @@ -279,6 +279,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu) pmu->counter_bitmask[KVM_PMC_FIXED] = 0; pmu->nr_arch_fixed_counters = 0; pmu->global_status = 0; + bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters); } static void amd_pmu_init(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 9b1ddc42f604..b5a16379f534 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -46,6 +46,7 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data) if (old_ctrl == new_ctrl) continue; + __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use); reprogram_fixed_counter(pmc, new_ctrl, i); } @@ -329,6 +330,11 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) && (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED; + + bitmap_set(pmu->all_valid_pmc_idx, + 0, pmu->nr_arch_gp_counters); + bitmap_set(pmu->all_valid_pmc_idx, + INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters); } static void intel_pmu_init(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 72189160bb81..8db7275d313f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9455,7 +9455,13 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) { + struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); + vcpu->arch.l1tf_flush_l1d = true; + if (pmu->version && unlikely(pmu->event_count)) { + pmu->need_cleanup = true; + kvm_make_request(KVM_REQ_PMU, vcpu); + } kvm_x86_ops->sched_in(vcpu, cpu); } -- cgit v1.2.3 From 1924242b2abadfb1144c3c22083fd6f71caadd64 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Tue, 12 Nov 2019 20:33:00 +0200 Subject: KVM: x86: Optimization: Requst TLB flush in fast_cr3_switch() instead of do it directly When KVM emulates a nested VMEntry (L1->L2 VMEntry), it switches mmu root page. 
If nEPT is used, this will happen from kvm_init_shadow_ept_mmu()->__kvm_mmu_new_cr3(); otherwise it will happen from nested_vmx_load_cr3()->kvm_mmu_new_cr3(). In either case, __kvm_mmu_new_cr3() will use fast_cr3_switch() in an attempt to switch to a previously cached root page. In case fast_cr3_switch() finds a matching cached root page, it will set it in mmu->root_hpa and request KVM_REQ_LOAD_CR3 so that on the next entry to the guest, KVM will set the root HPA in the appropriate hardware fields (e.g. vmcs->eptp). In addition, fast_cr3_switch() calls kvm_x86_ops->tlb_flush() in order to flush the TLB, as the MMU root page was replaced. This works because mmu->root_hpa, which vmx_flush_tlb() uses, was already replaced in cached_root_available(). However, this may result in unnecessary INVEPT execution because a KVM_REQ_TLB_FLUSH may have already been requested, for example by prepare_vmcs02() in case L1 doesn't use VPID. Therefore, change fast_cr3_switch() to just request a TLB flush on the next entry to the guest. Reviewed-by: Bhavesh Davda Signed-off-by: Liran Alon Reviewed-by: Vitaly Kuznetsov Reviewed-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 24c23c66b226..150d982ec1d2 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -4295,7 +4295,7 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3, kvm_make_request(KVM_REQ_LOAD_CR3, vcpu); if (!skip_tlb_flush) { kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); - kvm_x86_ops->tlb_flush(vcpu, true); + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); } /* -- cgit v1.2.3 From f245eeaddc3e442b761de8e9d1b93893a999f9aa Mon Sep 17 00:00:00 2001 From: Wainer dos Santos Moschetta Date: Tue, 12 Nov 2019 09:21:11 -0500 Subject: selftests: kvm: Simplify loop in kvm_create_max_vcpus test In the kvm_create_max_vcpus test, remove an unneeded local variable in the loop that adds vCPUs to the VM. Signed-off-by: Wainer dos Santos Moschetta Reviewed-by: Krish Sadhukhan Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/kvm_create_max_vcpus.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c index 231d79e57774..6f38c3dc0d56 100644 --- a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c +++ b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c @@ -29,12 +29,9 @@ void test_vcpu_creation(int first_vcpu_id, int num_vcpus) vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR); - for (i = 0; i < num_vcpus; i++) { - int vcpu_id = first_vcpu_id + i; - + for (i = first_vcpu_id; i < first_vcpu_id + num_vcpus; i++) /* This asserts that the vCPU was created. */ - vm_vcpu_add(vm, vcpu_id); - } + vm_vcpu_add(vm, i); kvm_vm_free(vm); } -- cgit v1.2.3 From 9477f4449b0b011ce1d058c09ec450bfcdaab784 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 13 Nov 2019 16:17:15 -0800 Subject: KVM: VMX: Add helper to check reserved bits in IA32_PERF_GLOBAL_CTRL Create a helper function to check the validity of a proposed value for IA32_PERF_GLOBAL_CTRL from the existing check in intel_pmu_set_msr(). Per Intel's SDM, the reserved bits in IA32_PERF_GLOBAL_CTRL must be cleared for the corresponding host/guest state fields.
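To make the reserved-bit rule concrete, a hedged sketch (not the kernel helper itself) of which IA32_PERF_GLOBAL_CTRL bits an architectural PMU treats as writable:

    #include <linux/types.h>

    /*
     * Illustration only: one enable bit per general-purpose counter in the
     * low bits, one per fixed counter starting at bit 32; everything else
     * is reserved and must be zero, which is the condition the new
     * kvm_valid_perf_global_ctrl() helper checks via pmu->global_ctrl_mask.
     */
    static bool global_ctrl_value_ok(int nr_gp, int nr_fixed, u64 data)
    {
            u64 valid = ((1ULL << nr_gp) - 1) |
                        (((1ULL << nr_fixed) - 1) << 32);

            return !(data & ~valid);
    }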
Suggested-by: Jim Mattson Co-developed-by: Krish Sadhukhan Signed-off-by: Krish Sadhukhan Signed-off-by: Oliver Upton Reviewed-by: Jim Mattson Reviewed-by: Peter Shier Signed-off-by: Paolo Bonzini --- arch/x86/kvm/pmu.h | 6 ++++++ arch/x86/kvm/vmx/pmu_intel.c | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index b7a625874203..7ebb62326c14 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -89,6 +89,12 @@ static inline bool pmc_is_enabled(struct kvm_pmc *pmc) return kvm_x86_ops->pmu_ops->pmc_is_enabled(pmc); } +static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu, + u64 data) +{ + return !(pmu->global_ctrl_mask & data); +} + /* returns general purpose PMC with the specified MSR. Note that it can be * used for both PERFCTRn and EVNTSELn; that is why it accepts base as a * paramenter to tell them apart. diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index b5a16379f534..0990a12a76a8 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -236,7 +236,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_CORE_PERF_GLOBAL_CTRL: if (pmu->global_ctrl == data) return 0; - if (!(data & pmu->global_ctrl_mask)) { + if (kvm_valid_perf_global_ctrl(pmu, data)) { global_ctrl_changed(pmu, data); return 0; } -- cgit v1.2.3 From bfc6ad6ab3563b4151bbcfe162c612930a3e0854 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 13 Nov 2019 16:17:16 -0800 Subject: KVM: nVMX: Check GUEST_IA32_PERF_GLOBAL_CTRL on VM-Entry Add condition to nested_vmx_check_guest_state() to check the validity of GUEST_IA32_PERF_GLOBAL_CTRL. Per Intel's SDM Vol 3 26.3.1.1: If the "load IA32_PERF_GLOBAL_CTRL" VM-entry control is 1, bits reserved in the IA32_PERF_GLOBAL_CTRL MSR must be 0 in the field for that register. Suggested-by: Jim Mattson Co-developed-by: Krish Sadhukhan Signed-off-by: Krish Sadhukhan Signed-off-by: Oliver Upton Reviewed-by: Jim Mattson Reviewed-by: Peter Shier Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index bdb9b3028250..9190da3579c4 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -10,6 +10,7 @@ #include "hyperv.h" #include "mmu.h" #include "nested.h" +#include "pmu.h" #include "trace.h" #include "x86.h" @@ -2790,6 +2791,11 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, return -EINVAL; } + if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && + CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu), + vmcs12->guest_ia32_perf_global_ctrl))) + return -EINVAL; + /* * If the load IA32_EFER VM-entry control is 1, the following checks * are performed on the field for the IA32_EFER MSR: -- cgit v1.2.3 From c547cb6f78cf5dc8f029459b115ef44c56a2a776 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 13 Nov 2019 16:17:17 -0800 Subject: KVM: nVMX: Check HOST_IA32_PERF_GLOBAL_CTRL on VM-Entry Add a consistency check on nested vm-entry for host's IA32_PERF_GLOBAL_CTRL from vmcs12. 
Per Intel's SDM Vol 3 26.2.2: If the "load IA32_PERF_GLOBAL_CTRL" VM-exit control is 1, bits reserved in the IA32_PERF_GLOBAL_CTRL MSR must be 0 in the field for that register" Suggested-by: Jim Mattson Co-developed-by: Krish Sadhukhan Signed-off-by: Krish Sadhukhan Signed-off-by: Oliver Upton Reviewed-by: Jim Mattson Reviewed-by: Peter Shier Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 9190da3579c4..ac896e92de23 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -2676,6 +2676,11 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, CC(!kvm_pat_valid(vmcs12->host_ia32_pat))) return -EINVAL; + if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) && + CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu), + vmcs12->host_ia32_perf_global_ctrl))) + return -EINVAL; + #ifdef CONFIG_X86_64 ia32e = !!(vcpu->arch.efer & EFER_LMA); #else -- cgit v1.2.3 From 458151f65b4d8acfc7403b59fd9694ca15dbfe2e Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 13 Nov 2019 16:17:18 -0800 Subject: KVM: nVMX: Use kvm_set_msr to load IA32_PERF_GLOBAL_CTRL on VM-Exit The existing implementation for loading the IA32_PERF_GLOBAL_CTRL MSR on VM-exit was incorrect, as the next call to atomic_switch_perf_msrs() could cause this value to be overwritten. Instead, call kvm_set_msr() which will allow atomic_switch_perf_msrs() to correctly set the values. Define a macro, SET_MSR_OR_WARN(), to set the MSR with kvm_set_msr() and WARN on failure. Suggested-by: Jim Mattson Co-developed-by: Krish Sadhukhan Signed-off-by: Krish Sadhukhan Signed-off-by: Oliver Upton Reviewed-by: Jim Mattson Reviewed-by: Peter Shier Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index ac896e92de23..75b7091e4a88 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -28,6 +28,16 @@ module_param(nested_early_check, bool, S_IRUGO); failed; \ }) +#define SET_MSR_OR_WARN(vcpu, idx, data) \ +({ \ + bool failed = kvm_set_msr(vcpu, idx, data); \ + if (failed) \ + pr_warn_ratelimited( \ + "%s cannot write MSR (0x%x, 0x%llx)\n", \ + __func__, idx, data); \ + failed; \ +}) + /* * Hyper-V requires all of these, so mark them as supported even though * they are just treated the same as all-context. @@ -3879,8 +3889,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, vcpu->arch.pat = vmcs12->host_ia32_pat; } if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) - vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL, - vmcs12->host_ia32_perf_global_ctrl); + SET_MSR_OR_WARN(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, + vmcs12->host_ia32_perf_global_ctrl); /* Set L1 segment info according to Intel SDM 27.5.2 Loading Host Segment and Descriptor-Table Registers */ -- cgit v1.2.3 From 71f7347025bf10f5c0b48e149898df57b7f3d414 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 13 Nov 2019 16:17:19 -0800 Subject: KVM: nVMX: Load GUEST_IA32_PERF_GLOBAL_CTRL MSR on VM-Entry Add condition to prepare_vmcs02 which loads IA32_PERF_GLOBAL_CTRL on VM-entry if the "load IA32_PERF_GLOBAL_CTRL" bit on the VM-entry control is set. Use SET_MSR_OR_WARN() rather than directly writing to the field to avoid overwrite by atomic_switch_perf_msrs(). 
Suggested-by: Jim Mattson Co-developed-by: Krish Sadhukhan Signed-off-by: Krish Sadhukhan Signed-off-by: Oliver Upton Reviewed-by: Jim Mattson Reviewed-by: Peter Shier Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 75b7091e4a88..abef0dbe94bb 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -2452,6 +2452,11 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, if (!enable_ept) vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; + if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && + SET_MSR_OR_WARN(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, + vmcs12->guest_ia32_perf_global_ctrl)) + return -EINVAL; + kvm_rsp_write(vcpu, vmcs12->guest_rsp); kvm_rip_write(vcpu, vmcs12->guest_rip); return 0; -- cgit v1.2.3 From 03a8871add95213827e2bea84c12133ae5df952e Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 13 Nov 2019 16:17:20 -0800 Subject: KVM: nVMX: Expose load IA32_PERF_GLOBAL_CTRL VM-{Entry,Exit} control The "load IA32_PERF_GLOBAL_CTRL" bit for VM-entry and VM-exit should only be exposed to the guest if IA32_PERF_GLOBAL_CTRL is a valid MSR. Create a new helper to allow pmu_refresh() to update the VM-Entry and VM-Exit controls to ensure PMU values are initialized when performing the is_valid_msr() check. Suggested-by: Jim Mattson Co-developed-by: Krish Sadhukhan Signed-off-by: Krish Sadhukhan Signed-off-by: Oliver Upton Reviewed-by: Jim Mattson Reviewed-by: Peter Shier Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 21 +++++++++++++++++++++ arch/x86/kvm/vmx/nested.h | 1 + arch/x86/kvm/vmx/pmu_intel.c | 3 +++ 3 files changed, 25 insertions(+) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index abef0dbe94bb..c6f5e5821d4c 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -4359,6 +4359,27 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, return 0; } +void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx; + + if (!nested_vmx_allowed(vcpu)) + return; + + vmx = to_vmx(vcpu); + if (kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) { + vmx->nested.msrs.entry_ctls_high |= + VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; + vmx->nested.msrs.exit_ctls_high |= + VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; + } else { + vmx->nested.msrs.entry_ctls_high &= + ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; + vmx->nested.msrs.exit_ctls_high &= + ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; + } +} + static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer) { gva_t gva; diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h index 4cf1d40da15f..19e6015722a9 100644 --- a/arch/x86/kvm/vmx/nested.h +++ b/arch/x86/kvm/vmx/nested.h @@ -22,6 +22,7 @@ int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata); int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, u32 vmx_instruction_info, bool wr, int len, gva_t *ret); +void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu); static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu) { diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 0990a12a76a8..7023138b1cb0 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -15,6 +15,7 @@ #include "x86.h" 
#include "cpuid.h" #include "lapic.h" +#include "nested.h" #include "pmu.h" static struct kvm_event_hw_type_mapping intel_arch_events[] = { @@ -335,6 +336,8 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) 0, pmu->nr_arch_gp_counters); bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters); + + nested_vmx_pmu_entry_exit_ctls_update(vcpu); } static void intel_pmu_init(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From d4069dbeb51e34e1db0458a7455e509daaaa529a Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 15 Nov 2019 11:36:10 +0100 Subject: KVM: nVMX: mark functions in the header as "static inline" Correct a small inaccuracy in the shattering of vmx.c, which becomes visible now that pmu_intel.c includes nested.h. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h index 19e6015722a9..b9e519840f28 100644 --- a/arch/x86/kvm/vmx/nested.h +++ b/arch/x86/kvm/vmx/nested.h @@ -246,7 +246,7 @@ static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1) return ((val & fixed1) | fixed0) == val; } -static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val) +static inline bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val) { u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0; u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1; @@ -260,7 +260,7 @@ static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val) return fixed_bits_valid(val, fixed0, fixed1); } -static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val) +static inline bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val) { u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0; u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1; @@ -268,7 +268,7 @@ static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val) return fixed_bits_valid(val, fixed0, fixed1); } -static bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val) +static inline bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val) { u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0; u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1; -- cgit v1.2.3 From 365d3d55d6019233c02d68dbd3d2dfde1b8a1467 Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Thu, 7 Nov 2019 21:14:36 -0800 Subject: kvm: nested: Introduce read_and_check_msr_entry() Add the function read_and_check_msr_entry() which just pulls some code out of nested_vmx_store_msr(). This will be useful as reusable code in upcoming patches. 
Reviewed-by: Liran Alon Reviewed-by: Jim Mattson Signed-off-by: Aaron Lewis Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 35 ++++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index c6f5e5821d4c..3ef529cc72fb 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -940,6 +940,26 @@ fail: return i + 1; } +static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i, + struct vmx_msr_entry *e) +{ + if (kvm_vcpu_read_guest(vcpu, + gpa + i * sizeof(*e), + e, 2 * sizeof(u32))) { + pr_debug_ratelimited( + "%s cannot read MSR entry (%u, 0x%08llx)\n", + __func__, i, gpa + i * sizeof(*e)); + return false; + } + if (nested_vmx_store_msr_check(vcpu, e)) { + pr_debug_ratelimited( + "%s check failed (%u, 0x%x, 0x%x)\n", + __func__, i, e->index, e->reserved); + return false; + } + return true; +} + static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) { u64 data; @@ -951,20 +971,9 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) if (unlikely(i >= max_msr_list_size)) return -EINVAL; - if (kvm_vcpu_read_guest(vcpu, - gpa + i * sizeof(e), - &e, 2 * sizeof(u32))) { - pr_debug_ratelimited( - "%s cannot read MSR entry (%u, 0x%08llx)\n", - __func__, i, gpa + i * sizeof(e)); + if (!read_and_check_msr_entry(vcpu, gpa, i, &e)) return -EINVAL; - } - if (nested_vmx_store_msr_check(vcpu, &e)) { - pr_debug_ratelimited( - "%s check failed (%u, 0x%x, 0x%x)\n", - __func__, i, e.index, e.reserved); - return -EINVAL; - } + if (kvm_get_msr(vcpu, e.index, &data)) { pr_debug_ratelimited( "%s cannot read MSR (%u, 0x%x)\n", -- cgit v1.2.3 From 7cfe0526fd379e4ff9c3dcf933c1966a3a635013 Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Thu, 7 Nov 2019 21:14:37 -0800 Subject: kvm: vmx: Rename NR_AUTOLOAD_MSRS to NR_LOADSTORE_MSRS Rename NR_AUTOLOAD_MSRS to NR_LOADSTORE_MSRS. This needs to be done due to the addition of the MSR-autostore area that will be added in a future patch. After that the name AUTOLOAD will no longer make sense. Reviewed-by: Jim Mattson Signed-off-by: Aaron Lewis Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 4 ++-- arch/x86/kvm/vmx/vmx.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 765086756177..ba0124e66db7 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -938,8 +938,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, if (!entry_only) j = find_msr(&m->host, msr); - if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) || - (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) { + if ((i < 0 && m->guest.nr == NR_LOADSTORE_MSRS) || + (j < 0 && m->host.nr == NR_LOADSTORE_MSRS)) { printk_once(KERN_WARNING "Not enough msr switch entries. 
" "Can't add msr %x\n", msr); return; diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 43331dfafffe..73ff03091d29 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -22,11 +22,11 @@ extern u32 get_umwait_control_msr(void); #define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4)) -#define NR_AUTOLOAD_MSRS 8 +#define NR_LOADSTORE_MSRS 8 struct vmx_msrs { unsigned int nr; - struct vmx_msr_entry val[NR_AUTOLOAD_MSRS]; + struct vmx_msr_entry val[NR_LOADSTORE_MSRS]; }; struct shared_msr_entry { -- cgit v1.2.3 From ef0fbcac3f2aadb10d9a6c461eabc7dd01cbed9b Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Thu, 7 Nov 2019 21:14:38 -0800 Subject: kvm: vmx: Rename function find_msr() to vmx_find_msr_index() Rename function find_msr() to vmx_find_msr_index() in preparation for an upcoming patch where we export it and use it in nested.c. Reviewed-by: Jim Mattson Signed-off-by: Aaron Lewis Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index ba0124e66db7..7b191963dde1 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -833,7 +833,7 @@ static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, vm_exit_controls_clearbit(vmx, exit); } -static int find_msr(struct vmx_msrs *m, unsigned int msr) +static int vmx_find_msr_index(struct vmx_msrs *m, u32 msr) { unsigned int i; @@ -867,7 +867,7 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) } break; } - i = find_msr(&m->guest, msr); + i = vmx_find_msr_index(&m->guest, msr); if (i < 0) goto skip_guest; --m->guest.nr; @@ -875,7 +875,7 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); skip_guest: - i = find_msr(&m->host, msr); + i = vmx_find_msr_index(&m->host, msr); if (i < 0) return; @@ -934,9 +934,9 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, wrmsrl(MSR_IA32_PEBS_ENABLE, 0); } - i = find_msr(&m->guest, msr); + i = vmx_find_msr_index(&m->guest, msr); if (!entry_only) - j = find_msr(&m->host, msr); + j = vmx_find_msr_index(&m->host, msr); if ((i < 0 && m->guest.nr == NR_LOADSTORE_MSRS) || (j < 0 && m->host.nr == NR_LOADSTORE_MSRS)) { -- cgit v1.2.3 From 662f1d1d19317e792ccfc53dee625c02dcefac58 Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Thu, 7 Nov 2019 21:14:39 -0800 Subject: KVM: nVMX: Add support for capturing highest observable L2 TSC The L1 hypervisor may include the IA32_TIME_STAMP_COUNTER MSR in the vmcs12 MSR VM-exit MSR-store area as a way of determining the highest TSC value that might have been observed by L2 prior to VM-exit. The current implementation does not capture a very tight bound on this value. To tighten the bound, add the IA32_TIME_STAMP_COUNTER MSR to the vmcs02 VM-exit MSR-store area whenever it appears in the vmcs12 VM-exit MSR-store area. When L0 processes the vmcs12 VM-exit MSR-store area during the emulation of an L2->L1 VM-exit, special-case the IA32_TIME_STAMP_COUNTER MSR, using the value stored in the vmcs02 VM-exit MSR-store area to derive the value to be stored in the vmcs12 VM-exit MSR-store area. 
Reviewed-by: Liran Alon Reviewed-by: Jim Mattson Signed-off-by: Aaron Lewis Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 101 +++++++++++++++++++++++++++++++++++++++++++--- arch/x86/kvm/vmx/vmx.c | 2 +- arch/x86/kvm/vmx/vmx.h | 5 +++ 3 files changed, 101 insertions(+), 7 deletions(-) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 3ef529cc72fb..60d42ce42403 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -940,6 +940,37 @@ fail: return i + 1; } +static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu, + u32 msr_index, + u64 *data) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + /* + * If the L0 hypervisor stored a more accurate value for the TSC that + * does not include the time taken for emulation of the L2->L1 + * VM-exit in L0, use the more accurate value. + */ + if (msr_index == MSR_IA32_TSC) { + int index = vmx_find_msr_index(&vmx->msr_autostore.guest, + MSR_IA32_TSC); + + if (index >= 0) { + u64 val = vmx->msr_autostore.guest.val[index].value; + + *data = kvm_read_l1_tsc(vcpu, val); + return true; + } + } + + if (kvm_get_msr(vcpu, msr_index, data)) { + pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__, + msr_index); + return false; + } + return true; +} + static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i, struct vmx_msr_entry *e) { @@ -974,12 +1005,9 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) if (!read_and_check_msr_entry(vcpu, gpa, i, &e)) return -EINVAL; - if (kvm_get_msr(vcpu, e.index, &data)) { - pr_debug_ratelimited( - "%s cannot read MSR (%u, 0x%x)\n", - __func__, i, e.index); + if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data)) return -EINVAL; - } + if (kvm_vcpu_write_guest(vcpu, gpa + i * sizeof(e) + offsetof(struct vmx_msr_entry, value), @@ -993,6 +1021,60 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) return 0; } +static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + u32 count = vmcs12->vm_exit_msr_store_count; + u64 gpa = vmcs12->vm_exit_msr_store_addr; + struct vmx_msr_entry e; + u32 i; + + for (i = 0; i < count; i++) { + if (!read_and_check_msr_entry(vcpu, gpa, i, &e)) + return false; + + if (e.index == msr_index) + return true; + } + return false; +} + +static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu, + u32 msr_index) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct vmx_msrs *autostore = &vmx->msr_autostore.guest; + bool in_vmcs12_store_list; + int msr_autostore_index; + bool in_autostore_list; + int last; + + msr_autostore_index = vmx_find_msr_index(autostore, msr_index); + in_autostore_list = msr_autostore_index >= 0; + in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index); + + if (in_vmcs12_store_list && !in_autostore_list) { + if (autostore->nr == NR_LOADSTORE_MSRS) { + /* + * Emulated VMEntry does not fail here. Instead a less + * accurate value will be returned by + * nested_vmx_get_vmexit_msr_value() using kvm_get_msr() + * instead of reading the value from the vmcs02 VMExit + * MSR-store area. + */ + pr_warn_ratelimited( + "Not enough msr entries in msr_autostore. 
Can't add msr %x\n", + msr_index); + return; + } + last = autostore->nr++; + autostore->val[last].index = msr_index; + } else if (!in_vmcs12_store_list && in_autostore_list) { + last = --autostore->nr; + autostore->val[msr_autostore_index] = autostore->val[last]; + } +} + static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val) { unsigned long invalid_mask; @@ -2038,7 +2120,7 @@ static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) * addresses are constant (for vmcs02), the counts can change based * on L2's behavior, e.g. switching to/from long mode. */ - vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); + vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val)); vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); @@ -2306,6 +2388,13 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); } + /* + * Make sure the msr_autostore list is up to date before we set the + * count in the vmcs02. + */ + prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC); + + vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 7b191963dde1..621142e55e28 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -833,7 +833,7 @@ static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, vm_exit_controls_clearbit(vmx, exit); } -static int vmx_find_msr_index(struct vmx_msrs *m, u32 msr) +int vmx_find_msr_index(struct vmx_msrs *m, u32 msr) { unsigned int i; diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 73ff03091d29..90b97d9d4f7d 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -233,6 +233,10 @@ struct vcpu_vmx { struct vmx_msrs host; } msr_autoload; + struct msr_autostore { + struct vmx_msrs guest; + } msr_autostore; + struct { int vm86_active; ulong save_rflags; @@ -337,6 +341,7 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu); struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr); void pt_update_intercept_for_msr(struct vcpu_vmx *vmx); void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp); +int vmx_find_msr_index(struct vmx_msrs *m, u32 msr); #define POSTED_INTR_ON 0 #define POSTED_INTR_SN 1 -- cgit v1.2.3 From 8750e72a79dda2f665ce17b62049f4d62130d991 Mon Sep 17 00:00:00 2001 From: Radim Krčmář Date: Thu, 7 Nov 2019 07:53:42 -0500 Subject: KVM: remember position in kvm->vcpus array MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fetching an index for any vcpu in kvm->vcpus array by traversing the entire array everytime is costly. This patch remembers the position of each vcpu in kvm->vcpus array by storing it in vcpus_idx under kvm_vcpu structure. 
Signed-off-by: Radim Krčmář Signed-off-by: Nitesh Narayan Lal Signed-off-by: Paolo Bonzini --- include/linux/kvm_host.h | 11 +++-------- virt/kvm/kvm_main.c | 5 +++-- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index a817e446c9aa..70b2296fb2ae 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -266,7 +266,8 @@ struct kvm_vcpu { struct preempt_notifier preempt_notifier; #endif int cpu; - int vcpu_id; + int vcpu_id; /* id given by userspace at creation */ + int vcpu_idx; /* index in kvm->vcpus array */ int srcu_idx; int mode; u64 requests; @@ -570,13 +571,7 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu) { - struct kvm_vcpu *tmp; - int idx; - - kvm_for_each_vcpu(idx, tmp, vcpu->kvm) - if (tmp == vcpu) - return idx; - BUG(); + return vcpu->vcpu_idx; } #define kvm_for_each_memslot(memslot, slots) \ diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index b8534c6b8cf6..a158a93323f7 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2685,7 +2685,8 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) goto unlock_vcpu_destroy; } - BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]); + vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus); + BUG_ON(kvm->vcpus[vcpu->vcpu_idx]); /* Now it's all set up, let userspace reach it */ kvm_get_kvm(kvm); @@ -2695,7 +2696,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) goto unlock_vcpu_destroy; } - kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu; + kvm->vcpus[vcpu->vcpu_idx] = vcpu; /* * Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus -- cgit v1.2.3 From 7ee30bc132c683d06a6d9e360e39e483e3990708 Mon Sep 17 00:00:00 2001 From: Nitesh Narayan Lal Date: Thu, 7 Nov 2019 07:53:43 -0500 Subject: KVM: x86: deliver KVM IOAPIC scan request to target vCPUs In IOAPIC fixed delivery mode instead of flushing the scan requests to all vCPUs, we should only send the requests to vCPUs specified within the destination field. This patch introduces kvm_get_dest_vcpus_mask() API which retrieves an array of target vCPUs by using kvm_apic_map_get_dest_lapic() and then based on the vcpus_idx, it sets the bit in a bitmap. However, if the above fails kvm_get_dest_vcpus_mask() finds the target vCPUs by traversing all available vCPUs. Followed by setting the bits in the bitmap. If we had different vCPUs in the previous request for the same redirection table entry then bits corresponding to these vCPUs are also set. This to done to keep ioapic_handled_vectors synchronized. This bitmap is then eventually passed on to kvm_make_vcpus_request_mask() to generate a masked request only for the target vCPUs. This would enable us to reduce the latency overhead on isolated vCPUs caused by the IPI to process due to KVM_REQ_IOAPIC_SCAN. 
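In outline, the fixed-delivery path in ioapic_write_indirect() now builds a vCPU bitmap and sends a masked request instead of broadcasting. The fragment below is a simplified sketch of that flow: irq carries the vector/destination taken from the redirection table entry being written, the surrounding declarations are as in the diff, and (as a later fix in this series adds) the on-stack bitmap must be cleared before use:

    /* Collect vCPUs matched by the new destination ... */
    kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq, &vcpu_bitmap);

    /* ... and by the previous one, so ioapic_handled_vectors stays in sync. */
    irq.dest_id = old_dest_id;
    irq.dest_mode = old_dest_mode;
    kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq, &vcpu_bitmap);

    /* KVM_REQ_SCAN_IOAPIC is delivered only to the vCPUs set above. */
    kvm_make_scan_ioapic_request_mask(ioapic->kvm, &vcpu_bitmap);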
Suggested-by: Marcelo Tosatti Signed-off-by: Nitesh Narayan Lal Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/ioapic.c | 33 +++++++++++++++++++++++++++++-- arch/x86/kvm/lapic.c | 44 +++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/lapic.h | 3 +++ arch/x86/kvm/x86.c | 14 +++++++++++++ include/linux/kvm_host.h | 2 ++ 6 files changed, 96 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 20bb2fc0883a..898ab9eb4dc8 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1588,6 +1588,8 @@ bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip); void kvm_make_mclock_inprogress_request(struct kvm *kvm); void kvm_make_scan_ioapic_request(struct kvm *kvm); +void kvm_make_scan_ioapic_request_mask(struct kvm *kvm, + unsigned long *vcpu_bitmap); void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, struct kvm_async_pf *work); diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index d859ae8890d0..ce30ef23c86b 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -271,8 +271,9 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) { unsigned index; bool mask_before, mask_after; - int old_remote_irr, old_delivery_status; union kvm_ioapic_redirect_entry *e; + unsigned long vcpu_bitmap; + int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode; switch (ioapic->ioregsel) { case IOAPIC_REG_VERSION: @@ -296,6 +297,8 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) /* Preserve read-only fields */ old_remote_irr = e->fields.remote_irr; old_delivery_status = e->fields.delivery_status; + old_dest_id = e->fields.dest_id; + old_dest_mode = e->fields.dest_mode; if (ioapic->ioregsel & 1) { e->bits &= 0xffffffff; e->bits |= (u64) val << 32; @@ -321,7 +324,33 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG && ioapic->irr & (1 << index)) ioapic_service(ioapic, index, false); - kvm_make_scan_ioapic_request(ioapic->kvm); + if (e->fields.delivery_mode == APIC_DM_FIXED) { + struct kvm_lapic_irq irq; + + irq.shorthand = 0; + irq.vector = e->fields.vector; + irq.delivery_mode = e->fields.delivery_mode << 8; + irq.dest_id = e->fields.dest_id; + irq.dest_mode = e->fields.dest_mode; + kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq, + &vcpu_bitmap); + if (old_dest_mode != e->fields.dest_mode || + old_dest_id != e->fields.dest_id) { + /* + * Update vcpu_bitmap with vcpus specified in + * the previous request as well. This is done to + * keep ioapic_handled_vectors synchronized. + */ + irq.dest_id = old_dest_id; + irq.dest_mode = old_dest_mode; + kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq, + &vcpu_bitmap); + } + kvm_make_scan_ioapic_request_mask(ioapic->kvm, + &vcpu_bitmap); + } else { + kvm_make_scan_ioapic_request(ioapic->kvm); + } break; } } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 60fb21fe7f42..452cedd6382b 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1122,6 +1122,50 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, return result; } +/* + * This routine identifies the destination vcpus mask meant to receive the + * IOAPIC interrupts. It either uses kvm_apic_map_get_dest_lapic() to find + * out the destination vcpus array and set the bitmap or it traverses to + * each available vcpu to identify the same. 
+ */ +void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq, + unsigned long *vcpu_bitmap) +{ + struct kvm_lapic **dest_vcpu = NULL; + struct kvm_lapic *src = NULL; + struct kvm_apic_map *map; + struct kvm_vcpu *vcpu; + unsigned long bitmap; + int i, vcpu_idx; + bool ret; + + rcu_read_lock(); + map = rcu_dereference(kvm->arch.apic_map); + + ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dest_vcpu, + &bitmap); + if (ret) { + for_each_set_bit(i, &bitmap, 16) { + if (!dest_vcpu[i]) + continue; + vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx; + __set_bit(vcpu_idx, vcpu_bitmap); + } + } else { + kvm_for_each_vcpu(i, vcpu, kvm) { + if (!kvm_apic_present(vcpu)) + continue; + if (!kvm_apic_match_dest(vcpu, NULL, + irq->delivery_mode, + irq->dest_id, + irq->dest_mode)) + continue; + __set_bit(i, vcpu_bitmap); + } + } + rcu_read_unlock(); +} + int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2) { return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio; diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index 2aad7e226fc0..c1d77436126a 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -226,6 +226,9 @@ bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector); void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu); +void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq, + unsigned long *vcpu_bitmap); + bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq, struct kvm_vcpu **dest_vcpu); int kvm_vector_to_index(u32 vector, u32 dest_vcpus, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8db7275d313f..991dd01ba08b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7874,6 +7874,20 @@ static void process_smi(struct kvm_vcpu *vcpu) kvm_make_request(KVM_REQ_EVENT, vcpu); } +void kvm_make_scan_ioapic_request_mask(struct kvm *kvm, + unsigned long *vcpu_bitmap) +{ + cpumask_var_t cpus; + bool called; + + zalloc_cpumask_var(&cpus, GFP_ATOMIC); + + called = kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC, + vcpu_bitmap, cpus); + + free_cpumask_var(cpus); +} + void kvm_make_scan_ioapic_request(struct kvm *kvm) { kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 70b2296fb2ae..bfe6c6729988 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -786,6 +786,8 @@ void kvm_reload_remote_mmus(struct kvm *kvm); bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, unsigned long *vcpu_bitmap, cpumask_var_t tmp); bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); +bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req, + unsigned long *vcpu_bitmap); long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); -- cgit v1.2.3 From 9a2ae9f6b6bbd3ef05d5e5977ace854e9b8f04b5 Mon Sep 17 00:00:00 2001 From: Nitesh Narayan Lal Date: Wed, 20 Nov 2019 07:12:24 -0500 Subject: KVM: x86: Zero the IOAPIC scan request dest vCPUs bitmap Not zeroing the bitmap used for identifying the destination vCPUs for an IOAPIC scan request in fixed delivery mode could lead to waking up unwanted vCPUs. This patch zeroes the vCPU bitmap before passing it to kvm_bitmap_or_dest_vcpus(), which is responsible for setting the bitmap with the bits corresponding to the destination vCPUs. 
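The problem and the fix fit in two lines: the bitmap lives on the stack, so it starts out holding whatever was there before, and kvm_bitmap_or_dest_vcpus() only ever sets bits. Condensed from the diff below:

    unsigned long vcpu_bitmap;              /* on-stack, content undefined */

    bitmap_zero(&vcpu_bitmap, 16);          /* the fix: start from empty */
    kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq, &vcpu_bitmap);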
Fixes: 7ee30bc132c6("KVM: x86: deliver KVM IOAPIC scan request to target vCPUs") Signed-off-by: Nitesh Narayan Lal Signed-off-by: Paolo Bonzini --- arch/x86/kvm/ioapic.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index ce30ef23c86b..9fd2dd89a1c5 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -332,6 +332,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) irq.delivery_mode = e->fields.delivery_mode << 8; irq.dest_id = e->fields.dest_id; irq.dest_mode = e->fields.dest_mode; + bitmap_zero(&vcpu_bitmap, 16); kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq, &vcpu_bitmap); if (old_dest_mode != e->fields.dest_mode || -- cgit v1.2.3 From cc877670975be9082138e34f9e55d9d79c527b5c Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Mon, 18 Nov 2019 21:11:21 +0200 Subject: KVM: nVMX: Use semi-colon instead of comma for exit-handlers initialization Reviewed-by: Mark Kanda Signed-off-by: Liran Alon Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 60d42ce42403..f161a941cb09 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -6198,23 +6198,23 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)) init_vmcs_shadow_fields(); } - exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear, - exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch, - exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld, - exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst, - exit_handlers[EXIT_REASON_VMREAD] = handle_vmread, - exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume, - exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite, - exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff, - exit_handlers[EXIT_REASON_VMON] = handle_vmon, - exit_handlers[EXIT_REASON_INVEPT] = handle_invept, - exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid, - exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc, + exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear; + exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch; + exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld; + exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst; + exit_handlers[EXIT_REASON_VMREAD] = handle_vmread; + exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume; + exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite; + exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff; + exit_handlers[EXIT_REASON_VMON] = handle_vmon; + exit_handlers[EXIT_REASON_INVEPT] = handle_invept; + exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid; + exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc; kvm_x86_ops->check_nested_events = vmx_check_nested_events; kvm_x86_ops->get_nested_state = vmx_get_nested_state; kvm_x86_ops->set_nested_state = vmx_set_nested_state; - kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages, + kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages; kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs; kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version; -- cgit v1.2.3 From c79eb77554bb6dde8ecd5319e2824e4c8e1f4819 Mon Sep 17 00:00:00 2001 From: Chenyi Qiang Date: Tue, 19 Nov 2019 16:33:59 +0800 Subject: KVM: nVMX: add CR4_LA57 bit to nested CR4_FIXED1 When L1 guest uses 5-level paging, it fails vm-entry to L2 due to invalid host-state. It needs to add CR4_LA57 bit to nested CR4_FIXED1 MSR. 
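The fix follows the existing pattern in nested_vmx_cr_fixed1_bits_update(): a CR4 bit becomes "allowed to be 1" for L1 only if the matching CPUID feature is exposed to the guest. Conceptually this amounts to the check below; the real code uses the cr4_fixed1_update() macro on raw CPUID output, as the diff shows:

    if (guest_cpuid_has(vcpu, X86_FEATURE_LA57))
            to_vmx(vcpu)->nested.msrs.cr4_fixed1 |= X86_CR4_LA57;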
Signed-off-by: Chenyi Qiang Reviewed-by: Xiaoyao Li Reviewed-by: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 621142e55e28..89253d60e23a 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6962,6 +6962,7 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu) cr4_fixed1_update(X86_CR4_SMAP, ebx, bit(X86_FEATURE_SMAP)); cr4_fixed1_update(X86_CR4_PKE, ecx, bit(X86_FEATURE_PKU)); cr4_fixed1_update(X86_CR4_UMIP, ecx, bit(X86_FEATURE_UMIP)); + cr4_fixed1_update(X86_CR4_LA57, ecx, bit(X86_FEATURE_LA57)); #undef cr4_fixed1_update } -- cgit v1.2.3 From 5637f60b6828c0abfd5fe3cc6922a7106b5366c7 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Mon, 18 Nov 2019 19:27:02 +0200 Subject: KVM: x86: Unexport kvm_vcpu_reload_apic_access_page() The function is only used in kvm.ko module. Reviewed-by: Mark Kanda Signed-off-by: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 991dd01ba08b..050961a51f84 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7965,7 +7965,6 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) */ put_page(page); } -EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page); void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu) { -- cgit v1.2.3 From 992edeaefed682511bd173dabd2f54b1ce5387df Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Wed, 20 Nov 2019 14:24:52 +0200 Subject: KVM: nVMX: Assume TLB entries of L1 and L2 are tagged differently if L0 use EPT Since commit 1313cc2bd8f6 ("kvm: mmu: Add guest_mode to kvm_mmu_page_role"), guest_mode was added to mmu-role and therefore if L0 use EPT, it will always run L1 and L2 with different EPTP. i.e. EPTP01!=EPTP02. Because TLB entries are tagged with EP4TA, KVM can assume TLB entries populated while running L2 are tagged differently than TLB entries populated while running L1. Therefore, update nested_has_guest_tlb_tag() to consider if L0 use EPT instead of if L1 use EPT. Reviewed-by: Joao Martins Reviewed-by: Krish Sadhukhan Signed-off-by: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index f161a941cb09..dc06e67be017 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -1126,7 +1126,9 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne * populated by L2 differently than TLB entries populated * by L1. * - * If L1 uses EPT, then TLB entries are tagged with different EPTP. + * If L0 uses EPT, L1 and L2 run with different EPTP because + * guest_mode is part of kvm_mmu_page_role. Thus, TLB entries + * are tagged with different EPTP. 
* * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged * with different VPID (L1 entries are tagged with vmx->vpid @@ -1136,7 +1138,7 @@ static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - return nested_cpu_has_ept(vmcs12) || + return enable_ept || (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02); } -- cgit v1.2.3 From 31a88c82b466d2f31a44e21c479f45b4732ccfd0 Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Wed, 13 Nov 2019 17:46:13 +0100 Subject: KVM: PPC: Book3S HV: XIVE: Free previous EQ page when setting up a new one MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The EQ page is allocated by the guest and then passed to the hypervisor with the H_INT_SET_QUEUE_CONFIG hcall. A reference is taken on the page before handing it over to the HW. This reference is dropped either when the guest issues the H_INT_RESET hcall or when the KVM device is released. But, the guest can legitimately call H_INT_SET_QUEUE_CONFIG several times, either to reset the EQ (vCPU hot unplug) or to set a new EQ (guest reboot). In both cases the existing EQ page reference is leaked because we simply overwrite it in the XIVE queue structure without calling put_page(). This is especially visible when the guest memory is backed with huge pages: start a VM up to the guest userspace, either reboot it or unplug a vCPU, quit QEMU. The leak is observed by comparing the value of HugePages_Free in /proc/meminfo before and after the VM is run. Ideally we'd want the XIVE code to handle the EQ page de-allocation at the platform level. This isn't the case right now because the various XIVE drivers have different allocation needs. It could maybe worth introducing hooks for this purpose instead of exposing XIVE internals to the drivers, but this is certainly a huge work to be done later. In the meantime, for easier backport, fix both vCPU unplug and guest reboot leaks by introducing a wrapper around xive_native_configure_queue() that does the necessary cleanup. 
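The wrapper's job reduces to "remember the old EQ page, reconfigure, and only then drop the old reference". Condensed from the patch below:

    __be32 *qpage_prev = q->qpage;

    rc = xive_native_configure_queue(vp_id, q, prio, qpage, order, can_escalate);
    if (rc)
            return rc;                      /* old page reference kept intact */

    if (qpage_prev)
            put_page(virt_to_page(qpage_prev));     /* no longer leaked */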
Reported-by: Satheesh Rajendran Cc: stable@vger.kernel.org # v5.2 Fixes: 13ce3297c576 ("KVM: PPC: Book3S HV: XIVE: Add controls for the EQ configuration") Signed-off-by: Cédric Le Goater Signed-off-by: Greg Kurz Tested-by: Lijun Pan Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_xive_native.c | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 34bd123fa024..0e1fc5a16729 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -50,6 +50,24 @@ static void kvmppc_xive_native_cleanup_queue(struct kvm_vcpu *vcpu, int prio) } } +static int kvmppc_xive_native_configure_queue(u32 vp_id, struct xive_q *q, + u8 prio, __be32 *qpage, + u32 order, bool can_escalate) +{ + int rc; + __be32 *qpage_prev = q->qpage; + + rc = xive_native_configure_queue(vp_id, q, prio, qpage, order, + can_escalate); + if (rc) + return rc; + + if (qpage_prev) + put_page(virt_to_page(qpage_prev)); + + return rc; +} + void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; @@ -575,19 +593,14 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive, q->guest_qaddr = 0; q->guest_qshift = 0; - rc = xive_native_configure_queue(xc->vp_id, q, priority, - NULL, 0, true); + rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority, + NULL, 0, true); if (rc) { pr_err("Failed to reset queue %d for VCPU %d: %d\n", priority, xc->server_num, rc); return rc; } - if (q->qpage) { - put_page(virt_to_page(q->qpage)); - q->qpage = NULL; - } - return 0; } @@ -646,8 +659,8 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive, * OPAL level because the use of END ESBs is not supported by * Linux. */ - rc = xive_native_configure_queue(xc->vp_id, q, priority, - (__be32 *) qaddr, kvm_eq.qshift, true); + rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority, + (__be32 *) qaddr, kvm_eq.qshift, true); if (rc) { pr_err("Failed to configure queue %d for VCPU %d: %d\n", priority, xc->server_num, rc); -- cgit v1.2.3 From 30486e72093ea2e594f44876b7a445c219449bce Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Wed, 13 Nov 2019 17:46:19 +0100 Subject: KVM: PPC: Book3S HV: XIVE: Fix potential page leak on error path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need to check the host page size is big enough to accomodate the EQ. Let's do this before taking a reference on the EQ page to avoid a potential leak if the check fails. 
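The change is purely an ordering one: validate the requested queue size against the host page size first, and only then take the page reference, so the error path never holds a reference it would have to drop. Condensed from the patch below, with the srcu bookkeeping of the real code omitted:

    if (1ull << kvm_eq.qshift > kvm_host_page_size(kvm, gfn))
            return -EINVAL;                 /* nothing referenced yet */

    page = gfn_to_page(kvm, gfn);
    if (is_error_page(page))
            return -EINVAL;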
Cc: stable@vger.kernel.org # v5.2 Fixes: 13ce3297c576 ("KVM: PPC: Book3S HV: XIVE: Add controls for the EQ configuration") Signed-off-by: Greg Kurz Reviewed-by: Cédric Le Goater Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_xive_native.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 0e1fc5a16729..d83adb1e1490 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -630,12 +630,6 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive, srcu_idx = srcu_read_lock(&kvm->srcu); gfn = gpa_to_gfn(kvm_eq.qaddr); - page = gfn_to_page(kvm, gfn); - if (is_error_page(page)) { - srcu_read_unlock(&kvm->srcu, srcu_idx); - pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr); - return -EINVAL; - } page_size = kvm_host_page_size(kvm, gfn); if (1ull << kvm_eq.qshift > page_size) { @@ -644,6 +638,13 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive, return -EINVAL; } + page = gfn_to_page(kvm, gfn); + if (is_error_page(page)) { + srcu_read_unlock(&kvm->srcu, srcu_idx); + pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr); + return -EINVAL; + } + qaddr = page_to_virt(page) + (kvm_eq.qaddr & ~PAGE_MASK); srcu_read_unlock(&kvm->srcu, srcu_idx); -- cgit v1.2.3 From b11494bcabba7383c9db65132f6f73d64fb1407d Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Thu, 21 Nov 2019 00:31:47 +0200 Subject: KVM: nVMX: Do not mark vmcs02->apic_access_page as dirty when unpinning vmcs->apic_access_page is simply a token that the hypervisor puts into the PFN of a 4KB EPTE (or PTE if using shadow-paging) that triggers APIC-access VMExit or APIC virtualization logic whenever a CPU running in VMX non-root mode read/write from/to this PFN. As every write either triggers an APIC-access VMExit or write is performed on vmcs->virtual_apic_page, the PFN pointed to by vmcs->apic_access_page should never actually be touched by CPU. Therefore, there is no need to mark vmcs02->apic_access_page as dirty after unpin it on L2->L1 emulated VMExit or when L1 exit VMX operation. Reviewed-by: Krish Sadhukhan Reviewed-by: Joao Martins Reviewed-by: Jim Mattson Signed-off-by: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 5fb59bed344e..783de6d83f9a 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -268,7 +268,7 @@ static void free_nested(struct kvm_vcpu *vcpu) vmx->nested.cached_shadow_vmcs12 = NULL; /* Unpin physical memory we referred to in the vmcs02 */ if (vmx->nested.apic_access_page) { - kvm_release_page_dirty(vmx->nested.apic_access_page); + kvm_release_page_clean(vmx->nested.apic_access_page); vmx->nested.apic_access_page = NULL; } kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); @@ -3070,7 +3070,7 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) * to it so we can release it later. 
*/ if (vmx->nested.apic_access_page) { /* shouldn't happen */ - kvm_release_page_dirty(vmx->nested.apic_access_page); + kvm_release_page_clean(vmx->nested.apic_access_page); vmx->nested.apic_access_page = NULL; } page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr); @@ -4267,7 +4267,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, /* Unpin physical memory we referred to in vmcs02 */ if (vmx->nested.apic_access_page) { - kvm_release_page_dirty(vmx->nested.apic_access_page); + kvm_release_page_clean(vmx->nested.apic_access_page); vmx->nested.apic_access_page = NULL; } kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); -- cgit v1.2.3 From db5a95ec166f3fd3eecbd07ebdf56986a3e9f43f Mon Sep 17 00:00:00 2001 From: Mao Wenan Date: Tue, 19 Nov 2019 11:06:40 +0800 Subject: KVM: x86: remove set but not used variable 'called' Fixes gcc '-Wunused-but-set-variable' warning: arch/x86/kvm/x86.c: In function kvm_make_scan_ioapic_request_mask: arch/x86/kvm/x86.c:7911:7: warning: variable called set but not used [-Wunused-but-set-variable] It is not used since commit 7ee30bc132c6 ("KVM: x86: deliver KVM IOAPIC scan request to target vCPUs") Signed-off-by: Mao Wenan Fixes: 7ee30bc132c6 ("KVM: x86: deliver KVM IOAPIC scan request to target vCPUs") Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c814c226d02e..a256e09f321a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7900,12 +7900,11 @@ void kvm_make_scan_ioapic_request_mask(struct kvm *kvm, unsigned long *vcpu_bitmap) { cpumask_var_t cpus; - bool called; zalloc_cpumask_var(&cpus, GFP_ATOMIC); - called = kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC, - vcpu_bitmap, cpus); + kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC, + vcpu_bitmap, cpus); free_cpumask_var(cpus); } -- cgit v1.2.3 From 0155b2b91b263248fb5dd01c238439d4ab3731c5 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Wed, 20 Nov 2019 16:33:07 +0200 Subject: KVM: nVMX: Remove unnecessary TLB flushes on L1<->L2 switches when L1 use apic-access-page MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit According to Intel SDM section 28.3.3.3/28.3.3.4 Guidelines for Use of the INVVPID/INVEPT Instruction, the hypervisor needs to execute INVVPID/INVEPT X in case CPU executes VMEntry with VPID/EPTP X and either: "Virtualize APIC accesses" VM-execution control was changed from 0 to 1, OR the value of apic_access_page was changed. In the nested case, the burden falls on L1, unless L0 enables EPT in vmcs02 but L1 enables neither EPT nor VPID in vmcs12. For this reason prepare_vmcs02() and load_vmcs12_host_state() have special code to request a TLB flush in case L1 does not use EPT but it uses "virtualize APIC accesses". This special case however is not necessary. On a nested vmentry the physical TLB will already be flushed except if all the following apply: * L0 uses VPID * L1 uses VPID * L0 can guarantee TLB entries populated while running L1 are tagged differently than TLB entries populated while running L2. If the first condition is false, the processor will flush the TLB on vmentry to L2. If the second or third condition are false, prepare_vmcs02() will request KVM_REQ_TLB_FLUSH. However, even if both are true, no extra TLB flush is needed to handle the APIC access page: * if L1 doesn't use VPID, the second condition doesn't hold and the TLB will be flushed anyway. 
* if L1 uses VPID, it has to flush the TLB itself with INVVPID and section 28.3.3.3 doesn't apply to L0. * even INVEPT is not needed because, if L0 uses EPT, it uses different EPTP when running L2 than L1 (because guest_mode is part of mmu-role). In this case SDM section 28.3.3.4 doesn't apply. Similarly, examining nested_vmx_vmexit()->load_vmcs12_host_state(), one could note that L0 won't flush TLB only in cases where SDM sections 28.3.3.3 and 28.3.3.4 don't apply. In particular, if L0 uses different VPIDs for L1 and L2 (i.e. vmx->vpid != vmx->nested.vpid02), section 28.3.3.3 doesn't apply. Thus, remove this flush from prepare_vmcs02() and nested_vmx_vmexit(). Side-note: This patch can be viewed as removing parts of commit fb6c81984313 ("kvm: vmx: Flush TLB when the APIC-access address changes”) that is not relevant anymore since commit 1313cc2bd8f6 ("kvm: mmu: Add guest_mode to kvm_mmu_page_role”). i.e. The first commit assumes that if L0 use EPT and L1 doesn’t use EPT, then L0 will use same EPTP for both L0 and L1. Which indeed required L0 to execute INVEPT before entering L2 guest. This assumption is not true anymore since when guest_mode was added to mmu-role. Reviewed-by: Joao Martins Signed-off-by: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 783de6d83f9a..4aea7d304beb 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -2493,9 +2493,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, if (nested_cpu_has_ept(vmcs12)) nested_ept_init_mmu_context(vcpu); - else if (nested_cpu_has2(vmcs12, - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) - vmx_flush_tlb(vcpu, true); /* * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those @@ -4259,10 +4256,6 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, if (vmx->nested.change_vmcs01_virtual_apic_mode) { vmx->nested.change_vmcs01_virtual_apic_mode = false; vmx_set_virtual_apic_mode(vcpu); - } else if (!nested_cpu_has_ept(vmcs12) && - nested_cpu_has2(vmcs12, - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { - vmx_flush_tlb(vcpu, true); } /* Unpin physical memory we referred to in vmcs02 */ -- cgit v1.2.3 From c50d8ae3a1274f32c9033bbb0e1c5b3115da2112 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 21 Nov 2019 10:45:07 +0100 Subject: KVM: x86: create mmu/ subdirectory Preparatory work for shattering mmu.c into multiple files. Besides making it easier to follow, this will also make it possible to write unit tests for various parts. 
Signed-off-by: Paolo Bonzini --- arch/x86/kvm/Makefile | 4 +- arch/x86/kvm/mmu.c | 6502 ---------------------------------------- arch/x86/kvm/mmu/mmu.c | 6502 ++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/mmu/page_track.c | 265 ++ arch/x86/kvm/mmu/paging_tmpl.h | 1090 +++++++ arch/x86/kvm/page_track.c | 265 -- arch/x86/kvm/paging_tmpl.h | 1090 ------- 7 files changed, 7859 insertions(+), 7859 deletions(-) delete mode 100644 arch/x86/kvm/mmu.c create mode 100644 arch/x86/kvm/mmu/mmu.c create mode 100644 arch/x86/kvm/mmu/page_track.c create mode 100644 arch/x86/kvm/mmu/paging_tmpl.h delete mode 100644 arch/x86/kvm/page_track.c delete mode 100644 arch/x86/kvm/paging_tmpl.h diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 31ecf7a76d5a..b19ef421084d 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -8,9 +8,9 @@ kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \ $(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o -kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \ +kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \ i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \ - hyperv.o page_track.o debugfs.o + hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o vmx/evmcs.o vmx/nested.o kvm-amd-y += svm.o pmu_amd.o diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c deleted file mode 100644 index 6f92b40d798c..000000000000 --- a/arch/x86/kvm/mmu.c +++ /dev/null @@ -1,6502 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Kernel-based Virtual Machine driver for Linux - * - * This module enables machines with Intel VT-x extensions to run virtual - * machines without emulation or binary translation. - * - * MMU support - * - * Copyright (C) 2006 Qumranet, Inc. - * Copyright 2010 Red Hat, Inc. and/or its affiliates. - * - * Authors: - * Yaniv Kamay - * Avi Kivity - */ - -#include "irq.h" -#include "mmu.h" -#include "x86.h" -#include "kvm_cache_regs.h" -#include "cpuid.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include "trace.h" - -extern bool itlb_multihit_kvm_mitigation; - -static int __read_mostly nx_huge_pages = -1; -#ifdef CONFIG_PREEMPT_RT -/* Recovery can cause latency spikes, disable it for PREEMPT_RT. */ -static uint __read_mostly nx_huge_pages_recovery_ratio = 0; -#else -static uint __read_mostly nx_huge_pages_recovery_ratio = 60; -#endif - -static int set_nx_huge_pages(const char *val, const struct kernel_param *kp); -static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp); - -static struct kernel_param_ops nx_huge_pages_ops = { - .set = set_nx_huge_pages, - .get = param_get_bool, -}; - -static struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = { - .set = set_nx_huge_pages_recovery_ratio, - .get = param_get_uint, -}; - -module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644); -__MODULE_PARM_TYPE(nx_huge_pages, "bool"); -module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops, - &nx_huge_pages_recovery_ratio, 0644); -__MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint"); - -/* - * When setting this variable to true it enables Two-Dimensional-Paging - * where the hardware walks 2 page tables: - * 1. the guest-virtual to guest-physical - * 2. while doing 1. 
it walks guest-physical to host-physical - * If the hardware supports that we don't need to do shadow paging. - */ -bool tdp_enabled = false; - -enum { - AUDIT_PRE_PAGE_FAULT, - AUDIT_POST_PAGE_FAULT, - AUDIT_PRE_PTE_WRITE, - AUDIT_POST_PTE_WRITE, - AUDIT_PRE_SYNC, - AUDIT_POST_SYNC -}; - -#undef MMU_DEBUG - -#ifdef MMU_DEBUG -static bool dbg = 0; -module_param(dbg, bool, 0644); - -#define pgprintk(x...) do { if (dbg) printk(x); } while (0) -#define rmap_printk(x...) do { if (dbg) printk(x); } while (0) -#define MMU_WARN_ON(x) WARN_ON(x) -#else -#define pgprintk(x...) do { } while (0) -#define rmap_printk(x...) do { } while (0) -#define MMU_WARN_ON(x) do { } while (0) -#endif - -#define PTE_PREFETCH_NUM 8 - -#define PT_FIRST_AVAIL_BITS_SHIFT 10 -#define PT64_SECOND_AVAIL_BITS_SHIFT 54 - -/* - * The mask used to denote special SPTEs, which can be either MMIO SPTEs or - * Access Tracking SPTEs. - */ -#define SPTE_SPECIAL_MASK (3ULL << 52) -#define SPTE_AD_ENABLED_MASK (0ULL << 52) -#define SPTE_AD_DISABLED_MASK (1ULL << 52) -#define SPTE_AD_WRPROT_ONLY_MASK (2ULL << 52) -#define SPTE_MMIO_MASK (3ULL << 52) - -#define PT64_LEVEL_BITS 9 - -#define PT64_LEVEL_SHIFT(level) \ - (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS) - -#define PT64_INDEX(address, level)\ - (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1)) - - -#define PT32_LEVEL_BITS 10 - -#define PT32_LEVEL_SHIFT(level) \ - (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS) - -#define PT32_LVL_OFFSET_MASK(level) \ - (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \ - * PT32_LEVEL_BITS))) - 1)) - -#define PT32_INDEX(address, level)\ - (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1)) - - -#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK -#define PT64_BASE_ADDR_MASK (physical_mask & ~(u64)(PAGE_SIZE-1)) -#else -#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)) -#endif -#define PT64_LVL_ADDR_MASK(level) \ - (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \ - * PT64_LEVEL_BITS))) - 1)) -#define PT64_LVL_OFFSET_MASK(level) \ - (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \ - * PT64_LEVEL_BITS))) - 1)) - -#define PT32_BASE_ADDR_MASK PAGE_MASK -#define PT32_DIR_BASE_ADDR_MASK \ - (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1)) -#define PT32_LVL_ADDR_MASK(level) \ - (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \ - * PT32_LEVEL_BITS))) - 1)) - -#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \ - | shadow_x_mask | shadow_nx_mask | shadow_me_mask) - -#define ACC_EXEC_MASK 1 -#define ACC_WRITE_MASK PT_WRITABLE_MASK -#define ACC_USER_MASK PT_USER_MASK -#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK) - -/* The mask for the R/X bits in EPT PTEs */ -#define PT64_EPT_READABLE_MASK 0x1ull -#define PT64_EPT_EXECUTABLE_MASK 0x4ull - -#include - -#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT) -#define SPTE_MMU_WRITEABLE (1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1)) - -#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) - -/* make pte_list_desc fit well in cache line */ -#define PTE_LIST_EXT 3 - -/* - * Return values of handle_mmio_page_fault and mmu.page_fault: - * RET_PF_RETRY: let CPU fault again on the address. - * RET_PF_EMULATE: mmio page fault, emulate the instruction directly. - * - * For handle_mmio_page_fault only: - * RET_PF_INVALID: the spte is invalid, let the real page fault path update it. 
- */ -enum { - RET_PF_RETRY = 0, - RET_PF_EMULATE = 1, - RET_PF_INVALID = 2, -}; - -struct pte_list_desc { - u64 *sptes[PTE_LIST_EXT]; - struct pte_list_desc *more; -}; - -struct kvm_shadow_walk_iterator { - u64 addr; - hpa_t shadow_addr; - u64 *sptep; - int level; - unsigned index; -}; - -static const union kvm_mmu_page_role mmu_base_role_mask = { - .cr0_wp = 1, - .gpte_is_8_bytes = 1, - .nxe = 1, - .smep_andnot_wp = 1, - .smap_andnot_wp = 1, - .smm = 1, - .guest_mode = 1, - .ad_disabled = 1, -}; - -#define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker) \ - for (shadow_walk_init_using_root(&(_walker), (_vcpu), \ - (_root), (_addr)); \ - shadow_walk_okay(&(_walker)); \ - shadow_walk_next(&(_walker))) - -#define for_each_shadow_entry(_vcpu, _addr, _walker) \ - for (shadow_walk_init(&(_walker), _vcpu, _addr); \ - shadow_walk_okay(&(_walker)); \ - shadow_walk_next(&(_walker))) - -#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \ - for (shadow_walk_init(&(_walker), _vcpu, _addr); \ - shadow_walk_okay(&(_walker)) && \ - ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \ - __shadow_walk_next(&(_walker), spte)) - -static struct kmem_cache *pte_list_desc_cache; -static struct kmem_cache *mmu_page_header_cache; -static struct percpu_counter kvm_total_used_mmu_pages; - -static u64 __read_mostly shadow_nx_mask; -static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */ -static u64 __read_mostly shadow_user_mask; -static u64 __read_mostly shadow_accessed_mask; -static u64 __read_mostly shadow_dirty_mask; -static u64 __read_mostly shadow_mmio_mask; -static u64 __read_mostly shadow_mmio_value; -static u64 __read_mostly shadow_mmio_access_mask; -static u64 __read_mostly shadow_present_mask; -static u64 __read_mostly shadow_me_mask; - -/* - * SPTEs used by MMUs without A/D bits are marked with SPTE_AD_DISABLED_MASK; - * shadow_acc_track_mask is the set of bits to be cleared in non-accessed - * pages. - */ -static u64 __read_mostly shadow_acc_track_mask; - -/* - * The mask/shift to use for saving the original R/X bits when marking the PTE - * as not-present for access tracking purposes. We do not save the W bit as the - * PTEs being access tracked also need to be dirty tracked, so the W bit will be - * restored only when a write is attempted to the page. - */ -static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK | - PT64_EPT_EXECUTABLE_MASK; -static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT; - -/* - * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order - * to guard against L1TF attacks. - */ -static u64 __read_mostly shadow_nonpresent_or_rsvd_mask; - -/* - * The number of high-order 1 bits to use in the mask above. - */ -static const u64 shadow_nonpresent_or_rsvd_mask_len = 5; - -/* - * In some cases, we need to preserve the GFN of a non-present or reserved - * SPTE when we usurp the upper five bits of the physical address space to - * defend against L1TF, e.g. for MMIO SPTEs. To preserve the GFN, we'll - * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask - * left into the reserved bits, i.e. the GFN in the SPTE will be split into - * high and low parts. This mask covers the lower bits of the GFN. - */ -static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask; - -/* - * The number of non-reserved physical address bits irrespective of features - * that repurpose legal bits, e.g. MKTME. 
- */ -static u8 __read_mostly shadow_phys_bits; - -static void mmu_spte_set(u64 *sptep, u64 spte); -static bool is_executable_pte(u64 spte); -static union kvm_mmu_page_role -kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu); - -#define CREATE_TRACE_POINTS -#include "mmutrace.h" - - -static inline bool kvm_available_flush_tlb_with_range(void) -{ - return kvm_x86_ops->tlb_remote_flush_with_range; -} - -static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm, - struct kvm_tlb_range *range) -{ - int ret = -ENOTSUPP; - - if (range && kvm_x86_ops->tlb_remote_flush_with_range) - ret = kvm_x86_ops->tlb_remote_flush_with_range(kvm, range); - - if (ret) - kvm_flush_remote_tlbs(kvm); -} - -static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm, - u64 start_gfn, u64 pages) -{ - struct kvm_tlb_range range; - - range.start_gfn = start_gfn; - range.pages = pages; - - kvm_flush_remote_tlbs_with_range(kvm, &range); -} - -void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask) -{ - BUG_ON((u64)(unsigned)access_mask != access_mask); - BUG_ON((mmio_mask & mmio_value) != mmio_value); - shadow_mmio_value = mmio_value | SPTE_MMIO_MASK; - shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK; - shadow_mmio_access_mask = access_mask; -} -EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask); - -static bool is_mmio_spte(u64 spte) -{ - return (spte & shadow_mmio_mask) == shadow_mmio_value; -} - -static inline bool sp_ad_disabled(struct kvm_mmu_page *sp) -{ - return sp->role.ad_disabled; -} - -static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu) -{ - /* - * When using the EPT page-modification log, the GPAs in the log - * would come from L2 rather than L1. Therefore, we need to rely - * on write protection to record dirty pages. This also bypasses - * PML, since writes now result in a vmexit. - */ - return vcpu->arch.mmu == &vcpu->arch.guest_mmu; -} - -static inline bool spte_ad_enabled(u64 spte) -{ - MMU_WARN_ON(is_mmio_spte(spte)); - return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_DISABLED_MASK; -} - -static inline bool spte_ad_need_write_protect(u64 spte) -{ - MMU_WARN_ON(is_mmio_spte(spte)); - return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_ENABLED_MASK; -} - -static bool is_nx_huge_page_enabled(void) -{ - return READ_ONCE(nx_huge_pages); -} - -static inline u64 spte_shadow_accessed_mask(u64 spte) -{ - MMU_WARN_ON(is_mmio_spte(spte)); - return spte_ad_enabled(spte) ? shadow_accessed_mask : 0; -} - -static inline u64 spte_shadow_dirty_mask(u64 spte) -{ - MMU_WARN_ON(is_mmio_spte(spte)); - return spte_ad_enabled(spte) ? shadow_dirty_mask : 0; -} - -static inline bool is_access_track_spte(u64 spte) -{ - return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0; -} - -/* - * Due to limited space in PTEs, the MMIO generation is a 19 bit subset of - * the memslots generation and is derived as follows: - * - * Bits 0-8 of the MMIO generation are propagated to spte bits 3-11 - * Bits 9-18 of the MMIO generation are propagated to spte bits 52-61 - * - * The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in - * the MMIO generation number, as doing so would require stealing a bit from - * the "real" generation number and thus effectively halve the maximum number - * of MMIO generations that can be handled before encountering a wrap (which - * requires a full MMU zap). The flag is instead explicitly queried when - * checking for MMIO spte cache hits. 
- */ -#define MMIO_SPTE_GEN_MASK GENMASK_ULL(18, 0) - -#define MMIO_SPTE_GEN_LOW_START 3 -#define MMIO_SPTE_GEN_LOW_END 11 -#define MMIO_SPTE_GEN_LOW_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \ - MMIO_SPTE_GEN_LOW_START) - -#define MMIO_SPTE_GEN_HIGH_START 52 -#define MMIO_SPTE_GEN_HIGH_END 61 -#define MMIO_SPTE_GEN_HIGH_MASK GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \ - MMIO_SPTE_GEN_HIGH_START) -static u64 generation_mmio_spte_mask(u64 gen) -{ - u64 mask; - - WARN_ON(gen & ~MMIO_SPTE_GEN_MASK); - - mask = (gen << MMIO_SPTE_GEN_LOW_START) & MMIO_SPTE_GEN_LOW_MASK; - mask |= (gen << MMIO_SPTE_GEN_HIGH_START) & MMIO_SPTE_GEN_HIGH_MASK; - return mask; -} - -static u64 get_mmio_spte_generation(u64 spte) -{ - u64 gen; - - spte &= ~shadow_mmio_mask; - - gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_START; - gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_START; - return gen; -} - -static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, - unsigned access) -{ - u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK; - u64 mask = generation_mmio_spte_mask(gen); - u64 gpa = gfn << PAGE_SHIFT; - - access &= shadow_mmio_access_mask; - mask |= shadow_mmio_value | access; - mask |= gpa | shadow_nonpresent_or_rsvd_mask; - mask |= (gpa & shadow_nonpresent_or_rsvd_mask) - << shadow_nonpresent_or_rsvd_mask_len; - - trace_mark_mmio_spte(sptep, gfn, access, gen); - mmu_spte_set(sptep, mask); -} - -static gfn_t get_mmio_spte_gfn(u64 spte) -{ - u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask; - - gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len) - & shadow_nonpresent_or_rsvd_mask; - - return gpa >> PAGE_SHIFT; -} - -static unsigned get_mmio_spte_access(u64 spte) -{ - return spte & shadow_mmio_access_mask; -} - -static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, - kvm_pfn_t pfn, unsigned access) -{ - if (unlikely(is_noslot_pfn(pfn))) { - mark_mmio_spte(vcpu, sptep, gfn, access); - return true; - } - - return false; -} - -static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte) -{ - u64 kvm_gen, spte_gen, gen; - - gen = kvm_vcpu_memslots(vcpu)->generation; - if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS)) - return false; - - kvm_gen = gen & MMIO_SPTE_GEN_MASK; - spte_gen = get_mmio_spte_generation(spte); - - trace_check_mmio_spte(spte, kvm_gen, spte_gen); - return likely(kvm_gen == spte_gen); -} - -/* - * Sets the shadow PTE masks used by the MMU. - * - * Assumptions: - * - Setting either @accessed_mask or @dirty_mask requires setting both - * - At least one of @accessed_mask or @acc_track_mask must be set - */ -void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, - u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask, - u64 acc_track_mask, u64 me_mask) -{ - BUG_ON(!dirty_mask != !accessed_mask); - BUG_ON(!accessed_mask && !acc_track_mask); - BUG_ON(acc_track_mask & SPTE_SPECIAL_MASK); - - shadow_user_mask = user_mask; - shadow_accessed_mask = accessed_mask; - shadow_dirty_mask = dirty_mask; - shadow_nx_mask = nx_mask; - shadow_x_mask = x_mask; - shadow_present_mask = p_mask; - shadow_acc_track_mask = acc_track_mask; - shadow_me_mask = me_mask; -} -EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes); - -static u8 kvm_get_shadow_phys_bits(void) -{ - /* - * boot_cpu_data.x86_phys_bits is reduced when MKTME is detected - * in CPU detection code, but MKTME treats those reduced bits as - * 'keyID' thus they are not reserved bits. Therefore for MKTME - * we should still return physical address bits reported by CPUID. 
- */ - if (!boot_cpu_has(X86_FEATURE_TME) || - WARN_ON_ONCE(boot_cpu_data.extended_cpuid_level < 0x80000008)) - return boot_cpu_data.x86_phys_bits; - - return cpuid_eax(0x80000008) & 0xff; -} - -static void kvm_mmu_reset_all_pte_masks(void) -{ - u8 low_phys_bits; - - shadow_user_mask = 0; - shadow_accessed_mask = 0; - shadow_dirty_mask = 0; - shadow_nx_mask = 0; - shadow_x_mask = 0; - shadow_mmio_mask = 0; - shadow_present_mask = 0; - shadow_acc_track_mask = 0; - - shadow_phys_bits = kvm_get_shadow_phys_bits(); - - /* - * If the CPU has 46 or less physical address bits, then set an - * appropriate mask to guard against L1TF attacks. Otherwise, it is - * assumed that the CPU is not vulnerable to L1TF. - * - * Some Intel CPUs address the L1 cache using more PA bits than are - * reported by CPUID. Use the PA width of the L1 cache when possible - * to achieve more effective mitigation, e.g. if system RAM overlaps - * the most significant bits of legal physical address space. - */ - shadow_nonpresent_or_rsvd_mask = 0; - low_phys_bits = boot_cpu_data.x86_cache_bits; - if (boot_cpu_data.x86_cache_bits < - 52 - shadow_nonpresent_or_rsvd_mask_len) { - shadow_nonpresent_or_rsvd_mask = - rsvd_bits(boot_cpu_data.x86_cache_bits - - shadow_nonpresent_or_rsvd_mask_len, - boot_cpu_data.x86_cache_bits - 1); - low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len; - } else - WARN_ON_ONCE(boot_cpu_has_bug(X86_BUG_L1TF)); - - shadow_nonpresent_or_rsvd_lower_gfn_mask = - GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT); -} - -static int is_cpuid_PSE36(void) -{ - return 1; -} - -static int is_nx(struct kvm_vcpu *vcpu) -{ - return vcpu->arch.efer & EFER_NX; -} - -static int is_shadow_present_pte(u64 pte) -{ - return (pte != 0) && !is_mmio_spte(pte); -} - -static int is_large_pte(u64 pte) -{ - return pte & PT_PAGE_SIZE_MASK; -} - -static int is_last_spte(u64 pte, int level) -{ - if (level == PT_PAGE_TABLE_LEVEL) - return 1; - if (is_large_pte(pte)) - return 1; - return 0; -} - -static bool is_executable_pte(u64 spte) -{ - return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask; -} - -static kvm_pfn_t spte_to_pfn(u64 pte) -{ - return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT; -} - -static gfn_t pse36_gfn_delta(u32 gpte) -{ - int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT; - - return (gpte & PT32_DIR_PSE36_MASK) << shift; -} - -#ifdef CONFIG_X86_64 -static void __set_spte(u64 *sptep, u64 spte) -{ - WRITE_ONCE(*sptep, spte); -} - -static void __update_clear_spte_fast(u64 *sptep, u64 spte) -{ - WRITE_ONCE(*sptep, spte); -} - -static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) -{ - return xchg(sptep, spte); -} - -static u64 __get_spte_lockless(u64 *sptep) -{ - return READ_ONCE(*sptep); -} -#else -union split_spte { - struct { - u32 spte_low; - u32 spte_high; - }; - u64 spte; -}; - -static void count_spte_clear(u64 *sptep, u64 spte) -{ - struct kvm_mmu_page *sp = page_header(__pa(sptep)); - - if (is_shadow_present_pte(spte)) - return; - - /* Ensure the spte is completely set before we increase the count */ - smp_wmb(); - sp->clear_spte_count++; -} - -static void __set_spte(u64 *sptep, u64 spte) -{ - union split_spte *ssptep, sspte; - - ssptep = (union split_spte *)sptep; - sspte = (union split_spte)spte; - - ssptep->spte_high = sspte.spte_high; - - /* - * If we map the spte from nonpresent to present, We should store - * the high bits firstly, then set present bit, so cpu can not - * fetch this spte while we are setting the spte. 
- */ - smp_wmb(); - - WRITE_ONCE(ssptep->spte_low, sspte.spte_low); -} - -static void __update_clear_spte_fast(u64 *sptep, u64 spte) -{ - union split_spte *ssptep, sspte; - - ssptep = (union split_spte *)sptep; - sspte = (union split_spte)spte; - - WRITE_ONCE(ssptep->spte_low, sspte.spte_low); - - /* - * If we map the spte from present to nonpresent, we should clear - * present bit firstly to avoid vcpu fetch the old high bits. - */ - smp_wmb(); - - ssptep->spte_high = sspte.spte_high; - count_spte_clear(sptep, spte); -} - -static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) -{ - union split_spte *ssptep, sspte, orig; - - ssptep = (union split_spte *)sptep; - sspte = (union split_spte)spte; - - /* xchg acts as a barrier before the setting of the high bits */ - orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low); - orig.spte_high = ssptep->spte_high; - ssptep->spte_high = sspte.spte_high; - count_spte_clear(sptep, spte); - - return orig.spte; -} - -/* - * The idea using the light way get the spte on x86_32 guest is from - * gup_get_pte (mm/gup.c). - * - * An spte tlb flush may be pending, because kvm_set_pte_rmapp - * coalesces them and we are running out of the MMU lock. Therefore - * we need to protect against in-progress updates of the spte. - * - * Reading the spte while an update is in progress may get the old value - * for the high part of the spte. The race is fine for a present->non-present - * change (because the high part of the spte is ignored for non-present spte), - * but for a present->present change we must reread the spte. - * - * All such changes are done in two steps (present->non-present and - * non-present->present), hence it is enough to count the number of - * present->non-present updates: if it changed while reading the spte, - * we might have hit the race. This is done using clear_spte_count. - */ -static u64 __get_spte_lockless(u64 *sptep) -{ - struct kvm_mmu_page *sp = page_header(__pa(sptep)); - union split_spte spte, *orig = (union split_spte *)sptep; - int count; - -retry: - count = sp->clear_spte_count; - smp_rmb(); - - spte.spte_low = orig->spte_low; - smp_rmb(); - - spte.spte_high = orig->spte_high; - smp_rmb(); - - if (unlikely(spte.spte_low != orig->spte_low || - count != sp->clear_spte_count)) - goto retry; - - return spte.spte; -} -#endif - -static bool spte_can_locklessly_be_made_writable(u64 spte) -{ - return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) == - (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE); -} - -static bool spte_has_volatile_bits(u64 spte) -{ - if (!is_shadow_present_pte(spte)) - return false; - - /* - * Always atomically update spte if it can be updated - * out of mmu-lock, it can ensure dirty bit is not lost, - * also, it can help us to get a stable is_writable_pte() - * to ensure tlb flush is not missed. - */ - if (spte_can_locklessly_be_made_writable(spte) || - is_access_track_spte(spte)) - return true; - - if (spte_ad_enabled(spte)) { - if ((spte & shadow_accessed_mask) == 0 || - (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0)) - return true; - } - - return false; -} - -static bool is_accessed_spte(u64 spte) -{ - u64 accessed_mask = spte_shadow_accessed_mask(spte); - - return accessed_mask ? spte & accessed_mask - : !is_access_track_spte(spte); -} - -static bool is_dirty_spte(u64 spte) -{ - u64 dirty_mask = spte_shadow_dirty_mask(spte); - - return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK; -} - -/* Rules for using mmu_spte_set: - * Set the sptep from nonpresent to present. 
- * Note: the sptep being assigned *must* be either not present - * or in a state where the hardware will not attempt to update - * the spte. - */ -static void mmu_spte_set(u64 *sptep, u64 new_spte) -{ - WARN_ON(is_shadow_present_pte(*sptep)); - __set_spte(sptep, new_spte); -} - -/* - * Update the SPTE (excluding the PFN), but do not track changes in its - * accessed/dirty status. - */ -static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte) -{ - u64 old_spte = *sptep; - - WARN_ON(!is_shadow_present_pte(new_spte)); - - if (!is_shadow_present_pte(old_spte)) { - mmu_spte_set(sptep, new_spte); - return old_spte; - } - - if (!spte_has_volatile_bits(old_spte)) - __update_clear_spte_fast(sptep, new_spte); - else - old_spte = __update_clear_spte_slow(sptep, new_spte); - - WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte)); - - return old_spte; -} - -/* Rules for using mmu_spte_update: - * Update the state bits, it means the mapped pfn is not changed. - * - * Whenever we overwrite a writable spte with a read-only one we - * should flush remote TLBs. Otherwise rmap_write_protect - * will find a read-only spte, even though the writable spte - * might be cached on a CPU's TLB, the return value indicates this - * case. - * - * Returns true if the TLB needs to be flushed - */ -static bool mmu_spte_update(u64 *sptep, u64 new_spte) -{ - bool flush = false; - u64 old_spte = mmu_spte_update_no_track(sptep, new_spte); - - if (!is_shadow_present_pte(old_spte)) - return false; - - /* - * For the spte updated out of mmu-lock is safe, since - * we always atomically update it, see the comments in - * spte_has_volatile_bits(). - */ - if (spte_can_locklessly_be_made_writable(old_spte) && - !is_writable_pte(new_spte)) - flush = true; - - /* - * Flush TLB when accessed/dirty states are changed in the page tables, - * to guarantee consistency between TLB and page tables. - */ - - if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) { - flush = true; - kvm_set_pfn_accessed(spte_to_pfn(old_spte)); - } - - if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) { - flush = true; - kvm_set_pfn_dirty(spte_to_pfn(old_spte)); - } - - return flush; -} - -/* - * Rules for using mmu_spte_clear_track_bits: - * It sets the sptep from present to nonpresent, and track the - * state bits, it is used to clear the last level sptep. - * Returns non-zero if the PTE was previously valid. - */ -static int mmu_spte_clear_track_bits(u64 *sptep) -{ - kvm_pfn_t pfn; - u64 old_spte = *sptep; - - if (!spte_has_volatile_bits(old_spte)) - __update_clear_spte_fast(sptep, 0ull); - else - old_spte = __update_clear_spte_slow(sptep, 0ull); - - if (!is_shadow_present_pte(old_spte)) - return 0; - - pfn = spte_to_pfn(old_spte); - - /* - * KVM does not hold the refcount of the page used by - * kvm mmu, before reclaiming the page, we should - * unmap it from mmu first. - */ - WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn))); - - if (is_accessed_spte(old_spte)) - kvm_set_pfn_accessed(pfn); - - if (is_dirty_spte(old_spte)) - kvm_set_pfn_dirty(pfn); - - return 1; -} - -/* - * Rules for using mmu_spte_clear_no_track: - * Directly clear spte without caring the state bits of sptep, - * it is used to set the upper level spte. 
- */ -static void mmu_spte_clear_no_track(u64 *sptep) -{ - __update_clear_spte_fast(sptep, 0ull); -} - -static u64 mmu_spte_get_lockless(u64 *sptep) -{ - return __get_spte_lockless(sptep); -} - -static u64 mark_spte_for_access_track(u64 spte) -{ - if (spte_ad_enabled(spte)) - return spte & ~shadow_accessed_mask; - - if (is_access_track_spte(spte)) - return spte; - - /* - * Making an Access Tracking PTE will result in removal of write access - * from the PTE. So, verify that we will be able to restore the write - * access in the fast page fault path later on. - */ - WARN_ONCE((spte & PT_WRITABLE_MASK) && - !spte_can_locklessly_be_made_writable(spte), - "kvm: Writable SPTE is not locklessly dirty-trackable\n"); - - WARN_ONCE(spte & (shadow_acc_track_saved_bits_mask << - shadow_acc_track_saved_bits_shift), - "kvm: Access Tracking saved bit locations are not zero\n"); - - spte |= (spte & shadow_acc_track_saved_bits_mask) << - shadow_acc_track_saved_bits_shift; - spte &= ~shadow_acc_track_mask; - - return spte; -} - -/* Restore an acc-track PTE back to a regular PTE */ -static u64 restore_acc_track_spte(u64 spte) -{ - u64 new_spte = spte; - u64 saved_bits = (spte >> shadow_acc_track_saved_bits_shift) - & shadow_acc_track_saved_bits_mask; - - WARN_ON_ONCE(spte_ad_enabled(spte)); - WARN_ON_ONCE(!is_access_track_spte(spte)); - - new_spte &= ~shadow_acc_track_mask; - new_spte &= ~(shadow_acc_track_saved_bits_mask << - shadow_acc_track_saved_bits_shift); - new_spte |= saved_bits; - - return new_spte; -} - -/* Returns the Accessed status of the PTE and resets it at the same time. */ -static bool mmu_spte_age(u64 *sptep) -{ - u64 spte = mmu_spte_get_lockless(sptep); - - if (!is_accessed_spte(spte)) - return false; - - if (spte_ad_enabled(spte)) { - clear_bit((ffs(shadow_accessed_mask) - 1), - (unsigned long *)sptep); - } else { - /* - * Capture the dirty status of the page, so that it doesn't get - * lost when the SPTE is marked for access tracking. - */ - if (is_writable_pte(spte)) - kvm_set_pfn_dirty(spte_to_pfn(spte)); - - spte = mark_spte_for_access_track(spte); - mmu_spte_update_no_track(sptep, spte); - } - - return true; -} - -static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu) -{ - /* - * Prevent page table teardown by making any free-er wait during - * kvm_flush_remote_tlbs() IPI to all active vcpus. - */ - local_irq_disable(); - - /* - * Make sure a following spte read is not reordered ahead of the write - * to vcpu->mode. - */ - smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES); -} - -static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu) -{ - /* - * Make sure the write to vcpu->mode is not reordered in front of - * reads to sptes. If it does, kvm_mmu_commit_zap_page() can see us - * OUTSIDE_GUEST_MODE and proceed to free the shadow page table. - */ - smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE); - local_irq_enable(); -} - -static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, - struct kmem_cache *base_cache, int min) -{ - void *obj; - - if (cache->nobjs >= min) - return 0; - while (cache->nobjs < ARRAY_SIZE(cache->objects)) { - obj = kmem_cache_zalloc(base_cache, GFP_KERNEL_ACCOUNT); - if (!obj) - return cache->nobjs >= min ? 
0 : -ENOMEM; - cache->objects[cache->nobjs++] = obj; - } - return 0; -} - -static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache) -{ - return cache->nobjs; -} - -static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc, - struct kmem_cache *cache) -{ - while (mc->nobjs) - kmem_cache_free(cache, mc->objects[--mc->nobjs]); -} - -static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache, - int min) -{ - void *page; - - if (cache->nobjs >= min) - return 0; - while (cache->nobjs < ARRAY_SIZE(cache->objects)) { - page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT); - if (!page) - return cache->nobjs >= min ? 0 : -ENOMEM; - cache->objects[cache->nobjs++] = page; - } - return 0; -} - -static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc) -{ - while (mc->nobjs) - free_page((unsigned long)mc->objects[--mc->nobjs]); -} - -static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu) -{ - int r; - - r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, - pte_list_desc_cache, 8 + PTE_PREFETCH_NUM); - if (r) - goto out; - r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8); - if (r) - goto out; - r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache, - mmu_page_header_cache, 4); -out: - return r; -} - -static void mmu_free_memory_caches(struct kvm_vcpu *vcpu) -{ - mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, - pte_list_desc_cache); - mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache); - mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache, - mmu_page_header_cache); -} - -static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) -{ - void *p; - - BUG_ON(!mc->nobjs); - p = mc->objects[--mc->nobjs]; - return p; -} - -static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu) -{ - return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache); -} - -static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc) -{ - kmem_cache_free(pte_list_desc_cache, pte_list_desc); -} - -static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index) -{ - if (!sp->role.direct) - return sp->gfns[index]; - - return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS)); -} - -static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) -{ - if (!sp->role.direct) { - sp->gfns[index] = gfn; - return; - } - - if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index))) - pr_err_ratelimited("gfn mismatch under direct page %llx " - "(expected %llx, got %llx)\n", - sp->gfn, - kvm_mmu_page_get_gfn(sp, index), gfn); -} - -/* - * Return the pointer to the large page information for a given gfn, - * handling slots that are not large page aligned. 
- */ -static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, - struct kvm_memory_slot *slot, - int level) -{ - unsigned long idx; - - idx = gfn_to_index(gfn, slot->base_gfn, level); - return &slot->arch.lpage_info[level - 2][idx]; -} - -static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot, - gfn_t gfn, int count) -{ - struct kvm_lpage_info *linfo; - int i; - - for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) { - linfo = lpage_info_slot(gfn, slot, i); - linfo->disallow_lpage += count; - WARN_ON(linfo->disallow_lpage < 0); - } -} - -void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn) -{ - update_gfn_disallow_lpage_count(slot, gfn, 1); -} - -void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn) -{ - update_gfn_disallow_lpage_count(slot, gfn, -1); -} - -static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) -{ - struct kvm_memslots *slots; - struct kvm_memory_slot *slot; - gfn_t gfn; - - kvm->arch.indirect_shadow_pages++; - gfn = sp->gfn; - slots = kvm_memslots_for_spte_role(kvm, sp->role); - slot = __gfn_to_memslot(slots, gfn); - - /* the non-leaf shadow pages are keeping readonly. */ - if (sp->role.level > PT_PAGE_TABLE_LEVEL) - return kvm_slot_page_track_add_page(kvm, slot, gfn, - KVM_PAGE_TRACK_WRITE); - - kvm_mmu_gfn_disallow_lpage(slot, gfn); -} - -static void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp) -{ - if (sp->lpage_disallowed) - return; - - ++kvm->stat.nx_lpage_splits; - list_add_tail(&sp->lpage_disallowed_link, - &kvm->arch.lpage_disallowed_mmu_pages); - sp->lpage_disallowed = true; -} - -static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) -{ - struct kvm_memslots *slots; - struct kvm_memory_slot *slot; - gfn_t gfn; - - kvm->arch.indirect_shadow_pages--; - gfn = sp->gfn; - slots = kvm_memslots_for_spte_role(kvm, sp->role); - slot = __gfn_to_memslot(slots, gfn); - if (sp->role.level > PT_PAGE_TABLE_LEVEL) - return kvm_slot_page_track_remove_page(kvm, slot, gfn, - KVM_PAGE_TRACK_WRITE); - - kvm_mmu_gfn_allow_lpage(slot, gfn); -} - -static void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp) -{ - --kvm->stat.nx_lpage_splits; - sp->lpage_disallowed = false; - list_del(&sp->lpage_disallowed_link); -} - -static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level, - struct kvm_memory_slot *slot) -{ - struct kvm_lpage_info *linfo; - - if (slot) { - linfo = lpage_info_slot(gfn, slot, level); - return !!linfo->disallow_lpage; - } - - return true; -} - -static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn, - int level) -{ - struct kvm_memory_slot *slot; - - slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); - return __mmu_gfn_lpage_is_disallowed(gfn, level, slot); -} - -static int host_mapping_level(struct kvm *kvm, gfn_t gfn) -{ - unsigned long page_size; - int i, ret = 0; - - page_size = kvm_host_page_size(kvm, gfn); - - for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) { - if (page_size >= KVM_HPAGE_SIZE(i)) - ret = i; - else - break; - } - - return ret; -} - -static inline bool memslot_valid_for_gpte(struct kvm_memory_slot *slot, - bool no_dirty_log) -{ - if (!slot || slot->flags & KVM_MEMSLOT_INVALID) - return false; - if (no_dirty_log && slot->dirty_bitmap) - return false; - - return true; -} - -static struct kvm_memory_slot * -gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, - bool no_dirty_log) -{ - struct kvm_memory_slot *slot; - - slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); - if 
(!memslot_valid_for_gpte(slot, no_dirty_log)) - slot = NULL; - - return slot; -} - -static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn, - bool *force_pt_level) -{ - int host_level, level, max_level; - struct kvm_memory_slot *slot; - - if (unlikely(*force_pt_level)) - return PT_PAGE_TABLE_LEVEL; - - slot = kvm_vcpu_gfn_to_memslot(vcpu, large_gfn); - *force_pt_level = !memslot_valid_for_gpte(slot, true); - if (unlikely(*force_pt_level)) - return PT_PAGE_TABLE_LEVEL; - - host_level = host_mapping_level(vcpu->kvm, large_gfn); - - if (host_level == PT_PAGE_TABLE_LEVEL) - return host_level; - - max_level = min(kvm_x86_ops->get_lpage_level(), host_level); - - for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level) - if (__mmu_gfn_lpage_is_disallowed(large_gfn, level, slot)) - break; - - return level - 1; -} - -/* - * About rmap_head encoding: - * - * If the bit zero of rmap_head->val is clear, then it points to the only spte - * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct - * pte_list_desc containing more mappings. - */ - -/* - * Returns the number of pointers in the rmap chain, not counting the new one. - */ -static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte, - struct kvm_rmap_head *rmap_head) -{ - struct pte_list_desc *desc; - int i, count = 0; - - if (!rmap_head->val) { - rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte); - rmap_head->val = (unsigned long)spte; - } else if (!(rmap_head->val & 1)) { - rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte); - desc = mmu_alloc_pte_list_desc(vcpu); - desc->sptes[0] = (u64 *)rmap_head->val; - desc->sptes[1] = spte; - rmap_head->val = (unsigned long)desc | 1; - ++count; - } else { - rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte); - desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); - while (desc->sptes[PTE_LIST_EXT-1] && desc->more) { - desc = desc->more; - count += PTE_LIST_EXT; - } - if (desc->sptes[PTE_LIST_EXT-1]) { - desc->more = mmu_alloc_pte_list_desc(vcpu); - desc = desc->more; - } - for (i = 0; desc->sptes[i]; ++i) - ++count; - desc->sptes[i] = spte; - } - return count; -} - -static void -pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head, - struct pte_list_desc *desc, int i, - struct pte_list_desc *prev_desc) -{ - int j; - - for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j) - ; - desc->sptes[i] = desc->sptes[j]; - desc->sptes[j] = NULL; - if (j != 0) - return; - if (!prev_desc && !desc->more) - rmap_head->val = (unsigned long)desc->sptes[0]; - else - if (prev_desc) - prev_desc->more = desc->more; - else - rmap_head->val = (unsigned long)desc->more | 1; - mmu_free_pte_list_desc(desc); -} - -static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head) -{ - struct pte_list_desc *desc; - struct pte_list_desc *prev_desc; - int i; - - if (!rmap_head->val) { - pr_err("%s: %p 0->BUG\n", __func__, spte); - BUG(); - } else if (!(rmap_head->val & 1)) { - rmap_printk("%s: %p 1->0\n", __func__, spte); - if ((u64 *)rmap_head->val != spte) { - pr_err("%s: %p 1->BUG\n", __func__, spte); - BUG(); - } - rmap_head->val = 0; - } else { - rmap_printk("%s: %p many->many\n", __func__, spte); - desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); - prev_desc = NULL; - while (desc) { - for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) { - if (desc->sptes[i] == spte) { - pte_list_desc_remove_entry(rmap_head, - desc, i, prev_desc); - return; - } - } - prev_desc = desc; - desc = desc->more; - } - pr_err("%s: %p many->many\n", __func__, spte); - 
-		BUG();
-	}
-}
-
-static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep)
-{
-	mmu_spte_clear_track_bits(sptep);
-	__pte_list_remove(sptep, rmap_head);
-}
-
-static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
-					   struct kvm_memory_slot *slot)
-{
-	unsigned long idx;
-
-	idx = gfn_to_index(gfn, slot->base_gfn, level);
-	return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx];
-}
-
-static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
-					 struct kvm_mmu_page *sp)
-{
-	struct kvm_memslots *slots;
-	struct kvm_memory_slot *slot;
-
-	slots = kvm_memslots_for_spte_role(kvm, sp->role);
-	slot = __gfn_to_memslot(slots, gfn);
-	return __gfn_to_rmap(gfn, sp->role.level, slot);
-}
-
-static bool rmap_can_add(struct kvm_vcpu *vcpu)
-{
-	struct kvm_mmu_memory_cache *cache;
-
-	cache = &vcpu->arch.mmu_pte_list_desc_cache;
-	return mmu_memory_cache_free_objects(cache);
-}
-
-static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
-{
-	struct kvm_mmu_page *sp;
-	struct kvm_rmap_head *rmap_head;
-
-	sp = page_header(__pa(spte));
-	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
-	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
-	return pte_list_add(vcpu, spte, rmap_head);
-}
-
-static void rmap_remove(struct kvm *kvm, u64 *spte)
-{
-	struct kvm_mmu_page *sp;
-	gfn_t gfn;
-	struct kvm_rmap_head *rmap_head;
-
-	sp = page_header(__pa(spte));
-	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
-	rmap_head = gfn_to_rmap(kvm, gfn, sp);
-	__pte_list_remove(spte, rmap_head);
-}
-
-/*
- * Used by the following functions to iterate through the sptes linked by a
- * rmap. All fields are private and not assumed to be used outside.
- */
-struct rmap_iterator {
-	/* private fields */
-	struct pte_list_desc *desc;	/* holds the sptep if not NULL */
-	int pos;			/* index of the sptep */
-};
-
-/*
- * Iteration must be started by this function. This should also be used after
- * removing/dropping sptes from the rmap link because in such cases the
- * information in the itererator may not be valid.
- *
- * Returns sptep if found, NULL otherwise.
- */
-static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
-			   struct rmap_iterator *iter)
-{
-	u64 *sptep;
-
-	if (!rmap_head->val)
-		return NULL;
-
-	if (!(rmap_head->val & 1)) {
-		iter->desc = NULL;
-		sptep = (u64 *)rmap_head->val;
-		goto out;
-	}
-
-	iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
-	iter->pos = 0;
-	sptep = iter->desc->sptes[iter->pos];
-out:
-	BUG_ON(!is_shadow_present_pte(*sptep));
-	return sptep;
-}
-
-/*
- * Must be used with a valid iterator: e.g. after rmap_get_first().
- *
- * Returns sptep if found, NULL otherwise.
- */ -static u64 *rmap_get_next(struct rmap_iterator *iter) -{ - u64 *sptep; - - if (iter->desc) { - if (iter->pos < PTE_LIST_EXT - 1) { - ++iter->pos; - sptep = iter->desc->sptes[iter->pos]; - if (sptep) - goto out; - } - - iter->desc = iter->desc->more; - - if (iter->desc) { - iter->pos = 0; - /* desc->sptes[0] cannot be NULL */ - sptep = iter->desc->sptes[iter->pos]; - goto out; - } - } - - return NULL; -out: - BUG_ON(!is_shadow_present_pte(*sptep)); - return sptep; -} - -#define for_each_rmap_spte(_rmap_head_, _iter_, _spte_) \ - for (_spte_ = rmap_get_first(_rmap_head_, _iter_); \ - _spte_; _spte_ = rmap_get_next(_iter_)) - -static void drop_spte(struct kvm *kvm, u64 *sptep) -{ - if (mmu_spte_clear_track_bits(sptep)) - rmap_remove(kvm, sptep); -} - - -static bool __drop_large_spte(struct kvm *kvm, u64 *sptep) -{ - if (is_large_pte(*sptep)) { - WARN_ON(page_header(__pa(sptep))->role.level == - PT_PAGE_TABLE_LEVEL); - drop_spte(kvm, sptep); - --kvm->stat.lpages; - return true; - } - - return false; -} - -static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) -{ - if (__drop_large_spte(vcpu->kvm, sptep)) { - struct kvm_mmu_page *sp = page_header(__pa(sptep)); - - kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn, - KVM_PAGES_PER_HPAGE(sp->role.level)); - } -} - -/* - * Write-protect on the specified @sptep, @pt_protect indicates whether - * spte write-protection is caused by protecting shadow page table. - * - * Note: write protection is difference between dirty logging and spte - * protection: - * - for dirty logging, the spte can be set to writable at anytime if - * its dirty bitmap is properly set. - * - for spte protection, the spte can be writable only after unsync-ing - * shadow page. - * - * Return true if tlb need be flushed. - */ -static bool spte_write_protect(u64 *sptep, bool pt_protect) -{ - u64 spte = *sptep; - - if (!is_writable_pte(spte) && - !(pt_protect && spte_can_locklessly_be_made_writable(spte))) - return false; - - rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep); - - if (pt_protect) - spte &= ~SPTE_MMU_WRITEABLE; - spte = spte & ~PT_WRITABLE_MASK; - - return mmu_spte_update(sptep, spte); -} - -static bool __rmap_write_protect(struct kvm *kvm, - struct kvm_rmap_head *rmap_head, - bool pt_protect) -{ - u64 *sptep; - struct rmap_iterator iter; - bool flush = false; - - for_each_rmap_spte(rmap_head, &iter, sptep) - flush |= spte_write_protect(sptep, pt_protect); - - return flush; -} - -static bool spte_clear_dirty(u64 *sptep) -{ - u64 spte = *sptep; - - rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep); - - MMU_WARN_ON(!spte_ad_enabled(spte)); - spte &= ~shadow_dirty_mask; - return mmu_spte_update(sptep, spte); -} - -static bool spte_wrprot_for_clear_dirty(u64 *sptep) -{ - bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT, - (unsigned long *)sptep); - if (was_writable && !spte_ad_enabled(*sptep)) - kvm_set_pfn_dirty(spte_to_pfn(*sptep)); - - return was_writable; -} - -/* - * Gets the GFN ready for another round of dirty logging by clearing the - * - D bit on ad-enabled SPTEs, and - * - W bit on ad-disabled SPTEs. - * Returns true iff any D or W bits were cleared. 
- */ -static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head) -{ - u64 *sptep; - struct rmap_iterator iter; - bool flush = false; - - for_each_rmap_spte(rmap_head, &iter, sptep) - if (spte_ad_need_write_protect(*sptep)) - flush |= spte_wrprot_for_clear_dirty(sptep); - else - flush |= spte_clear_dirty(sptep); - - return flush; -} - -static bool spte_set_dirty(u64 *sptep) -{ - u64 spte = *sptep; - - rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep); - - /* - * Similar to the !kvm_x86_ops->slot_disable_log_dirty case, - * do not bother adding back write access to pages marked - * SPTE_AD_WRPROT_ONLY_MASK. - */ - spte |= shadow_dirty_mask; - - return mmu_spte_update(sptep, spte); -} - -static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head) -{ - u64 *sptep; - struct rmap_iterator iter; - bool flush = false; - - for_each_rmap_spte(rmap_head, &iter, sptep) - if (spte_ad_enabled(*sptep)) - flush |= spte_set_dirty(sptep); - - return flush; -} - -/** - * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages - * @kvm: kvm instance - * @slot: slot to protect - * @gfn_offset: start of the BITS_PER_LONG pages we care about - * @mask: indicates which pages we should protect - * - * Used when we do not need to care about huge page mappings: e.g. during dirty - * logging we do not have any such mappings. - */ -static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, - struct kvm_memory_slot *slot, - gfn_t gfn_offset, unsigned long mask) -{ - struct kvm_rmap_head *rmap_head; - - while (mask) { - rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), - PT_PAGE_TABLE_LEVEL, slot); - __rmap_write_protect(kvm, rmap_head, false); - - /* clear the first set bit */ - mask &= mask - 1; - } -} - -/** - * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write - * protect the page if the D-bit isn't supported. - * @kvm: kvm instance - * @slot: slot to clear D-bit - * @gfn_offset: start of the BITS_PER_LONG pages we care about - * @mask: indicates which pages we should clear D-bit - * - * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap. - */ -void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, - struct kvm_memory_slot *slot, - gfn_t gfn_offset, unsigned long mask) -{ - struct kvm_rmap_head *rmap_head; - - while (mask) { - rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), - PT_PAGE_TABLE_LEVEL, slot); - __rmap_clear_dirty(kvm, rmap_head); - - /* clear the first set bit */ - mask &= mask - 1; - } -} -EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked); - -/** - * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected - * PT level pages. - * - * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to - * enable dirty logging for them. - * - * Used when we do not need to care about huge page mappings: e.g. during dirty - * logging we do not have any such mappings. 
- */ -void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, - struct kvm_memory_slot *slot, - gfn_t gfn_offset, unsigned long mask) -{ - if (kvm_x86_ops->enable_log_dirty_pt_masked) - kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset, - mask); - else - kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); -} - -/** - * kvm_arch_write_log_dirty - emulate dirty page logging - * @vcpu: Guest mode vcpu - * - * Emulate arch specific page modification logging for the - * nested hypervisor - */ -int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu) -{ - if (kvm_x86_ops->write_log_dirty) - return kvm_x86_ops->write_log_dirty(vcpu); - - return 0; -} - -bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, - struct kvm_memory_slot *slot, u64 gfn) -{ - struct kvm_rmap_head *rmap_head; - int i; - bool write_protected = false; - - for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) { - rmap_head = __gfn_to_rmap(gfn, i, slot); - write_protected |= __rmap_write_protect(kvm, rmap_head, true); - } - - return write_protected; -} - -static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn) -{ - struct kvm_memory_slot *slot; - - slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); - return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn); -} - -static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head) -{ - u64 *sptep; - struct rmap_iterator iter; - bool flush = false; - - while ((sptep = rmap_get_first(rmap_head, &iter))) { - rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep); - - pte_list_remove(rmap_head, sptep); - flush = true; - } - - return flush; -} - -static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, int level, - unsigned long data) -{ - return kvm_zap_rmapp(kvm, rmap_head); -} - -static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, int level, - unsigned long data) -{ - u64 *sptep; - struct rmap_iterator iter; - int need_flush = 0; - u64 new_spte; - pte_t *ptep = (pte_t *)data; - kvm_pfn_t new_pfn; - - WARN_ON(pte_huge(*ptep)); - new_pfn = pte_pfn(*ptep); - -restart: - for_each_rmap_spte(rmap_head, &iter, sptep) { - rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n", - sptep, *sptep, gfn, level); - - need_flush = 1; - - if (pte_write(*ptep)) { - pte_list_remove(rmap_head, sptep); - goto restart; - } else { - new_spte = *sptep & ~PT64_BASE_ADDR_MASK; - new_spte |= (u64)new_pfn << PAGE_SHIFT; - - new_spte &= ~PT_WRITABLE_MASK; - new_spte &= ~SPTE_HOST_WRITEABLE; - - new_spte = mark_spte_for_access_track(new_spte); - - mmu_spte_clear_track_bits(sptep); - mmu_spte_set(sptep, new_spte); - } - } - - if (need_flush && kvm_available_flush_tlb_with_range()) { - kvm_flush_remote_tlbs_with_address(kvm, gfn, 1); - return 0; - } - - return need_flush; -} - -struct slot_rmap_walk_iterator { - /* input fields. */ - struct kvm_memory_slot *slot; - gfn_t start_gfn; - gfn_t end_gfn; - int start_level; - int end_level; - - /* output fields. */ - gfn_t gfn; - struct kvm_rmap_head *rmap; - int level; - - /* private field. 
 */
-	struct kvm_rmap_head *end_rmap;
-};
-
-static void
-rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
-{
-	iterator->level = level;
-	iterator->gfn = iterator->start_gfn;
-	iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot);
-	iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level,
-					   iterator->slot);
-}
-
-static void
-slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
-		    struct kvm_memory_slot *slot, int start_level,
-		    int end_level, gfn_t start_gfn, gfn_t end_gfn)
-{
-	iterator->slot = slot;
-	iterator->start_level = start_level;
-	iterator->end_level = end_level;
-	iterator->start_gfn = start_gfn;
-	iterator->end_gfn = end_gfn;
-
-	rmap_walk_init_level(iterator, iterator->start_level);
-}
-
-static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
-{
-	return !!iterator->rmap;
-}
-
-static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
-{
-	if (++iterator->rmap <= iterator->end_rmap) {
-		iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
-		return;
-	}
-
-	if (++iterator->level > iterator->end_level) {
-		iterator->rmap = NULL;
-		return;
-	}
-
-	rmap_walk_init_level(iterator, iterator->level);
-}
-
-#define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
-	   _start_gfn, _end_gfn, _iter_)				\
-	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,	\
-				 _end_level_, _start_gfn, _end_gfn);	\
-	     slot_rmap_walk_okay(_iter_);				\
-	     slot_rmap_walk_next(_iter_))
-
-static int kvm_handle_hva_range(struct kvm *kvm,
-				unsigned long start,
-				unsigned long end,
-				unsigned long data,
-				int (*handler)(struct kvm *kvm,
-					       struct kvm_rmap_head *rmap_head,
-					       struct kvm_memory_slot *slot,
-					       gfn_t gfn,
-					       int level,
-					       unsigned long data))
-{
-	struct kvm_memslots *slots;
-	struct kvm_memory_slot *memslot;
-	struct slot_rmap_walk_iterator iterator;
-	int ret = 0;
-	int i;
-
-	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
-		slots = __kvm_memslots(kvm, i);
-		kvm_for_each_memslot(memslot, slots) {
-			unsigned long hva_start, hva_end;
-			gfn_t gfn_start, gfn_end;
-
-			hva_start = max(start, memslot->userspace_addr);
-			hva_end = min(end, memslot->userspace_addr +
-				      (memslot->npages << PAGE_SHIFT));
-			if (hva_start >= hva_end)
-				continue;
-			/*
-			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
-			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
- */ - gfn_start = hva_to_gfn_memslot(hva_start, memslot); - gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); - - for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL, - PT_MAX_HUGEPAGE_LEVEL, - gfn_start, gfn_end - 1, - &iterator) - ret |= handler(kvm, iterator.rmap, memslot, - iterator.gfn, iterator.level, data); - } - } - - return ret; -} - -static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, - unsigned long data, - int (*handler)(struct kvm *kvm, - struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, - gfn_t gfn, int level, - unsigned long data)) -{ - return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler); -} - -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) -{ - return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp); -} - -int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) -{ - return kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp); -} - -static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, int level, - unsigned long data) -{ - u64 *sptep; - struct rmap_iterator uninitialized_var(iter); - int young = 0; - - for_each_rmap_spte(rmap_head, &iter, sptep) - young |= mmu_spte_age(sptep); - - trace_kvm_age_page(gfn, level, slot, young); - return young; -} - -static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, - int level, unsigned long data) -{ - u64 *sptep; - struct rmap_iterator iter; - - for_each_rmap_spte(rmap_head, &iter, sptep) - if (is_accessed_spte(*sptep)) - return 1; - return 0; -} - -#define RMAP_RECYCLE_THRESHOLD 1000 - -static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) -{ - struct kvm_rmap_head *rmap_head; - struct kvm_mmu_page *sp; - - sp = page_header(__pa(spte)); - - rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp); - - kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0); - kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn, - KVM_PAGES_PER_HPAGE(sp->role.level)); -} - -int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) -{ - return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp); -} - -int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) -{ - return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp); -} - -#ifdef MMU_DEBUG -static int is_empty_shadow_page(u64 *spt) -{ - u64 *pos; - u64 *end; - - for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++) - if (is_shadow_present_pte(*pos)) { - printk(KERN_ERR "%s: %p %llx\n", __func__, - pos, *pos); - return 0; - } - return 1; -} -#endif - -/* - * This value is the sum of all of the kvm instances's - * kvm->arch.n_used_mmu_pages values. 
We need a global, - * aggregate version in order to make the slab shrinker - * faster - */ -static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr) -{ - kvm->arch.n_used_mmu_pages += nr; - percpu_counter_add(&kvm_total_used_mmu_pages, nr); -} - -static void kvm_mmu_free_page(struct kvm_mmu_page *sp) -{ - MMU_WARN_ON(!is_empty_shadow_page(sp->spt)); - hlist_del(&sp->hash_link); - list_del(&sp->link); - free_page((unsigned long)sp->spt); - if (!sp->role.direct) - free_page((unsigned long)sp->gfns); - kmem_cache_free(mmu_page_header_cache, sp); -} - -static unsigned kvm_page_table_hashfn(gfn_t gfn) -{ - return hash_64(gfn, KVM_MMU_HASH_SHIFT); -} - -static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu, - struct kvm_mmu_page *sp, u64 *parent_pte) -{ - if (!parent_pte) - return; - - pte_list_add(vcpu, parent_pte, &sp->parent_ptes); -} - -static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp, - u64 *parent_pte) -{ - __pte_list_remove(parent_pte, &sp->parent_ptes); -} - -static void drop_parent_pte(struct kvm_mmu_page *sp, - u64 *parent_pte) -{ - mmu_page_remove_parent_pte(sp, parent_pte); - mmu_spte_clear_no_track(parent_pte); -} - -static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct) -{ - struct kvm_mmu_page *sp; - - sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache); - sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); - if (!direct) - sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); - set_page_private(virt_to_page(sp->spt), (unsigned long)sp); - - /* - * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages() - * depends on valid pages being added to the head of the list. See - * comments in kvm_zap_obsolete_pages(). - */ - sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen; - list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); - kvm_mod_used_mmu_pages(vcpu->kvm, +1); - return sp; -} - -static void mark_unsync(u64 *spte); -static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp) -{ - u64 *sptep; - struct rmap_iterator iter; - - for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) { - mark_unsync(sptep); - } -} - -static void mark_unsync(u64 *spte) -{ - struct kvm_mmu_page *sp; - unsigned int index; - - sp = page_header(__pa(spte)); - index = spte - sp->spt; - if (__test_and_set_bit(index, sp->unsync_child_bitmap)) - return; - if (sp->unsync_children++) - return; - kvm_mmu_mark_parents_unsync(sp); -} - -static int nonpaging_sync_page(struct kvm_vcpu *vcpu, - struct kvm_mmu_page *sp) -{ - return 0; -} - -static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root) -{ -} - -static void nonpaging_update_pte(struct kvm_vcpu *vcpu, - struct kvm_mmu_page *sp, u64 *spte, - const void *pte) -{ - WARN_ON(1); -} - -#define KVM_PAGE_ARRAY_NR 16 - -struct kvm_mmu_pages { - struct mmu_page_and_offset { - struct kvm_mmu_page *sp; - unsigned int idx; - } page[KVM_PAGE_ARRAY_NR]; - unsigned int nr; -}; - -static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, - int idx) -{ - int i; - - if (sp->unsync) - for (i=0; i < pvec->nr; i++) - if (pvec->page[i].sp == sp) - return 0; - - pvec->page[pvec->nr].sp = sp; - pvec->page[pvec->nr].idx = idx; - pvec->nr++; - return (pvec->nr == KVM_PAGE_ARRAY_NR); -} - -static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx) -{ - --sp->unsync_children; - WARN_ON((int)sp->unsync_children < 0); - __clear_bit(idx, sp->unsync_child_bitmap); -} - -static int __mmu_unsync_walk(struct kvm_mmu_page *sp, - struct 
kvm_mmu_pages *pvec) -{ - int i, ret, nr_unsync_leaf = 0; - - for_each_set_bit(i, sp->unsync_child_bitmap, 512) { - struct kvm_mmu_page *child; - u64 ent = sp->spt[i]; - - if (!is_shadow_present_pte(ent) || is_large_pte(ent)) { - clear_unsync_child_bit(sp, i); - continue; - } - - child = page_header(ent & PT64_BASE_ADDR_MASK); - - if (child->unsync_children) { - if (mmu_pages_add(pvec, child, i)) - return -ENOSPC; - - ret = __mmu_unsync_walk(child, pvec); - if (!ret) { - clear_unsync_child_bit(sp, i); - continue; - } else if (ret > 0) { - nr_unsync_leaf += ret; - } else - return ret; - } else if (child->unsync) { - nr_unsync_leaf++; - if (mmu_pages_add(pvec, child, i)) - return -ENOSPC; - } else - clear_unsync_child_bit(sp, i); - } - - return nr_unsync_leaf; -} - -#define INVALID_INDEX (-1) - -static int mmu_unsync_walk(struct kvm_mmu_page *sp, - struct kvm_mmu_pages *pvec) -{ - pvec->nr = 0; - if (!sp->unsync_children) - return 0; - - mmu_pages_add(pvec, sp, INVALID_INDEX); - return __mmu_unsync_walk(sp, pvec); -} - -static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) -{ - WARN_ON(!sp->unsync); - trace_kvm_mmu_sync_page(sp); - sp->unsync = 0; - --kvm->stat.mmu_unsync; -} - -static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, - struct list_head *invalid_list); -static void kvm_mmu_commit_zap_page(struct kvm *kvm, - struct list_head *invalid_list); - - -#define for_each_valid_sp(_kvm, _sp, _gfn) \ - hlist_for_each_entry(_sp, \ - &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \ - if (is_obsolete_sp((_kvm), (_sp))) { \ - } else - -#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn) \ - for_each_valid_sp(_kvm, _sp, _gfn) \ - if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else - -static inline bool is_ept_sp(struct kvm_mmu_page *sp) -{ - return sp->role.cr0_wp && sp->role.smap_andnot_wp; -} - -/* @sp->gfn should be write-protected at the call site */ -static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, - struct list_head *invalid_list) -{ - if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) || - vcpu->arch.mmu->sync_page(vcpu, sp) == 0) { - kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); - return false; - } - - return true; -} - -static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm, - struct list_head *invalid_list, - bool remote_flush) -{ - if (!remote_flush && list_empty(invalid_list)) - return false; - - if (!list_empty(invalid_list)) - kvm_mmu_commit_zap_page(kvm, invalid_list); - else - kvm_flush_remote_tlbs(kvm); - return true; -} - -static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu, - struct list_head *invalid_list, - bool remote_flush, bool local_flush) -{ - if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush)) - return; - - if (local_flush) - kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); -} - -#ifdef CONFIG_KVM_MMU_AUDIT -#include "mmu_audit.c" -#else -static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { } -static void mmu_audit_disable(void) { } -#endif - -static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp) -{ - return sp->role.invalid || - unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen); -} - -static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, - struct list_head *invalid_list) -{ - kvm_unlink_unsync_page(vcpu->kvm, sp); - return __kvm_sync_page(vcpu, sp, invalid_list); -} - -/* @gfn should be write-protected at the call site */ -static bool kvm_sync_pages(struct kvm_vcpu *vcpu, 
gfn_t gfn, - struct list_head *invalid_list) -{ - struct kvm_mmu_page *s; - bool ret = false; - - for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { - if (!s->unsync) - continue; - - WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); - ret |= kvm_sync_page(vcpu, s, invalid_list); - } - - return ret; -} - -struct mmu_page_path { - struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL]; - unsigned int idx[PT64_ROOT_MAX_LEVEL]; -}; - -#define for_each_sp(pvec, sp, parents, i) \ - for (i = mmu_pages_first(&pvec, &parents); \ - i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \ - i = mmu_pages_next(&pvec, &parents, i)) - -static int mmu_pages_next(struct kvm_mmu_pages *pvec, - struct mmu_page_path *parents, - int i) -{ - int n; - - for (n = i+1; n < pvec->nr; n++) { - struct kvm_mmu_page *sp = pvec->page[n].sp; - unsigned idx = pvec->page[n].idx; - int level = sp->role.level; - - parents->idx[level-1] = idx; - if (level == PT_PAGE_TABLE_LEVEL) - break; - - parents->parent[level-2] = sp; - } - - return n; -} - -static int mmu_pages_first(struct kvm_mmu_pages *pvec, - struct mmu_page_path *parents) -{ - struct kvm_mmu_page *sp; - int level; - - if (pvec->nr == 0) - return 0; - - WARN_ON(pvec->page[0].idx != INVALID_INDEX); - - sp = pvec->page[0].sp; - level = sp->role.level; - WARN_ON(level == PT_PAGE_TABLE_LEVEL); - - parents->parent[level-2] = sp; - - /* Also set up a sentinel. Further entries in pvec are all - * children of sp, so this element is never overwritten. - */ - parents->parent[level-1] = NULL; - return mmu_pages_next(pvec, parents, 0); -} - -static void mmu_pages_clear_parents(struct mmu_page_path *parents) -{ - struct kvm_mmu_page *sp; - unsigned int level = 0; - - do { - unsigned int idx = parents->idx[level]; - sp = parents->parent[level]; - if (!sp) - return; - - WARN_ON(idx == INVALID_INDEX); - clear_unsync_child_bit(sp, idx); - level++; - } while (!sp->unsync_children); -} - -static void mmu_sync_children(struct kvm_vcpu *vcpu, - struct kvm_mmu_page *parent) -{ - int i; - struct kvm_mmu_page *sp; - struct mmu_page_path parents; - struct kvm_mmu_pages pages; - LIST_HEAD(invalid_list); - bool flush = false; - - while (mmu_unsync_walk(parent, &pages)) { - bool protected = false; - - for_each_sp(pages, sp, parents, i) - protected |= rmap_write_protect(vcpu, sp->gfn); - - if (protected) { - kvm_flush_remote_tlbs(vcpu->kvm); - flush = false; - } - - for_each_sp(pages, sp, parents, i) { - flush |= kvm_sync_page(vcpu, sp, &invalid_list); - mmu_pages_clear_parents(&parents); - } - if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) { - kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); - cond_resched_lock(&vcpu->kvm->mmu_lock); - flush = false; - } - } - - kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); -} - -static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp) -{ - atomic_set(&sp->write_flooding_count, 0); -} - -static void clear_sp_write_flooding_count(u64 *spte) -{ - struct kvm_mmu_page *sp = page_header(__pa(spte)); - - __clear_sp_write_flooding_count(sp); -} - -static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, - gfn_t gfn, - gva_t gaddr, - unsigned level, - int direct, - unsigned access) -{ - union kvm_mmu_page_role role; - unsigned quadrant; - struct kvm_mmu_page *sp; - bool need_sync = false; - bool flush = false; - int collisions = 0; - LIST_HEAD(invalid_list); - - role = vcpu->arch.mmu->mmu_role.base; - role.level = level; - role.direct = direct; - if (role.direct) - role.gpte_is_8_bytes = true; - role.access = access; - if 
(!vcpu->arch.mmu->direct_map - && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) { - quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); - quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1; - role.quadrant = quadrant; - } - for_each_valid_sp(vcpu->kvm, sp, gfn) { - if (sp->gfn != gfn) { - collisions++; - continue; - } - - if (!need_sync && sp->unsync) - need_sync = true; - - if (sp->role.word != role.word) - continue; - - if (sp->unsync) { - /* The page is good, but __kvm_sync_page might still end - * up zapping it. If so, break in order to rebuild it. - */ - if (!__kvm_sync_page(vcpu, sp, &invalid_list)) - break; - - WARN_ON(!list_empty(&invalid_list)); - kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); - } - - if (sp->unsync_children) - kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); - - __clear_sp_write_flooding_count(sp); - trace_kvm_mmu_get_page(sp, false); - goto out; - } - - ++vcpu->kvm->stat.mmu_cache_miss; - - sp = kvm_mmu_alloc_page(vcpu, direct); - - sp->gfn = gfn; - sp->role = role; - hlist_add_head(&sp->hash_link, - &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]); - if (!direct) { - /* - * we should do write protection before syncing pages - * otherwise the content of the synced shadow page may - * be inconsistent with guest page table. - */ - account_shadowed(vcpu->kvm, sp); - if (level == PT_PAGE_TABLE_LEVEL && - rmap_write_protect(vcpu, gfn)) - kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1); - - if (level > PT_PAGE_TABLE_LEVEL && need_sync) - flush |= kvm_sync_pages(vcpu, gfn, &invalid_list); - } - clear_page(sp->spt); - trace_kvm_mmu_get_page(sp, true); - - kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); -out: - if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions) - vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions; - return sp; -} - -static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator, - struct kvm_vcpu *vcpu, hpa_t root, - u64 addr) -{ - iterator->addr = addr; - iterator->shadow_addr = root; - iterator->level = vcpu->arch.mmu->shadow_root_level; - - if (iterator->level == PT64_ROOT_4LEVEL && - vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL && - !vcpu->arch.mmu->direct_map) - --iterator->level; - - if (iterator->level == PT32E_ROOT_LEVEL) { - /* - * prev_root is currently only used for 64-bit hosts. So only - * the active root_hpa is valid here. 
-		 */
-		BUG_ON(root != vcpu->arch.mmu->root_hpa);
-
-		iterator->shadow_addr
-			= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
-		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
-		--iterator->level;
-		if (!iterator->shadow_addr)
-			iterator->level = 0;
-	}
-}
-
-static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
-			     struct kvm_vcpu *vcpu, u64 addr)
-{
-	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
-				    addr);
-}
-
-static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
-{
-	if (iterator->level < PT_PAGE_TABLE_LEVEL)
-		return false;
-
-	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
-	iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
-	return true;
-}
-
-static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
-			       u64 spte)
-{
-	if (is_last_spte(spte, iterator->level)) {
-		iterator->level = 0;
-		return;
-	}
-
-	iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
-	--iterator->level;
-}
-
-static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
-{
-	__shadow_walk_next(iterator, *iterator->sptep);
-}
-
-static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
-			     struct kvm_mmu_page *sp)
-{
-	u64 spte;
-
-	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
-
-	spte = __pa(sp->spt) | shadow_present_mask | PT_WRITABLE_MASK |
-	       shadow_user_mask | shadow_x_mask | shadow_me_mask;
-
-	if (sp_ad_disabled(sp))
-		spte |= SPTE_AD_DISABLED_MASK;
-	else
-		spte |= shadow_accessed_mask;
-
-	mmu_spte_set(sptep, spte);
-
-	mmu_page_add_parent_pte(vcpu, sp, sptep);
-
-	if (sp->unsync_children || sp->unsync)
-		mark_unsync(sptep);
-}
-
-static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
-				 unsigned direct_access)
-{
-	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
-		struct kvm_mmu_page *child;
-
-		/*
-		 * For the direct sp, if the guest pte's dirty bit
-		 * changed form clean to dirty, it will corrupt the
-		 * sp's access: allow writable in the read-only sp,
-		 * so we should update the spte at this point to get
-		 * a new sp with the correct access.
- */ - child = page_header(*sptep & PT64_BASE_ADDR_MASK); - if (child->role.access == direct_access) - return; - - drop_parent_pte(child, sptep); - kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1); - } -} - -static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, - u64 *spte) -{ - u64 pte; - struct kvm_mmu_page *child; - - pte = *spte; - if (is_shadow_present_pte(pte)) { - if (is_last_spte(pte, sp->role.level)) { - drop_spte(kvm, spte); - if (is_large_pte(pte)) - --kvm->stat.lpages; - } else { - child = page_header(pte & PT64_BASE_ADDR_MASK); - drop_parent_pte(child, spte); - } - return true; - } - - if (is_mmio_spte(pte)) - mmu_spte_clear_no_track(spte); - - return false; -} - -static void kvm_mmu_page_unlink_children(struct kvm *kvm, - struct kvm_mmu_page *sp) -{ - unsigned i; - - for (i = 0; i < PT64_ENT_PER_PAGE; ++i) - mmu_page_zap_pte(kvm, sp, sp->spt + i); -} - -static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) -{ - u64 *sptep; - struct rmap_iterator iter; - - while ((sptep = rmap_get_first(&sp->parent_ptes, &iter))) - drop_parent_pte(sp, sptep); -} - -static int mmu_zap_unsync_children(struct kvm *kvm, - struct kvm_mmu_page *parent, - struct list_head *invalid_list) -{ - int i, zapped = 0; - struct mmu_page_path parents; - struct kvm_mmu_pages pages; - - if (parent->role.level == PT_PAGE_TABLE_LEVEL) - return 0; - - while (mmu_unsync_walk(parent, &pages)) { - struct kvm_mmu_page *sp; - - for_each_sp(pages, sp, parents, i) { - kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); - mmu_pages_clear_parents(&parents); - zapped++; - } - } - - return zapped; -} - -static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm, - struct kvm_mmu_page *sp, - struct list_head *invalid_list, - int *nr_zapped) -{ - bool list_unstable; - - trace_kvm_mmu_prepare_zap_page(sp); - ++kvm->stat.mmu_shadow_zapped; - *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list); - kvm_mmu_page_unlink_children(kvm, sp); - kvm_mmu_unlink_parents(kvm, sp); - - /* Zapping children means active_mmu_pages has become unstable. */ - list_unstable = *nr_zapped; - - if (!sp->role.invalid && !sp->role.direct) - unaccount_shadowed(kvm, sp); - - if (sp->unsync) - kvm_unlink_unsync_page(kvm, sp); - if (!sp->root_count) { - /* Count self */ - (*nr_zapped)++; - list_move(&sp->link, invalid_list); - kvm_mod_used_mmu_pages(kvm, -1); - } else { - list_move(&sp->link, &kvm->arch.active_mmu_pages); - - /* - * Obsolete pages cannot be used on any vCPUs, see the comment - * in kvm_mmu_zap_all_fast(). Note, is_obsolete_sp() also - * treats invalid shadow pages as being obsolete. - */ - if (!is_obsolete_sp(kvm, sp)) - kvm_reload_remote_mmus(kvm); - } - - if (sp->lpage_disallowed) - unaccount_huge_nx_page(kvm, sp); - - sp->role.invalid = 1; - return list_unstable; -} - -static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, - struct list_head *invalid_list) -{ - int nr_zapped; - - __kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped); - return nr_zapped; -} - -static void kvm_mmu_commit_zap_page(struct kvm *kvm, - struct list_head *invalid_list) -{ - struct kvm_mmu_page *sp, *nsp; - - if (list_empty(invalid_list)) - return; - - /* - * We need to make sure everyone sees our modifications to - * the page tables and see changes to vcpu->mode here. The barrier - * in the kvm_flush_remote_tlbs() achieves this. This pairs - * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end. 
- * - * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit - * guest mode and/or lockless shadow page table walks. - */ - kvm_flush_remote_tlbs(kvm); - - list_for_each_entry_safe(sp, nsp, invalid_list, link) { - WARN_ON(!sp->role.invalid || sp->root_count); - kvm_mmu_free_page(sp); - } -} - -static bool prepare_zap_oldest_mmu_page(struct kvm *kvm, - struct list_head *invalid_list) -{ - struct kvm_mmu_page *sp; - - if (list_empty(&kvm->arch.active_mmu_pages)) - return false; - - sp = list_last_entry(&kvm->arch.active_mmu_pages, - struct kvm_mmu_page, link); - return kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); -} - -/* - * Changing the number of mmu pages allocated to the vm - * Note: if goal_nr_mmu_pages is too small, you will get dead lock - */ -void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages) -{ - LIST_HEAD(invalid_list); - - spin_lock(&kvm->mmu_lock); - - if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) { - /* Need to free some mmu pages to achieve the goal. */ - while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) - if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list)) - break; - - kvm_mmu_commit_zap_page(kvm, &invalid_list); - goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages; - } - - kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages; - - spin_unlock(&kvm->mmu_lock); -} - -int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) -{ - struct kvm_mmu_page *sp; - LIST_HEAD(invalid_list); - int r; - - pgprintk("%s: looking for gfn %llx\n", __func__, gfn); - r = 0; - spin_lock(&kvm->mmu_lock); - for_each_gfn_indirect_valid_sp(kvm, sp, gfn) { - pgprintk("%s: gfn %llx role %x\n", __func__, gfn, - sp->role.word); - r = 1; - kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); - } - kvm_mmu_commit_zap_page(kvm, &invalid_list); - spin_unlock(&kvm->mmu_lock); - - return r; -} -EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page); - -static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) -{ - trace_kvm_mmu_unsync_page(sp); - ++vcpu->kvm->stat.mmu_unsync; - sp->unsync = 1; - - kvm_mmu_mark_parents_unsync(sp); -} - -static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, - bool can_unsync) -{ - struct kvm_mmu_page *sp; - - if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE)) - return true; - - for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { - if (!can_unsync) - return true; - - if (sp->unsync) - continue; - - WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL); - kvm_unsync_page(vcpu, sp); - } - - /* - * We need to ensure that the marking of unsync pages is visible - * before the SPTE is updated to allow writes because - * kvm_mmu_sync_roots() checks the unsync flags without holding - * the MMU lock and so can race with this. If the SPTE was updated - * before the page had been marked as unsync-ed, something like the - * following could happen: - * - * CPU 1 CPU 2 - * --------------------------------------------------------------------- - * 1.2 Host updates SPTE - * to be writable - * 2.1 Guest writes a GPTE for GVA X. - * (GPTE being in the guest page table shadowed - * by the SP from CPU 1.) - * This reads SPTE during the page table walk. - * Since SPTE.W is read as 1, there is no - * fault. - * - * 2.2 Guest issues TLB flush. - * That causes a VM Exit. - * - * 2.3 kvm_mmu_sync_pages() reads sp->unsync. - * Since it is false, so it just returns. - * - * 2.4 Guest accesses GVA X. - * Since the mapping in the SP was not updated, - * so the old mapping for GVA X incorrectly - * gets used. 
- * 1.1 Host marks SP - * as unsync - * (sp->unsync = true) - * - * The write barrier below ensures that 1.1 happens before 1.2 and thus - * the situation in 2.4 does not arise. The implicit barrier in 2.2 - * pairs with this write barrier. - */ - smp_wmb(); - - return false; -} - -static bool kvm_is_mmio_pfn(kvm_pfn_t pfn) -{ - if (pfn_valid(pfn)) - return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) && - /* - * Some reserved pages, such as those from NVDIMM - * DAX devices, are not for MMIO, and can be mapped - * with cached memory type for better performance. - * However, the above check misconceives those pages - * as MMIO, and results in KVM mapping them with UC - * memory type, which would hurt the performance. - * Therefore, we check the host memory type in addition - * and only treat UC/UC-/WC pages as MMIO. - */ - (!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn)); - - return !e820__mapped_raw_any(pfn_to_hpa(pfn), - pfn_to_hpa(pfn + 1) - 1, - E820_TYPE_RAM); -} - -/* Bits which may be returned by set_spte() */ -#define SET_SPTE_WRITE_PROTECTED_PT BIT(0) -#define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1) - -static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, - unsigned pte_access, int level, - gfn_t gfn, kvm_pfn_t pfn, bool speculative, - bool can_unsync, bool host_writable) -{ - u64 spte = 0; - int ret = 0; - struct kvm_mmu_page *sp; - - if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access)) - return 0; - - sp = page_header(__pa(sptep)); - if (sp_ad_disabled(sp)) - spte |= SPTE_AD_DISABLED_MASK; - else if (kvm_vcpu_ad_need_write_protect(vcpu)) - spte |= SPTE_AD_WRPROT_ONLY_MASK; - - /* - * For the EPT case, shadow_present_mask is 0 if hardware - * supports exec-only page table entries. In that case, - * ACC_USER_MASK and shadow_user_mask are used to represent - * read access. See FNAME(gpte_access) in paging_tmpl.h. - */ - spte |= shadow_present_mask; - if (!speculative) - spte |= spte_shadow_accessed_mask(spte); - - if (level > PT_PAGE_TABLE_LEVEL && (pte_access & ACC_EXEC_MASK) && - is_nx_huge_page_enabled()) { - pte_access &= ~ACC_EXEC_MASK; - } - - if (pte_access & ACC_EXEC_MASK) - spte |= shadow_x_mask; - else - spte |= shadow_nx_mask; - - if (pte_access & ACC_USER_MASK) - spte |= shadow_user_mask; - - if (level > PT_PAGE_TABLE_LEVEL) - spte |= PT_PAGE_SIZE_MASK; - if (tdp_enabled) - spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, - kvm_is_mmio_pfn(pfn)); - - if (host_writable) - spte |= SPTE_HOST_WRITEABLE; - else - pte_access &= ~ACC_WRITE_MASK; - - if (!kvm_is_mmio_pfn(pfn)) - spte |= shadow_me_mask; - - spte |= (u64)pfn << PAGE_SHIFT; - - if (pte_access & ACC_WRITE_MASK) { - - /* - * Other vcpu creates new sp in the window between - * mapping_level() and acquiring mmu-lock. We can - * allow guest to retry the access, the mapping can - * be fixed if guest refault. - */ - if (level > PT_PAGE_TABLE_LEVEL && - mmu_gfn_lpage_is_disallowed(vcpu, gfn, level)) - goto done; - - spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE; - - /* - * Optimization: for pte sync, if spte was writable the hash - * lookup is unnecessary (and expensive). Write protection - * is responsibility of mmu_get_page / kvm_sync_page. - * Same reasoning can be applied to dirty page accounting. 
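/*
 * The smp_wmb() in mmu_need_write_protect() above enforces exactly the
 * "mark unsync before publishing the writable SPTE" ordering described in
 * that comment.  Below is a generic C11 analogue of the pattern, offered
 * as an illustration only -- it is not KVM code and the variable names are
 * invented for the sketch.
 */
#include <stdatomic.h>
#include <stdbool.h>

static _Atomic bool page_unsync;        /* plays the role of sp->unsync      */
static _Atomic bool spte_writable;      /* plays the role of the SPTE update */

/* Writer: mark the page unsync strictly before making the SPTE writable. */
static void mark_then_publish(void)
{
        atomic_store_explicit(&page_unsync, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);      /* ~ smp_wmb() */
        atomic_store_explicit(&spte_writable, true, memory_order_relaxed);
}

/* Reader: once the writable SPTE is visible, the unsync flag must be too. */
static bool unsync_visible_after_spte(void)
{
        if (!atomic_load_explicit(&spte_writable, memory_order_relaxed))
                return false;
        atomic_thread_fence(memory_order_acquire);      /* pairing barrier */
        return atomic_load_explicit(&page_unsync, memory_order_relaxed);
}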
- */ - if (!can_unsync && is_writable_pte(*sptep)) - goto set_pte; - - if (mmu_need_write_protect(vcpu, gfn, can_unsync)) { - pgprintk("%s: found shadow page for %llx, marking ro\n", - __func__, gfn); - ret |= SET_SPTE_WRITE_PROTECTED_PT; - pte_access &= ~ACC_WRITE_MASK; - spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE); - } - } - - if (pte_access & ACC_WRITE_MASK) { - kvm_vcpu_mark_page_dirty(vcpu, gfn); - spte |= spte_shadow_dirty_mask(spte); - } - - if (speculative) - spte = mark_spte_for_access_track(spte); - -set_pte: - if (mmu_spte_update(sptep, spte)) - ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH; -done: - return ret; -} - -static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, - int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn, - bool speculative, bool host_writable) -{ - int was_rmapped = 0; - int rmap_count; - int set_spte_ret; - int ret = RET_PF_RETRY; - bool flush = false; - - pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__, - *sptep, write_fault, gfn); - - if (is_shadow_present_pte(*sptep)) { - /* - * If we overwrite a PTE page pointer with a 2MB PMD, unlink - * the parent of the now unreachable PTE. - */ - if (level > PT_PAGE_TABLE_LEVEL && - !is_large_pte(*sptep)) { - struct kvm_mmu_page *child; - u64 pte = *sptep; - - child = page_header(pte & PT64_BASE_ADDR_MASK); - drop_parent_pte(child, sptep); - flush = true; - } else if (pfn != spte_to_pfn(*sptep)) { - pgprintk("hfn old %llx new %llx\n", - spte_to_pfn(*sptep), pfn); - drop_spte(vcpu->kvm, sptep); - flush = true; - } else - was_rmapped = 1; - } - - set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn, - speculative, true, host_writable); - if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) { - if (write_fault) - ret = RET_PF_EMULATE; - kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); - } - - if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush) - kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, - KVM_PAGES_PER_HPAGE(level)); - - if (unlikely(is_mmio_spte(*sptep))) - ret = RET_PF_EMULATE; - - pgprintk("%s: setting spte %llx\n", __func__, *sptep); - trace_kvm_mmu_set_spte(level, gfn, sptep); - if (!was_rmapped && is_large_pte(*sptep)) - ++vcpu->kvm->stat.lpages; - - if (is_shadow_present_pte(*sptep)) { - if (!was_rmapped) { - rmap_count = rmap_add(vcpu, sptep, gfn); - if (rmap_count > RMAP_RECYCLE_THRESHOLD) - rmap_recycle(vcpu, sptep, gfn); - } - } - - return ret; -} - -static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, - bool no_dirty_log) -{ - struct kvm_memory_slot *slot; - - slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log); - if (!slot) - return KVM_PFN_ERR_FAULT; - - return gfn_to_pfn_memslot_atomic(slot, gfn); -} - -static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, - struct kvm_mmu_page *sp, - u64 *start, u64 *end) -{ - struct page *pages[PTE_PREFETCH_NUM]; - struct kvm_memory_slot *slot; - unsigned access = sp->role.access; - int i, ret; - gfn_t gfn; - - gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt); - slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK); - if (!slot) - return -1; - - ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start); - if (ret <= 0) - return -1; - - for (i = 0; i < ret; i++, gfn++, start++) { - mmu_set_spte(vcpu, start, access, 0, sp->role.level, gfn, - page_to_pfn(pages[i]), true, true); - put_page(pages[i]); - } - - return 0; -} - -static void __direct_pte_prefetch(struct kvm_vcpu *vcpu, - struct kvm_mmu_page *sp, u64 *sptep) -{ - u64 *spte, *start = NULL; - int i; - - 
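/*
 * The loop below scans the naturally aligned PTE_PREFETCH_NUM-entry window
 * around the faulting sptep and hands each run of not-yet-present entries
 * (delimited by already-present entries or the faulting entry itself) to
 * direct_pte_prefetch_many(), stopping early if a batch cannot be mapped.
 */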
WARN_ON(!sp->role.direct); - - i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1); - spte = sp->spt + i; - - for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) { - if (is_shadow_present_pte(*spte) || spte == sptep) { - if (!start) - continue; - if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0) - break; - start = NULL; - } else if (!start) - start = spte; - } -} - -static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) -{ - struct kvm_mmu_page *sp; - - sp = page_header(__pa(sptep)); - - /* - * Without accessed bits, there's no way to distinguish between - * actually accessed translations and prefetched, so disable pte - * prefetch if accessed bits aren't available. - */ - if (sp_ad_disabled(sp)) - return; - - if (sp->role.level > PT_PAGE_TABLE_LEVEL) - return; - - __direct_pte_prefetch(vcpu, sp, sptep); -} - -static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it, - gfn_t gfn, kvm_pfn_t *pfnp, int *levelp) -{ - int level = *levelp; - u64 spte = *it.sptep; - - if (it.level == level && level > PT_PAGE_TABLE_LEVEL && - is_nx_huge_page_enabled() && - is_shadow_present_pte(spte) && - !is_large_pte(spte)) { - /* - * A small SPTE exists for this pfn, but FNAME(fetch) - * and __direct_map would like to create a large PTE - * instead: just force them to go down another level, - * patching back for them into pfn the next 9 bits of - * the address. - */ - u64 page_mask = KVM_PAGES_PER_HPAGE(level) - KVM_PAGES_PER_HPAGE(level - 1); - *pfnp |= gfn & page_mask; - (*levelp)--; - } -} - -static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write, - int map_writable, int level, kvm_pfn_t pfn, - bool prefault, bool lpage_disallowed) -{ - struct kvm_shadow_walk_iterator it; - struct kvm_mmu_page *sp; - int ret; - gfn_t gfn = gpa >> PAGE_SHIFT; - gfn_t base_gfn = gfn; - - if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) - return RET_PF_RETRY; - - trace_kvm_mmu_spte_requested(gpa, level, pfn); - for_each_shadow_entry(vcpu, gpa, it) { - /* - * We cannot overwrite existing page tables with an NX - * large page, as the leaf could be executable. - */ - disallowed_hugepage_adjust(it, gfn, &pfn, &level); - - base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); - if (it.level == level) - break; - - drop_large_spte(vcpu, it.sptep); - if (!is_shadow_present_pte(*it.sptep)) { - sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr, - it.level - 1, true, ACC_ALL); - - link_shadow_page(vcpu, it.sptep, sp); - if (lpage_disallowed) - account_huge_nx_page(vcpu->kvm, sp); - } - } - - ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL, - write, level, base_gfn, pfn, prefault, - map_writable); - direct_pte_prefetch(vcpu, it.sptep); - ++vcpu->stat.pf_fixed; - return ret; -} - -static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk) -{ - send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk); -} - -static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn) -{ - /* - * Do not cache the mmio info caused by writing the readonly gfn - * into the spte otherwise read access on readonly gfn also can - * caused mmio page fault and treat it as mmio access. 
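/*
 * Standalone sketch (illustrative only, not part of the patch) of the pfn
 * adjustment done in disallowed_hugepage_adjust() above: when a mapping is
 * forced down one level, the next 9 bits of the gfn are patched into the
 * pfn so the smaller entry still points at the right subpage of the would-be
 * huge page.  All values below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

/* Pages covered by a mapping at @level on x86: 1, 512, 512*512, ... */
static uint64_t pages_per_hpage(int level)
{
        return 1ULL << (9 * (level - 1));
}

int main(void)
{
        uint64_t gfn = 0x12345;         /* faulting gfn (example value)       */
        uint64_t pfn = 0x12200;         /* 2M-aligned host pfn backing it     */
        int level = 2;                  /* would-be 2M mapping, forced to 4K  */
        uint64_t page_mask = pages_per_hpage(level) - pages_per_hpage(level - 1);

        pfn |= gfn & page_mask;         /* pull in the next 9 bits -> 0x12345 */
        printf("adjusted pfn: 0x%llx\n", (unsigned long long)pfn);
        return 0;
}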
- */ - if (pfn == KVM_PFN_ERR_RO_FAULT) - return RET_PF_EMULATE; - - if (pfn == KVM_PFN_ERR_HWPOISON) { - kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current); - return RET_PF_RETRY; - } - - return -EFAULT; -} - -static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, - gfn_t gfn, kvm_pfn_t *pfnp, - int *levelp) -{ - kvm_pfn_t pfn = *pfnp; - int level = *levelp; - - /* - * Check if it's a transparent hugepage. If this would be an - * hugetlbfs page, level wouldn't be set to - * PT_PAGE_TABLE_LEVEL and there would be no adjustment done - * here. - */ - if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) && - !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL && - PageTransCompoundMap(pfn_to_page(pfn)) && - !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) { - unsigned long mask; - /* - * mmu_notifier_retry was successful and we hold the - * mmu_lock here, so the pmd can't become splitting - * from under us, and in turn - * __split_huge_page_refcount() can't run from under - * us and we can safely transfer the refcount from - * PG_tail to PG_head as we switch the pfn to tail to - * head. - */ - *levelp = level = PT_DIRECTORY_LEVEL; - mask = KVM_PAGES_PER_HPAGE(level) - 1; - VM_BUG_ON((gfn & mask) != (pfn & mask)); - if (pfn & mask) { - kvm_release_pfn_clean(pfn); - pfn &= ~mask; - kvm_get_pfn(pfn); - *pfnp = pfn; - } - } -} - -static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, - kvm_pfn_t pfn, unsigned access, int *ret_val) -{ - /* The pfn is invalid, report the error! */ - if (unlikely(is_error_pfn(pfn))) { - *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn); - return true; - } - - if (unlikely(is_noslot_pfn(pfn))) - vcpu_cache_mmio_info(vcpu, gva, gfn, - access & shadow_mmio_access_mask); - - return false; -} - -static bool page_fault_can_be_fast(u32 error_code) -{ - /* - * Do not fix the mmio spte with invalid generation number which - * need to be updated by slow page fault path. - */ - if (unlikely(error_code & PFERR_RSVD_MASK)) - return false; - - /* See if the page fault is due to an NX violation */ - if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK)) - == (PFERR_FETCH_MASK | PFERR_PRESENT_MASK)))) - return false; - - /* - * #PF can be fast if: - * 1. The shadow page table entry is not present, which could mean that - * the fault is potentially caused by access tracking (if enabled). - * 2. The shadow page table entry is present and the fault - * is caused by write-protect, that means we just need change the W - * bit of the spte which can be done out of mmu-lock. - * - * However, if access tracking is disabled we know that a non-present - * page must be a genuine page fault where we have to create a new SPTE. - * So, if access tracking is disabled, we return true only for write - * accesses to a present page. - */ - - return shadow_acc_track_mask != 0 || - ((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK)) - == (PFERR_WRITE_MASK | PFERR_PRESENT_MASK)); -} - -/* - * Returns true if the SPTE was fixed successfully. Otherwise, - * someone else modified the SPTE from its original value. - */ -static bool -fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, - u64 *sptep, u64 old_spte, u64 new_spte) -{ - gfn_t gfn; - - WARN_ON(!sp->role.direct); - - /* - * Theoretically we could also set dirty bit (and flush TLB) here in - * order to eliminate unnecessary PML logging. See comments in - * set_spte. 
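/*
 * Minimal standalone rendering of the page_fault_can_be_fast() test above,
 * assuming the x86 #PF error-code layout (P = bit 0, W/R = bit 1,
 * I/D = bit 4) and ignoring the access-tracking case.  Illustration only,
 * not KVM code.
 */
#include <stdbool.h>
#include <stdint.h>

#define PF_P    (1u << 0)       /* page was present          */
#define PF_W    (1u << 1)       /* fault was a write         */
#define PF_ID   (1u << 4)       /* fault was an instr. fetch */

static bool fault_may_be_fast(uint32_t error_code)
{
        /* NX violations always need the slow path. */
        if ((error_code & (PF_ID | PF_P)) == (PF_ID | PF_P))
                return false;

        /* Only a write to an already-present page can be fixed locklessly. */
        return (error_code & (PF_W | PF_P)) == (PF_W | PF_P);
}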
But fast_page_fault is very unlikely to happen with PML - * enabled, so we do not do this. This might result in the same GPA - * to be logged in PML buffer again when the write really happens, and - * eventually to be called by mark_page_dirty twice. But it's also no - * harm. This also avoids the TLB flush needed after setting dirty bit - * so non-PML cases won't be impacted. - * - * Compare with set_spte where instead shadow_dirty_mask is set. - */ - if (cmpxchg64(sptep, old_spte, new_spte) != old_spte) - return false; - - if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) { - /* - * The gfn of direct spte is stable since it is - * calculated by sp->gfn. - */ - gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); - kvm_vcpu_mark_page_dirty(vcpu, gfn); - } - - return true; -} - -static bool is_access_allowed(u32 fault_err_code, u64 spte) -{ - if (fault_err_code & PFERR_FETCH_MASK) - return is_executable_pte(spte); - - if (fault_err_code & PFERR_WRITE_MASK) - return is_writable_pte(spte); - - /* Fault was on Read access */ - return spte & PT_PRESENT_MASK; -} - -/* - * Return value: - * - true: let the vcpu to access on the same address again. - * - false: let the real page fault path to fix it. - */ -static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level, - u32 error_code) -{ - struct kvm_shadow_walk_iterator iterator; - struct kvm_mmu_page *sp; - bool fault_handled = false; - u64 spte = 0ull; - uint retry_count = 0; - - if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) - return false; - - if (!page_fault_can_be_fast(error_code)) - return false; - - walk_shadow_page_lockless_begin(vcpu); - - do { - u64 new_spte; - - for_each_shadow_entry_lockless(vcpu, gva, iterator, spte) - if (!is_shadow_present_pte(spte) || - iterator.level < level) - break; - - sp = page_header(__pa(iterator.sptep)); - if (!is_last_spte(spte, sp->role.level)) - break; - - /* - * Check whether the memory access that caused the fault would - * still cause it if it were to be performed right now. If not, - * then this is a spurious fault caused by TLB lazily flushed, - * or some other CPU has already fixed the PTE after the - * current CPU took the fault. - * - * Need not check the access of upper level table entries since - * they are always ACC_ALL. - */ - if (is_access_allowed(error_code, spte)) { - fault_handled = true; - break; - } - - new_spte = spte; - - if (is_access_track_spte(spte)) - new_spte = restore_acc_track_spte(new_spte); - - /* - * Currently, to simplify the code, write-protection can - * be removed in the fast path only if the SPTE was - * write-protected for dirty-logging or access tracking. - */ - if ((error_code & PFERR_WRITE_MASK) && - spte_can_locklessly_be_made_writable(spte)) - { - new_spte |= PT_WRITABLE_MASK; - - /* - * Do not fix write-permission on the large spte. Since - * we only dirty the first page into the dirty-bitmap in - * fast_pf_fix_direct_spte(), other pages are missed - * if its slot has dirty logging enabled. - * - * Instead, we let the slow page fault path create a - * normal spte to fix the access. - * - * See the comments in kvm_arch_commit_memory_region(). - */ - if (sp->role.level > PT_PAGE_TABLE_LEVEL) - break; - } - - /* Verify that the fault can be handled in the fast path */ - if (new_spte == spte || - !is_access_allowed(error_code, new_spte)) - break; - - /* - * Currently, fast page fault only works for direct mapping - * since the gfn is not stable for indirect shadow page. See - * Documentation/virt/kvm/locking.txt to get more detail. 
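/*
 * The lockless fix in fast_pf_fix_direct_spte() above hinges on a single
 * compare-and-exchange: the new SPTE is installed only if nobody changed
 * the old value in the meantime.  A C11 sketch of that step, illustrative
 * only:
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static bool fix_spte_if_unchanged(_Atomic uint64_t *sptep,
                                  uint64_t old_spte, uint64_t new_spte)
{
        /* Fails, and fixes nothing, if another CPU already updated *sptep. */
        return atomic_compare_exchange_strong(sptep, &old_spte, new_spte);
}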
- */ - fault_handled = fast_pf_fix_direct_spte(vcpu, sp, - iterator.sptep, spte, - new_spte); - if (fault_handled) - break; - - if (++retry_count > 4) { - printk_once(KERN_WARNING - "kvm: Fast #PF retrying more than 4 times.\n"); - break; - } - - } while (true); - - trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep, - spte, fault_handled); - walk_shadow_page_lockless_end(vcpu); - - return fault_handled; -} - -static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, - gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable); -static int make_mmu_pages_available(struct kvm_vcpu *vcpu); - -static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code, - gfn_t gfn, bool prefault) -{ - int r; - int level; - bool force_pt_level; - kvm_pfn_t pfn; - unsigned long mmu_seq; - bool map_writable, write = error_code & PFERR_WRITE_MASK; - bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) && - is_nx_huge_page_enabled(); - - force_pt_level = lpage_disallowed; - level = mapping_level(vcpu, gfn, &force_pt_level); - if (likely(!force_pt_level)) { - /* - * This path builds a PAE pagetable - so we can map - * 2mb pages at maximum. Therefore check if the level - * is larger than that. - */ - if (level > PT_DIRECTORY_LEVEL) - level = PT_DIRECTORY_LEVEL; - - gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); - } - - if (fast_page_fault(vcpu, v, level, error_code)) - return RET_PF_RETRY; - - mmu_seq = vcpu->kvm->mmu_notifier_seq; - smp_rmb(); - - if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable)) - return RET_PF_RETRY; - - if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r)) - return r; - - r = RET_PF_RETRY; - spin_lock(&vcpu->kvm->mmu_lock); - if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) - goto out_unlock; - if (make_mmu_pages_available(vcpu) < 0) - goto out_unlock; - if (likely(!force_pt_level)) - transparent_hugepage_adjust(vcpu, gfn, &pfn, &level); - r = __direct_map(vcpu, v, write, map_writable, level, pfn, - prefault, false); -out_unlock: - spin_unlock(&vcpu->kvm->mmu_lock); - kvm_release_pfn_clean(pfn); - return r; -} - -static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, - struct list_head *invalid_list) -{ - struct kvm_mmu_page *sp; - - if (!VALID_PAGE(*root_hpa)) - return; - - sp = page_header(*root_hpa & PT64_BASE_ADDR_MASK); - --sp->root_count; - if (!sp->root_count && sp->role.invalid) - kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); - - *root_hpa = INVALID_PAGE; -} - -/* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */ -void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, - ulong roots_to_free) -{ - int i; - LIST_HEAD(invalid_list); - bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT; - - BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG); - - /* Before acquiring the MMU lock, see if we need to do any real work. 
*/ - if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) { - for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) - if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) && - VALID_PAGE(mmu->prev_roots[i].hpa)) - break; - - if (i == KVM_MMU_NUM_PREV_ROOTS) - return; - } - - spin_lock(&vcpu->kvm->mmu_lock); - - for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) - if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) - mmu_free_root_page(vcpu->kvm, &mmu->prev_roots[i].hpa, - &invalid_list); - - if (free_active_root) { - if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL && - (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) { - mmu_free_root_page(vcpu->kvm, &mmu->root_hpa, - &invalid_list); - } else { - for (i = 0; i < 4; ++i) - if (mmu->pae_root[i] != 0) - mmu_free_root_page(vcpu->kvm, - &mmu->pae_root[i], - &invalid_list); - mmu->root_hpa = INVALID_PAGE; - } - mmu->root_cr3 = 0; - } - - kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); - spin_unlock(&vcpu->kvm->mmu_lock); -} -EXPORT_SYMBOL_GPL(kvm_mmu_free_roots); - -static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn) -{ - int ret = 0; - - if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) { - kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); - ret = 1; - } - - return ret; -} - -static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) -{ - struct kvm_mmu_page *sp; - unsigned i; - - if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) { - spin_lock(&vcpu->kvm->mmu_lock); - if(make_mmu_pages_available(vcpu) < 0) { - spin_unlock(&vcpu->kvm->mmu_lock); - return -ENOSPC; - } - sp = kvm_mmu_get_page(vcpu, 0, 0, - vcpu->arch.mmu->shadow_root_level, 1, ACC_ALL); - ++sp->root_count; - spin_unlock(&vcpu->kvm->mmu_lock); - vcpu->arch.mmu->root_hpa = __pa(sp->spt); - } else if (vcpu->arch.mmu->shadow_root_level == PT32E_ROOT_LEVEL) { - for (i = 0; i < 4; ++i) { - hpa_t root = vcpu->arch.mmu->pae_root[i]; - - MMU_WARN_ON(VALID_PAGE(root)); - spin_lock(&vcpu->kvm->mmu_lock); - if (make_mmu_pages_available(vcpu) < 0) { - spin_unlock(&vcpu->kvm->mmu_lock); - return -ENOSPC; - } - sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT), - i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL); - root = __pa(sp->spt); - ++sp->root_count; - spin_unlock(&vcpu->kvm->mmu_lock); - vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK; - } - vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); - } else - BUG(); - vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu); - - return 0; -} - -static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) -{ - struct kvm_mmu_page *sp; - u64 pdptr, pm_mask; - gfn_t root_gfn, root_cr3; - int i; - - root_cr3 = vcpu->arch.mmu->get_cr3(vcpu); - root_gfn = root_cr3 >> PAGE_SHIFT; - - if (mmu_check_root(vcpu, root_gfn)) - return 1; - - /* - * Do we shadow a long mode page table? If so we need to - * write-protect the guests page table root. - */ - if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { - hpa_t root = vcpu->arch.mmu->root_hpa; - - MMU_WARN_ON(VALID_PAGE(root)); - - spin_lock(&vcpu->kvm->mmu_lock); - if (make_mmu_pages_available(vcpu) < 0) { - spin_unlock(&vcpu->kvm->mmu_lock); - return -ENOSPC; - } - sp = kvm_mmu_get_page(vcpu, root_gfn, 0, - vcpu->arch.mmu->shadow_root_level, 0, ACC_ALL); - root = __pa(sp->spt); - ++sp->root_count; - spin_unlock(&vcpu->kvm->mmu_lock); - vcpu->arch.mmu->root_hpa = root; - goto set_root_cr3; - } - - /* - * We shadow a 32 bit page table. This may be a legacy 2-level - * or a PAE 3-level page table. In either case we need to be aware that - * the shadow page table may be a PAE or a long mode page table. 
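/*
 * Note on the PAE case above: mmu_alloc_direct_roots() installs four
 * separate roots, one per 1 GiB quadrant of the address space, which is
 * why the shadow pages are instantiated 1 GiB apart (the i << 30
 * addresses) and why pae_root[] holds four entries instead of a single
 * root_hpa.
 */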
- */ - pm_mask = PT_PRESENT_MASK; - if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) - pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK; - - for (i = 0; i < 4; ++i) { - hpa_t root = vcpu->arch.mmu->pae_root[i]; - - MMU_WARN_ON(VALID_PAGE(root)); - if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) { - pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i); - if (!(pdptr & PT_PRESENT_MASK)) { - vcpu->arch.mmu->pae_root[i] = 0; - continue; - } - root_gfn = pdptr >> PAGE_SHIFT; - if (mmu_check_root(vcpu, root_gfn)) - return 1; - } - spin_lock(&vcpu->kvm->mmu_lock); - if (make_mmu_pages_available(vcpu) < 0) { - spin_unlock(&vcpu->kvm->mmu_lock); - return -ENOSPC; - } - sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL, - 0, ACC_ALL); - root = __pa(sp->spt); - ++sp->root_count; - spin_unlock(&vcpu->kvm->mmu_lock); - - vcpu->arch.mmu->pae_root[i] = root | pm_mask; - } - vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); - - /* - * If we shadow a 32 bit page table with a long mode page - * table we enter this path. - */ - if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) { - if (vcpu->arch.mmu->lm_root == NULL) { - /* - * The additional page necessary for this is only - * allocated on demand. - */ - - u64 *lm_root; - - lm_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT); - if (lm_root == NULL) - return 1; - - lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask; - - vcpu->arch.mmu->lm_root = lm_root; - } - - vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root); - } - -set_root_cr3: - vcpu->arch.mmu->root_cr3 = root_cr3; - - return 0; -} - -static int mmu_alloc_roots(struct kvm_vcpu *vcpu) -{ - if (vcpu->arch.mmu->direct_map) - return mmu_alloc_direct_roots(vcpu); - else - return mmu_alloc_shadow_roots(vcpu); -} - -void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) -{ - int i; - struct kvm_mmu_page *sp; - - if (vcpu->arch.mmu->direct_map) - return; - - if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) - return; - - vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); - - if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { - hpa_t root = vcpu->arch.mmu->root_hpa; - sp = page_header(root); - - /* - * Even if another CPU was marking the SP as unsync-ed - * simultaneously, any guest page table changes are not - * guaranteed to be visible anyway until this VCPU issues a TLB - * flush strictly after those changes are made. We only need to - * ensure that the other CPU sets these flags before any actual - * changes to the page tables are made. The comments in - * mmu_need_write_protect() describe what could go wrong if this - * requirement isn't satisfied. 
- */ - if (!smp_load_acquire(&sp->unsync) && - !smp_load_acquire(&sp->unsync_children)) - return; - - spin_lock(&vcpu->kvm->mmu_lock); - kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); - - mmu_sync_children(vcpu, sp); - - kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); - spin_unlock(&vcpu->kvm->mmu_lock); - return; - } - - spin_lock(&vcpu->kvm->mmu_lock); - kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); - - for (i = 0; i < 4; ++i) { - hpa_t root = vcpu->arch.mmu->pae_root[i]; - - if (root && VALID_PAGE(root)) { - root &= PT64_BASE_ADDR_MASK; - sp = page_header(root); - mmu_sync_children(vcpu, sp); - } - } - - kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); - spin_unlock(&vcpu->kvm->mmu_lock); -} -EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots); - -static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr, - u32 access, struct x86_exception *exception) -{ - if (exception) - exception->error_code = 0; - return vaddr; -} - -static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr, - u32 access, - struct x86_exception *exception) -{ - if (exception) - exception->error_code = 0; - return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception); -} - -static bool -__is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level) -{ - int bit7 = (pte >> 7) & 1, low6 = pte & 0x3f; - - return (pte & rsvd_check->rsvd_bits_mask[bit7][level-1]) | - ((rsvd_check->bad_mt_xwr & (1ull << low6)) != 0); -} - -static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level) -{ - return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level); -} - -static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level) -{ - return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level); -} - -static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct) -{ - /* - * A nested guest cannot use the MMIO cache if it is using nested - * page tables, because cr2 is a nGPA while the cache stores GPAs. - */ - if (mmu_is_nested(vcpu)) - return false; - - if (direct) - return vcpu_match_mmio_gpa(vcpu, addr); - - return vcpu_match_mmio_gva(vcpu, addr); -} - -/* return true if reserved bit is detected on spte. 
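/*
 * Standalone sketch (not KVM code) of the reserved-bit test performed by
 * __is_rsvd_bits_set() above: build a contiguous bit-range mask per level
 * and flag the PTE if any of those bits are set.  The MAXPHYADDR example
 * mirrors the rsvd_bits(maxphyaddr, 51) masks set up later in this file.
 */
#include <stdbool.h>
#include <stdint.h>

/* Mask covering bits hi..lo inclusive, valid for 0 <= lo <= hi <= 63. */
static uint64_t bit_range_mask(int hi, int lo)
{
        return (~0ULL >> (63 - hi)) & ~((1ULL << lo) - 1);
}

static bool pte_has_reserved_bits(uint64_t pte, int maxphyaddr)
{
        /* Physical-address bits from MAXPHYADDR up to 51 are reserved. */
        return (pte & bit_range_mask(51, maxphyaddr)) != 0;
}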
*/ -static bool -walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) -{ - struct kvm_shadow_walk_iterator iterator; - u64 sptes[PT64_ROOT_MAX_LEVEL], spte = 0ull; - int root, leaf; - bool reserved = false; - - if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) - goto exit; - - walk_shadow_page_lockless_begin(vcpu); - - for (shadow_walk_init(&iterator, vcpu, addr), - leaf = root = iterator.level; - shadow_walk_okay(&iterator); - __shadow_walk_next(&iterator, spte)) { - spte = mmu_spte_get_lockless(iterator.sptep); - - sptes[leaf - 1] = spte; - leaf--; - - if (!is_shadow_present_pte(spte)) - break; - - reserved |= is_shadow_zero_bits_set(vcpu->arch.mmu, spte, - iterator.level); - } - - walk_shadow_page_lockless_end(vcpu); - - if (reserved) { - pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n", - __func__, addr); - while (root > leaf) { - pr_err("------ spte 0x%llx level %d.\n", - sptes[root - 1], root); - root--; - } - } -exit: - *sptep = spte; - return reserved; -} - -static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct) -{ - u64 spte; - bool reserved; - - if (mmio_info_in_cache(vcpu, addr, direct)) - return RET_PF_EMULATE; - - reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte); - if (WARN_ON(reserved)) - return -EINVAL; - - if (is_mmio_spte(spte)) { - gfn_t gfn = get_mmio_spte_gfn(spte); - unsigned access = get_mmio_spte_access(spte); - - if (!check_mmio_spte(vcpu, spte)) - return RET_PF_INVALID; - - if (direct) - addr = 0; - - trace_handle_mmio_page_fault(addr, gfn, access); - vcpu_cache_mmio_info(vcpu, addr, gfn, access); - return RET_PF_EMULATE; - } - - /* - * If the page table is zapped by other cpus, let CPU fault again on - * the address. - */ - return RET_PF_RETRY; -} - -static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu, - u32 error_code, gfn_t gfn) -{ - if (unlikely(error_code & PFERR_RSVD_MASK)) - return false; - - if (!(error_code & PFERR_PRESENT_MASK) || - !(error_code & PFERR_WRITE_MASK)) - return false; - - /* - * guest is writing the page which is write tracked which can - * not be fixed by page fault handler. 
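/*
 * Note on handle_mmio_page_fault() above: an MMIO SPTE encodes the gfn,
 * the access bits and a generation number, so a still-valid cached entry
 * lets KVM go straight to emulation; check_mmio_spte() rejects entries
 * whose generation is stale, and a zapped walk simply retries the fault.
 */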
- */ - if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE)) - return true; - - return false; -} - -static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr) -{ - struct kvm_shadow_walk_iterator iterator; - u64 spte; - - if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) - return; - - walk_shadow_page_lockless_begin(vcpu); - for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) { - clear_sp_write_flooding_count(iterator.sptep); - if (!is_shadow_present_pte(spte)) - break; - } - walk_shadow_page_lockless_end(vcpu); -} - -static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, - u32 error_code, bool prefault) -{ - gfn_t gfn = gva >> PAGE_SHIFT; - int r; - - pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code); - - if (page_fault_handle_page_track(vcpu, error_code, gfn)) - return RET_PF_EMULATE; - - r = mmu_topup_memory_caches(vcpu); - if (r) - return r; - - MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)); - - - return nonpaging_map(vcpu, gva & PAGE_MASK, - error_code, gfn, prefault); -} - -static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) -{ - struct kvm_arch_async_pf arch; - - arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; - arch.gfn = gfn; - arch.direct_map = vcpu->arch.mmu->direct_map; - arch.cr3 = vcpu->arch.mmu->get_cr3(vcpu); - - return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); -} - -static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, - gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable) -{ - struct kvm_memory_slot *slot; - bool async; - - /* - * Don't expose private memslots to L2. - */ - if (is_guest_mode(vcpu) && !kvm_is_visible_gfn(vcpu->kvm, gfn)) { - *pfn = KVM_PFN_NOSLOT; - return false; - } - - slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); - async = false; - *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable); - if (!async) - return false; /* *pfn has correct page already */ - - if (!prefault && kvm_can_do_async_pf(vcpu)) { - trace_kvm_try_async_get_page(gva, gfn); - if (kvm_find_async_pf_gfn(vcpu, gfn)) { - trace_kvm_async_pf_doublefault(gva, gfn); - kvm_make_request(KVM_REQ_APF_HALT, vcpu); - return true; - } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn)) - return true; - } - - *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable); - return false; -} - -int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, - u64 fault_address, char *insn, int insn_len) -{ - int r = 1; - - vcpu->arch.l1tf_flush_l1d = true; - switch (vcpu->arch.apf.host_apf_reason) { - default: - trace_kvm_page_fault(fault_address, error_code); - - if (kvm_event_needs_reinjection(vcpu)) - kvm_mmu_unprotect_page_virt(vcpu, fault_address); - r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn, - insn_len); - break; - case KVM_PV_REASON_PAGE_NOT_PRESENT: - vcpu->arch.apf.host_apf_reason = 0; - local_irq_disable(); - kvm_async_pf_task_wait(fault_address, 0); - local_irq_enable(); - break; - case KVM_PV_REASON_PAGE_READY: - vcpu->arch.apf.host_apf_reason = 0; - local_irq_disable(); - kvm_async_pf_task_wake(fault_address); - local_irq_enable(); - break; - } - return r; -} -EXPORT_SYMBOL_GPL(kvm_handle_page_fault); - -static bool -check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level) -{ - int page_num = KVM_PAGES_PER_HPAGE(level); - - gfn &= ~(page_num - 1); - - return kvm_mtrr_check_gfn_range_consistency(vcpu, gfn, page_num); -} - -static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 
error_code, - bool prefault) -{ - kvm_pfn_t pfn; - int r; - int level; - bool force_pt_level; - gfn_t gfn = gpa >> PAGE_SHIFT; - unsigned long mmu_seq; - int write = error_code & PFERR_WRITE_MASK; - bool map_writable; - bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) && - is_nx_huge_page_enabled(); - - MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)); - - if (page_fault_handle_page_track(vcpu, error_code, gfn)) - return RET_PF_EMULATE; - - r = mmu_topup_memory_caches(vcpu); - if (r) - return r; - - force_pt_level = - lpage_disallowed || - !check_hugepage_cache_consistency(vcpu, gfn, PT_DIRECTORY_LEVEL); - level = mapping_level(vcpu, gfn, &force_pt_level); - if (likely(!force_pt_level)) { - if (level > PT_DIRECTORY_LEVEL && - !check_hugepage_cache_consistency(vcpu, gfn, level)) - level = PT_DIRECTORY_LEVEL; - gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); - } - - if (fast_page_fault(vcpu, gpa, level, error_code)) - return RET_PF_RETRY; - - mmu_seq = vcpu->kvm->mmu_notifier_seq; - smp_rmb(); - - if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable)) - return RET_PF_RETRY; - - if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r)) - return r; - - r = RET_PF_RETRY; - spin_lock(&vcpu->kvm->mmu_lock); - if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) - goto out_unlock; - if (make_mmu_pages_available(vcpu) < 0) - goto out_unlock; - if (likely(!force_pt_level)) - transparent_hugepage_adjust(vcpu, gfn, &pfn, &level); - r = __direct_map(vcpu, gpa, write, map_writable, level, pfn, - prefault, lpage_disallowed); -out_unlock: - spin_unlock(&vcpu->kvm->mmu_lock); - kvm_release_pfn_clean(pfn); - return r; -} - -static void nonpaging_init_context(struct kvm_vcpu *vcpu, - struct kvm_mmu *context) -{ - context->page_fault = nonpaging_page_fault; - context->gva_to_gpa = nonpaging_gva_to_gpa; - context->sync_page = nonpaging_sync_page; - context->invlpg = nonpaging_invlpg; - context->update_pte = nonpaging_update_pte; - context->root_level = 0; - context->shadow_root_level = PT32E_ROOT_LEVEL; - context->direct_map = true; - context->nx = false; -} - -/* - * Find out if a previously cached root matching the new CR3/role is available. - * The current root is also inserted into the cache. - * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is - * returned. - * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and - * false is returned. This root should now be freed by the caller. - */ -static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3, - union kvm_mmu_page_role new_role) -{ - uint i; - struct kvm_mmu_root_info root; - struct kvm_mmu *mmu = vcpu->arch.mmu; - - root.cr3 = mmu->root_cr3; - root.hpa = mmu->root_hpa; - - for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { - swap(root, mmu->prev_roots[i]); - - if (new_cr3 == root.cr3 && VALID_PAGE(root.hpa) && - page_header(root.hpa) != NULL && - new_role.word == page_header(root.hpa)->role.word) - break; - } - - mmu->root_hpa = root.hpa; - mmu->root_cr3 = root.cr3; - - return i < KVM_MMU_NUM_PREV_ROOTS; -} - -static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3, - union kvm_mmu_page_role new_role, - bool skip_tlb_flush) -{ - struct kvm_mmu *mmu = vcpu->arch.mmu; - - /* - * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid - * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs - * later if necessary. 
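/*
 * Standalone sketch (not KVM code) of the search-and-rotate done by
 * cached_root_available() above: the active root is pushed into the cache
 * while the cache is searched, so entries stay in most-recently-used order
 * and, on a miss, the caller is left holding the LRU entry to free.
 */
#include <stdbool.h>
#include <stdint.h>

#define NR_CACHED_ROOTS 3

struct cached_root {
        uint64_t cr3;
        uint64_t hpa;
};

static void swap_roots(struct cached_root *a, struct cached_root *b)
{
        struct cached_root tmp = *a;

        *a = *b;
        *b = tmp;
}

static bool find_cached_root(struct cached_root *active,
                             struct cached_root prev[NR_CACHED_ROOTS],
                             uint64_t new_cr3)
{
        struct cached_root root = *active;
        int i;

        for (i = 0; i < NR_CACHED_ROOTS; i++) {
                swap_roots(&root, &prev[i]);
                if (root.cr3 == new_cr3)
                        break;
        }
        *active = root;         /* on a miss this is the evicted LRU entry */

        return i < NR_CACHED_ROOTS;
}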
- */ - if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL && - mmu->root_level >= PT64_ROOT_4LEVEL) { - if (mmu_check_root(vcpu, new_cr3 >> PAGE_SHIFT)) - return false; - - if (cached_root_available(vcpu, new_cr3, new_role)) { - /* - * It is possible that the cached previous root page is - * obsolete because of a change in the MMU generation - * number. However, changing the generation number is - * accompanied by KVM_REQ_MMU_RELOAD, which will free - * the root set here and allocate a new one. - */ - kvm_make_request(KVM_REQ_LOAD_CR3, vcpu); - if (!skip_tlb_flush) { - kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); - kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); - } - - /* - * The last MMIO access's GVA and GPA are cached in the - * VCPU. When switching to a new CR3, that GVA->GPA - * mapping may no longer be valid. So clear any cached - * MMIO info even when we don't need to sync the shadow - * page tables. - */ - vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); - - __clear_sp_write_flooding_count( - page_header(mmu->root_hpa)); - - return true; - } - } - - return false; -} - -static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, - union kvm_mmu_page_role new_role, - bool skip_tlb_flush) -{ - if (!fast_cr3_switch(vcpu, new_cr3, new_role, skip_tlb_flush)) - kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, - KVM_MMU_ROOT_CURRENT); -} - -void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush) -{ - __kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu), - skip_tlb_flush); -} -EXPORT_SYMBOL_GPL(kvm_mmu_new_cr3); - -static unsigned long get_cr3(struct kvm_vcpu *vcpu) -{ - return kvm_read_cr3(vcpu); -} - -static void inject_page_fault(struct kvm_vcpu *vcpu, - struct x86_exception *fault) -{ - vcpu->arch.mmu->inject_page_fault(vcpu, fault); -} - -static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, - unsigned access, int *nr_present) -{ - if (unlikely(is_mmio_spte(*sptep))) { - if (gfn != get_mmio_spte_gfn(*sptep)) { - mmu_spte_clear_no_track(sptep); - return true; - } - - (*nr_present)++; - mark_mmio_spte(vcpu, sptep, gfn, access); - return true; - } - - return false; -} - -static inline bool is_last_gpte(struct kvm_mmu *mmu, - unsigned level, unsigned gpte) -{ - /* - * The RHS has bit 7 set iff level < mmu->last_nonleaf_level. - * If it is clear, there are no large pages at this level, so clear - * PT_PAGE_SIZE_MASK in gpte if that is the case. - */ - gpte &= level - mmu->last_nonleaf_level; - - /* - * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set - * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means - * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then. - */ - gpte |= level - PT_PAGE_TABLE_LEVEL - 1; - - return gpte & PT_PAGE_SIZE_MASK; -} - -#define PTTYPE_EPT 18 /* arbitrary */ -#define PTTYPE PTTYPE_EPT -#include "paging_tmpl.h" -#undef PTTYPE - -#define PTTYPE 64 -#include "paging_tmpl.h" -#undef PTTYPE - -#define PTTYPE 32 -#include "paging_tmpl.h" -#undef PTTYPE - -static void -__reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, - struct rsvd_bits_validate *rsvd_check, - int maxphyaddr, int level, bool nx, bool gbpages, - bool pse, bool amd) -{ - u64 exb_bit_rsvd = 0; - u64 gbpages_bit_rsvd = 0; - u64 nonleaf_bit8_rsvd = 0; - - rsvd_check->bad_mt_xwr = 0; - - if (!nx) - exb_bit_rsvd = rsvd_bits(63, 63); - if (!gbpages) - gbpages_bit_rsvd = rsvd_bits(7, 7); - - /* - * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for - * leaf entries) on AMD CPUs only. 
- */ - if (amd) - nonleaf_bit8_rsvd = rsvd_bits(8, 8); - - switch (level) { - case PT32_ROOT_LEVEL: - /* no rsvd bits for 2 level 4K page table entries */ - rsvd_check->rsvd_bits_mask[0][1] = 0; - rsvd_check->rsvd_bits_mask[0][0] = 0; - rsvd_check->rsvd_bits_mask[1][0] = - rsvd_check->rsvd_bits_mask[0][0]; - - if (!pse) { - rsvd_check->rsvd_bits_mask[1][1] = 0; - break; - } - - if (is_cpuid_PSE36()) - /* 36bits PSE 4MB page */ - rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21); - else - /* 32 bits PSE 4MB page */ - rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21); - break; - case PT32E_ROOT_LEVEL: - rsvd_check->rsvd_bits_mask[0][2] = - rsvd_bits(maxphyaddr, 63) | - rsvd_bits(5, 8) | rsvd_bits(1, 2); /* PDPTE */ - rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | - rsvd_bits(maxphyaddr, 62); /* PDE */ - rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd | - rsvd_bits(maxphyaddr, 62); /* PTE */ - rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd | - rsvd_bits(maxphyaddr, 62) | - rsvd_bits(13, 20); /* large page */ - rsvd_check->rsvd_bits_mask[1][0] = - rsvd_check->rsvd_bits_mask[0][0]; - break; - case PT64_ROOT_5LEVEL: - rsvd_check->rsvd_bits_mask[0][4] = exb_bit_rsvd | - nonleaf_bit8_rsvd | rsvd_bits(7, 7) | - rsvd_bits(maxphyaddr, 51); - rsvd_check->rsvd_bits_mask[1][4] = - rsvd_check->rsvd_bits_mask[0][4]; - /* fall through */ - case PT64_ROOT_4LEVEL: - rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd | - nonleaf_bit8_rsvd | rsvd_bits(7, 7) | - rsvd_bits(maxphyaddr, 51); - rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd | - nonleaf_bit8_rsvd | gbpages_bit_rsvd | - rsvd_bits(maxphyaddr, 51); - rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | - rsvd_bits(maxphyaddr, 51); - rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd | - rsvd_bits(maxphyaddr, 51); - rsvd_check->rsvd_bits_mask[1][3] = - rsvd_check->rsvd_bits_mask[0][3]; - rsvd_check->rsvd_bits_mask[1][2] = exb_bit_rsvd | - gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) | - rsvd_bits(13, 29); - rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd | - rsvd_bits(maxphyaddr, 51) | - rsvd_bits(13, 20); /* large page */ - rsvd_check->rsvd_bits_mask[1][0] = - rsvd_check->rsvd_bits_mask[0][0]; - break; - } -} - -static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, - struct kvm_mmu *context) -{ - __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check, - cpuid_maxphyaddr(vcpu), context->root_level, - context->nx, - guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES), - is_pse(vcpu), guest_cpuid_is_amd(vcpu)); -} - -static void -__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check, - int maxphyaddr, bool execonly) -{ - u64 bad_mt_xwr; - - rsvd_check->rsvd_bits_mask[0][4] = - rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7); - rsvd_check->rsvd_bits_mask[0][3] = - rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7); - rsvd_check->rsvd_bits_mask[0][2] = - rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); - rsvd_check->rsvd_bits_mask[0][1] = - rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); - rsvd_check->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51); - - /* large page */ - rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4]; - rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3]; - rsvd_check->rsvd_bits_mask[1][2] = - rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29); - rsvd_check->rsvd_bits_mask[1][1] = - rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20); - rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0]; - - bad_mt_xwr = 0xFFull << (2 * 8); /* bits 3..5 must not be 2 */ - bad_mt_xwr |= 0xFFull << (3 * 8); /* bits 3..5 
must not be 3 */ - bad_mt_xwr |= 0xFFull << (7 * 8); /* bits 3..5 must not be 7 */ - bad_mt_xwr |= REPEAT_BYTE(1ull << 2); /* bits 0..2 must not be 010 */ - bad_mt_xwr |= REPEAT_BYTE(1ull << 6); /* bits 0..2 must not be 110 */ - if (!execonly) { - /* bits 0..2 must not be 100 unless VMX capabilities allow it */ - bad_mt_xwr |= REPEAT_BYTE(1ull << 4); - } - rsvd_check->bad_mt_xwr = bad_mt_xwr; -} - -static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, - struct kvm_mmu *context, bool execonly) -{ - __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check, - cpuid_maxphyaddr(vcpu), execonly); -} - -/* - * the page table on host is the shadow page table for the page - * table in guest or amd nested guest, its mmu features completely - * follow the features in guest. - */ -void -reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) -{ - bool uses_nx = context->nx || - context->mmu_role.base.smep_andnot_wp; - struct rsvd_bits_validate *shadow_zero_check; - int i; - - /* - * Passing "true" to the last argument is okay; it adds a check - * on bit 8 of the SPTEs which KVM doesn't use anyway. - */ - shadow_zero_check = &context->shadow_zero_check; - __reset_rsvds_bits_mask(vcpu, shadow_zero_check, - shadow_phys_bits, - context->shadow_root_level, uses_nx, - guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES), - is_pse(vcpu), true); - - if (!shadow_me_mask) - return; - - for (i = context->shadow_root_level; --i >= 0;) { - shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask; - shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask; - } - -} -EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask); - -static inline bool boot_cpu_is_amd(void) -{ - WARN_ON_ONCE(!tdp_enabled); - return shadow_x_mask == 0; -} - -/* - * the direct page table on host, use as much mmu features as - * possible, however, kvm currently does not do execution-protection. - */ -static void -reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, - struct kvm_mmu *context) -{ - struct rsvd_bits_validate *shadow_zero_check; - int i; - - shadow_zero_check = &context->shadow_zero_check; - - if (boot_cpu_is_amd()) - __reset_rsvds_bits_mask(vcpu, shadow_zero_check, - shadow_phys_bits, - context->shadow_root_level, false, - boot_cpu_has(X86_FEATURE_GBPAGES), - true, true); - else - __reset_rsvds_bits_mask_ept(shadow_zero_check, - shadow_phys_bits, - false); - - if (!shadow_me_mask) - return; - - for (i = context->shadow_root_level; --i >= 0;) { - shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask; - shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask; - } -} - -/* - * as the comments in reset_shadow_zero_bits_mask() except it - * is the shadow page table for intel nested guest. - */ -static void -reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, - struct kvm_mmu *context, bool execonly) -{ - __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, - shadow_phys_bits, execonly); -} - -#define BYTE_MASK(access) \ - ((1 & (access) ? 2 : 0) | \ - (2 & (access) ? 4 : 0) | \ - (3 & (access) ? 8 : 0) | \ - (4 & (access) ? 16 : 0) | \ - (5 & (access) ? 32 : 0) | \ - (6 & (access) ? 64 : 0) | \ - (7 & (access) ? 
128 : 0)) - - -static void update_permission_bitmask(struct kvm_vcpu *vcpu, - struct kvm_mmu *mmu, bool ept) -{ - unsigned byte; - - const u8 x = BYTE_MASK(ACC_EXEC_MASK); - const u8 w = BYTE_MASK(ACC_WRITE_MASK); - const u8 u = BYTE_MASK(ACC_USER_MASK); - - bool cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP) != 0; - bool cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP) != 0; - bool cr0_wp = is_write_protection(vcpu); - - for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) { - unsigned pfec = byte << 1; - - /* - * Each "*f" variable has a 1 bit for each UWX value - * that causes a fault with the given PFEC. - */ - - /* Faults from writes to non-writable pages */ - u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0; - /* Faults from user mode accesses to supervisor pages */ - u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0; - /* Faults from fetches of non-executable pages*/ - u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0; - /* Faults from kernel mode fetches of user pages */ - u8 smepf = 0; - /* Faults from kernel mode accesses of user pages */ - u8 smapf = 0; - - if (!ept) { - /* Faults from kernel mode accesses to user pages */ - u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u; - - /* Not really needed: !nx will cause pte.nx to fault */ - if (!mmu->nx) - ff = 0; - - /* Allow supervisor writes if !cr0.wp */ - if (!cr0_wp) - wf = (pfec & PFERR_USER_MASK) ? wf : 0; - - /* Disallow supervisor fetches of user code if cr4.smep */ - if (cr4_smep) - smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0; - - /* - * SMAP:kernel-mode data accesses from user-mode - * mappings should fault. A fault is considered - * as a SMAP violation if all of the following - * conditions are true: - * - X86_CR4_SMAP is set in CR4 - * - A user page is accessed - * - The access is not a fetch - * - Page fault in kernel mode - * - if CPL = 3 or X86_EFLAGS_AC is clear - * - * Here, we cover the first three conditions. - * The fourth is computed dynamically in permission_fault(); - * PFERR_RSVD_MASK bit will be set in PFEC if the access is - * *not* subject to SMAP restrictions. - */ - if (cr4_smap) - smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf; - } - - mmu->permissions[byte] = ff | uf | wf | smepf | smapf; - } -} - -/* -* PKU is an additional mechanism by which the paging controls access to -* user-mode addresses based on the value in the PKRU register. Protection -* key violations are reported through a bit in the page fault error code. -* Unlike other bits of the error code, the PK bit is not known at the -* call site of e.g. gva_to_gpa; it must be computed directly in -* permission_fault based on two bits of PKRU, on some machine state (CR4, -* CR0, EFER, CPL), and on other bits of the error code and the page tables. -* -* In particular the following conditions come from the error code, the -* page tables and the machine state: -* - PK is always zero unless CR4.PKE=1 and EFER.LMA=1 -* - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch) -* - PK is always zero if U=0 in the page tables -* - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access. -* -* The PKRU bitmask caches the result of these four conditions. The error -* code (minus the P bit) and the page table's U bit form an index into the -* PKRU bitmask. Two bits of the PKRU bitmask are then extracted and ANDed -* with the two bits of the PKRU register corresponding to the protection key. -* For the first three conditions above the bits will be 00, thus masking -* away both AD and WD. 
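/*
 * Standalone sketch (not KVM code) of the per-key lookup the comment above
 * describes: protection key i owns two adjacent PKRU bits, AD (bit 2*i)
 * and WD (bit 2*i + 1).  The CR0.WP / supervisor-access special case that
 * pkru_mask folds in is ignored here for brevity.
 */
#include <stdbool.h>
#include <stdint.h>

/* AD/WD pair for @pkey, taken from the 32-bit PKRU register. */
static unsigned int pkru_bits_for_key(uint32_t pkru, unsigned int pkey)
{
        return (pkru >> (pkey * 2)) & 3;        /* bit 0: AD, bit 1: WD */
}

/* Would a data access to a user page tagged with @pkey be blocked? */
static bool pkey_blocks_access(uint32_t pkru, unsigned int pkey, bool write)
{
        unsigned int ad_wd = pkru_bits_for_key(pkru, pkey);

        if (ad_wd & 1)                          /* AD blocks any data access */
                return true;

        return write && (ad_wd & 2);            /* WD blocks only writes */
}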
For all reads or if the last condition holds, WD -* only will be masked away. -*/ -static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, - bool ept) -{ - unsigned bit; - bool wp; - - if (ept) { - mmu->pkru_mask = 0; - return; - } - - /* PKEY is enabled only if CR4.PKE and EFER.LMA are both set. */ - if (!kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || !is_long_mode(vcpu)) { - mmu->pkru_mask = 0; - return; - } - - wp = is_write_protection(vcpu); - - for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) { - unsigned pfec, pkey_bits; - bool check_pkey, check_write, ff, uf, wf, pte_user; - - pfec = bit << 1; - ff = pfec & PFERR_FETCH_MASK; - uf = pfec & PFERR_USER_MASK; - wf = pfec & PFERR_WRITE_MASK; - - /* PFEC.RSVD is replaced by ACC_USER_MASK. */ - pte_user = pfec & PFERR_RSVD_MASK; - - /* - * Only need to check the access which is not an - * instruction fetch and is to a user page. - */ - check_pkey = (!ff && pte_user); - /* - * write access is controlled by PKRU if it is a - * user access or CR0.WP = 1. - */ - check_write = check_pkey && wf && (uf || wp); - - /* PKRU.AD stops both read and write access. */ - pkey_bits = !!check_pkey; - /* PKRU.WD stops write access. */ - pkey_bits |= (!!check_write) << 1; - - mmu->pkru_mask |= (pkey_bits & 3) << pfec; - } -} - -static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) -{ - unsigned root_level = mmu->root_level; - - mmu->last_nonleaf_level = root_level; - if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu)) - mmu->last_nonleaf_level++; -} - -static void paging64_init_context_common(struct kvm_vcpu *vcpu, - struct kvm_mmu *context, - int level) -{ - context->nx = is_nx(vcpu); - context->root_level = level; - - reset_rsvds_bits_mask(vcpu, context); - update_permission_bitmask(vcpu, context, false); - update_pkru_bitmask(vcpu, context, false); - update_last_nonleaf_level(vcpu, context); - - MMU_WARN_ON(!is_pae(vcpu)); - context->page_fault = paging64_page_fault; - context->gva_to_gpa = paging64_gva_to_gpa; - context->sync_page = paging64_sync_page; - context->invlpg = paging64_invlpg; - context->update_pte = paging64_update_pte; - context->shadow_root_level = level; - context->direct_map = false; -} - -static void paging64_init_context(struct kvm_vcpu *vcpu, - struct kvm_mmu *context) -{ - int root_level = is_la57_mode(vcpu) ? 
- PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL; - - paging64_init_context_common(vcpu, context, root_level); -} - -static void paging32_init_context(struct kvm_vcpu *vcpu, - struct kvm_mmu *context) -{ - context->nx = false; - context->root_level = PT32_ROOT_LEVEL; - - reset_rsvds_bits_mask(vcpu, context); - update_permission_bitmask(vcpu, context, false); - update_pkru_bitmask(vcpu, context, false); - update_last_nonleaf_level(vcpu, context); - - context->page_fault = paging32_page_fault; - context->gva_to_gpa = paging32_gva_to_gpa; - context->sync_page = paging32_sync_page; - context->invlpg = paging32_invlpg; - context->update_pte = paging32_update_pte; - context->shadow_root_level = PT32E_ROOT_LEVEL; - context->direct_map = false; -} - -static void paging32E_init_context(struct kvm_vcpu *vcpu, - struct kvm_mmu *context) -{ - paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL); -} - -static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu) -{ - union kvm_mmu_extended_role ext = {0}; - - ext.cr0_pg = !!is_paging(vcpu); - ext.cr4_pae = !!is_pae(vcpu); - ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); - ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); - ext.cr4_pse = !!is_pse(vcpu); - ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE); - ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57); - ext.maxphyaddr = cpuid_maxphyaddr(vcpu); - - ext.valid = 1; - - return ext; -} - -static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu, - bool base_only) -{ - union kvm_mmu_role role = {0}; - - role.base.access = ACC_ALL; - role.base.nxe = !!is_nx(vcpu); - role.base.cr0_wp = is_write_protection(vcpu); - role.base.smm = is_smm(vcpu); - role.base.guest_mode = is_guest_mode(vcpu); - - if (base_only) - return role; - - role.ext = kvm_calc_mmu_role_ext(vcpu); - - return role; -} - -static union kvm_mmu_role -kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only) -{ - union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only); - - role.base.ad_disabled = (shadow_accessed_mask == 0); - role.base.level = kvm_x86_ops->get_tdp_level(vcpu); - role.base.direct = true; - role.base.gpte_is_8_bytes = true; - - return role; -} - -static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) -{ - struct kvm_mmu *context = vcpu->arch.mmu; - union kvm_mmu_role new_role = - kvm_calc_tdp_mmu_root_page_role(vcpu, false); - - new_role.base.word &= mmu_base_role_mask.word; - if (new_role.as_u64 == context->mmu_role.as_u64) - return; - - context->mmu_role.as_u64 = new_role.as_u64; - context->page_fault = tdp_page_fault; - context->sync_page = nonpaging_sync_page; - context->invlpg = nonpaging_invlpg; - context->update_pte = nonpaging_update_pte; - context->shadow_root_level = kvm_x86_ops->get_tdp_level(vcpu); - context->direct_map = true; - context->set_cr3 = kvm_x86_ops->set_tdp_cr3; - context->get_cr3 = get_cr3; - context->get_pdptr = kvm_pdptr_read; - context->inject_page_fault = kvm_inject_page_fault; - - if (!is_paging(vcpu)) { - context->nx = false; - context->gva_to_gpa = nonpaging_gva_to_gpa; - context->root_level = 0; - } else if (is_long_mode(vcpu)) { - context->nx = is_nx(vcpu); - context->root_level = is_la57_mode(vcpu) ? 
- PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL; - reset_rsvds_bits_mask(vcpu, context); - context->gva_to_gpa = paging64_gva_to_gpa; - } else if (is_pae(vcpu)) { - context->nx = is_nx(vcpu); - context->root_level = PT32E_ROOT_LEVEL; - reset_rsvds_bits_mask(vcpu, context); - context->gva_to_gpa = paging64_gva_to_gpa; - } else { - context->nx = false; - context->root_level = PT32_ROOT_LEVEL; - reset_rsvds_bits_mask(vcpu, context); - context->gva_to_gpa = paging32_gva_to_gpa; - } - - update_permission_bitmask(vcpu, context, false); - update_pkru_bitmask(vcpu, context, false); - update_last_nonleaf_level(vcpu, context); - reset_tdp_shadow_zero_bits_mask(vcpu, context); -} - -static union kvm_mmu_role -kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only) -{ - union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only); - - role.base.smep_andnot_wp = role.ext.cr4_smep && - !is_write_protection(vcpu); - role.base.smap_andnot_wp = role.ext.cr4_smap && - !is_write_protection(vcpu); - role.base.direct = !is_paging(vcpu); - role.base.gpte_is_8_bytes = !!is_pae(vcpu); - - if (!is_long_mode(vcpu)) - role.base.level = PT32E_ROOT_LEVEL; - else if (is_la57_mode(vcpu)) - role.base.level = PT64_ROOT_5LEVEL; - else - role.base.level = PT64_ROOT_4LEVEL; - - return role; -} - -void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu) -{ - struct kvm_mmu *context = vcpu->arch.mmu; - union kvm_mmu_role new_role = - kvm_calc_shadow_mmu_root_page_role(vcpu, false); - - new_role.base.word &= mmu_base_role_mask.word; - if (new_role.as_u64 == context->mmu_role.as_u64) - return; - - if (!is_paging(vcpu)) - nonpaging_init_context(vcpu, context); - else if (is_long_mode(vcpu)) - paging64_init_context(vcpu, context); - else if (is_pae(vcpu)) - paging32E_init_context(vcpu, context); - else - paging32_init_context(vcpu, context); - - context->mmu_role.as_u64 = new_role.as_u64; - reset_shadow_zero_bits_mask(vcpu, context); -} -EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu); - -static union kvm_mmu_role -kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty, - bool execonly) -{ - union kvm_mmu_role role = {0}; - - /* SMM flag is inherited from root_mmu */ - role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm; - - role.base.level = PT64_ROOT_4LEVEL; - role.base.gpte_is_8_bytes = true; - role.base.direct = false; - role.base.ad_disabled = !accessed_dirty; - role.base.guest_mode = true; - role.base.access = ACC_ALL; - - /* - * WP=1 and NOT_WP=1 is an impossible combination, use WP and the - * SMAP variation to denote shadow EPT entries. 
- */ - role.base.cr0_wp = true; - role.base.smap_andnot_wp = true; - - role.ext = kvm_calc_mmu_role_ext(vcpu); - role.ext.execonly = execonly; - - return role; -} - -void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, - bool accessed_dirty, gpa_t new_eptp) -{ - struct kvm_mmu *context = vcpu->arch.mmu; - union kvm_mmu_role new_role = - kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty, - execonly); - - __kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false); - - new_role.base.word &= mmu_base_role_mask.word; - if (new_role.as_u64 == context->mmu_role.as_u64) - return; - - context->shadow_root_level = PT64_ROOT_4LEVEL; - - context->nx = true; - context->ept_ad = accessed_dirty; - context->page_fault = ept_page_fault; - context->gva_to_gpa = ept_gva_to_gpa; - context->sync_page = ept_sync_page; - context->invlpg = ept_invlpg; - context->update_pte = ept_update_pte; - context->root_level = PT64_ROOT_4LEVEL; - context->direct_map = false; - context->mmu_role.as_u64 = new_role.as_u64; - - update_permission_bitmask(vcpu, context, true); - update_pkru_bitmask(vcpu, context, true); - update_last_nonleaf_level(vcpu, context); - reset_rsvds_bits_mask_ept(vcpu, context, execonly); - reset_ept_shadow_zero_bits_mask(vcpu, context, execonly); -} -EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu); - -static void init_kvm_softmmu(struct kvm_vcpu *vcpu) -{ - struct kvm_mmu *context = vcpu->arch.mmu; - - kvm_init_shadow_mmu(vcpu); - context->set_cr3 = kvm_x86_ops->set_cr3; - context->get_cr3 = get_cr3; - context->get_pdptr = kvm_pdptr_read; - context->inject_page_fault = kvm_inject_page_fault; -} - -static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu) -{ - union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false); - struct kvm_mmu *g_context = &vcpu->arch.nested_mmu; - - new_role.base.word &= mmu_base_role_mask.word; - if (new_role.as_u64 == g_context->mmu_role.as_u64) - return; - - g_context->mmu_role.as_u64 = new_role.as_u64; - g_context->get_cr3 = get_cr3; - g_context->get_pdptr = kvm_pdptr_read; - g_context->inject_page_fault = kvm_inject_page_fault; - - /* - * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using - * L1's nested page tables (e.g. EPT12). The nested translation - * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using - * L2's page tables as the first level of translation and L1's - * nested page tables as the second level of translation. Basically - * the gva_to_gpa functions between mmu and nested_mmu are swapped. - */ - if (!is_paging(vcpu)) { - g_context->nx = false; - g_context->root_level = 0; - g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested; - } else if (is_long_mode(vcpu)) { - g_context->nx = is_nx(vcpu); - g_context->root_level = is_la57_mode(vcpu) ? 
- PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL; - reset_rsvds_bits_mask(vcpu, g_context); - g_context->gva_to_gpa = paging64_gva_to_gpa_nested; - } else if (is_pae(vcpu)) { - g_context->nx = is_nx(vcpu); - g_context->root_level = PT32E_ROOT_LEVEL; - reset_rsvds_bits_mask(vcpu, g_context); - g_context->gva_to_gpa = paging64_gva_to_gpa_nested; - } else { - g_context->nx = false; - g_context->root_level = PT32_ROOT_LEVEL; - reset_rsvds_bits_mask(vcpu, g_context); - g_context->gva_to_gpa = paging32_gva_to_gpa_nested; - } - - update_permission_bitmask(vcpu, g_context, false); - update_pkru_bitmask(vcpu, g_context, false); - update_last_nonleaf_level(vcpu, g_context); -} - -void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots) -{ - if (reset_roots) { - uint i; - - vcpu->arch.mmu->root_hpa = INVALID_PAGE; - - for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) - vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; - } - - if (mmu_is_nested(vcpu)) - init_kvm_nested_mmu(vcpu); - else if (tdp_enabled) - init_kvm_tdp_mmu(vcpu); - else - init_kvm_softmmu(vcpu); -} -EXPORT_SYMBOL_GPL(kvm_init_mmu); - -static union kvm_mmu_page_role -kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu) -{ - union kvm_mmu_role role; - - if (tdp_enabled) - role = kvm_calc_tdp_mmu_root_page_role(vcpu, true); - else - role = kvm_calc_shadow_mmu_root_page_role(vcpu, true); - - return role.base; -} - -void kvm_mmu_reset_context(struct kvm_vcpu *vcpu) -{ - kvm_mmu_unload(vcpu); - kvm_init_mmu(vcpu, true); -} -EXPORT_SYMBOL_GPL(kvm_mmu_reset_context); - -int kvm_mmu_load(struct kvm_vcpu *vcpu) -{ - int r; - - r = mmu_topup_memory_caches(vcpu); - if (r) - goto out; - r = mmu_alloc_roots(vcpu); - kvm_mmu_sync_roots(vcpu); - if (r) - goto out; - kvm_mmu_load_cr3(vcpu); - kvm_x86_ops->tlb_flush(vcpu, true); -out: - return r; -} -EXPORT_SYMBOL_GPL(kvm_mmu_load); - -void kvm_mmu_unload(struct kvm_vcpu *vcpu) -{ - kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL); - WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa)); - kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); - WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa)); -} -EXPORT_SYMBOL_GPL(kvm_mmu_unload); - -static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, - struct kvm_mmu_page *sp, u64 *spte, - const void *new) -{ - if (sp->role.level != PT_PAGE_TABLE_LEVEL) { - ++vcpu->kvm->stat.mmu_pde_zapped; - return; - } - - ++vcpu->kvm->stat.mmu_pte_updated; - vcpu->arch.mmu->update_pte(vcpu, sp, spte, new); -} - -static bool need_remote_flush(u64 old, u64 new) -{ - if (!is_shadow_present_pte(old)) - return false; - if (!is_shadow_present_pte(new)) - return true; - if ((old ^ new) & PT64_BASE_ADDR_MASK) - return true; - old ^= shadow_nx_mask; - new ^= shadow_nx_mask; - return (old & ~new & PT64_PERM_MASK) != 0; -} - -static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, - int *bytes) -{ - u64 gentry = 0; - int r; - - /* - * Assume that the pte write on a page table of the same type - * as the current vcpu paging mode since we update the sptes only - * when they have the same mode. - */ - if (is_pae(vcpu) && *bytes == 4) { - /* Handle a 32-bit guest writing two halves of a 64-bit gpte */ - *gpa &= ~(gpa_t)7; - *bytes = 8; - } - - if (*bytes == 4 || *bytes == 8) { - r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes); - if (r) - gentry = 0; - } - - return gentry; -} - -/* - * If we're seeing too many writes to a page, it may no longer be a page table, - * or we may be forking, in which case it is better to unmap the page. 
- */ -static bool detect_write_flooding(struct kvm_mmu_page *sp) -{ - /* - * Skip write-flooding detected for the sp whose level is 1, because - * it can become unsync, then the guest page is not write-protected. - */ - if (sp->role.level == PT_PAGE_TABLE_LEVEL) - return false; - - atomic_inc(&sp->write_flooding_count); - return atomic_read(&sp->write_flooding_count) >= 3; -} - -/* - * Misaligned accesses are too much trouble to fix up; also, they usually - * indicate a page is not used as a page table. - */ -static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa, - int bytes) -{ - unsigned offset, pte_size, misaligned; - - pgprintk("misaligned: gpa %llx bytes %d role %x\n", - gpa, bytes, sp->role.word); - - offset = offset_in_page(gpa); - pte_size = sp->role.gpte_is_8_bytes ? 8 : 4; - - /* - * Sometimes, the OS only writes the last one bytes to update status - * bits, for example, in linux, andb instruction is used in clear_bit(). - */ - if (!(offset & (pte_size - 1)) && bytes == 1) - return false; - - misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1); - misaligned |= bytes < 4; - - return misaligned; -} - -static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte) -{ - unsigned page_offset, quadrant; - u64 *spte; - int level; - - page_offset = offset_in_page(gpa); - level = sp->role.level; - *nspte = 1; - if (!sp->role.gpte_is_8_bytes) { - page_offset <<= 1; /* 32->64 */ - /* - * A 32-bit pde maps 4MB while the shadow pdes map - * only 2MB. So we need to double the offset again - * and zap two pdes instead of one. - */ - if (level == PT32_ROOT_LEVEL) { - page_offset &= ~7; /* kill rounding error */ - page_offset <<= 1; - *nspte = 2; - } - quadrant = page_offset >> PAGE_SHIFT; - page_offset &= ~PAGE_MASK; - if (quadrant != sp->role.quadrant) - return NULL; - } - - spte = &sp->spt[page_offset / sizeof(*spte)]; - return spte; -} - -static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, - const u8 *new, int bytes, - struct kvm_page_track_notifier_node *node) -{ - gfn_t gfn = gpa >> PAGE_SHIFT; - struct kvm_mmu_page *sp; - LIST_HEAD(invalid_list); - u64 entry, gentry, *spte; - int npte; - bool remote_flush, local_flush; - - /* - * If we don't have indirect shadow pages, it means no page is - * write-protected, so we can exit simply. - */ - if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) - return; - - remote_flush = local_flush = false; - - pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes); - - /* - * No need to care whether allocation memory is successful - * or not since pte prefetch is skiped if it does not have - * enough objects in the cache. 
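/*
 * [Editor's aside -- illustrative sketch, not part of the patch text above or
 * below.] The XOR test in detect_write_misaligned() above is worth unpacking:
 * (offset ^ (offset + bytes - 1)) & ~(pte_size - 1) is non-zero exactly when
 * the first and last byte of the guest write fall into different
 * pte_size-aligned slots, i.e. the access straddles a guest PTE. A minimal,
 * standalone userspace demo (values are made up):
 */
#include <stdio.h>

static int crosses_pte(unsigned int offset, unsigned int bytes,
		       unsigned int pte_size)
{
	/* Same expression as the kernel code, minus the "bytes < 4" heuristic. */
	return ((offset ^ (offset + bytes - 1)) & ~(pte_size - 1)) != 0;
}

int main(void)
{
	/* 8-byte guest PTEs, as with gpte_is_8_bytes. */
	printf("%d\n", crosses_pte(0x10, 8, 8)); /* 0: aligned 8-byte write     */
	printf("%d\n", crosses_pte(0x14, 8, 8)); /* 1: straddles PTEs 0x10/0x18 */
	printf("%d\n", crosses_pte(0x17, 1, 8)); /* 0: single byte, single PTE  */
	return 0;
}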
- */ - mmu_topup_memory_caches(vcpu); - - spin_lock(&vcpu->kvm->mmu_lock); - - gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes); - - ++vcpu->kvm->stat.mmu_pte_write; - kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE); - - for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { - if (detect_write_misaligned(sp, gpa, bytes) || - detect_write_flooding(sp)) { - kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); - ++vcpu->kvm->stat.mmu_flooded; - continue; - } - - spte = get_written_sptes(sp, gpa, &npte); - if (!spte) - continue; - - local_flush = true; - while (npte--) { - u32 base_role = vcpu->arch.mmu->mmu_role.base.word; - - entry = *spte; - mmu_page_zap_pte(vcpu->kvm, sp, spte); - if (gentry && - !((sp->role.word ^ base_role) - & mmu_base_role_mask.word) && rmap_can_add(vcpu)) - mmu_pte_write_new_pte(vcpu, sp, spte, &gentry); - if (need_remote_flush(entry, *spte)) - remote_flush = true; - ++spte; - } - } - kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush); - kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE); - spin_unlock(&vcpu->kvm->mmu_lock); -} - -int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) -{ - gpa_t gpa; - int r; - - if (vcpu->arch.mmu->direct_map) - return 0; - - gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); - - r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT); - - return r; -} -EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt); - -static int make_mmu_pages_available(struct kvm_vcpu *vcpu) -{ - LIST_HEAD(invalid_list); - - if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES)) - return 0; - - while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) { - if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list)) - break; - - ++vcpu->kvm->stat.mmu_recycled; - } - kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); - - if (!kvm_mmu_available_pages(vcpu->kvm)) - return -ENOSPC; - return 0; -} - -int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, - void *insn, int insn_len) -{ - int r, emulation_type = 0; - bool direct = vcpu->arch.mmu->direct_map; - - /* With shadow page tables, fault_address contains a GVA or nGPA. */ - if (vcpu->arch.mmu->direct_map) { - vcpu->arch.gpa_available = true; - vcpu->arch.gpa_val = cr2; - } - - r = RET_PF_INVALID; - if (unlikely(error_code & PFERR_RSVD_MASK)) { - r = handle_mmio_page_fault(vcpu, cr2, direct); - if (r == RET_PF_EMULATE) - goto emulate; - } - - if (r == RET_PF_INVALID) { - r = vcpu->arch.mmu->page_fault(vcpu, cr2, - lower_32_bits(error_code), - false); - WARN_ON(r == RET_PF_INVALID); - } - - if (r == RET_PF_RETRY) - return 1; - if (r < 0) - return r; - - /* - * Before emulating the instruction, check if the error code - * was due to a RO violation while translating the guest page. - * This can occur when using nested virtualization with nested - * paging in both guests. If true, we simply unprotect the page - * and resume the guest. - */ - if (vcpu->arch.mmu->direct_map && - (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) { - kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2)); - return 1; - } - - /* - * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still - * optimistically try to just unprotect the page and let the processor - * re-execute the instruction that caused the page fault. Do not allow - * retrying MMIO emulation, as it's not only pointless but could also - * cause us to enter an infinite loop because the processor will keep - * faulting on the non-existent MMIO address. 
Retrying an instruction - * from a nested guest is also pointless and dangerous as we are only - * explicitly shadowing L1's page tables, i.e. unprotecting something - * for L1 isn't going to magically fix whatever issue cause L2 to fail. - */ - if (!mmio_info_in_cache(vcpu, cr2, direct) && !is_guest_mode(vcpu)) - emulation_type = EMULTYPE_ALLOW_RETRY; -emulate: - /* - * On AMD platforms, under certain conditions insn_len may be zero on #NPF. - * This can happen if a guest gets a page-fault on data access but the HW - * table walker is not able to read the instruction page (e.g instruction - * page is not present in memory). In those cases we simply restart the - * guest, with the exception of AMD Erratum 1096 which is unrecoverable. - */ - if (unlikely(insn && !insn_len)) { - if (!kvm_x86_ops->need_emulation_on_page_fault(vcpu)) - return 1; - } - - return x86_emulate_instruction(vcpu, cr2, emulation_type, insn, - insn_len); -} -EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); - -void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) -{ - struct kvm_mmu *mmu = vcpu->arch.mmu; - int i; - - /* INVLPG on a * non-canonical address is a NOP according to the SDM. */ - if (is_noncanonical_address(gva, vcpu)) - return; - - mmu->invlpg(vcpu, gva, mmu->root_hpa); - - /* - * INVLPG is required to invalidate any global mappings for the VA, - * irrespective of PCID. Since it would take us roughly similar amount - * of work to determine whether any of the prev_root mappings of the VA - * is marked global, or to just sync it blindly, so we might as well - * just always sync it. - * - * Mappings not reachable via the current cr3 or the prev_roots will be - * synced when switching to that cr3, so nothing needs to be done here - * for them. - */ - for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) - if (VALID_PAGE(mmu->prev_roots[i].hpa)) - mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); - - kvm_x86_ops->tlb_flush_gva(vcpu, gva); - ++vcpu->stat.invlpg; -} -EXPORT_SYMBOL_GPL(kvm_mmu_invlpg); - -void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid) -{ - struct kvm_mmu *mmu = vcpu->arch.mmu; - bool tlb_flush = false; - uint i; - - if (pcid == kvm_get_active_pcid(vcpu)) { - mmu->invlpg(vcpu, gva, mmu->root_hpa); - tlb_flush = true; - } - - for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { - if (VALID_PAGE(mmu->prev_roots[i].hpa) && - pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].cr3)) { - mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); - tlb_flush = true; - } - } - - if (tlb_flush) - kvm_x86_ops->tlb_flush_gva(vcpu, gva); - - ++vcpu->stat.invlpg; - - /* - * Mappings not reachable via the current cr3 or the prev_roots will be - * synced when switching to that cr3, so nothing needs to be done here - * for them. - */ -} -EXPORT_SYMBOL_GPL(kvm_mmu_invpcid_gva); - -void kvm_enable_tdp(void) -{ - tdp_enabled = true; -} -EXPORT_SYMBOL_GPL(kvm_enable_tdp); - -void kvm_disable_tdp(void) -{ - tdp_enabled = false; -} -EXPORT_SYMBOL_GPL(kvm_disable_tdp); - - -/* The return value indicates if tlb flush on all vcpus is needed. */ -typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head); - -/* The caller should hold mmu-lock before calling this function. 
*/ -static __always_inline bool -slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, - slot_level_handler fn, int start_level, int end_level, - gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb) -{ - struct slot_rmap_walk_iterator iterator; - bool flush = false; - - for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn, - end_gfn, &iterator) { - if (iterator.rmap) - flush |= fn(kvm, iterator.rmap); - - if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { - if (flush && lock_flush_tlb) { - kvm_flush_remote_tlbs_with_address(kvm, - start_gfn, - iterator.gfn - start_gfn + 1); - flush = false; - } - cond_resched_lock(&kvm->mmu_lock); - } - } - - if (flush && lock_flush_tlb) { - kvm_flush_remote_tlbs_with_address(kvm, start_gfn, - end_gfn - start_gfn + 1); - flush = false; - } - - return flush; -} - -static __always_inline bool -slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, - slot_level_handler fn, int start_level, int end_level, - bool lock_flush_tlb) -{ - return slot_handle_level_range(kvm, memslot, fn, start_level, - end_level, memslot->base_gfn, - memslot->base_gfn + memslot->npages - 1, - lock_flush_tlb); -} - -static __always_inline bool -slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot, - slot_level_handler fn, bool lock_flush_tlb) -{ - return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL, - PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); -} - -static __always_inline bool -slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot, - slot_level_handler fn, bool lock_flush_tlb) -{ - return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1, - PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); -} - -static __always_inline bool -slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot, - slot_level_handler fn, bool lock_flush_tlb) -{ - return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL, - PT_PAGE_TABLE_LEVEL, lock_flush_tlb); -} - -static void free_mmu_pages(struct kvm_mmu *mmu) -{ - free_page((unsigned long)mmu->pae_root); - free_page((unsigned long)mmu->lm_root); -} - -static int alloc_mmu_pages(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) -{ - struct page *page; - int i; - - /* - * When using PAE paging, the four PDPTEs are treated as 'root' pages, - * while the PDP table is a per-vCPU construct that's allocated at MMU - * creation. When emulating 32-bit mode, cr3 is only 32 bits even on - * x86_64. Therefore we need to allocate the PDP table in the first - * 4GB of memory, which happens to fit the DMA32 zone. Except for - * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can - * skip allocating the PDP table. 
- */ - if (tdp_enabled && kvm_x86_ops->get_tdp_level(vcpu) > PT32E_ROOT_LEVEL) - return 0; - - page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32); - if (!page) - return -ENOMEM; - - mmu->pae_root = page_address(page); - for (i = 0; i < 4; ++i) - mmu->pae_root[i] = INVALID_PAGE; - - return 0; -} - -int kvm_mmu_create(struct kvm_vcpu *vcpu) -{ - uint i; - int ret; - - vcpu->arch.mmu = &vcpu->arch.root_mmu; - vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; - - vcpu->arch.root_mmu.root_hpa = INVALID_PAGE; - vcpu->arch.root_mmu.root_cr3 = 0; - vcpu->arch.root_mmu.translate_gpa = translate_gpa; - for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) - vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; - - vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE; - vcpu->arch.guest_mmu.root_cr3 = 0; - vcpu->arch.guest_mmu.translate_gpa = translate_gpa; - for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) - vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; - - vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa; - - ret = alloc_mmu_pages(vcpu, &vcpu->arch.guest_mmu); - if (ret) - return ret; - - ret = alloc_mmu_pages(vcpu, &vcpu->arch.root_mmu); - if (ret) - goto fail_allocate_root; - - return ret; - fail_allocate_root: - free_mmu_pages(&vcpu->arch.guest_mmu); - return ret; -} - -#define BATCH_ZAP_PAGES 10 -static void kvm_zap_obsolete_pages(struct kvm *kvm) -{ - struct kvm_mmu_page *sp, *node; - int nr_zapped, batch = 0; - -restart: - list_for_each_entry_safe_reverse(sp, node, - &kvm->arch.active_mmu_pages, link) { - /* - * No obsolete valid page exists before a newly created page - * since active_mmu_pages is a FIFO list. - */ - if (!is_obsolete_sp(kvm, sp)) - break; - - /* - * Skip invalid pages with a non-zero root count, zapping pages - * with a non-zero root count will never succeed, i.e. the page - * will get thrown back on active_mmu_pages and we'll get stuck - * in an infinite loop. - */ - if (sp->role.invalid && sp->root_count) - continue; - - /* - * No need to flush the TLB since we're only zapping shadow - * pages with an obsolete generation number and all vCPUS have - * loaded a new root, i.e. the shadow pages being zapped cannot - * be in active use by the guest. - */ - if (batch >= BATCH_ZAP_PAGES && - cond_resched_lock(&kvm->mmu_lock)) { - batch = 0; - goto restart; - } - - if (__kvm_mmu_prepare_zap_page(kvm, sp, - &kvm->arch.zapped_obsolete_pages, &nr_zapped)) { - batch += nr_zapped; - goto restart; - } - } - - /* - * Trigger a remote TLB flush before freeing the page tables to ensure - * KVM is not in the middle of a lockless shadow page table walk, which - * may reference the pages. - */ - kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages); -} - -/* - * Fast invalidate all shadow pages and use lock-break technique - * to zap obsolete pages. - * - * It's required when memslot is being deleted or VM is being - * destroyed, in these cases, we should ensure that KVM MMU does - * not use any resource of the being-deleted slot or all slots - * after calling the function. - */ -static void kvm_mmu_zap_all_fast(struct kvm *kvm) -{ - lockdep_assert_held(&kvm->slots_lock); - - spin_lock(&kvm->mmu_lock); - trace_kvm_mmu_zap_all_fast(kvm); - - /* - * Toggle mmu_valid_gen between '0' and '1'. Because slots_lock is - * held for the entire duration of zapping obsolete pages, it's - * impossible for there to be multiple invalid generations associated - * with *valid* shadow pages at any given time, i.e. 
there is exactly - * one valid generation and (at most) one invalid generation. - */ - kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1; - - /* - * Notify all vcpus to reload its shadow page table and flush TLB. - * Then all vcpus will switch to new shadow page table with the new - * mmu_valid_gen. - * - * Note: we need to do this under the protection of mmu_lock, - * otherwise, vcpu would purge shadow page but miss tlb flush. - */ - kvm_reload_remote_mmus(kvm); - - kvm_zap_obsolete_pages(kvm); - spin_unlock(&kvm->mmu_lock); -} - -static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm) -{ - return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages)); -} - -static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm, - struct kvm_memory_slot *slot, - struct kvm_page_track_notifier_node *node) -{ - kvm_mmu_zap_all_fast(kvm); -} - -void kvm_mmu_init_vm(struct kvm *kvm) -{ - struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker; - - node->track_write = kvm_mmu_pte_write; - node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot; - kvm_page_track_register_notifier(kvm, node); -} - -void kvm_mmu_uninit_vm(struct kvm *kvm) -{ - struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker; - - kvm_page_track_unregister_notifier(kvm, node); -} - -void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) -{ - struct kvm_memslots *slots; - struct kvm_memory_slot *memslot; - int i; - - spin_lock(&kvm->mmu_lock); - for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { - slots = __kvm_memslots(kvm, i); - kvm_for_each_memslot(memslot, slots) { - gfn_t start, end; - - start = max(gfn_start, memslot->base_gfn); - end = min(gfn_end, memslot->base_gfn + memslot->npages); - if (start >= end) - continue; - - slot_handle_level_range(kvm, memslot, kvm_zap_rmapp, - PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL, - start, end - 1, true); - } - } - - spin_unlock(&kvm->mmu_lock); -} - -static bool slot_rmap_write_protect(struct kvm *kvm, - struct kvm_rmap_head *rmap_head) -{ - return __rmap_write_protect(kvm, rmap_head, false); -} - -void kvm_mmu_slot_remove_write_access(struct kvm *kvm, - struct kvm_memory_slot *memslot) -{ - bool flush; - - spin_lock(&kvm->mmu_lock); - flush = slot_handle_all_level(kvm, memslot, slot_rmap_write_protect, - false); - spin_unlock(&kvm->mmu_lock); - - /* - * kvm_mmu_slot_remove_write_access() and kvm_vm_ioctl_get_dirty_log() - * which do tlb flush out of mmu-lock should be serialized by - * kvm->slots_lock otherwise tlb flush would be missed. - */ - lockdep_assert_held(&kvm->slots_lock); - - /* - * We can flush all the TLBs out of the mmu lock without TLB - * corruption since we just change the spte from writable to - * readonly so that we only need to care the case of changing - * spte from present to present (changing the spte from present - * to nonpresent will flush all the TLBs immediately), in other - * words, the only case we care is mmu_spte_update() where we - * have checked SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE - * instead of PT_WRITABLE_MASK, that means it does not depend - * on PT_WRITABLE_MASK anymore. 
- */ - if (flush) - kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn, - memslot->npages); -} - -static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm, - struct kvm_rmap_head *rmap_head) -{ - u64 *sptep; - struct rmap_iterator iter; - int need_tlb_flush = 0; - kvm_pfn_t pfn; - struct kvm_mmu_page *sp; - -restart: - for_each_rmap_spte(rmap_head, &iter, sptep) { - sp = page_header(__pa(sptep)); - pfn = spte_to_pfn(*sptep); - - /* - * We cannot do huge page mapping for indirect shadow pages, - * which are found on the last rmap (level = 1) when not using - * tdp; such shadow pages are synced with the page table in - * the guest, and the guest page table is using 4K page size - * mapping if the indirect sp has level = 1. - */ - if (sp->role.direct && !kvm_is_reserved_pfn(pfn) && - !kvm_is_zone_device_pfn(pfn) && - PageTransCompoundMap(pfn_to_page(pfn))) { - pte_list_remove(rmap_head, sptep); - - if (kvm_available_flush_tlb_with_range()) - kvm_flush_remote_tlbs_with_address(kvm, sp->gfn, - KVM_PAGES_PER_HPAGE(sp->role.level)); - else - need_tlb_flush = 1; - - goto restart; - } - } - - return need_tlb_flush; -} - -void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, - const struct kvm_memory_slot *memslot) -{ - /* FIXME: const-ify all uses of struct kvm_memory_slot. */ - spin_lock(&kvm->mmu_lock); - slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot, - kvm_mmu_zap_collapsible_spte, true); - spin_unlock(&kvm->mmu_lock); -} - -void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, - struct kvm_memory_slot *memslot) -{ - bool flush; - - spin_lock(&kvm->mmu_lock); - flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false); - spin_unlock(&kvm->mmu_lock); - - lockdep_assert_held(&kvm->slots_lock); - - /* - * It's also safe to flush TLBs out of mmu lock here as currently this - * function is only used for dirty logging, in which case flushing TLB - * out of mmu lock also guarantees no dirty pages will be lost in - * dirty_bitmap. 
- */ - if (flush) - kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn, - memslot->npages); -} -EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty); - -void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm, - struct kvm_memory_slot *memslot) -{ - bool flush; - - spin_lock(&kvm->mmu_lock); - flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect, - false); - spin_unlock(&kvm->mmu_lock); - - /* see kvm_mmu_slot_remove_write_access */ - lockdep_assert_held(&kvm->slots_lock); - - if (flush) - kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn, - memslot->npages); -} -EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access); - -void kvm_mmu_slot_set_dirty(struct kvm *kvm, - struct kvm_memory_slot *memslot) -{ - bool flush; - - spin_lock(&kvm->mmu_lock); - flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false); - spin_unlock(&kvm->mmu_lock); - - lockdep_assert_held(&kvm->slots_lock); - - /* see kvm_mmu_slot_leaf_clear_dirty */ - if (flush) - kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn, - memslot->npages); -} -EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty); - -void kvm_mmu_zap_all(struct kvm *kvm) -{ - struct kvm_mmu_page *sp, *node; - LIST_HEAD(invalid_list); - int ign; - - spin_lock(&kvm->mmu_lock); -restart: - list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) { - if (sp->role.invalid && sp->root_count) - continue; - if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) - goto restart; - if (cond_resched_lock(&kvm->mmu_lock)) - goto restart; - } - - kvm_mmu_commit_zap_page(kvm, &invalid_list); - spin_unlock(&kvm->mmu_lock); -} - -void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen) -{ - WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS); - - gen &= MMIO_SPTE_GEN_MASK; - - /* - * Generation numbers are incremented in multiples of the number of - * address spaces in order to provide unique generations across all - * address spaces. Strip what is effectively the address space - * modifier prior to checking for a wrap of the MMIO generation so - * that a wrap in any address space is detected. - */ - gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1); - - /* - * The very rare case: if the MMIO generation number has wrapped, - * zap all shadow pages. - */ - if (unlikely(gen == 0)) { - kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n"); - kvm_mmu_zap_all_fast(kvm); - } -} - -static unsigned long -mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) -{ - struct kvm *kvm; - int nr_to_scan = sc->nr_to_scan; - unsigned long freed = 0; - - mutex_lock(&kvm_lock); - - list_for_each_entry(kvm, &vm_list, vm_list) { - int idx; - LIST_HEAD(invalid_list); - - /* - * Never scan more than sc->nr_to_scan VM instances. - * Will not hit this condition practically since we do not try - * to shrink more than one VM and it is very unlikely to see - * !n_used_mmu_pages so many times. - */ - if (!nr_to_scan--) - break; - /* - * n_used_mmu_pages is accessed without holding kvm->mmu_lock - * here. We may skip a VM instance errorneosly, but we do not - * want to shrink a VM that only started to populate its MMU - * anyway. 
- */ - if (!kvm->arch.n_used_mmu_pages && - !kvm_has_zapped_obsolete_pages(kvm)) - continue; - - idx = srcu_read_lock(&kvm->srcu); - spin_lock(&kvm->mmu_lock); - - if (kvm_has_zapped_obsolete_pages(kvm)) { - kvm_mmu_commit_zap_page(kvm, - &kvm->arch.zapped_obsolete_pages); - goto unlock; - } - - if (prepare_zap_oldest_mmu_page(kvm, &invalid_list)) - freed++; - kvm_mmu_commit_zap_page(kvm, &invalid_list); - -unlock: - spin_unlock(&kvm->mmu_lock); - srcu_read_unlock(&kvm->srcu, idx); - - /* - * unfair on small ones - * per-vm shrinkers cry out - * sadness comes quickly - */ - list_move_tail(&kvm->vm_list, &vm_list); - break; - } - - mutex_unlock(&kvm_lock); - return freed; -} - -static unsigned long -mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc) -{ - return percpu_counter_read_positive(&kvm_total_used_mmu_pages); -} - -static struct shrinker mmu_shrinker = { - .count_objects = mmu_shrink_count, - .scan_objects = mmu_shrink_scan, - .seeks = DEFAULT_SEEKS * 10, -}; - -static void mmu_destroy_caches(void) -{ - kmem_cache_destroy(pte_list_desc_cache); - kmem_cache_destroy(mmu_page_header_cache); -} - -static void kvm_set_mmio_spte_mask(void) -{ - u64 mask; - - /* - * Set the reserved bits and the present bit of an paging-structure - * entry to generate page fault with PFER.RSV = 1. - */ - - /* - * Mask the uppermost physical address bit, which would be reserved as - * long as the supported physical address width is less than 52. - */ - mask = 1ull << 51; - - /* Set the present bit. */ - mask |= 1ull; - - /* - * If reserved bit is not supported, clear the present bit to disable - * mmio page fault. - */ - if (IS_ENABLED(CONFIG_X86_64) && shadow_phys_bits == 52) - mask &= ~1ull; - - kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK); -} - -static bool get_nx_auto_mode(void) -{ - /* Return true when CPU has the bug, and mitigations are ON */ - return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off(); -} - -static void __set_nx_huge_pages(bool val) -{ - nx_huge_pages = itlb_multihit_kvm_mitigation = val; -} - -static int set_nx_huge_pages(const char *val, const struct kernel_param *kp) -{ - bool old_val = nx_huge_pages; - bool new_val; - - /* In "auto" mode deploy workaround only if CPU has the bug. */ - if (sysfs_streq(val, "off")) - new_val = 0; - else if (sysfs_streq(val, "force")) - new_val = 1; - else if (sysfs_streq(val, "auto")) - new_val = get_nx_auto_mode(); - else if (strtobool(val, &new_val) < 0) - return -EINVAL; - - __set_nx_huge_pages(new_val); - - if (new_val != old_val) { - struct kvm *kvm; - - mutex_lock(&kvm_lock); - - list_for_each_entry(kvm, &vm_list, vm_list) { - mutex_lock(&kvm->slots_lock); - kvm_mmu_zap_all_fast(kvm); - mutex_unlock(&kvm->slots_lock); - - wake_up_process(kvm->arch.nx_lpage_recovery_thread); - } - mutex_unlock(&kvm_lock); - } - - return 0; -} - -int kvm_mmu_module_init(void) -{ - int ret = -ENOMEM; - - if (nx_huge_pages == -1) - __set_nx_huge_pages(get_nx_auto_mode()); - - /* - * MMU roles use union aliasing which is, generally speaking, an - * undefined behavior. However, we supposedly know how compilers behave - * and the current status quo is unlikely to change. Guardians below are - * supposed to let us know if the assumption becomes false. 
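/*
 * [Editor's aside -- illustrative sketch, not part of the patch text above or
 * below.] The BUILD_BUG_ON()s that follow guard the union-aliasing assumption
 * spelled out in the comment above: the bitfield view and the plain-integer
 * "word" view of a role must occupy exactly one u32, so whole roles can be
 * compared as integers. A hypothetical, cut-down userspace analogue:
 */
#include <stdint.h>
#include <stdio.h>

union toy_role {
	struct {
		uint32_t level:4;
		uint32_t direct:1;
		uint32_t access:3;
		uint32_t cr0_wp:1;
		/* remaining bits unused in this toy */
	};
	uint32_t word;
};

/* Compile-time guard, analogous to BUILD_BUG_ON(sizeof(...) != sizeof(u32)). */
_Static_assert(sizeof(union toy_role) == sizeof(uint32_t),
	       "role must alias a u32");

int main(void)
{
	union toy_role a = { .word = 0 };
	union toy_role b;

	a.level = 4;
	a.direct = 1;
	a.cr0_wp = 1;
	b = a;

	/* Whole-role comparison collapses to one integer compare. */
	printf("equal=%d word=0x%x\n", a.word == b.word, a.word);
	return 0;
}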
- */ - BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32)); - BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32)); - BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64)); - - kvm_mmu_reset_all_pte_masks(); - - kvm_set_mmio_spte_mask(); - - pte_list_desc_cache = kmem_cache_create("pte_list_desc", - sizeof(struct pte_list_desc), - 0, SLAB_ACCOUNT, NULL); - if (!pte_list_desc_cache) - goto out; - - mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", - sizeof(struct kvm_mmu_page), - 0, SLAB_ACCOUNT, NULL); - if (!mmu_page_header_cache) - goto out; - - if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL)) - goto out; - - ret = register_shrinker(&mmu_shrinker); - if (ret) - goto out; - - return 0; - -out: - mmu_destroy_caches(); - return ret; -} - -/* - * Calculate mmu pages needed for kvm. - */ -unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm) -{ - unsigned long nr_mmu_pages; - unsigned long nr_pages = 0; - struct kvm_memslots *slots; - struct kvm_memory_slot *memslot; - int i; - - for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { - slots = __kvm_memslots(kvm, i); - - kvm_for_each_memslot(memslot, slots) - nr_pages += memslot->npages; - } - - nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000; - nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES); - - return nr_mmu_pages; -} - -void kvm_mmu_destroy(struct kvm_vcpu *vcpu) -{ - kvm_mmu_unload(vcpu); - free_mmu_pages(&vcpu->arch.root_mmu); - free_mmu_pages(&vcpu->arch.guest_mmu); - mmu_free_memory_caches(vcpu); -} - -void kvm_mmu_module_exit(void) -{ - mmu_destroy_caches(); - percpu_counter_destroy(&kvm_total_used_mmu_pages); - unregister_shrinker(&mmu_shrinker); - mmu_audit_disable(); -} - -static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp) -{ - unsigned int old_val; - int err; - - old_val = nx_huge_pages_recovery_ratio; - err = param_set_uint(val, kp); - if (err) - return err; - - if (READ_ONCE(nx_huge_pages) && - !old_val && nx_huge_pages_recovery_ratio) { - struct kvm *kvm; - - mutex_lock(&kvm_lock); - - list_for_each_entry(kvm, &vm_list, vm_list) - wake_up_process(kvm->arch.nx_lpage_recovery_thread); - - mutex_unlock(&kvm_lock); - } - - return err; -} - -static void kvm_recover_nx_lpages(struct kvm *kvm) -{ - int rcu_idx; - struct kvm_mmu_page *sp; - unsigned int ratio; - LIST_HEAD(invalid_list); - ulong to_zap; - - rcu_idx = srcu_read_lock(&kvm->srcu); - spin_lock(&kvm->mmu_lock); - - ratio = READ_ONCE(nx_huge_pages_recovery_ratio); - to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0; - while (to_zap && !list_empty(&kvm->arch.lpage_disallowed_mmu_pages)) { - /* - * We use a separate list instead of just using active_mmu_pages - * because the number of lpage_disallowed pages is expected to - * be relatively small compared to the total. - */ - sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages, - struct kvm_mmu_page, - lpage_disallowed_link); - WARN_ON_ONCE(!sp->lpage_disallowed); - kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); - WARN_ON_ONCE(sp->lpage_disallowed); - - if (!--to_zap || need_resched() || spin_needbreak(&kvm->mmu_lock)) { - kvm_mmu_commit_zap_page(kvm, &invalid_list); - if (to_zap) - cond_resched_lock(&kvm->mmu_lock); - } - } - - spin_unlock(&kvm->mmu_lock); - srcu_read_unlock(&kvm->srcu, rcu_idx); -} - -static long get_nx_lpage_recovery_timeout(u64 start_time) -{ - return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio) - ? 
start_time + 60 * HZ - get_jiffies_64() - : MAX_SCHEDULE_TIMEOUT; -} - -static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data) -{ - u64 start_time; - long remaining_time; - - while (true) { - start_time = get_jiffies_64(); - remaining_time = get_nx_lpage_recovery_timeout(start_time); - - set_current_state(TASK_INTERRUPTIBLE); - while (!kthread_should_stop() && remaining_time > 0) { - schedule_timeout(remaining_time); - remaining_time = get_nx_lpage_recovery_timeout(start_time); - set_current_state(TASK_INTERRUPTIBLE); - } - - set_current_state(TASK_RUNNING); - - if (kthread_should_stop()) - return 0; - - kvm_recover_nx_lpages(kvm); - } -} - -int kvm_mmu_post_init_vm(struct kvm *kvm) -{ - int err; - - err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0, - "kvm-nx-lpage-recovery", - &kvm->arch.nx_lpage_recovery_thread); - if (!err) - kthread_unpark(kvm->arch.nx_lpage_recovery_thread); - - return err; -} - -void kvm_mmu_pre_destroy_vm(struct kvm *kvm) -{ - if (kvm->arch.nx_lpage_recovery_thread) - kthread_stop(kvm->arch.nx_lpage_recovery_thread); -} diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c new file mode 100644 index 000000000000..6f92b40d798c --- /dev/null +++ b/arch/x86/kvm/mmu/mmu.c @@ -0,0 +1,6502 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Kernel-based Virtual Machine driver for Linux + * + * This module enables machines with Intel VT-x extensions to run virtual + * machines without emulation or binary translation. + * + * MMU support + * + * Copyright (C) 2006 Qumranet, Inc. + * Copyright 2010 Red Hat, Inc. and/or its affiliates. + * + * Authors: + * Yaniv Kamay + * Avi Kivity + */ + +#include "irq.h" +#include "mmu.h" +#include "x86.h" +#include "kvm_cache_regs.h" +#include "cpuid.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include "trace.h" + +extern bool itlb_multihit_kvm_mitigation; + +static int __read_mostly nx_huge_pages = -1; +#ifdef CONFIG_PREEMPT_RT +/* Recovery can cause latency spikes, disable it for PREEMPT_RT. */ +static uint __read_mostly nx_huge_pages_recovery_ratio = 0; +#else +static uint __read_mostly nx_huge_pages_recovery_ratio = 60; +#endif + +static int set_nx_huge_pages(const char *val, const struct kernel_param *kp); +static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp); + +static struct kernel_param_ops nx_huge_pages_ops = { + .set = set_nx_huge_pages, + .get = param_get_bool, +}; + +static struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = { + .set = set_nx_huge_pages_recovery_ratio, + .get = param_get_uint, +}; + +module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644); +__MODULE_PARM_TYPE(nx_huge_pages, "bool"); +module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops, + &nx_huge_pages_recovery_ratio, 0644); +__MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint"); + +/* + * When setting this variable to true it enables Two-Dimensional-Paging + * where the hardware walks 2 page tables: + * 1. the guest-virtual to guest-physical + * 2. while doing 1. it walks guest-physical to host-physical + * If the hardware supports that we don't need to do shadow paging. 
+ */ +bool tdp_enabled = false; + +enum { + AUDIT_PRE_PAGE_FAULT, + AUDIT_POST_PAGE_FAULT, + AUDIT_PRE_PTE_WRITE, + AUDIT_POST_PTE_WRITE, + AUDIT_PRE_SYNC, + AUDIT_POST_SYNC +}; + +#undef MMU_DEBUG + +#ifdef MMU_DEBUG +static bool dbg = 0; +module_param(dbg, bool, 0644); + +#define pgprintk(x...) do { if (dbg) printk(x); } while (0) +#define rmap_printk(x...) do { if (dbg) printk(x); } while (0) +#define MMU_WARN_ON(x) WARN_ON(x) +#else +#define pgprintk(x...) do { } while (0) +#define rmap_printk(x...) do { } while (0) +#define MMU_WARN_ON(x) do { } while (0) +#endif + +#define PTE_PREFETCH_NUM 8 + +#define PT_FIRST_AVAIL_BITS_SHIFT 10 +#define PT64_SECOND_AVAIL_BITS_SHIFT 54 + +/* + * The mask used to denote special SPTEs, which can be either MMIO SPTEs or + * Access Tracking SPTEs. + */ +#define SPTE_SPECIAL_MASK (3ULL << 52) +#define SPTE_AD_ENABLED_MASK (0ULL << 52) +#define SPTE_AD_DISABLED_MASK (1ULL << 52) +#define SPTE_AD_WRPROT_ONLY_MASK (2ULL << 52) +#define SPTE_MMIO_MASK (3ULL << 52) + +#define PT64_LEVEL_BITS 9 + +#define PT64_LEVEL_SHIFT(level) \ + (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS) + +#define PT64_INDEX(address, level)\ + (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1)) + + +#define PT32_LEVEL_BITS 10 + +#define PT32_LEVEL_SHIFT(level) \ + (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS) + +#define PT32_LVL_OFFSET_MASK(level) \ + (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \ + * PT32_LEVEL_BITS))) - 1)) + +#define PT32_INDEX(address, level)\ + (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1)) + + +#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK +#define PT64_BASE_ADDR_MASK (physical_mask & ~(u64)(PAGE_SIZE-1)) +#else +#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)) +#endif +#define PT64_LVL_ADDR_MASK(level) \ + (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \ + * PT64_LEVEL_BITS))) - 1)) +#define PT64_LVL_OFFSET_MASK(level) \ + (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \ + * PT64_LEVEL_BITS))) - 1)) + +#define PT32_BASE_ADDR_MASK PAGE_MASK +#define PT32_DIR_BASE_ADDR_MASK \ + (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1)) +#define PT32_LVL_ADDR_MASK(level) \ + (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \ + * PT32_LEVEL_BITS))) - 1)) + +#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \ + | shadow_x_mask | shadow_nx_mask | shadow_me_mask) + +#define ACC_EXEC_MASK 1 +#define ACC_WRITE_MASK PT_WRITABLE_MASK +#define ACC_USER_MASK PT_USER_MASK +#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK) + +/* The mask for the R/X bits in EPT PTEs */ +#define PT64_EPT_READABLE_MASK 0x1ull +#define PT64_EPT_EXECUTABLE_MASK 0x4ull + +#include + +#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT) +#define SPTE_MMU_WRITEABLE (1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1)) + +#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) + +/* make pte_list_desc fit well in cache line */ +#define PTE_LIST_EXT 3 + +/* + * Return values of handle_mmio_page_fault and mmu.page_fault: + * RET_PF_RETRY: let CPU fault again on the address. + * RET_PF_EMULATE: mmio page fault, emulate the instruction directly. + * + * For handle_mmio_page_fault only: + * RET_PF_INVALID: the spte is invalid, let the real page fault path update it. 
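/*
 * [Editor's aside -- illustrative sketch, not part of the patch text above or
 * below.] The PT64_LEVEL_SHIFT()/PT64_INDEX() macros defined earlier in this
 * hunk slice an address into 9-bit table indices, one per paging level, above
 * the 12-bit page offset. A standalone rendering of the same arithmetic
 * (sample address chosen arbitrarily):
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SHIFT		12
#define TOY_LEVEL_BITS		9
#define TOY_LEVEL_SHIFT(l)	(TOY_PAGE_SHIFT + ((l) - 1) * TOY_LEVEL_BITS)
#define TOY_INDEX(a, l)		(((a) >> TOY_LEVEL_SHIFT(l)) & \
				 ((1ULL << TOY_LEVEL_BITS) - 1))

int main(void)
{
	uint64_t addr = 0x00007f1234567000ULL;
	int level;

	/* Level 4 is the top-level (PML4) index, level 1 the leaf PTE index. */
	for (level = 4; level >= 1; level--)
		printf("level %d index: %llu\n", level,
		       (unsigned long long)TOY_INDEX(addr, level));
	return 0;
}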
+ */ +enum { + RET_PF_RETRY = 0, + RET_PF_EMULATE = 1, + RET_PF_INVALID = 2, +}; + +struct pte_list_desc { + u64 *sptes[PTE_LIST_EXT]; + struct pte_list_desc *more; +}; + +struct kvm_shadow_walk_iterator { + u64 addr; + hpa_t shadow_addr; + u64 *sptep; + int level; + unsigned index; +}; + +static const union kvm_mmu_page_role mmu_base_role_mask = { + .cr0_wp = 1, + .gpte_is_8_bytes = 1, + .nxe = 1, + .smep_andnot_wp = 1, + .smap_andnot_wp = 1, + .smm = 1, + .guest_mode = 1, + .ad_disabled = 1, +}; + +#define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker) \ + for (shadow_walk_init_using_root(&(_walker), (_vcpu), \ + (_root), (_addr)); \ + shadow_walk_okay(&(_walker)); \ + shadow_walk_next(&(_walker))) + +#define for_each_shadow_entry(_vcpu, _addr, _walker) \ + for (shadow_walk_init(&(_walker), _vcpu, _addr); \ + shadow_walk_okay(&(_walker)); \ + shadow_walk_next(&(_walker))) + +#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \ + for (shadow_walk_init(&(_walker), _vcpu, _addr); \ + shadow_walk_okay(&(_walker)) && \ + ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \ + __shadow_walk_next(&(_walker), spte)) + +static struct kmem_cache *pte_list_desc_cache; +static struct kmem_cache *mmu_page_header_cache; +static struct percpu_counter kvm_total_used_mmu_pages; + +static u64 __read_mostly shadow_nx_mask; +static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */ +static u64 __read_mostly shadow_user_mask; +static u64 __read_mostly shadow_accessed_mask; +static u64 __read_mostly shadow_dirty_mask; +static u64 __read_mostly shadow_mmio_mask; +static u64 __read_mostly shadow_mmio_value; +static u64 __read_mostly shadow_mmio_access_mask; +static u64 __read_mostly shadow_present_mask; +static u64 __read_mostly shadow_me_mask; + +/* + * SPTEs used by MMUs without A/D bits are marked with SPTE_AD_DISABLED_MASK; + * shadow_acc_track_mask is the set of bits to be cleared in non-accessed + * pages. + */ +static u64 __read_mostly shadow_acc_track_mask; + +/* + * The mask/shift to use for saving the original R/X bits when marking the PTE + * as not-present for access tracking purposes. We do not save the W bit as the + * PTEs being access tracked also need to be dirty tracked, so the W bit will be + * restored only when a write is attempted to the page. + */ +static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK | + PT64_EPT_EXECUTABLE_MASK; +static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT; + +/* + * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order + * to guard against L1TF attacks. + */ +static u64 __read_mostly shadow_nonpresent_or_rsvd_mask; + +/* + * The number of high-order 1 bits to use in the mask above. + */ +static const u64 shadow_nonpresent_or_rsvd_mask_len = 5; + +/* + * In some cases, we need to preserve the GFN of a non-present or reserved + * SPTE when we usurp the upper five bits of the physical address space to + * defend against L1TF, e.g. for MMIO SPTEs. To preserve the GFN, we'll + * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask + * left into the reserved bits, i.e. the GFN in the SPTE will be split into + * high and low parts. This mask covers the lower bits of the GFN. + */ +static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask; + +/* + * The number of non-reserved physical address bits irrespective of features + * that repurpose legal bits, e.g. MKTME. 
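/*
 * [Editor's aside -- illustrative sketch, not part of the patch text above or
 * below.] The GFN-preserving split described a few lines up can be exercised
 * in isolation. Assumptions for the demo: a 46-bit L1-cache address width and
 * the 5-bit mask length used by this file; only the GFN handling is modelled,
 * not the generation, access or MMIO-value bits that the real helpers also
 * fold in.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MASK_LEN	5
#define CACHE_BITS	46		/* assumed for the example */
#define PG_SHIFT	12

/* bits [CACHE_BITS-5, CACHE_BITS-1]: the always-set "reserved" bits */
static const uint64_t rsvd_mask =
	((1ULL << MASK_LEN) - 1) << (CACHE_BITS - MASK_LEN);
/* bits [PG_SHIFT, CACHE_BITS-6]: GFN bits that stay in place */
static const uint64_t lower_gfn_mask =
	((1ULL << (CACHE_BITS - MASK_LEN)) - 1) & ~((1ULL << PG_SHIFT) - 1);

static uint64_t mark(uint64_t gpa)
{
	uint64_t spte = gpa | rsvd_mask;	  /* poison the high bits        */

	spte |= (gpa & rsvd_mask) << MASK_LEN;	  /* park the clobbered GFN bits */
	return spte;
}

static uint64_t gfn_of(uint64_t spte)
{
	uint64_t gpa = spte & lower_gfn_mask;

	gpa |= (spte >> MASK_LEN) & rsvd_mask;	  /* pull the parked bits back   */
	return gpa >> PG_SHIFT;
}

int main(void)
{
	/* A GPA with bits inside the reserved range, to force the split. */
	uint64_t gpa = 0x23456789000ULL | (0x15ULL << (CACHE_BITS - MASK_LEN));

	assert(gfn_of(mark(gpa)) == gpa >> PG_SHIFT);
	printf("gfn 0x%llx survives the round trip\n",
	       (unsigned long long)(gpa >> PG_SHIFT));
	return 0;
}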
+ */ +static u8 __read_mostly shadow_phys_bits; + +static void mmu_spte_set(u64 *sptep, u64 spte); +static bool is_executable_pte(u64 spte); +static union kvm_mmu_page_role +kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu); + +#define CREATE_TRACE_POINTS +#include "mmutrace.h" + + +static inline bool kvm_available_flush_tlb_with_range(void) +{ + return kvm_x86_ops->tlb_remote_flush_with_range; +} + +static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm, + struct kvm_tlb_range *range) +{ + int ret = -ENOTSUPP; + + if (range && kvm_x86_ops->tlb_remote_flush_with_range) + ret = kvm_x86_ops->tlb_remote_flush_with_range(kvm, range); + + if (ret) + kvm_flush_remote_tlbs(kvm); +} + +static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm, + u64 start_gfn, u64 pages) +{ + struct kvm_tlb_range range; + + range.start_gfn = start_gfn; + range.pages = pages; + + kvm_flush_remote_tlbs_with_range(kvm, &range); +} + +void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask) +{ + BUG_ON((u64)(unsigned)access_mask != access_mask); + BUG_ON((mmio_mask & mmio_value) != mmio_value); + shadow_mmio_value = mmio_value | SPTE_MMIO_MASK; + shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK; + shadow_mmio_access_mask = access_mask; +} +EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask); + +static bool is_mmio_spte(u64 spte) +{ + return (spte & shadow_mmio_mask) == shadow_mmio_value; +} + +static inline bool sp_ad_disabled(struct kvm_mmu_page *sp) +{ + return sp->role.ad_disabled; +} + +static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu) +{ + /* + * When using the EPT page-modification log, the GPAs in the log + * would come from L2 rather than L1. Therefore, we need to rely + * on write protection to record dirty pages. This also bypasses + * PML, since writes now result in a vmexit. + */ + return vcpu->arch.mmu == &vcpu->arch.guest_mmu; +} + +static inline bool spte_ad_enabled(u64 spte) +{ + MMU_WARN_ON(is_mmio_spte(spte)); + return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_DISABLED_MASK; +} + +static inline bool spte_ad_need_write_protect(u64 spte) +{ + MMU_WARN_ON(is_mmio_spte(spte)); + return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_ENABLED_MASK; +} + +static bool is_nx_huge_page_enabled(void) +{ + return READ_ONCE(nx_huge_pages); +} + +static inline u64 spte_shadow_accessed_mask(u64 spte) +{ + MMU_WARN_ON(is_mmio_spte(spte)); + return spte_ad_enabled(spte) ? shadow_accessed_mask : 0; +} + +static inline u64 spte_shadow_dirty_mask(u64 spte) +{ + MMU_WARN_ON(is_mmio_spte(spte)); + return spte_ad_enabled(spte) ? shadow_dirty_mask : 0; +} + +static inline bool is_access_track_spte(u64 spte) +{ + return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0; +} + +/* + * Due to limited space in PTEs, the MMIO generation is a 19 bit subset of + * the memslots generation and is derived as follows: + * + * Bits 0-8 of the MMIO generation are propagated to spte bits 3-11 + * Bits 9-18 of the MMIO generation are propagated to spte bits 52-61 + * + * The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in + * the MMIO generation number, as doing so would require stealing a bit from + * the "real" generation number and thus effectively halve the maximum number + * of MMIO generations that can be handled before encountering a wrap (which + * requires a full MMU zap). The flag is instead explicitly queried when + * checking for MMIO spte cache hits. 
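/*
 * [Editor's aside -- illustrative sketch, not part of the patch text above or
 * below.] A toy round-trip of the split generation layout described in the
 * comment above: gen[0:8] lives in spte bits 3-11 and gen[9:18] in spte bits
 * 52-61. This is a self-contained model of the layout only, not a copy of the
 * kernel helpers that follow.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GEN_LOW_BITS	9		/* gen bits 0..8  -> spte bits 3..11  */
#define GEN_LOW_SHIFT	3
#define GEN_HIGH_BITS	10		/* gen bits 9..18 -> spte bits 52..61 */
#define GEN_HIGH_SHIFT	52

static uint64_t pack_gen(uint64_t gen)
{
	uint64_t lo = gen & ((1ULL << GEN_LOW_BITS) - 1);
	uint64_t hi = (gen >> GEN_LOW_BITS) & ((1ULL << GEN_HIGH_BITS) - 1);

	return (lo << GEN_LOW_SHIFT) | (hi << GEN_HIGH_SHIFT);
}

static uint64_t unpack_gen(uint64_t spte)
{
	uint64_t lo = (spte >> GEN_LOW_SHIFT) & ((1ULL << GEN_LOW_BITS) - 1);
	uint64_t hi = (spte >> GEN_HIGH_SHIFT) & ((1ULL << GEN_HIGH_BITS) - 1);

	return lo | (hi << GEN_LOW_BITS);
}

int main(void)
{
	uint64_t gen = 0x5aa5a;		/* any 19-bit generation value */

	assert(unpack_gen(pack_gen(gen)) == gen);
	printf("generation 0x%llx round-trips\n", (unsigned long long)gen);
	return 0;
}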
+ */ +#define MMIO_SPTE_GEN_MASK GENMASK_ULL(18, 0) + +#define MMIO_SPTE_GEN_LOW_START 3 +#define MMIO_SPTE_GEN_LOW_END 11 +#define MMIO_SPTE_GEN_LOW_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \ + MMIO_SPTE_GEN_LOW_START) + +#define MMIO_SPTE_GEN_HIGH_START 52 +#define MMIO_SPTE_GEN_HIGH_END 61 +#define MMIO_SPTE_GEN_HIGH_MASK GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \ + MMIO_SPTE_GEN_HIGH_START) +static u64 generation_mmio_spte_mask(u64 gen) +{ + u64 mask; + + WARN_ON(gen & ~MMIO_SPTE_GEN_MASK); + + mask = (gen << MMIO_SPTE_GEN_LOW_START) & MMIO_SPTE_GEN_LOW_MASK; + mask |= (gen << MMIO_SPTE_GEN_HIGH_START) & MMIO_SPTE_GEN_HIGH_MASK; + return mask; +} + +static u64 get_mmio_spte_generation(u64 spte) +{ + u64 gen; + + spte &= ~shadow_mmio_mask; + + gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_START; + gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_START; + return gen; +} + +static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, + unsigned access) +{ + u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK; + u64 mask = generation_mmio_spte_mask(gen); + u64 gpa = gfn << PAGE_SHIFT; + + access &= shadow_mmio_access_mask; + mask |= shadow_mmio_value | access; + mask |= gpa | shadow_nonpresent_or_rsvd_mask; + mask |= (gpa & shadow_nonpresent_or_rsvd_mask) + << shadow_nonpresent_or_rsvd_mask_len; + + trace_mark_mmio_spte(sptep, gfn, access, gen); + mmu_spte_set(sptep, mask); +} + +static gfn_t get_mmio_spte_gfn(u64 spte) +{ + u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask; + + gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len) + & shadow_nonpresent_or_rsvd_mask; + + return gpa >> PAGE_SHIFT; +} + +static unsigned get_mmio_spte_access(u64 spte) +{ + return spte & shadow_mmio_access_mask; +} + +static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, + kvm_pfn_t pfn, unsigned access) +{ + if (unlikely(is_noslot_pfn(pfn))) { + mark_mmio_spte(vcpu, sptep, gfn, access); + return true; + } + + return false; +} + +static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte) +{ + u64 kvm_gen, spte_gen, gen; + + gen = kvm_vcpu_memslots(vcpu)->generation; + if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS)) + return false; + + kvm_gen = gen & MMIO_SPTE_GEN_MASK; + spte_gen = get_mmio_spte_generation(spte); + + trace_check_mmio_spte(spte, kvm_gen, spte_gen); + return likely(kvm_gen == spte_gen); +} + +/* + * Sets the shadow PTE masks used by the MMU. + * + * Assumptions: + * - Setting either @accessed_mask or @dirty_mask requires setting both + * - At least one of @accessed_mask or @acc_track_mask must be set + */ +void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, + u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask, + u64 acc_track_mask, u64 me_mask) +{ + BUG_ON(!dirty_mask != !accessed_mask); + BUG_ON(!accessed_mask && !acc_track_mask); + BUG_ON(acc_track_mask & SPTE_SPECIAL_MASK); + + shadow_user_mask = user_mask; + shadow_accessed_mask = accessed_mask; + shadow_dirty_mask = dirty_mask; + shadow_nx_mask = nx_mask; + shadow_x_mask = x_mask; + shadow_present_mask = p_mask; + shadow_acc_track_mask = acc_track_mask; + shadow_me_mask = me_mask; +} +EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes); + +static u8 kvm_get_shadow_phys_bits(void) +{ + /* + * boot_cpu_data.x86_phys_bits is reduced when MKTME is detected + * in CPU detection code, but MKTME treats those reduced bits as + * 'keyID' thus they are not reserved bits. Therefore for MKTME + * we should still return physical address bits reported by CPUID. 
+ */ + if (!boot_cpu_has(X86_FEATURE_TME) || + WARN_ON_ONCE(boot_cpu_data.extended_cpuid_level < 0x80000008)) + return boot_cpu_data.x86_phys_bits; + + return cpuid_eax(0x80000008) & 0xff; +} + +static void kvm_mmu_reset_all_pte_masks(void) +{ + u8 low_phys_bits; + + shadow_user_mask = 0; + shadow_accessed_mask = 0; + shadow_dirty_mask = 0; + shadow_nx_mask = 0; + shadow_x_mask = 0; + shadow_mmio_mask = 0; + shadow_present_mask = 0; + shadow_acc_track_mask = 0; + + shadow_phys_bits = kvm_get_shadow_phys_bits(); + + /* + * If the CPU has 46 or less physical address bits, then set an + * appropriate mask to guard against L1TF attacks. Otherwise, it is + * assumed that the CPU is not vulnerable to L1TF. + * + * Some Intel CPUs address the L1 cache using more PA bits than are + * reported by CPUID. Use the PA width of the L1 cache when possible + * to achieve more effective mitigation, e.g. if system RAM overlaps + * the most significant bits of legal physical address space. + */ + shadow_nonpresent_or_rsvd_mask = 0; + low_phys_bits = boot_cpu_data.x86_cache_bits; + if (boot_cpu_data.x86_cache_bits < + 52 - shadow_nonpresent_or_rsvd_mask_len) { + shadow_nonpresent_or_rsvd_mask = + rsvd_bits(boot_cpu_data.x86_cache_bits - + shadow_nonpresent_or_rsvd_mask_len, + boot_cpu_data.x86_cache_bits - 1); + low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len; + } else + WARN_ON_ONCE(boot_cpu_has_bug(X86_BUG_L1TF)); + + shadow_nonpresent_or_rsvd_lower_gfn_mask = + GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT); +} + +static int is_cpuid_PSE36(void) +{ + return 1; +} + +static int is_nx(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.efer & EFER_NX; +} + +static int is_shadow_present_pte(u64 pte) +{ + return (pte != 0) && !is_mmio_spte(pte); +} + +static int is_large_pte(u64 pte) +{ + return pte & PT_PAGE_SIZE_MASK; +} + +static int is_last_spte(u64 pte, int level) +{ + if (level == PT_PAGE_TABLE_LEVEL) + return 1; + if (is_large_pte(pte)) + return 1; + return 0; +} + +static bool is_executable_pte(u64 spte) +{ + return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask; +} + +static kvm_pfn_t spte_to_pfn(u64 pte) +{ + return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT; +} + +static gfn_t pse36_gfn_delta(u32 gpte) +{ + int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT; + + return (gpte & PT32_DIR_PSE36_MASK) << shift; +} + +#ifdef CONFIG_X86_64 +static void __set_spte(u64 *sptep, u64 spte) +{ + WRITE_ONCE(*sptep, spte); +} + +static void __update_clear_spte_fast(u64 *sptep, u64 spte) +{ + WRITE_ONCE(*sptep, spte); +} + +static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) +{ + return xchg(sptep, spte); +} + +static u64 __get_spte_lockless(u64 *sptep) +{ + return READ_ONCE(*sptep); +} +#else +union split_spte { + struct { + u32 spte_low; + u32 spte_high; + }; + u64 spte; +}; + +static void count_spte_clear(u64 *sptep, u64 spte) +{ + struct kvm_mmu_page *sp = page_header(__pa(sptep)); + + if (is_shadow_present_pte(spte)) + return; + + /* Ensure the spte is completely set before we increase the count */ + smp_wmb(); + sp->clear_spte_count++; +} + +static void __set_spte(u64 *sptep, u64 spte) +{ + union split_spte *ssptep, sspte; + + ssptep = (union split_spte *)sptep; + sspte = (union split_spte)spte; + + ssptep->spte_high = sspte.spte_high; + + /* + * If we map the spte from nonpresent to present, We should store + * the high bits firstly, then set present bit, so cpu can not + * fetch this spte while we are setting the spte. 
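/*
 * [Editor's aside -- illustrative sketch, not part of the patch text above or
 * below.] The 32-bit __set_spte() here publishes a 64-bit SPTE in two halves:
 * high word first, write barrier, then the low word carrying the present bit.
 * The hypothetical userspace analogue below uses a C11 release store on the
 * low word in place of smp_wmb(); a reader that acquire-loads the low word
 * and sees the present bit is then guaranteed to also see the new high word.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define PRESENT_BIT	0x1u

struct toy_split_spte {
	_Atomic uint32_t low;	/* carries the present bit */
	_Atomic uint32_t high;
};

static void publish_spte(struct toy_split_spte *s, uint64_t spte)
{
	/* Step 1: stage the high half while the entry is still non-present. */
	atomic_store_explicit(&s->high, (uint32_t)(spte >> 32),
			      memory_order_relaxed);
	/* Step 2: release-store the low half; step 1 is ordered before it. */
	atomic_store_explicit(&s->low, (uint32_t)spte, memory_order_release);
}

int main(void)
{
	struct toy_split_spte s = { 0, 0 };
	uint64_t spte = 0x00000abcd0001005ULL;	/* made-up "present" SPTE */

	publish_spte(&s, spte);
	printf("low=0x%x high=0x%x present=%u\n",
	       (unsigned int)atomic_load(&s.low),
	       (unsigned int)atomic_load(&s.high),
	       (unsigned int)(atomic_load(&s.low) & PRESENT_BIT));
	return 0;
}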
+ */ + smp_wmb(); + + WRITE_ONCE(ssptep->spte_low, sspte.spte_low); +} + +static void __update_clear_spte_fast(u64 *sptep, u64 spte) +{ + union split_spte *ssptep, sspte; + + ssptep = (union split_spte *)sptep; + sspte = (union split_spte)spte; + + WRITE_ONCE(ssptep->spte_low, sspte.spte_low); + + /* + * If we map the spte from present to nonpresent, we should clear + * present bit firstly to avoid vcpu fetch the old high bits. + */ + smp_wmb(); + + ssptep->spte_high = sspte.spte_high; + count_spte_clear(sptep, spte); +} + +static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) +{ + union split_spte *ssptep, sspte, orig; + + ssptep = (union split_spte *)sptep; + sspte = (union split_spte)spte; + + /* xchg acts as a barrier before the setting of the high bits */ + orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low); + orig.spte_high = ssptep->spte_high; + ssptep->spte_high = sspte.spte_high; + count_spte_clear(sptep, spte); + + return orig.spte; +} + +/* + * The idea using the light way get the spte on x86_32 guest is from + * gup_get_pte (mm/gup.c). + * + * An spte tlb flush may be pending, because kvm_set_pte_rmapp + * coalesces them and we are running out of the MMU lock. Therefore + * we need to protect against in-progress updates of the spte. + * + * Reading the spte while an update is in progress may get the old value + * for the high part of the spte. The race is fine for a present->non-present + * change (because the high part of the spte is ignored for non-present spte), + * but for a present->present change we must reread the spte. + * + * All such changes are done in two steps (present->non-present and + * non-present->present), hence it is enough to count the number of + * present->non-present updates: if it changed while reading the spte, + * we might have hit the race. This is done using clear_spte_count. + */ +static u64 __get_spte_lockless(u64 *sptep) +{ + struct kvm_mmu_page *sp = page_header(__pa(sptep)); + union split_spte spte, *orig = (union split_spte *)sptep; + int count; + +retry: + count = sp->clear_spte_count; + smp_rmb(); + + spte.spte_low = orig->spte_low; + smp_rmb(); + + spte.spte_high = orig->spte_high; + smp_rmb(); + + if (unlikely(spte.spte_low != orig->spte_low || + count != sp->clear_spte_count)) + goto retry; + + return spte.spte; +} +#endif + +static bool spte_can_locklessly_be_made_writable(u64 spte) +{ + return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) == + (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE); +} + +static bool spte_has_volatile_bits(u64 spte) +{ + if (!is_shadow_present_pte(spte)) + return false; + + /* + * Always atomically update spte if it can be updated + * out of mmu-lock, it can ensure dirty bit is not lost, + * also, it can help us to get a stable is_writable_pte() + * to ensure tlb flush is not missed. + */ + if (spte_can_locklessly_be_made_writable(spte) || + is_access_track_spte(spte)) + return true; + + if (spte_ad_enabled(spte)) { + if ((spte & shadow_accessed_mask) == 0 || + (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0)) + return true; + } + + return false; +} + +static bool is_accessed_spte(u64 spte) +{ + u64 accessed_mask = spte_shadow_accessed_mask(spte); + + return accessed_mask ? spte & accessed_mask + : !is_access_track_spte(spte); +} + +static bool is_dirty_spte(u64 spte) +{ + u64 dirty_mask = spte_shadow_dirty_mask(spte); + + return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK; +} + +/* Rules for using mmu_spte_set: + * Set the sptep from nonpresent to present. 
+ * Note: the sptep being assigned *must* be either not present + * or in a state where the hardware will not attempt to update + * the spte. + */ +static void mmu_spte_set(u64 *sptep, u64 new_spte) +{ + WARN_ON(is_shadow_present_pte(*sptep)); + __set_spte(sptep, new_spte); +} + +/* + * Update the SPTE (excluding the PFN), but do not track changes in its + * accessed/dirty status. + */ +static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte) +{ + u64 old_spte = *sptep; + + WARN_ON(!is_shadow_present_pte(new_spte)); + + if (!is_shadow_present_pte(old_spte)) { + mmu_spte_set(sptep, new_spte); + return old_spte; + } + + if (!spte_has_volatile_bits(old_spte)) + __update_clear_spte_fast(sptep, new_spte); + else + old_spte = __update_clear_spte_slow(sptep, new_spte); + + WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte)); + + return old_spte; +} + +/* Rules for using mmu_spte_update: + * Update the state bits, it means the mapped pfn is not changed. + * + * Whenever we overwrite a writable spte with a read-only one we + * should flush remote TLBs. Otherwise rmap_write_protect + * will find a read-only spte, even though the writable spte + * might be cached on a CPU's TLB, the return value indicates this + * case. + * + * Returns true if the TLB needs to be flushed + */ +static bool mmu_spte_update(u64 *sptep, u64 new_spte) +{ + bool flush = false; + u64 old_spte = mmu_spte_update_no_track(sptep, new_spte); + + if (!is_shadow_present_pte(old_spte)) + return false; + + /* + * For the spte updated out of mmu-lock is safe, since + * we always atomically update it, see the comments in + * spte_has_volatile_bits(). + */ + if (spte_can_locklessly_be_made_writable(old_spte) && + !is_writable_pte(new_spte)) + flush = true; + + /* + * Flush TLB when accessed/dirty states are changed in the page tables, + * to guarantee consistency between TLB and page tables. + */ + + if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) { + flush = true; + kvm_set_pfn_accessed(spte_to_pfn(old_spte)); + } + + if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) { + flush = true; + kvm_set_pfn_dirty(spte_to_pfn(old_spte)); + } + + return flush; +} + +/* + * Rules for using mmu_spte_clear_track_bits: + * It sets the sptep from present to nonpresent, and track the + * state bits, it is used to clear the last level sptep. + * Returns non-zero if the PTE was previously valid. + */ +static int mmu_spte_clear_track_bits(u64 *sptep) +{ + kvm_pfn_t pfn; + u64 old_spte = *sptep; + + if (!spte_has_volatile_bits(old_spte)) + __update_clear_spte_fast(sptep, 0ull); + else + old_spte = __update_clear_spte_slow(sptep, 0ull); + + if (!is_shadow_present_pte(old_spte)) + return 0; + + pfn = spte_to_pfn(old_spte); + + /* + * KVM does not hold the refcount of the page used by + * kvm mmu, before reclaiming the page, we should + * unmap it from mmu first. + */ + WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn))); + + if (is_accessed_spte(old_spte)) + kvm_set_pfn_accessed(pfn); + + if (is_dirty_spte(old_spte)) + kvm_set_pfn_dirty(pfn); + + return 1; +} + +/* + * Rules for using mmu_spte_clear_no_track: + * Directly clear spte without caring the state bits of sptep, + * it is used to set the upper level spte. 
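+ * No accessed/dirty bookkeeping is done here, unlike
+ * mmu_spte_clear_track_bits() which is meant for leaf sptes.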
+ */ +static void mmu_spte_clear_no_track(u64 *sptep) +{ + __update_clear_spte_fast(sptep, 0ull); +} + +static u64 mmu_spte_get_lockless(u64 *sptep) +{ + return __get_spte_lockless(sptep); +} + +static u64 mark_spte_for_access_track(u64 spte) +{ + if (spte_ad_enabled(spte)) + return spte & ~shadow_accessed_mask; + + if (is_access_track_spte(spte)) + return spte; + + /* + * Making an Access Tracking PTE will result in removal of write access + * from the PTE. So, verify that we will be able to restore the write + * access in the fast page fault path later on. + */ + WARN_ONCE((spte & PT_WRITABLE_MASK) && + !spte_can_locklessly_be_made_writable(spte), + "kvm: Writable SPTE is not locklessly dirty-trackable\n"); + + WARN_ONCE(spte & (shadow_acc_track_saved_bits_mask << + shadow_acc_track_saved_bits_shift), + "kvm: Access Tracking saved bit locations are not zero\n"); + + spte |= (spte & shadow_acc_track_saved_bits_mask) << + shadow_acc_track_saved_bits_shift; + spte &= ~shadow_acc_track_mask; + + return spte; +} + +/* Restore an acc-track PTE back to a regular PTE */ +static u64 restore_acc_track_spte(u64 spte) +{ + u64 new_spte = spte; + u64 saved_bits = (spte >> shadow_acc_track_saved_bits_shift) + & shadow_acc_track_saved_bits_mask; + + WARN_ON_ONCE(spte_ad_enabled(spte)); + WARN_ON_ONCE(!is_access_track_spte(spte)); + + new_spte &= ~shadow_acc_track_mask; + new_spte &= ~(shadow_acc_track_saved_bits_mask << + shadow_acc_track_saved_bits_shift); + new_spte |= saved_bits; + + return new_spte; +} + +/* Returns the Accessed status of the PTE and resets it at the same time. */ +static bool mmu_spte_age(u64 *sptep) +{ + u64 spte = mmu_spte_get_lockless(sptep); + + if (!is_accessed_spte(spte)) + return false; + + if (spte_ad_enabled(spte)) { + clear_bit((ffs(shadow_accessed_mask) - 1), + (unsigned long *)sptep); + } else { + /* + * Capture the dirty status of the page, so that it doesn't get + * lost when the SPTE is marked for access tracking. + */ + if (is_writable_pte(spte)) + kvm_set_pfn_dirty(spte_to_pfn(spte)); + + spte = mark_spte_for_access_track(spte); + mmu_spte_update_no_track(sptep, spte); + } + + return true; +} + +static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu) +{ + /* + * Prevent page table teardown by making any free-er wait during + * kvm_flush_remote_tlbs() IPI to all active vcpus. + */ + local_irq_disable(); + + /* + * Make sure a following spte read is not reordered ahead of the write + * to vcpu->mode. + */ + smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES); +} + +static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu) +{ + /* + * Make sure the write to vcpu->mode is not reordered in front of + * reads to sptes. If it does, kvm_mmu_commit_zap_page() can see us + * OUTSIDE_GUEST_MODE and proceed to free the shadow page table. + */ + smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE); + local_irq_enable(); +} + +static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, + struct kmem_cache *base_cache, int min) +{ + void *obj; + + if (cache->nobjs >= min) + return 0; + while (cache->nobjs < ARRAY_SIZE(cache->objects)) { + obj = kmem_cache_zalloc(base_cache, GFP_KERNEL_ACCOUNT); + if (!obj) + return cache->nobjs >= min ? 
0 : -ENOMEM; + cache->objects[cache->nobjs++] = obj; + } + return 0; +} + +static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache) +{ + return cache->nobjs; +} + +static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc, + struct kmem_cache *cache) +{ + while (mc->nobjs) + kmem_cache_free(cache, mc->objects[--mc->nobjs]); +} + +static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache, + int min) +{ + void *page; + + if (cache->nobjs >= min) + return 0; + while (cache->nobjs < ARRAY_SIZE(cache->objects)) { + page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT); + if (!page) + return cache->nobjs >= min ? 0 : -ENOMEM; + cache->objects[cache->nobjs++] = page; + } + return 0; +} + +static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc) +{ + while (mc->nobjs) + free_page((unsigned long)mc->objects[--mc->nobjs]); +} + +static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu) +{ + int r; + + r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, + pte_list_desc_cache, 8 + PTE_PREFETCH_NUM); + if (r) + goto out; + r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8); + if (r) + goto out; + r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache, + mmu_page_header_cache, 4); +out: + return r; +} + +static void mmu_free_memory_caches(struct kvm_vcpu *vcpu) +{ + mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, + pte_list_desc_cache); + mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache); + mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache, + mmu_page_header_cache); +} + +static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) +{ + void *p; + + BUG_ON(!mc->nobjs); + p = mc->objects[--mc->nobjs]; + return p; +} + +static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu) +{ + return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache); +} + +static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc) +{ + kmem_cache_free(pte_list_desc_cache, pte_list_desc); +} + +static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index) +{ + if (!sp->role.direct) + return sp->gfns[index]; + + return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS)); +} + +static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) +{ + if (!sp->role.direct) { + sp->gfns[index] = gfn; + return; + } + + if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index))) + pr_err_ratelimited("gfn mismatch under direct page %llx " + "(expected %llx, got %llx)\n", + sp->gfn, + kvm_mmu_page_get_gfn(sp, index), gfn); +} + +/* + * Return the pointer to the large page information for a given gfn, + * handling slots that are not large page aligned. 
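+ * The index is taken relative to slot->base_gfn at the requested
+ * level, so the first and last large pages that are only partially
+ * covered by the slot still get their own entries.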
+ */ +static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, + struct kvm_memory_slot *slot, + int level) +{ + unsigned long idx; + + idx = gfn_to_index(gfn, slot->base_gfn, level); + return &slot->arch.lpage_info[level - 2][idx]; +} + +static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot, + gfn_t gfn, int count) +{ + struct kvm_lpage_info *linfo; + int i; + + for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) { + linfo = lpage_info_slot(gfn, slot, i); + linfo->disallow_lpage += count; + WARN_ON(linfo->disallow_lpage < 0); + } +} + +void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn) +{ + update_gfn_disallow_lpage_count(slot, gfn, 1); +} + +void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn) +{ + update_gfn_disallow_lpage_count(slot, gfn, -1); +} + +static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *slot; + gfn_t gfn; + + kvm->arch.indirect_shadow_pages++; + gfn = sp->gfn; + slots = kvm_memslots_for_spte_role(kvm, sp->role); + slot = __gfn_to_memslot(slots, gfn); + + /* the non-leaf shadow pages are keeping readonly. */ + if (sp->role.level > PT_PAGE_TABLE_LEVEL) + return kvm_slot_page_track_add_page(kvm, slot, gfn, + KVM_PAGE_TRACK_WRITE); + + kvm_mmu_gfn_disallow_lpage(slot, gfn); +} + +static void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp) +{ + if (sp->lpage_disallowed) + return; + + ++kvm->stat.nx_lpage_splits; + list_add_tail(&sp->lpage_disallowed_link, + &kvm->arch.lpage_disallowed_mmu_pages); + sp->lpage_disallowed = true; +} + +static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *slot; + gfn_t gfn; + + kvm->arch.indirect_shadow_pages--; + gfn = sp->gfn; + slots = kvm_memslots_for_spte_role(kvm, sp->role); + slot = __gfn_to_memslot(slots, gfn); + if (sp->role.level > PT_PAGE_TABLE_LEVEL) + return kvm_slot_page_track_remove_page(kvm, slot, gfn, + KVM_PAGE_TRACK_WRITE); + + kvm_mmu_gfn_allow_lpage(slot, gfn); +} + +static void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp) +{ + --kvm->stat.nx_lpage_splits; + sp->lpage_disallowed = false; + list_del(&sp->lpage_disallowed_link); +} + +static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level, + struct kvm_memory_slot *slot) +{ + struct kvm_lpage_info *linfo; + + if (slot) { + linfo = lpage_info_slot(gfn, slot, level); + return !!linfo->disallow_lpage; + } + + return true; +} + +static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn, + int level) +{ + struct kvm_memory_slot *slot; + + slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); + return __mmu_gfn_lpage_is_disallowed(gfn, level, slot); +} + +static int host_mapping_level(struct kvm *kvm, gfn_t gfn) +{ + unsigned long page_size; + int i, ret = 0; + + page_size = kvm_host_page_size(kvm, gfn); + + for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) { + if (page_size >= KVM_HPAGE_SIZE(i)) + ret = i; + else + break; + } + + return ret; +} + +static inline bool memslot_valid_for_gpte(struct kvm_memory_slot *slot, + bool no_dirty_log) +{ + if (!slot || slot->flags & KVM_MEMSLOT_INVALID) + return false; + if (no_dirty_log && slot->dirty_bitmap) + return false; + + return true; +} + +static struct kvm_memory_slot * +gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, + bool no_dirty_log) +{ + struct kvm_memory_slot *slot; + + slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); + if 
(!memslot_valid_for_gpte(slot, no_dirty_log)) + slot = NULL; + + return slot; +} + +static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn, + bool *force_pt_level) +{ + int host_level, level, max_level; + struct kvm_memory_slot *slot; + + if (unlikely(*force_pt_level)) + return PT_PAGE_TABLE_LEVEL; + + slot = kvm_vcpu_gfn_to_memslot(vcpu, large_gfn); + *force_pt_level = !memslot_valid_for_gpte(slot, true); + if (unlikely(*force_pt_level)) + return PT_PAGE_TABLE_LEVEL; + + host_level = host_mapping_level(vcpu->kvm, large_gfn); + + if (host_level == PT_PAGE_TABLE_LEVEL) + return host_level; + + max_level = min(kvm_x86_ops->get_lpage_level(), host_level); + + for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level) + if (__mmu_gfn_lpage_is_disallowed(large_gfn, level, slot)) + break; + + return level - 1; +} + +/* + * About rmap_head encoding: + * + * If the bit zero of rmap_head->val is clear, then it points to the only spte + * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct + * pte_list_desc containing more mappings. + */ + +/* + * Returns the number of pointers in the rmap chain, not counting the new one. + */ +static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte, + struct kvm_rmap_head *rmap_head) +{ + struct pte_list_desc *desc; + int i, count = 0; + + if (!rmap_head->val) { + rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte); + rmap_head->val = (unsigned long)spte; + } else if (!(rmap_head->val & 1)) { + rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte); + desc = mmu_alloc_pte_list_desc(vcpu); + desc->sptes[0] = (u64 *)rmap_head->val; + desc->sptes[1] = spte; + rmap_head->val = (unsigned long)desc | 1; + ++count; + } else { + rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte); + desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); + while (desc->sptes[PTE_LIST_EXT-1] && desc->more) { + desc = desc->more; + count += PTE_LIST_EXT; + } + if (desc->sptes[PTE_LIST_EXT-1]) { + desc->more = mmu_alloc_pte_list_desc(vcpu); + desc = desc->more; + } + for (i = 0; desc->sptes[i]; ++i) + ++count; + desc->sptes[i] = spte; + } + return count; +} + +static void +pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head, + struct pte_list_desc *desc, int i, + struct pte_list_desc *prev_desc) +{ + int j; + + for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j) + ; + desc->sptes[i] = desc->sptes[j]; + desc->sptes[j] = NULL; + if (j != 0) + return; + if (!prev_desc && !desc->more) + rmap_head->val = (unsigned long)desc->sptes[0]; + else + if (prev_desc) + prev_desc->more = desc->more; + else + rmap_head->val = (unsigned long)desc->more | 1; + mmu_free_pte_list_desc(desc); +} + +static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head) +{ + struct pte_list_desc *desc; + struct pte_list_desc *prev_desc; + int i; + + if (!rmap_head->val) { + pr_err("%s: %p 0->BUG\n", __func__, spte); + BUG(); + } else if (!(rmap_head->val & 1)) { + rmap_printk("%s: %p 1->0\n", __func__, spte); + if ((u64 *)rmap_head->val != spte) { + pr_err("%s: %p 1->BUG\n", __func__, spte); + BUG(); + } + rmap_head->val = 0; + } else { + rmap_printk("%s: %p many->many\n", __func__, spte); + desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); + prev_desc = NULL; + while (desc) { + for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) { + if (desc->sptes[i] == spte) { + pte_list_desc_remove_entry(rmap_head, + desc, i, prev_desc); + return; + } + } + prev_desc = desc; + desc = desc->more; + } + pr_err("%s: %p many->many\n", __func__, spte); + 
BUG(); + } +} + +static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep) +{ + mmu_spte_clear_track_bits(sptep); + __pte_list_remove(sptep, rmap_head); +} + +static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level, + struct kvm_memory_slot *slot) +{ + unsigned long idx; + + idx = gfn_to_index(gfn, slot->base_gfn, level); + return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx]; +} + +static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, + struct kvm_mmu_page *sp) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *slot; + + slots = kvm_memslots_for_spte_role(kvm, sp->role); + slot = __gfn_to_memslot(slots, gfn); + return __gfn_to_rmap(gfn, sp->role.level, slot); +} + +static bool rmap_can_add(struct kvm_vcpu *vcpu) +{ + struct kvm_mmu_memory_cache *cache; + + cache = &vcpu->arch.mmu_pte_list_desc_cache; + return mmu_memory_cache_free_objects(cache); +} + +static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) +{ + struct kvm_mmu_page *sp; + struct kvm_rmap_head *rmap_head; + + sp = page_header(__pa(spte)); + kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); + rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp); + return pte_list_add(vcpu, spte, rmap_head); +} + +static void rmap_remove(struct kvm *kvm, u64 *spte) +{ + struct kvm_mmu_page *sp; + gfn_t gfn; + struct kvm_rmap_head *rmap_head; + + sp = page_header(__pa(spte)); + gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt); + rmap_head = gfn_to_rmap(kvm, gfn, sp); + __pte_list_remove(spte, rmap_head); +} + +/* + * Used by the following functions to iterate through the sptes linked by a + * rmap. All fields are private and not assumed to be used outside. + */ +struct rmap_iterator { + /* private fields */ + struct pte_list_desc *desc; /* holds the sptep if not NULL */ + int pos; /* index of the sptep */ +}; + +/* + * Iteration must be started by this function. This should also be used after + * removing/dropping sptes from the rmap link because in such cases the + * information in the itererator may not be valid. + * + * Returns sptep if found, NULL otherwise. + */ +static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head, + struct rmap_iterator *iter) +{ + u64 *sptep; + + if (!rmap_head->val) + return NULL; + + if (!(rmap_head->val & 1)) { + iter->desc = NULL; + sptep = (u64 *)rmap_head->val; + goto out; + } + + iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); + iter->pos = 0; + sptep = iter->desc->sptes[iter->pos]; +out: + BUG_ON(!is_shadow_present_pte(*sptep)); + return sptep; +} + +/* + * Must be used with a valid iterator: e.g. after rmap_get_first(). + * + * Returns sptep if found, NULL otherwise. 
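+ * As with rmap_get_first(), a returned sptep always points at a
+ * present SPTE (enforced by the BUG_ON below).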
+ */ +static u64 *rmap_get_next(struct rmap_iterator *iter) +{ + u64 *sptep; + + if (iter->desc) { + if (iter->pos < PTE_LIST_EXT - 1) { + ++iter->pos; + sptep = iter->desc->sptes[iter->pos]; + if (sptep) + goto out; + } + + iter->desc = iter->desc->more; + + if (iter->desc) { + iter->pos = 0; + /* desc->sptes[0] cannot be NULL */ + sptep = iter->desc->sptes[iter->pos]; + goto out; + } + } + + return NULL; +out: + BUG_ON(!is_shadow_present_pte(*sptep)); + return sptep; +} + +#define for_each_rmap_spte(_rmap_head_, _iter_, _spte_) \ + for (_spte_ = rmap_get_first(_rmap_head_, _iter_); \ + _spte_; _spte_ = rmap_get_next(_iter_)) + +static void drop_spte(struct kvm *kvm, u64 *sptep) +{ + if (mmu_spte_clear_track_bits(sptep)) + rmap_remove(kvm, sptep); +} + + +static bool __drop_large_spte(struct kvm *kvm, u64 *sptep) +{ + if (is_large_pte(*sptep)) { + WARN_ON(page_header(__pa(sptep))->role.level == + PT_PAGE_TABLE_LEVEL); + drop_spte(kvm, sptep); + --kvm->stat.lpages; + return true; + } + + return false; +} + +static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) +{ + if (__drop_large_spte(vcpu->kvm, sptep)) { + struct kvm_mmu_page *sp = page_header(__pa(sptep)); + + kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn, + KVM_PAGES_PER_HPAGE(sp->role.level)); + } +} + +/* + * Write-protect on the specified @sptep, @pt_protect indicates whether + * spte write-protection is caused by protecting shadow page table. + * + * Note: write protection is difference between dirty logging and spte + * protection: + * - for dirty logging, the spte can be set to writable at anytime if + * its dirty bitmap is properly set. + * - for spte protection, the spte can be writable only after unsync-ing + * shadow page. + * + * Return true if tlb need be flushed. + */ +static bool spte_write_protect(u64 *sptep, bool pt_protect) +{ + u64 spte = *sptep; + + if (!is_writable_pte(spte) && + !(pt_protect && spte_can_locklessly_be_made_writable(spte))) + return false; + + rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep); + + if (pt_protect) + spte &= ~SPTE_MMU_WRITEABLE; + spte = spte & ~PT_WRITABLE_MASK; + + return mmu_spte_update(sptep, spte); +} + +static bool __rmap_write_protect(struct kvm *kvm, + struct kvm_rmap_head *rmap_head, + bool pt_protect) +{ + u64 *sptep; + struct rmap_iterator iter; + bool flush = false; + + for_each_rmap_spte(rmap_head, &iter, sptep) + flush |= spte_write_protect(sptep, pt_protect); + + return flush; +} + +static bool spte_clear_dirty(u64 *sptep) +{ + u64 spte = *sptep; + + rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep); + + MMU_WARN_ON(!spte_ad_enabled(spte)); + spte &= ~shadow_dirty_mask; + return mmu_spte_update(sptep, spte); +} + +static bool spte_wrprot_for_clear_dirty(u64 *sptep) +{ + bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT, + (unsigned long *)sptep); + if (was_writable && !spte_ad_enabled(*sptep)) + kvm_set_pfn_dirty(spte_to_pfn(*sptep)); + + return was_writable; +} + +/* + * Gets the GFN ready for another round of dirty logging by clearing the + * - D bit on ad-enabled SPTEs, and + * - W bit on ad-disabled SPTEs. + * Returns true iff any D or W bits were cleared. 
+ */ +static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head) +{ + u64 *sptep; + struct rmap_iterator iter; + bool flush = false; + + for_each_rmap_spte(rmap_head, &iter, sptep) + if (spte_ad_need_write_protect(*sptep)) + flush |= spte_wrprot_for_clear_dirty(sptep); + else + flush |= spte_clear_dirty(sptep); + + return flush; +} + +static bool spte_set_dirty(u64 *sptep) +{ + u64 spte = *sptep; + + rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep); + + /* + * Similar to the !kvm_x86_ops->slot_disable_log_dirty case, + * do not bother adding back write access to pages marked + * SPTE_AD_WRPROT_ONLY_MASK. + */ + spte |= shadow_dirty_mask; + + return mmu_spte_update(sptep, spte); +} + +static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head) +{ + u64 *sptep; + struct rmap_iterator iter; + bool flush = false; + + for_each_rmap_spte(rmap_head, &iter, sptep) + if (spte_ad_enabled(*sptep)) + flush |= spte_set_dirty(sptep); + + return flush; +} + +/** + * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages + * @kvm: kvm instance + * @slot: slot to protect + * @gfn_offset: start of the BITS_PER_LONG pages we care about + * @mask: indicates which pages we should protect + * + * Used when we do not need to care about huge page mappings: e.g. during dirty + * logging we do not have any such mappings. + */ +static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, + struct kvm_memory_slot *slot, + gfn_t gfn_offset, unsigned long mask) +{ + struct kvm_rmap_head *rmap_head; + + while (mask) { + rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), + PT_PAGE_TABLE_LEVEL, slot); + __rmap_write_protect(kvm, rmap_head, false); + + /* clear the first set bit */ + mask &= mask - 1; + } +} + +/** + * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write + * protect the page if the D-bit isn't supported. + * @kvm: kvm instance + * @slot: slot to clear D-bit + * @gfn_offset: start of the BITS_PER_LONG pages we care about + * @mask: indicates which pages we should clear D-bit + * + * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap. + */ +void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, + struct kvm_memory_slot *slot, + gfn_t gfn_offset, unsigned long mask) +{ + struct kvm_rmap_head *rmap_head; + + while (mask) { + rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), + PT_PAGE_TABLE_LEVEL, slot); + __rmap_clear_dirty(kvm, rmap_head); + + /* clear the first set bit */ + mask &= mask - 1; + } +} +EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked); + +/** + * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected + * PT level pages. + * + * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to + * enable dirty logging for them. + * + * Used when we do not need to care about huge page mappings: e.g. during dirty + * logging we do not have any such mappings. 
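+ *
+ * When the vendor module provides kvm_x86_ops->enable_log_dirty_pt_masked
+ * (e.g. VMX with PML), that hook is used instead of write protection; for
+ * PML this typically clears the D bit so hot pages do not take write faults.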
+ */ +void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, + struct kvm_memory_slot *slot, + gfn_t gfn_offset, unsigned long mask) +{ + if (kvm_x86_ops->enable_log_dirty_pt_masked) + kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset, + mask); + else + kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); +} + +/** + * kvm_arch_write_log_dirty - emulate dirty page logging + * @vcpu: Guest mode vcpu + * + * Emulate arch specific page modification logging for the + * nested hypervisor + */ +int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu) +{ + if (kvm_x86_ops->write_log_dirty) + return kvm_x86_ops->write_log_dirty(vcpu); + + return 0; +} + +bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, + struct kvm_memory_slot *slot, u64 gfn) +{ + struct kvm_rmap_head *rmap_head; + int i; + bool write_protected = false; + + for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) { + rmap_head = __gfn_to_rmap(gfn, i, slot); + write_protected |= __rmap_write_protect(kvm, rmap_head, true); + } + + return write_protected; +} + +static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn) +{ + struct kvm_memory_slot *slot; + + slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); + return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn); +} + +static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head) +{ + u64 *sptep; + struct rmap_iterator iter; + bool flush = false; + + while ((sptep = rmap_get_first(rmap_head, &iter))) { + rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep); + + pte_list_remove(rmap_head, sptep); + flush = true; + } + + return flush; +} + +static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot, gfn_t gfn, int level, + unsigned long data) +{ + return kvm_zap_rmapp(kvm, rmap_head); +} + +static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot, gfn_t gfn, int level, + unsigned long data) +{ + u64 *sptep; + struct rmap_iterator iter; + int need_flush = 0; + u64 new_spte; + pte_t *ptep = (pte_t *)data; + kvm_pfn_t new_pfn; + + WARN_ON(pte_huge(*ptep)); + new_pfn = pte_pfn(*ptep); + +restart: + for_each_rmap_spte(rmap_head, &iter, sptep) { + rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n", + sptep, *sptep, gfn, level); + + need_flush = 1; + + if (pte_write(*ptep)) { + pte_list_remove(rmap_head, sptep); + goto restart; + } else { + new_spte = *sptep & ~PT64_BASE_ADDR_MASK; + new_spte |= (u64)new_pfn << PAGE_SHIFT; + + new_spte &= ~PT_WRITABLE_MASK; + new_spte &= ~SPTE_HOST_WRITEABLE; + + new_spte = mark_spte_for_access_track(new_spte); + + mmu_spte_clear_track_bits(sptep); + mmu_spte_set(sptep, new_spte); + } + } + + if (need_flush && kvm_available_flush_tlb_with_range()) { + kvm_flush_remote_tlbs_with_address(kvm, gfn, 1); + return 0; + } + + return need_flush; +} + +struct slot_rmap_walk_iterator { + /* input fields. */ + struct kvm_memory_slot *slot; + gfn_t start_gfn; + gfn_t end_gfn; + int start_level; + int end_level; + + /* output fields. */ + gfn_t gfn; + struct kvm_rmap_head *rmap; + int level; + + /* private field. 
*/ + struct kvm_rmap_head *end_rmap; +}; + +static void +rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level) +{ + iterator->level = level; + iterator->gfn = iterator->start_gfn; + iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot); + iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level, + iterator->slot); +} + +static void +slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator, + struct kvm_memory_slot *slot, int start_level, + int end_level, gfn_t start_gfn, gfn_t end_gfn) +{ + iterator->slot = slot; + iterator->start_level = start_level; + iterator->end_level = end_level; + iterator->start_gfn = start_gfn; + iterator->end_gfn = end_gfn; + + rmap_walk_init_level(iterator, iterator->start_level); +} + +static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator) +{ + return !!iterator->rmap; +} + +static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator) +{ + if (++iterator->rmap <= iterator->end_rmap) { + iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level)); + return; + } + + if (++iterator->level > iterator->end_level) { + iterator->rmap = NULL; + return; + } + + rmap_walk_init_level(iterator, iterator->level); +} + +#define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_, \ + _start_gfn, _end_gfn, _iter_) \ + for (slot_rmap_walk_init(_iter_, _slot_, _start_level_, \ + _end_level_, _start_gfn, _end_gfn); \ + slot_rmap_walk_okay(_iter_); \ + slot_rmap_walk_next(_iter_)) + +static int kvm_handle_hva_range(struct kvm *kvm, + unsigned long start, + unsigned long end, + unsigned long data, + int (*handler)(struct kvm *kvm, + struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot, + gfn_t gfn, + int level, + unsigned long data)) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + struct slot_rmap_walk_iterator iterator; + int ret = 0; + int i; + + for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { + slots = __kvm_memslots(kvm, i); + kvm_for_each_memslot(memslot, slots) { + unsigned long hva_start, hva_end; + gfn_t gfn_start, gfn_end; + + hva_start = max(start, memslot->userspace_addr); + hva_end = min(end, memslot->userspace_addr + + (memslot->npages << PAGE_SHIFT)); + if (hva_start >= hva_end) + continue; + /* + * {gfn(page) | page intersects with [hva_start, hva_end)} = + * {gfn_start, gfn_start+1, ..., gfn_end-1}. 
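+ * gfn_end is computed from hva_end + PAGE_SIZE - 1 so that a page
+ * which only partially overlaps the range is still included; it is
+ * exclusive, hence the walk below passes gfn_end - 1 as its
+ * inclusive upper bound.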
+ */ + gfn_start = hva_to_gfn_memslot(hva_start, memslot); + gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); + + for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL, + PT_MAX_HUGEPAGE_LEVEL, + gfn_start, gfn_end - 1, + &iterator) + ret |= handler(kvm, iterator.rmap, memslot, + iterator.gfn, iterator.level, data); + } + } + + return ret; +} + +static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, + unsigned long data, + int (*handler)(struct kvm *kvm, + struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot, + gfn_t gfn, int level, + unsigned long data)) +{ + return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler); +} + +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +{ + return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp); +} + +int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) +{ + return kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp); +} + +static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot, gfn_t gfn, int level, + unsigned long data) +{ + u64 *sptep; + struct rmap_iterator uninitialized_var(iter); + int young = 0; + + for_each_rmap_spte(rmap_head, &iter, sptep) + young |= mmu_spte_age(sptep); + + trace_kvm_age_page(gfn, level, slot, young); + return young; +} + +static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot, gfn_t gfn, + int level, unsigned long data) +{ + u64 *sptep; + struct rmap_iterator iter; + + for_each_rmap_spte(rmap_head, &iter, sptep) + if (is_accessed_spte(*sptep)) + return 1; + return 0; +} + +#define RMAP_RECYCLE_THRESHOLD 1000 + +static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) +{ + struct kvm_rmap_head *rmap_head; + struct kvm_mmu_page *sp; + + sp = page_header(__pa(spte)); + + rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp); + + kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0); + kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn, + KVM_PAGES_PER_HPAGE(sp->role.level)); +} + +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) +{ + return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp); +} + +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) +{ + return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp); +} + +#ifdef MMU_DEBUG +static int is_empty_shadow_page(u64 *spt) +{ + u64 *pos; + u64 *end; + + for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++) + if (is_shadow_present_pte(*pos)) { + printk(KERN_ERR "%s: %p %llx\n", __func__, + pos, *pos); + return 0; + } + return 1; +} +#endif + +/* + * This value is the sum of all of the kvm instances's + * kvm->arch.n_used_mmu_pages values. 
We need a global, + * aggregate version in order to make the slab shrinker + * faster + */ +static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr) +{ + kvm->arch.n_used_mmu_pages += nr; + percpu_counter_add(&kvm_total_used_mmu_pages, nr); +} + +static void kvm_mmu_free_page(struct kvm_mmu_page *sp) +{ + MMU_WARN_ON(!is_empty_shadow_page(sp->spt)); + hlist_del(&sp->hash_link); + list_del(&sp->link); + free_page((unsigned long)sp->spt); + if (!sp->role.direct) + free_page((unsigned long)sp->gfns); + kmem_cache_free(mmu_page_header_cache, sp); +} + +static unsigned kvm_page_table_hashfn(gfn_t gfn) +{ + return hash_64(gfn, KVM_MMU_HASH_SHIFT); +} + +static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu, + struct kvm_mmu_page *sp, u64 *parent_pte) +{ + if (!parent_pte) + return; + + pte_list_add(vcpu, parent_pte, &sp->parent_ptes); +} + +static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp, + u64 *parent_pte) +{ + __pte_list_remove(parent_pte, &sp->parent_ptes); +} + +static void drop_parent_pte(struct kvm_mmu_page *sp, + u64 *parent_pte) +{ + mmu_page_remove_parent_pte(sp, parent_pte); + mmu_spte_clear_no_track(parent_pte); +} + +static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct) +{ + struct kvm_mmu_page *sp; + + sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache); + sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); + if (!direct) + sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); + set_page_private(virt_to_page(sp->spt), (unsigned long)sp); + + /* + * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages() + * depends on valid pages being added to the head of the list. See + * comments in kvm_zap_obsolete_pages(). + */ + sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen; + list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); + kvm_mod_used_mmu_pages(vcpu->kvm, +1); + return sp; +} + +static void mark_unsync(u64 *spte); +static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp) +{ + u64 *sptep; + struct rmap_iterator iter; + + for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) { + mark_unsync(sptep); + } +} + +static void mark_unsync(u64 *spte) +{ + struct kvm_mmu_page *sp; + unsigned int index; + + sp = page_header(__pa(spte)); + index = spte - sp->spt; + if (__test_and_set_bit(index, sp->unsync_child_bitmap)) + return; + if (sp->unsync_children++) + return; + kvm_mmu_mark_parents_unsync(sp); +} + +static int nonpaging_sync_page(struct kvm_vcpu *vcpu, + struct kvm_mmu_page *sp) +{ + return 0; +} + +static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root) +{ +} + +static void nonpaging_update_pte(struct kvm_vcpu *vcpu, + struct kvm_mmu_page *sp, u64 *spte, + const void *pte) +{ + WARN_ON(1); +} + +#define KVM_PAGE_ARRAY_NR 16 + +struct kvm_mmu_pages { + struct mmu_page_and_offset { + struct kvm_mmu_page *sp; + unsigned int idx; + } page[KVM_PAGE_ARRAY_NR]; + unsigned int nr; +}; + +static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, + int idx) +{ + int i; + + if (sp->unsync) + for (i=0; i < pvec->nr; i++) + if (pvec->page[i].sp == sp) + return 0; + + pvec->page[pvec->nr].sp = sp; + pvec->page[pvec->nr].idx = idx; + pvec->nr++; + return (pvec->nr == KVM_PAGE_ARRAY_NR); +} + +static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx) +{ + --sp->unsync_children; + WARN_ON((int)sp->unsync_children < 0); + __clear_bit(idx, sp->unsync_child_bitmap); +} + +static int __mmu_unsync_walk(struct kvm_mmu_page *sp, + struct 
kvm_mmu_pages *pvec) +{ + int i, ret, nr_unsync_leaf = 0; + + for_each_set_bit(i, sp->unsync_child_bitmap, 512) { + struct kvm_mmu_page *child; + u64 ent = sp->spt[i]; + + if (!is_shadow_present_pte(ent) || is_large_pte(ent)) { + clear_unsync_child_bit(sp, i); + continue; + } + + child = page_header(ent & PT64_BASE_ADDR_MASK); + + if (child->unsync_children) { + if (mmu_pages_add(pvec, child, i)) + return -ENOSPC; + + ret = __mmu_unsync_walk(child, pvec); + if (!ret) { + clear_unsync_child_bit(sp, i); + continue; + } else if (ret > 0) { + nr_unsync_leaf += ret; + } else + return ret; + } else if (child->unsync) { + nr_unsync_leaf++; + if (mmu_pages_add(pvec, child, i)) + return -ENOSPC; + } else + clear_unsync_child_bit(sp, i); + } + + return nr_unsync_leaf; +} + +#define INVALID_INDEX (-1) + +static int mmu_unsync_walk(struct kvm_mmu_page *sp, + struct kvm_mmu_pages *pvec) +{ + pvec->nr = 0; + if (!sp->unsync_children) + return 0; + + mmu_pages_add(pvec, sp, INVALID_INDEX); + return __mmu_unsync_walk(sp, pvec); +} + +static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) +{ + WARN_ON(!sp->unsync); + trace_kvm_mmu_sync_page(sp); + sp->unsync = 0; + --kvm->stat.mmu_unsync; +} + +static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, + struct list_head *invalid_list); +static void kvm_mmu_commit_zap_page(struct kvm *kvm, + struct list_head *invalid_list); + + +#define for_each_valid_sp(_kvm, _sp, _gfn) \ + hlist_for_each_entry(_sp, \ + &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \ + if (is_obsolete_sp((_kvm), (_sp))) { \ + } else + +#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn) \ + for_each_valid_sp(_kvm, _sp, _gfn) \ + if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else + +static inline bool is_ept_sp(struct kvm_mmu_page *sp) +{ + return sp->role.cr0_wp && sp->role.smap_andnot_wp; +} + +/* @sp->gfn should be write-protected at the call site */ +static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, + struct list_head *invalid_list) +{ + if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) || + vcpu->arch.mmu->sync_page(vcpu, sp) == 0) { + kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); + return false; + } + + return true; +} + +static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm, + struct list_head *invalid_list, + bool remote_flush) +{ + if (!remote_flush && list_empty(invalid_list)) + return false; + + if (!list_empty(invalid_list)) + kvm_mmu_commit_zap_page(kvm, invalid_list); + else + kvm_flush_remote_tlbs(kvm); + return true; +} + +static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu, + struct list_head *invalid_list, + bool remote_flush, bool local_flush) +{ + if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush)) + return; + + if (local_flush) + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); +} + +#ifdef CONFIG_KVM_MMU_AUDIT +#include "mmu_audit.c" +#else +static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { } +static void mmu_audit_disable(void) { } +#endif + +static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp) +{ + return sp->role.invalid || + unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen); +} + +static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, + struct list_head *invalid_list) +{ + kvm_unlink_unsync_page(vcpu->kvm, sp); + return __kvm_sync_page(vcpu, sp, invalid_list); +} + +/* @gfn should be write-protected at the call site */ +static bool kvm_sync_pages(struct kvm_vcpu *vcpu, 
gfn_t gfn, + struct list_head *invalid_list) +{ + struct kvm_mmu_page *s; + bool ret = false; + + for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { + if (!s->unsync) + continue; + + WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); + ret |= kvm_sync_page(vcpu, s, invalid_list); + } + + return ret; +} + +struct mmu_page_path { + struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL]; + unsigned int idx[PT64_ROOT_MAX_LEVEL]; +}; + +#define for_each_sp(pvec, sp, parents, i) \ + for (i = mmu_pages_first(&pvec, &parents); \ + i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \ + i = mmu_pages_next(&pvec, &parents, i)) + +static int mmu_pages_next(struct kvm_mmu_pages *pvec, + struct mmu_page_path *parents, + int i) +{ + int n; + + for (n = i+1; n < pvec->nr; n++) { + struct kvm_mmu_page *sp = pvec->page[n].sp; + unsigned idx = pvec->page[n].idx; + int level = sp->role.level; + + parents->idx[level-1] = idx; + if (level == PT_PAGE_TABLE_LEVEL) + break; + + parents->parent[level-2] = sp; + } + + return n; +} + +static int mmu_pages_first(struct kvm_mmu_pages *pvec, + struct mmu_page_path *parents) +{ + struct kvm_mmu_page *sp; + int level; + + if (pvec->nr == 0) + return 0; + + WARN_ON(pvec->page[0].idx != INVALID_INDEX); + + sp = pvec->page[0].sp; + level = sp->role.level; + WARN_ON(level == PT_PAGE_TABLE_LEVEL); + + parents->parent[level-2] = sp; + + /* Also set up a sentinel. Further entries in pvec are all + * children of sp, so this element is never overwritten. + */ + parents->parent[level-1] = NULL; + return mmu_pages_next(pvec, parents, 0); +} + +static void mmu_pages_clear_parents(struct mmu_page_path *parents) +{ + struct kvm_mmu_page *sp; + unsigned int level = 0; + + do { + unsigned int idx = parents->idx[level]; + sp = parents->parent[level]; + if (!sp) + return; + + WARN_ON(idx == INVALID_INDEX); + clear_unsync_child_bit(sp, idx); + level++; + } while (!sp->unsync_children); +} + +static void mmu_sync_children(struct kvm_vcpu *vcpu, + struct kvm_mmu_page *parent) +{ + int i; + struct kvm_mmu_page *sp; + struct mmu_page_path parents; + struct kvm_mmu_pages pages; + LIST_HEAD(invalid_list); + bool flush = false; + + while (mmu_unsync_walk(parent, &pages)) { + bool protected = false; + + for_each_sp(pages, sp, parents, i) + protected |= rmap_write_protect(vcpu, sp->gfn); + + if (protected) { + kvm_flush_remote_tlbs(vcpu->kvm); + flush = false; + } + + for_each_sp(pages, sp, parents, i) { + flush |= kvm_sync_page(vcpu, sp, &invalid_list); + mmu_pages_clear_parents(&parents); + } + if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) { + kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); + cond_resched_lock(&vcpu->kvm->mmu_lock); + flush = false; + } + } + + kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); +} + +static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp) +{ + atomic_set(&sp->write_flooding_count, 0); +} + +static void clear_sp_write_flooding_count(u64 *spte) +{ + struct kvm_mmu_page *sp = page_header(__pa(spte)); + + __clear_sp_write_flooding_count(sp); +} + +static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, + gfn_t gfn, + gva_t gaddr, + unsigned level, + int direct, + unsigned access) +{ + union kvm_mmu_page_role role; + unsigned quadrant; + struct kvm_mmu_page *sp; + bool need_sync = false; + bool flush = false; + int collisions = 0; + LIST_HEAD(invalid_list); + + role = vcpu->arch.mmu->mmu_role.base; + role.level = level; + role.direct = direct; + if (role.direct) + role.gpte_is_8_bytes = true; + role.access = access; + if 
(!vcpu->arch.mmu->direct_map + && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) { + quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); + quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1; + role.quadrant = quadrant; + } + for_each_valid_sp(vcpu->kvm, sp, gfn) { + if (sp->gfn != gfn) { + collisions++; + continue; + } + + if (!need_sync && sp->unsync) + need_sync = true; + + if (sp->role.word != role.word) + continue; + + if (sp->unsync) { + /* The page is good, but __kvm_sync_page might still end + * up zapping it. If so, break in order to rebuild it. + */ + if (!__kvm_sync_page(vcpu, sp, &invalid_list)) + break; + + WARN_ON(!list_empty(&invalid_list)); + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + } + + if (sp->unsync_children) + kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); + + __clear_sp_write_flooding_count(sp); + trace_kvm_mmu_get_page(sp, false); + goto out; + } + + ++vcpu->kvm->stat.mmu_cache_miss; + + sp = kvm_mmu_alloc_page(vcpu, direct); + + sp->gfn = gfn; + sp->role = role; + hlist_add_head(&sp->hash_link, + &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]); + if (!direct) { + /* + * we should do write protection before syncing pages + * otherwise the content of the synced shadow page may + * be inconsistent with guest page table. + */ + account_shadowed(vcpu->kvm, sp); + if (level == PT_PAGE_TABLE_LEVEL && + rmap_write_protect(vcpu, gfn)) + kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1); + + if (level > PT_PAGE_TABLE_LEVEL && need_sync) + flush |= kvm_sync_pages(vcpu, gfn, &invalid_list); + } + clear_page(sp->spt); + trace_kvm_mmu_get_page(sp, true); + + kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); +out: + if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions) + vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions; + return sp; +} + +static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator, + struct kvm_vcpu *vcpu, hpa_t root, + u64 addr) +{ + iterator->addr = addr; + iterator->shadow_addr = root; + iterator->level = vcpu->arch.mmu->shadow_root_level; + + if (iterator->level == PT64_ROOT_4LEVEL && + vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL && + !vcpu->arch.mmu->direct_map) + --iterator->level; + + if (iterator->level == PT32E_ROOT_LEVEL) { + /* + * prev_root is currently only used for 64-bit hosts. So only + * the active root_hpa is valid here. 
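+ * That is why the BUG_ON below insists on vcpu->arch.mmu->root_hpa;
+ * the walk then continues from the pae_root[] entry selected by
+ * bits 31:30 of the address.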
+ */ + BUG_ON(root != vcpu->arch.mmu->root_hpa); + + iterator->shadow_addr + = vcpu->arch.mmu->pae_root[(addr >> 30) & 3]; + iterator->shadow_addr &= PT64_BASE_ADDR_MASK; + --iterator->level; + if (!iterator->shadow_addr) + iterator->level = 0; + } +} + +static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator, + struct kvm_vcpu *vcpu, u64 addr) +{ + shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa, + addr); +} + +static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator) +{ + if (iterator->level < PT_PAGE_TABLE_LEVEL) + return false; + + iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level); + iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index; + return true; +} + +static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator, + u64 spte) +{ + if (is_last_spte(spte, iterator->level)) { + iterator->level = 0; + return; + } + + iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK; + --iterator->level; +} + +static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator) +{ + __shadow_walk_next(iterator, *iterator->sptep); +} + +static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep, + struct kvm_mmu_page *sp) +{ + u64 spte; + + BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK); + + spte = __pa(sp->spt) | shadow_present_mask | PT_WRITABLE_MASK | + shadow_user_mask | shadow_x_mask | shadow_me_mask; + + if (sp_ad_disabled(sp)) + spte |= SPTE_AD_DISABLED_MASK; + else + spte |= shadow_accessed_mask; + + mmu_spte_set(sptep, spte); + + mmu_page_add_parent_pte(vcpu, sp, sptep); + + if (sp->unsync_children || sp->unsync) + mark_unsync(sptep); +} + +static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, + unsigned direct_access) +{ + if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) { + struct kvm_mmu_page *child; + + /* + * For the direct sp, if the guest pte's dirty bit + * changed form clean to dirty, it will corrupt the + * sp's access: allow writable in the read-only sp, + * so we should update the spte at this point to get + * a new sp with the correct access. 
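+ * Dropping the link below forces the next fault to attach a child
+ * shadow page whose role.access matches the new guest access bits,
+ * instead of reusing the stale one.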
+ */ + child = page_header(*sptep & PT64_BASE_ADDR_MASK); + if (child->role.access == direct_access) + return; + + drop_parent_pte(child, sptep); + kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1); + } +} + +static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, + u64 *spte) +{ + u64 pte; + struct kvm_mmu_page *child; + + pte = *spte; + if (is_shadow_present_pte(pte)) { + if (is_last_spte(pte, sp->role.level)) { + drop_spte(kvm, spte); + if (is_large_pte(pte)) + --kvm->stat.lpages; + } else { + child = page_header(pte & PT64_BASE_ADDR_MASK); + drop_parent_pte(child, spte); + } + return true; + } + + if (is_mmio_spte(pte)) + mmu_spte_clear_no_track(spte); + + return false; +} + +static void kvm_mmu_page_unlink_children(struct kvm *kvm, + struct kvm_mmu_page *sp) +{ + unsigned i; + + for (i = 0; i < PT64_ENT_PER_PAGE; ++i) + mmu_page_zap_pte(kvm, sp, sp->spt + i); +} + +static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) +{ + u64 *sptep; + struct rmap_iterator iter; + + while ((sptep = rmap_get_first(&sp->parent_ptes, &iter))) + drop_parent_pte(sp, sptep); +} + +static int mmu_zap_unsync_children(struct kvm *kvm, + struct kvm_mmu_page *parent, + struct list_head *invalid_list) +{ + int i, zapped = 0; + struct mmu_page_path parents; + struct kvm_mmu_pages pages; + + if (parent->role.level == PT_PAGE_TABLE_LEVEL) + return 0; + + while (mmu_unsync_walk(parent, &pages)) { + struct kvm_mmu_page *sp; + + for_each_sp(pages, sp, parents, i) { + kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); + mmu_pages_clear_parents(&parents); + zapped++; + } + } + + return zapped; +} + +static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm, + struct kvm_mmu_page *sp, + struct list_head *invalid_list, + int *nr_zapped) +{ + bool list_unstable; + + trace_kvm_mmu_prepare_zap_page(sp); + ++kvm->stat.mmu_shadow_zapped; + *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list); + kvm_mmu_page_unlink_children(kvm, sp); + kvm_mmu_unlink_parents(kvm, sp); + + /* Zapping children means active_mmu_pages has become unstable. */ + list_unstable = *nr_zapped; + + if (!sp->role.invalid && !sp->role.direct) + unaccount_shadowed(kvm, sp); + + if (sp->unsync) + kvm_unlink_unsync_page(kvm, sp); + if (!sp->root_count) { + /* Count self */ + (*nr_zapped)++; + list_move(&sp->link, invalid_list); + kvm_mod_used_mmu_pages(kvm, -1); + } else { + list_move(&sp->link, &kvm->arch.active_mmu_pages); + + /* + * Obsolete pages cannot be used on any vCPUs, see the comment + * in kvm_mmu_zap_all_fast(). Note, is_obsolete_sp() also + * treats invalid shadow pages as being obsolete. + */ + if (!is_obsolete_sp(kvm, sp)) + kvm_reload_remote_mmus(kvm); + } + + if (sp->lpage_disallowed) + unaccount_huge_nx_page(kvm, sp); + + sp->role.invalid = 1; + return list_unstable; +} + +static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, + struct list_head *invalid_list) +{ + int nr_zapped; + + __kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped); + return nr_zapped; +} + +static void kvm_mmu_commit_zap_page(struct kvm *kvm, + struct list_head *invalid_list) +{ + struct kvm_mmu_page *sp, *nsp; + + if (list_empty(invalid_list)) + return; + + /* + * We need to make sure everyone sees our modifications to + * the page tables and see changes to vcpu->mode here. The barrier + * in the kvm_flush_remote_tlbs() achieves this. This pairs + * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end. 
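+ * Freeing the shadow pages before that flush could let a lockless
+ * walker still dereference them.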
+ * + * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit + * guest mode and/or lockless shadow page table walks. + */ + kvm_flush_remote_tlbs(kvm); + + list_for_each_entry_safe(sp, nsp, invalid_list, link) { + WARN_ON(!sp->role.invalid || sp->root_count); + kvm_mmu_free_page(sp); + } +} + +static bool prepare_zap_oldest_mmu_page(struct kvm *kvm, + struct list_head *invalid_list) +{ + struct kvm_mmu_page *sp; + + if (list_empty(&kvm->arch.active_mmu_pages)) + return false; + + sp = list_last_entry(&kvm->arch.active_mmu_pages, + struct kvm_mmu_page, link); + return kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); +} + +/* + * Changing the number of mmu pages allocated to the vm + * Note: if goal_nr_mmu_pages is too small, you will get dead lock + */ +void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages) +{ + LIST_HEAD(invalid_list); + + spin_lock(&kvm->mmu_lock); + + if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) { + /* Need to free some mmu pages to achieve the goal. */ + while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) + if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list)) + break; + + kvm_mmu_commit_zap_page(kvm, &invalid_list); + goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages; + } + + kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages; + + spin_unlock(&kvm->mmu_lock); +} + +int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) +{ + struct kvm_mmu_page *sp; + LIST_HEAD(invalid_list); + int r; + + pgprintk("%s: looking for gfn %llx\n", __func__, gfn); + r = 0; + spin_lock(&kvm->mmu_lock); + for_each_gfn_indirect_valid_sp(kvm, sp, gfn) { + pgprintk("%s: gfn %llx role %x\n", __func__, gfn, + sp->role.word); + r = 1; + kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); + } + kvm_mmu_commit_zap_page(kvm, &invalid_list); + spin_unlock(&kvm->mmu_lock); + + return r; +} +EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page); + +static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) +{ + trace_kvm_mmu_unsync_page(sp); + ++vcpu->kvm->stat.mmu_unsync; + sp->unsync = 1; + + kvm_mmu_mark_parents_unsync(sp); +} + +static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, + bool can_unsync) +{ + struct kvm_mmu_page *sp; + + if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE)) + return true; + + for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { + if (!can_unsync) + return true; + + if (sp->unsync) + continue; + + WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL); + kvm_unsync_page(vcpu, sp); + } + + /* + * We need to ensure that the marking of unsync pages is visible + * before the SPTE is updated to allow writes because + * kvm_mmu_sync_roots() checks the unsync flags without holding + * the MMU lock and so can race with this. If the SPTE was updated + * before the page had been marked as unsync-ed, something like the + * following could happen: + * + * CPU 1 CPU 2 + * --------------------------------------------------------------------- + * 1.2 Host updates SPTE + * to be writable + * 2.1 Guest writes a GPTE for GVA X. + * (GPTE being in the guest page table shadowed + * by the SP from CPU 1.) + * This reads SPTE during the page table walk. + * Since SPTE.W is read as 1, there is no + * fault. + * + * 2.2 Guest issues TLB flush. + * That causes a VM Exit. + * + * 2.3 kvm_mmu_sync_pages() reads sp->unsync. + * Since it is false, so it just returns. + * + * 2.4 Guest accesses GVA X. + * Since the mapping in the SP was not updated, + * so the old mapping for GVA X incorrectly + * gets used. 
+ * 1.1 Host marks SP + * as unsync + * (sp->unsync = true) + * + * The write barrier below ensures that 1.1 happens before 1.2 and thus + * the situation in 2.4 does not arise. The implicit barrier in 2.2 + * pairs with this write barrier. + */ + smp_wmb(); + + return false; +} + +static bool kvm_is_mmio_pfn(kvm_pfn_t pfn) +{ + if (pfn_valid(pfn)) + return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) && + /* + * Some reserved pages, such as those from NVDIMM + * DAX devices, are not for MMIO, and can be mapped + * with cached memory type for better performance. + * However, the above check misconceives those pages + * as MMIO, and results in KVM mapping them with UC + * memory type, which would hurt the performance. + * Therefore, we check the host memory type in addition + * and only treat UC/UC-/WC pages as MMIO. + */ + (!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn)); + + return !e820__mapped_raw_any(pfn_to_hpa(pfn), + pfn_to_hpa(pfn + 1) - 1, + E820_TYPE_RAM); +} + +/* Bits which may be returned by set_spte() */ +#define SET_SPTE_WRITE_PROTECTED_PT BIT(0) +#define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1) + +static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, + unsigned pte_access, int level, + gfn_t gfn, kvm_pfn_t pfn, bool speculative, + bool can_unsync, bool host_writable) +{ + u64 spte = 0; + int ret = 0; + struct kvm_mmu_page *sp; + + if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access)) + return 0; + + sp = page_header(__pa(sptep)); + if (sp_ad_disabled(sp)) + spte |= SPTE_AD_DISABLED_MASK; + else if (kvm_vcpu_ad_need_write_protect(vcpu)) + spte |= SPTE_AD_WRPROT_ONLY_MASK; + + /* + * For the EPT case, shadow_present_mask is 0 if hardware + * supports exec-only page table entries. In that case, + * ACC_USER_MASK and shadow_user_mask are used to represent + * read access. See FNAME(gpte_access) in paging_tmpl.h. + */ + spte |= shadow_present_mask; + if (!speculative) + spte |= spte_shadow_accessed_mask(spte); + + if (level > PT_PAGE_TABLE_LEVEL && (pte_access & ACC_EXEC_MASK) && + is_nx_huge_page_enabled()) { + pte_access &= ~ACC_EXEC_MASK; + } + + if (pte_access & ACC_EXEC_MASK) + spte |= shadow_x_mask; + else + spte |= shadow_nx_mask; + + if (pte_access & ACC_USER_MASK) + spte |= shadow_user_mask; + + if (level > PT_PAGE_TABLE_LEVEL) + spte |= PT_PAGE_SIZE_MASK; + if (tdp_enabled) + spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, + kvm_is_mmio_pfn(pfn)); + + if (host_writable) + spte |= SPTE_HOST_WRITEABLE; + else + pte_access &= ~ACC_WRITE_MASK; + + if (!kvm_is_mmio_pfn(pfn)) + spte |= shadow_me_mask; + + spte |= (u64)pfn << PAGE_SHIFT; + + if (pte_access & ACC_WRITE_MASK) { + + /* + * Other vcpu creates new sp in the window between + * mapping_level() and acquiring mmu-lock. We can + * allow guest to retry the access, the mapping can + * be fixed if guest refault. + */ + if (level > PT_PAGE_TABLE_LEVEL && + mmu_gfn_lpage_is_disallowed(vcpu, gfn, level)) + goto done; + + spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE; + + /* + * Optimization: for pte sync, if spte was writable the hash + * lookup is unnecessary (and expensive). Write protection + * is responsibility of mmu_get_page / kvm_sync_page. + * Same reasoning can be applied to dirty page accounting. 
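+ * In other words: on the sync path (!can_unsync), an already
+ * writable old spte means the gfn is not write-protected for
+ * shadow paging, so the mmu_need_write_protect() lookup below
+ * may be skipped.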
+ */ + if (!can_unsync && is_writable_pte(*sptep)) + goto set_pte; + + if (mmu_need_write_protect(vcpu, gfn, can_unsync)) { + pgprintk("%s: found shadow page for %llx, marking ro\n", + __func__, gfn); + ret |= SET_SPTE_WRITE_PROTECTED_PT; + pte_access &= ~ACC_WRITE_MASK; + spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE); + } + } + + if (pte_access & ACC_WRITE_MASK) { + kvm_vcpu_mark_page_dirty(vcpu, gfn); + spte |= spte_shadow_dirty_mask(spte); + } + + if (speculative) + spte = mark_spte_for_access_track(spte); + +set_pte: + if (mmu_spte_update(sptep, spte)) + ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH; +done: + return ret; +} + +static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, + int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn, + bool speculative, bool host_writable) +{ + int was_rmapped = 0; + int rmap_count; + int set_spte_ret; + int ret = RET_PF_RETRY; + bool flush = false; + + pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__, + *sptep, write_fault, gfn); + + if (is_shadow_present_pte(*sptep)) { + /* + * If we overwrite a PTE page pointer with a 2MB PMD, unlink + * the parent of the now unreachable PTE. + */ + if (level > PT_PAGE_TABLE_LEVEL && + !is_large_pte(*sptep)) { + struct kvm_mmu_page *child; + u64 pte = *sptep; + + child = page_header(pte & PT64_BASE_ADDR_MASK); + drop_parent_pte(child, sptep); + flush = true; + } else if (pfn != spte_to_pfn(*sptep)) { + pgprintk("hfn old %llx new %llx\n", + spte_to_pfn(*sptep), pfn); + drop_spte(vcpu->kvm, sptep); + flush = true; + } else + was_rmapped = 1; + } + + set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn, + speculative, true, host_writable); + if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) { + if (write_fault) + ret = RET_PF_EMULATE; + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + } + + if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush) + kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, + KVM_PAGES_PER_HPAGE(level)); + + if (unlikely(is_mmio_spte(*sptep))) + ret = RET_PF_EMULATE; + + pgprintk("%s: setting spte %llx\n", __func__, *sptep); + trace_kvm_mmu_set_spte(level, gfn, sptep); + if (!was_rmapped && is_large_pte(*sptep)) + ++vcpu->kvm->stat.lpages; + + if (is_shadow_present_pte(*sptep)) { + if (!was_rmapped) { + rmap_count = rmap_add(vcpu, sptep, gfn); + if (rmap_count > RMAP_RECYCLE_THRESHOLD) + rmap_recycle(vcpu, sptep, gfn); + } + } + + return ret; +} + +static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, + bool no_dirty_log) +{ + struct kvm_memory_slot *slot; + + slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log); + if (!slot) + return KVM_PFN_ERR_FAULT; + + return gfn_to_pfn_memslot_atomic(slot, gfn); +} + +static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, + struct kvm_mmu_page *sp, + u64 *start, u64 *end) +{ + struct page *pages[PTE_PREFETCH_NUM]; + struct kvm_memory_slot *slot; + unsigned access = sp->role.access; + int i, ret; + gfn_t gfn; + + gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt); + slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK); + if (!slot) + return -1; + + ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start); + if (ret <= 0) + return -1; + + for (i = 0; i < ret; i++, gfn++, start++) { + mmu_set_spte(vcpu, start, access, 0, sp->role.level, gfn, + page_to_pfn(pages[i]), true, true); + put_page(pages[i]); + } + + return 0; +} + +static void __direct_pte_prefetch(struct kvm_vcpu *vcpu, + struct kvm_mmu_page *sp, u64 *sptep) +{ + u64 *spte, *start = NULL; + int i; + + 
WARN_ON(!sp->role.direct); + + i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1); + spte = sp->spt + i; + + for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) { + if (is_shadow_present_pte(*spte) || spte == sptep) { + if (!start) + continue; + if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0) + break; + start = NULL; + } else if (!start) + start = spte; + } +} + +static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) +{ + struct kvm_mmu_page *sp; + + sp = page_header(__pa(sptep)); + + /* + * Without accessed bits, there's no way to distinguish between + * actually accessed translations and prefetched, so disable pte + * prefetch if accessed bits aren't available. + */ + if (sp_ad_disabled(sp)) + return; + + if (sp->role.level > PT_PAGE_TABLE_LEVEL) + return; + + __direct_pte_prefetch(vcpu, sp, sptep); +} + +static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it, + gfn_t gfn, kvm_pfn_t *pfnp, int *levelp) +{ + int level = *levelp; + u64 spte = *it.sptep; + + if (it.level == level && level > PT_PAGE_TABLE_LEVEL && + is_nx_huge_page_enabled() && + is_shadow_present_pte(spte) && + !is_large_pte(spte)) { + /* + * A small SPTE exists for this pfn, but FNAME(fetch) + * and __direct_map would like to create a large PTE + * instead: just force them to go down another level, + * patching back for them into pfn the next 9 bits of + * the address. + */ + u64 page_mask = KVM_PAGES_PER_HPAGE(level) - KVM_PAGES_PER_HPAGE(level - 1); + *pfnp |= gfn & page_mask; + (*levelp)--; + } +} + +static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write, + int map_writable, int level, kvm_pfn_t pfn, + bool prefault, bool lpage_disallowed) +{ + struct kvm_shadow_walk_iterator it; + struct kvm_mmu_page *sp; + int ret; + gfn_t gfn = gpa >> PAGE_SHIFT; + gfn_t base_gfn = gfn; + + if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) + return RET_PF_RETRY; + + trace_kvm_mmu_spte_requested(gpa, level, pfn); + for_each_shadow_entry(vcpu, gpa, it) { + /* + * We cannot overwrite existing page tables with an NX + * large page, as the leaf could be executable. + */ + disallowed_hugepage_adjust(it, gfn, &pfn, &level); + + base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); + if (it.level == level) + break; + + drop_large_spte(vcpu, it.sptep); + if (!is_shadow_present_pte(*it.sptep)) { + sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr, + it.level - 1, true, ACC_ALL); + + link_shadow_page(vcpu, it.sptep, sp); + if (lpage_disallowed) + account_huge_nx_page(vcpu->kvm, sp); + } + } + + ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL, + write, level, base_gfn, pfn, prefault, + map_writable); + direct_pte_prefetch(vcpu, it.sptep); + ++vcpu->stat.pf_fixed; + return ret; +} + +static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk) +{ + send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk); +} + +static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn) +{ + /* + * Do not cache the mmio info caused by writing the readonly gfn + * into the spte otherwise read access on readonly gfn also can + * caused mmio page fault and treat it as mmio access. 
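+ * Returning RET_PF_EMULATE instead lets the faulting write be
+ * emulated while later reads of the read-only gfn still take the
+ * normal, non-MMIO path.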
+ */ + if (pfn == KVM_PFN_ERR_RO_FAULT) + return RET_PF_EMULATE; + + if (pfn == KVM_PFN_ERR_HWPOISON) { + kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current); + return RET_PF_RETRY; + } + + return -EFAULT; +} + +static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, + gfn_t gfn, kvm_pfn_t *pfnp, + int *levelp) +{ + kvm_pfn_t pfn = *pfnp; + int level = *levelp; + + /* + * Check if it's a transparent hugepage. If this would be an + * hugetlbfs page, level wouldn't be set to + * PT_PAGE_TABLE_LEVEL and there would be no adjustment done + * here. + */ + if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) && + !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL && + PageTransCompoundMap(pfn_to_page(pfn)) && + !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) { + unsigned long mask; + /* + * mmu_notifier_retry was successful and we hold the + * mmu_lock here, so the pmd can't become splitting + * from under us, and in turn + * __split_huge_page_refcount() can't run from under + * us and we can safely transfer the refcount from + * PG_tail to PG_head as we switch the pfn to tail to + * head. + */ + *levelp = level = PT_DIRECTORY_LEVEL; + mask = KVM_PAGES_PER_HPAGE(level) - 1; + VM_BUG_ON((gfn & mask) != (pfn & mask)); + if (pfn & mask) { + kvm_release_pfn_clean(pfn); + pfn &= ~mask; + kvm_get_pfn(pfn); + *pfnp = pfn; + } + } +} + +static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, + kvm_pfn_t pfn, unsigned access, int *ret_val) +{ + /* The pfn is invalid, report the error! */ + if (unlikely(is_error_pfn(pfn))) { + *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn); + return true; + } + + if (unlikely(is_noslot_pfn(pfn))) + vcpu_cache_mmio_info(vcpu, gva, gfn, + access & shadow_mmio_access_mask); + + return false; +} + +static bool page_fault_can_be_fast(u32 error_code) +{ + /* + * Do not fix the mmio spte with invalid generation number which + * need to be updated by slow page fault path. + */ + if (unlikely(error_code & PFERR_RSVD_MASK)) + return false; + + /* See if the page fault is due to an NX violation */ + if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK)) + == (PFERR_FETCH_MASK | PFERR_PRESENT_MASK)))) + return false; + + /* + * #PF can be fast if: + * 1. The shadow page table entry is not present, which could mean that + * the fault is potentially caused by access tracking (if enabled). + * 2. The shadow page table entry is present and the fault + * is caused by write-protect, that means we just need change the W + * bit of the spte which can be done out of mmu-lock. + * + * However, if access tracking is disabled we know that a non-present + * page must be a genuine page fault where we have to create a new SPTE. + * So, if access tracking is disabled, we return true only for write + * accesses to a present page. + */ + + return shadow_acc_track_mask != 0 || + ((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK)) + == (PFERR_WRITE_MASK | PFERR_PRESENT_MASK)); +} + +/* + * Returns true if the SPTE was fixed successfully. Otherwise, + * someone else modified the SPTE from its original value. + */ +static bool +fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, + u64 *sptep, u64 old_spte, u64 new_spte) +{ + gfn_t gfn; + + WARN_ON(!sp->role.direct); + + /* + * Theoretically we could also set dirty bit (and flush TLB) here in + * order to eliminate unnecessary PML logging. See comments in + * set_spte. 
But fast_page_fault is very unlikely to happen with PML + * enabled, so we do not do this. This might result in the same GPA + * to be logged in PML buffer again when the write really happens, and + * eventually to be called by mark_page_dirty twice. But it's also no + * harm. This also avoids the TLB flush needed after setting dirty bit + * so non-PML cases won't be impacted. + * + * Compare with set_spte where instead shadow_dirty_mask is set. + */ + if (cmpxchg64(sptep, old_spte, new_spte) != old_spte) + return false; + + if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) { + /* + * The gfn of direct spte is stable since it is + * calculated by sp->gfn. + */ + gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); + kvm_vcpu_mark_page_dirty(vcpu, gfn); + } + + return true; +} + +static bool is_access_allowed(u32 fault_err_code, u64 spte) +{ + if (fault_err_code & PFERR_FETCH_MASK) + return is_executable_pte(spte); + + if (fault_err_code & PFERR_WRITE_MASK) + return is_writable_pte(spte); + + /* Fault was on Read access */ + return spte & PT_PRESENT_MASK; +} + +/* + * Return value: + * - true: let the vcpu to access on the same address again. + * - false: let the real page fault path to fix it. + */ +static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level, + u32 error_code) +{ + struct kvm_shadow_walk_iterator iterator; + struct kvm_mmu_page *sp; + bool fault_handled = false; + u64 spte = 0ull; + uint retry_count = 0; + + if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) + return false; + + if (!page_fault_can_be_fast(error_code)) + return false; + + walk_shadow_page_lockless_begin(vcpu); + + do { + u64 new_spte; + + for_each_shadow_entry_lockless(vcpu, gva, iterator, spte) + if (!is_shadow_present_pte(spte) || + iterator.level < level) + break; + + sp = page_header(__pa(iterator.sptep)); + if (!is_last_spte(spte, sp->role.level)) + break; + + /* + * Check whether the memory access that caused the fault would + * still cause it if it were to be performed right now. If not, + * then this is a spurious fault caused by TLB lazily flushed, + * or some other CPU has already fixed the PTE after the + * current CPU took the fault. + * + * Need not check the access of upper level table entries since + * they are always ACC_ALL. + */ + if (is_access_allowed(error_code, spte)) { + fault_handled = true; + break; + } + + new_spte = spte; + + if (is_access_track_spte(spte)) + new_spte = restore_acc_track_spte(new_spte); + + /* + * Currently, to simplify the code, write-protection can + * be removed in the fast path only if the SPTE was + * write-protected for dirty-logging or access tracking. + */ + if ((error_code & PFERR_WRITE_MASK) && + spte_can_locklessly_be_made_writable(spte)) + { + new_spte |= PT_WRITABLE_MASK; + + /* + * Do not fix write-permission on the large spte. Since + * we only dirty the first page into the dirty-bitmap in + * fast_pf_fix_direct_spte(), other pages are missed + * if its slot has dirty logging enabled. + * + * Instead, we let the slow page fault path create a + * normal spte to fix the access. + * + * See the comments in kvm_arch_commit_memory_region(). + */ + if (sp->role.level > PT_PAGE_TABLE_LEVEL) + break; + } + + /* Verify that the fault can be handled in the fast path */ + if (new_spte == spte || + !is_access_allowed(error_code, new_spte)) + break; + + /* + * Currently, fast page fault only works for direct mapping + * since the gfn is not stable for indirect shadow page. See + * Documentation/virt/kvm/locking.txt to get more detail. 
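+ * (For a direct sp the gfn is derived from sp->gfn, which is what
+ * lets fast_pf_fix_direct_spte() below mark the page dirty without
+ * holding mmu_lock.)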
+ */ + fault_handled = fast_pf_fix_direct_spte(vcpu, sp, + iterator.sptep, spte, + new_spte); + if (fault_handled) + break; + + if (++retry_count > 4) { + printk_once(KERN_WARNING + "kvm: Fast #PF retrying more than 4 times.\n"); + break; + } + + } while (true); + + trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep, + spte, fault_handled); + walk_shadow_page_lockless_end(vcpu); + + return fault_handled; +} + +static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, + gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable); +static int make_mmu_pages_available(struct kvm_vcpu *vcpu); + +static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code, + gfn_t gfn, bool prefault) +{ + int r; + int level; + bool force_pt_level; + kvm_pfn_t pfn; + unsigned long mmu_seq; + bool map_writable, write = error_code & PFERR_WRITE_MASK; + bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) && + is_nx_huge_page_enabled(); + + force_pt_level = lpage_disallowed; + level = mapping_level(vcpu, gfn, &force_pt_level); + if (likely(!force_pt_level)) { + /* + * This path builds a PAE pagetable - so we can map + * 2mb pages at maximum. Therefore check if the level + * is larger than that. + */ + if (level > PT_DIRECTORY_LEVEL) + level = PT_DIRECTORY_LEVEL; + + gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); + } + + if (fast_page_fault(vcpu, v, level, error_code)) + return RET_PF_RETRY; + + mmu_seq = vcpu->kvm->mmu_notifier_seq; + smp_rmb(); + + if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable)) + return RET_PF_RETRY; + + if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r)) + return r; + + r = RET_PF_RETRY; + spin_lock(&vcpu->kvm->mmu_lock); + if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) + goto out_unlock; + if (make_mmu_pages_available(vcpu) < 0) + goto out_unlock; + if (likely(!force_pt_level)) + transparent_hugepage_adjust(vcpu, gfn, &pfn, &level); + r = __direct_map(vcpu, v, write, map_writable, level, pfn, + prefault, false); +out_unlock: + spin_unlock(&vcpu->kvm->mmu_lock); + kvm_release_pfn_clean(pfn); + return r; +} + +static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, + struct list_head *invalid_list) +{ + struct kvm_mmu_page *sp; + + if (!VALID_PAGE(*root_hpa)) + return; + + sp = page_header(*root_hpa & PT64_BASE_ADDR_MASK); + --sp->root_count; + if (!sp->root_count && sp->role.invalid) + kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); + + *root_hpa = INVALID_PAGE; +} + +/* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */ +void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, + ulong roots_to_free) +{ + int i; + LIST_HEAD(invalid_list); + bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT; + + BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG); + + /* Before acquiring the MMU lock, see if we need to do any real work. 
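 * If neither the active root nor any of the requested previous
 * roots is currently valid, there is nothing to free and mmu_lock
 * need not be taken at all.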
*/ + if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) { + for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) + if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) && + VALID_PAGE(mmu->prev_roots[i].hpa)) + break; + + if (i == KVM_MMU_NUM_PREV_ROOTS) + return; + } + + spin_lock(&vcpu->kvm->mmu_lock); + + for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) + if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) + mmu_free_root_page(vcpu->kvm, &mmu->prev_roots[i].hpa, + &invalid_list); + + if (free_active_root) { + if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL && + (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) { + mmu_free_root_page(vcpu->kvm, &mmu->root_hpa, + &invalid_list); + } else { + for (i = 0; i < 4; ++i) + if (mmu->pae_root[i] != 0) + mmu_free_root_page(vcpu->kvm, + &mmu->pae_root[i], + &invalid_list); + mmu->root_hpa = INVALID_PAGE; + } + mmu->root_cr3 = 0; + } + + kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); + spin_unlock(&vcpu->kvm->mmu_lock); +} +EXPORT_SYMBOL_GPL(kvm_mmu_free_roots); + +static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn) +{ + int ret = 0; + + if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) { + kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); + ret = 1; + } + + return ret; +} + +static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) +{ + struct kvm_mmu_page *sp; + unsigned i; + + if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) { + spin_lock(&vcpu->kvm->mmu_lock); + if(make_mmu_pages_available(vcpu) < 0) { + spin_unlock(&vcpu->kvm->mmu_lock); + return -ENOSPC; + } + sp = kvm_mmu_get_page(vcpu, 0, 0, + vcpu->arch.mmu->shadow_root_level, 1, ACC_ALL); + ++sp->root_count; + spin_unlock(&vcpu->kvm->mmu_lock); + vcpu->arch.mmu->root_hpa = __pa(sp->spt); + } else if (vcpu->arch.mmu->shadow_root_level == PT32E_ROOT_LEVEL) { + for (i = 0; i < 4; ++i) { + hpa_t root = vcpu->arch.mmu->pae_root[i]; + + MMU_WARN_ON(VALID_PAGE(root)); + spin_lock(&vcpu->kvm->mmu_lock); + if (make_mmu_pages_available(vcpu) < 0) { + spin_unlock(&vcpu->kvm->mmu_lock); + return -ENOSPC; + } + sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT), + i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL); + root = __pa(sp->spt); + ++sp->root_count; + spin_unlock(&vcpu->kvm->mmu_lock); + vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK; + } + vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); + } else + BUG(); + vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu); + + return 0; +} + +static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) +{ + struct kvm_mmu_page *sp; + u64 pdptr, pm_mask; + gfn_t root_gfn, root_cr3; + int i; + + root_cr3 = vcpu->arch.mmu->get_cr3(vcpu); + root_gfn = root_cr3 >> PAGE_SHIFT; + + if (mmu_check_root(vcpu, root_gfn)) + return 1; + + /* + * Do we shadow a long mode page table? If so we need to + * write-protect the guests page table root. + */ + if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { + hpa_t root = vcpu->arch.mmu->root_hpa; + + MMU_WARN_ON(VALID_PAGE(root)); + + spin_lock(&vcpu->kvm->mmu_lock); + if (make_mmu_pages_available(vcpu) < 0) { + spin_unlock(&vcpu->kvm->mmu_lock); + return -ENOSPC; + } + sp = kvm_mmu_get_page(vcpu, root_gfn, 0, + vcpu->arch.mmu->shadow_root_level, 0, ACC_ALL); + root = __pa(sp->spt); + ++sp->root_count; + spin_unlock(&vcpu->kvm->mmu_lock); + vcpu->arch.mmu->root_hpa = root; + goto set_root_cr3; + } + + /* + * We shadow a 32 bit page table. This may be a legacy 2-level + * or a PAE 3-level page table. In either case we need to be aware that + * the shadow page table may be a PAE or a long mode page table. 
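+ * pm_mask below reflects that: a real PAE PDPTE may only have the
+ * present bit set, while a pae_root entry that acts as a 64-bit
+ * PML4/PDP entry also gets the accessed, writable and user bits.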
+ */ + pm_mask = PT_PRESENT_MASK; + if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) + pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK; + + for (i = 0; i < 4; ++i) { + hpa_t root = vcpu->arch.mmu->pae_root[i]; + + MMU_WARN_ON(VALID_PAGE(root)); + if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) { + pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i); + if (!(pdptr & PT_PRESENT_MASK)) { + vcpu->arch.mmu->pae_root[i] = 0; + continue; + } + root_gfn = pdptr >> PAGE_SHIFT; + if (mmu_check_root(vcpu, root_gfn)) + return 1; + } + spin_lock(&vcpu->kvm->mmu_lock); + if (make_mmu_pages_available(vcpu) < 0) { + spin_unlock(&vcpu->kvm->mmu_lock); + return -ENOSPC; + } + sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL, + 0, ACC_ALL); + root = __pa(sp->spt); + ++sp->root_count; + spin_unlock(&vcpu->kvm->mmu_lock); + + vcpu->arch.mmu->pae_root[i] = root | pm_mask; + } + vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); + + /* + * If we shadow a 32 bit page table with a long mode page + * table we enter this path. + */ + if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) { + if (vcpu->arch.mmu->lm_root == NULL) { + /* + * The additional page necessary for this is only + * allocated on demand. + */ + + u64 *lm_root; + + lm_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT); + if (lm_root == NULL) + return 1; + + lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask; + + vcpu->arch.mmu->lm_root = lm_root; + } + + vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root); + } + +set_root_cr3: + vcpu->arch.mmu->root_cr3 = root_cr3; + + return 0; +} + +static int mmu_alloc_roots(struct kvm_vcpu *vcpu) +{ + if (vcpu->arch.mmu->direct_map) + return mmu_alloc_direct_roots(vcpu); + else + return mmu_alloc_shadow_roots(vcpu); +} + +void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) +{ + int i; + struct kvm_mmu_page *sp; + + if (vcpu->arch.mmu->direct_map) + return; + + if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) + return; + + vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); + + if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { + hpa_t root = vcpu->arch.mmu->root_hpa; + sp = page_header(root); + + /* + * Even if another CPU was marking the SP as unsync-ed + * simultaneously, any guest page table changes are not + * guaranteed to be visible anyway until this VCPU issues a TLB + * flush strictly after those changes are made. We only need to + * ensure that the other CPU sets these flags before any actual + * changes to the page tables are made. The comments in + * mmu_need_write_protect() describe what could go wrong if this + * requirement isn't satisfied. 
+ */ + if (!smp_load_acquire(&sp->unsync) && + !smp_load_acquire(&sp->unsync_children)) + return; + + spin_lock(&vcpu->kvm->mmu_lock); + kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); + + mmu_sync_children(vcpu, sp); + + kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); + spin_unlock(&vcpu->kvm->mmu_lock); + return; + } + + spin_lock(&vcpu->kvm->mmu_lock); + kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); + + for (i = 0; i < 4; ++i) { + hpa_t root = vcpu->arch.mmu->pae_root[i]; + + if (root && VALID_PAGE(root)) { + root &= PT64_BASE_ADDR_MASK; + sp = page_header(root); + mmu_sync_children(vcpu, sp); + } + } + + kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); + spin_unlock(&vcpu->kvm->mmu_lock); +} +EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots); + +static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr, + u32 access, struct x86_exception *exception) +{ + if (exception) + exception->error_code = 0; + return vaddr; +} + +static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr, + u32 access, + struct x86_exception *exception) +{ + if (exception) + exception->error_code = 0; + return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception); +} + +static bool +__is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level) +{ + int bit7 = (pte >> 7) & 1, low6 = pte & 0x3f; + + return (pte & rsvd_check->rsvd_bits_mask[bit7][level-1]) | + ((rsvd_check->bad_mt_xwr & (1ull << low6)) != 0); +} + +static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level) +{ + return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level); +} + +static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level) +{ + return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level); +} + +static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct) +{ + /* + * A nested guest cannot use the MMIO cache if it is using nested + * page tables, because cr2 is a nGPA while the cache stores GPAs. + */ + if (mmu_is_nested(vcpu)) + return false; + + if (direct) + return vcpu_match_mmio_gpa(vcpu, addr); + + return vcpu_match_mmio_gva(vcpu, addr); +} + +/* return true if reserved bit is detected on spte. 
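 * The last spte visited is also returned through *sptep (0ull when
 * the walk cannot start because the root is invalid).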
*/ +static bool +walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) +{ + struct kvm_shadow_walk_iterator iterator; + u64 sptes[PT64_ROOT_MAX_LEVEL], spte = 0ull; + int root, leaf; + bool reserved = false; + + if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) + goto exit; + + walk_shadow_page_lockless_begin(vcpu); + + for (shadow_walk_init(&iterator, vcpu, addr), + leaf = root = iterator.level; + shadow_walk_okay(&iterator); + __shadow_walk_next(&iterator, spte)) { + spte = mmu_spte_get_lockless(iterator.sptep); + + sptes[leaf - 1] = spte; + leaf--; + + if (!is_shadow_present_pte(spte)) + break; + + reserved |= is_shadow_zero_bits_set(vcpu->arch.mmu, spte, + iterator.level); + } + + walk_shadow_page_lockless_end(vcpu); + + if (reserved) { + pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n", + __func__, addr); + while (root > leaf) { + pr_err("------ spte 0x%llx level %d.\n", + sptes[root - 1], root); + root--; + } + } +exit: + *sptep = spte; + return reserved; +} + +static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct) +{ + u64 spte; + bool reserved; + + if (mmio_info_in_cache(vcpu, addr, direct)) + return RET_PF_EMULATE; + + reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte); + if (WARN_ON(reserved)) + return -EINVAL; + + if (is_mmio_spte(spte)) { + gfn_t gfn = get_mmio_spte_gfn(spte); + unsigned access = get_mmio_spte_access(spte); + + if (!check_mmio_spte(vcpu, spte)) + return RET_PF_INVALID; + + if (direct) + addr = 0; + + trace_handle_mmio_page_fault(addr, gfn, access); + vcpu_cache_mmio_info(vcpu, addr, gfn, access); + return RET_PF_EMULATE; + } + + /* + * If the page table is zapped by other cpus, let CPU fault again on + * the address. + */ + return RET_PF_RETRY; +} + +static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu, + u32 error_code, gfn_t gfn) +{ + if (unlikely(error_code & PFERR_RSVD_MASK)) + return false; + + if (!(error_code & PFERR_PRESENT_MASK) || + !(error_code & PFERR_WRITE_MASK)) + return false; + + /* + * guest is writing the page which is write tracked which can + * not be fixed by page fault handler. 
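+ * Returning true makes the caller fall back to instruction
+ * emulation (RET_PF_EMULATE) instead of trying to fix the fault.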
+ */ + if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE)) + return true; + + return false; +} + +static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr) +{ + struct kvm_shadow_walk_iterator iterator; + u64 spte; + + if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) + return; + + walk_shadow_page_lockless_begin(vcpu); + for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) { + clear_sp_write_flooding_count(iterator.sptep); + if (!is_shadow_present_pte(spte)) + break; + } + walk_shadow_page_lockless_end(vcpu); +} + +static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, + u32 error_code, bool prefault) +{ + gfn_t gfn = gva >> PAGE_SHIFT; + int r; + + pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code); + + if (page_fault_handle_page_track(vcpu, error_code, gfn)) + return RET_PF_EMULATE; + + r = mmu_topup_memory_caches(vcpu); + if (r) + return r; + + MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)); + + + return nonpaging_map(vcpu, gva & PAGE_MASK, + error_code, gfn, prefault); +} + +static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) +{ + struct kvm_arch_async_pf arch; + + arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; + arch.gfn = gfn; + arch.direct_map = vcpu->arch.mmu->direct_map; + arch.cr3 = vcpu->arch.mmu->get_cr3(vcpu); + + return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); +} + +static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, + gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable) +{ + struct kvm_memory_slot *slot; + bool async; + + /* + * Don't expose private memslots to L2. + */ + if (is_guest_mode(vcpu) && !kvm_is_visible_gfn(vcpu->kvm, gfn)) { + *pfn = KVM_PFN_NOSLOT; + return false; + } + + slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); + async = false; + *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable); + if (!async) + return false; /* *pfn has correct page already */ + + if (!prefault && kvm_can_do_async_pf(vcpu)) { + trace_kvm_try_async_get_page(gva, gfn); + if (kvm_find_async_pf_gfn(vcpu, gfn)) { + trace_kvm_async_pf_doublefault(gva, gfn); + kvm_make_request(KVM_REQ_APF_HALT, vcpu); + return true; + } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn)) + return true; + } + + *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable); + return false; +} + +int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, + u64 fault_address, char *insn, int insn_len) +{ + int r = 1; + + vcpu->arch.l1tf_flush_l1d = true; + switch (vcpu->arch.apf.host_apf_reason) { + default: + trace_kvm_page_fault(fault_address, error_code); + + if (kvm_event_needs_reinjection(vcpu)) + kvm_mmu_unprotect_page_virt(vcpu, fault_address); + r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn, + insn_len); + break; + case KVM_PV_REASON_PAGE_NOT_PRESENT: + vcpu->arch.apf.host_apf_reason = 0; + local_irq_disable(); + kvm_async_pf_task_wait(fault_address, 0); + local_irq_enable(); + break; + case KVM_PV_REASON_PAGE_READY: + vcpu->arch.apf.host_apf_reason = 0; + local_irq_disable(); + kvm_async_pf_task_wake(fault_address); + local_irq_enable(); + break; + } + return r; +} +EXPORT_SYMBOL_GPL(kvm_handle_page_fault); + +static bool +check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level) +{ + int page_num = KVM_PAGES_PER_HPAGE(level); + + gfn &= ~(page_num - 1); + + return kvm_mtrr_check_gfn_range_consistency(vcpu, gfn, page_num); +} + +static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 
error_code, + bool prefault) +{ + kvm_pfn_t pfn; + int r; + int level; + bool force_pt_level; + gfn_t gfn = gpa >> PAGE_SHIFT; + unsigned long mmu_seq; + int write = error_code & PFERR_WRITE_MASK; + bool map_writable; + bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) && + is_nx_huge_page_enabled(); + + MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)); + + if (page_fault_handle_page_track(vcpu, error_code, gfn)) + return RET_PF_EMULATE; + + r = mmu_topup_memory_caches(vcpu); + if (r) + return r; + + force_pt_level = + lpage_disallowed || + !check_hugepage_cache_consistency(vcpu, gfn, PT_DIRECTORY_LEVEL); + level = mapping_level(vcpu, gfn, &force_pt_level); + if (likely(!force_pt_level)) { + if (level > PT_DIRECTORY_LEVEL && + !check_hugepage_cache_consistency(vcpu, gfn, level)) + level = PT_DIRECTORY_LEVEL; + gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); + } + + if (fast_page_fault(vcpu, gpa, level, error_code)) + return RET_PF_RETRY; + + mmu_seq = vcpu->kvm->mmu_notifier_seq; + smp_rmb(); + + if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable)) + return RET_PF_RETRY; + + if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r)) + return r; + + r = RET_PF_RETRY; + spin_lock(&vcpu->kvm->mmu_lock); + if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) + goto out_unlock; + if (make_mmu_pages_available(vcpu) < 0) + goto out_unlock; + if (likely(!force_pt_level)) + transparent_hugepage_adjust(vcpu, gfn, &pfn, &level); + r = __direct_map(vcpu, gpa, write, map_writable, level, pfn, + prefault, lpage_disallowed); +out_unlock: + spin_unlock(&vcpu->kvm->mmu_lock); + kvm_release_pfn_clean(pfn); + return r; +} + +static void nonpaging_init_context(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) +{ + context->page_fault = nonpaging_page_fault; + context->gva_to_gpa = nonpaging_gva_to_gpa; + context->sync_page = nonpaging_sync_page; + context->invlpg = nonpaging_invlpg; + context->update_pte = nonpaging_update_pte; + context->root_level = 0; + context->shadow_root_level = PT32E_ROOT_LEVEL; + context->direct_map = true; + context->nx = false; +} + +/* + * Find out if a previously cached root matching the new CR3/role is available. + * The current root is also inserted into the cache. + * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is + * returned. + * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and + * false is returned. This root should now be freed by the caller. + */ +static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3, + union kvm_mmu_page_role new_role) +{ + uint i; + struct kvm_mmu_root_info root; + struct kvm_mmu *mmu = vcpu->arch.mmu; + + root.cr3 = mmu->root_cr3; + root.hpa = mmu->root_hpa; + + for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { + swap(root, mmu->prev_roots[i]); + + if (new_cr3 == root.cr3 && VALID_PAGE(root.hpa) && + page_header(root.hpa) != NULL && + new_role.word == page_header(root.hpa)->role.word) + break; + } + + mmu->root_hpa = root.hpa; + mmu->root_cr3 = root.cr3; + + return i < KVM_MMU_NUM_PREV_ROOTS; +} + +static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3, + union kvm_mmu_page_role new_role, + bool skip_tlb_flush) +{ + struct kvm_mmu *mmu = vcpu->arch.mmu; + + /* + * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid + * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs + * later if necessary. 
+ */ + if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL && + mmu->root_level >= PT64_ROOT_4LEVEL) { + if (mmu_check_root(vcpu, new_cr3 >> PAGE_SHIFT)) + return false; + + if (cached_root_available(vcpu, new_cr3, new_role)) { + /* + * It is possible that the cached previous root page is + * obsolete because of a change in the MMU generation + * number. However, changing the generation number is + * accompanied by KVM_REQ_MMU_RELOAD, which will free + * the root set here and allocate a new one. + */ + kvm_make_request(KVM_REQ_LOAD_CR3, vcpu); + if (!skip_tlb_flush) { + kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + } + + /* + * The last MMIO access's GVA and GPA are cached in the + * VCPU. When switching to a new CR3, that GVA->GPA + * mapping may no longer be valid. So clear any cached + * MMIO info even when we don't need to sync the shadow + * page tables. + */ + vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); + + __clear_sp_write_flooding_count( + page_header(mmu->root_hpa)); + + return true; + } + } + + return false; +} + +static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, + union kvm_mmu_page_role new_role, + bool skip_tlb_flush) +{ + if (!fast_cr3_switch(vcpu, new_cr3, new_role, skip_tlb_flush)) + kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, + KVM_MMU_ROOT_CURRENT); +} + +void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush) +{ + __kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu), + skip_tlb_flush); +} +EXPORT_SYMBOL_GPL(kvm_mmu_new_cr3); + +static unsigned long get_cr3(struct kvm_vcpu *vcpu) +{ + return kvm_read_cr3(vcpu); +} + +static void inject_page_fault(struct kvm_vcpu *vcpu, + struct x86_exception *fault) +{ + vcpu->arch.mmu->inject_page_fault(vcpu, fault); +} + +static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, + unsigned access, int *nr_present) +{ + if (unlikely(is_mmio_spte(*sptep))) { + if (gfn != get_mmio_spte_gfn(*sptep)) { + mmu_spte_clear_no_track(sptep); + return true; + } + + (*nr_present)++; + mark_mmio_spte(vcpu, sptep, gfn, access); + return true; + } + + return false; +} + +static inline bool is_last_gpte(struct kvm_mmu *mmu, + unsigned level, unsigned gpte) +{ + /* + * The RHS has bit 7 set iff level < mmu->last_nonleaf_level. + * If it is clear, there are no large pages at this level, so clear + * PT_PAGE_SIZE_MASK in gpte if that is the case. + */ + gpte &= level - mmu->last_nonleaf_level; + + /* + * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set + * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means + * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then. + */ + gpte |= level - PT_PAGE_TABLE_LEVEL - 1; + + return gpte & PT_PAGE_SIZE_MASK; +} + +#define PTTYPE_EPT 18 /* arbitrary */ +#define PTTYPE PTTYPE_EPT +#include "paging_tmpl.h" +#undef PTTYPE + +#define PTTYPE 64 +#include "paging_tmpl.h" +#undef PTTYPE + +#define PTTYPE 32 +#include "paging_tmpl.h" +#undef PTTYPE + +static void +__reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, + struct rsvd_bits_validate *rsvd_check, + int maxphyaddr, int level, bool nx, bool gbpages, + bool pse, bool amd) +{ + u64 exb_bit_rsvd = 0; + u64 gbpages_bit_rsvd = 0; + u64 nonleaf_bit8_rsvd = 0; + + rsvd_check->bad_mt_xwr = 0; + + if (!nx) + exb_bit_rsvd = rsvd_bits(63, 63); + if (!gbpages) + gbpages_bit_rsvd = rsvd_bits(7, 7); + + /* + * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for + * leaf entries) on AMD CPUs only. 
+ */ + if (amd) + nonleaf_bit8_rsvd = rsvd_bits(8, 8); + + switch (level) { + case PT32_ROOT_LEVEL: + /* no rsvd bits for 2 level 4K page table entries */ + rsvd_check->rsvd_bits_mask[0][1] = 0; + rsvd_check->rsvd_bits_mask[0][0] = 0; + rsvd_check->rsvd_bits_mask[1][0] = + rsvd_check->rsvd_bits_mask[0][0]; + + if (!pse) { + rsvd_check->rsvd_bits_mask[1][1] = 0; + break; + } + + if (is_cpuid_PSE36()) + /* 36bits PSE 4MB page */ + rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21); + else + /* 32 bits PSE 4MB page */ + rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21); + break; + case PT32E_ROOT_LEVEL: + rsvd_check->rsvd_bits_mask[0][2] = + rsvd_bits(maxphyaddr, 63) | + rsvd_bits(5, 8) | rsvd_bits(1, 2); /* PDPTE */ + rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | + rsvd_bits(maxphyaddr, 62); /* PDE */ + rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd | + rsvd_bits(maxphyaddr, 62); /* PTE */ + rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd | + rsvd_bits(maxphyaddr, 62) | + rsvd_bits(13, 20); /* large page */ + rsvd_check->rsvd_bits_mask[1][0] = + rsvd_check->rsvd_bits_mask[0][0]; + break; + case PT64_ROOT_5LEVEL: + rsvd_check->rsvd_bits_mask[0][4] = exb_bit_rsvd | + nonleaf_bit8_rsvd | rsvd_bits(7, 7) | + rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[1][4] = + rsvd_check->rsvd_bits_mask[0][4]; + /* fall through */ + case PT64_ROOT_4LEVEL: + rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd | + nonleaf_bit8_rsvd | rsvd_bits(7, 7) | + rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd | + nonleaf_bit8_rsvd | gbpages_bit_rsvd | + rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | + rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd | + rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[1][3] = + rsvd_check->rsvd_bits_mask[0][3]; + rsvd_check->rsvd_bits_mask[1][2] = exb_bit_rsvd | + gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) | + rsvd_bits(13, 29); + rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd | + rsvd_bits(maxphyaddr, 51) | + rsvd_bits(13, 20); /* large page */ + rsvd_check->rsvd_bits_mask[1][0] = + rsvd_check->rsvd_bits_mask[0][0]; + break; + } +} + +static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) +{ + __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check, + cpuid_maxphyaddr(vcpu), context->root_level, + context->nx, + guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES), + is_pse(vcpu), guest_cpuid_is_amd(vcpu)); +} + +static void +__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check, + int maxphyaddr, bool execonly) +{ + u64 bad_mt_xwr; + + rsvd_check->rsvd_bits_mask[0][4] = + rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7); + rsvd_check->rsvd_bits_mask[0][3] = + rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7); + rsvd_check->rsvd_bits_mask[0][2] = + rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); + rsvd_check->rsvd_bits_mask[0][1] = + rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); + rsvd_check->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51); + + /* large page */ + rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4]; + rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3]; + rsvd_check->rsvd_bits_mask[1][2] = + rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29); + rsvd_check->rsvd_bits_mask[1][1] = + rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20); + rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0]; + + bad_mt_xwr = 0xFFull << (2 * 8); /* bits 3..5 must not be 2 */ + bad_mt_xwr |= 0xFFull << (3 * 8); /* bits 3..5 
must not be 3 */ + bad_mt_xwr |= 0xFFull << (7 * 8); /* bits 3..5 must not be 7 */ + bad_mt_xwr |= REPEAT_BYTE(1ull << 2); /* bits 0..2 must not be 010 */ + bad_mt_xwr |= REPEAT_BYTE(1ull << 6); /* bits 0..2 must not be 110 */ + if (!execonly) { + /* bits 0..2 must not be 100 unless VMX capabilities allow it */ + bad_mt_xwr |= REPEAT_BYTE(1ull << 4); + } + rsvd_check->bad_mt_xwr = bad_mt_xwr; +} + +static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, + struct kvm_mmu *context, bool execonly) +{ + __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check, + cpuid_maxphyaddr(vcpu), execonly); +} + +/* + * the page table on host is the shadow page table for the page + * table in guest or amd nested guest, its mmu features completely + * follow the features in guest. + */ +void +reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) +{ + bool uses_nx = context->nx || + context->mmu_role.base.smep_andnot_wp; + struct rsvd_bits_validate *shadow_zero_check; + int i; + + /* + * Passing "true" to the last argument is okay; it adds a check + * on bit 8 of the SPTEs which KVM doesn't use anyway. + */ + shadow_zero_check = &context->shadow_zero_check; + __reset_rsvds_bits_mask(vcpu, shadow_zero_check, + shadow_phys_bits, + context->shadow_root_level, uses_nx, + guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES), + is_pse(vcpu), true); + + if (!shadow_me_mask) + return; + + for (i = context->shadow_root_level; --i >= 0;) { + shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask; + shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask; + } + +} +EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask); + +static inline bool boot_cpu_is_amd(void) +{ + WARN_ON_ONCE(!tdp_enabled); + return shadow_x_mask == 0; +} + +/* + * the direct page table on host, use as much mmu features as + * possible, however, kvm currently does not do execution-protection. + */ +static void +reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) +{ + struct rsvd_bits_validate *shadow_zero_check; + int i; + + shadow_zero_check = &context->shadow_zero_check; + + if (boot_cpu_is_amd()) + __reset_rsvds_bits_mask(vcpu, shadow_zero_check, + shadow_phys_bits, + context->shadow_root_level, false, + boot_cpu_has(X86_FEATURE_GBPAGES), + true, true); + else + __reset_rsvds_bits_mask_ept(shadow_zero_check, + shadow_phys_bits, + false); + + if (!shadow_me_mask) + return; + + for (i = context->shadow_root_level; --i >= 0;) { + shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask; + shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask; + } +} + +/* + * as the comments in reset_shadow_zero_bits_mask() except it + * is the shadow page table for intel nested guest. + */ +static void +reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, + struct kvm_mmu *context, bool execonly) +{ + __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, + shadow_phys_bits, execonly); +} + +#define BYTE_MASK(access) \ + ((1 & (access) ? 2 : 0) | \ + (2 & (access) ? 4 : 0) | \ + (3 & (access) ? 8 : 0) | \ + (4 & (access) ? 16 : 0) | \ + (5 & (access) ? 32 : 0) | \ + (6 & (access) ? 64 : 0) | \ + (7 & (access) ? 
128 : 0)) + + +static void update_permission_bitmask(struct kvm_vcpu *vcpu, + struct kvm_mmu *mmu, bool ept) +{ + unsigned byte; + + const u8 x = BYTE_MASK(ACC_EXEC_MASK); + const u8 w = BYTE_MASK(ACC_WRITE_MASK); + const u8 u = BYTE_MASK(ACC_USER_MASK); + + bool cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP) != 0; + bool cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP) != 0; + bool cr0_wp = is_write_protection(vcpu); + + for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) { + unsigned pfec = byte << 1; + + /* + * Each "*f" variable has a 1 bit for each UWX value + * that causes a fault with the given PFEC. + */ + + /* Faults from writes to non-writable pages */ + u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0; + /* Faults from user mode accesses to supervisor pages */ + u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0; + /* Faults from fetches of non-executable pages*/ + u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0; + /* Faults from kernel mode fetches of user pages */ + u8 smepf = 0; + /* Faults from kernel mode accesses of user pages */ + u8 smapf = 0; + + if (!ept) { + /* Faults from kernel mode accesses to user pages */ + u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u; + + /* Not really needed: !nx will cause pte.nx to fault */ + if (!mmu->nx) + ff = 0; + + /* Allow supervisor writes if !cr0.wp */ + if (!cr0_wp) + wf = (pfec & PFERR_USER_MASK) ? wf : 0; + + /* Disallow supervisor fetches of user code if cr4.smep */ + if (cr4_smep) + smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0; + + /* + * SMAP:kernel-mode data accesses from user-mode + * mappings should fault. A fault is considered + * as a SMAP violation if all of the following + * conditions are true: + * - X86_CR4_SMAP is set in CR4 + * - A user page is accessed + * - The access is not a fetch + * - Page fault in kernel mode + * - if CPL = 3 or X86_EFLAGS_AC is clear + * + * Here, we cover the first three conditions. + * The fourth is computed dynamically in permission_fault(); + * PFERR_RSVD_MASK bit will be set in PFEC if the access is + * *not* subject to SMAP restrictions. + */ + if (cr4_smap) + smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf; + } + + mmu->permissions[byte] = ff | uf | wf | smepf | smapf; + } +} + +/* +* PKU is an additional mechanism by which the paging controls access to +* user-mode addresses based on the value in the PKRU register. Protection +* key violations are reported through a bit in the page fault error code. +* Unlike other bits of the error code, the PK bit is not known at the +* call site of e.g. gva_to_gpa; it must be computed directly in +* permission_fault based on two bits of PKRU, on some machine state (CR4, +* CR0, EFER, CPL), and on other bits of the error code and the page tables. +* +* In particular the following conditions come from the error code, the +* page tables and the machine state: +* - PK is always zero unless CR4.PKE=1 and EFER.LMA=1 +* - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch) +* - PK is always zero if U=0 in the page tables +* - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access. +* +* The PKRU bitmask caches the result of these four conditions. The error +* code (minus the P bit) and the page table's U bit form an index into the +* PKRU bitmask. Two bits of the PKRU bitmask are then extracted and ANDed +* with the two bits of the PKRU register corresponding to the protection key. +* For the first three conditions above the bits will be 00, thus masking +* away both AD and WD. 
For all reads or if the last condition holds, WD +* only will be masked away. +*/ +static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, + bool ept) +{ + unsigned bit; + bool wp; + + if (ept) { + mmu->pkru_mask = 0; + return; + } + + /* PKEY is enabled only if CR4.PKE and EFER.LMA are both set. */ + if (!kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || !is_long_mode(vcpu)) { + mmu->pkru_mask = 0; + return; + } + + wp = is_write_protection(vcpu); + + for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) { + unsigned pfec, pkey_bits; + bool check_pkey, check_write, ff, uf, wf, pte_user; + + pfec = bit << 1; + ff = pfec & PFERR_FETCH_MASK; + uf = pfec & PFERR_USER_MASK; + wf = pfec & PFERR_WRITE_MASK; + + /* PFEC.RSVD is replaced by ACC_USER_MASK. */ + pte_user = pfec & PFERR_RSVD_MASK; + + /* + * Only need to check the access which is not an + * instruction fetch and is to a user page. + */ + check_pkey = (!ff && pte_user); + /* + * write access is controlled by PKRU if it is a + * user access or CR0.WP = 1. + */ + check_write = check_pkey && wf && (uf || wp); + + /* PKRU.AD stops both read and write access. */ + pkey_bits = !!check_pkey; + /* PKRU.WD stops write access. */ + pkey_bits |= (!!check_write) << 1; + + mmu->pkru_mask |= (pkey_bits & 3) << pfec; + } +} + +static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) +{ + unsigned root_level = mmu->root_level; + + mmu->last_nonleaf_level = root_level; + if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu)) + mmu->last_nonleaf_level++; +} + +static void paging64_init_context_common(struct kvm_vcpu *vcpu, + struct kvm_mmu *context, + int level) +{ + context->nx = is_nx(vcpu); + context->root_level = level; + + reset_rsvds_bits_mask(vcpu, context); + update_permission_bitmask(vcpu, context, false); + update_pkru_bitmask(vcpu, context, false); + update_last_nonleaf_level(vcpu, context); + + MMU_WARN_ON(!is_pae(vcpu)); + context->page_fault = paging64_page_fault; + context->gva_to_gpa = paging64_gva_to_gpa; + context->sync_page = paging64_sync_page; + context->invlpg = paging64_invlpg; + context->update_pte = paging64_update_pte; + context->shadow_root_level = level; + context->direct_map = false; +} + +static void paging64_init_context(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) +{ + int root_level = is_la57_mode(vcpu) ? 
+ PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL; + + paging64_init_context_common(vcpu, context, root_level); +} + +static void paging32_init_context(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) +{ + context->nx = false; + context->root_level = PT32_ROOT_LEVEL; + + reset_rsvds_bits_mask(vcpu, context); + update_permission_bitmask(vcpu, context, false); + update_pkru_bitmask(vcpu, context, false); + update_last_nonleaf_level(vcpu, context); + + context->page_fault = paging32_page_fault; + context->gva_to_gpa = paging32_gva_to_gpa; + context->sync_page = paging32_sync_page; + context->invlpg = paging32_invlpg; + context->update_pte = paging32_update_pte; + context->shadow_root_level = PT32E_ROOT_LEVEL; + context->direct_map = false; +} + +static void paging32E_init_context(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) +{ + paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL); +} + +static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu) +{ + union kvm_mmu_extended_role ext = {0}; + + ext.cr0_pg = !!is_paging(vcpu); + ext.cr4_pae = !!is_pae(vcpu); + ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); + ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); + ext.cr4_pse = !!is_pse(vcpu); + ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE); + ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57); + ext.maxphyaddr = cpuid_maxphyaddr(vcpu); + + ext.valid = 1; + + return ext; +} + +static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu, + bool base_only) +{ + union kvm_mmu_role role = {0}; + + role.base.access = ACC_ALL; + role.base.nxe = !!is_nx(vcpu); + role.base.cr0_wp = is_write_protection(vcpu); + role.base.smm = is_smm(vcpu); + role.base.guest_mode = is_guest_mode(vcpu); + + if (base_only) + return role; + + role.ext = kvm_calc_mmu_role_ext(vcpu); + + return role; +} + +static union kvm_mmu_role +kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only) +{ + union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only); + + role.base.ad_disabled = (shadow_accessed_mask == 0); + role.base.level = kvm_x86_ops->get_tdp_level(vcpu); + role.base.direct = true; + role.base.gpte_is_8_bytes = true; + + return role; +} + +static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) +{ + struct kvm_mmu *context = vcpu->arch.mmu; + union kvm_mmu_role new_role = + kvm_calc_tdp_mmu_root_page_role(vcpu, false); + + new_role.base.word &= mmu_base_role_mask.word; + if (new_role.as_u64 == context->mmu_role.as_u64) + return; + + context->mmu_role.as_u64 = new_role.as_u64; + context->page_fault = tdp_page_fault; + context->sync_page = nonpaging_sync_page; + context->invlpg = nonpaging_invlpg; + context->update_pte = nonpaging_update_pte; + context->shadow_root_level = kvm_x86_ops->get_tdp_level(vcpu); + context->direct_map = true; + context->set_cr3 = kvm_x86_ops->set_tdp_cr3; + context->get_cr3 = get_cr3; + context->get_pdptr = kvm_pdptr_read; + context->inject_page_fault = kvm_inject_page_fault; + + if (!is_paging(vcpu)) { + context->nx = false; + context->gva_to_gpa = nonpaging_gva_to_gpa; + context->root_level = 0; + } else if (is_long_mode(vcpu)) { + context->nx = is_nx(vcpu); + context->root_level = is_la57_mode(vcpu) ? 
+ PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL; + reset_rsvds_bits_mask(vcpu, context); + context->gva_to_gpa = paging64_gva_to_gpa; + } else if (is_pae(vcpu)) { + context->nx = is_nx(vcpu); + context->root_level = PT32E_ROOT_LEVEL; + reset_rsvds_bits_mask(vcpu, context); + context->gva_to_gpa = paging64_gva_to_gpa; + } else { + context->nx = false; + context->root_level = PT32_ROOT_LEVEL; + reset_rsvds_bits_mask(vcpu, context); + context->gva_to_gpa = paging32_gva_to_gpa; + } + + update_permission_bitmask(vcpu, context, false); + update_pkru_bitmask(vcpu, context, false); + update_last_nonleaf_level(vcpu, context); + reset_tdp_shadow_zero_bits_mask(vcpu, context); +} + +static union kvm_mmu_role +kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only) +{ + union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only); + + role.base.smep_andnot_wp = role.ext.cr4_smep && + !is_write_protection(vcpu); + role.base.smap_andnot_wp = role.ext.cr4_smap && + !is_write_protection(vcpu); + role.base.direct = !is_paging(vcpu); + role.base.gpte_is_8_bytes = !!is_pae(vcpu); + + if (!is_long_mode(vcpu)) + role.base.level = PT32E_ROOT_LEVEL; + else if (is_la57_mode(vcpu)) + role.base.level = PT64_ROOT_5LEVEL; + else + role.base.level = PT64_ROOT_4LEVEL; + + return role; +} + +void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu) +{ + struct kvm_mmu *context = vcpu->arch.mmu; + union kvm_mmu_role new_role = + kvm_calc_shadow_mmu_root_page_role(vcpu, false); + + new_role.base.word &= mmu_base_role_mask.word; + if (new_role.as_u64 == context->mmu_role.as_u64) + return; + + if (!is_paging(vcpu)) + nonpaging_init_context(vcpu, context); + else if (is_long_mode(vcpu)) + paging64_init_context(vcpu, context); + else if (is_pae(vcpu)) + paging32E_init_context(vcpu, context); + else + paging32_init_context(vcpu, context); + + context->mmu_role.as_u64 = new_role.as_u64; + reset_shadow_zero_bits_mask(vcpu, context); +} +EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu); + +static union kvm_mmu_role +kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty, + bool execonly) +{ + union kvm_mmu_role role = {0}; + + /* SMM flag is inherited from root_mmu */ + role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm; + + role.base.level = PT64_ROOT_4LEVEL; + role.base.gpte_is_8_bytes = true; + role.base.direct = false; + role.base.ad_disabled = !accessed_dirty; + role.base.guest_mode = true; + role.base.access = ACC_ALL; + + /* + * WP=1 and NOT_WP=1 is an impossible combination, use WP and the + * SMAP variation to denote shadow EPT entries. 
+ */ + role.base.cr0_wp = true; + role.base.smap_andnot_wp = true; + + role.ext = kvm_calc_mmu_role_ext(vcpu); + role.ext.execonly = execonly; + + return role; +} + +void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, + bool accessed_dirty, gpa_t new_eptp) +{ + struct kvm_mmu *context = vcpu->arch.mmu; + union kvm_mmu_role new_role = + kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty, + execonly); + + __kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false); + + new_role.base.word &= mmu_base_role_mask.word; + if (new_role.as_u64 == context->mmu_role.as_u64) + return; + + context->shadow_root_level = PT64_ROOT_4LEVEL; + + context->nx = true; + context->ept_ad = accessed_dirty; + context->page_fault = ept_page_fault; + context->gva_to_gpa = ept_gva_to_gpa; + context->sync_page = ept_sync_page; + context->invlpg = ept_invlpg; + context->update_pte = ept_update_pte; + context->root_level = PT64_ROOT_4LEVEL; + context->direct_map = false; + context->mmu_role.as_u64 = new_role.as_u64; + + update_permission_bitmask(vcpu, context, true); + update_pkru_bitmask(vcpu, context, true); + update_last_nonleaf_level(vcpu, context); + reset_rsvds_bits_mask_ept(vcpu, context, execonly); + reset_ept_shadow_zero_bits_mask(vcpu, context, execonly); +} +EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu); + +static void init_kvm_softmmu(struct kvm_vcpu *vcpu) +{ + struct kvm_mmu *context = vcpu->arch.mmu; + + kvm_init_shadow_mmu(vcpu); + context->set_cr3 = kvm_x86_ops->set_cr3; + context->get_cr3 = get_cr3; + context->get_pdptr = kvm_pdptr_read; + context->inject_page_fault = kvm_inject_page_fault; +} + +static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu) +{ + union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false); + struct kvm_mmu *g_context = &vcpu->arch.nested_mmu; + + new_role.base.word &= mmu_base_role_mask.word; + if (new_role.as_u64 == g_context->mmu_role.as_u64) + return; + + g_context->mmu_role.as_u64 = new_role.as_u64; + g_context->get_cr3 = get_cr3; + g_context->get_pdptr = kvm_pdptr_read; + g_context->inject_page_fault = kvm_inject_page_fault; + + /* + * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using + * L1's nested page tables (e.g. EPT12). The nested translation + * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using + * L2's page tables as the first level of translation and L1's + * nested page tables as the second level of translation. Basically + * the gva_to_gpa functions between mmu and nested_mmu are swapped. + */ + if (!is_paging(vcpu)) { + g_context->nx = false; + g_context->root_level = 0; + g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested; + } else if (is_long_mode(vcpu)) { + g_context->nx = is_nx(vcpu); + g_context->root_level = is_la57_mode(vcpu) ? 
+ PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL; + reset_rsvds_bits_mask(vcpu, g_context); + g_context->gva_to_gpa = paging64_gva_to_gpa_nested; + } else if (is_pae(vcpu)) { + g_context->nx = is_nx(vcpu); + g_context->root_level = PT32E_ROOT_LEVEL; + reset_rsvds_bits_mask(vcpu, g_context); + g_context->gva_to_gpa = paging64_gva_to_gpa_nested; + } else { + g_context->nx = false; + g_context->root_level = PT32_ROOT_LEVEL; + reset_rsvds_bits_mask(vcpu, g_context); + g_context->gva_to_gpa = paging32_gva_to_gpa_nested; + } + + update_permission_bitmask(vcpu, g_context, false); + update_pkru_bitmask(vcpu, g_context, false); + update_last_nonleaf_level(vcpu, g_context); +} + +void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots) +{ + if (reset_roots) { + uint i; + + vcpu->arch.mmu->root_hpa = INVALID_PAGE; + + for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) + vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; + } + + if (mmu_is_nested(vcpu)) + init_kvm_nested_mmu(vcpu); + else if (tdp_enabled) + init_kvm_tdp_mmu(vcpu); + else + init_kvm_softmmu(vcpu); +} +EXPORT_SYMBOL_GPL(kvm_init_mmu); + +static union kvm_mmu_page_role +kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu) +{ + union kvm_mmu_role role; + + if (tdp_enabled) + role = kvm_calc_tdp_mmu_root_page_role(vcpu, true); + else + role = kvm_calc_shadow_mmu_root_page_role(vcpu, true); + + return role.base; +} + +void kvm_mmu_reset_context(struct kvm_vcpu *vcpu) +{ + kvm_mmu_unload(vcpu); + kvm_init_mmu(vcpu, true); +} +EXPORT_SYMBOL_GPL(kvm_mmu_reset_context); + +int kvm_mmu_load(struct kvm_vcpu *vcpu) +{ + int r; + + r = mmu_topup_memory_caches(vcpu); + if (r) + goto out; + r = mmu_alloc_roots(vcpu); + kvm_mmu_sync_roots(vcpu); + if (r) + goto out; + kvm_mmu_load_cr3(vcpu); + kvm_x86_ops->tlb_flush(vcpu, true); +out: + return r; +} +EXPORT_SYMBOL_GPL(kvm_mmu_load); + +void kvm_mmu_unload(struct kvm_vcpu *vcpu) +{ + kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL); + WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa)); + kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); + WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa)); +} +EXPORT_SYMBOL_GPL(kvm_mmu_unload); + +static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, + struct kvm_mmu_page *sp, u64 *spte, + const void *new) +{ + if (sp->role.level != PT_PAGE_TABLE_LEVEL) { + ++vcpu->kvm->stat.mmu_pde_zapped; + return; + } + + ++vcpu->kvm->stat.mmu_pte_updated; + vcpu->arch.mmu->update_pte(vcpu, sp, spte, new); +} + +static bool need_remote_flush(u64 old, u64 new) +{ + if (!is_shadow_present_pte(old)) + return false; + if (!is_shadow_present_pte(new)) + return true; + if ((old ^ new) & PT64_BASE_ADDR_MASK) + return true; + old ^= shadow_nx_mask; + new ^= shadow_nx_mask; + return (old & ~new & PT64_PERM_MASK) != 0; +} + +static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, + int *bytes) +{ + u64 gentry = 0; + int r; + + /* + * Assume that the pte write on a page table of the same type + * as the current vcpu paging mode since we update the sptes only + * when they have the same mode. + */ + if (is_pae(vcpu) && *bytes == 4) { + /* Handle a 32-bit guest writing two halves of a 64-bit gpte */ + *gpa &= ~(gpa_t)7; + *bytes = 8; + } + + if (*bytes == 4 || *bytes == 8) { + r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes); + if (r) + gentry = 0; + } + + return gentry; +} + +/* + * If we're seeing too many writes to a page, it may no longer be a page table, + * or we may be forking, in which case it is better to unmap the page. 
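+ * The heuristic below counts writes that hit a shadowed page and calls
+ * it flooding once the counter reaches 3; the counter is cleared again
+ * (clear_sp_write_flooding_count()) when a shadow walk traverses the
+ * page, so page tables that are still in use survive sporadic writes.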
+ */ +static bool detect_write_flooding(struct kvm_mmu_page *sp) +{ + /* + * Skip write-flooding detected for the sp whose level is 1, because + * it can become unsync, then the guest page is not write-protected. + */ + if (sp->role.level == PT_PAGE_TABLE_LEVEL) + return false; + + atomic_inc(&sp->write_flooding_count); + return atomic_read(&sp->write_flooding_count) >= 3; +} + +/* + * Misaligned accesses are too much trouble to fix up; also, they usually + * indicate a page is not used as a page table. + */ +static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa, + int bytes) +{ + unsigned offset, pte_size, misaligned; + + pgprintk("misaligned: gpa %llx bytes %d role %x\n", + gpa, bytes, sp->role.word); + + offset = offset_in_page(gpa); + pte_size = sp->role.gpte_is_8_bytes ? 8 : 4; + + /* + * Sometimes, the OS only writes the last one bytes to update status + * bits, for example, in linux, andb instruction is used in clear_bit(). + */ + if (!(offset & (pte_size - 1)) && bytes == 1) + return false; + + misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1); + misaligned |= bytes < 4; + + return misaligned; +} + +static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte) +{ + unsigned page_offset, quadrant; + u64 *spte; + int level; + + page_offset = offset_in_page(gpa); + level = sp->role.level; + *nspte = 1; + if (!sp->role.gpte_is_8_bytes) { + page_offset <<= 1; /* 32->64 */ + /* + * A 32-bit pde maps 4MB while the shadow pdes map + * only 2MB. So we need to double the offset again + * and zap two pdes instead of one. + */ + if (level == PT32_ROOT_LEVEL) { + page_offset &= ~7; /* kill rounding error */ + page_offset <<= 1; + *nspte = 2; + } + quadrant = page_offset >> PAGE_SHIFT; + page_offset &= ~PAGE_MASK; + if (quadrant != sp->role.quadrant) + return NULL; + } + + spte = &sp->spt[page_offset / sizeof(*spte)]; + return spte; +} + +static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, + const u8 *new, int bytes, + struct kvm_page_track_notifier_node *node) +{ + gfn_t gfn = gpa >> PAGE_SHIFT; + struct kvm_mmu_page *sp; + LIST_HEAD(invalid_list); + u64 entry, gentry, *spte; + int npte; + bool remote_flush, local_flush; + + /* + * If we don't have indirect shadow pages, it means no page is + * write-protected, so we can exit simply. + */ + if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) + return; + + remote_flush = local_flush = false; + + pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes); + + /* + * No need to care whether allocation memory is successful + * or not since pte prefetch is skiped if it does not have + * enough objects in the cache. 
+ */ + mmu_topup_memory_caches(vcpu); + + spin_lock(&vcpu->kvm->mmu_lock); + + gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes); + + ++vcpu->kvm->stat.mmu_pte_write; + kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE); + + for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { + if (detect_write_misaligned(sp, gpa, bytes) || + detect_write_flooding(sp)) { + kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); + ++vcpu->kvm->stat.mmu_flooded; + continue; + } + + spte = get_written_sptes(sp, gpa, &npte); + if (!spte) + continue; + + local_flush = true; + while (npte--) { + u32 base_role = vcpu->arch.mmu->mmu_role.base.word; + + entry = *spte; + mmu_page_zap_pte(vcpu->kvm, sp, spte); + if (gentry && + !((sp->role.word ^ base_role) + & mmu_base_role_mask.word) && rmap_can_add(vcpu)) + mmu_pte_write_new_pte(vcpu, sp, spte, &gentry); + if (need_remote_flush(entry, *spte)) + remote_flush = true; + ++spte; + } + } + kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush); + kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE); + spin_unlock(&vcpu->kvm->mmu_lock); +} + +int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) +{ + gpa_t gpa; + int r; + + if (vcpu->arch.mmu->direct_map) + return 0; + + gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); + + r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT); + + return r; +} +EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt); + +static int make_mmu_pages_available(struct kvm_vcpu *vcpu) +{ + LIST_HEAD(invalid_list); + + if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES)) + return 0; + + while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) { + if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list)) + break; + + ++vcpu->kvm->stat.mmu_recycled; + } + kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); + + if (!kvm_mmu_available_pages(vcpu->kvm)) + return -ENOSPC; + return 0; +} + +int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, + void *insn, int insn_len) +{ + int r, emulation_type = 0; + bool direct = vcpu->arch.mmu->direct_map; + + /* With shadow page tables, fault_address contains a GVA or nGPA. */ + if (vcpu->arch.mmu->direct_map) { + vcpu->arch.gpa_available = true; + vcpu->arch.gpa_val = cr2; + } + + r = RET_PF_INVALID; + if (unlikely(error_code & PFERR_RSVD_MASK)) { + r = handle_mmio_page_fault(vcpu, cr2, direct); + if (r == RET_PF_EMULATE) + goto emulate; + } + + if (r == RET_PF_INVALID) { + r = vcpu->arch.mmu->page_fault(vcpu, cr2, + lower_32_bits(error_code), + false); + WARN_ON(r == RET_PF_INVALID); + } + + if (r == RET_PF_RETRY) + return 1; + if (r < 0) + return r; + + /* + * Before emulating the instruction, check if the error code + * was due to a RO violation while translating the guest page. + * This can occur when using nested virtualization with nested + * paging in both guests. If true, we simply unprotect the page + * and resume the guest. + */ + if (vcpu->arch.mmu->direct_map && + (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) { + kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2)); + return 1; + } + + /* + * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still + * optimistically try to just unprotect the page and let the processor + * re-execute the instruction that caused the page fault. Do not allow + * retrying MMIO emulation, as it's not only pointless but could also + * cause us to enter an infinite loop because the processor will keep + * faulting on the non-existent MMIO address. 
Retrying an instruction + * from a nested guest is also pointless and dangerous as we are only + * explicitly shadowing L1's page tables, i.e. unprotecting something + * for L1 isn't going to magically fix whatever issue cause L2 to fail. + */ + if (!mmio_info_in_cache(vcpu, cr2, direct) && !is_guest_mode(vcpu)) + emulation_type = EMULTYPE_ALLOW_RETRY; +emulate: + /* + * On AMD platforms, under certain conditions insn_len may be zero on #NPF. + * This can happen if a guest gets a page-fault on data access but the HW + * table walker is not able to read the instruction page (e.g instruction + * page is not present in memory). In those cases we simply restart the + * guest, with the exception of AMD Erratum 1096 which is unrecoverable. + */ + if (unlikely(insn && !insn_len)) { + if (!kvm_x86_ops->need_emulation_on_page_fault(vcpu)) + return 1; + } + + return x86_emulate_instruction(vcpu, cr2, emulation_type, insn, + insn_len); +} +EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); + +void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) +{ + struct kvm_mmu *mmu = vcpu->arch.mmu; + int i; + + /* INVLPG on a * non-canonical address is a NOP according to the SDM. */ + if (is_noncanonical_address(gva, vcpu)) + return; + + mmu->invlpg(vcpu, gva, mmu->root_hpa); + + /* + * INVLPG is required to invalidate any global mappings for the VA, + * irrespective of PCID. Since it would take us roughly similar amount + * of work to determine whether any of the prev_root mappings of the VA + * is marked global, or to just sync it blindly, so we might as well + * just always sync it. + * + * Mappings not reachable via the current cr3 or the prev_roots will be + * synced when switching to that cr3, so nothing needs to be done here + * for them. + */ + for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) + if (VALID_PAGE(mmu->prev_roots[i].hpa)) + mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); + + kvm_x86_ops->tlb_flush_gva(vcpu, gva); + ++vcpu->stat.invlpg; +} +EXPORT_SYMBOL_GPL(kvm_mmu_invlpg); + +void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid) +{ + struct kvm_mmu *mmu = vcpu->arch.mmu; + bool tlb_flush = false; + uint i; + + if (pcid == kvm_get_active_pcid(vcpu)) { + mmu->invlpg(vcpu, gva, mmu->root_hpa); + tlb_flush = true; + } + + for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { + if (VALID_PAGE(mmu->prev_roots[i].hpa) && + pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].cr3)) { + mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); + tlb_flush = true; + } + } + + if (tlb_flush) + kvm_x86_ops->tlb_flush_gva(vcpu, gva); + + ++vcpu->stat.invlpg; + + /* + * Mappings not reachable via the current cr3 or the prev_roots will be + * synced when switching to that cr3, so nothing needs to be done here + * for them. + */ +} +EXPORT_SYMBOL_GPL(kvm_mmu_invpcid_gva); + +void kvm_enable_tdp(void) +{ + tdp_enabled = true; +} +EXPORT_SYMBOL_GPL(kvm_enable_tdp); + +void kvm_disable_tdp(void) +{ + tdp_enabled = false; +} +EXPORT_SYMBOL_GPL(kvm_disable_tdp); + + +/* The return value indicates if tlb flush on all vcpus is needed. */ +typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head); + +/* The caller should hold mmu-lock before calling this function. 
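+ * The slot_handle_*() helpers below walk every rmap bucket of @memslot
+ * between start_level and end_level and apply @fn to it.  If
+ * lock_flush_tlb is true, pending TLB flushes are issued before mmu_lock
+ * is dropped for rescheduling; the return value tells the caller whether
+ * a flush is still outstanding.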
*/ +static __always_inline bool +slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, + slot_level_handler fn, int start_level, int end_level, + gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb) +{ + struct slot_rmap_walk_iterator iterator; + bool flush = false; + + for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn, + end_gfn, &iterator) { + if (iterator.rmap) + flush |= fn(kvm, iterator.rmap); + + if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { + if (flush && lock_flush_tlb) { + kvm_flush_remote_tlbs_with_address(kvm, + start_gfn, + iterator.gfn - start_gfn + 1); + flush = false; + } + cond_resched_lock(&kvm->mmu_lock); + } + } + + if (flush && lock_flush_tlb) { + kvm_flush_remote_tlbs_with_address(kvm, start_gfn, + end_gfn - start_gfn + 1); + flush = false; + } + + return flush; +} + +static __always_inline bool +slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, + slot_level_handler fn, int start_level, int end_level, + bool lock_flush_tlb) +{ + return slot_handle_level_range(kvm, memslot, fn, start_level, + end_level, memslot->base_gfn, + memslot->base_gfn + memslot->npages - 1, + lock_flush_tlb); +} + +static __always_inline bool +slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot, + slot_level_handler fn, bool lock_flush_tlb) +{ + return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL, + PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); +} + +static __always_inline bool +slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot, + slot_level_handler fn, bool lock_flush_tlb) +{ + return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1, + PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); +} + +static __always_inline bool +slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot, + slot_level_handler fn, bool lock_flush_tlb) +{ + return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL, + PT_PAGE_TABLE_LEVEL, lock_flush_tlb); +} + +static void free_mmu_pages(struct kvm_mmu *mmu) +{ + free_page((unsigned long)mmu->pae_root); + free_page((unsigned long)mmu->lm_root); +} + +static int alloc_mmu_pages(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) +{ + struct page *page; + int i; + + /* + * When using PAE paging, the four PDPTEs are treated as 'root' pages, + * while the PDP table is a per-vCPU construct that's allocated at MMU + * creation. When emulating 32-bit mode, cr3 is only 32 bits even on + * x86_64. Therefore we need to allocate the PDP table in the first + * 4GB of memory, which happens to fit the DMA32 zone. Except for + * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can + * skip allocating the PDP table. 
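+ * (On a 32-bit host with NPT, get_tdp_level() is expected to return
+ * PT32E_ROOT_LEVEL, so the level check below still allocates pae_root
+ * for that one TDP configuration.)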
+ */ + if (tdp_enabled && kvm_x86_ops->get_tdp_level(vcpu) > PT32E_ROOT_LEVEL) + return 0; + + page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32); + if (!page) + return -ENOMEM; + + mmu->pae_root = page_address(page); + for (i = 0; i < 4; ++i) + mmu->pae_root[i] = INVALID_PAGE; + + return 0; +} + +int kvm_mmu_create(struct kvm_vcpu *vcpu) +{ + uint i; + int ret; + + vcpu->arch.mmu = &vcpu->arch.root_mmu; + vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; + + vcpu->arch.root_mmu.root_hpa = INVALID_PAGE; + vcpu->arch.root_mmu.root_cr3 = 0; + vcpu->arch.root_mmu.translate_gpa = translate_gpa; + for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) + vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; + + vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE; + vcpu->arch.guest_mmu.root_cr3 = 0; + vcpu->arch.guest_mmu.translate_gpa = translate_gpa; + for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) + vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; + + vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa; + + ret = alloc_mmu_pages(vcpu, &vcpu->arch.guest_mmu); + if (ret) + return ret; + + ret = alloc_mmu_pages(vcpu, &vcpu->arch.root_mmu); + if (ret) + goto fail_allocate_root; + + return ret; + fail_allocate_root: + free_mmu_pages(&vcpu->arch.guest_mmu); + return ret; +} + +#define BATCH_ZAP_PAGES 10 +static void kvm_zap_obsolete_pages(struct kvm *kvm) +{ + struct kvm_mmu_page *sp, *node; + int nr_zapped, batch = 0; + +restart: + list_for_each_entry_safe_reverse(sp, node, + &kvm->arch.active_mmu_pages, link) { + /* + * No obsolete valid page exists before a newly created page + * since active_mmu_pages is a FIFO list. + */ + if (!is_obsolete_sp(kvm, sp)) + break; + + /* + * Skip invalid pages with a non-zero root count, zapping pages + * with a non-zero root count will never succeed, i.e. the page + * will get thrown back on active_mmu_pages and we'll get stuck + * in an infinite loop. + */ + if (sp->role.invalid && sp->root_count) + continue; + + /* + * No need to flush the TLB since we're only zapping shadow + * pages with an obsolete generation number and all vCPUS have + * loaded a new root, i.e. the shadow pages being zapped cannot + * be in active use by the guest. + */ + if (batch >= BATCH_ZAP_PAGES && + cond_resched_lock(&kvm->mmu_lock)) { + batch = 0; + goto restart; + } + + if (__kvm_mmu_prepare_zap_page(kvm, sp, + &kvm->arch.zapped_obsolete_pages, &nr_zapped)) { + batch += nr_zapped; + goto restart; + } + } + + /* + * Trigger a remote TLB flush before freeing the page tables to ensure + * KVM is not in the middle of a lockless shadow page table walk, which + * may reference the pages. + */ + kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages); +} + +/* + * Fast invalidate all shadow pages and use lock-break technique + * to zap obsolete pages. + * + * It's required when memslot is being deleted or VM is being + * destroyed, in these cases, we should ensure that KVM MMU does + * not use any resource of the being-deleted slot or all slots + * after calling the function. + */ +static void kvm_mmu_zap_all_fast(struct kvm *kvm) +{ + lockdep_assert_held(&kvm->slots_lock); + + spin_lock(&kvm->mmu_lock); + trace_kvm_mmu_zap_all_fast(kvm); + + /* + * Toggle mmu_valid_gen between '0' and '1'. Because slots_lock is + * held for the entire duration of zapping obsolete pages, it's + * impossible for there to be multiple invalid generations associated + * with *valid* shadow pages at any given time, i.e. 
there is exactly + * one valid generation and (at most) one invalid generation. + */ + kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1; + + /* + * Notify all vcpus to reload its shadow page table and flush TLB. + * Then all vcpus will switch to new shadow page table with the new + * mmu_valid_gen. + * + * Note: we need to do this under the protection of mmu_lock, + * otherwise, vcpu would purge shadow page but miss tlb flush. + */ + kvm_reload_remote_mmus(kvm); + + kvm_zap_obsolete_pages(kvm); + spin_unlock(&kvm->mmu_lock); +} + +static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm) +{ + return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages)); +} + +static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot, + struct kvm_page_track_notifier_node *node) +{ + kvm_mmu_zap_all_fast(kvm); +} + +void kvm_mmu_init_vm(struct kvm *kvm) +{ + struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker; + + node->track_write = kvm_mmu_pte_write; + node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot; + kvm_page_track_register_notifier(kvm, node); +} + +void kvm_mmu_uninit_vm(struct kvm *kvm) +{ + struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker; + + kvm_page_track_unregister_notifier(kvm, node); +} + +void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + int i; + + spin_lock(&kvm->mmu_lock); + for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { + slots = __kvm_memslots(kvm, i); + kvm_for_each_memslot(memslot, slots) { + gfn_t start, end; + + start = max(gfn_start, memslot->base_gfn); + end = min(gfn_end, memslot->base_gfn + memslot->npages); + if (start >= end) + continue; + + slot_handle_level_range(kvm, memslot, kvm_zap_rmapp, + PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL, + start, end - 1, true); + } + } + + spin_unlock(&kvm->mmu_lock); +} + +static bool slot_rmap_write_protect(struct kvm *kvm, + struct kvm_rmap_head *rmap_head) +{ + return __rmap_write_protect(kvm, rmap_head, false); +} + +void kvm_mmu_slot_remove_write_access(struct kvm *kvm, + struct kvm_memory_slot *memslot) +{ + bool flush; + + spin_lock(&kvm->mmu_lock); + flush = slot_handle_all_level(kvm, memslot, slot_rmap_write_protect, + false); + spin_unlock(&kvm->mmu_lock); + + /* + * kvm_mmu_slot_remove_write_access() and kvm_vm_ioctl_get_dirty_log() + * which do tlb flush out of mmu-lock should be serialized by + * kvm->slots_lock otherwise tlb flush would be missed. + */ + lockdep_assert_held(&kvm->slots_lock); + + /* + * We can flush all the TLBs out of the mmu lock without TLB + * corruption since we just change the spte from writable to + * readonly so that we only need to care the case of changing + * spte from present to present (changing the spte from present + * to nonpresent will flush all the TLBs immediately), in other + * words, the only case we care is mmu_spte_update() where we + * have checked SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE + * instead of PT_WRITABLE_MASK, that means it does not depend + * on PT_WRITABLE_MASK anymore. 
+ */ + if (flush) + kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn, + memslot->npages); +} + +static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm, + struct kvm_rmap_head *rmap_head) +{ + u64 *sptep; + struct rmap_iterator iter; + int need_tlb_flush = 0; + kvm_pfn_t pfn; + struct kvm_mmu_page *sp; + +restart: + for_each_rmap_spte(rmap_head, &iter, sptep) { + sp = page_header(__pa(sptep)); + pfn = spte_to_pfn(*sptep); + + /* + * We cannot do huge page mapping for indirect shadow pages, + * which are found on the last rmap (level = 1) when not using + * tdp; such shadow pages are synced with the page table in + * the guest, and the guest page table is using 4K page size + * mapping if the indirect sp has level = 1. + */ + if (sp->role.direct && !kvm_is_reserved_pfn(pfn) && + !kvm_is_zone_device_pfn(pfn) && + PageTransCompoundMap(pfn_to_page(pfn))) { + pte_list_remove(rmap_head, sptep); + + if (kvm_available_flush_tlb_with_range()) + kvm_flush_remote_tlbs_with_address(kvm, sp->gfn, + KVM_PAGES_PER_HPAGE(sp->role.level)); + else + need_tlb_flush = 1; + + goto restart; + } + } + + return need_tlb_flush; +} + +void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, + const struct kvm_memory_slot *memslot) +{ + /* FIXME: const-ify all uses of struct kvm_memory_slot. */ + spin_lock(&kvm->mmu_lock); + slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot, + kvm_mmu_zap_collapsible_spte, true); + spin_unlock(&kvm->mmu_lock); +} + +void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, + struct kvm_memory_slot *memslot) +{ + bool flush; + + spin_lock(&kvm->mmu_lock); + flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false); + spin_unlock(&kvm->mmu_lock); + + lockdep_assert_held(&kvm->slots_lock); + + /* + * It's also safe to flush TLBs out of mmu lock here as currently this + * function is only used for dirty logging, in which case flushing TLB + * out of mmu lock also guarantees no dirty pages will be lost in + * dirty_bitmap. 
+ */ + if (flush) + kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn, + memslot->npages); +} +EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty); + +void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm, + struct kvm_memory_slot *memslot) +{ + bool flush; + + spin_lock(&kvm->mmu_lock); + flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect, + false); + spin_unlock(&kvm->mmu_lock); + + /* see kvm_mmu_slot_remove_write_access */ + lockdep_assert_held(&kvm->slots_lock); + + if (flush) + kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn, + memslot->npages); +} +EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access); + +void kvm_mmu_slot_set_dirty(struct kvm *kvm, + struct kvm_memory_slot *memslot) +{ + bool flush; + + spin_lock(&kvm->mmu_lock); + flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false); + spin_unlock(&kvm->mmu_lock); + + lockdep_assert_held(&kvm->slots_lock); + + /* see kvm_mmu_slot_leaf_clear_dirty */ + if (flush) + kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn, + memslot->npages); +} +EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty); + +void kvm_mmu_zap_all(struct kvm *kvm) +{ + struct kvm_mmu_page *sp, *node; + LIST_HEAD(invalid_list); + int ign; + + spin_lock(&kvm->mmu_lock); +restart: + list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) { + if (sp->role.invalid && sp->root_count) + continue; + if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) + goto restart; + if (cond_resched_lock(&kvm->mmu_lock)) + goto restart; + } + + kvm_mmu_commit_zap_page(kvm, &invalid_list); + spin_unlock(&kvm->mmu_lock); +} + +void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen) +{ + WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS); + + gen &= MMIO_SPTE_GEN_MASK; + + /* + * Generation numbers are incremented in multiples of the number of + * address spaces in order to provide unique generations across all + * address spaces. Strip what is effectively the address space + * modifier prior to checking for a wrap of the MMIO generation so + * that a wrap in any address space is detected. + */ + gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1); + + /* + * The very rare case: if the MMIO generation number has wrapped, + * zap all shadow pages. + */ + if (unlikely(gen == 0)) { + kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n"); + kvm_mmu_zap_all_fast(kvm); + } +} + +static unsigned long +mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) +{ + struct kvm *kvm; + int nr_to_scan = sc->nr_to_scan; + unsigned long freed = 0; + + mutex_lock(&kvm_lock); + + list_for_each_entry(kvm, &vm_list, vm_list) { + int idx; + LIST_HEAD(invalid_list); + + /* + * Never scan more than sc->nr_to_scan VM instances. + * Will not hit this condition practically since we do not try + * to shrink more than one VM and it is very unlikely to see + * !n_used_mmu_pages so many times. + */ + if (!nr_to_scan--) + break; + /* + * n_used_mmu_pages is accessed without holding kvm->mmu_lock + * here. We may skip a VM instance errorneosly, but we do not + * want to shrink a VM that only started to populate its MMU + * anyway. 
+ */ + if (!kvm->arch.n_used_mmu_pages && + !kvm_has_zapped_obsolete_pages(kvm)) + continue; + + idx = srcu_read_lock(&kvm->srcu); + spin_lock(&kvm->mmu_lock); + + if (kvm_has_zapped_obsolete_pages(kvm)) { + kvm_mmu_commit_zap_page(kvm, + &kvm->arch.zapped_obsolete_pages); + goto unlock; + } + + if (prepare_zap_oldest_mmu_page(kvm, &invalid_list)) + freed++; + kvm_mmu_commit_zap_page(kvm, &invalid_list); + +unlock: + spin_unlock(&kvm->mmu_lock); + srcu_read_unlock(&kvm->srcu, idx); + + /* + * unfair on small ones + * per-vm shrinkers cry out + * sadness comes quickly + */ + list_move_tail(&kvm->vm_list, &vm_list); + break; + } + + mutex_unlock(&kvm_lock); + return freed; +} + +static unsigned long +mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc) +{ + return percpu_counter_read_positive(&kvm_total_used_mmu_pages); +} + +static struct shrinker mmu_shrinker = { + .count_objects = mmu_shrink_count, + .scan_objects = mmu_shrink_scan, + .seeks = DEFAULT_SEEKS * 10, +}; + +static void mmu_destroy_caches(void) +{ + kmem_cache_destroy(pte_list_desc_cache); + kmem_cache_destroy(mmu_page_header_cache); +} + +static void kvm_set_mmio_spte_mask(void) +{ + u64 mask; + + /* + * Set the reserved bits and the present bit of an paging-structure + * entry to generate page fault with PFER.RSV = 1. + */ + + /* + * Mask the uppermost physical address bit, which would be reserved as + * long as the supported physical address width is less than 52. + */ + mask = 1ull << 51; + + /* Set the present bit. */ + mask |= 1ull; + + /* + * If reserved bit is not supported, clear the present bit to disable + * mmio page fault. + */ + if (IS_ENABLED(CONFIG_X86_64) && shadow_phys_bits == 52) + mask &= ~1ull; + + kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK); +} + +static bool get_nx_auto_mode(void) +{ + /* Return true when CPU has the bug, and mitigations are ON */ + return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off(); +} + +static void __set_nx_huge_pages(bool val) +{ + nx_huge_pages = itlb_multihit_kvm_mitigation = val; +} + +static int set_nx_huge_pages(const char *val, const struct kernel_param *kp) +{ + bool old_val = nx_huge_pages; + bool new_val; + + /* In "auto" mode deploy workaround only if CPU has the bug. */ + if (sysfs_streq(val, "off")) + new_val = 0; + else if (sysfs_streq(val, "force")) + new_val = 1; + else if (sysfs_streq(val, "auto")) + new_val = get_nx_auto_mode(); + else if (strtobool(val, &new_val) < 0) + return -EINVAL; + + __set_nx_huge_pages(new_val); + + if (new_val != old_val) { + struct kvm *kvm; + + mutex_lock(&kvm_lock); + + list_for_each_entry(kvm, &vm_list, vm_list) { + mutex_lock(&kvm->slots_lock); + kvm_mmu_zap_all_fast(kvm); + mutex_unlock(&kvm->slots_lock); + + wake_up_process(kvm->arch.nx_lpage_recovery_thread); + } + mutex_unlock(&kvm_lock); + } + + return 0; +} + +int kvm_mmu_module_init(void) +{ + int ret = -ENOMEM; + + if (nx_huge_pages == -1) + __set_nx_huge_pages(get_nx_auto_mode()); + + /* + * MMU roles use union aliasing which is, generally speaking, an + * undefined behavior. However, we supposedly know how compilers behave + * and the current status quo is unlikely to change. Guardians below are + * supposed to let us know if the assumption becomes false. 
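+ * Concretely, the checks pin kvm_mmu_page_role and
+ * kvm_mmu_extended_role to 32 bits each so that the combined
+ * kvm_mmu_role fits in a single u64 and the as_u64 comparisons used
+ * when (re)initializing an MMU context remain a valid shortcut.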
+ */ + BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32)); + BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32)); + BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64)); + + kvm_mmu_reset_all_pte_masks(); + + kvm_set_mmio_spte_mask(); + + pte_list_desc_cache = kmem_cache_create("pte_list_desc", + sizeof(struct pte_list_desc), + 0, SLAB_ACCOUNT, NULL); + if (!pte_list_desc_cache) + goto out; + + mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", + sizeof(struct kvm_mmu_page), + 0, SLAB_ACCOUNT, NULL); + if (!mmu_page_header_cache) + goto out; + + if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL)) + goto out; + + ret = register_shrinker(&mmu_shrinker); + if (ret) + goto out; + + return 0; + +out: + mmu_destroy_caches(); + return ret; +} + +/* + * Calculate mmu pages needed for kvm. + */ +unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm) +{ + unsigned long nr_mmu_pages; + unsigned long nr_pages = 0; + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + int i; + + for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { + slots = __kvm_memslots(kvm, i); + + kvm_for_each_memslot(memslot, slots) + nr_pages += memslot->npages; + } + + nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000; + nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES); + + return nr_mmu_pages; +} + +void kvm_mmu_destroy(struct kvm_vcpu *vcpu) +{ + kvm_mmu_unload(vcpu); + free_mmu_pages(&vcpu->arch.root_mmu); + free_mmu_pages(&vcpu->arch.guest_mmu); + mmu_free_memory_caches(vcpu); +} + +void kvm_mmu_module_exit(void) +{ + mmu_destroy_caches(); + percpu_counter_destroy(&kvm_total_used_mmu_pages); + unregister_shrinker(&mmu_shrinker); + mmu_audit_disable(); +} + +static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp) +{ + unsigned int old_val; + int err; + + old_val = nx_huge_pages_recovery_ratio; + err = param_set_uint(val, kp); + if (err) + return err; + + if (READ_ONCE(nx_huge_pages) && + !old_val && nx_huge_pages_recovery_ratio) { + struct kvm *kvm; + + mutex_lock(&kvm_lock); + + list_for_each_entry(kvm, &vm_list, vm_list) + wake_up_process(kvm->arch.nx_lpage_recovery_thread); + + mutex_unlock(&kvm_lock); + } + + return err; +} + +static void kvm_recover_nx_lpages(struct kvm *kvm) +{ + int rcu_idx; + struct kvm_mmu_page *sp; + unsigned int ratio; + LIST_HEAD(invalid_list); + ulong to_zap; + + rcu_idx = srcu_read_lock(&kvm->srcu); + spin_lock(&kvm->mmu_lock); + + ratio = READ_ONCE(nx_huge_pages_recovery_ratio); + to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0; + while (to_zap && !list_empty(&kvm->arch.lpage_disallowed_mmu_pages)) { + /* + * We use a separate list instead of just using active_mmu_pages + * because the number of lpage_disallowed pages is expected to + * be relatively small compared to the total. + */ + sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages, + struct kvm_mmu_page, + lpage_disallowed_link); + WARN_ON_ONCE(!sp->lpage_disallowed); + kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); + WARN_ON_ONCE(sp->lpage_disallowed); + + if (!--to_zap || need_resched() || spin_needbreak(&kvm->mmu_lock)) { + kvm_mmu_commit_zap_page(kvm, &invalid_list); + if (to_zap) + cond_resched_lock(&kvm->mmu_lock); + } + } + + spin_unlock(&kvm->mmu_lock); + srcu_read_unlock(&kvm->srcu, rcu_idx); +} + +static long get_nx_lpage_recovery_timeout(u64 start_time) +{ + return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio) + ? 
start_time + 60 * HZ - get_jiffies_64() + : MAX_SCHEDULE_TIMEOUT; +} + +static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data) +{ + u64 start_time; + long remaining_time; + + while (true) { + start_time = get_jiffies_64(); + remaining_time = get_nx_lpage_recovery_timeout(start_time); + + set_current_state(TASK_INTERRUPTIBLE); + while (!kthread_should_stop() && remaining_time > 0) { + schedule_timeout(remaining_time); + remaining_time = get_nx_lpage_recovery_timeout(start_time); + set_current_state(TASK_INTERRUPTIBLE); + } + + set_current_state(TASK_RUNNING); + + if (kthread_should_stop()) + return 0; + + kvm_recover_nx_lpages(kvm); + } +} + +int kvm_mmu_post_init_vm(struct kvm *kvm) +{ + int err; + + err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0, + "kvm-nx-lpage-recovery", + &kvm->arch.nx_lpage_recovery_thread); + if (!err) + kthread_unpark(kvm->arch.nx_lpage_recovery_thread); + + return err; +} + +void kvm_mmu_pre_destroy_vm(struct kvm *kvm) +{ + if (kvm->arch.nx_lpage_recovery_thread) + kthread_stop(kvm->arch.nx_lpage_recovery_thread); +} diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c new file mode 100644 index 000000000000..3521e2d176f2 --- /dev/null +++ b/arch/x86/kvm/mmu/page_track.c @@ -0,0 +1,265 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Support KVM gust page tracking + * + * This feature allows us to track page access in guest. Currently, only + * write access is tracked. + * + * Copyright(C) 2015 Intel Corporation. + * + * Author: + * Xiao Guangrong + */ + +#include +#include + +#include +#include + +#include "mmu.h" + +void kvm_page_track_free_memslot(struct kvm_memory_slot *free, + struct kvm_memory_slot *dont) +{ + int i; + + for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) + if (!dont || free->arch.gfn_track[i] != + dont->arch.gfn_track[i]) { + kvfree(free->arch.gfn_track[i]); + free->arch.gfn_track[i] = NULL; + } +} + +int kvm_page_track_create_memslot(struct kvm_memory_slot *slot, + unsigned long npages) +{ + int i; + + for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) { + slot->arch.gfn_track[i] = + kvcalloc(npages, sizeof(*slot->arch.gfn_track[i]), + GFP_KERNEL_ACCOUNT); + if (!slot->arch.gfn_track[i]) + goto track_free; + } + + return 0; + +track_free: + kvm_page_track_free_memslot(slot, NULL); + return -ENOMEM; +} + +static inline bool page_track_mode_is_valid(enum kvm_page_track_mode mode) +{ + if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX) + return false; + + return true; +} + +static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn, + enum kvm_page_track_mode mode, short count) +{ + int index, val; + + index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL); + + val = slot->arch.gfn_track[mode][index]; + + if (WARN_ON(val + count < 0 || val + count > USHRT_MAX)) + return; + + slot->arch.gfn_track[mode][index] += count; +} + +/* + * add guest page to the tracking pool so that corresponding access on that + * page will be intercepted. + * + * It should be called under the protection both of mmu-lock and kvm->srcu + * or kvm->slots_lock. + * + * @kvm: the guest instance we are interested in. + * @slot: the @gfn belongs to. + * @gfn: the guest page. + * @mode: tracking mode, currently only write track is supported. 
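+ *
+ * A minimal usage sketch, roughly what the shadow MMU does when it
+ * starts write-protecting a guest page table at gfn:
+ *
+ *	slot = __gfn_to_memslot(slots, gfn);
+ *	kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
+ *
+ * paired with kvm_slot_page_track_remove_page() once the last shadow
+ * page for that gfn is zapped.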
+ */ +void kvm_slot_page_track_add_page(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn, + enum kvm_page_track_mode mode) +{ + + if (WARN_ON(!page_track_mode_is_valid(mode))) + return; + + update_gfn_track(slot, gfn, mode, 1); + + /* + * new track stops large page mapping for the + * tracked page. + */ + kvm_mmu_gfn_disallow_lpage(slot, gfn); + + if (mode == KVM_PAGE_TRACK_WRITE) + if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn)) + kvm_flush_remote_tlbs(kvm); +} +EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page); + +/* + * remove the guest page from the tracking pool which stops the interception + * of corresponding access on that page. It is the opposed operation of + * kvm_slot_page_track_add_page(). + * + * It should be called under the protection both of mmu-lock and kvm->srcu + * or kvm->slots_lock. + * + * @kvm: the guest instance we are interested in. + * @slot: the @gfn belongs to. + * @gfn: the guest page. + * @mode: tracking mode, currently only write track is supported. + */ +void kvm_slot_page_track_remove_page(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn, + enum kvm_page_track_mode mode) +{ + if (WARN_ON(!page_track_mode_is_valid(mode))) + return; + + update_gfn_track(slot, gfn, mode, -1); + + /* + * allow large page mapping for the tracked page + * after the tracker is gone. + */ + kvm_mmu_gfn_allow_lpage(slot, gfn); +} +EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page); + +/* + * check if the corresponding access on the specified guest page is tracked. + */ +bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, + enum kvm_page_track_mode mode) +{ + struct kvm_memory_slot *slot; + int index; + + if (WARN_ON(!page_track_mode_is_valid(mode))) + return false; + + slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); + if (!slot) + return false; + + index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL); + return !!READ_ONCE(slot->arch.gfn_track[mode][index]); +} + +void kvm_page_track_cleanup(struct kvm *kvm) +{ + struct kvm_page_track_notifier_head *head; + + head = &kvm->arch.track_notifier_head; + cleanup_srcu_struct(&head->track_srcu); +} + +void kvm_page_track_init(struct kvm *kvm) +{ + struct kvm_page_track_notifier_head *head; + + head = &kvm->arch.track_notifier_head; + init_srcu_struct(&head->track_srcu); + INIT_HLIST_HEAD(&head->track_notifier_list); +} + +/* + * register the notifier so that event interception for the tracked guest + * pages can be received. + */ +void +kvm_page_track_register_notifier(struct kvm *kvm, + struct kvm_page_track_notifier_node *n) +{ + struct kvm_page_track_notifier_head *head; + + head = &kvm->arch.track_notifier_head; + + spin_lock(&kvm->mmu_lock); + hlist_add_head_rcu(&n->node, &head->track_notifier_list); + spin_unlock(&kvm->mmu_lock); +} +EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier); + +/* + * stop receiving the event interception. It is the opposed operation of + * kvm_page_track_register_notifier(). + */ +void +kvm_page_track_unregister_notifier(struct kvm *kvm, + struct kvm_page_track_notifier_node *n) +{ + struct kvm_page_track_notifier_head *head; + + head = &kvm->arch.track_notifier_head; + + spin_lock(&kvm->mmu_lock); + hlist_del_rcu(&n->node); + spin_unlock(&kvm->mmu_lock); + synchronize_srcu(&head->track_srcu); +} +EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier); + +/* + * Notify the node that write access is intercepted and write emulation is + * finished at this time. + * + * The node should figure out if the written page is the one that node is + * interested in by itself. 
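+ *
+ * The main in-tree consumer is the shadow MMU, which registers
+ * kvm_mmu_pte_write() as ->track_write (see kvm_mmu_init_vm()); the
+ * instruction emulator calls in here after a guest write has been
+ * emulated so that shadowed page tables can be updated or zapped.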
+ */ +void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, + int bytes) +{ + struct kvm_page_track_notifier_head *head; + struct kvm_page_track_notifier_node *n; + int idx; + + head = &vcpu->kvm->arch.track_notifier_head; + + if (hlist_empty(&head->track_notifier_list)) + return; + + idx = srcu_read_lock(&head->track_srcu); + hlist_for_each_entry_rcu(n, &head->track_notifier_list, node) + if (n->track_write) + n->track_write(vcpu, gpa, new, bytes, n); + srcu_read_unlock(&head->track_srcu, idx); +} + +/* + * Notify the node that memory slot is being removed or moved so that it can + * drop write-protection for the pages in the memory slot. + * + * The node should figure out it has any write-protected pages in this slot + * by itself. + */ +void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot) +{ + struct kvm_page_track_notifier_head *head; + struct kvm_page_track_notifier_node *n; + int idx; + + head = &kvm->arch.track_notifier_head; + + if (hlist_empty(&head->track_notifier_list)) + return; + + idx = srcu_read_lock(&head->track_srcu); + hlist_for_each_entry_rcu(n, &head->track_notifier_list, node) + if (n->track_flush_slot) + n->track_flush_slot(kvm, slot, n); + srcu_read_unlock(&head->track_srcu, idx); +} diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h new file mode 100644 index 000000000000..97b21e7fd013 --- /dev/null +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -0,0 +1,1090 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Kernel-based Virtual Machine driver for Linux + * + * This module enables machines with Intel VT-x extensions to run virtual + * machines without emulation or binary translation. + * + * MMU support + * + * Copyright (C) 2006 Qumranet, Inc. + * Copyright 2010 Red Hat, Inc. and/or its affiliates. + * + * Authors: + * Yaniv Kamay + * Avi Kivity + */ + +/* + * We need the mmu code to access both 32-bit and 64-bit guest ptes, + * so the code in this file is compiled twice, once per pte size. 
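+ *
+ * Roughly, mmu.c instantiates this template by defining PTTYPE and
+ * re-including the header, e.g.:
+ *
+ *	#define PTTYPE 64
+ *	#include "paging_tmpl.h"
+ *	#undef PTTYPE
+ *
+ * so that FNAME(page_fault) expands to paging64_page_fault, and likewise
+ * for the 32-bit and EPT variants selected below.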
+ */ + +#if PTTYPE == 64 + #define pt_element_t u64 + #define guest_walker guest_walker64 + #define FNAME(name) paging##64_##name + #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK + #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl) + #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl) + #define PT_INDEX(addr, level) PT64_INDEX(addr, level) + #define PT_LEVEL_BITS PT64_LEVEL_BITS + #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT + #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT + #define PT_HAVE_ACCESSED_DIRTY(mmu) true + #ifdef CONFIG_X86_64 + #define PT_MAX_FULL_LEVELS 4 + #define CMPXCHG cmpxchg + #else + #define CMPXCHG cmpxchg64 + #define PT_MAX_FULL_LEVELS 2 + #endif +#elif PTTYPE == 32 + #define pt_element_t u32 + #define guest_walker guest_walker32 + #define FNAME(name) paging##32_##name + #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK + #define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl) + #define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl) + #define PT_INDEX(addr, level) PT32_INDEX(addr, level) + #define PT_LEVEL_BITS PT32_LEVEL_BITS + #define PT_MAX_FULL_LEVELS 2 + #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT + #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT + #define PT_HAVE_ACCESSED_DIRTY(mmu) true + #define CMPXCHG cmpxchg +#elif PTTYPE == PTTYPE_EPT + #define pt_element_t u64 + #define guest_walker guest_walkerEPT + #define FNAME(name) ept_##name + #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK + #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl) + #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl) + #define PT_INDEX(addr, level) PT64_INDEX(addr, level) + #define PT_LEVEL_BITS PT64_LEVEL_BITS + #define PT_GUEST_DIRTY_SHIFT 9 + #define PT_GUEST_ACCESSED_SHIFT 8 + #define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad) + #define CMPXCHG cmpxchg64 + #define PT_MAX_FULL_LEVELS 4 +#else + #error Invalid PTTYPE value +#endif + +#define PT_GUEST_DIRTY_MASK (1 << PT_GUEST_DIRTY_SHIFT) +#define PT_GUEST_ACCESSED_MASK (1 << PT_GUEST_ACCESSED_SHIFT) + +#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl) +#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL) + +/* + * The guest_walker structure emulates the behavior of the hardware page + * table walker. 
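+ * It records one entry per traversed paging level
+ * (table_gfn/ptes/pte_gpa/ptep_user), the accumulated access rights
+ * (pt_access/pte_access), the final gfn on success, and a ready-to-inject
+ * x86_exception in ->fault on failure, so that accessed/dirty bits can be
+ * updated later without re-walking the guest tables.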
+ */ +struct guest_walker { + int level; + unsigned max_level; + gfn_t table_gfn[PT_MAX_FULL_LEVELS]; + pt_element_t ptes[PT_MAX_FULL_LEVELS]; + pt_element_t prefetch_ptes[PTE_PREFETCH_NUM]; + gpa_t pte_gpa[PT_MAX_FULL_LEVELS]; + pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS]; + bool pte_writable[PT_MAX_FULL_LEVELS]; + unsigned pt_access; + unsigned pte_access; + gfn_t gfn; + struct x86_exception fault; +}; + +static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl) +{ + return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT; +} + +static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access, + unsigned gpte) +{ + unsigned mask; + + /* dirty bit is not supported, so no need to track it */ + if (!PT_HAVE_ACCESSED_DIRTY(mmu)) + return; + + BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK); + + mask = (unsigned)~ACC_WRITE_MASK; + /* Allow write access to dirty gptes */ + mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & + PT_WRITABLE_MASK; + *access &= mask; +} + +static inline int FNAME(is_present_gpte)(unsigned long pte) +{ +#if PTTYPE != PTTYPE_EPT + return pte & PT_PRESENT_MASK; +#else + return pte & 7; +#endif +} + +static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, + pt_element_t __user *ptep_user, unsigned index, + pt_element_t orig_pte, pt_element_t new_pte) +{ + int npages; + pt_element_t ret; + pt_element_t *table; + struct page *page; + + npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page); + if (likely(npages == 1)) { + table = kmap_atomic(page); + ret = CMPXCHG(&table[index], orig_pte, new_pte); + kunmap_atomic(table); + + kvm_release_page_dirty(page); + } else { + struct vm_area_struct *vma; + unsigned long vaddr = (unsigned long)ptep_user & PAGE_MASK; + unsigned long pfn; + unsigned long paddr; + + down_read(¤t->mm->mmap_sem); + vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE); + if (!vma || !(vma->vm_flags & VM_PFNMAP)) { + up_read(¤t->mm->mmap_sem); + return -EFAULT; + } + pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; + paddr = pfn << PAGE_SHIFT; + table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB); + if (!table) { + up_read(¤t->mm->mmap_sem); + return -EFAULT; + } + ret = CMPXCHG(&table[index], orig_pte, new_pte); + memunmap(table); + up_read(¤t->mm->mmap_sem); + } + + return (ret != orig_pte); +} + +static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu, + struct kvm_mmu_page *sp, u64 *spte, + u64 gpte) +{ + if (is_rsvd_bits_set(vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) + goto no_present; + + if (!FNAME(is_present_gpte)(gpte)) + goto no_present; + + /* if accessed bit is not supported prefetch non accessed gpte */ + if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) && + !(gpte & PT_GUEST_ACCESSED_MASK)) + goto no_present; + + return false; + +no_present: + drop_spte(vcpu->kvm, spte); + return true; +} + +/* + * For PTTYPE_EPT, a page table can be executable but not readable + * on supported processors. Therefore, set_spte does not automatically + * set bit 0 if execute only is supported. Here, we repurpose ACC_USER_MASK + * to signify readability since it isn't used in the EPT case + */ +static inline unsigned FNAME(gpte_access)(u64 gpte) +{ + unsigned access; +#if PTTYPE == PTTYPE_EPT + access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) | + ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) | + ((gpte & VMX_EPT_READABLE_MASK) ? 
ACC_USER_MASK : 0); +#else + BUILD_BUG_ON(ACC_EXEC_MASK != PT_PRESENT_MASK); + BUILD_BUG_ON(ACC_EXEC_MASK != 1); + access = gpte & (PT_WRITABLE_MASK | PT_USER_MASK | PT_PRESENT_MASK); + /* Combine NX with P (which is set here) to get ACC_EXEC_MASK. */ + access ^= (gpte >> PT64_NX_SHIFT); +#endif + + return access; +} + +static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, + struct kvm_mmu *mmu, + struct guest_walker *walker, + int write_fault) +{ + unsigned level, index; + pt_element_t pte, orig_pte; + pt_element_t __user *ptep_user; + gfn_t table_gfn; + int ret; + + /* dirty/accessed bits are not supported, so no need to update them */ + if (!PT_HAVE_ACCESSED_DIRTY(mmu)) + return 0; + + for (level = walker->max_level; level >= walker->level; --level) { + pte = orig_pte = walker->ptes[level - 1]; + table_gfn = walker->table_gfn[level - 1]; + ptep_user = walker->ptep_user[level - 1]; + index = offset_in_page(ptep_user) / sizeof(pt_element_t); + if (!(pte & PT_GUEST_ACCESSED_MASK)) { + trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte)); + pte |= PT_GUEST_ACCESSED_MASK; + } + if (level == walker->level && write_fault && + !(pte & PT_GUEST_DIRTY_MASK)) { + trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte)); +#if PTTYPE == PTTYPE_EPT + if (kvm_arch_write_log_dirty(vcpu)) + return -EINVAL; +#endif + pte |= PT_GUEST_DIRTY_MASK; + } + if (pte == orig_pte) + continue; + + /* + * If the slot is read-only, simply do not process the accessed + * and dirty bits. This is the correct thing to do if the slot + * is ROM, and page tables in read-as-ROM/write-as-MMIO slots + * are only supported if the accessed and dirty bits are already + * set in the ROM (so that MMIO writes are never needed). + * + * Note that NPT does not allow this at all and faults, since + * it always wants nested page table entries for the guest + * page tables to be writable. And EPT works but will simply + * overwrite the read-only memory to set the accessed and dirty + * bits. 
+ */ + if (unlikely(!walker->pte_writable[level - 1])) + continue; + + ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte); + if (ret) + return ret; + + kvm_vcpu_mark_page_dirty(vcpu, table_gfn); + walker->ptes[level - 1] = pte; + } + return 0; +} + +static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte) +{ + unsigned pkeys = 0; +#if PTTYPE == 64 + pte_t pte = {.pte = gpte}; + + pkeys = pte_flags_pkey(pte_flags(pte)); +#endif + return pkeys; +} + +/* + * Fetch a guest pte for a guest virtual address + */ +static int FNAME(walk_addr_generic)(struct guest_walker *walker, + struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, + gva_t addr, u32 access) +{ + int ret; + pt_element_t pte; + pt_element_t __user *uninitialized_var(ptep_user); + gfn_t table_gfn; + u64 pt_access, pte_access; + unsigned index, accessed_dirty, pte_pkey; + unsigned nested_access; + gpa_t pte_gpa; + bool have_ad; + int offset; + u64 walk_nx_mask = 0; + const int write_fault = access & PFERR_WRITE_MASK; + const int user_fault = access & PFERR_USER_MASK; + const int fetch_fault = access & PFERR_FETCH_MASK; + u16 errcode = 0; + gpa_t real_gpa; + gfn_t gfn; + + trace_kvm_mmu_pagetable_walk(addr, access); +retry_walk: + walker->level = mmu->root_level; + pte = mmu->get_cr3(vcpu); + have_ad = PT_HAVE_ACCESSED_DIRTY(mmu); + +#if PTTYPE == 64 + walk_nx_mask = 1ULL << PT64_NX_SHIFT; + if (walker->level == PT32E_ROOT_LEVEL) { + pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3); + trace_kvm_mmu_paging_element(pte, walker->level); + if (!FNAME(is_present_gpte)(pte)) + goto error; + --walker->level; + } +#endif + walker->max_level = walker->level; + ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu))); + + /* + * FIXME: on Intel processors, loads of the PDPTE registers for PAE paging + * by the MOV to CR instruction are treated as reads and do not cause the + * processor to set the dirty flag in any EPT paging-structure entry. + */ + nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK; + + pte_access = ~0; + ++walker->level; + + do { + gfn_t real_gfn; + unsigned long host_addr; + + pt_access = pte_access; + --walker->level; + + index = PT_INDEX(addr, walker->level); + table_gfn = gpte_to_gfn(pte); + offset = index * sizeof(pt_element_t); + pte_gpa = gfn_to_gpa(table_gfn) + offset; + + BUG_ON(walker->level < 1); + walker->table_gfn[walker->level - 1] = table_gfn; + walker->pte_gpa[walker->level - 1] = pte_gpa; + + real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn), + nested_access, + &walker->fault); + + /* + * FIXME: This can happen if emulation (for of an INS/OUTS + * instruction) triggers a nested page fault. The exit + * qualification / exit info field will incorrectly have + * "guest page access" as the nested page fault's cause, + * instead of "guest page structure access". To fix this, + * the x86_exception struct should be augmented with enough + * information to fix the exit_qualification or exit_info_1 + * fields. 
+ */ + if (unlikely(real_gfn == UNMAPPED_GVA)) + return 0; + + real_gfn = gpa_to_gfn(real_gfn); + + host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, real_gfn, + &walker->pte_writable[walker->level - 1]); + if (unlikely(kvm_is_error_hva(host_addr))) + goto error; + + ptep_user = (pt_element_t __user *)((void *)host_addr + offset); + if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) + goto error; + walker->ptep_user[walker->level - 1] = ptep_user; + + trace_kvm_mmu_paging_element(pte, walker->level); + + /* + * Inverting the NX it lets us AND it like other + * permission bits. + */ + pte_access = pt_access & (pte ^ walk_nx_mask); + + if (unlikely(!FNAME(is_present_gpte)(pte))) + goto error; + + if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) { + errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK; + goto error; + } + + walker->ptes[walker->level - 1] = pte; + } while (!is_last_gpte(mmu, walker->level, pte)); + + pte_pkey = FNAME(gpte_pkeys)(vcpu, pte); + accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0; + + /* Convert to ACC_*_MASK flags for struct guest_walker. */ + walker->pt_access = FNAME(gpte_access)(pt_access ^ walk_nx_mask); + walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask); + errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access); + if (unlikely(errcode)) + goto error; + + gfn = gpte_to_gfn_lvl(pte, walker->level); + gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT; + + if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36()) + gfn += pse36_gfn_delta(pte); + + real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault); + if (real_gpa == UNMAPPED_GVA) + return 0; + + walker->gfn = real_gpa >> PAGE_SHIFT; + + if (!write_fault) + FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte); + else + /* + * On a write fault, fold the dirty bit into accessed_dirty. + * For modes without A/D bits support accessed_dirty will be + * always clear. + */ + accessed_dirty &= pte >> + (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT); + + if (unlikely(!accessed_dirty)) { + ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault); + if (unlikely(ret < 0)) + goto error; + else if (ret) + goto retry_walk; + } + + pgprintk("%s: pte %llx pte_access %x pt_access %x\n", + __func__, (u64)pte, walker->pte_access, walker->pt_access); + return 1; + +error: + errcode |= write_fault | user_fault; + if (fetch_fault && (mmu->nx || + kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))) + errcode |= PFERR_FETCH_MASK; + + walker->fault.vector = PF_VECTOR; + walker->fault.error_code_valid = true; + walker->fault.error_code = errcode; + +#if PTTYPE == PTTYPE_EPT + /* + * Use PFERR_RSVD_MASK in error_code to to tell if EPT + * misconfiguration requires to be injected. The detection is + * done by is_rsvd_bits_set() above. + * + * We set up the value of exit_qualification to inject: + * [2:0] - Derive from the access bits. The exit_qualification might be + * out of date if it is serving an EPT misconfiguration. + * [5:3] - Calculated by the page walk of the guest EPT page tables + * [7:8] - Derived from [7:8] of real exit_qualification + * + * The other bits are set to 0. 
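+ * (Bits 7:8 of the real exit qualification are the "guest linear
+ * address valid" and "access was a linear-address translation" flags,
+ * which is why the code below masks exit_qualification with 0x180 and
+ * preserves only those two bits.)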
+ */ + if (!(errcode & PFERR_RSVD_MASK)) { + vcpu->arch.exit_qualification &= 0x180; + if (write_fault) + vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_WRITE; + if (user_fault) + vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_READ; + if (fetch_fault) + vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_INSTR; + vcpu->arch.exit_qualification |= (pte_access & 0x7) << 3; + } +#endif + walker->fault.address = addr; + walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu; + + trace_kvm_mmu_walker_error(walker->fault.error_code); + return 0; +} + +static int FNAME(walk_addr)(struct guest_walker *walker, + struct kvm_vcpu *vcpu, gva_t addr, u32 access) +{ + return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr, + access); +} + +#if PTTYPE != PTTYPE_EPT +static int FNAME(walk_addr_nested)(struct guest_walker *walker, + struct kvm_vcpu *vcpu, gva_t addr, + u32 access) +{ + return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu, + addr, access); +} +#endif + +static bool +FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, + u64 *spte, pt_element_t gpte, bool no_dirty_log) +{ + unsigned pte_access; + gfn_t gfn; + kvm_pfn_t pfn; + + if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte)) + return false; + + pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte); + + gfn = gpte_to_gfn(gpte); + pte_access = sp->role.access & FNAME(gpte_access)(gpte); + FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); + pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn, + no_dirty_log && (pte_access & ACC_WRITE_MASK)); + if (is_error_pfn(pfn)) + return false; + + /* + * we call mmu_set_spte() with host_writable = true because + * pte_prefetch_gfn_to_pfn always gets a writable pfn. + */ + mmu_set_spte(vcpu, spte, pte_access, 0, PT_PAGE_TABLE_LEVEL, gfn, pfn, + true, true); + + kvm_release_pfn_clean(pfn); + return true; +} + +static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, + u64 *spte, const void *pte) +{ + pt_element_t gpte = *(const pt_element_t *)pte; + + FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false); +} + +static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu, + struct guest_walker *gw, int level) +{ + pt_element_t curr_pte; + gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1]; + u64 mask; + int r, index; + + if (level == PT_PAGE_TABLE_LEVEL) { + mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1; + base_gpa = pte_gpa & ~mask; + index = (pte_gpa - base_gpa) / sizeof(pt_element_t); + + r = kvm_vcpu_read_guest_atomic(vcpu, base_gpa, + gw->prefetch_ptes, sizeof(gw->prefetch_ptes)); + curr_pte = gw->prefetch_ptes[index]; + } else + r = kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, + &curr_pte, sizeof(curr_pte)); + + return r || curr_pte != gw->ptes[level - 1]; +} + +static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw, + u64 *sptep) +{ + struct kvm_mmu_page *sp; + pt_element_t *gptep = gw->prefetch_ptes; + u64 *spte; + int i; + + sp = page_header(__pa(sptep)); + + if (sp->role.level > PT_PAGE_TABLE_LEVEL) + return; + + if (sp->role.direct) + return __direct_pte_prefetch(vcpu, sp, sptep); + + i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1); + spte = sp->spt + i; + + for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) { + if (spte == sptep) + continue; + + if (is_shadow_present_pte(*spte)) + continue; + + if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true)) + break; + } +} + +/* + * Fetch a shadow pte for a specific level in the paging hierarchy. 
+ * If the guest tries to write a write-protected page, we need to + * emulate this operation, return 1 to indicate this case. + */ +static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, + struct guest_walker *gw, + int write_fault, int hlevel, + kvm_pfn_t pfn, bool map_writable, bool prefault, + bool lpage_disallowed) +{ + struct kvm_mmu_page *sp = NULL; + struct kvm_shadow_walk_iterator it; + unsigned direct_access, access = gw->pt_access; + int top_level, ret; + gfn_t gfn, base_gfn; + + direct_access = gw->pte_access; + + top_level = vcpu->arch.mmu->root_level; + if (top_level == PT32E_ROOT_LEVEL) + top_level = PT32_ROOT_LEVEL; + /* + * Verify that the top-level gpte is still there. Since the page + * is a root page, it is either write protected (and cannot be + * changed from now on) or it is invalid (in which case, we don't + * really care if it changes underneath us after this point). + */ + if (FNAME(gpte_changed)(vcpu, gw, top_level)) + goto out_gpte_changed; + + if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) + goto out_gpte_changed; + + for (shadow_walk_init(&it, vcpu, addr); + shadow_walk_okay(&it) && it.level > gw->level; + shadow_walk_next(&it)) { + gfn_t table_gfn; + + clear_sp_write_flooding_count(it.sptep); + drop_large_spte(vcpu, it.sptep); + + sp = NULL; + if (!is_shadow_present_pte(*it.sptep)) { + table_gfn = gw->table_gfn[it.level - 2]; + sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1, + false, access); + } + + /* + * Verify that the gpte in the page we've just write + * protected is still there. + */ + if (FNAME(gpte_changed)(vcpu, gw, it.level - 1)) + goto out_gpte_changed; + + if (sp) + link_shadow_page(vcpu, it.sptep, sp); + } + + /* + * FNAME(page_fault) might have clobbered the bottom bits of + * gw->gfn, restore them from the virtual address. + */ + gfn = gw->gfn | ((addr & PT_LVL_OFFSET_MASK(gw->level)) >> PAGE_SHIFT); + base_gfn = gfn; + + trace_kvm_mmu_spte_requested(addr, gw->level, pfn); + + for (; shadow_walk_okay(&it); shadow_walk_next(&it)) { + clear_sp_write_flooding_count(it.sptep); + + /* + * We cannot overwrite existing page tables with an NX + * large page, as the leaf could be executable. + */ + disallowed_hugepage_adjust(it, gfn, &pfn, &hlevel); + + base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); + if (it.level == hlevel) + break; + + validate_direct_spte(vcpu, it.sptep, direct_access); + + drop_large_spte(vcpu, it.sptep); + + if (!is_shadow_present_pte(*it.sptep)) { + sp = kvm_mmu_get_page(vcpu, base_gfn, addr, + it.level - 1, true, direct_access); + link_shadow_page(vcpu, it.sptep, sp); + if (lpage_disallowed) + account_huge_nx_page(vcpu->kvm, sp); + } + } + + ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, + it.level, base_gfn, pfn, prefault, map_writable); + FNAME(pte_prefetch)(vcpu, gw, it.sptep); + ++vcpu->stat.pf_fixed; + return ret; + +out_gpte_changed: + return RET_PF_RETRY; +} + + /* + * To see whether the mapped gfn can write its page table in the current + * mapping. + * + * It is the helper function of FNAME(page_fault). When guest uses large page + * size to map the writable gfn which is used as current page table, we should + * force kvm to use small page size to map it because new shadow page will be + * created when kvm establishes shadow page table that stop kvm using large + * page size. Do it early can avoid unnecessary #PF and emulation. + * + * @write_fault_to_shadow_pgtable will return true if the fault gfn is + * currently used as its page table. 
+ * + * Note: the PDPT page table is not checked for PAE-32 bit guest. It is ok + * since the PDPT is always shadowed, that means, we can not use large page + * size to map the gfn which is used as PDPT. + */ +static bool +FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu, + struct guest_walker *walker, int user_fault, + bool *write_fault_to_shadow_pgtable) +{ + int level; + gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1); + bool self_changed = false; + + if (!(walker->pte_access & ACC_WRITE_MASK || + (!is_write_protection(vcpu) && !user_fault))) + return false; + + for (level = walker->level; level <= walker->max_level; level++) { + gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1]; + + self_changed |= !(gfn & mask); + *write_fault_to_shadow_pgtable |= !gfn; + } + + return self_changed; +} + +/* + * Page fault handler. There are several causes for a page fault: + * - there is no shadow pte for the guest pte + * - write access through a shadow pte marked read only so that we can set + * the dirty bit + * - write access to a shadow pte marked read only so we can update the page + * dirty bitmap, when userspace requests it + * - mmio access; in this case we will never install a present shadow pte + * - normal guest page fault due to the guest pte marked not present, not + * writable, or not executable + * + * Returns: 1 if we need to emulate the instruction, 0 otherwise, or + * a negative value on error. + */ +static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, + bool prefault) +{ + int write_fault = error_code & PFERR_WRITE_MASK; + int user_fault = error_code & PFERR_USER_MASK; + struct guest_walker walker; + int r; + kvm_pfn_t pfn; + int level = PT_PAGE_TABLE_LEVEL; + unsigned long mmu_seq; + bool map_writable, is_self_change_mapping; + bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) && + is_nx_huge_page_enabled(); + bool force_pt_level = lpage_disallowed; + + pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code); + + r = mmu_topup_memory_caches(vcpu); + if (r) + return r; + + /* + * If PFEC.RSVD is set, this is a shadow page fault. + * The bit needs to be cleared before walking guest page tables. + */ + error_code &= ~PFERR_RSVD_MASK; + + /* + * Look up the guest pte for the faulting address. + */ + r = FNAME(walk_addr)(&walker, vcpu, addr, error_code); + + /* + * The page is not mapped by the guest. Let the guest handle it. 
+ */ + if (!r) { + pgprintk("%s: guest page fault\n", __func__); + if (!prefault) + inject_page_fault(vcpu, &walker.fault); + + return RET_PF_RETRY; + } + + if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) { + shadow_page_table_clear_flood(vcpu, addr); + return RET_PF_EMULATE; + } + + vcpu->arch.write_fault_to_shadow_pgtable = false; + + is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu, + &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable); + + if (walker.level >= PT_DIRECTORY_LEVEL && !is_self_change_mapping) { + level = mapping_level(vcpu, walker.gfn, &force_pt_level); + if (likely(!force_pt_level)) { + level = min(walker.level, level); + walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1); + } + } else + force_pt_level = true; + + mmu_seq = vcpu->kvm->mmu_notifier_seq; + smp_rmb(); + + if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault, + &map_writable)) + return RET_PF_RETRY; + + if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r)) + return r; + + /* + * Do not change pte_access if the pfn is a mmio page, otherwise + * we will cache the incorrect access into mmio spte. + */ + if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) && + !is_write_protection(vcpu) && !user_fault && + !is_noslot_pfn(pfn)) { + walker.pte_access |= ACC_WRITE_MASK; + walker.pte_access &= ~ACC_USER_MASK; + + /* + * If we converted a user page to a kernel page, + * so that the kernel can write to it when cr0.wp=0, + * then we should prevent the kernel from executing it + * if SMEP is enabled. + */ + if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)) + walker.pte_access &= ~ACC_EXEC_MASK; + } + + r = RET_PF_RETRY; + spin_lock(&vcpu->kvm->mmu_lock); + if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) + goto out_unlock; + + kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT); + if (make_mmu_pages_available(vcpu) < 0) + goto out_unlock; + if (!force_pt_level) + transparent_hugepage_adjust(vcpu, walker.gfn, &pfn, &level); + r = FNAME(fetch)(vcpu, addr, &walker, write_fault, + level, pfn, map_writable, prefault, lpage_disallowed); + kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT); + +out_unlock: + spin_unlock(&vcpu->kvm->mmu_lock); + kvm_release_pfn_clean(pfn); + return r; +} + +static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp) +{ + int offset = 0; + + WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL); + + if (PTTYPE == 32) + offset = sp->role.quadrant << PT64_LEVEL_BITS; + + return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t); +} + +static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) +{ + struct kvm_shadow_walk_iterator iterator; + struct kvm_mmu_page *sp; + int level; + u64 *sptep; + + vcpu_clear_mmio_info(vcpu, gva); + + /* + * No need to check return value here, rmap_can_add() can + * help us to skip pte prefetch later. 
+ */ + mmu_topup_memory_caches(vcpu); + + if (!VALID_PAGE(root_hpa)) { + WARN_ON(1); + return; + } + + spin_lock(&vcpu->kvm->mmu_lock); + for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) { + level = iterator.level; + sptep = iterator.sptep; + + sp = page_header(__pa(sptep)); + if (is_last_spte(*sptep, level)) { + pt_element_t gpte; + gpa_t pte_gpa; + + if (!sp->unsync) + break; + + pte_gpa = FNAME(get_level1_sp_gpa)(sp); + pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t); + + if (mmu_page_zap_pte(vcpu->kvm, sp, sptep)) + kvm_flush_remote_tlbs_with_address(vcpu->kvm, + sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level)); + + if (!rmap_can_add(vcpu)) + break; + + if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte, + sizeof(pt_element_t))) + break; + + FNAME(update_pte)(vcpu, sp, sptep, &gpte); + } + + if (!is_shadow_present_pte(*sptep) || !sp->unsync_children) + break; + } + spin_unlock(&vcpu->kvm->mmu_lock); +} + +static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access, + struct x86_exception *exception) +{ + struct guest_walker walker; + gpa_t gpa = UNMAPPED_GVA; + int r; + + r = FNAME(walk_addr)(&walker, vcpu, vaddr, access); + + if (r) { + gpa = gfn_to_gpa(walker.gfn); + gpa |= vaddr & ~PAGE_MASK; + } else if (exception) + *exception = walker.fault; + + return gpa; +} + +#if PTTYPE != PTTYPE_EPT +static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr, + u32 access, + struct x86_exception *exception) +{ + struct guest_walker walker; + gpa_t gpa = UNMAPPED_GVA; + int r; + + r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access); + + if (r) { + gpa = gfn_to_gpa(walker.gfn); + gpa |= vaddr & ~PAGE_MASK; + } else if (exception) + *exception = walker.fault; + + return gpa; +} +#endif + +/* + * Using the cached information from sp->gfns is safe because: + * - The spte has a reference to the struct page, so the pfn for a given gfn + * can't change unless all sptes pointing to it are nuked first. + * + * Note: + * We should flush all tlbs if spte is dropped even though guest is + * responsible for it. Since if we don't, kvm_mmu_notifier_invalidate_page + * and kvm_mmu_notifier_invalidate_range_start detect the mapping page isn't + * used by guest then tlbs are not flushed, so guest is allowed to access the + * freed pages. + * And we increase kvm->tlbs_dirty to delay tlbs flush in this case. + */ +static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) +{ + int i, nr_present = 0; + bool host_writable; + gpa_t first_pte_gpa; + int set_spte_ret = 0; + + /* direct kvm_mmu_page can not be unsync. */ + BUG_ON(sp->role.direct); + + first_pte_gpa = FNAME(get_level1_sp_gpa)(sp); + + for (i = 0; i < PT64_ENT_PER_PAGE; i++) { + unsigned pte_access; + pt_element_t gpte; + gpa_t pte_gpa; + gfn_t gfn; + + if (!sp->spt[i]) + continue; + + pte_gpa = first_pte_gpa + i * sizeof(pt_element_t); + + if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte, + sizeof(pt_element_t))) + return 0; + + if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) { + /* + * Update spte before increasing tlbs_dirty to make + * sure no tlb flush is lost after spte is zapped; see + * the comments in kvm_flush_remote_tlbs(). 
+ */ + smp_wmb(); + vcpu->kvm->tlbs_dirty++; + continue; + } + + gfn = gpte_to_gfn(gpte); + pte_access = sp->role.access; + pte_access &= FNAME(gpte_access)(gpte); + FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); + + if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access, + &nr_present)) + continue; + + if (gfn != sp->gfns[i]) { + drop_spte(vcpu->kvm, &sp->spt[i]); + /* + * The same as above where we are doing + * prefetch_invalid_gpte(). + */ + smp_wmb(); + vcpu->kvm->tlbs_dirty++; + continue; + } + + nr_present++; + + host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE; + + set_spte_ret |= set_spte(vcpu, &sp->spt[i], + pte_access, PT_PAGE_TABLE_LEVEL, + gfn, spte_to_pfn(sp->spt[i]), + true, false, host_writable); + } + + if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH) + kvm_flush_remote_tlbs(vcpu->kvm); + + return nr_present; +} + +#undef pt_element_t +#undef guest_walker +#undef FNAME +#undef PT_BASE_ADDR_MASK +#undef PT_INDEX +#undef PT_LVL_ADDR_MASK +#undef PT_LVL_OFFSET_MASK +#undef PT_LEVEL_BITS +#undef PT_MAX_FULL_LEVELS +#undef gpte_to_gfn +#undef gpte_to_gfn_lvl +#undef CMPXCHG +#undef PT_GUEST_ACCESSED_MASK +#undef PT_GUEST_DIRTY_MASK +#undef PT_GUEST_DIRTY_SHIFT +#undef PT_GUEST_ACCESSED_SHIFT +#undef PT_HAVE_ACCESSED_DIRTY diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c deleted file mode 100644 index 3521e2d176f2..000000000000 --- a/arch/x86/kvm/page_track.c +++ /dev/null @@ -1,265 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Support KVM gust page tracking - * - * This feature allows us to track page access in guest. Currently, only - * write access is tracked. - * - * Copyright(C) 2015 Intel Corporation. - * - * Author: - * Xiao Guangrong - */ - -#include -#include - -#include -#include - -#include "mmu.h" - -void kvm_page_track_free_memslot(struct kvm_memory_slot *free, - struct kvm_memory_slot *dont) -{ - int i; - - for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) - if (!dont || free->arch.gfn_track[i] != - dont->arch.gfn_track[i]) { - kvfree(free->arch.gfn_track[i]); - free->arch.gfn_track[i] = NULL; - } -} - -int kvm_page_track_create_memslot(struct kvm_memory_slot *slot, - unsigned long npages) -{ - int i; - - for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) { - slot->arch.gfn_track[i] = - kvcalloc(npages, sizeof(*slot->arch.gfn_track[i]), - GFP_KERNEL_ACCOUNT); - if (!slot->arch.gfn_track[i]) - goto track_free; - } - - return 0; - -track_free: - kvm_page_track_free_memslot(slot, NULL); - return -ENOMEM; -} - -static inline bool page_track_mode_is_valid(enum kvm_page_track_mode mode) -{ - if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX) - return false; - - return true; -} - -static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn, - enum kvm_page_track_mode mode, short count) -{ - int index, val; - - index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL); - - val = slot->arch.gfn_track[mode][index]; - - if (WARN_ON(val + count < 0 || val + count > USHRT_MAX)) - return; - - slot->arch.gfn_track[mode][index] += count; -} - -/* - * add guest page to the tracking pool so that corresponding access on that - * page will be intercepted. - * - * It should be called under the protection both of mmu-lock and kvm->srcu - * or kvm->slots_lock. - * - * @kvm: the guest instance we are interested in. - * @slot: the @gfn belongs to. - * @gfn: the guest page. - * @mode: tracking mode, currently only write track is supported. 
- */ -void kvm_slot_page_track_add_page(struct kvm *kvm, - struct kvm_memory_slot *slot, gfn_t gfn, - enum kvm_page_track_mode mode) -{ - - if (WARN_ON(!page_track_mode_is_valid(mode))) - return; - - update_gfn_track(slot, gfn, mode, 1); - - /* - * new track stops large page mapping for the - * tracked page. - */ - kvm_mmu_gfn_disallow_lpage(slot, gfn); - - if (mode == KVM_PAGE_TRACK_WRITE) - if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn)) - kvm_flush_remote_tlbs(kvm); -} -EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page); - -/* - * remove the guest page from the tracking pool which stops the interception - * of corresponding access on that page. It is the opposed operation of - * kvm_slot_page_track_add_page(). - * - * It should be called under the protection both of mmu-lock and kvm->srcu - * or kvm->slots_lock. - * - * @kvm: the guest instance we are interested in. - * @slot: the @gfn belongs to. - * @gfn: the guest page. - * @mode: tracking mode, currently only write track is supported. - */ -void kvm_slot_page_track_remove_page(struct kvm *kvm, - struct kvm_memory_slot *slot, gfn_t gfn, - enum kvm_page_track_mode mode) -{ - if (WARN_ON(!page_track_mode_is_valid(mode))) - return; - - update_gfn_track(slot, gfn, mode, -1); - - /* - * allow large page mapping for the tracked page - * after the tracker is gone. - */ - kvm_mmu_gfn_allow_lpage(slot, gfn); -} -EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page); - -/* - * check if the corresponding access on the specified guest page is tracked. - */ -bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, - enum kvm_page_track_mode mode) -{ - struct kvm_memory_slot *slot; - int index; - - if (WARN_ON(!page_track_mode_is_valid(mode))) - return false; - - slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); - if (!slot) - return false; - - index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL); - return !!READ_ONCE(slot->arch.gfn_track[mode][index]); -} - -void kvm_page_track_cleanup(struct kvm *kvm) -{ - struct kvm_page_track_notifier_head *head; - - head = &kvm->arch.track_notifier_head; - cleanup_srcu_struct(&head->track_srcu); -} - -void kvm_page_track_init(struct kvm *kvm) -{ - struct kvm_page_track_notifier_head *head; - - head = &kvm->arch.track_notifier_head; - init_srcu_struct(&head->track_srcu); - INIT_HLIST_HEAD(&head->track_notifier_list); -} - -/* - * register the notifier so that event interception for the tracked guest - * pages can be received. - */ -void -kvm_page_track_register_notifier(struct kvm *kvm, - struct kvm_page_track_notifier_node *n) -{ - struct kvm_page_track_notifier_head *head; - - head = &kvm->arch.track_notifier_head; - - spin_lock(&kvm->mmu_lock); - hlist_add_head_rcu(&n->node, &head->track_notifier_list); - spin_unlock(&kvm->mmu_lock); -} -EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier); - -/* - * stop receiving the event interception. It is the opposed operation of - * kvm_page_track_register_notifier(). - */ -void -kvm_page_track_unregister_notifier(struct kvm *kvm, - struct kvm_page_track_notifier_node *n) -{ - struct kvm_page_track_notifier_head *head; - - head = &kvm->arch.track_notifier_head; - - spin_lock(&kvm->mmu_lock); - hlist_del_rcu(&n->node); - spin_unlock(&kvm->mmu_lock); - synchronize_srcu(&head->track_srcu); -} -EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier); - -/* - * Notify the node that write access is intercepted and write emulation is - * finished at this time. - * - * The node should figure out if the written page is the one that node is - * interested in by itself. 
- */ -void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, - int bytes) -{ - struct kvm_page_track_notifier_head *head; - struct kvm_page_track_notifier_node *n; - int idx; - - head = &vcpu->kvm->arch.track_notifier_head; - - if (hlist_empty(&head->track_notifier_list)) - return; - - idx = srcu_read_lock(&head->track_srcu); - hlist_for_each_entry_rcu(n, &head->track_notifier_list, node) - if (n->track_write) - n->track_write(vcpu, gpa, new, bytes, n); - srcu_read_unlock(&head->track_srcu, idx); -} - -/* - * Notify the node that memory slot is being removed or moved so that it can - * drop write-protection for the pages in the memory slot. - * - * The node should figure out it has any write-protected pages in this slot - * by itself. - */ -void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot) -{ - struct kvm_page_track_notifier_head *head; - struct kvm_page_track_notifier_node *n; - int idx; - - head = &kvm->arch.track_notifier_head; - - if (hlist_empty(&head->track_notifier_list)) - return; - - idx = srcu_read_lock(&head->track_srcu); - hlist_for_each_entry_rcu(n, &head->track_notifier_list, node) - if (n->track_flush_slot) - n->track_flush_slot(kvm, slot, n); - srcu_read_unlock(&head->track_srcu, idx); -} diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h deleted file mode 100644 index 97b21e7fd013..000000000000 --- a/arch/x86/kvm/paging_tmpl.h +++ /dev/null @@ -1,1090 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Kernel-based Virtual Machine driver for Linux - * - * This module enables machines with Intel VT-x extensions to run virtual - * machines without emulation or binary translation. - * - * MMU support - * - * Copyright (C) 2006 Qumranet, Inc. - * Copyright 2010 Red Hat, Inc. and/or its affiliates. - * - * Authors: - * Yaniv Kamay - * Avi Kivity - */ - -/* - * We need the mmu code to access both 32-bit and 64-bit guest ptes, - * so the code in this file is compiled twice, once per pte size. 
- */ - -#if PTTYPE == 64 - #define pt_element_t u64 - #define guest_walker guest_walker64 - #define FNAME(name) paging##64_##name - #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK - #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl) - #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl) - #define PT_INDEX(addr, level) PT64_INDEX(addr, level) - #define PT_LEVEL_BITS PT64_LEVEL_BITS - #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT - #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT - #define PT_HAVE_ACCESSED_DIRTY(mmu) true - #ifdef CONFIG_X86_64 - #define PT_MAX_FULL_LEVELS 4 - #define CMPXCHG cmpxchg - #else - #define CMPXCHG cmpxchg64 - #define PT_MAX_FULL_LEVELS 2 - #endif -#elif PTTYPE == 32 - #define pt_element_t u32 - #define guest_walker guest_walker32 - #define FNAME(name) paging##32_##name - #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK - #define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl) - #define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl) - #define PT_INDEX(addr, level) PT32_INDEX(addr, level) - #define PT_LEVEL_BITS PT32_LEVEL_BITS - #define PT_MAX_FULL_LEVELS 2 - #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT - #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT - #define PT_HAVE_ACCESSED_DIRTY(mmu) true - #define CMPXCHG cmpxchg -#elif PTTYPE == PTTYPE_EPT - #define pt_element_t u64 - #define guest_walker guest_walkerEPT - #define FNAME(name) ept_##name - #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK - #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl) - #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl) - #define PT_INDEX(addr, level) PT64_INDEX(addr, level) - #define PT_LEVEL_BITS PT64_LEVEL_BITS - #define PT_GUEST_DIRTY_SHIFT 9 - #define PT_GUEST_ACCESSED_SHIFT 8 - #define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad) - #define CMPXCHG cmpxchg64 - #define PT_MAX_FULL_LEVELS 4 -#else - #error Invalid PTTYPE value -#endif - -#define PT_GUEST_DIRTY_MASK (1 << PT_GUEST_DIRTY_SHIFT) -#define PT_GUEST_ACCESSED_MASK (1 << PT_GUEST_ACCESSED_SHIFT) - -#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl) -#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL) - -/* - * The guest_walker structure emulates the behavior of the hardware page - * table walker. 
- */ -struct guest_walker { - int level; - unsigned max_level; - gfn_t table_gfn[PT_MAX_FULL_LEVELS]; - pt_element_t ptes[PT_MAX_FULL_LEVELS]; - pt_element_t prefetch_ptes[PTE_PREFETCH_NUM]; - gpa_t pte_gpa[PT_MAX_FULL_LEVELS]; - pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS]; - bool pte_writable[PT_MAX_FULL_LEVELS]; - unsigned pt_access; - unsigned pte_access; - gfn_t gfn; - struct x86_exception fault; -}; - -static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl) -{ - return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT; -} - -static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access, - unsigned gpte) -{ - unsigned mask; - - /* dirty bit is not supported, so no need to track it */ - if (!PT_HAVE_ACCESSED_DIRTY(mmu)) - return; - - BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK); - - mask = (unsigned)~ACC_WRITE_MASK; - /* Allow write access to dirty gptes */ - mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & - PT_WRITABLE_MASK; - *access &= mask; -} - -static inline int FNAME(is_present_gpte)(unsigned long pte) -{ -#if PTTYPE != PTTYPE_EPT - return pte & PT_PRESENT_MASK; -#else - return pte & 7; -#endif -} - -static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, - pt_element_t __user *ptep_user, unsigned index, - pt_element_t orig_pte, pt_element_t new_pte) -{ - int npages; - pt_element_t ret; - pt_element_t *table; - struct page *page; - - npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page); - if (likely(npages == 1)) { - table = kmap_atomic(page); - ret = CMPXCHG(&table[index], orig_pte, new_pte); - kunmap_atomic(table); - - kvm_release_page_dirty(page); - } else { - struct vm_area_struct *vma; - unsigned long vaddr = (unsigned long)ptep_user & PAGE_MASK; - unsigned long pfn; - unsigned long paddr; - - down_read(¤t->mm->mmap_sem); - vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE); - if (!vma || !(vma->vm_flags & VM_PFNMAP)) { - up_read(¤t->mm->mmap_sem); - return -EFAULT; - } - pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; - paddr = pfn << PAGE_SHIFT; - table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB); - if (!table) { - up_read(¤t->mm->mmap_sem); - return -EFAULT; - } - ret = CMPXCHG(&table[index], orig_pte, new_pte); - memunmap(table); - up_read(¤t->mm->mmap_sem); - } - - return (ret != orig_pte); -} - -static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu, - struct kvm_mmu_page *sp, u64 *spte, - u64 gpte) -{ - if (is_rsvd_bits_set(vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) - goto no_present; - - if (!FNAME(is_present_gpte)(gpte)) - goto no_present; - - /* if accessed bit is not supported prefetch non accessed gpte */ - if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) && - !(gpte & PT_GUEST_ACCESSED_MASK)) - goto no_present; - - return false; - -no_present: - drop_spte(vcpu->kvm, spte); - return true; -} - -/* - * For PTTYPE_EPT, a page table can be executable but not readable - * on supported processors. Therefore, set_spte does not automatically - * set bit 0 if execute only is supported. Here, we repurpose ACC_USER_MASK - * to signify readability since it isn't used in the EPT case - */ -static inline unsigned FNAME(gpte_access)(u64 gpte) -{ - unsigned access; -#if PTTYPE == PTTYPE_EPT - access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) | - ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) | - ((gpte & VMX_EPT_READABLE_MASK) ? 
ACC_USER_MASK : 0); -#else - BUILD_BUG_ON(ACC_EXEC_MASK != PT_PRESENT_MASK); - BUILD_BUG_ON(ACC_EXEC_MASK != 1); - access = gpte & (PT_WRITABLE_MASK | PT_USER_MASK | PT_PRESENT_MASK); - /* Combine NX with P (which is set here) to get ACC_EXEC_MASK. */ - access ^= (gpte >> PT64_NX_SHIFT); -#endif - - return access; -} - -static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, - struct kvm_mmu *mmu, - struct guest_walker *walker, - int write_fault) -{ - unsigned level, index; - pt_element_t pte, orig_pte; - pt_element_t __user *ptep_user; - gfn_t table_gfn; - int ret; - - /* dirty/accessed bits are not supported, so no need to update them */ - if (!PT_HAVE_ACCESSED_DIRTY(mmu)) - return 0; - - for (level = walker->max_level; level >= walker->level; --level) { - pte = orig_pte = walker->ptes[level - 1]; - table_gfn = walker->table_gfn[level - 1]; - ptep_user = walker->ptep_user[level - 1]; - index = offset_in_page(ptep_user) / sizeof(pt_element_t); - if (!(pte & PT_GUEST_ACCESSED_MASK)) { - trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte)); - pte |= PT_GUEST_ACCESSED_MASK; - } - if (level == walker->level && write_fault && - !(pte & PT_GUEST_DIRTY_MASK)) { - trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte)); -#if PTTYPE == PTTYPE_EPT - if (kvm_arch_write_log_dirty(vcpu)) - return -EINVAL; -#endif - pte |= PT_GUEST_DIRTY_MASK; - } - if (pte == orig_pte) - continue; - - /* - * If the slot is read-only, simply do not process the accessed - * and dirty bits. This is the correct thing to do if the slot - * is ROM, and page tables in read-as-ROM/write-as-MMIO slots - * are only supported if the accessed and dirty bits are already - * set in the ROM (so that MMIO writes are never needed). - * - * Note that NPT does not allow this at all and faults, since - * it always wants nested page table entries for the guest - * page tables to be writable. And EPT works but will simply - * overwrite the read-only memory to set the accessed and dirty - * bits. 
- */ - if (unlikely(!walker->pte_writable[level - 1])) - continue; - - ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte); - if (ret) - return ret; - - kvm_vcpu_mark_page_dirty(vcpu, table_gfn); - walker->ptes[level - 1] = pte; - } - return 0; -} - -static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte) -{ - unsigned pkeys = 0; -#if PTTYPE == 64 - pte_t pte = {.pte = gpte}; - - pkeys = pte_flags_pkey(pte_flags(pte)); -#endif - return pkeys; -} - -/* - * Fetch a guest pte for a guest virtual address - */ -static int FNAME(walk_addr_generic)(struct guest_walker *walker, - struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, - gva_t addr, u32 access) -{ - int ret; - pt_element_t pte; - pt_element_t __user *uninitialized_var(ptep_user); - gfn_t table_gfn; - u64 pt_access, pte_access; - unsigned index, accessed_dirty, pte_pkey; - unsigned nested_access; - gpa_t pte_gpa; - bool have_ad; - int offset; - u64 walk_nx_mask = 0; - const int write_fault = access & PFERR_WRITE_MASK; - const int user_fault = access & PFERR_USER_MASK; - const int fetch_fault = access & PFERR_FETCH_MASK; - u16 errcode = 0; - gpa_t real_gpa; - gfn_t gfn; - - trace_kvm_mmu_pagetable_walk(addr, access); -retry_walk: - walker->level = mmu->root_level; - pte = mmu->get_cr3(vcpu); - have_ad = PT_HAVE_ACCESSED_DIRTY(mmu); - -#if PTTYPE == 64 - walk_nx_mask = 1ULL << PT64_NX_SHIFT; - if (walker->level == PT32E_ROOT_LEVEL) { - pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3); - trace_kvm_mmu_paging_element(pte, walker->level); - if (!FNAME(is_present_gpte)(pte)) - goto error; - --walker->level; - } -#endif - walker->max_level = walker->level; - ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu))); - - /* - * FIXME: on Intel processors, loads of the PDPTE registers for PAE paging - * by the MOV to CR instruction are treated as reads and do not cause the - * processor to set the dirty flag in any EPT paging-structure entry. - */ - nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK; - - pte_access = ~0; - ++walker->level; - - do { - gfn_t real_gfn; - unsigned long host_addr; - - pt_access = pte_access; - --walker->level; - - index = PT_INDEX(addr, walker->level); - table_gfn = gpte_to_gfn(pte); - offset = index * sizeof(pt_element_t); - pte_gpa = gfn_to_gpa(table_gfn) + offset; - - BUG_ON(walker->level < 1); - walker->table_gfn[walker->level - 1] = table_gfn; - walker->pte_gpa[walker->level - 1] = pte_gpa; - - real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn), - nested_access, - &walker->fault); - - /* - * FIXME: This can happen if emulation (for of an INS/OUTS - * instruction) triggers a nested page fault. The exit - * qualification / exit info field will incorrectly have - * "guest page access" as the nested page fault's cause, - * instead of "guest page structure access". To fix this, - * the x86_exception struct should be augmented with enough - * information to fix the exit_qualification or exit_info_1 - * fields. 
- */ - if (unlikely(real_gfn == UNMAPPED_GVA)) - return 0; - - real_gfn = gpa_to_gfn(real_gfn); - - host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, real_gfn, - &walker->pte_writable[walker->level - 1]); - if (unlikely(kvm_is_error_hva(host_addr))) - goto error; - - ptep_user = (pt_element_t __user *)((void *)host_addr + offset); - if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) - goto error; - walker->ptep_user[walker->level - 1] = ptep_user; - - trace_kvm_mmu_paging_element(pte, walker->level); - - /* - * Inverting the NX it lets us AND it like other - * permission bits. - */ - pte_access = pt_access & (pte ^ walk_nx_mask); - - if (unlikely(!FNAME(is_present_gpte)(pte))) - goto error; - - if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) { - errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK; - goto error; - } - - walker->ptes[walker->level - 1] = pte; - } while (!is_last_gpte(mmu, walker->level, pte)); - - pte_pkey = FNAME(gpte_pkeys)(vcpu, pte); - accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0; - - /* Convert to ACC_*_MASK flags for struct guest_walker. */ - walker->pt_access = FNAME(gpte_access)(pt_access ^ walk_nx_mask); - walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask); - errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access); - if (unlikely(errcode)) - goto error; - - gfn = gpte_to_gfn_lvl(pte, walker->level); - gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT; - - if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36()) - gfn += pse36_gfn_delta(pte); - - real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault); - if (real_gpa == UNMAPPED_GVA) - return 0; - - walker->gfn = real_gpa >> PAGE_SHIFT; - - if (!write_fault) - FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte); - else - /* - * On a write fault, fold the dirty bit into accessed_dirty. - * For modes without A/D bits support accessed_dirty will be - * always clear. - */ - accessed_dirty &= pte >> - (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT); - - if (unlikely(!accessed_dirty)) { - ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault); - if (unlikely(ret < 0)) - goto error; - else if (ret) - goto retry_walk; - } - - pgprintk("%s: pte %llx pte_access %x pt_access %x\n", - __func__, (u64)pte, walker->pte_access, walker->pt_access); - return 1; - -error: - errcode |= write_fault | user_fault; - if (fetch_fault && (mmu->nx || - kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))) - errcode |= PFERR_FETCH_MASK; - - walker->fault.vector = PF_VECTOR; - walker->fault.error_code_valid = true; - walker->fault.error_code = errcode; - -#if PTTYPE == PTTYPE_EPT - /* - * Use PFERR_RSVD_MASK in error_code to to tell if EPT - * misconfiguration requires to be injected. The detection is - * done by is_rsvd_bits_set() above. - * - * We set up the value of exit_qualification to inject: - * [2:0] - Derive from the access bits. The exit_qualification might be - * out of date if it is serving an EPT misconfiguration. - * [5:3] - Calculated by the page walk of the guest EPT page tables - * [7:8] - Derived from [7:8] of real exit_qualification - * - * The other bits are set to 0. 
- */ - if (!(errcode & PFERR_RSVD_MASK)) { - vcpu->arch.exit_qualification &= 0x180; - if (write_fault) - vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_WRITE; - if (user_fault) - vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_READ; - if (fetch_fault) - vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_INSTR; - vcpu->arch.exit_qualification |= (pte_access & 0x7) << 3; - } -#endif - walker->fault.address = addr; - walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu; - - trace_kvm_mmu_walker_error(walker->fault.error_code); - return 0; -} - -static int FNAME(walk_addr)(struct guest_walker *walker, - struct kvm_vcpu *vcpu, gva_t addr, u32 access) -{ - return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr, - access); -} - -#if PTTYPE != PTTYPE_EPT -static int FNAME(walk_addr_nested)(struct guest_walker *walker, - struct kvm_vcpu *vcpu, gva_t addr, - u32 access) -{ - return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu, - addr, access); -} -#endif - -static bool -FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, - u64 *spte, pt_element_t gpte, bool no_dirty_log) -{ - unsigned pte_access; - gfn_t gfn; - kvm_pfn_t pfn; - - if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte)) - return false; - - pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte); - - gfn = gpte_to_gfn(gpte); - pte_access = sp->role.access & FNAME(gpte_access)(gpte); - FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); - pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn, - no_dirty_log && (pte_access & ACC_WRITE_MASK)); - if (is_error_pfn(pfn)) - return false; - - /* - * we call mmu_set_spte() with host_writable = true because - * pte_prefetch_gfn_to_pfn always gets a writable pfn. - */ - mmu_set_spte(vcpu, spte, pte_access, 0, PT_PAGE_TABLE_LEVEL, gfn, pfn, - true, true); - - kvm_release_pfn_clean(pfn); - return true; -} - -static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, - u64 *spte, const void *pte) -{ - pt_element_t gpte = *(const pt_element_t *)pte; - - FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false); -} - -static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu, - struct guest_walker *gw, int level) -{ - pt_element_t curr_pte; - gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1]; - u64 mask; - int r, index; - - if (level == PT_PAGE_TABLE_LEVEL) { - mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1; - base_gpa = pte_gpa & ~mask; - index = (pte_gpa - base_gpa) / sizeof(pt_element_t); - - r = kvm_vcpu_read_guest_atomic(vcpu, base_gpa, - gw->prefetch_ptes, sizeof(gw->prefetch_ptes)); - curr_pte = gw->prefetch_ptes[index]; - } else - r = kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, - &curr_pte, sizeof(curr_pte)); - - return r || curr_pte != gw->ptes[level - 1]; -} - -static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw, - u64 *sptep) -{ - struct kvm_mmu_page *sp; - pt_element_t *gptep = gw->prefetch_ptes; - u64 *spte; - int i; - - sp = page_header(__pa(sptep)); - - if (sp->role.level > PT_PAGE_TABLE_LEVEL) - return; - - if (sp->role.direct) - return __direct_pte_prefetch(vcpu, sp, sptep); - - i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1); - spte = sp->spt + i; - - for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) { - if (spte == sptep) - continue; - - if (is_shadow_present_pte(*spte)) - continue; - - if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true)) - break; - } -} - -/* - * Fetch a shadow pte for a specific level in the paging hierarchy. 
- * If the guest tries to write a write-protected page, we need to - * emulate this operation, return 1 to indicate this case. - */ -static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, - struct guest_walker *gw, - int write_fault, int hlevel, - kvm_pfn_t pfn, bool map_writable, bool prefault, - bool lpage_disallowed) -{ - struct kvm_mmu_page *sp = NULL; - struct kvm_shadow_walk_iterator it; - unsigned direct_access, access = gw->pt_access; - int top_level, ret; - gfn_t gfn, base_gfn; - - direct_access = gw->pte_access; - - top_level = vcpu->arch.mmu->root_level; - if (top_level == PT32E_ROOT_LEVEL) - top_level = PT32_ROOT_LEVEL; - /* - * Verify that the top-level gpte is still there. Since the page - * is a root page, it is either write protected (and cannot be - * changed from now on) or it is invalid (in which case, we don't - * really care if it changes underneath us after this point). - */ - if (FNAME(gpte_changed)(vcpu, gw, top_level)) - goto out_gpte_changed; - - if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) - goto out_gpte_changed; - - for (shadow_walk_init(&it, vcpu, addr); - shadow_walk_okay(&it) && it.level > gw->level; - shadow_walk_next(&it)) { - gfn_t table_gfn; - - clear_sp_write_flooding_count(it.sptep); - drop_large_spte(vcpu, it.sptep); - - sp = NULL; - if (!is_shadow_present_pte(*it.sptep)) { - table_gfn = gw->table_gfn[it.level - 2]; - sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1, - false, access); - } - - /* - * Verify that the gpte in the page we've just write - * protected is still there. - */ - if (FNAME(gpte_changed)(vcpu, gw, it.level - 1)) - goto out_gpte_changed; - - if (sp) - link_shadow_page(vcpu, it.sptep, sp); - } - - /* - * FNAME(page_fault) might have clobbered the bottom bits of - * gw->gfn, restore them from the virtual address. - */ - gfn = gw->gfn | ((addr & PT_LVL_OFFSET_MASK(gw->level)) >> PAGE_SHIFT); - base_gfn = gfn; - - trace_kvm_mmu_spte_requested(addr, gw->level, pfn); - - for (; shadow_walk_okay(&it); shadow_walk_next(&it)) { - clear_sp_write_flooding_count(it.sptep); - - /* - * We cannot overwrite existing page tables with an NX - * large page, as the leaf could be executable. - */ - disallowed_hugepage_adjust(it, gfn, &pfn, &hlevel); - - base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); - if (it.level == hlevel) - break; - - validate_direct_spte(vcpu, it.sptep, direct_access); - - drop_large_spte(vcpu, it.sptep); - - if (!is_shadow_present_pte(*it.sptep)) { - sp = kvm_mmu_get_page(vcpu, base_gfn, addr, - it.level - 1, true, direct_access); - link_shadow_page(vcpu, it.sptep, sp); - if (lpage_disallowed) - account_huge_nx_page(vcpu->kvm, sp); - } - } - - ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, - it.level, base_gfn, pfn, prefault, map_writable); - FNAME(pte_prefetch)(vcpu, gw, it.sptep); - ++vcpu->stat.pf_fixed; - return ret; - -out_gpte_changed: - return RET_PF_RETRY; -} - - /* - * To see whether the mapped gfn can write its page table in the current - * mapping. - * - * It is the helper function of FNAME(page_fault). When guest uses large page - * size to map the writable gfn which is used as current page table, we should - * force kvm to use small page size to map it because new shadow page will be - * created when kvm establishes shadow page table that stop kvm using large - * page size. Do it early can avoid unnecessary #PF and emulation. - * - * @write_fault_to_shadow_pgtable will return true if the fault gfn is - * currently used as its page table. 
- * - * Note: the PDPT page table is not checked for PAE-32 bit guest. It is ok - * since the PDPT is always shadowed, that means, we can not use large page - * size to map the gfn which is used as PDPT. - */ -static bool -FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu, - struct guest_walker *walker, int user_fault, - bool *write_fault_to_shadow_pgtable) -{ - int level; - gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1); - bool self_changed = false; - - if (!(walker->pte_access & ACC_WRITE_MASK || - (!is_write_protection(vcpu) && !user_fault))) - return false; - - for (level = walker->level; level <= walker->max_level; level++) { - gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1]; - - self_changed |= !(gfn & mask); - *write_fault_to_shadow_pgtable |= !gfn; - } - - return self_changed; -} - -/* - * Page fault handler. There are several causes for a page fault: - * - there is no shadow pte for the guest pte - * - write access through a shadow pte marked read only so that we can set - * the dirty bit - * - write access to a shadow pte marked read only so we can update the page - * dirty bitmap, when userspace requests it - * - mmio access; in this case we will never install a present shadow pte - * - normal guest page fault due to the guest pte marked not present, not - * writable, or not executable - * - * Returns: 1 if we need to emulate the instruction, 0 otherwise, or - * a negative value on error. - */ -static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, - bool prefault) -{ - int write_fault = error_code & PFERR_WRITE_MASK; - int user_fault = error_code & PFERR_USER_MASK; - struct guest_walker walker; - int r; - kvm_pfn_t pfn; - int level = PT_PAGE_TABLE_LEVEL; - unsigned long mmu_seq; - bool map_writable, is_self_change_mapping; - bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) && - is_nx_huge_page_enabled(); - bool force_pt_level = lpage_disallowed; - - pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code); - - r = mmu_topup_memory_caches(vcpu); - if (r) - return r; - - /* - * If PFEC.RSVD is set, this is a shadow page fault. - * The bit needs to be cleared before walking guest page tables. - */ - error_code &= ~PFERR_RSVD_MASK; - - /* - * Look up the guest pte for the faulting address. - */ - r = FNAME(walk_addr)(&walker, vcpu, addr, error_code); - - /* - * The page is not mapped by the guest. Let the guest handle it. 
- */ - if (!r) { - pgprintk("%s: guest page fault\n", __func__); - if (!prefault) - inject_page_fault(vcpu, &walker.fault); - - return RET_PF_RETRY; - } - - if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) { - shadow_page_table_clear_flood(vcpu, addr); - return RET_PF_EMULATE; - } - - vcpu->arch.write_fault_to_shadow_pgtable = false; - - is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu, - &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable); - - if (walker.level >= PT_DIRECTORY_LEVEL && !is_self_change_mapping) { - level = mapping_level(vcpu, walker.gfn, &force_pt_level); - if (likely(!force_pt_level)) { - level = min(walker.level, level); - walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1); - } - } else - force_pt_level = true; - - mmu_seq = vcpu->kvm->mmu_notifier_seq; - smp_rmb(); - - if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault, - &map_writable)) - return RET_PF_RETRY; - - if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r)) - return r; - - /* - * Do not change pte_access if the pfn is a mmio page, otherwise - * we will cache the incorrect access into mmio spte. - */ - if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) && - !is_write_protection(vcpu) && !user_fault && - !is_noslot_pfn(pfn)) { - walker.pte_access |= ACC_WRITE_MASK; - walker.pte_access &= ~ACC_USER_MASK; - - /* - * If we converted a user page to a kernel page, - * so that the kernel can write to it when cr0.wp=0, - * then we should prevent the kernel from executing it - * if SMEP is enabled. - */ - if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)) - walker.pte_access &= ~ACC_EXEC_MASK; - } - - r = RET_PF_RETRY; - spin_lock(&vcpu->kvm->mmu_lock); - if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) - goto out_unlock; - - kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT); - if (make_mmu_pages_available(vcpu) < 0) - goto out_unlock; - if (!force_pt_level) - transparent_hugepage_adjust(vcpu, walker.gfn, &pfn, &level); - r = FNAME(fetch)(vcpu, addr, &walker, write_fault, - level, pfn, map_writable, prefault, lpage_disallowed); - kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT); - -out_unlock: - spin_unlock(&vcpu->kvm->mmu_lock); - kvm_release_pfn_clean(pfn); - return r; -} - -static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp) -{ - int offset = 0; - - WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL); - - if (PTTYPE == 32) - offset = sp->role.quadrant << PT64_LEVEL_BITS; - - return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t); -} - -static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) -{ - struct kvm_shadow_walk_iterator iterator; - struct kvm_mmu_page *sp; - int level; - u64 *sptep; - - vcpu_clear_mmio_info(vcpu, gva); - - /* - * No need to check return value here, rmap_can_add() can - * help us to skip pte prefetch later. 
- */ - mmu_topup_memory_caches(vcpu); - - if (!VALID_PAGE(root_hpa)) { - WARN_ON(1); - return; - } - - spin_lock(&vcpu->kvm->mmu_lock); - for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) { - level = iterator.level; - sptep = iterator.sptep; - - sp = page_header(__pa(sptep)); - if (is_last_spte(*sptep, level)) { - pt_element_t gpte; - gpa_t pte_gpa; - - if (!sp->unsync) - break; - - pte_gpa = FNAME(get_level1_sp_gpa)(sp); - pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t); - - if (mmu_page_zap_pte(vcpu->kvm, sp, sptep)) - kvm_flush_remote_tlbs_with_address(vcpu->kvm, - sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level)); - - if (!rmap_can_add(vcpu)) - break; - - if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte, - sizeof(pt_element_t))) - break; - - FNAME(update_pte)(vcpu, sp, sptep, &gpte); - } - - if (!is_shadow_present_pte(*sptep) || !sp->unsync_children) - break; - } - spin_unlock(&vcpu->kvm->mmu_lock); -} - -static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access, - struct x86_exception *exception) -{ - struct guest_walker walker; - gpa_t gpa = UNMAPPED_GVA; - int r; - - r = FNAME(walk_addr)(&walker, vcpu, vaddr, access); - - if (r) { - gpa = gfn_to_gpa(walker.gfn); - gpa |= vaddr & ~PAGE_MASK; - } else if (exception) - *exception = walker.fault; - - return gpa; -} - -#if PTTYPE != PTTYPE_EPT -static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr, - u32 access, - struct x86_exception *exception) -{ - struct guest_walker walker; - gpa_t gpa = UNMAPPED_GVA; - int r; - - r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access); - - if (r) { - gpa = gfn_to_gpa(walker.gfn); - gpa |= vaddr & ~PAGE_MASK; - } else if (exception) - *exception = walker.fault; - - return gpa; -} -#endif - -/* - * Using the cached information from sp->gfns is safe because: - * - The spte has a reference to the struct page, so the pfn for a given gfn - * can't change unless all sptes pointing to it are nuked first. - * - * Note: - * We should flush all tlbs if spte is dropped even though guest is - * responsible for it. Since if we don't, kvm_mmu_notifier_invalidate_page - * and kvm_mmu_notifier_invalidate_range_start detect the mapping page isn't - * used by guest then tlbs are not flushed, so guest is allowed to access the - * freed pages. - * And we increase kvm->tlbs_dirty to delay tlbs flush in this case. - */ -static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) -{ - int i, nr_present = 0; - bool host_writable; - gpa_t first_pte_gpa; - int set_spte_ret = 0; - - /* direct kvm_mmu_page can not be unsync. */ - BUG_ON(sp->role.direct); - - first_pte_gpa = FNAME(get_level1_sp_gpa)(sp); - - for (i = 0; i < PT64_ENT_PER_PAGE; i++) { - unsigned pte_access; - pt_element_t gpte; - gpa_t pte_gpa; - gfn_t gfn; - - if (!sp->spt[i]) - continue; - - pte_gpa = first_pte_gpa + i * sizeof(pt_element_t); - - if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte, - sizeof(pt_element_t))) - return 0; - - if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) { - /* - * Update spte before increasing tlbs_dirty to make - * sure no tlb flush is lost after spte is zapped; see - * the comments in kvm_flush_remote_tlbs(). 
- */ - smp_wmb(); - vcpu->kvm->tlbs_dirty++; - continue; - } - - gfn = gpte_to_gfn(gpte); - pte_access = sp->role.access; - pte_access &= FNAME(gpte_access)(gpte); - FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); - - if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access, - &nr_present)) - continue; - - if (gfn != sp->gfns[i]) { - drop_spte(vcpu->kvm, &sp->spt[i]); - /* - * The same as above where we are doing - * prefetch_invalid_gpte(). - */ - smp_wmb(); - vcpu->kvm->tlbs_dirty++; - continue; - } - - nr_present++; - - host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE; - - set_spte_ret |= set_spte(vcpu, &sp->spt[i], - pte_access, PT_PAGE_TABLE_LEVEL, - gfn, spte_to_pfn(sp->spt[i]), - true, false, host_writable); - } - - if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH) - kvm_flush_remote_tlbs(vcpu->kvm); - - return nr_present; -} - -#undef pt_element_t -#undef guest_walker -#undef FNAME -#undef PT_BASE_ADDR_MASK -#undef PT_INDEX -#undef PT_LVL_ADDR_MASK -#undef PT_LVL_OFFSET_MASK -#undef PT_LEVEL_BITS -#undef PT_MAX_FULL_LEVELS -#undef gpte_to_gfn -#undef gpte_to_gfn_lvl -#undef CMPXCHG -#undef PT_GUEST_ACCESSED_MASK -#undef PT_GUEST_DIRTY_MASK -#undef PT_GUEST_DIRTY_SHIFT -#undef PT_GUEST_ACCESSED_SHIFT -#undef PT_HAVE_ACCESSED_DIRTY -- cgit v1.2.3 From 24885d1d79e2e83d49201aeae0bc59f1402fd4f1 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 22 Nov 2019 12:15:49 -0800 Subject: KVM: x86: Remove a spurious export of a static function A recent change inadvertently exported a static function, which results in modpost throwing a warning. Fix it. Fixes: cbbaa2727aa3 ("KVM: x86: fix presentation of TSX feature in ARCH_CAPABILITIES") Signed-off-by: Sean Christopherson Cc: stable@vger.kernel.org Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a256e09f321a..3e9ab2d1ea77 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1329,7 +1329,6 @@ static u64 kvm_get_arch_capabilities(void) return data; } -EXPORT_SYMBOL_GPL(kvm_get_arch_capabilities); static int kvm_get_msr_feature(struct kvm_msr_entry *msr) { -- cgit v1.2.3 From faf0be22167486feb1ee386f0e819a336e829d0a Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 23 Nov 2019 10:45:50 +0800 Subject: KVM: Fix jump label out_free_* in kvm_init() The jump label out_free_1 and out_free_2 deal with the same stuff, so git rid of one and rename the label out_free_0a to retain the label name order. 
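In short, the unwind path had two adjacent labels with nothing between them, so one of them is redundant. A condensed before/after extract of the change (abbreviated from the diff below):

	/* before: out_free_2 falls straight through to out_free_1 */
	out_free_2:
	out_free_1:
		kvm_arch_hardware_unsetup();
	out_free_0a:
		free_cpumask_var(cpus_hardware_enabled);

	/* after: the redundant label is gone and the rest are renamed */
	out_free_2:
		kvm_arch_hardware_unsetup();
	out_free_1:
		free_cpumask_var(cpus_hardware_enabled);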
Signed-off-by: Miaohe Lin Signed-off-by: Paolo Bonzini --- virt/kvm/kvm_main.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 13e6b7094596..00268290dcbd 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -4354,12 +4354,12 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, r = kvm_arch_hardware_setup(); if (r < 0) - goto out_free_0a; + goto out_free_1; for_each_online_cpu(cpu) { smp_call_function_single(cpu, check_processor_compat, &r, 1); if (r < 0) - goto out_free_1; + goto out_free_2; } r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting", @@ -4416,9 +4416,8 @@ out_free_3: unregister_reboot_notifier(&kvm_reboot_notifier); cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); out_free_2: -out_free_1: kvm_arch_hardware_unsetup(); -out_free_0a: +out_free_1: free_cpumask_var(cpus_hardware_enabled); out_free_0: kvm_irqfd_exit(); -- cgit v1.2.3 From 05c19c2fe17c331ec384a2953be50101e8a15a73 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 22 Nov 2019 12:04:50 -0800 Subject: KVM: x86: Open code shared_msr_update() in its only caller Fold shared_msr_update() into its sole user to eliminate its pointless bounds check, its godawful printk, its misleading comment (it's called under a global lock), and its woefully inaccurate name. Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 29 +++++++++-------------------- 1 file changed, 9 insertions(+), 20 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3e9ab2d1ea77..f5db520f2ac3 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -262,23 +262,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn) } } -static void shared_msr_update(unsigned slot, u32 msr) -{ - u64 value; - unsigned int cpu = smp_processor_id(); - struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); - - /* only read, and nobody should modify it at this time, - * so don't need lock */ - if (slot >= shared_msrs_global.nr) { - printk(KERN_ERR "kvm: invalid MSR slot!"); - return; - } - rdmsrl_safe(msr, &value); - smsr->values[slot].host = value; - smsr->values[slot].curr = value; -} - void kvm_define_shared_msr(unsigned slot, u32 msr) { BUG_ON(slot >= KVM_NR_SHARED_MSRS); @@ -290,10 +273,16 @@ EXPORT_SYMBOL_GPL(kvm_define_shared_msr); static void kvm_shared_msr_cpu_online(void) { - unsigned i; + unsigned int cpu = smp_processor_id(); + struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); + u64 value; + int i; - for (i = 0; i < shared_msrs_global.nr; ++i) - shared_msr_update(i, shared_msrs_global.msrs[i]); + for (i = 0; i < shared_msrs_global.nr; ++i) { + rdmsrl_safe(shared_msrs_global.msrs[i], &value); + smsr->values[i].host = value; + smsr->values[i].curr = value; + } } int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) -- cgit v1.2.3 From ad5996d9a0e8019c3ae5151e687939369acfe044 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 22 Nov 2019 08:58:18 -0800 Subject: KVM: x86: Grab KVM's srcu lock when setting nested state Acquire kvm->srcu for the duration of ->set_nested_state() to fix a bug where nVMX derefences ->memslots without holding ->srcu or ->slots_lock. The other half of nested migration, ->get_nested_state(), does not need to acquire ->srcu as it is a purely a dump of internal KVM (and CPU) state to userspace. Detected as an RCU lockdep splat that is 100% reproducible by running KVM's state_test selftest with CONFIG_PROVE_LOCKING=y. 
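In pattern form, the fix simply brackets the ->set_nested_state() call in an SRCU read-side critical section (a sketch of the hunk shown in the diff below):

	int idx;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);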
Note that the failing function, kvm_is_visible_gfn(), is only checking the validity of a gfn, it's not actually accessing guest memory (which is more or less unsupported during vmx_set_nested_state() due to incorrect MMU state), i.e. vmx_set_nested_state() itself isn't fundamentally broken. In any case, setting nested state isn't a fast path so there's no reason to go out of our way to avoid taking ->srcu. ============================= WARNING: suspicious RCU usage 5.4.0-rc7+ #94 Not tainted ----------------------------- include/linux/kvm_host.h:626 suspicious rcu_dereference_check() usage! other info that might help us debug this: rcu_scheduler_active = 2, debug_locks = 1 1 lock held by evmcs_test/10939: #0: ffff88826ffcb800 (&vcpu->mutex){+.+.}, at: kvm_vcpu_ioctl+0x85/0x630 [kvm] stack backtrace: CPU: 1 PID: 10939 Comm: evmcs_test Not tainted 5.4.0-rc7+ #94 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015 Call Trace: dump_stack+0x68/0x9b kvm_is_visible_gfn+0x179/0x180 [kvm] mmu_check_root+0x11/0x30 [kvm] fast_cr3_switch+0x40/0x120 [kvm] kvm_mmu_new_cr3+0x34/0x60 [kvm] nested_vmx_load_cr3+0xbd/0x1f0 [kvm_intel] nested_vmx_enter_non_root_mode+0xab8/0x1d60 [kvm_intel] vmx_set_nested_state+0x256/0x340 [kvm_intel] kvm_arch_vcpu_ioctl+0x491/0x11a0 [kvm] kvm_vcpu_ioctl+0xde/0x630 [kvm] do_vfs_ioctl+0xa2/0x6c0 ksys_ioctl+0x66/0x70 __x64_sys_ioctl+0x16/0x20 do_syscall_64+0x54/0x200 entry_SYSCALL_64_after_hwframe+0x49/0xbe RIP: 0033:0x7f59a2b95f47 Fixes: 8fcc4b5923af5 ("kvm: nVMX: Introduce KVM_CAP_NESTED_STATE") Cc: stable@vger.kernel.org Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f5db520f2ac3..3ed167e039e5 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4442,6 +4442,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_NESTED_STATE: { struct kvm_nested_state __user *user_kvm_nested_state = argp; struct kvm_nested_state kvm_state; + int idx; r = -EINVAL; if (!kvm_x86_ops->set_nested_state) @@ -4465,7 +4466,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE)) break; + idx = srcu_read_lock(&vcpu->kvm->srcu); r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state); + srcu_read_unlock(&vcpu->kvm->srcu, idx); break; } case KVM_GET_SUPPORTED_HV_CPUID: { -- cgit v1.2.3 From 85c9aae9ac8b228f2134b56d4fc743afc446947a Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Fri, 22 Nov 2019 15:43:55 -0800 Subject: kvm: nVMX: Relax guest IA32_FEATURE_CONTROL constraints Commit 37e4c997dadf ("KVM: VMX: validate individual bits of guest MSR_IA32_FEATURE_CONTROL") broke the KVM_SET_MSRS ABI by instituting new constraints on the data values that kvm would accept for the guest MSR, IA32_FEATURE_CONTROL. Perhaps these constraints should have been opt-in via a new KVM capability, but they were applied indiscriminately, breaking at least one existing hypervisor. Relax the constraints to allow either or both of FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX and FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX to be set when nVMX is enabled. This change is sufficient to fix the aforementioned breakage. 
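For reference, a hedged sketch (not the in-tree helper) of the valid-bits test these constraints reduce to; widening the mask of valid bits is what relaxes the check:

    static bool feature_control_write_ok(u64 data, u64 valid_bits)
    {
        /* the write is accepted only if it sets no bit outside the mask */
        return !(data & ~valid_bits);
    }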
Fixes: 37e4c997dadf ("KVM: VMX: validate individual bits of guest MSR_IA32_FEATURE_CONTROL") Signed-off-by: Jim Mattson Reviewed-by: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index d39475e2d44e..d175429c91b0 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7115,10 +7115,12 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu) if (nested_vmx_allowed(vcpu)) to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= + FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; else to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= - ~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; + ~(FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX | + FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX); if (nested_vmx_allowed(vcpu)) { nested_vmx_cr_fixed1_bits_update(vcpu); -- cgit v1.2.3 From c1de0f25221c3abc7031fa482c109ccb079e7938 Mon Sep 17 00:00:00 2001 From: Peter Gonda Date: Thu, 21 Nov 2019 12:33:43 -0800 Subject: KVM x86: Move kvm cpuid support out of svm Memory encryption support does not have module parameter dependencies and can be moved into the general x86 cpuid __do_cpuid_ent function. This changes maintains current behavior of passing through all of CPUID.8000001F. Suggested-by: Jim Mattson Signed-off-by: Peter Gonda Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.c | 5 +++++ arch/x86/kvm/svm.c | 7 ------- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index c0aa07487eb8..813a4d2e5c0c 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -778,6 +778,11 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function, case 0x8000001a: case 0x8000001e: break; + /* Support memory encryption cpuid if host supports it */ + case 0x8000001F: + if (!boot_cpu_has(X86_FEATURE_SEV)) + entry->eax = entry->ebx = entry->ecx = entry->edx = 0; + break; /*Add support for Centaur's CPUID instruction*/ case 0xC0000000: /*Just support up to 0xC0000004 now*/ diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 362e874297e4..122d4ce3b1ab 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -5958,13 +5958,6 @@ static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) if (npt_enabled) entry->edx |= F(NPT); - break; - case 0x8000001F: - /* Support memory encryption cpuid if host supports it */ - if (boot_cpu_has(X86_FEATURE_SEV)) - cpuid(0x8000001f, &entry->eax, &entry->ebx, - &entry->ecx, &entry->edx); - } } -- cgit v1.2.3 From 33cf170715e87efd0608af6f2c056cafe0e7fc47 Mon Sep 17 00:00:00 2001 From: Bharata B Rao Date: Mon, 25 Nov 2019 08:36:25 +0530 Subject: mm: ksm: Export ksm_madvise() On PEF-enabled POWER platforms that support running of secure guests, secure pages of the guest are represented by device private pages in the host. Such pages needn't participate in KSM merging. This is achieved by using ksm_madvise() call which need to be exported since KVM PPC can be a kernel module. 
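A hedged sketch of the intended module-side use of the newly exported symbol (the real caller lands in the KVM PPC patches that follow; the helper name here is illustrative only, and mmap_sem must be held for write):

    #include <linux/mm.h>
    #include <linux/ksm.h>
    #include <linux/mman.h>

    /* opt a VMA out of KSM merging; caller holds mmap_sem for write */
    static int svm_unmerge_vma(struct vm_area_struct *vma)
    {
        return ksm_madvise(vma, vma->vm_start, vma->vm_end,
                           MADV_UNMERGEABLE, &vma->vm_flags);
    }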
Signed-off-by: Bharata B Rao Acked-by: Hugh Dickins Cc: Andrea Arcangeli Signed-off-by: Paul Mackerras --- mm/ksm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/mm/ksm.c b/mm/ksm.c index dbee2eb4dd05..e45b02ad3f0b 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2478,6 +2478,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start, return 0; } +EXPORT_SYMBOL_GPL(ksm_madvise); int __ksm_enter(struct mm_struct *mm) { -- cgit v1.2.3 From ca9f4942670c37407bb109090eaf776ce2ccc54c Mon Sep 17 00:00:00 2001 From: Bharata B Rao Date: Mon, 25 Nov 2019 08:36:26 +0530 Subject: KVM: PPC: Book3S HV: Support for running secure guests A pseries guest can be run as secure guest on Ultravisor-enabled POWER platforms. On such platforms, this driver will be used to manage the movement of guest pages between the normal memory managed by hypervisor (HV) and secure memory managed by Ultravisor (UV). HV is informed about the guest's transition to secure mode via hcalls: H_SVM_INIT_START: Initiate securing a VM H_SVM_INIT_DONE: Conclude securing a VM As part of H_SVM_INIT_START, register all existing memslots with the UV. H_SVM_INIT_DONE call by UV informs HV that transition of the guest to secure mode is complete. These two states (transition to secure mode STARTED and transition to secure mode COMPLETED) are recorded in kvm->arch.secure_guest. Setting these states will cause the assembly code that enters the guest to call the UV_RETURN ucall instead of trying to enter the guest directly. Migration of pages betwen normal and secure memory of secure guest is implemented in H_SVM_PAGE_IN and H_SVM_PAGE_OUT hcalls. H_SVM_PAGE_IN: Move the content of a normal page to secure page H_SVM_PAGE_OUT: Move the content of a secure page to normal page Private ZONE_DEVICE memory equal to the amount of secure memory available in the platform for running secure guests is created. Whenever a page belonging to the guest becomes secure, a page from this private device memory is used to represent and track that secure page on the HV side. The movement of pages between normal and secure memory is done via migrate_vma_pages() using UV_PAGE_IN and UV_PAGE_OUT ucalls. In order to prevent the device private pages (that correspond to pages of secure guest) from participating in KSM merging, H_SVM_PAGE_IN calls ksm_madvise() under read version of mmap_sem. However ksm_madvise() needs to be under write lock. Hence we call kvmppc_svm_page_in with mmap_sem held for writing, and it then downgrades to a read lock after calling ksm_madvise. 
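The locking order described above, as a standalone sketch (illustrative only, mirroring what the H_SVM_PAGE_IN path in this patch does):

    down_write(&kvm->mm->mmap_sem);
    /* ksm_madvise(..., MADV_UNMERGEABLE, &vma->vm_flags) needs the write lock */
    downgrade_write(&kvm->mm->mmap_sem);   /* now held for read */
    /* migrate_vma_setup()/migrate_vma_pages() run under the read lock */
    up_read(&kvm->mm->mmap_sem);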
[paulus@ozlabs.org - roll in patch "KVM: PPC: Book3S HV: Take write mmap_sem when calling ksm_madvise"] Signed-off-by: Bharata B Rao Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/hvcall.h | 6 + arch/powerpc/include/asm/kvm_book3s_uvmem.h | 62 +++ arch/powerpc/include/asm/kvm_host.h | 6 + arch/powerpc/include/asm/ultravisor-api.h | 3 + arch/powerpc/include/asm/ultravisor.h | 21 + arch/powerpc/kvm/Makefile | 3 + arch/powerpc/kvm/book3s_hv.c | 29 ++ arch/powerpc/kvm/book3s_hv_uvmem.c | 639 ++++++++++++++++++++++++++++ 8 files changed, 769 insertions(+) create mode 100644 arch/powerpc/include/asm/kvm_book3s_uvmem.h create mode 100644 arch/powerpc/kvm/book3s_hv_uvmem.c diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 11112023e327..4150732c81a0 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -342,6 +342,12 @@ #define H_TLB_INVALIDATE 0xF808 #define H_COPY_TOFROM_GUEST 0xF80C +/* Platform-specific hcalls used by the Ultravisor */ +#define H_SVM_PAGE_IN 0xEF00 +#define H_SVM_PAGE_OUT 0xEF04 +#define H_SVM_INIT_START 0xEF08 +#define H_SVM_INIT_DONE 0xEF0C + /* Values for 2nd argument to H_SET_MODE */ #define H_SET_MODE_RESOURCE_SET_CIABR 1 #define H_SET_MODE_RESOURCE_SET_DAWR 2 diff --git a/arch/powerpc/include/asm/kvm_book3s_uvmem.h b/arch/powerpc/include/asm/kvm_book3s_uvmem.h new file mode 100644 index 000000000000..95f389c2937b --- /dev/null +++ b/arch/powerpc/include/asm/kvm_book3s_uvmem.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_KVM_BOOK3S_UVMEM_H__ +#define __ASM_KVM_BOOK3S_UVMEM_H__ + +#ifdef CONFIG_PPC_UV +int kvmppc_uvmem_init(void); +void kvmppc_uvmem_free(void); +int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot); +void kvmppc_uvmem_slot_free(struct kvm *kvm, + const struct kvm_memory_slot *slot); +unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, + unsigned long gra, + unsigned long flags, + unsigned long page_shift); +unsigned long kvmppc_h_svm_page_out(struct kvm *kvm, + unsigned long gra, + unsigned long flags, + unsigned long page_shift); +unsigned long kvmppc_h_svm_init_start(struct kvm *kvm); +unsigned long kvmppc_h_svm_init_done(struct kvm *kvm); +#else +static inline int kvmppc_uvmem_init(void) +{ + return 0; +} + +static inline void kvmppc_uvmem_free(void) { } + +static inline int +kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot) +{ + return 0; +} + +static inline void +kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot) { } + +static inline unsigned long +kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gra, + unsigned long flags, unsigned long page_shift) +{ + return H_UNSUPPORTED; +} + +static inline unsigned long +kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gra, + unsigned long flags, unsigned long page_shift) +{ + return H_UNSUPPORTED; +} + +static inline unsigned long kvmppc_h_svm_init_start(struct kvm *kvm) +{ + return H_UNSUPPORTED; +} + +static inline unsigned long kvmppc_h_svm_init_done(struct kvm *kvm) +{ + return H_UNSUPPORTED; +} +#endif /* CONFIG_PPC_UV */ +#endif /* __ASM_KVM_BOOK3S_UVMEM_H__ */ diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 4273e799203d..0a398f2321c2 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -275,6 +275,10 @@ struct kvm_hpt_info { struct kvm_resize_hpt; +/* Flag values for kvm_arch.secure_guest */ +#define KVMPPC_SECURE_INIT_START 
0x1 /* H_SVM_INIT_START has been called */ +#define KVMPPC_SECURE_INIT_DONE 0x2 /* H_SVM_INIT_DONE completed */ + struct kvm_arch { unsigned int lpid; unsigned int smt_mode; /* # vcpus per virtual core */ @@ -330,6 +334,8 @@ struct kvm_arch { #endif struct kvmppc_ops *kvm_ops; #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + struct mutex uvmem_lock; + struct list_head uvmem_pfns; struct mutex mmu_setup_lock; /* nests inside vcpu mutexes */ u64 l1_ptcr; int max_nested_lpid; diff --git a/arch/powerpc/include/asm/ultravisor-api.h b/arch/powerpc/include/asm/ultravisor-api.h index 4fcda1d5793d..2483f15bd71a 100644 --- a/arch/powerpc/include/asm/ultravisor-api.h +++ b/arch/powerpc/include/asm/ultravisor-api.h @@ -26,6 +26,9 @@ #define UV_WRITE_PATE 0xF104 #define UV_RETURN 0xF11C #define UV_ESM 0xF110 +#define UV_REGISTER_MEM_SLOT 0xF120 +#define UV_PAGE_IN 0xF128 +#define UV_PAGE_OUT 0xF12C #define UV_SHARE_PAGE 0xF130 #define UV_UNSHARE_PAGE 0xF134 #define UV_UNSHARE_ALL_PAGES 0xF140 diff --git a/arch/powerpc/include/asm/ultravisor.h b/arch/powerpc/include/asm/ultravisor.h index b1bc2e043ed4..79bb005e8ee9 100644 --- a/arch/powerpc/include/asm/ultravisor.h +++ b/arch/powerpc/include/asm/ultravisor.h @@ -46,4 +46,25 @@ static inline int uv_unshare_all_pages(void) return ucall_norets(UV_UNSHARE_ALL_PAGES); } +static inline int uv_page_in(u64 lpid, u64 src_ra, u64 dst_gpa, u64 flags, + u64 page_shift) +{ + return ucall_norets(UV_PAGE_IN, lpid, src_ra, dst_gpa, flags, + page_shift); +} + +static inline int uv_page_out(u64 lpid, u64 dst_ra, u64 src_gpa, u64 flags, + u64 page_shift) +{ + return ucall_norets(UV_PAGE_OUT, lpid, dst_ra, src_gpa, flags, + page_shift); +} + +static inline int uv_register_mem_slot(u64 lpid, u64 start_gpa, u64 size, + u64 flags, u64 slotid) +{ + return ucall_norets(UV_REGISTER_MEM_SLOT, lpid, start_gpa, + size, flags, slotid); +} + #endif /* _ASM_POWERPC_ULTRAVISOR_H */ diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index 4c67cc79de7c..2bfeaa13befb 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile @@ -71,6 +71,9 @@ kvm-hv-y += \ book3s_64_mmu_radix.o \ book3s_hv_nested.o +kvm-hv-$(CONFIG_PPC_UV) += \ + book3s_hv_uvmem.o + kvm-hv-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \ book3s_hv_tm.o diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index ec5c0379296a..03d56aeec714 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -72,6 +72,8 @@ #include #include #include +#include +#include #include "book3s.h" @@ -1070,6 +1072,25 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6)); break; + case H_SVM_PAGE_IN: + ret = kvmppc_h_svm_page_in(vcpu->kvm, + kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5), + kvmppc_get_gpr(vcpu, 6)); + break; + case H_SVM_PAGE_OUT: + ret = kvmppc_h_svm_page_out(vcpu->kvm, + kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5), + kvmppc_get_gpr(vcpu, 6)); + break; + case H_SVM_INIT_START: + ret = kvmppc_h_svm_init_start(vcpu->kvm); + break; + case H_SVM_INIT_DONE: + ret = kvmppc_h_svm_init_done(vcpu->kvm); + break; + default: return RESUME_HOST; } @@ -4767,6 +4788,8 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) char buf[32]; int ret; + mutex_init(&kvm->arch.uvmem_lock); + INIT_LIST_HEAD(&kvm->arch.uvmem_pfns); mutex_init(&kvm->arch.mmu_setup_lock); /* Allocate the guest's logical partition ID */ @@ -4938,6 +4961,7 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm) kvm->arch.process_table = 0; 
kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); } + kvmppc_free_lpid(kvm->arch.lpid); kvmppc_free_pimap(kvm); @@ -5528,11 +5552,16 @@ static int kvmppc_book3s_init_hv(void) no_mixing_hpt_and_radix = true; } + r = kvmppc_uvmem_init(); + if (r < 0) + pr_err("KVM-HV: kvmppc_uvmem_init failed %d\n", r); + return r; } static void kvmppc_book3s_exit_hv(void) { + kvmppc_uvmem_free(); kvmppc_free_host_rm_ops(); if (kvmppc_radix_possible()) kvmppc_radix_exit(); diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c new file mode 100644 index 000000000000..5b99537c6992 --- /dev/null +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -0,0 +1,639 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Secure pages management: Migration of pages between normal and secure + * memory of KVM guests. + * + * Copyright 2018 Bharata B Rao, IBM Corp. + */ + +/* + * A pseries guest can be run as secure guest on Ultravisor-enabled + * POWER platforms. On such platforms, this driver will be used to manage + * the movement of guest pages between the normal memory managed by + * hypervisor (HV) and secure memory managed by Ultravisor (UV). + * + * The page-in or page-out requests from UV will come to HV as hcalls and + * HV will call back into UV via ultracalls to satisfy these page requests. + * + * Private ZONE_DEVICE memory equal to the amount of secure memory + * available in the platform for running secure guests is hotplugged. + * Whenever a page belonging to the guest becomes secure, a page from this + * private device memory is used to represent and track that secure page + * on the HV side. + */ + +/* + * Notes on locking + * + * kvm->arch.uvmem_lock is a per-guest lock that prevents concurrent + * page-in and page-out requests for the same GPA. Concurrent accesses + * can either come via UV (guest vCPUs requesting for same page) + * or when HV and guest simultaneously access the same page. + * This mutex serializes the migration of page from HV(normal) to + * UV(secure) and vice versa. So the serialization points are around + * migrate_vma routines and page-in/out routines. + * + * Per-guest mutex comes with a cost though. Mainly it serializes the + * fault path as page-out can occur when HV faults on accessing secure + * guest pages. Currently UV issues page-in requests for all the guest + * PFNs one at a time during early boot (UV_ESM uvcall), so this is + * not a cause for concern. Also currently the number of page-outs caused + * by HV touching secure pages is very very low. If an when UV supports + * overcommitting, then we might see concurrent guest driven page-outs. + * + * Locking order + * + * 1. kvm->srcu - Protects KVM memslots + * 2. kvm->mm->mmap_sem - find_vma, migrate_vma_pages and helpers, ksm_madvise + * 3. kvm->arch.uvmem_lock - protects read/writes to uvmem slots thus acting + * as sync-points for page-in/out + */ + +/* + * Notes on page size + * + * Currently UV uses 2MB mappings internally, but will issue H_SVM_PAGE_IN + * and H_SVM_PAGE_OUT hcalls in PAGE_SIZE(64K) granularity. HV tracks + * secure GPAs at 64K page size and maintains one device PFN for each + * 64K secure GPA. UV_PAGE_IN and UV_PAGE_OUT calls by HV are also issued + * for 64K page at a time. + * + * HV faulting on secure pages: When HV touches any secure page, it + * faults and issues a UV_PAGE_OUT request with 64K page size. Currently + * UV splits and remaps the 2MB page if necessary and copies out the + * required 64K page contents. 
+ * + * In summary, the current secure pages handling code in HV assumes + * 64K page size and in fact fails any page-in/page-out requests of + * non-64K size upfront. If and when UV starts supporting multiple + * page-sizes, we need to break this assumption. + */ + +#include +#include +#include +#include +#include +#include +#include + +static struct dev_pagemap kvmppc_uvmem_pgmap; +static unsigned long *kvmppc_uvmem_bitmap; +static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock); + +#define KVMPPC_UVMEM_PFN (1UL << 63) + +struct kvmppc_uvmem_slot { + struct list_head list; + unsigned long nr_pfns; + unsigned long base_pfn; + unsigned long *pfns; +}; + +struct kvmppc_uvmem_page_pvt { + struct kvm *kvm; + unsigned long gpa; +}; + +int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot) +{ + struct kvmppc_uvmem_slot *p; + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return -ENOMEM; + p->pfns = vzalloc(array_size(slot->npages, sizeof(*p->pfns))); + if (!p->pfns) { + kfree(p); + return -ENOMEM; + } + p->nr_pfns = slot->npages; + p->base_pfn = slot->base_gfn; + + mutex_lock(&kvm->arch.uvmem_lock); + list_add(&p->list, &kvm->arch.uvmem_pfns); + mutex_unlock(&kvm->arch.uvmem_lock); + + return 0; +} + +/* + * All device PFNs are already released by the time we come here. + */ +void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot) +{ + struct kvmppc_uvmem_slot *p, *next; + + mutex_lock(&kvm->arch.uvmem_lock); + list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) { + if (p->base_pfn == slot->base_gfn) { + vfree(p->pfns); + list_del(&p->list); + kfree(p); + break; + } + } + mutex_unlock(&kvm->arch.uvmem_lock); +} + +static void kvmppc_uvmem_pfn_insert(unsigned long gfn, unsigned long uvmem_pfn, + struct kvm *kvm) +{ + struct kvmppc_uvmem_slot *p; + + list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) { + if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) { + unsigned long index = gfn - p->base_pfn; + + p->pfns[index] = uvmem_pfn | KVMPPC_UVMEM_PFN; + return; + } + } +} + +static void kvmppc_uvmem_pfn_remove(unsigned long gfn, struct kvm *kvm) +{ + struct kvmppc_uvmem_slot *p; + + list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) { + if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) { + p->pfns[gfn - p->base_pfn] = 0; + return; + } + } +} + +static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm, + unsigned long *uvmem_pfn) +{ + struct kvmppc_uvmem_slot *p; + + list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) { + if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) { + unsigned long index = gfn - p->base_pfn; + + if (p->pfns[index] & KVMPPC_UVMEM_PFN) { + if (uvmem_pfn) + *uvmem_pfn = p->pfns[index] & + ~KVMPPC_UVMEM_PFN; + return true; + } else + return false; + } + } + return false; +} + +unsigned long kvmppc_h_svm_init_start(struct kvm *kvm) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + int ret = H_SUCCESS; + int srcu_idx; + + if (!kvmppc_uvmem_bitmap) + return H_UNSUPPORTED; + + /* Only radix guests can be secure guests */ + if (!kvm_is_radix(kvm)) + return H_UNSUPPORTED; + + srcu_idx = srcu_read_lock(&kvm->srcu); + slots = kvm_memslots(kvm); + kvm_for_each_memslot(memslot, slots) { + if (kvmppc_uvmem_slot_init(kvm, memslot)) { + ret = H_PARAMETER; + goto out; + } + ret = uv_register_mem_slot(kvm->arch.lpid, + memslot->base_gfn << PAGE_SHIFT, + memslot->npages * PAGE_SIZE, + 0, memslot->id); + if (ret < 0) { + kvmppc_uvmem_slot_free(kvm, memslot); + ret = H_PARAMETER; + 
goto out; + } + } + kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_START; +out: + srcu_read_unlock(&kvm->srcu, srcu_idx); + return ret; +} + +unsigned long kvmppc_h_svm_init_done(struct kvm *kvm) +{ + if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) + return H_UNSUPPORTED; + + kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE; + pr_info("LPID %d went secure\n", kvm->arch.lpid); + return H_SUCCESS; +} + +/* + * Get a free device PFN from the pool + * + * Called when a normal page is moved to secure memory (UV_PAGE_IN). Device + * PFN will be used to keep track of the secure page on HV side. + * + * Called with kvm->arch.uvmem_lock held + */ +static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm) +{ + struct page *dpage = NULL; + unsigned long bit, uvmem_pfn; + struct kvmppc_uvmem_page_pvt *pvt; + unsigned long pfn_last, pfn_first; + + pfn_first = kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT; + pfn_last = pfn_first + + (resource_size(&kvmppc_uvmem_pgmap.res) >> PAGE_SHIFT); + + spin_lock(&kvmppc_uvmem_bitmap_lock); + bit = find_first_zero_bit(kvmppc_uvmem_bitmap, + pfn_last - pfn_first); + if (bit >= (pfn_last - pfn_first)) + goto out; + bitmap_set(kvmppc_uvmem_bitmap, bit, 1); + spin_unlock(&kvmppc_uvmem_bitmap_lock); + + pvt = kzalloc(sizeof(*pvt), GFP_KERNEL); + if (!pvt) + goto out_clear; + + uvmem_pfn = bit + pfn_first; + kvmppc_uvmem_pfn_insert(gpa >> PAGE_SHIFT, uvmem_pfn, kvm); + + pvt->gpa = gpa; + pvt->kvm = kvm; + + dpage = pfn_to_page(uvmem_pfn); + dpage->zone_device_data = pvt; + get_page(dpage); + lock_page(dpage); + return dpage; +out_clear: + spin_lock(&kvmppc_uvmem_bitmap_lock); + bitmap_clear(kvmppc_uvmem_bitmap, bit, 1); +out: + spin_unlock(&kvmppc_uvmem_bitmap_lock); + return NULL; +} + +/* + * Alloc a PFN from private device memory pool and copy page from normal + * memory to secure memory using UV_PAGE_IN uvcall. + */ +static int +kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start, + unsigned long end, unsigned long gpa, struct kvm *kvm, + unsigned long page_shift, bool *downgrade) +{ + unsigned long src_pfn, dst_pfn = 0; + struct migrate_vma mig; + struct page *spage; + unsigned long pfn; + struct page *dpage; + int ret = 0; + + memset(&mig, 0, sizeof(mig)); + mig.vma = vma; + mig.start = start; + mig.end = end; + mig.src = &src_pfn; + mig.dst = &dst_pfn; + + /* + * We come here with mmap_sem write lock held just for + * ksm_madvise(), otherwise we only need read mmap_sem. + * Hence downgrade to read lock once ksm_madvise() is done. + */ + ret = ksm_madvise(vma, vma->vm_start, vma->vm_end, + MADV_UNMERGEABLE, &vma->vm_flags); + downgrade_write(&kvm->mm->mmap_sem); + *downgrade = true; + if (ret) + return ret; + + ret = migrate_vma_setup(&mig); + if (ret) + return ret; + + if (!(*mig.src & MIGRATE_PFN_MIGRATE)) { + ret = -1; + goto out_finalize; + } + + dpage = kvmppc_uvmem_get_page(gpa, kvm); + if (!dpage) { + ret = -1; + goto out_finalize; + } + + pfn = *mig.src >> MIGRATE_PFN_SHIFT; + spage = migrate_pfn_to_page(*mig.src); + if (spage) + uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, + page_shift); + + *mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED; + migrate_vma_pages(&mig); +out_finalize: + migrate_vma_finalize(&mig); + return ret; +} + +/* + * H_SVM_PAGE_IN: Move page from normal memory to secure memory. 
+ */ +unsigned long +kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa, + unsigned long flags, unsigned long page_shift) +{ + bool downgrade = false; + unsigned long start, end; + struct vm_area_struct *vma; + int srcu_idx; + unsigned long gfn = gpa >> page_shift; + int ret; + + if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) + return H_UNSUPPORTED; + + if (page_shift != PAGE_SHIFT) + return H_P3; + + if (flags) + return H_P2; + + ret = H_PARAMETER; + srcu_idx = srcu_read_lock(&kvm->srcu); + down_write(&kvm->mm->mmap_sem); + + start = gfn_to_hva(kvm, gfn); + if (kvm_is_error_hva(start)) + goto out; + + mutex_lock(&kvm->arch.uvmem_lock); + /* Fail the page-in request of an already paged-in page */ + if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL)) + goto out_unlock; + + end = start + (1UL << page_shift); + vma = find_vma_intersection(kvm->mm, start, end); + if (!vma || vma->vm_start > start || vma->vm_end < end) + goto out_unlock; + + if (!kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift, + &downgrade)) + ret = H_SUCCESS; +out_unlock: + mutex_unlock(&kvm->arch.uvmem_lock); +out: + if (downgrade) + up_read(&kvm->mm->mmap_sem); + else + up_write(&kvm->mm->mmap_sem); + srcu_read_unlock(&kvm->srcu, srcu_idx); + return ret; +} + +/* + * Provision a new page on HV side and copy over the contents + * from secure memory using UV_PAGE_OUT uvcall. + */ +static int +kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start, + unsigned long end, unsigned long page_shift, + struct kvm *kvm, unsigned long gpa) +{ + unsigned long src_pfn, dst_pfn = 0; + struct migrate_vma mig; + struct page *dpage, *spage; + unsigned long pfn; + int ret = U_SUCCESS; + + memset(&mig, 0, sizeof(mig)); + mig.vma = vma; + mig.start = start; + mig.end = end; + mig.src = &src_pfn; + mig.dst = &dst_pfn; + + mutex_lock(&kvm->arch.uvmem_lock); + /* The requested page is already paged-out, nothing to do */ + if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL)) + goto out; + + ret = migrate_vma_setup(&mig); + if (ret) + return ret; + + spage = migrate_pfn_to_page(*mig.src); + if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE)) + goto out_finalize; + + if (!is_zone_device_page(spage)) + goto out_finalize; + + dpage = alloc_page_vma(GFP_HIGHUSER, vma, start); + if (!dpage) { + ret = -1; + goto out_finalize; + } + + lock_page(dpage); + pfn = page_to_pfn(dpage); + + ret = uv_page_out(kvm->arch.lpid, pfn << page_shift, + gpa, 0, page_shift); + if (ret == U_SUCCESS) + *mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED; + else { + unlock_page(dpage); + __free_page(dpage); + goto out_finalize; + } + + migrate_vma_pages(&mig); +out_finalize: + migrate_vma_finalize(&mig); +out: + mutex_unlock(&kvm->arch.uvmem_lock); + return ret; +} + +/* + * Fault handler callback that gets called when HV touches any page that + * has been moved to secure memory, we ask UV to give back the page by + * issuing UV_PAGE_OUT uvcall. + * + * This eventually results in dropping of device PFN and the newly + * provisioned page/PFN gets populated in QEMU page tables. + */ +static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf) +{ + struct kvmppc_uvmem_page_pvt *pvt = vmf->page->zone_device_data; + + if (kvmppc_svm_page_out(vmf->vma, vmf->address, + vmf->address + PAGE_SIZE, PAGE_SHIFT, + pvt->kvm, pvt->gpa)) + return VM_FAULT_SIGBUS; + else + return 0; +} + +/* + * Release the device PFN back to the pool + * + * Gets called when secure page becomes a normal page during H_SVM_PAGE_OUT. 
+ * Gets called with kvm->arch.uvmem_lock held. + */ +static void kvmppc_uvmem_page_free(struct page *page) +{ + unsigned long pfn = page_to_pfn(page) - + (kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT); + struct kvmppc_uvmem_page_pvt *pvt; + + spin_lock(&kvmppc_uvmem_bitmap_lock); + bitmap_clear(kvmppc_uvmem_bitmap, pfn, 1); + spin_unlock(&kvmppc_uvmem_bitmap_lock); + + pvt = page->zone_device_data; + page->zone_device_data = NULL; + kvmppc_uvmem_pfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm); + kfree(pvt); +} + +static const struct dev_pagemap_ops kvmppc_uvmem_ops = { + .page_free = kvmppc_uvmem_page_free, + .migrate_to_ram = kvmppc_uvmem_migrate_to_ram, +}; + +/* + * H_SVM_PAGE_OUT: Move page from secure memory to normal memory. + */ +unsigned long +kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa, + unsigned long flags, unsigned long page_shift) +{ + unsigned long gfn = gpa >> page_shift; + unsigned long start, end; + struct vm_area_struct *vma; + int srcu_idx; + int ret; + + if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) + return H_UNSUPPORTED; + + if (page_shift != PAGE_SHIFT) + return H_P3; + + if (flags) + return H_P2; + + ret = H_PARAMETER; + srcu_idx = srcu_read_lock(&kvm->srcu); + down_read(&kvm->mm->mmap_sem); + start = gfn_to_hva(kvm, gfn); + if (kvm_is_error_hva(start)) + goto out; + + end = start + (1UL << page_shift); + vma = find_vma_intersection(kvm->mm, start, end); + if (!vma || vma->vm_start > start || vma->vm_end < end) + goto out; + + if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa)) + ret = H_SUCCESS; +out: + up_read(&kvm->mm->mmap_sem); + srcu_read_unlock(&kvm->srcu, srcu_idx); + return ret; +} + +static u64 kvmppc_get_secmem_size(void) +{ + struct device_node *np; + int i, len; + const __be32 *prop; + u64 size = 0; + + np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware"); + if (!np) + goto out; + + prop = of_get_property(np, "secure-memory-ranges", &len); + if (!prop) + goto out_put; + + for (i = 0; i < len / (sizeof(*prop) * 4); i++) + size += of_read_number(prop + (i * 4) + 2, 2); + +out_put: + of_node_put(np); +out: + return size; +} + +int kvmppc_uvmem_init(void) +{ + int ret = 0; + unsigned long size; + struct resource *res; + void *addr; + unsigned long pfn_last, pfn_first; + + size = kvmppc_get_secmem_size(); + if (!size) { + /* + * Don't fail the initialization of kvm-hv module if + * the platform doesn't export ibm,uv-firmware node. + * Let normal guests run on such PEF-disabled platform. 
+ */ + pr_info("KVMPPC-UVMEM: No support for secure guests\n"); + goto out; + } + + res = request_free_mem_region(&iomem_resource, size, "kvmppc_uvmem"); + if (IS_ERR(res)) { + ret = PTR_ERR(res); + goto out; + } + + kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE; + kvmppc_uvmem_pgmap.res = *res; + kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops; + addr = memremap_pages(&kvmppc_uvmem_pgmap, NUMA_NO_NODE); + if (IS_ERR(addr)) { + ret = PTR_ERR(addr); + goto out_free_region; + } + + pfn_first = res->start >> PAGE_SHIFT; + pfn_last = pfn_first + (resource_size(res) >> PAGE_SHIFT); + kvmppc_uvmem_bitmap = kcalloc(BITS_TO_LONGS(pfn_last - pfn_first), + sizeof(unsigned long), GFP_KERNEL); + if (!kvmppc_uvmem_bitmap) { + ret = -ENOMEM; + goto out_unmap; + } + + pr_info("KVMPPC-UVMEM: Secure Memory size 0x%lx\n", size); + return ret; +out_unmap: + memunmap_pages(&kvmppc_uvmem_pgmap); +out_free_region: + release_mem_region(res->start, size); +out: + return ret; +} + +void kvmppc_uvmem_free(void) +{ + memunmap_pages(&kvmppc_uvmem_pgmap); + release_mem_region(kvmppc_uvmem_pgmap.res.start, + resource_size(&kvmppc_uvmem_pgmap.res)); + kfree(kvmppc_uvmem_bitmap); +} -- cgit v1.2.3 From 60f0a643aa44e4bed3a74ea671110707dd64d892 Mon Sep 17 00:00:00 2001 From: Bharata B Rao Date: Mon, 25 Nov 2019 08:36:27 +0530 Subject: KVM: PPC: Book3S HV: Shared pages support for secure guests A secure guest will share some of its pages with hypervisor (Eg. virtio bounce buffers etc). Support sharing of pages between hypervisor and ultravisor. Shared page is reachable via both HV and UV side page tables. Once a secure page is converted to shared page, the device page that represents the secure page is unmapped from the HV side page tables. Signed-off-by: Bharata B Rao Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/hvcall.h | 3 ++ arch/powerpc/kvm/book3s_hv_uvmem.c | 85 ++++++++++++++++++++++++++++++++++++-- 2 files changed, 84 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 4150732c81a0..13bd870609c3 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -342,6 +342,9 @@ #define H_TLB_INVALIDATE 0xF808 #define H_COPY_TOFROM_GUEST 0xF80C +/* Flags for H_SVM_PAGE_IN */ +#define H_PAGE_IN_SHARED 0x1 + /* Platform-specific hcalls used by the Ultravisor */ #define H_SVM_PAGE_IN 0xEF00 #define H_SVM_PAGE_OUT 0xEF04 diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index 5b99537c6992..4d0f544ad5f5 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -19,7 +19,10 @@ * available in the platform for running secure guests is hotplugged. * Whenever a page belonging to the guest becomes secure, a page from this * private device memory is used to represent and track that secure page - * on the HV side. + * on the HV side. Some pages (like virtio buffers, VPA pages etc) are + * shared between UV and HV. However such pages aren't represented by + * device private memory and mappings to shared memory exist in both + * UV and HV page tables. */ /* @@ -63,6 +66,9 @@ * UV splits and remaps the 2MB page if necessary and copies out the * required 64K page contents. * + * Shared pages: Whenever guest shares a secure page, UV will split and + * remap the 2MB page if required and issue H_SVM_PAGE_IN with 64K page size. 
+ * * In summary, the current secure pages handling code in HV assumes * 64K page size and in fact fails any page-in/page-out requests of * non-64K size upfront. If and when UV starts supporting multiple @@ -93,6 +99,7 @@ struct kvmppc_uvmem_slot { struct kvmppc_uvmem_page_pvt { struct kvm *kvm; unsigned long gpa; + bool skip_page_out; }; int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot) @@ -344,8 +351,64 @@ out_finalize: return ret; } +/* + * Shares the page with HV, thus making it a normal page. + * + * - If the page is already secure, then provision a new page and share + * - If the page is a normal page, share the existing page + * + * In the former case, uses dev_pagemap_ops.migrate_to_ram handler + * to unmap the device page from QEMU's page tables. + */ +static unsigned long +kvmppc_share_page(struct kvm *kvm, unsigned long gpa, unsigned long page_shift) +{ + + int ret = H_PARAMETER; + struct page *uvmem_page; + struct kvmppc_uvmem_page_pvt *pvt; + unsigned long pfn; + unsigned long gfn = gpa >> page_shift; + int srcu_idx; + unsigned long uvmem_pfn; + + srcu_idx = srcu_read_lock(&kvm->srcu); + mutex_lock(&kvm->arch.uvmem_lock); + if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) { + uvmem_page = pfn_to_page(uvmem_pfn); + pvt = uvmem_page->zone_device_data; + pvt->skip_page_out = true; + } + +retry: + mutex_unlock(&kvm->arch.uvmem_lock); + pfn = gfn_to_pfn(kvm, gfn); + if (is_error_noslot_pfn(pfn)) + goto out; + + mutex_lock(&kvm->arch.uvmem_lock); + if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) { + uvmem_page = pfn_to_page(uvmem_pfn); + pvt = uvmem_page->zone_device_data; + pvt->skip_page_out = true; + kvm_release_pfn_clean(pfn); + goto retry; + } + + if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, page_shift)) + ret = H_SUCCESS; + kvm_release_pfn_clean(pfn); + mutex_unlock(&kvm->arch.uvmem_lock); +out: + srcu_read_unlock(&kvm->srcu, srcu_idx); + return ret; +} + /* * H_SVM_PAGE_IN: Move page from normal memory to secure memory. + * + * H_PAGE_IN_SHARED flag makes the page shared which means that the same + * memory in is visible from both UV and HV. */ unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa, @@ -364,9 +427,12 @@ kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa, if (page_shift != PAGE_SHIFT) return H_P3; - if (flags) + if (flags & ~H_PAGE_IN_SHARED) return H_P2; + if (flags & H_PAGE_IN_SHARED) + return kvmppc_share_page(kvm, gpa, page_shift); + ret = H_PARAMETER; srcu_idx = srcu_read_lock(&kvm->srcu); down_write(&kvm->mm->mmap_sem); @@ -411,6 +477,7 @@ kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start, unsigned long src_pfn, dst_pfn = 0; struct migrate_vma mig; struct page *dpage, *spage; + struct kvmppc_uvmem_page_pvt *pvt; unsigned long pfn; int ret = U_SUCCESS; @@ -444,10 +511,20 @@ kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start, } lock_page(dpage); + pvt = spage->zone_device_data; pfn = page_to_pfn(dpage); - ret = uv_page_out(kvm->arch.lpid, pfn << page_shift, - gpa, 0, page_shift); + /* + * This function is used in two cases: + * - When HV touches a secure page, for which we do UV_PAGE_OUT + * - When a secure page is converted to shared page, we *get* + * the page to essentially unmap the device page. In this + * case we skip page-out. 
+ */ + if (!pvt->skip_page_out) + ret = uv_page_out(kvm->arch.lpid, pfn << page_shift, + gpa, 0, page_shift); + if (ret == U_SUCCESS) *mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED; else { -- cgit v1.2.3 From 008e359c76d85facb10d10fa21fd5bc8c3a4e5d6 Mon Sep 17 00:00:00 2001 From: Bharata B Rao Date: Mon, 25 Nov 2019 08:36:28 +0530 Subject: KVM: PPC: Book3S HV: Radix changes for secure guest - After the guest becomes secure, when we handle a page fault of a page belonging to SVM in HV, send that page to UV via UV_PAGE_IN. - Whenever a page is unmapped on the HV side, inform UV via UV_PAGE_INVAL. - Ensure all those routines that walk the secondary page tables of the guest don't do so in case of secure VM. For secure guest, the active secondary page tables are in secure memory and the secondary page tables in HV are freed when guest becomes secure. Signed-off-by: Bharata B Rao Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_book3s_uvmem.h | 6 ++++++ arch/powerpc/include/asm/ultravisor-api.h | 1 + arch/powerpc/include/asm/ultravisor.h | 5 +++++ arch/powerpc/kvm/book3s_64_mmu_radix.c | 22 ++++++++++++++++++++ arch/powerpc/kvm/book3s_hv_uvmem.c | 32 +++++++++++++++++++++++++++++ 5 files changed, 66 insertions(+) diff --git a/arch/powerpc/include/asm/kvm_book3s_uvmem.h b/arch/powerpc/include/asm/kvm_book3s_uvmem.h index 95f389c2937b..3033a9585b43 100644 --- a/arch/powerpc/include/asm/kvm_book3s_uvmem.h +++ b/arch/powerpc/include/asm/kvm_book3s_uvmem.h @@ -18,6 +18,7 @@ unsigned long kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long page_shift); unsigned long kvmppc_h_svm_init_start(struct kvm *kvm); unsigned long kvmppc_h_svm_init_done(struct kvm *kvm); +int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn); #else static inline int kvmppc_uvmem_init(void) { @@ -58,5 +59,10 @@ static inline unsigned long kvmppc_h_svm_init_done(struct kvm *kvm) { return H_UNSUPPORTED; } + +static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn) +{ + return -EFAULT; +} #endif /* CONFIG_PPC_UV */ #endif /* __ASM_KVM_BOOK3S_UVMEM_H__ */ diff --git a/arch/powerpc/include/asm/ultravisor-api.h b/arch/powerpc/include/asm/ultravisor-api.h index 2483f15bd71a..e774274ab30e 100644 --- a/arch/powerpc/include/asm/ultravisor-api.h +++ b/arch/powerpc/include/asm/ultravisor-api.h @@ -32,5 +32,6 @@ #define UV_SHARE_PAGE 0xF130 #define UV_UNSHARE_PAGE 0xF134 #define UV_UNSHARE_ALL_PAGES 0xF140 +#define UV_PAGE_INVAL 0xF138 #endif /* _ASM_POWERPC_ULTRAVISOR_API_H */ diff --git a/arch/powerpc/include/asm/ultravisor.h b/arch/powerpc/include/asm/ultravisor.h index 79bb005e8ee9..40cc8bace654 100644 --- a/arch/powerpc/include/asm/ultravisor.h +++ b/arch/powerpc/include/asm/ultravisor.h @@ -67,4 +67,9 @@ static inline int uv_register_mem_slot(u64 lpid, u64 start_gpa, u64 size, size, flags, slotid); } +static inline int uv_page_inval(u64 lpid, u64 gpa, u64 page_shift) +{ + return ucall_norets(UV_PAGE_INVAL, lpid, gpa, page_shift); +} + #endif /* _ASM_POWERPC_ULTRAVISOR_H */ diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 2d415c36a61d..9f6ba113ffe3 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -19,6 +19,8 @@ #include #include #include +#include +#include /* * Supported radix tree geometry. 
@@ -915,6 +917,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, if (!(dsisr & DSISR_PRTABLE_FAULT)) gpa |= ea & 0xfff; + if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) + return kvmppc_send_page_to_uv(kvm, gfn); + /* Get the corresponding memslot */ memslot = gfn_to_memslot(kvm, gfn); @@ -972,6 +977,11 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gpa = gfn << PAGE_SHIFT; unsigned int shift; + if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) { + uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT); + return 0; + } + ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); if (ptep && pte_present(*ptep)) kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot, @@ -989,6 +999,9 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, int ref = 0; unsigned long old, *rmapp; + if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) + return ref; + ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); if (ptep && pte_present(*ptep) && pte_young(*ptep)) { old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0, @@ -1013,6 +1026,9 @@ int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned int shift; int ref = 0; + if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) + return ref; + ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); if (ptep && pte_present(*ptep) && pte_young(*ptep)) ref = 1; @@ -1030,6 +1046,9 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm, int ret = 0; unsigned long old, *rmapp; + if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) + return ret; + ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) { ret = 1; @@ -1082,6 +1101,9 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm, unsigned long gpa; unsigned int shift; + if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) + return; + gpa = memslot->base_gfn << PAGE_SHIFT; spin_lock(&kvm->mmu_lock); for (n = memslot->npages; n; --n) { diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index 4d0f544ad5f5..ed51498b20ee 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -69,6 +69,17 @@ * Shared pages: Whenever guest shares a secure page, UV will split and * remap the 2MB page if required and issue H_SVM_PAGE_IN with 64K page size. * + * HV invalidating a page: When a regular page belonging to secure + * guest gets unmapped, HV informs UV with UV_PAGE_INVAL of 64K + * page size. Using 64K page size is correct here because any non-secure + * page will essentially be of 64K page size. Splitting by UV during sharing + * and page-out ensures this. + * + * Page fault handling: When HV handles page fault of a page belonging + * to secure guest, it sends that to UV with a 64K UV_PAGE_IN request. + * Using 64K size is correct here too as UV would have split the 2MB page + * into 64k mappings and would have done page-outs earlier. + * * In summary, the current secure pages handling code in HV assumes * 64K page size and in fact fails any page-in/page-out requests of * non-64K size upfront. 
If and when UV starts supporting multiple @@ -630,6 +641,27 @@ out: return ret; } +int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn) +{ + unsigned long pfn; + int ret = U_SUCCESS; + + pfn = gfn_to_pfn(kvm, gfn); + if (is_error_noslot_pfn(pfn)) + return -EFAULT; + + mutex_lock(&kvm->arch.uvmem_lock); + if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL)) + goto out; + + ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT, + 0, PAGE_SHIFT); +out: + kvm_release_pfn_clean(pfn); + mutex_unlock(&kvm->arch.uvmem_lock); + return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT; +} + static u64 kvmppc_get_secmem_size(void) { struct device_node *np; -- cgit v1.2.3 From c32622575dd0ecb6fd0b41e3a451bd58152971ba Mon Sep 17 00:00:00 2001 From: Bharata B Rao Date: Mon, 25 Nov 2019 08:36:29 +0530 Subject: KVM: PPC: Book3S HV: Handle memory plug/unplug to secure VM Register the new memslot with UV during plug and unregister the memslot during unplug. In addition, release all the device pages during unplug. Signed-off-by: Bharata B Rao Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_book3s_uvmem.h | 6 +++++ arch/powerpc/include/asm/ultravisor-api.h | 1 + arch/powerpc/include/asm/ultravisor.h | 5 ++++ arch/powerpc/kvm/book3s_64_mmu_radix.c | 3 +++ arch/powerpc/kvm/book3s_hv.c | 24 +++++++++++++++++++ arch/powerpc/kvm/book3s_hv_uvmem.c | 37 +++++++++++++++++++++++++++++ 6 files changed, 76 insertions(+) diff --git a/arch/powerpc/include/asm/kvm_book3s_uvmem.h b/arch/powerpc/include/asm/kvm_book3s_uvmem.h index 3033a9585b43..50204e228f16 100644 --- a/arch/powerpc/include/asm/kvm_book3s_uvmem.h +++ b/arch/powerpc/include/asm/kvm_book3s_uvmem.h @@ -19,6 +19,8 @@ unsigned long kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long kvmppc_h_svm_init_start(struct kvm *kvm); unsigned long kvmppc_h_svm_init_done(struct kvm *kvm); int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn); +void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free, + struct kvm *kvm); #else static inline int kvmppc_uvmem_init(void) { @@ -64,5 +66,9 @@ static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn) { return -EFAULT; } + +static inline void +kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free, + struct kvm *kvm) { } #endif /* CONFIG_PPC_UV */ #endif /* __ASM_KVM_BOOK3S_UVMEM_H__ */ diff --git a/arch/powerpc/include/asm/ultravisor-api.h b/arch/powerpc/include/asm/ultravisor-api.h index e774274ab30e..4b0d044caa2a 100644 --- a/arch/powerpc/include/asm/ultravisor-api.h +++ b/arch/powerpc/include/asm/ultravisor-api.h @@ -27,6 +27,7 @@ #define UV_RETURN 0xF11C #define UV_ESM 0xF110 #define UV_REGISTER_MEM_SLOT 0xF120 +#define UV_UNREGISTER_MEM_SLOT 0xF124 #define UV_PAGE_IN 0xF128 #define UV_PAGE_OUT 0xF12C #define UV_SHARE_PAGE 0xF130 diff --git a/arch/powerpc/include/asm/ultravisor.h b/arch/powerpc/include/asm/ultravisor.h index 40cc8bace654..b8e59b7b4ac8 100644 --- a/arch/powerpc/include/asm/ultravisor.h +++ b/arch/powerpc/include/asm/ultravisor.h @@ -67,6 +67,11 @@ static inline int uv_register_mem_slot(u64 lpid, u64 start_gpa, u64 size, size, flags, slotid); } +static inline int uv_unregister_mem_slot(u64 lpid, u64 slotid) +{ + return ucall_norets(UV_UNREGISTER_MEM_SLOT, lpid, slotid); +} + static inline int uv_page_inval(u64 lpid, u64 gpa, u64 page_shift) { return ucall_norets(UV_PAGE_INVAL, lpid, gpa, page_shift); diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 9f6ba113ffe3..da857c8ba6e4 100644 --- 
a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -1101,6 +1101,9 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm, unsigned long gpa; unsigned int shift; + if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START) + kvmppc_uvmem_drop_pages(memslot, kvm); + if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) return; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 03d56aeec714..a8e815648b0a 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -74,6 +74,7 @@ #include #include #include +#include #include "book3s.h" @@ -4515,6 +4516,29 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm, if (change == KVM_MR_FLAGS_ONLY && kvm_is_radix(kvm) && ((new->flags ^ old->flags) & KVM_MEM_LOG_DIRTY_PAGES)) kvmppc_radix_flush_memslot(kvm, old); + /* + * If UV hasn't yet called H_SVM_INIT_START, don't register memslots. + */ + if (!kvm->arch.secure_guest) + return; + + switch (change) { + case KVM_MR_CREATE: + if (kvmppc_uvmem_slot_init(kvm, new)) + return; + uv_register_mem_slot(kvm->arch.lpid, + new->base_gfn << PAGE_SHIFT, + new->npages * PAGE_SIZE, + 0, new->id); + break; + case KVM_MR_DELETE: + uv_unregister_mem_slot(kvm->arch.lpid, old->id); + kvmppc_uvmem_slot_free(kvm, old); + break; + default: + /* TODO: Handle KVM_MR_MOVE */ + break; + } } /* diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index ed51498b20ee..2de264fc3156 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -249,6 +249,43 @@ unsigned long kvmppc_h_svm_init_done(struct kvm *kvm) return H_SUCCESS; } +/* + * Drop device pages that we maintain for the secure guest + * + * We first mark the pages to be skipped from UV_PAGE_OUT when there + * is HV side fault on these pages. Next we *get* these pages, forcing + * fault on them, do fault time migration to replace the device PTEs in + * QEMU page table with normal PTEs from newly allocated pages. + */ +void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free, + struct kvm *kvm) +{ + int i; + struct kvmppc_uvmem_page_pvt *pvt; + unsigned long pfn, uvmem_pfn; + unsigned long gfn = free->base_gfn; + + for (i = free->npages; i; --i, ++gfn) { + struct page *uvmem_page; + + mutex_lock(&kvm->arch.uvmem_lock); + if (!kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) { + mutex_unlock(&kvm->arch.uvmem_lock); + continue; + } + + uvmem_page = pfn_to_page(uvmem_pfn); + pvt = uvmem_page->zone_device_data; + pvt->skip_page_out = true; + mutex_unlock(&kvm->arch.uvmem_lock); + + pfn = gfn_to_pfn(kvm, gfn); + if (is_error_noslot_pfn(pfn)) + continue; + kvm_release_pfn_clean(pfn); + } +} + /* * Get a free device PFN from the pool * -- cgit v1.2.3 From 22945688acd4d0ec2620b0670a53110401ed9c59 Mon Sep 17 00:00:00 2001 From: Bharata B Rao Date: Mon, 25 Nov 2019 08:36:30 +0530 Subject: KVM: PPC: Book3S HV: Support reset of secure guest Add support for reset of secure guest via a new ioctl KVM_PPC_SVM_OFF. This ioctl will be issued by QEMU during reset and includes the the following steps: - Release all device pages of the secure guest. - Ask UV to terminate the guest via UV_SVM_TERMINATE ucall - Unpin the VPA pages so that they can be migrated back to secure side when guest becomes secure again. This is required because pinned pages can't be migrated. - Reinit the partition scoped page tables After these steps, guest is ready to issue UV_ESM call once again to switch to secure mode. 
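From userspace, the new ioctl takes no argument; a minimal usage sketch (the helper name is hypothetical, not part of this series):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* issued by the VMM on the VM fd when the machine is reset */
    static int kvm_ppc_svm_off(int vm_fd)
    {
        return ioctl(vm_fd, KVM_PPC_SVM_OFF);  /* 0 on success */
    }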
Signed-off-by: Bharata B Rao Signed-off-by: Sukadev Bhattiprolu [Implementation of uv_svm_terminate() and its call from guest shutdown path] Signed-off-by: Ram Pai [Unpinning of VPA pages] Signed-off-by: Paul Mackerras --- Documentation/virt/kvm/api.txt | 18 +++++++ arch/powerpc/include/asm/kvm_ppc.h | 1 + arch/powerpc/include/asm/ultravisor-api.h | 1 + arch/powerpc/include/asm/ultravisor.h | 5 ++ arch/powerpc/kvm/book3s_hv.c | 90 +++++++++++++++++++++++++++++++ arch/powerpc/kvm/powerpc.c | 12 +++++ include/uapi/linux/kvm.h | 1 + 7 files changed, 128 insertions(+) diff --git a/Documentation/virt/kvm/api.txt b/Documentation/virt/kvm/api.txt index 49183add44e7..9fecec6b4e86 100644 --- a/Documentation/virt/kvm/api.txt +++ b/Documentation/virt/kvm/api.txt @@ -4149,6 +4149,24 @@ Valid values for 'action': #define KVM_PMU_EVENT_ALLOW 0 #define KVM_PMU_EVENT_DENY 1 +4.121 KVM_PPC_SVM_OFF + +Capability: basic +Architectures: powerpc +Type: vm ioctl +Parameters: none +Returns: 0 on successful completion, +Errors: + EINVAL: if ultravisor failed to terminate the secure guest + ENOMEM: if hypervisor failed to allocate new radix page tables for guest + +This ioctl is used to turn off the secure mode of the guest or transition +the guest from secure mode to normal mode. This is invoked when the guest +is reset. This has no effect if called for a normal guest. + +This ioctl issues an ultravisor call to terminate the secure guest, +unpins the VPA pages and releases all the device pages that are used to +track the secure pages by hypervisor. 5. The kvm_run structure ------------------------ diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index d63f649fe713..3d2f871241a8 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -322,6 +322,7 @@ struct kvmppc_ops { int size); int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr, int size); + int (*svm_off)(struct kvm *kvm); }; extern struct kvmppc_ops *kvmppc_hv_ops; diff --git a/arch/powerpc/include/asm/ultravisor-api.h b/arch/powerpc/include/asm/ultravisor-api.h index 4b0d044caa2a..b66f6db7be6c 100644 --- a/arch/powerpc/include/asm/ultravisor-api.h +++ b/arch/powerpc/include/asm/ultravisor-api.h @@ -34,5 +34,6 @@ #define UV_UNSHARE_PAGE 0xF134 #define UV_UNSHARE_ALL_PAGES 0xF140 #define UV_PAGE_INVAL 0xF138 +#define UV_SVM_TERMINATE 0xF13C #endif /* _ASM_POWERPC_ULTRAVISOR_API_H */ diff --git a/arch/powerpc/include/asm/ultravisor.h b/arch/powerpc/include/asm/ultravisor.h index b8e59b7b4ac8..790b0e63681f 100644 --- a/arch/powerpc/include/asm/ultravisor.h +++ b/arch/powerpc/include/asm/ultravisor.h @@ -77,4 +77,9 @@ static inline int uv_page_inval(u64 lpid, u64 gpa, u64 page_shift) return ucall_norets(UV_PAGE_INVAL, lpid, gpa, page_shift); } +static inline int uv_svm_terminate(u64 lpid) +{ + return ucall_norets(UV_SVM_TERMINATE, lpid); +} + #endif /* _ASM_POWERPC_ULTRAVISOR_H */ diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index a8e815648b0a..dc53578193ee 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4983,6 +4983,7 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm) if (nesting_enabled(kvm)) kvmhv_release_all_nested(kvm); kvm->arch.process_table = 0; + uv_svm_terminate(kvm->arch.lpid); kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); } @@ -5425,6 +5426,94 @@ static int kvmhv_store_to_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr, return rc; } +static void unpin_vpa_reset(struct kvm *kvm, struct 
kvmppc_vpa *vpa) +{ + unpin_vpa(kvm, vpa); + vpa->gpa = 0; + vpa->pinned_addr = NULL; + vpa->dirty = false; + vpa->update_pending = 0; +} + +/* + * IOCTL handler to turn off secure mode of guest + * + * - Release all device pages + * - Issue ucall to terminate the guest on the UV side + * - Unpin the VPA pages. + * - Reinit the partition scoped page tables + */ +static int kvmhv_svm_off(struct kvm *kvm) +{ + struct kvm_vcpu *vcpu; + int mmu_was_ready; + int srcu_idx; + int ret = 0; + int i; + + if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) + return ret; + + mutex_lock(&kvm->arch.mmu_setup_lock); + mmu_was_ready = kvm->arch.mmu_ready; + if (kvm->arch.mmu_ready) { + kvm->arch.mmu_ready = 0; + /* order mmu_ready vs. vcpus_running */ + smp_mb(); + if (atomic_read(&kvm->arch.vcpus_running)) { + kvm->arch.mmu_ready = 1; + ret = -EBUSY; + goto out; + } + } + + srcu_idx = srcu_read_lock(&kvm->srcu); + for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { + struct kvm_memory_slot *memslot; + struct kvm_memslots *slots = __kvm_memslots(kvm, i); + + if (!slots) + continue; + + kvm_for_each_memslot(memslot, slots) { + kvmppc_uvmem_drop_pages(memslot, kvm); + uv_unregister_mem_slot(kvm->arch.lpid, memslot->id); + } + } + srcu_read_unlock(&kvm->srcu, srcu_idx); + + ret = uv_svm_terminate(kvm->arch.lpid); + if (ret != U_SUCCESS) { + ret = -EINVAL; + goto out; + } + + /* + * When secure guest is reset, all the guest pages are sent + * to UV via UV_PAGE_IN before the non-boot vcpus get a + * chance to run and unpin their VPA pages. Unpinning of all + * VPA pages is done here explicitly so that VPA pages + * can be migrated to the secure side. + * + * This is required to for the secure SMP guest to reboot + * correctly. + */ + kvm_for_each_vcpu(i, vcpu, kvm) { + spin_lock(&vcpu->arch.vpa_update_lock); + unpin_vpa_reset(kvm, &vcpu->arch.dtl); + unpin_vpa_reset(kvm, &vcpu->arch.slb_shadow); + unpin_vpa_reset(kvm, &vcpu->arch.vpa); + spin_unlock(&vcpu->arch.vpa_update_lock); + } + + kvmppc_setup_partition_table(kvm); + kvm->arch.secure_guest = 0; + kvm->arch.mmu_ready = mmu_was_ready; +out: + mutex_unlock(&kvm->arch.mmu_setup_lock); + return ret; +} + static struct kvmppc_ops kvm_ops_hv = { .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv, .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv, @@ -5468,6 +5557,7 @@ static struct kvmppc_ops kvm_ops_hv = { .enable_nested = kvmhv_enable_nested, .load_from_eaddr = kvmhv_load_from_eaddr, .store_to_eaddr = kvmhv_store_to_eaddr, + .svm_off = kvmhv_svm_off, }; static int kvm_init_subcore_bitmap(void) diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 9e085e931d74..416fb3d2a1d0 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -31,6 +31,8 @@ #include #include #endif +#include +#include #include "timing.h" #include "irq.h" @@ -2413,6 +2415,16 @@ long kvm_arch_vm_ioctl(struct file *filp, r = -EFAULT; break; } + case KVM_PPC_SVM_OFF: { + struct kvm *kvm = filp->private_data; + + r = 0; + if (!kvm->arch.kvm_ops->svm_off) + goto out; + + r = kvm->arch.kvm_ops->svm_off(kvm); + break; + } default: { struct kvm *kvm = filp->private_data; r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg); diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index e6f17c8e2dba..f0a16b4adbbd 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1348,6 +1348,7 @@ struct kvm_s390_ucas_mapping { #define KVM_PPC_GET_CPU_CHAR _IOR(KVMIO, 0xb1, struct kvm_ppc_cpu_char) /* Available with KVM_CAP_PMU_EVENT_FILTER */ #define 
KVM_SET_PMU_EVENT_FILTER _IOW(KVMIO, 0xb2, struct kvm_pmu_event_filter) +#define KVM_PPC_SVM_OFF _IO(KVMIO, 0xb3) /* ioctl for vm fd */ #define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device) -- cgit v1.2.3 From 013a53f2d25a9fa9b9e1f70f5baa3f56e3454052 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 25 Nov 2019 08:36:31 +0530 Subject: powerpc: Ultravisor: Add PPC_UV config option CONFIG_PPC_UV adds support for the ultravisor. Signed-off-by: Anshuman Khandual Signed-off-by: Bharata B Rao Signed-off-by: Ram Pai [ Update config help and commit message ] Signed-off-by: Claudio Carvalho Reviewed-by: Sukadev Bhattiprolu Signed-off-by: Paul Mackerras --- arch/powerpc/Kconfig | 17 +++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 3e56c9c2f16e..d7fef29b47c9 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -451,6 +451,23 @@ config PPC_TRANSACTIONAL_MEM help Support user-mode Transactional Memory on POWERPC. +config PPC_UV + bool "Ultravisor support" + depends on KVM_BOOK3S_HV_POSSIBLE + select ZONE_DEVICE + select DEV_PAGEMAP_OPS + select DEVICE_PRIVATE + select MEMORY_HOTPLUG + select MEMORY_HOTREMOVE + default n + help + This option paravirtualizes the kernel to run on POWER platforms that + support the Protected Execution Facility (PEF). On such platforms, + the ultravisor firmware runs at a privilege level above the + hypervisor. + + If unsure, say "N". + config LD_HEAD_STUB_CATCH bool "Reserve 256 bytes to cope with linker stubs in HEAD text" if EXPERT depends on PPC64 -- cgit v1.2.3 From 80b10aa92448915d35e9f65591e9325397dc40fe Mon Sep 17 00:00:00 2001 From: Wainer dos Santos Moschetta Date: Fri, 29 Nov 2019 13:17:30 -0500 Subject: Documentation: kvm: Fix mention to number of ioctls classes In api.txt it is said that KVM ioctls belong to three classes, but in reality there are four. Fixed this, but do not count the categories anymore, to avoid such outdated information in the future. Signed-off-by: Wainer dos Santos Moschetta Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/api.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/virt/kvm/api.txt b/Documentation/virt/kvm/api.txt index 49183add44e7..cc8d18b5223e 100644 --- a/Documentation/virt/kvm/api.txt +++ b/Documentation/virt/kvm/api.txt @@ -5,7 +5,7 @@ The Definitive KVM (Kernel-based Virtual Machine) API Documentation ---------------------- The kvm API is a set of ioctls that are issued to control various aspects -of a virtual machine. The ioctls belong to three classes: +of a virtual machine. The ioctls belong to the following classes: - System ioctls: These query and set global attributes which affect the whole kvm subsystem. In addition a system ioctl is used to create -- cgit v1.2.3 From 433f4ba1904100da65a311033f17a9bf586b287e Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 4 Dec 2019 10:28:54 +0100 Subject: KVM: x86: fix out-of-bounds write in KVM_GET_EMULATED_CPUID (CVE-2019-19332) The bounds check was present in KVM_GET_SUPPORTED_CPUID but not KVM_GET_EMULATED_CPUID.
Reported-by: syzbot+e3f4897236c4eeb8af4f@syzkaller.appspotmail.com Fixes: 84cffe499b94 ("kvm: Emulate MOVBE", 2013-10-29) Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 813a4d2e5c0c..cfafa320a8cf 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -504,7 +504,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function, r = -E2BIG; - if (*nent >= maxnent) + if (WARN_ON(*nent >= maxnent)) goto out; do_host_cpuid(entry, function, 0); @@ -815,6 +815,9 @@ out: static int do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 func, int *nent, int maxnent, unsigned int type) { + if (*nent >= maxnent) + return -E2BIG; + if (type == KVM_GET_EMULATED_CPUID) return __do_cpuid_func_emulated(entry, func, nent, maxnent); -- cgit v1.2.3 From 7d73710d9ca2564f29d291d0b3badc09efdf25e9 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Tue, 3 Dec 2019 16:24:42 -0800 Subject: kvm: vmx: Stop wasting a page for guest_msrs We will never need more guest_msrs than there are indices in vmx_msr_index. Thus, at present, the guest_msrs array will not exceed 168 bytes. Signed-off-by: Jim Mattson Reviewed-by: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 12 ++---------- arch/x86/kvm/vmx/vmx.h | 8 +++++++- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index d175429c91b0..e7ea332ad1e8 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6674,7 +6674,6 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu) free_vpid(vmx->vpid); nested_vmx_free_vcpu(vcpu); free_loaded_vmcs(vmx->loaded_vmcs); - kfree(vmx->guest_msrs); kvm_vcpu_uninit(vcpu); kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.user_fpu); kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu); @@ -6731,12 +6730,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) goto uninit_vcpu; } - vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); - BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0]) - > PAGE_SIZE); - - if (!vmx->guest_msrs) - goto free_pml; + BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) != NR_SHARED_MSRS); for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) { u32 index = vmx_msr_index[i]; @@ -6768,7 +6762,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) err = alloc_loaded_vmcs(&vmx->vmcs01); if (err < 0) - goto free_msrs; + goto free_pml; msr_bitmap = vmx->vmcs01.msr_bitmap; vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_TSC, MSR_TYPE_R); @@ -6830,8 +6824,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) free_vmcs: free_loaded_vmcs(vmx->loaded_vmcs); -free_msrs: - kfree(vmx->guest_msrs); free_pml: vmx_destroy_pml_buffer(vmx); uninit_vcpu: diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 7c1b978b2df4..a4f7f737c5d4 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -22,6 +22,12 @@ extern u32 get_umwait_control_msr(void); #define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4)) +#ifdef CONFIG_X86_64 +#define NR_SHARED_MSRS 7 +#else +#define NR_SHARED_MSRS 4 +#endif + #define NR_LOADSTORE_MSRS 8 struct vmx_msrs { @@ -206,7 +212,7 @@ struct vcpu_vmx { u32 idt_vectoring_info; ulong rflags; - struct shared_msr_entry *guest_msrs; + struct shared_msr_entry guest_msrs[NR_SHARED_MSRS]; int nmsrs; int save_nmsrs; bool guest_msrs_ready; -- cgit v1.2.3 From 
d89c69f42bf0fe42d1f52ea9b3dca15b1ade7601 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 18 Dec 2019 11:43:06 +1100 Subject: KVM: PPC: Book3S HV: Don't do ultravisor calls on systems without ultravisor Commit 22945688acd4 ("KVM: PPC: Book3S HV: Support reset of secure guest") added a call to uv_svm_terminate, which is an ultravisor call, without any check that the guest is a secure guest or even that the system has an ultravisor. On a system without an ultravisor, the ultracall will degenerate to a hypercall, but since we are not in KVM guest context, the hypercall will get treated as a system call, which could have random effects depending on what happens to be in r0, and could also corrupt the current task's kernel stack. Hence this adds a test for the guest being a secure guest before doing uv_svm_terminate(). Fixes: 22945688acd4 ("KVM: PPC: Book3S HV: Support reset of secure guest") Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index dc53578193ee..6ff3f896d908 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4983,7 +4983,8 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm) if (nesting_enabled(kvm)) kvmhv_release_all_nested(kvm); kvm->arch.process_table = 0; - uv_svm_terminate(kvm->arch.lpid); + if (kvm->arch.secure_guest) + uv_svm_terminate(kvm->arch.lpid); kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); } -- cgit v1.2.3
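As a usage illustration for the KVM_PPC_SVM_OFF ioctl documented in the api.txt hunk above, the sketch below shows how a userspace VMM might issue it when the guest is reset. The wrapper name, the fd handling and the error reporting are assumptions made for the example; only the ioctl name and its EINVAL/ENOMEM error semantics come from the documentation above.

/*
 * Illustrative sketch only: invoke KVM_PPC_SVM_OFF on the VM file
 * descriptor at guest reset. Per the documentation above, the call
 * has no effect on a normal (non-secure) guest.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* vm_fd is assumed to have been obtained earlier via KVM_CREATE_VM. */
static int svm_off_on_reset(int vm_fd)
{
	if (ioctl(vm_fd, KVM_PPC_SVM_OFF, 0) < 0) {
		/* EINVAL: ultravisor termination failed; ENOMEM: no radix tables */
		fprintf(stderr, "KVM_PPC_SVM_OFF: %s\n", strerror(errno));
		return -errno;
	}
	return 0;
}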
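The "168 bytes" figure quoted in the guest_msrs commit above can be checked with a small standalone program. The struct layout below is an assumption based on the shared_msr_entry definition of that era and is not taken from the diff itself; only NR_SHARED_MSRS == 7 comes from the vmx.h hunk shown above.

/*
 * Standalone sanity check of the size claim: with an index field plus
 * two u64 fields, each entry is 24 bytes after padding, so 7 entries
 * occupy 7 * 24 = 168 bytes rather than a whole page.
 */
#include <stdint.h>
#include <stdio.h>

struct shared_msr_entry {
	unsigned int index;	/* 4 bytes, followed by 4 bytes of padding */
	uint64_t data;
	uint64_t mask;
};

#define NR_SHARED_MSRS 7	/* CONFIG_X86_64 value from the vmx.h hunk */

int main(void)
{
	printf("guest_msrs: %zu bytes\n",
	       NR_SHARED_MSRS * sizeof(struct shared_msr_entry));
	return 0;
}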