From 8e39efd840b8d4eae5ab398b43e20ffaff0010cc Mon Sep 17 00:00:00 2001 From: David Matlack Date: Tue, 10 May 2022 15:40:34 -0700 Subject: KVM: VMX: Print VM-instruction error when it may be helpful Include the value of the "VM-instruction error" field from the current VMCS (if any) in the error message for VMCLEAR and VMPTRLD, since each of these instructions may result in more than one VM-instruction error. Previously, this field was only reported for VMWRITE errors. Signed-off-by: David Matlack [Rebased and refactored code; dropped the error number for INVVPID and INVEPT; reworded commit message.] Signed-off-by: Jim Mattson Message-Id: <20220510224035.1792952-1-jmattson@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index f5aeade623d6..673ba5ca0beb 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -392,12 +392,14 @@ noinline void vmwrite_error(unsigned long field, unsigned long value) noinline void vmclear_error(struct vmcs *vmcs, u64 phys_addr) { - vmx_insn_failed("kvm: vmclear failed: %p/%llx\n", vmcs, phys_addr); + vmx_insn_failed("kvm: vmclear failed: %p/%llx err=%u\n", + vmcs, phys_addr, vmcs_read32(VM_INSTRUCTION_ERROR)); } noinline void vmptrld_error(struct vmcs *vmcs, u64 phys_addr) { - vmx_insn_failed("kvm: vmptrld failed: %p/%llx\n", vmcs, phys_addr); + vmx_insn_failed("kvm: vmptrld failed: %p/%llx err=%u\n", + vmcs, phys_addr, vmcs_read32(VM_INSTRUCTION_ERROR)); } noinline void invvpid_error(unsigned long ext, u16 vpid, gva_t gva) -- cgit v1.2.3 From cc07e60b0811eeeca769fb342aa6e13da5977657 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Tue, 10 May 2022 15:40:35 -0700 Subject: KVM: VMX: Print VM-instruction error as unsigned Change the printf format character from 'd' to 'u' for the VM-instruction error in vmwrite_error(). Fixes: 6aa8b732ca01 ("[PATCH] kvm: userspace interface") Reported-by: Sean Christopherson Signed-off-by: Jim Mattson Message-Id: <20220510224035.1792952-2-jmattson@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 673ba5ca0beb..e0d3bea73b28 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -386,7 +386,7 @@ asmlinkage void vmread_error(unsigned long field, bool fault) noinline void vmwrite_error(unsigned long field, unsigned long value) { - vmx_insn_failed("kvm: vmwrite failed: field=%lx val=%lx err=%d\n", + vmx_insn_failed("kvm: vmwrite failed: field=%lx val=%lx err=%u\n", field, value, vmcs_read32(VM_INSTRUCTION_ERROR)); } -- cgit v1.2.3 From 0471a7bd1bca2a47a5f378f2222c5cf39ce94152 Mon Sep 17 00:00:00 2001 From: Lev Kujawski Date: Sat, 21 May 2022 08:15:11 +0000 Subject: KVM: set_msr_mce: Permit guests to ignore single-bit ECC errors Certain guest operating systems (e.g., UNIXWARE) clear bit 0 of MC1_CTL to ignore single-bit ECC data errors. Single-bit ECC data errors are always correctable and thus are safe to ignore because they are informational in nature rather than signaling a loss of data integrity. 
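As an illustration (not part of the patch, and the helper name below is
invented for exposition), the MCi_CTL write check now amounts to the
following predicate, with bit 0 simply joining bit 10 in the mask of
bits a guest is allowed to leave cleared:

	/* Sketch of the IA32_MCi_CTL write check in set_msr_mce();
	 * mci_ctl_write_allowed() is a made-up name for exposition.
	 */
	static bool mci_ctl_write_allowed(u64 data)
	{
		/* bit 10: AMD K8 BIOS/GART TBL workaround (pre-existing)
		 * bit 0:  single-bit ECC ignore (added by this patch)
		 */
		const u64 ignorable = (1ull << 10) | 1ull;

		return data == 0 || (data | ignorable) == ~(u64)0;
	}

A write of all 1s except bit 0, as UNIXWARE performs, is now accepted,
while arbitrary values are still rejected.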
Prior to this patch, these guests would crash upon writing MC1_CTL,
with resultant error messages like the following:

error: kvm run failed Operation not permitted
EAX=fffffffe EBX=fffffffe ECX=00000404 EDX=ffffffff
ESI=ffffffff EDI=00000001 EBP=fffdaba4 ESP=fffdab20
EIP=c01333a5 EFL=00000246 [---Z-P-] CPL=0 II=0 A20=1 SMM=0 HLT=0
ES =0108 00000000 ffffffff 00c09300 DPL=0 DS   [-WA]
CS =0100 00000000 ffffffff 00c09b00 DPL=0 CS32 [-RA]
SS =0108 00000000 ffffffff 00c09300 DPL=0 DS   [-WA]
DS =0108 00000000 ffffffff 00c09300 DPL=0 DS   [-WA]
FS =0000 00000000 ffffffff 00c00000
GS =0000 00000000 ffffffff 00c00000
LDT=0118 c1026390 00000047 00008200 DPL=0 LDT
TR =0110 ffff5af0 00000067 00008b00 DPL=0 TSS32-busy
GDT=     ffff5020 000002cf
IDT=     ffff52f0 000007ff
CR0=8001003b CR2=00000000 CR3=0100a000 CR4=00000230
DR0=00000000 DR1=00000000 DR2=00000000 DR3=00000000
DR6=ffff0ff0 DR7=00000400
EFER=0000000000000000
Code=08 89 01 89 51 04 c3 8b 4c 24 08 8b 01 8b 51 04 8b 4c 24 04 <0f> 30 c3 f7 05 a4 6d ff ff 10 00 00 00 74 03 0f 31 c3 33 c0 33 d2 c3 8d 74 26 00 0f 31 c3

Signed-off-by: Lev Kujawski
Message-Id: <20220521081511.187388-1-lkujaw@member.fsf.org>
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/x86.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b81ef4f497f4..b5aeed18b9f5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3234,10 +3234,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		/* only 0 or all 1s can be written to IA32_MCi_CTL
 		 * some Linux kernels though clear bit 10 in bank 4 to
 		 * workaround a BIOS/GART TBL issue on AMD K8s, ignore
-		 * this to avoid an uncatched #GP in the guest
+		 * this to avoid an uncatched #GP in the guest.
+		 *
+		 * UNIXWARE clears bit 0 of MC1_CTL to ignore
+		 * correctable, single-bit ECC data errors.
 		 */
 		if ((offset & 0x3) == 0 &&
-		    data != 0 && (data | (1 << 10)) != ~(u64)0)
+		    data != 0 && (data | (1 << 10) | 1) != ~(u64)0)
 			return -1;

 		/* MCi_STATUS */
-- cgit v1.2.3

From 345b0fd6fe5f66dfe841bad0b39dd11a5672df68 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 29 Apr 2022 21:00:20 +0000
Subject: KVM: Drop unused @gpa param from gfn=>pfn cache's __release_gpc() helper

Drop the @gpa param from __release_gpc() and rename the helper to make
it more obvious that the cache itself is not being released.  The helper
will be reused by a future commit to release a pfn+khva combination that
is _never_ associated with the cache, at which point the current name
would go from slightly misleading to blatantly wrong.

No functional change intended.
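For reference, the before/after call shape, condensed from the diff
below (illustrative only):

	/* Before: @gpa is threaded through but never used. */
	__release_gpc(kvm, old_pfn, old_khva, old_gpa);

	/* After: the name says only the pfn and its kernel mapping
	 * are released, not the cache itself.
	 */
	gpc_release_pfn_and_khva(kvm, old_pfn, old_khva);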
Cc: stable@vger.kernel.org Signed-off-by: Sean Christopherson Message-Id: <20220429210025.3293691-4-seanjc@google.com> Signed-off-by: Paolo Bonzini --- virt/kvm/pfncache.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c index dd84676615f1..e05a6a1b8eff 100644 --- a/virt/kvm/pfncache.c +++ b/virt/kvm/pfncache.c @@ -95,7 +95,7 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, } EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_check); -static void __release_gpc(struct kvm *kvm, kvm_pfn_t pfn, void *khva, gpa_t gpa) +static void gpc_release_pfn_and_khva(struct kvm *kvm, kvm_pfn_t pfn, void *khva) { /* Unmap the old page if it was mapped before, and release it */ if (!is_error_noslot_pfn(pfn)) { @@ -146,7 +146,6 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, unsigned long page_offset = gpa & ~PAGE_MASK; kvm_pfn_t old_pfn, new_pfn; unsigned long old_uhva; - gpa_t old_gpa; void *old_khva; bool old_valid; int ret = 0; @@ -160,7 +159,6 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, write_lock_irq(&gpc->lock); - old_gpa = gpc->gpa; old_pfn = gpc->pfn; old_khva = gpc->khva - offset_in_page(gpc->khva); old_uhva = gpc->uhva; @@ -244,7 +242,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, out: write_unlock_irq(&gpc->lock); - __release_gpc(kvm, old_pfn, old_khva, old_gpa); + gpc_release_pfn_and_khva(kvm, old_pfn, old_khva); return ret; } @@ -254,14 +252,12 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc) { void *old_khva; kvm_pfn_t old_pfn; - gpa_t old_gpa; write_lock_irq(&gpc->lock); gpc->valid = false; old_khva = gpc->khva - offset_in_page(gpc->khva); - old_gpa = gpc->gpa; old_pfn = gpc->pfn; /* @@ -273,7 +269,7 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc) write_unlock_irq(&gpc->lock); - __release_gpc(kvm, old_pfn, old_khva, old_gpa); + gpc_release_pfn_and_khva(kvm, old_pfn, old_khva); } EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap); -- cgit v1.2.3 From 3dddf65b4f4c451c345d34ae85bdf1791a746e49 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 29 Apr 2022 21:00:21 +0000 Subject: KVM: Put the extra pfn reference when reusing a pfn in the gpc cache Put the struct page reference to pfn acquired by hva_to_pfn() when the old and new pfns for a gfn=>pfn cache match. The cache already has a reference via the old/current pfn, and will only put one reference when the cache is done with the pfn. Fixes: 982ed0de4753 ("KVM: Reinstate gfn_to_pfn_cache with invalidation support") Cc: stable@vger.kernel.org Signed-off-by: Sean Christopherson Message-Id: <20220429210025.3293691-5-seanjc@google.com> Signed-off-by: Paolo Bonzini --- virt/kvm/pfncache.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c index e05a6a1b8eff..40cbe90d52e0 100644 --- a/virt/kvm/pfncache.c +++ b/virt/kvm/pfncache.c @@ -206,6 +206,14 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, if (gpc->usage & KVM_HOST_USES_PFN) { if (new_pfn == old_pfn) { + /* + * Reuse the existing pfn and khva, but put the + * reference acquired hva_to_pfn_retry(); the + * cache still holds a reference to the pfn + * from the previous refresh. 
+ */ + gpc_release_pfn_and_khva(kvm, new_pfn, NULL); + new_khva = old_khva; old_pfn = KVM_PFN_ERR_FAULT; old_khva = NULL; -- cgit v1.2.3 From 3ba2c95ea180740b16281fa43a3ee5f47279c0ed Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 29 Apr 2022 21:00:22 +0000 Subject: KVM: Do not incorporate page offset into gfn=>pfn cache user address Don't adjust the userspace address in the gfn=>pfn cache by the page offset from the gpa. KVM should never use the user address directly, and all KVM operations that translate a user address to something else require the user address to be page aligned. Ignoring the offset will allow the cache to reuse a gfn=>hva translation in the unlikely event that the page offset of the gpa changes, but the gfn does not. And more importantly, not having to (un)adjust the user address will simplify a future bug fix. Cc: stable@vger.kernel.org Signed-off-by: Sean Christopherson Message-Id: <20220429210025.3293691-6-seanjc@google.com> Signed-off-by: Paolo Bonzini --- virt/kvm/pfncache.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c index 40cbe90d52e0..05cb0bcbf662 100644 --- a/virt/kvm/pfncache.c +++ b/virt/kvm/pfncache.c @@ -179,8 +179,6 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, ret = -EFAULT; goto out; } - - gpc->uhva += page_offset; } /* -- cgit v1.2.3 From 93984f19e7bce4c18084a6ef3dacafb155b806ed Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 29 Apr 2022 21:00:23 +0000 Subject: KVM: Fully serialize gfn=>pfn cache refresh via mutex Protect gfn=>pfn cache refresh with a mutex to fully serialize refreshes. The refresh logic doesn't protect against - concurrent unmaps, or refreshes with different GPAs (which may or may not happen in practice, for example if a cache is only used under vcpu->mutex; but it's allowed in the code) - a false negative on the memslot generation. If the first refresh sees a stale memslot generation, it will refresh the hva and generation before moving on to the hva=>pfn translation. If it then drops gpc->lock, a different user of the cache can come along, acquire gpc->lock, see that the memslot generation is fresh, and skip the hva=>pfn update due to the userspace address also matching (because it too was updated). The refresh path can already sleep during hva=>pfn resolution, so wrap the refresh with a mutex to ensure that any given refresh runs to completion before other callers can start their refresh. 
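Condensed, the resulting lock nesting in kvm_gfn_to_pfn_cache_refresh()
looks like this (an illustrative sketch; the real code is in the diff
below):

	mutex_lock(&gpc->refresh_lock);	/* one refresh at a time */
	write_lock_irq(&gpc->lock);

	/*
	 * ... validate gpa/memslot generation/uhva; the hva=>pfn
	 * resolution may drop and re-acquire gpc->lock in order to
	 * sleep, but refresh_lock stays held across the whole
	 * operation ...
	 */

	write_unlock_irq(&gpc->lock);
	mutex_unlock(&gpc->refresh_lock);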
Cc: stable@vger.kernel.org Cc: Lai Jiangshan Signed-off-by: Sean Christopherson Message-Id: <20220429210025.3293691-7-seanjc@google.com> Signed-off-by: Paolo Bonzini --- include/linux/kvm_types.h | 2 ++ virt/kvm/pfncache.c | 12 ++++++++++++ 2 files changed, 14 insertions(+) diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index ac1ebb37a0ff..f328a01db4fe 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -19,6 +19,7 @@ struct kvm_memslots; enum kvm_mr_change; #include +#include #include #include @@ -69,6 +70,7 @@ struct gfn_to_pfn_cache { struct kvm_vcpu *vcpu; struct list_head list; rwlock_t lock; + struct mutex refresh_lock; void *khva; kvm_pfn_t pfn; enum pfn_cache_usage usage; diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c index 05cb0bcbf662..f610d3945b69 100644 --- a/virt/kvm/pfncache.c +++ b/virt/kvm/pfncache.c @@ -157,6 +157,13 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, if (page_offset + len > PAGE_SIZE) return -EINVAL; + /* + * If another task is refreshing the cache, wait for it to complete. + * There is no guarantee that concurrent refreshes will see the same + * gpa, memslots generation, etc..., so they must be fully serialized. + */ + mutex_lock(&gpc->refresh_lock); + write_lock_irq(&gpc->lock); old_pfn = gpc->pfn; @@ -248,6 +255,8 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, out: write_unlock_irq(&gpc->lock); + mutex_unlock(&gpc->refresh_lock); + gpc_release_pfn_and_khva(kvm, old_pfn, old_khva); return ret; @@ -259,6 +268,7 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc) void *old_khva; kvm_pfn_t old_pfn; + mutex_lock(&gpc->refresh_lock); write_lock_irq(&gpc->lock); gpc->valid = false; @@ -274,6 +284,7 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc) gpc->pfn = KVM_PFN_ERR_FAULT; write_unlock_irq(&gpc->lock); + mutex_unlock(&gpc->refresh_lock); gpc_release_pfn_and_khva(kvm, old_pfn, old_khva); } @@ -288,6 +299,7 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, if (!gpc->active) { rwlock_init(&gpc->lock); + mutex_init(&gpc->refresh_lock); gpc->khva = NULL; gpc->pfn = KVM_PFN_ERR_FAULT; -- cgit v1.2.3 From 58cd407ca4c6278cf9f9d09a2e663bf645b0c982 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 29 Apr 2022 21:00:24 +0000 Subject: KVM: Fix multiple races in gfn=>pfn cache refresh Rework the gfn=>pfn cache (gpc) refresh logic to address multiple races between the cache itself, and between the cache and mmu_notifier events. The existing refresh code attempts to guard against races with the mmu_notifier by speculatively marking the cache valid, and then marking it invalid if a mmu_notifier invalidation occurs. That handles the case where an invalidation occurs between dropping and re-acquiring gpc->lock, but it doesn't handle the scenario where the cache is refreshed after the cache was invalidated by the notifier, but before the notifier elevates mmu_notifier_count. The gpc refresh can't use the "retry" helper as its invalidation occurs _before_ mmu_notifier_count is elevated and before mmu_notifier_range_start is set/updated. 
CPU0 CPU1 ---- ---- gfn_to_pfn_cache_invalidate_start() | -> gpc->valid = false; kvm_gfn_to_pfn_cache_refresh() | |-> gpc->valid = true; hva_to_pfn_retry() | -> acquire kvm->mmu_lock kvm->mmu_notifier_count == 0 mmu_seq == kvm->mmu_notifier_seq drop kvm->mmu_lock return pfn 'X' acquire kvm->mmu_lock kvm_inc_notifier_count() drop kvm->mmu_lock() kernel frees pfn 'X' kvm_gfn_to_pfn_cache_check() | |-> gpc->valid == true caller accesses freed pfn 'X' Key off of mn_active_invalidate_count to detect that a pfncache refresh needs to wait for an in-progress mmu_notifier invalidation. While mn_active_invalidate_count is not guaranteed to be stable, it is guaranteed to be elevated prior to an invalidation acquiring gpc->lock, so either the refresh will see an active invalidation and wait, or the invalidation will run after the refresh completes. Speculatively marking the cache valid is itself flawed, as a concurrent kvm_gfn_to_pfn_cache_check() would see a valid cache with stale pfn/khva values. The KVM Xen use case explicitly allows/wants multiple users; even though the caches are allocated per vCPU, __kvm_xen_has_interrupt() can read a different vCPU (or vCPUs). Address this race by invalidating the cache prior to dropping gpc->lock (this is made possible by fixing the above mmu_notifier race). Complicating all of this is the fact that both the hva=>pfn resolution and mapping of the kernel address can sleep, i.e. must be done outside of gpc->lock. Fix the above races in one fell swoop, trying to fix each individual race is largely pointless and essentially impossible to test, e.g. closing one hole just shifts the focus to the other hole. Fixes: 982ed0de4753 ("KVM: Reinstate gfn_to_pfn_cache with invalidation support") Cc: stable@vger.kernel.org Cc: David Woodhouse Cc: Mingwei Zhang Signed-off-by: Sean Christopherson Message-Id: <20220429210025.3293691-8-seanjc@google.com> Signed-off-by: Paolo Bonzini --- virt/kvm/kvm_main.c | 9 +++ virt/kvm/pfncache.c | 193 +++++++++++++++++++++++++++++++++------------------- 2 files changed, 131 insertions(+), 71 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 342043b30125..a65a2369f788 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -724,6 +724,15 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, kvm->mn_active_invalidate_count++; spin_unlock(&kvm->mn_invalidate_lock); + /* + * Invalidate pfn caches _before_ invalidating the secondary MMUs, i.e. + * before acquiring mmu_lock, to avoid holding mmu_lock while acquiring + * each cache's lock. There are relatively few caches in existence at + * any given time, and the caches themselves can check for hva overlap, + * i.e. don't need to rely on memslot overlap checks for performance. + * Because this runs without holding mmu_lock, the pfn caches must use + * mn_active_invalidate_count (see above) instead of mmu_notifier_count. 
+ */ gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end, hva_range.may_block); diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c index f610d3945b69..b0b678367376 100644 --- a/virt/kvm/pfncache.c +++ b/virt/kvm/pfncache.c @@ -112,31 +112,122 @@ static void gpc_release_pfn_and_khva(struct kvm *kvm, kvm_pfn_t pfn, void *khva) } } -static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, unsigned long uhva) +static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq) { + /* + * mn_active_invalidate_count acts for all intents and purposes + * like mmu_notifier_count here; but the latter cannot be used + * here because the invalidation of caches in the mmu_notifier + * event occurs _before_ mmu_notifier_count is elevated. + * + * Note, it does not matter that mn_active_invalidate_count + * is not protected by gpc->lock. It is guaranteed to + * be elevated before the mmu_notifier acquires gpc->lock, and + * isn't dropped until after mmu_notifier_seq is updated. + */ + if (kvm->mn_active_invalidate_count) + return true; + + /* + * Ensure mn_active_invalidate_count is read before + * mmu_notifier_seq. This pairs with the smp_wmb() in + * mmu_notifier_invalidate_range_end() to guarantee either the + * old (non-zero) value of mn_active_invalidate_count or the + * new (incremented) value of mmu_notifier_seq is observed. + */ + smp_rmb(); + return kvm->mmu_notifier_seq != mmu_seq; +} + +static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc) +{ + /* Note, the new page offset may be different than the old! */ + void *old_khva = gpc->khva - offset_in_page(gpc->khva); + kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT; + void *new_khva = NULL; unsigned long mmu_seq; - kvm_pfn_t new_pfn; - int retry; + + lockdep_assert_held(&gpc->refresh_lock); + + lockdep_assert_held_write(&gpc->lock); + + /* + * Invalidate the cache prior to dropping gpc->lock, the gpa=>uhva + * assets have already been updated and so a concurrent check() from a + * different task may not fail the gpa/uhva/generation checks. + */ + gpc->valid = false; do { mmu_seq = kvm->mmu_notifier_seq; smp_rmb(); + write_unlock_irq(&gpc->lock); + + /* + * If the previous iteration "failed" due to an mmu_notifier + * event, release the pfn and unmap the kernel virtual address + * from the previous attempt. Unmapping might sleep, so this + * needs to be done after dropping the lock. Opportunistically + * check for resched while the lock isn't held. + */ + if (new_pfn != KVM_PFN_ERR_FAULT) { + /* + * Keep the mapping if the previous iteration reused + * the existing mapping and didn't create a new one. + */ + if (new_khva == old_khva) + new_khva = NULL; + + gpc_release_pfn_and_khva(kvm, new_pfn, new_khva); + + cond_resched(); + } + /* We always request a writeable mapping */ - new_pfn = hva_to_pfn(uhva, false, NULL, true, NULL); + new_pfn = hva_to_pfn(gpc->uhva, false, NULL, true, NULL); if (is_error_noslot_pfn(new_pfn)) - break; + goto out_error; + + /* + * Obtain a new kernel mapping if KVM itself will access the + * pfn. Note, kmap() and memremap() can both sleep, so this + * too must be done outside of gpc->lock! 
+ */ + if (gpc->usage & KVM_HOST_USES_PFN) { + if (new_pfn == gpc->pfn) { + new_khva = old_khva; + } else if (pfn_valid(new_pfn)) { + new_khva = kmap(pfn_to_page(new_pfn)); +#ifdef CONFIG_HAS_IOMEM + } else { + new_khva = memremap(pfn_to_hpa(new_pfn), PAGE_SIZE, MEMREMAP_WB); +#endif + } + if (!new_khva) { + kvm_release_pfn_clean(new_pfn); + goto out_error; + } + } + + write_lock_irq(&gpc->lock); - KVM_MMU_READ_LOCK(kvm); - retry = mmu_notifier_retry_hva(kvm, mmu_seq, uhva); - KVM_MMU_READ_UNLOCK(kvm); - if (!retry) - break; + /* + * Other tasks must wait for _this_ refresh to complete before + * attempting to refresh. + */ + WARN_ON_ONCE(gpc->valid); + } while (mmu_notifier_retry_cache(kvm, mmu_seq)); - cond_resched(); - } while (1); + gpc->valid = true; + gpc->pfn = new_pfn; + gpc->khva = new_khva + (gpc->gpa & ~PAGE_MASK); + return 0; + +out_error: + write_lock_irq(&gpc->lock); - return new_pfn; + return -EFAULT; } int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, @@ -147,7 +238,6 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, kvm_pfn_t old_pfn, new_pfn; unsigned long old_uhva; void *old_khva; - bool old_valid; int ret = 0; /* @@ -169,7 +259,6 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, old_pfn = gpc->pfn; old_khva = gpc->khva - offset_in_page(gpc->khva); old_uhva = gpc->uhva; - old_valid = gpc->valid; /* If the userspace HVA is invalid, refresh that first */ if (gpc->gpa != gpa || gpc->generation != slots->generation || @@ -182,7 +271,6 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn); if (kvm_is_error_hva(gpc->uhva)) { - gpc->pfn = KVM_PFN_ERR_FAULT; ret = -EFAULT; goto out; } @@ -192,60 +280,8 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, * If the userspace HVA changed or the PFN was already invalid, * drop the lock and do the HVA to PFN lookup again. */ - if (!old_valid || old_uhva != gpc->uhva) { - unsigned long uhva = gpc->uhva; - void *new_khva = NULL; - - /* Placeholders for "hva is valid but not yet mapped" */ - gpc->pfn = KVM_PFN_ERR_FAULT; - gpc->khva = NULL; - gpc->valid = true; - - write_unlock_irq(&gpc->lock); - - new_pfn = hva_to_pfn_retry(kvm, uhva); - if (is_error_noslot_pfn(new_pfn)) { - ret = -EFAULT; - goto map_done; - } - - if (gpc->usage & KVM_HOST_USES_PFN) { - if (new_pfn == old_pfn) { - /* - * Reuse the existing pfn and khva, but put the - * reference acquired hva_to_pfn_retry(); the - * cache still holds a reference to the pfn - * from the previous refresh. - */ - gpc_release_pfn_and_khva(kvm, new_pfn, NULL); - - new_khva = old_khva; - old_pfn = KVM_PFN_ERR_FAULT; - old_khva = NULL; - } else if (pfn_valid(new_pfn)) { - new_khva = kmap(pfn_to_page(new_pfn)); -#ifdef CONFIG_HAS_IOMEM - } else { - new_khva = memremap(pfn_to_hpa(new_pfn), PAGE_SIZE, MEMREMAP_WB); -#endif - } - if (new_khva) - new_khva += page_offset; - else - ret = -EFAULT; - } - - map_done: - write_lock_irq(&gpc->lock); - if (ret) { - gpc->valid = false; - gpc->pfn = KVM_PFN_ERR_FAULT; - gpc->khva = NULL; - } else { - /* At this point, gpc->valid may already have been cleared */ - gpc->pfn = new_pfn; - gpc->khva = new_khva; - } + if (!gpc->valid || old_uhva != gpc->uhva) { + ret = hva_to_pfn_retry(kvm, gpc); } else { /* If the HVA→PFN mapping was already valid, don't unmap it. 
*/ old_pfn = KVM_PFN_ERR_FAULT; @@ -253,11 +289,26 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, } out: + /* + * Invalidate the cache and purge the pfn/khva if the refresh failed. + * Some/all of the uhva, gpa, and memslot generation info may still be + * valid, leave it as is. + */ + if (ret) { + gpc->valid = false; + gpc->pfn = KVM_PFN_ERR_FAULT; + gpc->khva = NULL; + } + + /* Snapshot the new pfn before dropping the lock! */ + new_pfn = gpc->pfn; + write_unlock_irq(&gpc->lock); mutex_unlock(&gpc->refresh_lock); - gpc_release_pfn_and_khva(kvm, old_pfn, old_khva); + if (old_pfn != new_pfn) + gpc_release_pfn_and_khva(kvm, old_pfn, old_khva); return ret; } -- cgit v1.2.3 From 85165781c5d900d97052be1d2723f6929d56768d Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 29 Apr 2022 21:00:25 +0000 Subject: KVM: Do not pin pages tracked by gfn=>pfn caches Put the reference to any struct page mapped/tracked by a gfn=>pfn cache upon inserting the pfn into its associated cache, as opposed to putting the reference only when the cache is done using the pfn. In other words, don't pin pages while they're in the cache. One of the major roles of the gfn=>pfn cache is to play nicely with invalidation events, i.e. it exists in large part so that KVM doesn't rely on pinning pages. Signed-off-by: Sean Christopherson Message-Id: <20220429210025.3293691-9-seanjc@google.com> Signed-off-by: Paolo Bonzini --- virt/kvm/pfncache.c | 36 ++++++++++++++++++++---------------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c index b0b678367376..ab519f72f2cd 100644 --- a/virt/kvm/pfncache.c +++ b/virt/kvm/pfncache.c @@ -95,20 +95,16 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, } EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_check); -static void gpc_release_pfn_and_khva(struct kvm *kvm, kvm_pfn_t pfn, void *khva) +static void gpc_unmap_khva(struct kvm *kvm, kvm_pfn_t pfn, void *khva) { - /* Unmap the old page if it was mapped before, and release it */ - if (!is_error_noslot_pfn(pfn)) { - if (khva) { - if (pfn_valid(pfn)) - kunmap(pfn_to_page(pfn)); + /* Unmap the old pfn/page if it was mapped before. */ + if (!is_error_noslot_pfn(pfn) && khva) { + if (pfn_valid(pfn)) + kunmap(pfn_to_page(pfn)); #ifdef CONFIG_HAS_IOMEM - else - memunmap(khva); + else + memunmap(khva); #endif - } - - kvm_release_pfn(pfn, false); } } @@ -176,10 +172,10 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc) * Keep the mapping if the previous iteration reused * the existing mapping and didn't create a new one. */ - if (new_khva == old_khva) - new_khva = NULL; + if (new_khva != old_khva) + gpc_unmap_khva(kvm, new_pfn, new_khva); - gpc_release_pfn_and_khva(kvm, new_pfn, new_khva); + kvm_release_pfn_clean(new_pfn); cond_resched(); } @@ -222,6 +218,14 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc) gpc->valid = true; gpc->pfn = new_pfn; gpc->khva = new_khva + (gpc->gpa & ~PAGE_MASK); + + /* + * Put the reference to the _new_ pfn. The pfn is now tracked by the + * cache and can be safely migrated, swapped, etc... as the cache will + * invalidate any mappings in response to relevant mmu_notifier events. 
+ */ + kvm_release_pfn_clean(new_pfn); + return 0; out_error: @@ -308,7 +312,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, mutex_unlock(&gpc->refresh_lock); if (old_pfn != new_pfn) - gpc_release_pfn_and_khva(kvm, old_pfn, old_khva); + gpc_unmap_khva(kvm, old_pfn, old_khva); return ret; } @@ -337,7 +341,7 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc) write_unlock_irq(&gpc->lock); mutex_unlock(&gpc->refresh_lock); - gpc_release_pfn_and_khva(kvm, old_pfn, old_khva); + gpc_unmap_khva(kvm, old_pfn, old_khva); } EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap); -- cgit v1.2.3
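Taken together, the series leaves the consumer-side contract of the
cache unchanged: check under the read lock, refresh on failure, retry.
A minimal sketch of that pattern (illustrative; modeled on KVM's Xen
emulation, with data/len as stand-ins for the caller's payload and
len <= PAGE_SIZE assumed):

	unsigned long flags;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE)) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* May sleep; refreshes are fully serialized internally. */
		if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE))
			return;

		read_lock_irqsave(&gpc->lock, flags);
	}

	/*
	 * gpc->khva is safe to dereference until the read lock is
	 * dropped; mmu_notifier events invalidate the cache instead of
	 * relying on the page staying pinned.
	 */
	memcpy(gpc->khva, data, len);

	read_unlock_irqrestore(&gpc->lock, flags);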