author	Linus Torvalds <torvalds@linux-foundation.org>	2019-08-09 15:46:29 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-08-09 15:46:29 -0700
commit	7f20fd23377ac3356657ce35fcaf19ee2fea8345 (patch)
tree	3b275e1a0fc8df2567a02d9e40ca219ac3f09c6f /virt/kvm/kvm_main.c
parent	15abf14202a2fe7e5c5fc0e815587f45de4fd500 (diff)
parent	a738b5e75b4c13be3485c82eb62c30047aa9f164 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Paolo Bonzini:
 "Bugfixes (arm and x86) and cleanups"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  selftests: kvm: Adding config fragments
  KVM: selftests: Update gitignore file for latest changes
  kvm: remove unnecessary PageReserved check
  KVM: arm/arm64: vgic: Reevaluate level sensitive interrupts on enable
  KVM: arm: Don't write junk to CP15 registers on reset
  KVM: arm64: Don't write junk to sysregs on reset
  KVM: arm/arm64: Sync ICH_VMCR_EL2 back when about to block
  x86: kvm: remove useless calls to kvm_para_available
  KVM: no need to check return value of debugfs_create functions
  KVM: remove kvm_arch_has_vcpu_debugfs()
  KVM: Fix leak vCPU's VMCS value into other pCPU
  KVM: Check preempted_in_kernel for involuntary preemption
  KVM: LAPIC: Don't need to wakeup vCPU twice afer timer fire
  arm64: KVM: hyp: debug-sr: Mark expected switch fall-through
  KVM: arm64: Update kvm_arm_exception_class and esr_class_str for new EC
  KVM: arm: vgic-v3: Mark expected switch fall-through
  arm64: KVM: regmap: Fix unexpected switch fall-through
  KVM: arm/arm64: Introduce kvm_pmu_vcpu_init() to setup PMU counter index
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--	virt/kvm/kvm_main.c	61
1 file changed, 36 insertions(+), 25 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 887f3b0c2b60..c6a91b044d8d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1855,8 +1855,7 @@ void kvm_set_pfn_dirty(kvm_pfn_t pfn)
if (!kvm_is_reserved_pfn(pfn)) {
struct page *page = pfn_to_page(pfn);
- if (!PageReserved(page))
- SetPageDirty(page);
+ SetPageDirty(page);
}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
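The PageReserved() check removed above was dead code: the caller only reaches it after kvm_is_reserved_pfn() has already filtered reserved pages. For reference, kvm_is_reserved_pfn() in the surrounding tree looked roughly like this (a sketch for context, not part of this diff):

	bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
	{
		if (pfn_valid(pfn))
			/* Reserved pages are filtered out right here. */
			return PageReserved(pfn_to_page(pfn));

		/* A pfn without a struct page is treated as reserved. */
		return true;
	}

So inside the !kvm_is_reserved_pfn(pfn) branch, PageReserved(page) could only ever be false, and the inner check never fired.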
@@ -2477,6 +2476,29 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
#endif
}
+/*
+ * Unlike kvm_arch_vcpu_runnable, this function is called outside
+ * a vcpu_load/vcpu_put pair. However, for most architectures
+ * kvm_arch_vcpu_runnable does not require vcpu_load.
+ */
+bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+ return kvm_arch_vcpu_runnable(vcpu);
+}
+
+static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
+{
+ if (kvm_arch_dy_runnable(vcpu))
+ return true;
+
+#ifdef CONFIG_KVM_ASYNC_PF
+ if (!list_empty_careful(&vcpu->async_pf.done))
+ return true;
+#endif
+
+ return false;
+}
+
void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
{
struct kvm *kvm = me->kvm;
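Because kvm_vcpu_on_spin() inspects other vCPUs without a vcpu_load/vcpu_put pair, kvm_arch_dy_runnable() may only perform lockless checks; a false positive merely costs one wasted directed yield. The weak default above falls back to kvm_arch_vcpu_runnable(), but an architecture can override it with a cheaper, lock-free test. A hypothetical override might look like this (illustrative only; the pending_event field is made up and not from this series):

	bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
	{
		/* Lockless, racy-but-safe: we may not hold any vCPU lock here,
		 * so only peek at fields that tolerate concurrent updates. */
		if (READ_ONCE(vcpu->arch.pending_event))	/* hypothetical flag */
			return true;

		return false;
	}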
@@ -2506,9 +2528,10 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
continue;
if (vcpu == me)
continue;
- if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
+ if (swait_active(&vcpu->wq) && !vcpu_dy_runnable(vcpu))
continue;
- if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
+ if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
+ !kvm_arch_vcpu_in_kernel(vcpu))
continue;
if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
continue;
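The READ_ONCE(vcpu->preempted) above pairs with the WRITE_ONCE() updates in kvm_sched_in()/kvm_sched_out() further down: the flag is written from one CPU's preempt notifiers and read locklessly here from another, so both sides use marked accesses to keep the compiler from tearing or caching the plain loads and stores. A minimal userspace analogue of the pattern, using C11 relaxed atomics in place of the kernel macros (a sketch, not kernel code):

	#include <stdatomic.h>
	#include <stdbool.h>

	struct vcpu { atomic_bool preempted; };

	/* Writer side: runs in the scheduler path on the vCPU's own CPU. */
	static void sched_out(struct vcpu *v)
	{
		atomic_store_explicit(&v->preempted, true, memory_order_relaxed);
	}

	/* Reader side: another CPU scanning yield candidates. The marked
	 * load must be performed fresh on every loop iteration, which a
	 * plain read would not guarantee. */
	static bool is_yield_candidate(struct vcpu *v)
	{
		return atomic_load_explicit(&v->preempted, memory_order_relaxed);
	}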
@@ -2591,30 +2614,20 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
}
-static int kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
+static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
+#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
char dir_name[ITOA_MAX_LEN * 2];
- int ret;
-
- if (!kvm_arch_has_vcpu_debugfs())
- return 0;
if (!debugfs_initialized())
- return 0;
+ return;
snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
vcpu->debugfs_dentry = debugfs_create_dir(dir_name,
- vcpu->kvm->debugfs_dentry);
- if (!vcpu->debugfs_dentry)
- return -ENOMEM;
-
- ret = kvm_arch_create_vcpu_debugfs(vcpu);
- if (ret < 0) {
- debugfs_remove_recursive(vcpu->debugfs_dentry);
- return ret;
- }
+ vcpu->kvm->debugfs_dentry);
- return 0;
+ kvm_arch_create_vcpu_debugfs(vcpu);
+#endif
}
/*
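The simplification above follows the general debugfs rule: creation failures are not actionable for callers. In this era debugfs_create_dir() returns an ERR_PTR on failure rather than NULL, and the other debugfs_create_*() helpers accept such a value as a parent and quietly do nothing, so the removed -ENOMEM path bought nothing. The resulting idiom, sketched with a hypothetical driver structure (struct foo and its fields are illustrative):

	struct foo { struct dentry *dir; u32 count; };

	static void foo_create_debugfs(struct foo *f)
	{
		f->dir = debugfs_create_dir("foo", NULL);
		/* No error check: if f->dir is an ERR_PTR, the call below
		 * degrades gracefully. Debugfs is best-effort by design. */
		debugfs_create_u32("count", 0444, f->dir, &f->count);
	}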
@@ -2649,9 +2662,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
if (r)
goto vcpu_destroy;
- r = kvm_create_vcpu_debugfs(vcpu);
- if (r)
- goto vcpu_destroy;
+ kvm_create_vcpu_debugfs(vcpu);
mutex_lock(&kvm->lock);
if (kvm_get_vcpu_by_id(kvm, id)) {
@@ -4205,7 +4216,7 @@ static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
- vcpu->preempted = false;
+ WRITE_ONCE(vcpu->preempted, false);
WRITE_ONCE(vcpu->ready, false);
kvm_arch_sched_in(vcpu, cpu);
@@ -4219,7 +4230,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
if (current->state == TASK_RUNNING) {
- vcpu->preempted = true;
+ WRITE_ONCE(vcpu->preempted, true);
WRITE_ONCE(vcpu->ready, true);
}
kvm_arch_vcpu_put(vcpu);
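Taken together, the notifier hunks and the yield-loop hunk implement one contract: vcpu->preempted is set only on involuntary preemption (current->state == TASK_RUNNING at sched-out), and the boost logic consults it with a marked read. The resulting candidate filter in kvm_vcpu_on_spin() can be condensed as follows (a paraphrase of the hunks above, not a verbatim copy of the upstream function):

	static bool skip_candidate(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode)
	{
		/* Truly idle and with no pending work: waking it is pointless. */
		if (swait_active(&vcpu->wq) && !vcpu_dy_runnable(vcpu))
			return true;

		/* Only apply the in-kernel filter to involuntarily preempted
		 * vCPUs; for them, a user-mode candidate cannot be holding a
		 * kernel lock worth boosting for. */
		if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
		    !kvm_arch_vcpu_in_kernel(vcpu))
			return true;

		return false;
	}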