From 64d4d521757117aa5c1cfe79d3baa6cf57703f81 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Thu, 9 Oct 2008 16:01:57 +0800 Subject: KVM: Enable MTRR for EPT The effective memory type of EPT is the mixture of MSR_IA32_CR_PAT and memory type field of EPT entry. Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/x86/kvm/svm.c') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 9c4ce657d96..05efc4ef75a 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1912,6 +1912,11 @@ static int get_npt_level(void) #endif } +static int svm_get_mt_mask_shift(void) +{ + return 0; +} + static struct kvm_x86_ops svm_x86_ops = { .cpu_has_kvm_support = has_svm, .disabled_by_bios = is_disabled, @@ -1967,6 +1972,7 @@ static struct kvm_x86_ops svm_x86_ops = { .set_tss_addr = svm_set_tss_addr, .get_tdp_level = get_npt_level, + .get_mt_mask_shift = svm_get_mt_mask_shift, }; static int __init svm_init(void) -- cgit v1.2.3 From 25022acc3dd5f0b54071c7ba7c371860f2971b52 Mon Sep 17 00:00:00 2001 From: Amit Shah Date: Mon, 27 Oct 2008 09:04:17 +0000 Subject: KVM: SVM: Set the 'g' bit of the cs selector for cross-vendor migration The hardware does not set the 'g' bit of the cs selector and this breaks migration from amd hosts to intel hosts. Set this bit if the segment limit is beyond 1 MB. Signed-off-by: Amit Shah Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch/x86/kvm/svm.c') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 05efc4ef75a..665008d9785 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -772,6 +772,15 @@ static void svm_get_segment(struct kvm_vcpu *vcpu, var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1; + + /* + * SVM always stores 0 for the 'G' bit in the CS selector in + * the VMCB on a VMEXIT. This hurts cross-vendor migration: + * Intel's VMENTRY has a check on the 'G' bit. + */ + if (seg == VCPU_SREG_CS) + var->g = s->limit > 0xfffff; + var->unusable = !var->present; } -- cgit v1.2.3 From c0d09828c870f90c6bc72070ada281568f89c63b Mon Sep 17 00:00:00 2001 From: Amit Shah Date: Mon, 27 Oct 2008 09:04:18 +0000 Subject: KVM: SVM: Set the 'busy' flag of the TR selector The busy flag of the TR selector is not set by the hardware. This breaks migration from amd hosts to intel hosts. Signed-off-by: Amit Shah Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch/x86/kvm/svm.c') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 665008d9785..743aebd7bfc 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -781,6 +781,13 @@ static void svm_get_segment(struct kvm_vcpu *vcpu, if (seg == VCPU_SREG_CS) var->g = s->limit > 0xfffff; + /* + * Work around a bug where the busy flag in the tr selector + * isn't exposed + */ + if (seg == VCPU_SREG_TR) + var->type |= 0x2; + var->unusable = !var->present; } -- cgit v1.2.3 From e93f36bcfaa9e899c595e1c446c784a69021854a Mon Sep 17 00:00:00 2001 From: Guillaume Thouvenin Date: Tue, 28 Oct 2008 10:51:30 +0100 Subject: KVM: allow emulator to adjust rip for emulated pio instructions If we call the emulator we shouldn't call skip_emulated_instruction() in the first place, since the emulator already computes the next rip for us. 
Thus we move ->skip_emulated_instruction() out of kvm_emulate_pio() and
into handle_io() (and the svm equivalent). We also replace "return 0"
by "break" in the "do_io:" case because now the shadow register state
needs to be committed; otherwise eip will never be updated.

Signed-off-by: Guillaume Thouvenin
Signed-off-by: Avi Kivity
---
 arch/x86/kvm/svm.c         | 1 +
 arch/x86/kvm/vmx.c         | 1 +
 arch/x86/kvm/x86.c         | 2 --
 arch/x86/kvm/x86_emulate.c | 2 +-
 4 files changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch/x86/kvm/svm.c')

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 743aebd7bfc..f0ad4d4217e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1115,6 +1115,7 @@ static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	rep = (io_info & SVM_IOIO_REP_MASK) != 0;
 	down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;

+	skip_emulated_instruction(&svm->vcpu);
 	return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
 }

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 7623eb7b68d..816d23185fb 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2687,6 +2687,7 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	rep = (exit_qualification & 32) != 0;
 	port = exit_qualification >> 16;

+	skip_emulated_instruction(vcpu);
 	return kvm_emulate_pio(vcpu, kvm_run, in, size, port);
 }

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ceeac889714..38f79b6aaf1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2478,8 +2478,6 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 	val = kvm_register_read(vcpu, VCPU_REGS_RAX);
 	memcpy(vcpu->arch.pio_data, &val, 4);

-	kvm_x86_ops->skip_emulated_instruction(vcpu);
-
 	pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in);
 	if (pio_dev) {
 		kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
index 57d7cc45be4..8f60ace1387 100644
--- a/arch/x86/kvm/x86_emulate.c
+++ b/arch/x86/kvm/x86_emulate.c
@@ -1772,7 +1772,7 @@ special_insn:
 			c->eip = saved_eip;
 			goto cannot_emulate;
 		}
-		return 0;
+		break;
 	case 0xf4:	/* hlt */
 		ctxt->vcpu->arch.halt_request = 1;
 		break;
-- cgit v1.2.3

From 63d1142f8f69e39468bc6079ab2239e902828134 Mon Sep 17 00:00:00 2001
From: Eduardo Habkost
Date: Mon, 17 Nov 2008 19:03:20 -0200
Subject: KVM: SVM: move has_svm() code to asm/virtext.h

Use a trick to keep the printk()s on has_svm() working as before: gcc
will take care of not generating code for the 'msg' stuff when the
function is called with a NULL msg argument.

Signed-off-by: Eduardo Habkost
Signed-off-by: Avi Kivity
---
 arch/x86/include/asm/virtext.h | 41 +++++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/svm.c             | 19 +++++--------------
 2 files changed, 46 insertions(+), 14 deletions(-)

(limited to 'arch/x86/kvm/svm.c')

diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
index 6bcf0acb4ef..6f0d409c368 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -19,6 +19,7 @@
 #include
 #include
+#include <asm/svm.h>

 /*
  * VMX functions:
  */
@@ -66,4 +67,44 @@ static inline void cpu_emergency_vmxoff(void)
 		__cpu_emergency_vmxoff();
 }

+
+
+
+/*
+ * SVM functions:
+ */
+
+/** Check if the CPU has SVM support
+ *
+ * You can use the 'msg' arg to get a message describing the problem,
+ * if the function returns zero. Simply pass NULL if you are not interested
+ * in the messages; gcc should take care of not generating code for
+ * the messages in this case.
+ */
+static inline int cpu_has_svm(const char **msg)
+{
+	uint32_t eax, ebx, ecx, edx;
+
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+		if (msg)
+			*msg = "not amd";
+		return 0;
+	}
+
+	cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
+	if (eax < SVM_CPUID_FUNC) {
+		if (msg)
+			*msg = "can't execute cpuid_8000000a";
+		return 0;
+	}
+
+	cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
+	if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
+		if (msg)
+			*msg = "svm not available";
+		return 0;
+	}
+	return 1;
+}
+
 #endif /* _ASM_X86_VIRTEX_H */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f0ad4d4217e..0667c6d13d3 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -28,6 +28,8 @@

 #include

+#include <asm/virtext.h>
+
 #define __ex(x) __kvm_handle_fault_on_reboot(x)

 MODULE_AUTHOR("Qumranet");
@@ -245,24 +247,13 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)

 static int has_svm(void)
 {
-	uint32_t eax, ebx, ecx, edx;
-
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
-		printk(KERN_INFO "has_svm: not amd\n");
-		return 0;
-	}
+	const char *msg;

-	cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
-	if (eax < SVM_CPUID_FUNC) {
-		printk(KERN_INFO "has_svm: can't execute cpuid_8000000a\n");
+	if (!cpu_has_svm(&msg)) {
+		printk(KERN_INFO "has_svm: %s\n", msg);
 		return 0;
 	}

-	cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
-	if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
-		printk(KERN_DEBUG "has_svm: svm not available\n");
-		return 0;
-	}
 	return 1;
 }

-- cgit v1.2.3

From 2c8dceebb238680d5577500f8283397d41ca5590 Mon Sep 17 00:00:00 2001
From: Eduardo Habkost
Date: Mon, 17 Nov 2008 19:03:21 -0200
Subject: KVM: SVM: move svm_hardware_disable() code to asm/virtext.h

Create a cpu_svm_disable() function.

Signed-off-by: Eduardo Habkost
Signed-off-by: Avi Kivity
---
 arch/x86/include/asm/virtext.h | 14 ++++++++++++++
 arch/x86/kvm/svm.c             |  6 +-----
 2 files changed, 15 insertions(+), 5 deletions(-)

(limited to 'arch/x86/kvm/svm.c')

diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
index 6f0d409c368..2cfe363729c 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -107,4 +107,18 @@ static inline int cpu_has_svm(const char **msg)
 	return 1;
 }

+
+/** Disable SVM on the current CPU
+ *
+ * You should call this only if cpu_has_svm() returned true.
+ */
+static inline void cpu_svm_disable(void)
+{
+	uint64_t efer;
+
+	wrmsrl(MSR_VM_HSAVE_PA, 0);
+	rdmsrl(MSR_EFER, efer);
+	wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
+}
+
 #endif /* _ASM_X86_VIRTEX_H */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0667c6d13d3..1452851ae25 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -259,11 +259,7 @@ static int has_svm(void)

 static void svm_hardware_disable(void *garbage)
 {
-	uint64_t efer;
-
-	wrmsrl(MSR_VM_HSAVE_PA, 0);
-	rdmsrl(MSR_EFER, efer);
-	wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
+	cpu_svm_disable();
 }

 static void svm_hardware_enable(void *garbage)
-- cgit v1.2.3
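
Taken together, the last two patches leave asm/virtext.h with two SVM helpers, cpu_has_svm() and cpu_svm_disable(), that can be called from outside svm.c. The fragment below is a minimal illustrative sketch of such a caller, not part of the series above: the function name disable_svm_on_this_cpu() is made up for the example, and only cpu_has_svm() and cpu_svm_disable() come from the patches.

#include <linux/kernel.h>	/* printk() */
#include <asm/virtext.h>	/* cpu_has_svm(), cpu_svm_disable() */

/* Hypothetical caller: turn SVM off on the current CPU if it is present. */
static void disable_svm_on_this_cpu(void)
{
	const char *msg;

	/*
	 * Passing &msg retrieves a reason string when SVM is unavailable;
	 * callers that do not care can pass NULL and, as the comment in
	 * virtext.h notes, gcc then drops the message handling entirely.
	 */
	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "disable_svm_on_this_cpu: %s\n", msg);
		return;
	}

	/* Safe only after cpu_has_svm() returned non-zero. */
	cpu_svm_disable();	/* clears VM_HSAVE_PA and the EFER SVME bit */
}

This mirrors what the refactored has_svm() and svm_hardware_disable() in svm.c do after the two patches above.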