author     Avi Kivity <avi@qumranet.com>               2007-01-05 16:36:45 -0800
committer  Linus Torvalds <torvalds@woody.osdl.org>   2007-01-05 23:55:25 -0800
commit     a436036baf331703b4d2c8e8a45f02c597bf6913
tree       ba134b0b42ca42c53e818073af8d51d73771c56b
parent     9b7a032567ee1128daeebebfc14d3acedfe28c8c
[PATCH] KVM: MMU: If emulating an instruction fails, try unprotecting the page
A page table may have been recycled into a regular page, so any instruction
can now be executed on it. Unprotect the page and let the CPU do its thing.
Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  drivers/kvm/kvm.h       |  1
-rw-r--r--  drivers/kvm/kvm_main.c  |  2
-rw-r--r--  drivers/kvm/mmu.c       | 58
3 files changed, 61 insertions(+), 0 deletions(-)
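
In outline, the recovery path this patch adds to emulate_instruction() reads
as follows (condensed from the kvm_main.c hunk below, with explanatory
comments added here; they are not part of the patch):

	if (r) {				/* emulation failed */
		/*
		 * cr2 is the faulting guest virtual address.  If the
		 * fault hit a write-protected (shadowed) page table
		 * page, zapping its shadow pages makes it an ordinary
		 * writable page again, and the guest can simply
		 * re-execute the instruction natively.
		 */
		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
			return EMULATE_DONE;	/* retry in the guest */
		if (!vcpu->mmio_needed) {
			report_emulation_failure(&emulate_ctxt);
			return EMULATE_FAIL;
		}
	}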
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index b7068ecd776..34c43bb4d34 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -450,6 +450,7 @@ unsigned long segment_base(u16 selector);
 
 void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
 void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
+int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
 
 static inline struct page *_gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 047f6f6ed3f..79032438dd1 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -1063,6 +1063,8 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
 	}
 
 	if (r) {
+		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
+			return EMULATE_DONE;
 		if (!vcpu->mmio_needed) {
 			report_emulation_failure(&emulate_ctxt);
 			return EMULATE_FAIL;
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 6dbd83b8662..1484b721171 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -478,11 +478,62 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	return page;
 }
 
+static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
+					 struct kvm_mmu_page *page)
+{
+	BUG();
+}
+
 static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
 			     struct kvm_mmu_page *page,
 			     u64 *parent_pte)
 {
 	mmu_page_remove_parent_pte(page, parent_pte);
+	if (page->role.level > PT_PAGE_TABLE_LEVEL)
+		kvm_mmu_page_unlink_children(vcpu, page);
+	hlist_del(&page->hash_link);
+	list_del(&page->link);
+	list_add(&page->link, &vcpu->free_pages);
+}
+
+static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
+			     struct kvm_mmu_page *page)
+{
+	u64 *parent_pte;
+
+	while (page->multimapped || page->parent_pte) {
+		if (!page->multimapped)
+			parent_pte = page->parent_pte;
+		else {
+			struct kvm_pte_chain *chain;
+
+			chain = container_of(page->parent_ptes.first,
+					     struct kvm_pte_chain, link);
+			parent_pte = chain->parent_ptes[0];
+		}
+		kvm_mmu_put_page(vcpu, page, parent_pte);
+		*parent_pte = 0;
+	}
+}
+
+static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	unsigned index;
+	struct hlist_head *bucket;
+	struct kvm_mmu_page *page;
+	struct hlist_node *node, *n;
+	int r;
+
+	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
+	r = 0;
+	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+	bucket = &vcpu->kvm->mmu_page_hash[index];
+	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
+		if (page->gfn == gfn && !page->role.metaphysical) {
+			kvm_mmu_zap_page(vcpu, page);
+			r = 1;
+		}
+	return r;
 }
 
 static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
@@ -1001,6 +1052,13 @@ void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
 {
 }
 
+int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
+{
+	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+
+	return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
+}
+
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
 	while (!list_empty(&vcpu->free_pages)) {
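
A note on why the zap loop above terminates: kvm_mmu_zap_page() spins until
the shadow page has no remaining parent-PTE links, and each pass removes
exactly one link via kvm_mmu_put_page() before zeroing the PTE it pointed
at. The following standalone userspace model illustrates that argument; the
toy_* names and array-of-slots layout are inventions of this sketch (the
real code tracks parents in kvm_pte_chain hlists), kept only to make the
termination property easy to see:

	#include <stdio.h>
	#include <stdint.h>

	#define MAX_PARENTS 4

	/* Toy stand-in for struct kvm_mmu_page's parent-PTE tracking. */
	struct toy_page {
		uint64_t *parents[MAX_PARENTS];	/* non-NULL = live link */
		int nr_parents;
	};

	/* Analogue of kvm_mmu_put_page(): drop one parent link. */
	static void toy_put_page(struct toy_page *page, int slot)
	{
		page->parents[slot] = NULL;
		page->nr_parents--;
	}

	/*
	 * Analogue of kvm_mmu_zap_page(): each pass removes exactly one
	 * link and clears the PTE it pointed at, so the loop finishes
	 * after nr_parents iterations.
	 */
	static void toy_zap_page(struct toy_page *page)
	{
		while (page->nr_parents) {
			for (int i = 0; i < MAX_PARENTS; i++) {
				if (page->parents[i]) {
					uint64_t *parent_pte = page->parents[i];

					toy_put_page(page, i);
					*parent_pte = 0; /* unmap in parent */
					break;
				}
			}
		}
	}

	int main(void)
	{
		uint64_t ptes[3] = { 0x1000, 0x2000, 0x3000 };
		struct toy_page page = {
			.parents = { &ptes[0], &ptes[1], &ptes[2] },
			.nr_parents = 3,
		};

		toy_zap_page(&page);
		printf("links left: %d, ptes: %llx %llx %llx\n",
		       page.nr_parents,
		       (unsigned long long)ptes[0],
		       (unsigned long long)ptes[1],
		       (unsigned long long)ptes[2]);
		return 0;
	}

The ordering in the real code matters the same way: kvm_mmu_put_page()
unlinks the page before the caller zeroes *parent_pte, and
kvm_mmu_unprotect_page() iterates with hlist_for_each_entry_safe precisely
because zapping deletes the current entry from the hash bucket mid-walk.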