
KVM: do not release the error page

After commit a2766325cf, the error page is replaced by the
error code, so it no longer needs to be released.

[ The patch has been compile-tested for powerpc ]

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Xiao Guangrong 2012-08-03 15:42:52 +08:00 committed by Avi Kivity
parent cb9aaa30b1
commit 32cad84f44
8 changed files with 12 additions and 19 deletions
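In short: after a2766325cf a failed gfn_to_page() hands back a dedicated error page rather than an ordinary refcounted page, so callers only test is_error_page() and release nothing on failure. A minimal sketch of the resulting caller pattern, assuming the post-series API; the helper function below is hypothetical and only illustrates the shape of the callers fixed up in the hunks that follow:

#include <linux/kvm_host.h>
#include <linux/highmem.h>

/*
 * Hypothetical helper, for illustration only: read a 32-bit value from
 * the start of a guest frame.  On failure gfn_to_page() returns the
 * error page, which is detected with is_error_page() and never
 * released; only a real page reaches kvm_release_page_clean().
 */
static int read_guest_u32(struct kvm *kvm, gfn_t gfn, u32 *val)
{
	struct page *page = gfn_to_page(kvm, gfn);
	u32 *kaddr;

	if (is_error_page(page))
		return -EFAULT;		/* no kvm_release_page_clean() here */

	kaddr = kmap_atomic(page);
	*val = *kaddr;
	kunmap_atomic(kaddr);

	kvm_release_page_clean(page);	/* release real pages only */
	return 0;
}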


@@ -319,7 +319,6 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
 	if (is_error_page(new_page)) {
 		printk(KERN_ERR "Couldn't get guest page for gfn %llx!\n",
 		       (unsigned long long)gfn);
-		kvm_release_page_clean(new_page);
 		return;
 	}
 	hpaddr = page_to_phys(new_page);


@@ -242,10 +242,8 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
 	int i;
 
 	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
-	if (is_error_page(hpage)) {
-		kvm_release_page_clean(hpage);
+	if (is_error_page(hpage))
 		return;
-	}
 
 	hpage_offset = pte->raddr & ~PAGE_MASK;
 	hpage_offset &= ~0xFFFULL;


@@ -2105,7 +2105,6 @@ static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
 	return kmap(page);
 
 error:
-	kvm_release_page_clean(page);
 	kvm_inject_gp(&svm->vcpu, 0);
 
 	return NULL;


@@ -596,10 +596,9 @@ static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
 static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
 {
 	struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT);
-	if (is_error_page(page)) {
-		kvm_release_page_clean(page);
+	if (is_error_page(page))
 		return NULL;
-	}
+
 	return page;
 }


@@ -1639,10 +1639,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		vcpu->arch.time_page =
 				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
 
-		if (is_error_page(vcpu->arch.time_page)) {
-			kvm_release_page_clean(vcpu->arch.time_page);
+		if (is_error_page(vcpu->arch.time_page))
 			vcpu->arch.time_page = NULL;
-		}
+
 		break;
 	}
 	case MSR_KVM_ASYNC_PF_EN:
@@ -3945,10 +3944,8 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
 		goto emul_write;
 
 	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-	if (is_error_page(page)) {
-		kvm_release_page_clean(page);
+	if (is_error_page(page))
 		goto emul_write;
-	}
 
 	kaddr = kmap_atomic(page);
 	kaddr += offset_in_page(gpa);


@@ -457,7 +457,7 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
 pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 		      bool *writable);
 pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
-void kvm_release_pfn_dirty(pfn_t);
+void kvm_release_pfn_dirty(pfn_t pfn);
 void kvm_release_pfn_clean(pfn_t pfn);
 void kvm_set_pfn_dirty(pfn_t pfn);
 void kvm_set_pfn_accessed(pfn_t pfn);


@@ -111,7 +111,7 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
 			list_entry(vcpu->async_pf.done.next,
 				   typeof(*work), link);
 		list_del(&work->link);
-		if (work->page)
+		if (!is_error_page(work->page))
 			kvm_release_page_clean(work->page);
 		kmem_cache_free(async_pf_cache, work);
 	}
@@ -138,7 +138,7 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 
 		list_del(&work->queue);
 		vcpu->async_pf.queued--;
-		if (work->page)
+		if (!is_error_page(work->page))
 			kvm_release_page_clean(work->page);
 		kmem_cache_free(async_pf_cache, work);
 	}


@@ -1186,8 +1186,9 @@ EXPORT_SYMBOL_GPL(gfn_to_page);
 
 void kvm_release_page_clean(struct page *page)
 {
-	if (!is_error_page(page))
-		kvm_release_pfn_clean(page_to_pfn(page));
+	WARN_ON(is_error_page(page));
+
+	kvm_release_pfn_clean(page_to_pfn(page));
 }
 EXPORT_SYMBOL_GPL(kvm_release_page_clean);
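The final hunk is the enforcing half of the change: kvm_release_page_clean() now WARNs when it is handed the error page instead of silently skipping it, so any caller that may legitimately hold either a real page or the error page has to keep the is_error_page() check on its own side, as the async_pf hunks above do. A minimal sketch of that guarded release, assuming the post-commit contract; the wrapper name is hypothetical:

/*
 * Illustrative wrapper, not from the commit: the caller decides whether
 * the page is real; passing the error page to kvm_release_page_clean()
 * would now trigger the WARN_ON() added above.
 */
static void release_page_if_real(struct page *page)
{
	if (!is_error_page(page))
		kvm_release_page_clean(page);
}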