
[Xen-devel] [PATCH] VMX: allocate APIC access page from domain heap



... since we don't need its virtual address anywhere (it's only a
placeholder page, after all). For this to work (and to possibly allow
doing the same elsewhere), share_xen_page_with_guest() needs to mark
pages handed to it as Xen heap ones.

To be on the safe side, also explicitly clear the page (not having done
so was okay due to the XSA-100 fix, but is still a latent bug since we
don't formally guarantee allocations to come out zeroed, and in fact
this property may disappear again as soon as the asynchronous runtime
scrubbing patches arrive).
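
In condensed form, the new allocation path introduced by the first hunk
below therefore is (names and calls exactly as in the patch):

    pg = alloc_domheap_page(d, MEMF_no_owner);
    if ( !pg )
        return -ENOMEM;
    mfn = page_to_mfn(pg);
    /* Don't rely on the allocation having come out scrubbed. */
    clear_domain_page(mfn);
    share_xen_page_with_guest(pg, d, XENSHARE_writable);
    d->arch.hvm_domain.vmx.apic_access_mfn = mfn;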

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
Alternatives might be to use a
- global (or perhaps per-node) page shared across VMs (on the basis that
  VMs shouldn't be writing into that page anyway; see the rough sketch
  after this list)
- fake MFN pointing into nowhere (would need to ensure no side effects
  can occur, like PCIe errors or NMIs)
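
As a rough illustration of the first alternative only -- this is not part
of the patch, and the function name and the apic_access_mfn global used
here are made up for the sketch -- a single page could be set up once at
boot and merely mapped by each domain:

    /* Hypothetical: one boot-time page shared by all VMs. */
    static unsigned long apic_access_mfn;

    static int __init vmx_setup_shared_apic_page(void)
    {
        struct page_info *pg = alloc_domheap_page(NULL, 0);

        if ( !pg )
            return -ENOMEM;
        apic_access_mfn = page_to_mfn(pg);
        clear_domain_page(apic_access_mfn);
        return 0;
    }

Per-domain setup would then reduce to installing the p2m entry, as in the
patch:

    set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE),
                       _mfn(apic_access_mfn), PAGE_ORDER_4K,
                       p2m_get_hostp2m(d)->default_access);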

--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2402,19 +2402,21 @@ gp_fault:
 
 static int vmx_alloc_vlapic_mapping(struct domain *d)
 {
-    void *apic_va;
+    struct page_info *pg;
+    unsigned long mfn;
 
     if ( !cpu_has_vmx_virtualize_apic_accesses )
         return 0;
 
-    apic_va = alloc_xenheap_page();
-    if ( apic_va == NULL )
+    pg = alloc_domheap_page(d, MEMF_no_owner);
+    if ( !pg )
         return -ENOMEM;
-    share_xen_page_with_guest(virt_to_page(apic_va), d, XENSHARE_writable);
-    d->arch.hvm_domain.vmx.apic_access_mfn = virt_to_mfn(apic_va);
-    set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE),
-                       _mfn(virt_to_mfn(apic_va)), PAGE_ORDER_4K,
-                       p2m_get_hostp2m(d)->default_access);
+    mfn = page_to_mfn(pg);
+    clear_domain_page(mfn);
+    share_xen_page_with_guest(pg, d, XENSHARE_writable);
+    d->arch.hvm_domain.vmx.apic_access_mfn = mfn;
+    set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), _mfn(mfn),
+                       PAGE_ORDER_4K, p2m_get_hostp2m(d)->default_access);
 
     return 0;
 }
@@ -2422,8 +2424,16 @@ static int vmx_alloc_vlapic_mapping(stru
 static void vmx_free_vlapic_mapping(struct domain *d)
 {
     unsigned long mfn = d->arch.hvm_domain.vmx.apic_access_mfn;
+
     if ( mfn != 0 )
-        free_xenheap_page(mfn_to_virt(mfn));
+    {
+        struct page_info *pg = mfn_to_page(mfn);
+
+        if ( test_and_clear_bit(_PGC_allocated, &pg->count_info) )
+            put_page(pg);
+        clear_bit(_PGC_xen_heap, &pg->count_info);
+        free_domheap_page(pg);
+    }
 }
 
 static void vmx_install_vlapic_mapping(struct vcpu *v)
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -454,7 +454,7 @@ void share_xen_page_with_guest(
     /* Only add to the allocation list if the domain isn't dying. */
     if ( !d->is_dying )
     {
-        page->count_info |= PGC_allocated | 1;
+        page->count_info |= PGC_xen_heap | PGC_allocated | 1;
         if ( unlikely(d->xenheap_pages++ == 0) )
             get_knownalive_domain(d);
         page_list_add_tail(page, &d->xenpage_list);




_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 

