2m page support.

Signed-off-by: Christoph Egger

diff -r c0ab99142868 -r b857db54bca3 xen/arch/x86/mm/hap/nested_hap.c
--- a/xen/arch/x86/mm/hap/nested_hap.c
+++ b/xen/arch/x86/mm/hap/nested_hap.c
@@ -96,6 +96,11 @@ nestedp2m_write_p2m_entry(struct p2m_dom
 /********************************************/
 /*          NESTED VIRT FUNCTIONS           */
 /********************************************/
+/* Mask of the GFNs covered by an L2 or L3 superpage */
+/* XXX: defines should be moved to a proper header */
+#define GUEST_L2_GFN_MASK   (512 - 1)
+#define GUEST_L3_GFN_MASK   ((512 * 512) - 1)
+
 static void
 nestedhap_fix_p2m(struct vcpu *v, struct p2m_domain *p2m,
                   paddr_t L2_gpa, paddr_t L0_gpa,
@@ -110,10 +115,33 @@ nestedhap_fix_p2m(struct vcpu *v, struct
     /* If this p2m table has been flushed or recycled under our feet,
      * leave it alone.  We'll pick up the right one as we try to
      * vmenter the guest. */
-    if ( p2m->cr3 == nhvm_vcpu_hostcr3(v) )
-        rv = set_p2m_entry(p2m, L2_gpa >> PAGE_SHIFT,
-                           page_to_mfn(maddr_to_page(L0_gpa)),
-                           0 /*4K*/, p2mt, p2ma);
+    if ( p2m->cr3 == nhvm_vcpu_hostcr3(v) ) {
+        unsigned long l2gfn;
+        mfn_t l0mfn;
+        struct page_info *l0pg;
+
+        l2gfn = L2_gpa >> PAGE_SHIFT;
+        l0pg = maddr_to_page(L0_gpa);
+        l0mfn = page_to_mfn(l0pg);
+
+        /* Workaround: Map 2m pages instead of 1gb pages till
+         * spage_to_mfn() and page_to_spage() work with
+         * 1gb pages. */
+        if (page_order == PAGE_ORDER_1G)
+            page_order = PAGE_ORDER_2M;
+
+        switch (page_order) {
+        case PAGE_ORDER_4K:
+            break;
+        case PAGE_ORDER_2M:
+            l2gfn &= ~GUEST_L2_GFN_MASK;
+            l0mfn = _mfn(spage_to_mfn(page_to_spage(l0pg)));
+            break;
+        }
+
+        rv = set_p2m_entry(p2m, l2gfn, l0mfn,
+                           page_order, p2mt, p2ma);
+    }
     p2m_unlock(p2m);
 
     if (rv == 0) {
@@ -185,12 +213,13 @@ nestedhvm_hap_nested_page_fault(struct v
     paddr_t L1_gpa, L0_gpa;
     struct domain *d = v->domain;
     struct p2m_domain *p2m, *nested_p2m;
+    unsigned int page_order_21, page_order_10, page_order_20;
 
     p2m = p2m_get_hostp2m(d); /* L0 p2m */
     nested_p2m = p2m_get_nestedp2m(v, nhvm_vcpu_hostcr3(v));
 
     /* walk the L1 P2M table */
-    rv = nestedhap_walk_L1_p2m(v, L2_gpa, &L1_gpa, NULL);
+    rv = nestedhap_walk_L1_p2m(v, L2_gpa, &L1_gpa, &page_order_21);
 
     /* let caller to handle these two cases */
     switch (rv) {
@@ -206,7 +235,7 @@ nestedhvm_hap_nested_page_fault(struct v
     }
 
     /* ==> we have to walk L0 P2M */
-    rv = nestedhap_walk_L0_p2m(p2m, L1_gpa, &L0_gpa, NULL);
+    rv = nestedhap_walk_L0_p2m(p2m, L1_gpa, &L0_gpa, &page_order_10);
 
     /* let upper level caller to handle these two cases */
     switch (rv) {
@@ -221,8 +250,10 @@ nestedhvm_hap_nested_page_fault(struct v
         break;
     }
 
+    page_order_20 = min(page_order_21, page_order_10);
+
     /* fix p2m_get_pagetable(nested_p2m) */
-    nestedhap_fix_p2m(v, nested_p2m, L2_gpa, L0_gpa, PAGE_ORDER_4K,
+    nestedhap_fix_p2m(v, nested_p2m, L2_gpa, L0_gpa, page_order_20,
                       p2m_ram_rw,
                       p2m_access_rwx /* FIXME: Should use same permission as
                                       * l1 guest */);
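
For reference, below is a small standalone C sketch (not part of the patch) of the arithmetic the change relies on: the nested L2->L0 page order is the minimum of the two walk orders, 1GB orders are capped at 2MB per the workaround above, and the guest frame number is aligned with GUEST_L2_GFN_MASK before set_p2m_entry() is called. The PAGE_ORDER_* values (0/9/18), the example address, and min_order() are assumptions for illustration only, not Xen definitions.

/* Standalone sketch of the page-order / GFN-alignment arithmetic. */
#include <stdio.h>

#define PAGE_SHIFT          12
#define PAGE_ORDER_4K       0    /* 4KB:  2^0  frames (assumed value) */
#define PAGE_ORDER_2M       9    /* 2MB:  2^9  frames (assumed value) */
#define PAGE_ORDER_1G       18   /* 1GB:  2^18 frames (assumed value) */

/* Mask of the GFNs covered by an L2 or L3 superpage, as in the patch. */
#define GUEST_L2_GFN_MASK   (512UL - 1)
#define GUEST_L3_GFN_MASK   ((512UL * 512UL) - 1)

/* Hypothetical helper standing in for Xen's min(). */
static unsigned int min_order(unsigned int a, unsigned int b)
{
    return a < b ? a : b;
}

int main(void)
{
    unsigned long L2_gpa = 0x40321000UL;         /* arbitrary example address */
    unsigned long l2gfn = L2_gpa >> PAGE_SHIFT;  /* 4KB frame number          */

    /* Orders as the two p2m walks might report them (example values). */
    unsigned int page_order_21 = PAGE_ORDER_1G;  /* L2->L1 mapping order */
    unsigned int page_order_10 = PAGE_ORDER_2M;  /* L1->L0 mapping order */

    /* The nested entry can be no larger than either walk allows. */
    unsigned int page_order_20 = min_order(page_order_21, page_order_10);

    /* Workaround from the patch: cap 1GB mappings at 2MB for now. */
    if (page_order_20 == PAGE_ORDER_1G)
        page_order_20 = PAGE_ORDER_2M;

    /* Align the GFN to the superpage boundary before installing the entry. */
    if (page_order_20 == PAGE_ORDER_2M)
        l2gfn &= ~GUEST_L2_GFN_MASK;

    printf("order %u, aligned gfn 0x%lx\n", page_order_20, l2gfn);
    return 0;
}

Built with a plain C compiler this prints "order 9, aligned gfn 0x40200", i.e. the nested entry falls back to a 2MB mapping whose GFN is rounded down to a 512-frame boundary.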