adjust p2m interfaces to track page sizes

Add an optional page_order output parameter to gfn_to_mfn_type_p2m(),
to the p2m get_entry() implementations and to the gva_to_gfn /
p2m_ga_to_gfn paging hooks, so that callers can learn the size of the
p2m mapping that backed a translation.  Callers that are not
interested pass NULL.

Signed-off-by: Christoph Egger
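For illustration only (not part of the patch): a minimal sketch of how
a caller might consume the new out-parameter.  The helper below is
hypothetical; gfn_to_mfn_type_p2m(), gdprintk() and the PAGE_ORDER_*
constants are the tree's existing interfaces, and whether page_order
is filled on every lookup path depends on the rest of this series.

    /* Hypothetical caller: translate a gfn and report the size of the
     * p2m mapping backing it.  Callers that do not care keep passing
     * NULL, as every caller touched by this patch does. */
    static void p2m_dump_translation(struct p2m_domain *p2m,
                                     unsigned long gfn)
    {
        p2m_type_t t;
        p2m_access_t a;
        unsigned int page_order = PAGE_ORDER_4K; /* 0 = 4K, 9 = 2M, 18 = 1G */
        mfn_t mfn;

        /* A non-NULL page_order asks the walker for the order of the
         * entry that satisfied the lookup; pre-initialise it in case a
         * path does not fill it. */
        mfn = gfn_to_mfn_type_p2m(p2m, gfn, &t, &a, p2m_query, &page_order);
        if ( mfn_x(mfn) != INVALID_MFN )
            gdprintk(XENLOG_INFO, "gfn %#lx -> mfn %#lx, order %u\n",
                     gfn, mfn_x(mfn), page_order);
    }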
diff -r ec93fa9caebb -r 61ab53a4af83 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1220,7 +1220,7 @@ int hvm_hap_nested_page_fault(unsigned l
     }
 
     p2m = p2m_get_hostp2m(v->domain);
-    mfn = gfn_to_mfn_type_p2m(p2m, gfn, &p2mt, &p2ma, p2m_guest);
+    mfn = gfn_to_mfn_type_p2m(p2m, gfn, &p2mt, &p2ma, p2m_guest, NULL);
 
     /* Check access permissions first, then handle faults */
     if ( access_valid && (mfn_x(mfn) != INVALID_MFN) )
diff -r ec93fa9caebb -r 61ab53a4af83 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1160,7 +1160,7 @@ static void svm_do_nested_pgfault(struct
         p2m = p2m_get_p2m(v);
         _d.gpa = gpa;
         _d.qualification = 0;
-        _d.mfn = mfn_x(gfn_to_mfn_type_p2m(p2m, gfn, &_d.p2mt, &p2ma, p2m_query));
+        _d.mfn = mfn_x(gfn_to_mfn_type_p2m(p2m, gfn, &_d.p2mt, &p2ma, p2m_query, NULL));
 
         __trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
     }
@@ -1180,7 +1180,7 @@ static void svm_do_nested_pgfault(struct
 
     if ( p2m == NULL )
        p2m = p2m_get_p2m(v);
     /* Everything else is an error. */
-    mfn = gfn_to_mfn_type_p2m(p2m, gfn, &p2mt, &p2ma, p2m_guest);
+    mfn = gfn_to_mfn_type_p2m(p2m, gfn, &p2mt, &p2ma, p2m_guest, NULL);
     gdprintk(XENLOG_ERR, "SVM violation gpa %#"PRIpaddr", mfn %#lx, type %i\n",
              gpa, mfn_x(mfn), p2mt);
diff -r ec93fa9caebb -r 61ab53a4af83 xen/arch/x86/mm/guest_walk.c
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -95,7 +95,7 @@ static inline void *map_domain_gfn(struc
     p2m_access_t a;
 
     /* Translate the gfn, unsharing if shared */
-    *mfn = gfn_to_mfn_type_p2m(p2m, gfn_x(gfn), p2mt, &a, p2m_unshare);
+    *mfn = gfn_to_mfn_type_p2m(p2m, gfn_x(gfn), p2mt, &a, p2m_unshare, NULL);
     if ( p2m_is_paging(*p2mt) )
     {
         ASSERT(!p2m_is_nestedp2m(p2m));
diff -r ec93fa9caebb -r 61ab53a4af83 xen/arch/x86/mm/hap/guest_walk.c
--- a/xen/arch/x86/mm/hap/guest_walk.c
+++ b/xen/arch/x86/mm/hap/guest_walk.c
@@ -40,15 +40,17 @@
 #include <asm/p2m.h>
 
 unsigned long hap_gva_to_gfn(GUEST_PAGING_LEVELS)(
-    struct vcpu *v, struct p2m_domain *p2m, unsigned long gva, uint32_t *pfec)
+    struct vcpu *v, struct p2m_domain *p2m, unsigned long gva, uint32_t *pfec,
+    unsigned int *page_order)
 {
     unsigned long cr3 = v->arch.hvm_vcpu.guest_cr[3];
-    return hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(v, p2m, cr3, gva, pfec);
+    return hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(v, p2m, cr3, gva, pfec,
+                                                  page_order);
 }
 
 unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
     struct vcpu *v, struct p2m_domain *p2m, unsigned long cr3,
-    paddr_t ga, uint32_t *pfec)
+    paddr_t ga, uint32_t *pfec, unsigned int *page_order)
 {
     uint32_t missing;
     mfn_t top_mfn;
@@ -59,7 +61,7 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PA
 
     /* Get the top-level table's MFN */
     top_mfn = gfn_to_mfn_type_p2m(p2m, cr3 >> PAGE_SHIFT,
-                                  &p2mt, &p2ma, p2m_unshare);
+                                  &p2mt, &p2ma, p2m_unshare, NULL);
     if ( p2m_is_paging(p2mt) )
     {
         ASSERT(!p2m_is_nestedp2m(p2m));
@@ -92,7 +94,7 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PA
     if ( missing == 0 )
     {
         gfn_t gfn = guest_l1e_get_gfn(gw.l1e);
-        gfn_to_mfn_type_p2m(p2m, gfn_x(gfn), &p2mt, &p2ma, p2m_unshare);
+        gfn_to_mfn_type_p2m(p2m, gfn_x(gfn), &p2mt, &p2ma, p2m_unshare, NULL);
         if ( p2m_is_paging(p2mt) )
         {
             ASSERT(!p2m_is_nestedp2m(p2m));
diff -r ec93fa9caebb -r 61ab53a4af83 xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -890,14 +890,15 @@ hap_write_p2m_entry(struct vcpu *v, unsi
 }
 
 static unsigned long hap_gva_to_gfn_real_mode(
-    struct vcpu *v, struct p2m_domain *p2m, unsigned long gva, uint32_t *pfec)
+    struct vcpu *v, struct p2m_domain *p2m, unsigned long gva, uint32_t *pfec,
+    unsigned int *page_order)
 {
     return ((paddr_t)gva >> PAGE_SHIFT);
 }
 
 static unsigned long hap_p2m_ga_to_gfn_real_mode(
     struct vcpu *v, struct p2m_domain *p2m, unsigned long cr3,
-    paddr_t ga, uint32_t *pfec)
+    paddr_t ga, uint32_t *pfec, unsigned int *page_order)
 {
     return (ga >> PAGE_SHIFT);
 }
diff -r ec93fa9caebb -r 61ab53a4af83 xen/arch/x86/mm/hap/nested_hap.c
--- a/xen/arch/x86/mm/hap/nested_hap.c
+++ b/xen/arch/x86/mm/hap/nested_hap.c
@@ -99,7 +99,7 @@ nestedp2m_write_p2m_entry(struct p2m_dom
 static void
 nestedhap_fix_p2m(struct vcpu *v, struct p2m_domain *p2m,
                   paddr_t L2_gpa, paddr_t L0_gpa,
-                  p2m_type_t p2mt, p2m_access_t p2ma)
+                  unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
 {
     int rv = 1;
     ASSERT(p2m);
@@ -129,14 +129,16 @@
  * value tells the upper level what to do. */
 static int
-nestedhap_walk_L0_p2m(struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa)
+nestedhap_walk_L0_p2m(struct p2m_domain *p2m,
+    paddr_t L1_gpa, paddr_t *L0_gpa, unsigned int *page_order)
 {
     mfn_t mfn;
     p2m_type_t p2mt;
     p2m_access_t p2ma;
 
     /* walk L0 P2M table */
-    mfn = gfn_to_mfn_type_p2m(p2m, L1_gpa >> PAGE_SHIFT, &p2mt, &p2ma, p2m_query);
+    mfn = gfn_to_mfn_type_p2m(p2m, L1_gpa >> PAGE_SHIFT,
+                              &p2mt, &p2ma, p2m_query, page_order);
 
     if ( p2m_is_paging(p2mt) || p2m_is_shared(p2mt) || !p2m_is_ram(p2mt) )
         return NESTEDHVM_PAGEFAULT_ERROR;
 
@@ -153,7 +155,8 @@
  * L1_gpa. The result value tells what to do next. */
 static int
-nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa)
+nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
+    unsigned int *page_order)
 {
     uint32_t pfec;
     unsigned long nested_cr3, gfn;
 
@@ -161,7 +164,7 @@ nestedhap_walk_L1_p2m(struct vcpu *v, pa
     nested_cr3 = nhvm_vcpu_hostcr3(v);
 
     /* Walk the guest-supplied NPT table, just as if it were a pagetable */
-    gfn = paging_ga_to_gfn_cr3(v, nested_cr3, L2_gpa, &pfec);
+    gfn = paging_ga_to_gfn_cr3(v, nested_cr3, L2_gpa, &pfec, page_order);
 
     if ( gfn == INVALID_GFN )
         return NESTEDHVM_PAGEFAULT_INJECT;
@@ -187,7 +190,7 @@ nestedhvm_hap_nested_page_fault(struct v
     nested_p2m = p2m_get_nestedp2m(v, nhvm_vcpu_hostcr3(v));
 
     /* walk the L1 P2M table */
-    rv = nestedhap_walk_L1_p2m(v, L2_gpa, &L1_gpa);
+    rv = nestedhap_walk_L1_p2m(v, L2_gpa, &L1_gpa, NULL);
 
     /* let caller to handle these two cases */
     switch (rv) {
@@ -203,7 +206,7 @@ nestedhvm_hap_nested_page_fault(struct v
     }
 
     /* ==> we have to walk L0 P2M */
-    rv = nestedhap_walk_L0_p2m(p2m, L1_gpa, &L0_gpa);
+    rv = nestedhap_walk_L0_p2m(p2m, L1_gpa, &L0_gpa, NULL);
 
     /* let upper level caller to handle these two cases */
     switch (rv) {
@@ -219,7 +222,7 @@ nestedhvm_hap_nested_page_fault(struct v
     }
 
     /* fix p2m_get_pagetable(nested_p2m) */
-    nestedhap_fix_p2m(v, nested_p2m, L2_gpa, L0_gpa,
+    nestedhap_fix_p2m(v, nested_p2m, L2_gpa, L0_gpa, PAGE_ORDER_4K,
                       p2m_ram_rw,
                       p2m_access_rwx /* FIXME: Should use same permission as l1 guest */);
 
diff -r ec93fa9caebb -r 61ab53a4af83 xen/arch/x86/mm/hap/private.h
--- a/xen/arch/x86/mm/hap/private.h
+++ b/xen/arch/x86/mm/hap/private.h
@@ -28,24 +28,27 @@
 unsigned long hap_gva_to_gfn_2_levels(struct vcpu *v,
                                       struct p2m_domain *p2m,
                                       unsigned long gva,
-                                      uint32_t *pfec);
+                                      uint32_t *pfec,
+                                      unsigned int *page_order);
 unsigned long hap_gva_to_gfn_3_levels(struct vcpu *v,
                                       struct p2m_domain *p2m,
                                       unsigned long gva,
-                                      uint32_t *pfec);
+                                      uint32_t *pfec,
+                                      unsigned int *page_order);
 unsigned long hap_gva_to_gfn_4_levels(struct vcpu *v,
                                       struct p2m_domain *p2m,
                                       unsigned long gva,
-                                      uint32_t *pfec);
+                                      uint32_t *pfec,
+                                      unsigned int *page_order);
 
 unsigned long hap_p2m_ga_to_gfn_2_levels(struct vcpu *v,
     struct p2m_domain *p2m, unsigned long cr3,
-    paddr_t ga, uint32_t *pfec);
+    paddr_t ga, uint32_t *pfec, unsigned int *page_order);
 unsigned long hap_p2m_ga_to_gfn_3_levels(struct vcpu *v,
     struct p2m_domain *p2m, unsigned long cr3,
-    paddr_t ga, uint32_t *pfec);
+    paddr_t ga, uint32_t *pfec, unsigned int *page_order);
 unsigned long hap_p2m_ga_to_gfn_4_levels(struct vcpu *v,
     struct p2m_domain *p2m, unsigned long cr3,
-    paddr_t ga, uint32_t *pfec);
+    paddr_t ga, uint32_t *pfec, unsigned int *page_order);
 
 #endif /* __HAP_PRIVATE_H__ */
diff -r ec93fa9caebb -r 61ab53a4af83 xen/arch/x86/mm/p2m-ept.c
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -509,7 +509,7 @@ out:
 /* Read ept p2m entries */
 static mfn_t ept_get_entry(struct p2m_domain *p2m,
                            unsigned long gfn, p2m_type_t *t, p2m_access_t* a,
-                           p2m_query_t q)
+                           p2m_query_t q, unsigned int *page_order)
 {
     struct domain *d = p2m->domain;
     ept_entry_t *table = map_domain_page(ept_get_asr(d));
diff -r ec93fa9caebb -r 61ab53a4af83 xen/arch/x86/mm/p2m-pt.c
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -503,7 +503,8 @@ static int p2m_pod_check_and_populate(st
 /* Read the current domain's p2m table (through the linear mapping). */
 static mfn_t p2m_gfn_to_mfn_current(struct p2m_domain *p2m,
                                     unsigned long gfn, p2m_type_t *t,
-                                    p2m_access_t *a, p2m_query_t q)
+                                    p2m_access_t *a, p2m_query_t q,
+                                    unsigned int *page_order)
 {
     mfn_t mfn = _mfn(INVALID_MFN);
     p2m_type_t p2mt = p2m_mmio_dm;
@@ -676,7 +677,8 @@
 
 static mfn_t
 p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn,
-               p2m_type_t *t, p2m_access_t *a, p2m_query_t q)
+               p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
+               unsigned int *page_order)
 {
     mfn_t mfn;
     paddr_t addr = ((paddr_t)gfn) << PAGE_SHIFT;
@@ -699,7 +701,7 @@ p2m_gfn_to_mfn(struct p2m_domain *p2m, u
 
     /* Use the fast path with the linear mapping if we can */
     if ( p2m == p2m_get_hostp2m(current->domain) )
-        return p2m_gfn_to_mfn_current(p2m, gfn, t, a, q);
+        return p2m_gfn_to_mfn_current(p2m, gfn, t, a, q, page_order);
 
     mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
 
diff -r ec93fa9caebb -r 61ab53a4af83 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -307,7 +307,7 @@ void p2m_teardown(struct p2m_domain *p2m
 #ifdef __x86_64__
     for ( gfn=0; gfn < p2m->max_mapped_pfn; gfn++ )
     {
-        mfn = gfn_to_mfn_type_p2m(p2m, gfn, &t, &a, p2m_query);
+        mfn = gfn_to_mfn_type_p2m(p2m, gfn, &t, &a, p2m_query, NULL);
         if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
         {
             ASSERT(!p2m_is_nestedp2m(p2m));
@@ -372,7 +372,7 @@ p2m_remove_page(struct p2m_domain *p2m,
     {
         for ( i = 0; i < (1UL << page_order); i++ )
         {
-            mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, p2m_query);
+            mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, p2m_query, NULL);
             if ( !p2m_is_grant(t) )
                 set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
             ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
@@ -877,7 +877,7 @@ void p2m_mem_access_check(unsigned long
 
     /* First, handle rx2rw conversion automatically */
     p2m_lock(p2m);
-    mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, p2m_query);
+    mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, p2m_query, NULL);
 
     if ( access_w && p2ma == p2m_access_rx2rw )
     {
@@ -1035,7 +1035,7 @@ int p2m_get_mem_access(struct domain *d,
         return 0;
     }
 
-    mfn = p2m->get_entry(p2m, pfn, &t, &a, p2m_query);
+    mfn = p2m->get_entry(p2m, pfn, &t, &a, p2m_query, NULL);
     if ( mfn_x(mfn) == INVALID_MFN )
         return -ESRCH;
 
@@ -1182,9 +1182,8 @@ p2m_get_p2m(struct vcpu *v)
     return p2m_get_nestedp2m(v, nhvm_vcpu_hostcr3(v));
 }
 
-unsigned long paging_gva_to_gfn(struct vcpu *v,
-                                unsigned long va,
-                                uint32_t *pfec)
+unsigned long paging_gva_to_gfn1(struct vcpu *v, unsigned long va,
+    uint32_t *pfec, unsigned int *page_order)
 {
     struct p2m_domain *hostp2m = p2m_get_hostp2m(v->domain);
     const struct paging_mode *hostmode = paging_get_hostmode(v);
@@ -1201,14 +1200,14 @@ unsigned long paging_gva_to_gfn(struct v
         /* translate l2 guest va into l2 guest gfn */
         p2m = p2m_get_nestedp2m(v, ncr3);
         mode = paging_get_nestedmode(v);
-        gfn = mode->gva_to_gfn(v, p2m, va, pfec);
+        gfn = mode->gva_to_gfn(v, p2m, va, pfec, page_order);
 
         /* translate l2 guest gfn into l1 guest gfn */
         return hostmode->p2m_ga_to_gfn(v, hostp2m, ncr3,
-            gfn << PAGE_SHIFT, pfec);
+            gfn << PAGE_SHIFT, pfec, page_order);
     }
 
-    return hostmode->gva_to_gfn(v, hostp2m, va, pfec);
+    return hostmode->gva_to_gfn(v, hostp2m, va, pfec, page_order);
 }
 
 /*
diff -r ec93fa9caebb -r 61ab53a4af83 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3749,7 +3749,7 @@ sh_invlpg(struct vcpu *v, unsigned long
 
 static unsigned long
 sh_gva_to_gfn(struct vcpu *v, struct p2m_domain *p2m,
-    unsigned long va, uint32_t *pfec)
+    unsigned long va, uint32_t *pfec, unsigned int *page_order)
 /* Called to translate a guest virtual address to what the *guest*
  * pagetables would map it to. */
 {
@@ -4799,7 +4799,7 @@ static mfn_t emulate_gva_to_mfn(struct v
     uint32_t pfec = PFEC_page_present | PFEC_write_access;
 
     /* Translate the VA to a GFN */
-    gfn = sh_gva_to_gfn(v, NULL, vaddr, &pfec);
+    gfn = sh_gva_to_gfn(v, NULL, vaddr, &pfec, NULL);
     if ( gfn == INVALID_GFN )
     {
         if ( is_hvm_vcpu(v) )
diff -r ec93fa9caebb -r 61ab53a4af83 xen/arch/x86/mm/shadow/types.h
--- a/xen/arch/x86/mm/shadow/types.h
+++ b/xen/arch/x86/mm/shadow/types.h
@@ -193,9 +193,11 @@ static inline shadow_l4e_t shadow_l4e_fr
 
 /* Override gfn_to_mfn to work with gfn_t */
 #undef gfn_to_mfn_query
-#define gfn_to_mfn_query(d, g, t) gfn_to_mfn_type((d), gfn_x(g), (t), p2m_query)
+#define gfn_to_mfn_query(d, g, t) \
+    _gfn_to_mfn_type((d), gfn_x(g), (t), p2m_query, NULL)
 #undef gfn_to_mfn_guest
-#define gfn_to_mfn_guest(d, g, t) gfn_to_mfn_type((d), gfn_x(g), (t), p2m_guest)
+#define gfn_to_mfn_guest(d, g, t) \
+    _gfn_to_mfn_type((d), gfn_x(g), (t), p2m_guest, NULL)
 
 /* The shadow types needed for the various levels. */
 
diff -r ec93fa9caebb -r 61ab53a4af83 xen/include/asm-x86/guest_pt.h
--- a/xen/include/asm-x86/guest_pt.h
+++ b/xen/include/asm-x86/guest_pt.h
@@ -53,7 +53,8 @@ gfn_to_paddr(gfn_t gfn)
 
 /* Override gfn_to_mfn to work with gfn_t */
 #undef gfn_to_mfn
-#define gfn_to_mfn(d, g, t) gfn_to_mfn_type((d), gfn_x(g), (t), p2m_alloc)
+#define gfn_to_mfn(d, g, t) \
+    _gfn_to_mfn_type((d), gfn_x(g), (t), p2m_alloc, NULL)
 
 
 /* Types of the guest's page tables and access functions for them */
diff -r ec93fa9caebb -r 61ab53a4af83 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -233,7 +233,8 @@ struct p2m_domain {
                                        unsigned long gfn,
                                        p2m_type_t *p2mt,
                                        p2m_access_t *p2ma,
-                                       p2m_query_t q);
+                                       p2m_query_t q,
+                                       unsigned int *page_order);
     void               (*change_entry_type_global)(struct p2m_domain *p2m,
                                                    p2m_type_t ot,
                                                    p2m_type_t nt);
@@ -306,7 +307,8 @@ struct p2m_domain *p2m_get_p2m(struct vc
  * main one. */
 static inline mfn_t
 gfn_to_mfn_type_p2m(struct p2m_domain *p2m, unsigned long gfn,
-                    p2m_type_t *t, p2m_access_t *a, p2m_query_t q)
+                    p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
+                    unsigned int *page_order)
 {
     mfn_t mfn;
 
@@ -318,14 +320,14 @@ gfn_to_mfn_type_p2m(struct p2m_domain *p
         return _mfn(gfn);
     }
 
-    mfn = p2m->get_entry(p2m, gfn, t, a, q);
+    mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order);
 
 #ifdef __x86_64__
     if ( q == p2m_unshare && p2m_is_shared(*t) )
     {
         ASSERT(!p2m_is_nestedp2m(p2m));
         mem_sharing_unshare_page(p2m->domain, gfn, 0);
-        mfn = p2m->get_entry(p2m, gfn, t, a, q);
+        mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order);
     }
 #endif
 
@@ -344,22 +346,26 @@ gfn_to_mfn_type_p2m(struct p2m_domain *p
 
 
 /* General conversion function from gfn to mfn */
-static inline mfn_t gfn_to_mfn_type(struct domain *d,
+static inline mfn_t _gfn_to_mfn_type(struct domain *d,
                                     unsigned long gfn, p2m_type_t *t,
-                                    p2m_query_t q)
+                                    p2m_query_t q, unsigned int *page_order)
 {
     p2m_access_t a;
-    return gfn_to_mfn_type_p2m(p2m_get_hostp2m(d), gfn, t, &a, q);
+    return gfn_to_mfn_type_p2m(p2m_get_hostp2m(d), gfn, t, &a, q, page_order);
 }
 
 /* Syntactic sugar: most callers will use one of these.
 * N.B. gfn_to_mfn_query() is the _only_ one guaranteed not to take the
 * p2m lock; none of the others can be called with the p2m or paging
 * lock held. */
-#define gfn_to_mfn(d, g, t) gfn_to_mfn_type((d), (g), (t), p2m_alloc)
-#define gfn_to_mfn_query(d, g, t) gfn_to_mfn_type((d), (g), (t), p2m_query)
-#define gfn_to_mfn_guest(d, g, t) gfn_to_mfn_type((d), (g), (t), p2m_guest)
-#define gfn_to_mfn_unshare(d, g, t) gfn_to_mfn_type((d), (g), (t), p2m_unshare)
+#define gfn_to_mfn(d, g, t) \
+    _gfn_to_mfn_type((d), (g), (t), p2m_alloc, NULL)
+#define gfn_to_mfn_query(d, g, t) \
+    _gfn_to_mfn_type((d), (g), (t), p2m_query, NULL)
+#define gfn_to_mfn_guest(d, g, t) \
+    _gfn_to_mfn_type((d), (g), (t), p2m_guest, NULL)
+#define gfn_to_mfn_unshare(d, g, t) \
+    _gfn_to_mfn_type((d), (g), (t), p2m_unshare, NULL)
 
 /* Compatibility function exporting the old untyped interface */
 static inline unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
diff -r ec93fa9caebb -r 61ab53a4af83 xen/include/asm-x86/paging.h
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -111,11 +111,13 @@ struct paging_mode {
     unsigned long (*gva_to_gfn            )(struct vcpu *v,
                                             struct p2m_domain *p2m,
                                             unsigned long va,
-                                            uint32_t *pfec);
+                                            uint32_t *pfec,
+                                            unsigned int *page_order);
     unsigned long (*p2m_ga_to_gfn         )(struct vcpu *v,
                                             struct p2m_domain *p2m,
                                             unsigned long cr3,
-                                            paddr_t ga, uint32_t *pfec);
+                                            paddr_t ga, uint32_t *pfec,
+                                            unsigned int *page_order);
     void          (*update_cr3            )(struct vcpu *v, int do_locking);
     void          (*update_paging_modes   )(struct vcpu *v);
     void          (*write_p2m_entry       )(struct vcpu *v, unsigned long gfn,
@@ -262,23 +264,29 @@ static inline int paging_invlpg(struct v
 * walking the tables. The caller should set the PFEC_page_present bit
 * in pfec[0]; in the failure case, that bit will be cleared if appropriate. */
 #define INVALID_GFN (-1UL)
-unsigned long paging_gva_to_gfn(struct vcpu *v,
+unsigned long paging_gva_to_gfn1(struct vcpu *v,
                                 unsigned long va,
-                                uint32_t *pfec);
+                                uint32_t *pfec,
+                                unsigned int *page_order);
+
+#define paging_gva_to_gfn(v, va, pfec) \
+    paging_gva_to_gfn1((v), (va), (pfec), NULL)
 
 /* Translate a guest address using a particular CR3 value. This is used
 * to by nested HAP code, to walk the guest-supplied NPT tables as if
 * they were pagetables.
 * Use 'paddr_t' for the guest address so it won't overflow when
-* guest or nested guest is in 32bit PAE mode.
+* l1 or l2 guest is in 32bit PAE mode.
 */
static inline unsigned long paging_ga_to_gfn_cr3(struct vcpu *v,
                                                 unsigned long cr3,
                                                 paddr_t ga,
-                                                 uint32_t *pfec)
+                                                 uint32_t *pfec,
+                                                 unsigned int *page_order)
 {
     struct p2m_domain *p2m = v->domain->arch.p2m;
-    return paging_get_hostmode(v)->p2m_ga_to_gfn(v, p2m, cr3, ga, pfec);
+    return paging_get_hostmode(v)->p2m_ga_to_gfn(v, p2m, cr3, ga, pfec,
+        page_order);
 }
 
 /* Update all the things that are derived from the guest's CR3.