Re: [Xen-devel] [PATCH 08/15] xen/x86: p2m: Use typesafe gfn for the P2M callbacks get_entry and set_entry
On 09/13/2017 08:59 PM, Julien Grall wrote:
> Signed-off-by: Julien Grall <julien.grall@xxxxxxx>
>
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Cc: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
> Cc: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
> Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
> Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
> ---
>  xen/arch/x86/hvm/hvm.c        |  2 +-
>  xen/arch/x86/mm/mem_access.c  | 19 +++++------
>  xen/arch/x86/mm/mem_sharing.c |  4 +--
>  xen/arch/x86/mm/p2m-ept.c     |  6 ++--
>  xen/arch/x86/mm/p2m-pod.c     | 15 +++++----
>  xen/arch/x86/mm/p2m-pt.c      |  6 ++--
>  xen/arch/x86/mm/p2m.c         | 77 +++++++++++++++++++++++--------------------
>  xen/include/asm-x86/p2m.h     |  4 +--
>  8 files changed, 73 insertions(+), 60 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 6cb903def5..53be8c093c 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -1787,7 +1787,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>      {
>          bool_t sve;
>
> -        p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, &sve);
> +        p2m->get_entry(p2m, _gfn(gfn), &p2mt, &p2ma, 0, NULL, &sve);
>
>          if ( !sve && altp2m_vcpu_emulate_ve(curr) )
>          {
> diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
> index 9211fc0abe..948e377e69 100644
> --- a/xen/arch/x86/mm/mem_access.c
> +++ b/xen/arch/x86/mm/mem_access.c
> @@ -66,7 +66,7 @@ static int _p2m_get_mem_access(struct p2m_domain *p2m, gfn_t gfn,
>      }
>
>      gfn_lock(p2m, gfn, 0);
> -    mfn = p2m->get_entry(p2m, gfn_x(gfn), &t, &a, 0, NULL, NULL);
> +    mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL, NULL);
>      gfn_unlock(p2m, gfn, 0);
>
>      if ( mfn_eq(mfn, INVALID_MFN) )
> @@ -142,7 +142,7 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
>                            vm_event_request_t **req_ptr)
>  {
>      struct vcpu *v = current;
> -    unsigned long gfn = gpa >> PAGE_SHIFT;
> +    gfn_t gfn = gaddr_to_gfn(gpa);
>      struct domain *d = v->domain;
>      struct p2m_domain *p2m = NULL;
>      mfn_t mfn;
> @@ -215,7 +215,7 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
>          *req_ptr = req;
>
>          req->reason = VM_EVENT_REASON_MEM_ACCESS;
> -        req->u.mem_access.gfn = gfn;
> +        req->u.mem_access.gfn = gfn_x(gfn);
>          req->u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);
>          if ( npfec.gla_valid )
>          {
> @@ -247,7 +247,7 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
>      unsigned long gfn_l = gfn_x(gfn);

I'd drop gfn_l completely here and just use gfn_x(gfn) in the two places
below where it's used. It would get rid of a line of code in this
function. But I wouldn't hold the patch over this, so if nobody else has
comments it's fine as is.
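That is, something along these lines for the use visible further down in
this hunk (just a sketch, assuming the second user of gfn_l can be
converted the same way and nothing else in the function needs the
unwrapped value):

    gfn_t gfn2 = _gfn(gfn_x(gfn) & mask);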
>      int rc;
>
> -    mfn = ap2m->get_entry(ap2m, gfn_l, &t, &old_a, 0, NULL, NULL);
> +    mfn = ap2m->get_entry(ap2m, gfn, &t, &old_a, 0, NULL, NULL);
>
>      /* Check host p2m if no valid entry in alternate */
>      if ( !mfn_valid(mfn) )
> @@ -264,16 +264,16 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
>          if ( page_order != PAGE_ORDER_4K )
>          {
>              unsigned long mask = ~((1UL << page_order) - 1);
> -            unsigned long gfn2_l = gfn_l & mask;
> +            gfn_t gfn2 = _gfn(gfn_l & mask);
>              mfn_t mfn2 = _mfn(mfn_x(mfn) & mask);
>
> -            rc = ap2m->set_entry(ap2m, gfn2_l, mfn2, page_order, t, old_a, 1);
> +            rc = ap2m->set_entry(ap2m, gfn2, mfn2, page_order, t, old_a, 1);
>              if ( rc )
>                  return rc;
>          }
>      }
>
> -    return ap2m->set_entry(ap2m, gfn_l, mfn, PAGE_ORDER_4K, t, a,
> +    return ap2m->set_entry(ap2m, gfn, mfn, PAGE_ORDER_4K, t, a,
>                            (current->domain != d));

If you need to send V2, could you please also indent the last line so
that '(' is under the 'a' in 'ap2m'? You don't have to, but it'd be a
coding style fix coming in on top of this patch.

>  }
>
> @@ -295,10 +295,9 @@ static int set_mem_access(struct domain *d, struct p2m_domain *p2m,
>          mfn_t mfn;
>          p2m_access_t _a;
>          p2m_type_t t;
> -        unsigned long gfn_l = gfn_x(gfn);
>
> -        mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL);
> -        rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1);
> +        mfn = p2m->get_entry(p2m, gfn, &t, &_a, 0, NULL, NULL);
> +        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, t, a, -1);
>      }
>
>      return rc;
> diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
> index 12fb9cc51f..4c1ace9b21 100644
> --- a/xen/arch/x86/mm/mem_sharing.c
> +++ b/xen/arch/x86/mm/mem_sharing.c
> @@ -1234,7 +1234,7 @@ int relinquish_shared_pages(struct domain *d)
>
>          if ( atomic_read(&d->shr_pages) == 0 )
>              break;
> -        mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL, NULL);
> +        mfn = p2m->get_entry(p2m, _gfn(gfn), &t, &a, 0, NULL, NULL);
>          if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
>          {
>              /* Does not fail with ENOMEM given the DESTROY flag */
> @@ -1243,7 +1243,7 @@ int relinquish_shared_pages(struct domain *d)
>              /* Clear out the p2m entry so no one else may try to
>               * unshare. Must succeed: we just read the old entry and
>               * we hold the p2m lock. */
> -            set_rc = p2m->set_entry(p2m, gfn, _mfn(0), PAGE_ORDER_4K,
> +            set_rc = p2m->set_entry(p2m, _gfn(gfn), _mfn(0), PAGE_ORDER_4K,
>                                      p2m_invalid, p2m_access_rwx, -1);
>              ASSERT(set_rc == 0);
>              count += 0x10;
> diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
> index 23c0518733..dff214cf7b 100644
> --- a/xen/arch/x86/mm/p2m-ept.c
> +++ b/xen/arch/x86/mm/p2m-ept.c
> @@ -674,11 +674,12 @@ bool_t ept_handle_misconfig(uint64_t gpa)
>   * Returns: 0 for success, -errno for failure
>   */
>  static int
> -ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
> +ept_set_entry(struct p2m_domain *p2m, gfn_t gfn_t, mfn_t mfn,
>                unsigned int order, p2m_type_t p2mt, p2m_access_t p2ma,
>                int sve)
>  {
>      ept_entry_t *table, *ept_entry = NULL;
> +    unsigned long gfn = gfn_x(gfn_t);

Should we call this gfn_l, as done above?

>      unsigned long gfn_remainder = gfn;
>      unsigned int i, target = order / EPT_TABLE_ORDER;
>      unsigned long fn_mask = !mfn_eq(mfn, INVALID_MFN) ? (gfn | mfn_x(mfn)) :
>                              gfn;
> @@ -910,11 +911,12 @@ out:
>
>  /* Read ept p2m entries */
>  static mfn_t ept_get_entry(struct p2m_domain *p2m,
> -                           unsigned long gfn, p2m_type_t *t, p2m_access_t* a,
> +                           gfn_t gfn_t, p2m_type_t *t, p2m_access_t* a,
>                             p2m_query_t q, unsigned int *page_order,
>                             bool_t *sve)
>  {
>      ept_entry_t *table =
>          map_domain_page(_mfn(pagetable_get_pfn(p2m_get_pagetable(p2m))));
> +    unsigned long gfn = gfn_x(gfn_t);

gfn_l here too?

>      unsigned long gfn_remainder = gfn;
>      ept_entry_t *ept_entry;
>      u32 index;
> diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
> index eb74e5c01f..c8c8cff014 100644
> --- a/xen/arch/x86/mm/p2m-pod.c
> +++ b/xen/arch/x86/mm/p2m-pod.c
> @@ -543,7 +543,7 @@ p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
>          p2m_type_t t;
>          unsigned int cur_order;
>
> -        p2m->get_entry(p2m, gfn_x(gfn) + i, &t, &a, 0, &cur_order, NULL);
> +        p2m->get_entry(p2m, gfn_add(gfn, i), &t, &a, 0, &cur_order, NULL);
>          n = 1UL << min(order, cur_order);
>          if ( t == p2m_populate_on_demand )
>              pod += n;
> @@ -603,7 +603,7 @@ p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
>          p2m_access_t a;
>          unsigned int cur_order;
>
> -        mfn = p2m->get_entry(p2m, gfn_x(gfn) + i, &t, &a, 0, &cur_order, NULL);
> +        mfn = p2m->get_entry(p2m, gfn_add(gfn, i), &t, &a, 0, &cur_order, NULL);
>          if ( order < cur_order )
>              cur_order = order;
>          n = 1UL << cur_order;
> @@ -717,7 +717,8 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
>          unsigned long k;
>          const struct page_info *page;
>
> -        mfn = p2m->get_entry(p2m, gfn + i, &type, &a, 0, &cur_order, NULL);
> +        mfn = p2m->get_entry(p2m, _gfn(gfn + i), &type, &a, 0,
> +                             &cur_order, NULL);
>
>          /*
>           * Conditions that must be met for superpage-superpage:
> @@ -859,7 +860,9 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
>      for ( i = 0; i < count; i++ )
>      {
>          p2m_access_t a;
> -        mfns[i] = p2m->get_entry(p2m, gfns[i], types + i, &a, 0, NULL, NULL);
> +
> +        mfns[i] = p2m->get_entry(p2m, _gfn(gfns[i]), types + i, &a,
> +                                 0, NULL, NULL);
>          /*
>           * If this is ram, and not a pagetable or from the xen heap, and
>           * probably not mapped elsewhere, map it; otherwise, skip.
> @@ -988,7 +991,7 @@ p2m_pod_emergency_sweep(struct p2m_domain *p2m)
>      for ( i = p2m->pod.reclaim_single; i > 0 ; i-- )
>      {
>          p2m_access_t a;
> -        (void)p2m->get_entry(p2m, i, &t, &a, 0, NULL, NULL);
> +        (void)p2m->get_entry(p2m, _gfn(i), &t, &a, 0, NULL, NULL);
>          if ( p2m_is_ram(t) )
>          {
>              gfns[j] = i;
> @@ -1237,7 +1240,7 @@ guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
>          p2m_access_t a;
>          unsigned int cur_order;
>
> -        p2m->get_entry(p2m, gfn + i, &ot, &a, 0, &cur_order, NULL);
> +        p2m->get_entry(p2m, _gfn(gfn + i), &ot, &a, 0, &cur_order, NULL);
>          n = 1UL << min(order, cur_order);
>          if ( p2m_is_ram(ot) )
>          {
> diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
> index 0e63d6ed11..57878b1886 100644
> --- a/xen/arch/x86/mm/p2m-pt.c
> +++ b/xen/arch/x86/mm/p2m-pt.c
> @@ -479,12 +479,13 @@ int p2m_pt_handle_deferred_changes(uint64_t gpa)
>
>  /* Returns: 0 for success, -errno for failure */
>  static int
> -p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
> +p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_t, mfn_t mfn,
>                   unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma,
>                   int sve)
>  {
>      /* XXX -- this might be able to be faster iff current->domain == d */
>      void *table;
> +    unsigned long gfn = gfn_x(gfn_t);

gfn_l?

>      unsigned long i, gfn_remainder = gfn;
>      l1_pgentry_t *p2m_entry, entry_content;
>      /* Intermediate table to free if we're replacing it with a superpage. */
> @@ -731,11 +732,12 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
>  }
>
>  static mfn_t
> -p2m_pt_get_entry(struct p2m_domain *p2m, unsigned long gfn,
> +p2m_pt_get_entry(struct p2m_domain *p2m, gfn_t gfn_t,
>                   p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
>                   unsigned int *page_order, bool_t *sve)
>  {
>      mfn_t mfn;
> +    unsigned long gfn = gfn_x(gfn_t);

gfn_l?

Other than that, the changes look completely mechanical, so with those
(optional) changes:

Acked-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>


Thanks,
Razvan

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
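A side note on what the typesafe conversion buys: gfn_t is a thin
wrapper whose only job is to catch gfn/mfn/raw-integer mix-ups at
compile time. A simplified sketch of the kind of helpers involved (not
Xen's exact definitions, which come from the TYPE_SAFE machinery and may
reduce to a plain integer depending on build configuration):

    /* Sketch only: struct-wrapped flavour of the typesafe gfn helpers. */
    typedef struct { unsigned long gfn; } gfn_t;

    static inline gfn_t _gfn(unsigned long g)      /* wrap a raw frame number */
    {
        gfn_t t = { g };
        return t;
    }

    static inline unsigned long gfn_x(gfn_t g)     /* unwrap back to a number */
    {
        return g.gfn;
    }

    static inline gfn_t gfn_add(gfn_t g, unsigned long i)
    {
        return _gfn(gfn_x(g) + i);                 /* arithmetic goes via unwrap */
    }

With the wrapper in place, passing a raw unsigned long (or an mfn) where
a gfn_t is expected no longer compiles, which is what the otherwise
mechanical _gfn()/gfn_x() churn in this patch is for.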