Re: [Xen-devel] [PATCH v5] x86/p2m: use large pages for MMIO mappings
On Mon, 2016-01-25 at 09:18 -0700, Jan Beulich wrote:
> When mapping large BARs (e.g. the frame buffer of a graphics card) the
> overhead of establishing such mappings using only 4k pages has,
> particularly after the XSA-125 fix, become unacceptable. Alter the
> XEN_DOMCTL_memory_mapping semantics once again, so that there's no
> longer a fixed amount of guest frames that represents the upper limit
> of what a single invocation can map. Instead bound execution time by
> limiting the number of iterations (regardless of page size).
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Tools and domctl API side: Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>

> ---
> Open issues (perhaps for subsequent changes):
> - ARM side unimplemented (and hence libxc for now made cope with both
>   models), the main issue (besides my inability to test any change
>   there) being the many internal uses of map_mmio_regions())
> - iommu_{,un}map_page() interfaces don't support "order" (hence
>   mmio_order() for now returns zero when !iommu_hap_pt_share, which in
>   particular means the AMD side isn't being taken care of just yet, but
>   note that this also has the intended effect of suppressing non-zero
>   order mappings in the shadow mode case)
> ---
> v5: Refine comment in domctl.h.
> v4: Move cleanup duty entirely to the caller of the hypercall. Move
>     return value description to from commit message to domctl.h.
> v3: Re-base on top of "x86/hvm: fold opt_hap_{2mb,1gb} into
>     hap_capabilities". Extend description to spell out new return value
>     meaning. Add a couple of code comments. Use PAGE_ORDER_4K instead
>     of literal 0. Take into consideration r/o MMIO pages.
> v2: Produce valid entries for large p2m_mmio_direct mappings in
>     p2m_pt_set_entry(). Don't open code iommu_use_hap_pt() in
>     mmio_order(). Update function comment of set_typed_p2m_entry() and
>     clear_mmio_p2m_entry(). Use PRI_mfn. Add ASSERT()s to
>     {,un}map_mmio_regions() to detect otherwise endless loops.
> 
> --- a/tools/libxc/xc_domain.c
> +++ b/tools/libxc/xc_domain.c
> @@ -2174,7 +2174,7 @@ int xc_domain_memory_mapping(
>  {
>      DECLARE_DOMCTL;
>      xc_dominfo_t info;
> -    int ret = 0, err;
> +    int ret = 0, rc;
>      unsigned long done = 0, nr, max_batch_sz;
> 
>      if ( xc_domain_getinfo(xch, domid, 1, &info) != 1 ||
> @@ -2199,19 +2199,24 @@ int xc_domain_memory_mapping(
>          domctl.u.memory_mapping.nr_mfns = nr;
>          domctl.u.memory_mapping.first_gfn = first_gfn + done;
>          domctl.u.memory_mapping.first_mfn = first_mfn + done;
> -        err = do_domctl(xch, &domctl);
> -        if ( err && errno == E2BIG )
> +        rc = do_domctl(xch, &domctl);
> +        if ( rc < 0 && errno == E2BIG )
>          {
>              if ( max_batch_sz <= 1 )
>                  break;
>              max_batch_sz >>= 1;
>              continue;
>          }
> +        if ( rc > 0 )
> +        {
> +            done += rc;
> +            continue;
> +        }
>          /* Save the first error... */
>          if ( !ret )
> -            ret = err;
> +            ret = rc;
>          /* .. and ignore the rest of them when removing. */
> -        if ( err && add_mapping != DPCI_REMOVE_MAPPING )
> +        if ( rc && add_mapping != DPCI_REMOVE_MAPPING )
>              break;
> 
>          done += nr;
> --- a/xen/arch/x86/domain_build.c
> +++ b/xen/arch/x86/domain_build.c
> @@ -436,7 +436,8 @@ static __init void pvh_add_mem_mapping(s
>          else
>              a = p2m_access_rw;
> 
> -        if ( (rc = set_mmio_p2m_entry(d, gfn + i, _mfn(mfn + i), a)) )
> +        if ( (rc = set_mmio_p2m_entry(d, gfn + i, _mfn(mfn + i),
> +                                      PAGE_ORDER_4K, a)) )
>              panic("pvh_add_mem_mapping: gfn:%lx mfn:%lx i:%ld rc:%d\n",
>                    gfn, mfn, i, rc);
>          if ( !(i & 0xfffff) )
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -2491,7 +2491,7 @@ static int vmx_alloc_vlapic_mapping(stru
>      share_xen_page_with_guest(pg, d, XENSHARE_writable);
>      d->arch.hvm_domain.vmx.apic_access_mfn = mfn;
>      set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), _mfn(mfn),
> -                       p2m_get_hostp2m(d)->default_access);
> +                       PAGE_ORDER_4K, p2m_get_hostp2m(d)->default_access);
> 
>      return 0;
>  }
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -899,48 +899,62 @@ void p2m_change_type_range(struct domain
>      p2m_unlock(p2m);
>  }
> 
> -/* Returns: 0 for success, -errno for failure */
> +/*
> + * Returns:
> + *    0        for success
> + *    -errno   for failure
> + *    order+1  for caller to retry with order (guaranteed smaller than
> + *             the order value passed in)
> + */
>  static int set_typed_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
> -                               p2m_type_t gfn_p2mt, p2m_access_t access)
> +                               unsigned int order, p2m_type_t gfn_p2mt,
> +                               p2m_access_t access)
>  {
>      int rc = 0;
>      p2m_access_t a;
>      p2m_type_t ot;
>      mfn_t omfn;
> +    unsigned int cur_order = 0;
>      struct p2m_domain *p2m = p2m_get_hostp2m(d);
> 
>      if ( !paging_mode_translate(d) )
>          return -EIO;
> 
> -    gfn_lock(p2m, gfn, 0);
> -    omfn = p2m->get_entry(p2m, gfn, &ot, &a, 0, NULL, NULL);
> +    gfn_lock(p2m, gfn, order);
> +    omfn = p2m->get_entry(p2m, gfn, &ot, &a, 0, &cur_order, NULL);
> +    if ( cur_order < order )
> +    {
> +        gfn_unlock(p2m, gfn, order);
> +        return cur_order + 1;
> +    }
>      if ( p2m_is_grant(ot) || p2m_is_foreign(ot) )
>      {
> -        gfn_unlock(p2m, gfn, 0);
> +        gfn_unlock(p2m, gfn, order);
>          domain_crash(d);
>          return -ENOENT;
>      }
>      else if ( p2m_is_ram(ot) )
>      {
> +        unsigned long i;
> +
>          ASSERT(mfn_valid(omfn));
> -        set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
> +        for ( i = 0; i < (1UL << order); ++i )
> +            set_gpfn_from_mfn(mfn_x(omfn) + i, INVALID_M2P_ENTRY);
>      }
> 
>      P2M_DEBUG("set %d %lx %lx\n", gfn_p2mt, gfn, mfn_x(mfn));
> -    rc = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, gfn_p2mt,
> -                       access);
> +    rc = p2m_set_entry(p2m, gfn, mfn, order, gfn_p2mt, access);
>      if ( rc )
> -        gdprintk(XENLOG_ERR,
> -                 "p2m_set_entry failed! mfn=%08lx rc:%d\n",
> -                 mfn_x(get_gfn_query_unlocked(p2m->domain, gfn, &ot)), rc);
> +        gdprintk(XENLOG_ERR, "p2m_set_entry: %#lx:%u -> %d (0x%"PRI_mfn")\n",
> +                 gfn, order, rc, mfn_x(mfn));
>      else if ( p2m_is_pod(ot) )
>      {
>          pod_lock(p2m);
> -        p2m->pod.entry_count--;
> +        p2m->pod.entry_count -= 1UL << order;
>          BUG_ON(p2m->pod.entry_count < 0);
>          pod_unlock(p2m);
>      }
> -    gfn_unlock(p2m, gfn, 0);
> +    gfn_unlock(p2m, gfn, order);
> 
>      return rc;
>  }
> @@ -949,14 +963,21 @@ static int set_typed_p2m_entry(struct do
>  static int set_foreign_p2m_entry(struct domain *d, unsigned long gfn,
>                                   mfn_t mfn)
>  {
> -    return set_typed_p2m_entry(d, gfn, mfn, p2m_map_foreign,
> +    return set_typed_p2m_entry(d, gfn, mfn, PAGE_ORDER_4K, p2m_map_foreign,
>                                 p2m_get_hostp2m(d)->default_access);
>  }
> 
>  int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
> -                       p2m_access_t access)
> +                       unsigned int order, p2m_access_t access)
>  {
> -    return set_typed_p2m_entry(d, gfn, mfn, p2m_mmio_direct, access);
> +    if ( order &&
> +         rangeset_overlaps_range(mmio_ro_ranges, mfn_x(mfn),
> +                                 mfn_x(mfn) + (1UL << order) - 1) &&
> +         !rangeset_contains_range(mmio_ro_ranges, mfn_x(mfn),
> +                                  mfn_x(mfn) + (1UL << order) - 1) )
> +        return order;
> +
> +    return set_typed_p2m_entry(d, gfn, mfn, order, p2m_mmio_direct, access);
>  }
> 
>  int set_identity_p2m_entry(struct domain *d, unsigned long gfn,
> @@ -1009,20 +1030,33 @@ int set_identity_p2m_entry(struct domain
>      return ret;
>  }
> 
> -/* Returns: 0 for success, -errno for failure */
> -int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
> +/*
> + * Returns:
> + *    0        for success
> + *    -errno   for failure
> + *    order+1  for caller to retry with order (guaranteed smaller than
> + *             the order value passed in)
> + */
> +int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
> +                         unsigned int order)
>  {
>      int rc = -EINVAL;
>      mfn_t actual_mfn;
>      p2m_access_t a;
>      p2m_type_t t;
> +    unsigned int cur_order = 0;
>      struct p2m_domain *p2m = p2m_get_hostp2m(d);
> 
>      if ( !paging_mode_translate(d) )
>          return -EIO;
> 
> -    gfn_lock(p2m, gfn, 0);
> -    actual_mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL, NULL);
> +    gfn_lock(p2m, gfn, order);
> +    actual_mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, &cur_order, NULL);
> +    if ( cur_order < order )
> +    {
> +        rc = cur_order + 1;
> +        goto out;
> +    }
> 
>      /* Do not use mfn_valid() here as it will usually fail for MMIO pages. */
>      if ( (INVALID_MFN == mfn_x(actual_mfn)) || (t != p2m_mmio_direct) )
> @@ -1035,11 +1069,11 @@ int clear_mmio_p2m_entry(struct domain *
>          gdprintk(XENLOG_WARNING,
>                   "no mapping between mfn %08lx and gfn %08lx\n",
>                   mfn_x(mfn), gfn);
> -    rc = p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K, p2m_invalid,
> +    rc = p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), order, p2m_invalid,
>                         p2m->default_access);
> 
>   out:
> -    gfn_unlock(p2m, gfn, 0);
> +    gfn_unlock(p2m, gfn, order);
> 
>      return rc;
>  }
> @@ -2095,6 +2129,25 @@ void *map_domain_gfn(struct p2m_domain *
>      return map_domain_page(*mfn);
>  }
> 
> +static unsigned int mmio_order(const struct domain *d,
> +                               unsigned long start_fn, unsigned long nr)
> +{
> +    if ( !need_iommu(d) || !iommu_use_hap_pt(d) ||
> +         (start_fn & ((1UL << PAGE_ORDER_2M) - 1)) || !(nr >> PAGE_ORDER_2M) )
> +        return 0;
> +
> +    if ( !(start_fn & ((1UL << PAGE_ORDER_1G) - 1)) && (nr >> PAGE_ORDER_1G) &&
> +         hap_has_1gb )
> +        return PAGE_ORDER_1G;
> +
> +    if ( hap_has_2mb )
> +        return PAGE_ORDER_2M;
> +
> +    return 0;
> +}
> +
> +#define MAP_MMIO_MAX_ITER 64 /* pretty arbitrary */
> +
>  int map_mmio_regions(struct domain *d,
>                       unsigned long start_gfn,
>                       unsigned long nr,
> @@ -2102,22 +2155,29 @@ int map_mmio_regions(struct domain *d,
>  {
>      int ret = 0;
>      unsigned long i;
> +    unsigned int iter, order;
> 
>      if ( !paging_mode_translate(d) )
>          return 0;
> 
> -    for ( i = 0; !ret && i < nr; i++ )
> +    for ( iter = i = 0; i < nr && iter < MAP_MMIO_MAX_ITER;
> +          i += 1UL << order, ++iter )
>      {
> -        ret = set_mmio_p2m_entry(d, start_gfn + i, _mfn(mfn + i),
> -                                 p2m_get_hostp2m(d)->default_access);
> -        if ( ret )
> +        /* OR'ing gfn and mfn values will return an order suitable to both. */
> +        for ( order = mmio_order(d, (start_gfn + i) | (mfn + i), nr - i); ;
> +              order = ret - 1 )
>          {
> -            unmap_mmio_regions(d, start_gfn, i, mfn);
> -            break;
> +            ret = set_mmio_p2m_entry(d, start_gfn + i, _mfn(mfn + i), order,
> +                                     p2m_get_hostp2m(d)->default_access);
> +            if ( ret <= 0 )
> +                break;
> +            ASSERT(ret <= order);
>          }
> +        if ( ret < 0 )
> +            break;
>      }
> 
> -    return ret;
> +    return i == nr ? 0 : i ?: ret;
>  }
> 
>  int unmap_mmio_regions(struct domain *d,
> @@ -2125,20 +2185,30 @@ int unmap_mmio_regions(struct domain *d,
>                         unsigned long nr,
>                         unsigned long mfn)
>  {
> -    int err = 0;
> +    int ret = 0;
>      unsigned long i;
> +    unsigned int iter, order;
> 
>      if ( !paging_mode_translate(d) )
>          return 0;
> 
> -    for ( i = 0; i < nr; i++ )
> +    for ( iter = i = 0; i < nr && iter < MAP_MMIO_MAX_ITER;
> +          i += 1UL << order, ++iter )
>      {
> -        int ret = clear_mmio_p2m_entry(d, start_gfn + i, _mfn(mfn + i));
> -        if ( ret )
> -            err = ret;
> +        /* OR'ing gfn and mfn values will return an order suitable to both. */
> +        for ( order = mmio_order(d, (start_gfn + i) | (mfn + i), nr - i); ;
> +              order = ret - 1 )
> +        {
> +            ret = clear_mmio_p2m_entry(d, start_gfn + i, _mfn(mfn + i), order);
> +            if ( ret <= 0 )
> +                break;
> +            ASSERT(ret <= order);
> +        }
> +        if ( ret < 0 )
> +            break;
>      }
> 
> -    return err;
> +    return i == nr ? 0 : i ?: ret;
>  }
> 
>  unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
> --- a/xen/arch/x86/mm/p2m-ept.c
> +++ b/xen/arch/x86/mm/p2m-ept.c
> @@ -136,6 +136,7 @@ static void ept_p2m_type_to_flags(struct
>              entry->r = entry->x = 1;
>              entry->w = !rangeset_contains_singleton(mmio_ro_ranges,
>                                                      entry->mfn);
> +            ASSERT(entry->w || !is_epte_superpage(entry));
>              entry->a = !!cpu_has_vmx_ept_ad;
>              entry->d = entry->w && cpu_has_vmx_ept_ad;
>              break;
> --- a/xen/arch/x86/mm/p2m-pt.c
> +++ b/xen/arch/x86/mm/p2m-pt.c
> @@ -72,7 +72,8 @@ static const unsigned long pgt[] = {
>      PGT_l3_page_table
>  };
> 
> -static unsigned long p2m_type_to_flags(p2m_type_t t, mfn_t mfn)
> +static unsigned long p2m_type_to_flags(p2m_type_t t, mfn_t mfn,
> +                                       unsigned int level)
>  {
>      unsigned long flags;
>      /*
> @@ -107,6 +108,8 @@ static unsigned long p2m_type_to_flags(p
>      case p2m_mmio_direct:
>          if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) )
>              flags |= _PAGE_RW;
> +        else
> +            ASSERT(!level);
>          return flags | P2M_BASE_FLAGS | _PAGE_PCD;
>      }
>  }
> @@ -436,7 +439,7 @@ static int do_recalc(struct p2m_domain *
>              p2m_type_t p2mt = p2m_is_logdirty_range(p2m, gfn & mask, gfn | ~mask)
>                                ? p2m_ram_logdirty : p2m_ram_rw;
>              unsigned long mfn = l1e_get_pfn(e);
> -            unsigned long flags = p2m_type_to_flags(p2mt, _mfn(mfn));
> +            unsigned long flags = p2m_type_to_flags(p2mt, _mfn(mfn), level);
> 
>              if ( level )
>              {
> @@ -573,7 +576,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
>          ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
>          l3e_content = mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt)
>              ? l3e_from_pfn(mfn_x(mfn),
> -                           p2m_type_to_flags(p2mt, mfn) | _PAGE_PSE)
> +                           p2m_type_to_flags(p2mt, mfn, 2) | _PAGE_PSE)
>              : l3e_empty();
>          entry_content.l1 = l3e_content.l3;
> 
> @@ -609,7 +612,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
> 
>          if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
>              entry_content = p2m_l1e_from_pfn(mfn_x(mfn),
> -                                             p2m_type_to_flags(p2mt, mfn));
> +                                             p2m_type_to_flags(p2mt, mfn, 0));
>          else
>              entry_content = l1e_empty();
> 
> @@ -645,7 +648,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
>          ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
>          if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
>              l2e_content = l2e_from_pfn(mfn_x(mfn),
> -                                       p2m_type_to_flags(p2mt, mfn) |
> +                                       p2m_type_to_flags(p2mt, mfn, 1) |
>                                         _PAGE_PSE);
>          else
>              l2e_content = l2e_empty();
> --- a/xen/common/domctl.c
> +++ b/xen/common/domctl.c
> @@ -1046,10 +1046,12 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
>               (gfn + nr_mfns - 1) < gfn ) /* wrap? */
>              break;
> 
> +#ifndef CONFIG_X86 /* XXX ARM!? */
>          ret = -E2BIG;
>          /* Must break hypercall up as this could take a while. */
>          if ( nr_mfns > 64 )
>              break;
> +#endif
> 
>          ret = -EPERM;
>          if ( !iomem_access_permitted(current->domain, mfn, mfn_end) ||
> @@ -1067,7 +1069,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
>                         d->domain_id, gfn, mfn, nr_mfns);
> 
>              ret = map_mmio_regions(d, gfn, nr_mfns, mfn);
> -            if ( ret )
> +            if ( ret < 0 )
>                  printk(XENLOG_G_WARNING
>                         "memory_map:fail: dom%d gfn=%lx mfn=%lx nr=%lx ret:%ld\n",
>                         d->domain_id, gfn, mfn, nr_mfns, ret);
> @@ -1079,7 +1081,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
>                         d->domain_id, gfn, mfn, nr_mfns);
> 
>              ret = unmap_mmio_regions(d, gfn, nr_mfns, mfn);
> -            if ( ret && is_hardware_domain(current->domain) )
> +            if ( ret < 0 && is_hardware_domain(current->domain) )
>                  printk(XENLOG_ERR
>                         "memory_map: error %ld removing dom%d access to [%lx,%lx]\n",
>                         ret, d->domain_id, mfn, mfn_end);
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -259,7 +259,7 @@ int guest_remove_page(struct domain *d,
>      }
>      if ( p2mt == p2m_mmio_direct )
>      {
> -        clear_mmio_p2m_entry(d, gmfn, _mfn(mfn));
> +        clear_mmio_p2m_entry(d, gmfn, _mfn(mfn), 0);
>          put_gfn(d, gmfn);
>          return 1;
>      }
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -574,8 +574,9 @@ int p2m_is_logdirty_range(struct p2m_dom
> 
>  /* Set mmio addresses in the p2m table (for pass-through) */
>  int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
> -                       p2m_access_t access);
> -int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
> +                       unsigned int order, p2m_access_t access);
> +int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
> +                         unsigned int order);
> 
>  /* Set identity addresses in the p2m table (for pass-through) */
>  int set_identity_p2m_entry(struct domain *d, unsigned long gfn,
> --- a/xen/include/public/domctl.h
> +++ b/xen/include/public/domctl.h
> @@ -542,8 +542,15 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_bind_
> 
> 
>  /* Bind machine I/O address range -> HVM address range. */
> -/* If this returns -E2BIG lower nr_mfns value. */
>  /* XEN_DOMCTL_memory_mapping */
> +/* Returns
> +   - zero     success, everything done
> +   - -E2BIG   passed in nr_mfns value too large for the implementation
> +   - positive partial success for the first <result> page frames (with
> +              <result> less than nr_mfns), requiring re-invocation by the
> +              caller after updating inputs
> +   - negative error; other than -E2BIG
> +*/
>  #define DPCI_ADD_MAPPING         1
>  #define DPCI_REMOVE_MAPPING      0
>  struct xen_domctl_memory_mapping {
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
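For illustration only (this is not part of the patch or of libxc): the domctl.h comment quoted above defines the caller-visible contract of XEN_DOMCTL_memory_mapping after this change: 0 means everything requested was done, a positive value means only that many frames were handled and the call must be repeated with updated inputs, -E2BIG means the request is too large for a single invocation, and any other negative value is a hard error. A minimal standalone sketch of such a retry loop might look as follows, where issue_memory_mapping() is a hypothetical stub standing in for the real do_domctl() invocation:

/*
 * Standalone sketch (not Xen/libxc code) of consuming the
 * XEN_DOMCTL_memory_mapping return values described above.
 * issue_memory_mapping() is a hypothetical stand-in for issuing the
 * domctl; here it pretends at most 64 frames complete per invocation.
 */
#include <errno.h>
#include <stdio.h>

static long issue_memory_mapping(unsigned long first_gfn,
                                 unsigned long first_mfn,
                                 unsigned long nr_mfns)
{
    return nr_mfns > 64 ? 64 : 0;   /* mock: partial success above 64 frames */
}

static int map_range(unsigned long gfn, unsigned long mfn, unsigned long nr)
{
    unsigned long done = 0;

    while ( done < nr )
    {
        long rc = issue_memory_mapping(gfn + done, mfn + done, nr - done);

        if ( rc == 0 )          /* everything requested is now mapped */
            return 0;
        if ( rc > 0 )           /* partial success: advance and re-invoke */
        {
            done += rc;
            continue;
        }
        if ( rc == -E2BIG )     /* too large: caller should split the request */
            return -E2BIG;
        return (int)rc;         /* any other error is fatal */
    }
    return 0;
}

int main(void)
{
    printf("map_range() -> %d\n", map_range(0xc0000, 0xfb000, 200));
    return 0;
}

This mirrors the structure of the xc_domain_memory_mapping() hunk in the patch, except that the real libxc code detects -E2BIG via errno after do_domctl() and halves its batch size rather than giving up.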
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |
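Also for illustration (again not part of the patch): the order selection done by mmio_order() together with the "OR'ing gfn and mfn" trick can be exercised in isolation. The sketch below deliberately drops the need_iommu()/iommu_use_hap_pt() check and hard-codes the hap_has_1gb/hap_has_2mb capability flags; both are simplifications made only for this example, so it shows the alignment arithmetic, not Xen's full decision path:

#include <stdio.h>

#define PAGE_ORDER_4K  0
#define PAGE_ORDER_2M  9
#define PAGE_ORDER_1G 18

/* Assumed-on capability flags; the hypervisor derives these from hardware. */
static const int hap_has_1gb = 1, hap_has_2mb = 1;

/*
 * A chunk may use a superpage only if the starting frame number is
 * suitably aligned and enough frames remain.  OR'ing the gfn and mfn
 * before calling this lets one alignment check cover both values,
 * since a low bit set in either of them breaks that alignment.
 */
static unsigned int mmio_order(unsigned long start_fn, unsigned long nr)
{
    if ( (start_fn & ((1UL << PAGE_ORDER_2M) - 1)) || !(nr >> PAGE_ORDER_2M) )
        return PAGE_ORDER_4K;

    if ( !(start_fn & ((1UL << PAGE_ORDER_1G) - 1)) && (nr >> PAGE_ORDER_1G) &&
         hap_has_1gb )
        return PAGE_ORDER_1G;

    if ( hap_has_2mb )
        return PAGE_ORDER_2M;

    return PAGE_ORDER_4K;
}

int main(void)
{
    /* gfn is 1G-aligned, mfn only 2M-aligned: the OR limits us to 2M. */
    unsigned long gfn = 0x40000, mfn = 0x80200, nr = 1UL << PAGE_ORDER_1G;

    printf("order = %u\n", mmio_order(gfn | mfn, nr));   /* prints 9 */
    return 0;
}

With the order capped this way, map_mmio_regions() advances i by 1UL << order per iteration, so a fully 1G-aligned, 1G-sized BAR takes a single iteration instead of the 262144 4k-page iterations it would otherwise need.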