Re: [Xen-devel] [PATCH] libxc: remove superpages option for pv domains
On Fri, Oct 09, 2015 at 04:12:12PM +0100, Ian Campbell wrote:
> On Thu, 2015-10-08 at 17:23 +0200, Juergen Gross wrote:
> > The pv domain builder currently supports the additional flag
> > "superpages" to build a pv domain with 2MB pages. This feature isn't
> > being used by any component other than the python xc bindings.
> >
> > Remove the flag and its support from the xc bindings and the domain
> > builder.
> >
> > Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
>
> Konrad,
>
> In <20151008011056.GA22076@xxxxxxxxxxxxxxxxxxx> you indicated you were OK
> with this; may we take that as an:
>
> Acked-by: Konrad Rzeszutek Wilk <konrad@xxxxxxxxxx>
>
> for this change?
>
> (Or maybe even an
> Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
> ?)
That one.
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
>
> Thanks,
> Ian.
>
> > ---
> > tools/libxc/include/xc_dom.h | 1 -
> > tools/libxc/xc_dom_x86.c | 271 ++++++++++++++++----------------------
> > tools/python/xen/lowlevel/xc/xc.c | 10 +-
> > 3 files changed, 118 insertions(+), 164 deletions(-)
> >
> > diff --git a/tools/libxc/include/xc_dom.h b/tools/libxc/include/xc_dom.h
> > index e52b023..ae2b985 100644
> > --- a/tools/libxc/include/xc_dom.h
> > +++ b/tools/libxc/include/xc_dom.h
> > @@ -157,7 +157,6 @@ struct xc_dom_image {
> >
> > xc_interface *xch;
> > domid_t guest_domid;
> > - int8_t superpages;
> > int claim_enabled; /* 0 by default, 1 enables it */
> > int shadow_enabled;
> >
> > diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
> > index dd331bf..9d11dd3 100644
> > --- a/tools/libxc/xc_dom_x86.c
> > +++ b/tools/libxc/xc_dom_x86.c
> > @@ -1005,181 +1005,140 @@ static int meminit_pv(struct xc_dom_image *dom)
> > return rc;
> > }
> >
> > - if ( dom->superpages )
> > + /* try to claim pages for early warning of insufficient memory avail */
> > + if ( dom->claim_enabled )
> > {
> > - int count = dom->total_pages >> SUPERPAGE_2MB_SHIFT;
> > - xen_pfn_t extents[count];
> > -
> > - dom->p2m_size = dom->total_pages;
> > - dom->p2m_host = xc_dom_malloc(dom, sizeof(xen_pfn_t) *
> > - dom->p2m_size);
> > - if ( dom->p2m_host == NULL )
> > - return -EINVAL;
> > -
> > - DOMPRINTF("Populating memory with %d superpages", count);
> > - for ( pfn = 0; pfn < count; pfn++ )
> > - extents[pfn] = pfn << SUPERPAGE_2MB_SHIFT;
> > - rc = xc_domain_populate_physmap_exact(dom->xch, dom->guest_domid,
> > - count, SUPERPAGE_2MB_SHIFT, 0,
> > - extents);
> > + rc = xc_domain_claim_pages(dom->xch, dom->guest_domid,
> > + dom->total_pages);
> > if ( rc )
> > return rc;
> > + }
> >
> > - /* Expand the returned mfn into the p2m array */
> > - pfn = 0;
> > - for ( i = 0; i < count; i++ )
> > - {
> > - mfn = extents[i];
> > - for ( j = 0; j < SUPERPAGE_2MB_NR_PFNS; j++, pfn++ )
> > - dom->p2m_host[pfn] = mfn + j;
> > - }
> > + /* Setup dummy vNUMA information if it's not provided. Note
> > + * that this is a valid state if libxl doesn't provide any
> > + * vNUMA information.
> > + *
> > + * The dummy values make libxc allocate all pages from
> > + * arbitrary physical nodes. This is the expected behaviour if
> > + * no vNUMA configuration is provided to libxc.
> > + *
> > + * Note that the following hunk is just for the convenience of
> > + * allocation code. No defaulting happens in libxc.
> > + */
> > + if ( dom->nr_vmemranges == 0 )
> > + {
> > + nr_vmemranges = 1;
> > + vmemranges = dummy_vmemrange;
> > + vmemranges[0].start = 0;
> > + vmemranges[0].end = (uint64_t)dom->total_pages << PAGE_SHIFT;
> > + vmemranges[0].flags = 0;
> > + vmemranges[0].nid = 0;
> > +
> > + nr_vnodes = 1;
> > + vnode_to_pnode = dummy_vnode_to_pnode;
> > + vnode_to_pnode[0] = XC_NUMA_NO_NODE;
> > }
> > else
> > {
> > - /* try to claim pages for early warning of insufficient memory avail */
> > - if ( dom->claim_enabled ) {
> > - rc = xc_domain_claim_pages(dom->xch, dom->guest_domid,
> > - dom->total_pages);
> > - if ( rc )
> > - return rc;
> > - }
> > + nr_vmemranges = dom->nr_vmemranges;
> > + nr_vnodes = dom->nr_vnodes;
> > + vmemranges = dom->vmemranges;
> > + vnode_to_pnode = dom->vnode_to_pnode;
> > + }
> >
> > - /* Setup dummy vNUMA information if it's not provided. Note
> > - * that this is a valid state if libxl doesn't provide any
> > - * vNUMA information.
> > - *
> > - * The dummy values make libxc allocate all pages from
> > - * arbitrary physical nodes. This is the expected behaviour if
> > - * no vNUMA configuration is provided to libxc.
> > - *
> > - * Note that the following hunk is just for the convenience of
> > - * allocation code. No defaulting happens in libxc.
> > - */
> > - if ( dom->nr_vmemranges == 0 )
> > - {
> > - nr_vmemranges = 1;
> > - vmemranges = dummy_vmemrange;
> > - vmemranges[0].start = 0;
> > - vmemranges[0].end = (uint64_t)dom->total_pages << PAGE_SHIFT;
> > - vmemranges[0].flags = 0;
> > - vmemranges[0].nid = 0;
> > -
> > - nr_vnodes = 1;
> > - vnode_to_pnode = dummy_vnode_to_pnode;
> > - vnode_to_pnode[0] = XC_NUMA_NO_NODE;
> > - }
> > - else
> > - {
> > - nr_vmemranges = dom->nr_vmemranges;
> > - nr_vnodes = dom->nr_vnodes;
> > - vmemranges = dom->vmemranges;
> > - vnode_to_pnode = dom->vnode_to_pnode;
> > - }
> > + total = dom->p2m_size = 0;
> > + for ( i = 0; i < nr_vmemranges; i++ )
> > + {
> > + total += ((vmemranges[i].end - vmemranges[i].start) >> PAGE_SHIFT);
> > + dom->p2m_size = max(dom->p2m_size,
> > + (xen_pfn_t)(vmemranges[i].end >> PAGE_SHIFT));
> > + }
> > + if ( total != dom->total_pages )
> > + {
> > + xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
> > + "%s: vNUMA page count mismatch (0x%"PRIpfn" !=
> > 0x%"PRIpfn")",
> > + __func__, total, dom->total_pages);
> > + return -EINVAL;
> > + }
> >
> > - total = dom->p2m_size = 0;
> > - for ( i = 0; i < nr_vmemranges; i++ )
> > - {
> > - total += ((vmemranges[i].end - vmemranges[i].start)
> > - >> PAGE_SHIFT);
> > - dom->p2m_size =
> > - dom->p2m_size > (vmemranges[i].end >> PAGE_SHIFT) ?
> > - dom->p2m_size : (vmemranges[i].end >> PAGE_SHIFT);
> > - }
> > - if ( total != dom->total_pages )
> > - {
> > - xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
> > - "%s: vNUMA page count mismatch (0x%"PRIpfn" !=
> > 0x%"PRIpfn")",
> > - __func__, total, dom->total_pages);
> > - return -EINVAL;
> > - }
> > + dom->p2m_host = xc_dom_malloc(dom, sizeof(xen_pfn_t) * dom->p2m_size);
> > + if ( dom->p2m_host == NULL )
> > + return -EINVAL;
> > + for ( pfn = 0; pfn < dom->p2m_size; pfn++ )
> > + dom->p2m_host[pfn] = INVALID_P2M_ENTRY;
> >
> > - dom->p2m_host = xc_dom_malloc(dom, sizeof(xen_pfn_t) *
> > - dom->p2m_size);
> > - if ( dom->p2m_host == NULL )
> > - return -EINVAL;
> > - for ( pfn = 0; pfn < dom->p2m_size; pfn++ )
> > - dom->p2m_host[pfn] = INVALID_P2M_ENTRY;
> > + /* allocate guest memory */
> > + for ( i = 0; i < nr_vmemranges; i++ )
> > + {
> > + unsigned int memflags;
> > + uint64_t pages, super_pages;
> > + unsigned int pnode = vnode_to_pnode[vmemranges[i].nid];
> > + xen_pfn_t extents[SUPERPAGE_BATCH_SIZE];
> > + xen_pfn_t pfn_base_idx;
> >
> > - /* allocate guest memory */
> > - for ( i = 0; i < nr_vmemranges; i++ )
> > - {
> > - unsigned int memflags;
> > - uint64_t pages, super_pages;
> > - unsigned int pnode = vnode_to_pnode[vmemranges[i].nid];
> > - xen_pfn_t extents[SUPERPAGE_BATCH_SIZE];
> > - xen_pfn_t pfn_base_idx;
> > -
> > - memflags = 0;
> > - if ( pnode != XC_NUMA_NO_NODE )
> > - memflags |= XENMEMF_exact_node(pnode);
> > -
> > - pages = (vmemranges[i].end - vmemranges[i].start)
> > - >> PAGE_SHIFT;
> > - super_pages = pages >> SUPERPAGE_2MB_SHIFT;
> > - pfn_base = vmemranges[i].start >> PAGE_SHIFT;
> > -
> > - for ( pfn = pfn_base; pfn < pfn_base+pages; pfn++ )
> > - dom->p2m_host[pfn] = pfn;
> > -
> > - pfn_base_idx = pfn_base;
> > - while (super_pages) {
> > - uint64_t count =
> > - min_t(uint64_t, super_pages,SUPERPAGE_BATCH_SIZE);
> > - super_pages -= count;
> > -
> > - for ( pfn = pfn_base_idx, j = 0;
> > - pfn < pfn_base_idx + (count << SUPERPAGE_2MB_SHIFT);
> > - pfn += SUPERPAGE_2MB_NR_PFNS, j++ )
> > - extents[j] = dom->p2m_host[pfn];
> > - rc = xc_domain_populate_physmap(dom->xch, dom->guest_domid, count,
> > - SUPERPAGE_2MB_SHIFT, memflags,
> > - extents);
> > - if ( rc < 0 )
> > - return rc;
> > -
> > - /* Expand the returned mfns into the p2m array. */
> > - pfn = pfn_base_idx;
> > - for ( j = 0; j < rc; j++ )
> > - {
> > - mfn = extents[j];
> > - for ( k = 0; k < SUPERPAGE_2MB_NR_PFNS; k++, pfn++ )
> > - dom->p2m_host[pfn] = mfn + k;
> > - }
> > - pfn_base_idx = pfn;
> > - }
> > + memflags = 0;
> > + if ( pnode != XC_NUMA_NO_NODE )
> > + memflags |= XENMEMF_exact_node(pnode);
> >
> > - for ( j = pfn_base_idx - pfn_base; j < pages; j += allocsz )
> > - {
> > - allocsz = pages - j;
> > - if ( allocsz > 1024*1024 )
> > - allocsz = 1024*1024;
> > + pages = (vmemranges[i].end - vmemranges[i].start) >> PAGE_SHIFT;
> > + super_pages = pages >> SUPERPAGE_2MB_SHIFT;
> > + pfn_base = vmemranges[i].start >> PAGE_SHIFT;
> >
> > - rc = xc_domain_populate_physmap_exact(dom->xch,
> > - dom->guest_domid, allocsz, 0, memflags,
> > - &dom->p2m_host[pfn_base+j]);
> > + for ( pfn = pfn_base; pfn < pfn_base+pages; pfn++ )
> > + dom->p2m_host[pfn] = pfn;
> >
> > - if ( rc )
> > - {
> > - if ( pnode != XC_NUMA_NO_NODE )
> > - xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
> > - "%s: failed to allocate 0x%"PRIx64"
> > pages (v=%d, p=%d)",
> > - __func__, pages, i, pnode);
> > - else
> > - xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
> > - "%s: failed to allocate 0x%"PRIx64"
> > pages",
> > - __func__, pages);
> > - return rc;
> > - }
> > + pfn_base_idx = pfn_base;
> > + while ( super_pages ) {
> > + uint64_t count = min_t(uint64_t, super_pages, SUPERPAGE_BATCH_SIZE);
> > + super_pages -= count;
> > +
> > + for ( pfn = pfn_base_idx, j = 0;
> > + pfn < pfn_base_idx + (count << SUPERPAGE_2MB_SHIFT);
> > + pfn += SUPERPAGE_2MB_NR_PFNS, j++ )
> > + extents[j] = dom->p2m_host[pfn];
> > + rc = xc_domain_populate_physmap(dom->xch, dom->guest_domid, count,
> > + SUPERPAGE_2MB_SHIFT, memflags,
> > + extents);
> > + if ( rc < 0 )
> > + return rc;
> > +
> > + /* Expand the returned mfns into the p2m array. */
> > + pfn = pfn_base_idx;
> > + for ( j = 0; j < rc; j++ )
> > + {
> > + mfn = extents[j];
> > + for ( k = 0; k < SUPERPAGE_2MB_NR_PFNS; k++, pfn++ )
> > + dom->p2m_host[pfn] = mfn + k;
> > }
> > - rc = 0;
> > + pfn_base_idx = pfn;
> > }
> >
> > - /* Ensure no unclaimed pages are left unused.
> > - * OK to call if hadn't done the earlier claim call. */
> > - (void)xc_domain_claim_pages(dom->xch, dom->guest_domid,
> > - 0 /* cancels the claim */);
> > + for ( j = pfn_base_idx - pfn_base; j < pages; j += allocsz )
> > + {
> > + allocsz = min_t(uint64_t, 1024 * 1024, pages - j);
> > + rc = xc_domain_populate_physmap_exact(dom->xch, dom->guest_domid,
> > + allocsz, 0, memflags, &dom->p2m_host[pfn_base + j]);
> > +
> > + if ( rc )
> > + {
> > + if ( pnode != XC_NUMA_NO_NODE )
> > + xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
> > + "%s: failed to allocate 0x%"PRIx64"
> > pages (v=%d, p=%d)",
> > + __func__, pages, i, pnode);
> > + else
> > + xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
> > + "%s: failed to allocate 0x%"PRIx64"
> > pages",
> > + __func__, pages);
> > + return rc;
> > + }
> > + }
> > + rc = 0;
> > }
> >
> > + /* Ensure no unclaimed pages are left unused.
> > + * OK to call if hadn't done the earlier claim call. */
> > + xc_domain_claim_pages(dom->xch, dom->guest_domid, 0 /* cancel claim */);
> > +
> > return rc;
> > }
> >
> > diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
> > index b441777..26290a3 100644
> > --- a/tools/python/xen/lowlevel/xc/xc.c
> > +++ b/tools/python/xen/lowlevel/xc/xc.c
> > @@ -463,7 +463,6 @@ static PyObject *pyxc_linux_build(XcObject *self,
> > char *image, *ramdisk = NULL, *cmdline = "", *features = NULL;
> > int flags = 0;
> > int store_evtchn, console_evtchn;
> > - int superpages = 0;
> > unsigned int mem_mb;
> > unsigned long store_mfn = 0;
> > unsigned long console_mfn = 0;
> > @@ -476,22 +475,19 @@ static PyObject *pyxc_linux_build(XcObject *self,
> > "console_evtchn", "image",
> > /* optional */
> > "ramdisk", "cmdline", "flags",
> > - "features", "superpages", NULL };
> > + "features", NULL };
> >
> > - if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiiis|ssisi", kwd_list,
> > + if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiiis|ssis", kwd_list,
> > &domid, &store_evtchn, &mem_mb,
> > &console_evtchn, &image,
> > /* optional */
> > - &ramdisk, &cmdline, &flags,
> > - &features, &superpages) )
> > + &ramdisk, &cmdline, &flags, &features) )
> > return NULL;
> >
> > xc_dom_loginit(self->xc_handle);
> > if (!(dom = xc_dom_allocate(self->xc_handle, cmdline, features)))
> > return pyxc_error_to_exception(self->xc_handle);
> >
> > - dom->superpages = superpages;
> > -
> > if ( xc_dom_linux_build(self->xc_handle, dom, domid, mem_mb, image,
> > ramdisk, flags, store_evtchn, &store_mfn,
> > console_evtchn, &console_mfn) != 0 ) {
>
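As context for the interface change above: the commit message notes that the python xc
bindings were the only consumer of the superpages flag, and the last hunk drops the
trailing argument from pyxc_linux_build. The sketch below shows what a caller of that
binding might look like once the superpages argument is gone. It is illustrative only:
the module/class layout (xen.lowlevel.xc, class xc) and the exposed method name
linux_build are inferred from pyxc_linux_build, and all argument values and paths are
made up; the positional order follows the "iiiis|ssis" format string in the hunk.

    # Illustrative sketch, not taken from the patch: values, paths and the
    # exposed method name (linux_build, inferred from pyxc_linux_build) are
    # assumptions.  Positional order follows the "iiiis|ssis" format string:
    # domid, store_evtchn, memsize (MB), console_evtchn, image, then the
    # optional ramdisk, cmdline, flags, features.
    import xen.lowlevel.xc as xc_module

    xc = xc_module.xc()

    # Before this patch an extra trailing 'superpages' integer was accepted;
    # after it, the call carries no superpage hint at all:
    result = xc.linux_build(1,                      # domid (hypothetical)
                            2,                      # store_evtchn
                            256,                    # memory size in MB
                            3,                      # console_evtchn
                            "/boot/vmlinuz-guest",  # kernel image (made up)
                            "/boot/initrd-guest",   # optional ramdisk
                            "root=/dev/xvda1",      # optional cmdline
                            0,                      # optional flags
                            "")                     # optional features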
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel