[Xen-devel] [PATCH 1/6] xc: use XENMEM_claim_pages hypercall during guest creation.
From: Dan Magenheimer <dan.magenheimer@xxxxxxxxxx>

We add an extra parameter to the structures passed to the PV routine
(arch_setup_meminit) and the HVM routine (setup_guest) that determines
whether the claim hypercall is to be done. The 'claim_enabled' field is
defined as an 'int' in case the hypercall is expanded in the future with
extra flags (for example for per-NUMA allocation). For now the only
valid values are 0 (disable) and 1 (enable).

If the hypervisor does not support this hypercall,
xc_domain_claim_pages and xc_domain_get_outstanding_pages will silently
return 0 (and set errno to zero).

Signed-off-by: Dan Magenheimer <dan.magenheimer@xxxxxxxxxx>
[v2: Updated per Ian's recommendations]
[v3: Added support for out-of-sync hypervisor]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
 tools/libxc/xc_dom.h           |  1 +
 tools/libxc/xc_dom_x86.c       | 12 ++++++++++++
 tools/libxc/xc_domain.c        | 30 ++++++++++++++++++++++++++++++
 tools/libxc/xc_hvm_build_x86.c | 23 +++++++++++++++++++----
 tools/libxc/xenctrl.h          |  6 ++++++
 tools/libxc/xenguest.h         |  2 ++
 6 files changed, 70 insertions(+), 4 deletions(-)

diff --git a/tools/libxc/xc_dom.h b/tools/libxc/xc_dom.h
index 779b9d4..ac36600 100644
--- a/tools/libxc/xc_dom.h
+++ b/tools/libxc/xc_dom.h
@@ -135,6 +135,7 @@ struct xc_dom_image {
     domid_t guest_domid;
     int8_t vhpt_size_log2; /* for IA64 */
     int8_t superpages;
+    int claim_enabled; /* 0 by default, 1 enables it */
     int shadow_enabled;
 
     int xen_version;
diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
index eb9ac07..d89526d 100644
--- a/tools/libxc/xc_dom_x86.c
+++ b/tools/libxc/xc_dom_x86.c
@@ -706,6 +706,13 @@ int arch_setup_meminit(struct xc_dom_image *dom)
     }
     else
     {
+        /* try to claim pages for early warning of insufficient memory available */
+        if ( dom->claim_enabled ) {
+            rc = xc_domain_claim_pages(dom->xch, dom->guest_domid,
+                                       dom->total_pages);
+            if ( rc )
+                return rc;
+        }
         /* setup initial p2m */
         for ( pfn = 0; pfn < dom->total_pages; pfn++ )
             dom->p2m_host[pfn] = pfn;
@@ -722,6 +729,11 @@ int arch_setup_meminit(struct xc_dom_image *dom)
                 dom->xch, dom->guest_domid, allocsz,
                 0, 0, &dom->p2m_host[i]);
         }
+
+        /* Ensure no unclaimed pages are left unused.
+         * OK to call even if the earlier claim was not made. */
+        (void)xc_domain_claim_pages(dom->xch, dom->guest_domid,
+                                    0 /* cancels the claim */);
     }
 
     return rc;
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 480ce91..299c907 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -775,6 +775,36 @@ int xc_domain_add_to_physmap(xc_interface *xch,
     return do_memory_op(xch, XENMEM_add_to_physmap, &xatp, sizeof(xatp));
 }
 
+int xc_domain_claim_pages(xc_interface *xch,
+                          uint32_t domid,
+                          unsigned long nr_pages)
+{
+    int err;
+    struct xen_memory_reservation reservation = {
+        .nr_extents   = nr_pages,
+        .extent_order = 0,
+        .mem_flags    = 0, /* no flags */
+        .domid        = domid
+    };
+
+    set_xen_guest_handle(reservation.extent_start, HYPERCALL_BUFFER_NULL);
+
+    err = do_memory_op(xch, XENMEM_claim_pages, &reservation, sizeof(reservation));
+    /* Ignore it if the hypervisor does not support the call. */
+    if (err == -1 && errno == ENOSYS)
+        err = errno = 0;
+    return err;
+}
+
+unsigned long xc_domain_get_outstanding_pages(xc_interface *xch)
+{
+    long ret = do_memory_op(xch, XENMEM_get_outstanding_pages, NULL, 0);
+
+    /* Ignore it if the hypervisor does not support the call. */
+    if (ret == -1 && errno == ENOSYS)
+        ret = errno = 0;
+    return ret;
+}
+
 int xc_domain_populate_physmap(xc_interface *xch,
                                uint32_t domid,
                                unsigned long nr_extents,
diff --git a/tools/libxc/xc_hvm_build_x86.c b/tools/libxc/xc_hvm_build_x86.c
index 3b5d777..ab33a7f 100644
--- a/tools/libxc/xc_hvm_build_x86.c
+++ b/tools/libxc/xc_hvm_build_x86.c
@@ -252,6 +252,7 @@ static int setup_guest(xc_interface *xch,
     unsigned long stat_normal_pages = 0, stat_2mb_pages = 0,
         stat_1gb_pages = 0;
     int pod_mode = 0;
+    int claim_enabled = args->claim_enabled;
 
     if ( nr_pages > target_pages )
         pod_mode = XENMEMF_populate_on_demand;
@@ -329,6 +330,16 @@ static int setup_guest(xc_interface *xch,
             xch, dom, 0xa0, 0, pod_mode, &page_array[0x00]);
         cur_pages = 0xc0;
         stat_normal_pages = 0xc0;
+
+        /* try to claim pages for early warning of insufficient memory available */
+        if ( claim_enabled ) {
+            rc = xc_domain_claim_pages(xch, dom, nr_pages - cur_pages);
+            if ( rc != 0 )
+            {
+                PERROR("Could not allocate memory for HVM guest as we cannot claim memory!");
+                goto error_out;
+            }
+        }
         while ( (rc == 0) && (nr_pages > cur_pages) )
         {
             /* Clip count to maximum 1GB extent. */
@@ -506,12 +517,16 @@ static int setup_guest(xc_interface *xch,
         munmap(page0, PAGE_SIZE);
     }
 
-    free(page_array);
-    return 0;
-
+    rc = 0;
+    goto out;
+
  error_out:
+    rc = -1;
+ out:
+    /* ensure no unclaimed pages are left unused */
+    xc_domain_claim_pages(xch, dom, 0 /* cancels the claim */);
     free(page_array);
-    return -1;
+    return rc;
 }
 
 /* xc_hvm_build:
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 32122fd..e695456 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -1129,6 +1129,12 @@ int xc_domain_populate_physmap_exact(xc_interface *xch,
                                      unsigned int mem_flags,
                                      xen_pfn_t *extent_start);
 
+int xc_domain_claim_pages(xc_interface *xch,
+                          uint32_t domid,
+                          unsigned long nr_pages);
+
+unsigned long xc_domain_get_outstanding_pages(xc_interface *xch);
+
 int xc_domain_memory_exchange_pages(xc_interface *xch,
                                     int domid,
                                     unsigned long nr_in_extents,
diff --git a/tools/libxc/xenguest.h b/tools/libxc/xenguest.h
index 7d4ac33..4714bd2 100644
--- a/tools/libxc/xenguest.h
+++ b/tools/libxc/xenguest.h
@@ -231,6 +231,8 @@ struct xc_hvm_build_args {
 
     /* Extra SMBIOS structures passed to HVMLOADER */
     struct xc_hvm_firmware_module smbios_module;
+    /* Whether to use claim hypercall (1 - enable, 0 - disable). */
+    int claim_enabled;
 };
 
 /**
-- 
1.8.1.4
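
As an illustration of the new knob from the caller's side, here is a
minimal sketch of an HVM build that opts into the claim. It assumes an
open xc_interface handle and an already-created domain; the function
name, memory size, and firmware path are made up for the example, and
error handling is abbreviated:

    #include <string.h>
    #include <xenctrl.h>
    #include <xenguest.h>

    static int build_hvm_with_claim(xc_interface *xch, uint32_t domid)
    {
        struct xc_hvm_build_args args;

        memset(&args, 0, sizeof(args));
        args.mem_size   = 512ULL << 20;   /* 512 MB guest */
        args.mem_target = args.mem_size;  /* no PoD gap */
        args.image_file_name = "/usr/lib/xen/boot/hvmloader";
        args.claim_enabled = 1;           /* stake a claim up front */

        /* With claim_enabled set, setup_guest() claims the remaining
         * pages before populating the physmap, so running out of
         * memory shows up as an early failure here instead of partway
         * through the allocation loop. */
        return xc_hvm_build(xch, domid, &args);
    }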
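
The ENOSYS fallback in the two new libxc calls means callers need no
capability check before using them. A similar (hypothetical) sketch for
the PV path, where the opt-in is the new xc_dom_image field; the kernel
command line is made up and the rest of the build sequence is omitted:

    #include <stdio.h>
    #include <xenctrl.h>
    #include <xc_dom.h>

    static void pv_claim_example(xc_interface *xch)
    {
        struct xc_dom_image *dom =
            xc_dom_allocate(xch, "root=/dev/xvda1 ro", NULL);

        if ( dom != NULL )
        {
            dom->claim_enabled = 1; /* arch_setup_meminit() will claim
                                     * dom->total_pages up front */
            /* ... normal xc_dom_* build sequence would go here ... */
            xc_dom_release(dom);
        }

        /* Pages claimed but not yet allocated, system-wide; returns 0
         * (with errno 0) on hypervisors without XENMEM_claim_pages. */
        fprintf(stderr, "outstanding claimed pages: %lu\n",
                xc_domain_get_outstanding_pages(xch));
    }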