[PATCH v2 5/5] xen/memory, tools: Make init-dom0less consume XEN_DOMCTL_get_mem_map
Previous commits enable the toolstack to get the domain memory map, therefore
instead of hardcoding the guest magic pages region, use the
XEN_DOMCTL_get_mem_map domctl to get the start address of the guest magic
pages region. Add the (XEN)MEMF_force_heap_alloc memory flags to force
populate_physmap() to allocate pages from the domheap instead of using 1:1
or statically allocated pages to map the magic pages.

Reported-by: Alec Kwapis <alec.kwapis@xxxxxxxxxxxxx>
Signed-off-by: Henry Wang <xin.wang2@xxxxxxx>
---
v2:
- New patch
---
 tools/helpers/init-dom0less.c | 22 ++++++++++++++++++----
 xen/common/memory.c           | 10 ++++++++--
 xen/include/public/memory.h   |  5 +++++
 xen/include/xen/mm.h          |  2 ++
 4 files changed, 33 insertions(+), 6 deletions(-)

diff --git a/tools/helpers/init-dom0less.c b/tools/helpers/init-dom0less.c
index fee93459c4..92c612f6da 100644
--- a/tools/helpers/init-dom0less.c
+++ b/tools/helpers/init-dom0less.c
@@ -23,16 +23,30 @@ static int alloc_xs_page(struct xc_interface_core *xch,
                          libxl_dominfo *info,
                          uint64_t *xenstore_pfn)
 {
-    int rc;
-    const xen_pfn_t base = GUEST_MAGIC_BASE >> XC_PAGE_SHIFT;
-    xen_pfn_t p2m = (GUEST_MAGIC_BASE >> XC_PAGE_SHIFT) + XENSTORE_PFN_OFFSET;
+    int rc, i;
+    xen_pfn_t base = ((xen_pfn_t)-1);
+    xen_pfn_t p2m = ((xen_pfn_t)-1);
+    uint32_t nr_regions = XEN_MAX_MEM_REGIONS;
+    struct xen_mem_region mem_regions[XEN_MAX_MEM_REGIONS] = {0};
+
+    rc = xc_get_domain_mem_map(xch, info->domid, mem_regions, &nr_regions);
+
+    for ( i = 0; i < nr_regions; i++ )
+    {
+        if ( mem_regions[i].type == GUEST_MEM_REGION_MAGIC )
+        {
+            base = mem_regions[i].start >> XC_PAGE_SHIFT;
+            p2m = (mem_regions[i].start >> XC_PAGE_SHIFT) + XENSTORE_PFN_OFFSET;
+        }
+    }
 
     rc = xc_domain_setmaxmem(xch, info->domid, info->max_memkb +
                              (XC_PAGE_SIZE/1024));
     if (rc < 0)
         return rc;
 
-    rc = xc_domain_populate_physmap_exact(xch, info->domid, 1, 0, 0, &p2m);
+    rc = xc_domain_populate_physmap_exact(xch, info->domid, 1, 0,
+                                          XENMEMF_force_heap_alloc, &p2m);
     if (rc < 0)
         return rc;
 
diff --git a/xen/common/memory.c b/xen/common/memory.c
index b3b05c2ec0..18b6c16aed 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -219,7 +219,8 @@ static void populate_physmap(struct memop_args *a)
         }
         else
         {
-            if ( is_domain_direct_mapped(d) )
+            if ( is_domain_direct_mapped(d) &&
+                 !(a->memflags & MEMF_force_heap_alloc) )
             {
                 mfn = _mfn(gpfn);
 
@@ -246,7 +247,8 @@
 
                 mfn = _mfn(gpfn);
             }
-            else if ( is_domain_using_staticmem(d) )
+            else if ( is_domain_using_staticmem(d) &&
+                      !(a->memflags & MEMF_force_heap_alloc) )
             {
                 /*
                  * No easy way to guarantee the retrieved pages are contiguous,
@@ -1433,6 +1435,10 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
              && (reservation.mem_flags & XENMEMF_populate_on_demand) )
             args.memflags |= MEMF_populate_on_demand;
 
+        if ( op == XENMEM_populate_physmap
+             && (reservation.mem_flags & XENMEMF_force_heap_alloc) )
+            args.memflags |= MEMF_force_heap_alloc;
+
         if ( xsm_memory_adjust_reservation(XSM_TARGET, curr_d, d) )
         {
             rcu_unlock_domain(d);
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 5e545ae9a4..2a1bfa5bfa 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -41,6 +41,11 @@
 #define XENMEMF_exact_node(n) (XENMEMF_node(n) | XENMEMF_exact_node_request)
 /* Flag to indicate the node specified is virtual node */
 #define XENMEMF_vnode  (1<<18)
+/*
+ * Flag to force populate physmap to use pages from domheap instead of 1:1
+ * or static allocation.
+ */
+#define XENMEMF_force_heap_alloc  (1<<19)
 #endif
 
 struct xen_memory_reservation {
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index bb29b352ec..a4554f730d 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -205,6 +205,8 @@ struct npfec {
 #define MEMF_no_icache_flush (1U<<_MEMF_no_icache_flush)
 #define _MEMF_no_scrub    8
 #define MEMF_no_scrub     (1U<<_MEMF_no_scrub)
+#define _MEMF_force_heap_alloc 9
+#define MEMF_force_heap_alloc (1U<<_MEMF_force_heap_alloc)
 #define _MEMF_node        16
 #define MEMF_node_mask    ((1U << (8 * sizeof(nodeid_t))) - 1)
 #define MEMF_node(n)      ((((n) + 1) & MEMF_node_mask) << _MEMF_node)
-- 
2.34.1
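For reference (not part of the patch), below is a minimal sketch of how a
toolstack consumer can locate the guest magic pages region with the
interfaces exercised above. It assumes xc_get_domain_mem_map(),
struct xen_mem_region, GUEST_MEM_REGION_MAGIC and XEN_MAX_MEM_REGIONS behave
as introduced earlier in this series and as used in the init-dom0less.c hunk;
find_magic_base() is a hypothetical helper name.

#include <xenctrl.h>

/*
 * Hypothetical helper: look up the PFN at which the guest magic pages
 * region starts, instead of hardcoding GUEST_MAGIC_BASE.  Assumes the
 * xc_get_domain_mem_map() wrapper and the region types introduced by the
 * earlier patches in this series.
 */
static int find_magic_base(xc_interface *xch, uint32_t domid,
                           xen_pfn_t *base_pfn)
{
    struct xen_mem_region mem_regions[XEN_MAX_MEM_REGIONS] = {0};
    uint32_t nr_regions = XEN_MAX_MEM_REGIONS;
    uint32_t i;
    int rc;

    /* Ask the hypervisor for this domain's memory map. */
    rc = xc_get_domain_mem_map(xch, domid, mem_regions, &nr_regions);
    if ( rc )
        return rc;

    /* Scan the returned regions for the magic pages region. */
    for ( i = 0; i < nr_regions; i++ )
    {
        if ( mem_regions[i].type == GUEST_MEM_REGION_MAGIC )
        {
            *base_pfn = mem_regions[i].start >> XC_PAGE_SHIFT;
            return 0;
        }
    }

    return -1; /* No magic region reported for this domain. */
}

A caller such as alloc_xs_page() would then populate the xenstore page at
*base_pfn + XENSTORE_PFN_OFFSET, passing XENMEMF_force_heap_alloc so that
direct-mapped or static-mem domains still receive a freshly allocated
domheap page rather than a 1:1 or static one.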