[Xen-devel] [PATCH 2/4] hvm: NUMA guest: extend populate_physmap to use a node
To make use of the new node-aware memop hypercall, the
xc_domain_memory_populate_physmap function is extended with a node
parameter. Passing XENMEM_DEFAULT_NODE preserves the current behavior.

Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
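For illustration only (this sketch is editorial, not part of the patch): a
caller using the extended signature, assuming xc_handle, domid, nr_pages and
pfn_list are set up exactly as for the old five-argument version, and that
node 0 exists on the host.

    /* Editorial sketch: xc_handle, domid, nr_pages and pfn_list are
     * placeholders the caller is assumed to have prepared. */
    int rc;

    /* Request nr_pages order-0 pages, preferably from NUMA node 0. */
    rc = xc_domain_memory_populate_physmap(xc_handle, domid, nr_pages,
                                           0 /* extent_order */,
                                           0 /* address_bits: no limit */,
                                           0 /* node */, pfn_list);

    /* Passing XENMEM_DEFAULT_NODE instead keeps the old node-agnostic
     * placement, so existing callers only gain one extra argument. */
    rc = xc_domain_memory_populate_physmap(xc_handle, domid, nr_pages, 0, 0,
                                           XENMEM_DEFAULT_NODE, pfn_list);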
--
Andre Przywara
AMD-Operating System Research Center (OSRC), Dresden, Germany
Tel: +49 351 277-84917
----to satisfy European Law for business letters:
AMD Saxony Limited Liability Company & Co. KG,
Wilschdorfer Landstr. 101, 01109 Dresden, Germany
Register Court Dresden: HRA 4896, General Partner authorized to represent:
AMD Saxony LLC (Wilmington, Delaware, US)
General Manager of AMD Saxony LLC: Dr. Hans-R. Deppe, Thomas McCoy

# HG changeset patch
# User Andre Przywara <andre.przywara@xxxxxxx>
# Date 1215083041 -7200
# Node ID a0dccef499b005ba13eb70bf6cac856af44a10a0
# Parent  e308bd4e9179493e3897143bf6e5841c14b4f357
make alloc_physmap use a NUMA node

diff -r e308bd4e9179 -r a0dccef499b0 tools/ioemu/vl.c
--- a/tools/ioemu/vl.c  Thu Jul 03 13:01:11 2008 +0200
+++ b/tools/ioemu/vl.c  Thu Jul 03 13:04:01 2008 +0200
@@ -7045,7 +7045,7 @@
     int err = 0;

     err = xc_domain_memory_populate_physmap(xc_handle, domid, nr_pages, 0,
-                                            address_bits, extent_start);
+                                            address_bits, XENMEM_DEFAULT_NODE, extent_start);
     if (err) {
         fprintf(stderr, "Failed to populate physmap\n");
         return -1;
diff -r e308bd4e9179 -r a0dccef499b0 tools/libxc/ia64/xc_ia64_hvm_build.c
--- a/tools/libxc/ia64/xc_ia64_hvm_build.c  Thu Jul 03 13:01:11 2008 +0200
+++ b/tools/libxc/ia64/xc_ia64_hvm_build.c  Thu Jul 03 13:04:01 2008 +0200
@@ -937,15 +937,16 @@
     // Allocate memory for VTI guest, up to VGA hole from 0xA0000-0xC0000.
     rc = xc_domain_memory_populate_physmap(xc_handle, dom,
                                            (nr_pages > VGA_START_PAGE) ?
-                                           VGA_START_PAGE : nr_pages,
-                                           0, 0, &pfn_list[0]);
+                                           VGA_START_PAGE : nr_pages, 0, 0,
+                                           XENMEM_DEFAULT_NODE, &pfn_list[0]);

     // We're not likely to attempt to create a domain with less than
     // 640k of memory, but test for completeness
     if (rc == 0 && nr_pages > VGA_END_PAGE)
         rc = xc_domain_memory_populate_physmap(xc_handle, dom,
-                                               nr_pages - VGA_END_PAGE,
-                                               0, 0, &pfn_list[VGA_END_PAGE]);
+                                               nr_pages - VGA_END_PAGE, 0, 0,
+                                               XENMEM_DEFAULT_NODE,
+                                               &pfn_list[VGA_END_PAGE]);
     if (rc != 0) {
         PERROR("Could not allocate normal memory for Vti guest.\n");
         goto error_out;
@@ -957,8 +958,8 @@
     for (i = 0; i < GFW_PAGES; i++)
         pfn_list[i] = (GFW_START >> PAGE_SHIFT) + i;

-    rc = xc_domain_memory_populate_physmap(xc_handle, dom, GFW_PAGES,
-                                           0, 0, &pfn_list[0]);
+    rc = xc_domain_memory_populate_physmap(xc_handle, dom, GFW_PAGES, 0, 0,
+                                           XENMEM_DEFAULT_NODE, &pfn_list[0]);
     if (rc != 0) {
         PERROR("Could not allocate GFW memory for Vti guest.\n");
         goto error_out;
@@ -973,8 +974,8 @@
     pfn_list[nr_special_pages] = memmap_info_pfn;
     nr_special_pages++;

-    rc = xc_domain_memory_populate_physmap(xc_handle, dom, nr_special_pages,
-                                           0, 0, &pfn_list[0]);
+    rc = xc_domain_memory_populate_physmap(xc_handle, dom, nr_special_pages, 0,
+                                           0, XENMEM_DEFAULT_NODE, &pfn_list[0]);
     if (rc != 0) {
         PERROR("Could not allocate IO page or store page or buffer io page.\n");
         goto error_out;
diff -r e308bd4e9179 -r a0dccef499b0 tools/libxc/ia64/xc_ia64_linux_restore.c
--- a/tools/libxc/ia64/xc_ia64_linux_restore.c  Thu Jul 03 13:01:11 2008 +0200
+++ b/tools/libxc/ia64/xc_ia64_linux_restore.c  Thu Jul 03 13:04:01 2008 +0200
@@ -35,7 +35,8 @@
     if (xc_ia64_p2m_present(p2m_table, gmfn))
         return 0;

-    return xc_domain_memory_populate_physmap(xc_handle, dom, 1, 0, 0, &gmfn);
+    return xc_domain_memory_populate_physmap(xc_handle, dom, 1, 0, 0,
+                                             XENMEM_DEFAULT_NODE, &gmfn);
 }

 static int
@@ -530,8 +531,8 @@
     };
     unsigned long nr_pages = sizeof(pfn_list) / sizeof(pfn_list[0]);

-    rc = xc_domain_memory_populate_physmap(xc_handle, dom, nr_pages,
-                                           0, 0, &pfn_list[0]);
+    rc = xc_domain_memory_populate_physmap(xc_handle, dom, nr_pages, 0, 0,
+                                           XENMEM_DEFAULT_NODE, &pfn_list[0]);
     if (rc != 0)
         PERROR("Could not allocate IO page or buffer io page.\n");
     return rc;
diff -r e308bd4e9179 -r a0dccef499b0 tools/libxc/xc_dom_ia64.c
--- a/tools/libxc/xc_dom_ia64.c  Thu Jul 03 13:01:11 2008 +0200
+++ b/tools/libxc/xc_dom_ia64.c  Thu Jul 03 13:04:01 2008 +0200
@@ -173,7 +173,7 @@
     /* allocate guest memory */
     rc = xc_domain_memory_populate_physmap(dom->guest_xc, dom->guest_domid,
-                                           nbr, 0, 0,
+                                           nbr, 0, 0, XENMEM_DEFAULT_NODE,
                                            dom->p2m_host);
     return rc;
 }
diff -r e308bd4e9179 -r a0dccef499b0 tools/libxc/xc_dom_x86.c
--- a/tools/libxc/xc_dom_x86.c  Thu Jul 03 13:01:11 2008 +0200
+++ b/tools/libxc/xc_dom_x86.c  Thu Jul 03 13:04:01 2008 +0200
@@ -713,7 +713,7 @@
     /* allocate guest memory */
     rc = xc_domain_memory_populate_physmap(dom->guest_xc, dom->guest_domid,
                                            dom->total_pages, 0, 0,
-                                           dom->p2m_host);
+                                           XENMEM_DEFAULT_NODE, dom->p2m_host);
     return rc;
 }
diff -r e308bd4e9179 -r a0dccef499b0 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c  Thu Jul 03 13:01:11 2008 +0200
+++ b/tools/libxc/xc_domain.c  Thu Jul 03 13:04:01 2008 +0200
@@ -511,13 +511,15 @@
                                       unsigned long nr_extents,
                                       unsigned int extent_order,
                                       unsigned int address_bits,
+                                      unsigned int node,
                                       xen_pfn_t *extent_start)
 {
     int err;
     struct xen_memory_reservation reservation = {
         .nr_extents = nr_extents,
         .extent_order = extent_order,
-        .mem_flags = XENMEM_addr_bits(address_bits),
+        .mem_flags = XENMEM_addr_bits(address_bits) |
+                     XENMEM_set_node(node),
         .domid = domid
     };
     set_xen_guest_handle(reservation.extent_start, extent_start);
diff -r e308bd4e9179 -r a0dccef499b0 tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c  Thu Jul 03 13:01:11 2008 +0200
+++ b/tools/libxc/xc_domain_restore.c  Thu Jul 03 13:04:01 2008 +0200
@@ -110,7 +110,7 @@
     /* Allocate the requisite number of mfns. */
     if ( nr_mfns &&
          (xc_domain_memory_populate_physmap(xc_handle, dom, nr_mfns, 0, 0,
-                                            p2m_batch) != 0) )
+                                            XENMEM_DEFAULT_NODE, p2m_batch) != 0) )
     {
         ERROR("Failed to allocate memory for batch.!\n");
         errno = ENOMEM;
@@ -525,7 +525,7 @@
     /* Now allocate a bunch of mfns for this batch */
     if ( nr_mfns &&
          (xc_domain_memory_populate_physmap(xc_handle, dom, nr_mfns, 0,
-                                            0, p2m_batch) != 0) )
+                                            0, XENMEM_DEFAULT_NODE, p2m_batch) != 0) )
     {
         ERROR("Failed to allocate memory for batch.!\n");
         errno = ENOMEM;
diff -r e308bd4e9179 -r a0dccef499b0 tools/libxc/xc_hvm_build.c
--- a/tools/libxc/xc_hvm_build.c  Thu Jul 03 13:01:11 2008 +0200
+++ b/tools/libxc/xc_hvm_build.c  Thu Jul 03 13:04:01 2008 +0200
@@ -218,7 +218,7 @@
      * we can be preempted and hence dom0 remains responsive.
      */
     rc = xc_domain_memory_populate_physmap(
-        xc_handle, dom, 0xa0, 0, 0, &page_array[0x00]);
+        xc_handle, dom, 0xa0, 0, 0, XENMEM_DEFAULT_NODE, &page_array[0x00]);
     cur_pages = 0xc0;
     while ( (rc == 0) && (nr_pages > cur_pages) )
     {
@@ -243,6 +243,7 @@
             struct xen_memory_reservation sp_req = {
                 .nr_extents = count >> SUPERPAGE_PFN_SHIFT,
                 .extent_order = SUPERPAGE_PFN_SHIFT,
+                .mem_flags = XENMEM_set_node(XENMEM_DEFAULT_NODE),
                 .domid = dom
             };
             set_xen_guest_handle(sp_req.extent_start, sp_extents);
@@ -261,7 +262,7 @@
         if ( count != 0 )
         {
             rc = xc_domain_memory_populate_physmap(
-                xc_handle, dom, count, 0, 0, &page_array[cur_pages]);
+                xc_handle, dom, count, 0, 0, XENMEM_DEFAULT_NODE, &page_array[cur_pages]);
             cur_pages += count;
         }
     }
diff -r e308bd4e9179 -r a0dccef499b0 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h  Thu Jul 03 13:01:11 2008 +0200
+++ b/tools/libxc/xenctrl.h  Thu Jul 03 13:04:01 2008 +0200
@@ -625,6 +625,7 @@
                                       unsigned long nr_extents,
                                       unsigned int extent_order,
                                       unsigned int address_bits,
+                                      unsigned int node,
                                       xen_pfn_t *extent_start);

 int xc_domain_ioport_permission(int xc_handle,
diff -r e308bd4e9179 -r a0dccef499b0 xen/common/memory.c
--- a/xen/common/memory.c  Thu Jul 03 13:01:11 2008 +0200
+++ b/xen/common/memory.c  Thu Jul 03 13:04:01 2008 +0200
@@ -44,7 +44,6 @@
     unsigned long i;
    xen_pfn_t mfn;
     struct domain *d = a->domain;
-    unsigned int node = domain_to_node(d);

     if ( !guest_handle_is_null(a->extent_list) &&
          !guest_handle_subrange_okay(a->extent_list, a->nr_done,
@@ -64,7 +63,7 @@
         }

         page = alloc_domheap_pages(
-            d, a->extent_order, a->memflags | MEMF_node(node));
+            d, a->extent_order, a->memflags);
         if ( unlikely(page == NULL) )
         {
             gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
@@ -93,7 +92,6 @@
     unsigned long i, j;
     xen_pfn_t gpfn, mfn;
     struct domain *d = a->domain;
-    unsigned int node = domain_to_node(d);

     if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
                                      a->nr_extents-1) )
@@ -115,7 +113,7 @@
             goto out;

         page = alloc_domheap_pages(
-            d, a->extent_order, a->memflags | MEMF_node(node));
+            d, a->extent_order, a->memflags);
         if ( unlikely(page == NULL) )
         {
             gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
diff -r e308bd4e9179 -r a0dccef499b0 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c  Thu Jul 03 13:01:11 2008 +0200
+++ b/xen/common/page_alloc.c  Thu Jul 03 13:04:01 2008 +0200
@@ -788,7 +788,7 @@
 {
     struct page_info *pg = NULL;
     unsigned int bits = memflags >> _MEMF_bits, zone_hi = NR_ZONES - 1;
-    unsigned int node = (uint8_t)((memflags >> _MEMF_node) - 1);
+    unsigned int node = MEMF_getnode(memflags);

     ASSERT(!in_irq());
diff -r e308bd4e9179 -r a0dccef499b0 xen/include/public/memory.h
--- a/xen/include/public/memory.h  Thu Jul 03 13:01:11 2008 +0200
+++ b/xen/include/public/memory.h  Thu Jul 03 13:04:01 2008 +0200
@@ -35,6 +35,8 @@
 #define XENMEM_increase_reservation 0
 #define XENMEM_decrease_reservation 1
 #define XENMEM_populate_physmap     6
+
+#define XENMEM_DEFAULT_NODE ((unsigned int)-1)

 #define XENMEM_addr_bits(f) ((f)&0xFF)
 #define XENMEM_get_node(f)  (((((f)&0xFF00)>>8)-1)&0xFF)
diff -r e308bd4e9179 -r a0dccef499b0 xen/include/xen/mm.h
--- a/xen/include/xen/mm.h  Thu Jul 03 13:01:11 2008 +0200
+++ b/xen/include/xen/mm.h  Thu Jul 03 13:04:01 2008 +0200
@@ -74,6 +74,7 @@
 #define MEMF_no_refcount  (1U<<_MEMF_no_refcount)
 #define _MEMF_node        8
 #define MEMF_node(n)      ((((n)+1)&0xff)<<_MEMF_node)
+#define MEMF_getnode(n)   (uint8_t)(((n)>>_MEMF_node)-1)
 #define _MEMF_bits        24
 #define MEMF_bits(n)      ((n)<<_MEMF_bits)
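An editorial note on the node encoding the patch relies on: MEMF_node()
stores node+1 in the eight bits at _MEMF_node, so an all-zero memflags value
naturally means "no node preference", and the new MEMF_getnode() accessor
reverses the transformation. A self-contained sketch that checks the round
trip (the main() harness is mine; the macros are copied from the mm.h hunk
above):

    #include <assert.h>
    #include <stdint.h>

    /* Macros copied from the xen/include/xen/mm.h hunk above. */
    #define _MEMF_node      8
    #define MEMF_node(n)    ((((n)+1)&0xff)<<_MEMF_node)
    #define MEMF_getnode(n) (uint8_t)(((n)>>_MEMF_node)-1)

    int main(void)
    {
        /* A concrete node number survives the round trip... */
        assert(MEMF_getnode(MEMF_node(3)) == 3);
        /* ...and memflags with an all-zero node field decode to 0xff,
         * i.e. (uint8_t)-1, which is XENMEM_DEFAULT_NODE truncated to
         * the 8-bit node field. */
        assert(MEMF_getnode(0) == 0xff);
        return 0;
    }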
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel