
[Xen-devel] [PATCH 2/5] allow domain heap allocations to specify more than one NUMA node



... using struct domain as a container for passing the respective
affinity mask: Quite a number of allocations are domain specific, yet
are not to be accounted to that domain. Introduce a flag suppressing
the accounting (and ownership assignment) altogether (i.e. going
beyond MEMF_no_refcount), and use it right away in common code (x86
and IOMMU code will get adjusted subsequently).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
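
To illustrate the intended calling convention, a minimal sketch
(alloc_scratch_page() is a made-up name for this example;
alloc_domheap_page() and MEMF_no_owner are the interfaces actually
touched by this patch):

    /*
     * Hypothetical helper: allocate a page with d's NUMA affinity,
     * but neither assign it to d nor account it against d.  With
     * MEMF_no_owner, alloc_domheap_pages() uses d only to derive the
     * node affinity, and implies MEMF_no_refcount internally.
     */
    static struct page_info *alloc_scratch_page(struct domain *d)
    {
        return alloc_domheap_page(d, MEMF_no_owner);
    }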

--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1657,7 +1657,8 @@ gnttab_transfer(
             struct page_info *new_page;
             void *sp, *dp;
 
-            new_page = alloc_domheap_page(NULL, MEMF_bits(max_bitsize));
+            new_page = alloc_domheap_page(e, MEMF_no_owner |
+                                             MEMF_bits(max_bitsize));
             if ( new_page == NULL )
             {
                 gop.status = GNTST_address_too_big;
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -462,7 +462,8 @@ static long memory_exchange(XEN_GUEST_HA
         /* Allocate a chunk's worth of anonymous output pages. */
         for ( j = 0; j < (1UL << out_chunk_order); j++ )
         {
-            page = alloc_domheap_pages(NULL, exch.out.extent_order, memflags);
+            page = alloc_domheap_pages(d, exch.out.extent_order,
+                                       MEMF_no_owner | memflags);
             if ( unlikely(page == NULL) )
             {
                 rc = -ENOMEM;
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1685,10 +1685,14 @@ struct page_info *alloc_domheap_pages(
 
     ASSERT(!in_irq());
 
-    bits = domain_clamp_alloc_bitsize(d, bits ? : (BITS_PER_LONG+PAGE_SHIFT));
+    bits = domain_clamp_alloc_bitsize(memflags & MEMF_no_owner ? NULL : d,
+                                      bits ? : (BITS_PER_LONG+PAGE_SHIFT));
     if ( (zone_hi = min_t(unsigned int, bits_to_zone(bits), zone_hi)) == 0 )
         return NULL;
 
+    if ( memflags & MEMF_no_owner )
+        memflags |= MEMF_no_refcount;
+
     if ( dma_bitsize && ((dma_zone = bits_to_zone(dma_bitsize)) < zone_hi) )
         pg = alloc_heap_pages(dma_zone + 1, zone_hi, order, memflags, d);
 
@@ -1698,7 +1702,8 @@ struct page_info *alloc_domheap_pages(
                                   memflags, d)) == NULL)) )
          return NULL;
 
-    if ( (d != NULL) && assign_pages(d, pg, order, memflags) )
+    if ( d && !(memflags & MEMF_no_owner) &&
+         assign_pages(d, pg, order, memflags) )
     {
         free_heap_pages(pg, order);
         return NULL;
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -120,6 +120,8 @@ struct npfec {
 #define  MEMF_no_dma      (1U<<_MEMF_no_dma)
 #define _MEMF_exact_node  4
 #define  MEMF_exact_node  (1U<<_MEMF_exact_node)
+#define _MEMF_no_owner    5
+#define  MEMF_no_owner    (1U<<_MEMF_no_owner)
 #define _MEMF_node        8
 #define  MEMF_node_mask   ((1U << (8 * sizeof(nodeid_t))) - 1)
 #define  MEMF_node(n)     ((((n) + 1) & MEMF_node_mask) << _MEMF_node)
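
For reference, a sketch of how the MEMF_* bits above compose (derived
only from the definitions in this hunk): the low byte holds the
boolean flags, with the new MEMF_no_owner at bit 5, while MEMF_node()
stores the node ID biased by one starting at bit 8, so that node 0
remains distinguishable from "no node requested":

    /* Example: request node 2 together with the new no-owner behaviour. */
    unsigned int memflags = MEMF_no_owner | MEMF_node(2);
    /* MEMF_no_owner == 1 << 5 == 0x20; MEMF_node(2) == 3 << 8 == 0x300 */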



