
Re: [Xen-devel] [RFC][PATCH 2/2] Add hcall to probe Xen heap



* Ryan Harper <ryanh@xxxxxxxxxx> [2007-04-09 20:10]:
> For post-3.0.5 inclusion:
> 
> Add new domctl hypercall to expose current heap values.  This functionality is
> needed for probing how much memory is available in a given node prior to VM
> creation.

Refreshed to changeset:   15200:bd3d6b4c52ec
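
For anyone who wants to poke at this from the tools side, here is a rough
usage sketch (not part of the patch; the includes, loop and error handling
are only illustrative).  It calls the new xc_availheap() once with
node == -1 to get the total and the node count, then once per node across
all zones, which is the kind of probing the changelog describes:

/* sketch only: probe per-node free heap pages before VM creation */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <xenctrl.h>

int main(void)
{
    uint32_t nr_zones, nr_nodes;
    uint64_t pages;
    int node, xc_handle = xc_interface_open();

    if ( xc_handle < 0 )
        return 1;

    /* node == -1: all nodes; zone_hi == -1: up to the last zone */
    if ( xc_availheap(xc_handle, 0, -1, -1, &nr_zones, &nr_nodes, &pages) )
        return 1;
    printf("%"PRIu64" free pages across %u nodes\n", pages, nr_nodes);

    /* now per node, over the same zone range */
    for ( node = 0; node < (int)nr_nodes; node++ )
        if ( !xc_availheap(xc_handle, 0, -1, node, NULL, NULL, &pages) )
            printf("node %d: %"PRIu64" free pages\n", node, pages);

    xc_interface_close(xc_handle);
    return 0;
}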

-- 
Ryan Harper
Software Engineer; Linux Technology Center
IBM Corp., Austin, Tx
(512) 838-9253   T/L: 678-9253
ryanh@xxxxxxxxxx


diffstat output:
 tools/libxc/xc_domain.c     |   28 ++++++++++++++++++++++++++++
 tools/libxc/xenctrl.h       |   16 ++++++++++++++++
 xen/common/domctl.c         |   35 +++++++++++++++++++++++++++++++++++
 xen/common/page_alloc.c     |    9 +--------
 xen/include/public/domctl.h |   16 ++++++++++++++++
 xen/include/xen/mm.h        |   10 ++++++++++
 6 files changed, 106 insertions(+), 8 deletions(-)

Signed-off-by: Ryan Harper <ryanh@xxxxxxxxxx>
---
Add a new domctl hypercall, XEN_DOMCTL_availheap, that reports the number of
free heap pages in a given range of zones and nodes.  This is needed to probe
how much memory is available in a given node prior to VM creation.

Signed-off-by: Ryan Harper <ryanh@xxxxxxxxxx>

diff -r 48cbb32df526 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Thu Mar 29 21:55:42 2007 -0500
+++ b/tools/libxc/xc_domain.c   Fri Mar 30 09:41:40 2007 -0500
@@ -584,6 +584,34 @@ int xc_domain_ioport_permission(int xc_h
     domctl.u.ioport_permission.allow_access = allow_access;
 
     return do_domctl(xc_handle, &domctl);
+}
+
+int xc_availheap(int xc_handle,
+                 int zone_lo,
+                 int zone_hi,
+                 int node,
+                 uint32_t *nr_zones,
+                 uint32_t *nr_nodes,
+                 uint64_t *pages)
+{
+    DECLARE_DOMCTL;
+    int rc = 0;
+
+    domctl.cmd = XEN_DOMCTL_availheap;
+    domctl.u.availheap.zone_lo = zone_lo;
+    domctl.u.availheap.zone_hi = zone_hi;
+    domctl.u.availheap.node = node;
+
+    rc = do_domctl(xc_handle, &domctl);
+    if ( rc >= 0 ) {
+        if (nr_zones)
+            *nr_zones = domctl.u.availheap.nr_zones;
+        if (nr_nodes)
+            *nr_nodes = domctl.u.availheap.nr_nodes;
+        *pages = domctl.u.availheap.pages;
+    }
+
+    return rc;
 }
 
 int xc_vcpu_setcontext(int xc_handle,
diff -r 48cbb32df526 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Thu Mar 29 21:55:42 2007 -0500
+++ b/tools/libxc/xenctrl.h     Fri Mar 30 09:44:28 2007 -0500
@@ -611,6 +611,22 @@ int xc_get_pfn_type_batch(int xc_handle,
 /* Get current total pages allocated to a domain. */
 long xc_get_tot_pages(int xc_handle, uint32_t domid);
 
+/**
+ * This function retrieves the number of pages available
+ * in the heap in a specific range of zones and nodes.
+ * 
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm zone_lo the first zone to query
+ * @parm zone_hi the last zone to query; pass -1 to query up to the
+ *       last zone (NR_ZONES - 1)
+ * @parm node the node to query; pass -1 to query all nodes
+ * @parm *nr_zones caller variable to put number of zones queried
+ * @parm *nr_nodes caller variable to put number of nodes queried
+ * @parm *pages caller variable to put total pages counted
+ * @return 0 on success, <0 on failure.
+ */
+int xc_availheap(int xc_handle, int zone_lo, int zone_hi, int node,
+                 uint32_t *nr_zones, uint32_t *nr_nodes, uint64_t *pages);
 
 /*
  * Trace Buffer Operations
diff -r 48cbb32df526 xen/common/domctl.c
--- a/xen/common/domctl.c       Thu Mar 29 21:55:42 2007 -0500
+++ b/xen/common/domctl.c       Fri Mar 30 10:02:01 2007 -0500
@@ -24,6 +24,8 @@
 #include <asm/current.h>
 #include <public/domctl.h>
 #include <acm/acm_hooks.h>
+#include <asm/numa.h>
+#include <xen/nodemask.h>
 
 extern long arch_do_domctl(
     struct xen_domctl *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);
@@ -711,6 +713,39 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
     }
     break;
 
+    case XEN_DOMCTL_availheap:
+    { 
+        int zone_lo = 0, zone_hi = NR_ZONES-1;
+
+        ret = -EINVAL;
+        if ( op->u.availheap.node >= num_online_nodes() )
+            break;
+        if ( op->u.availheap.zone_lo >= NR_ZONES )
+            break;
+        /* zone_hi == -1 means "up to the last zone"; only range-check otherwise */
+        if ( op->u.availheap.zone_hi >= 0 &&
+             op->u.availheap.zone_lo > op->u.availheap.zone_hi )
+            break;
+
+        if ( op->u.availheap.zone_lo > 0 )
+           zone_lo = op->u.availheap.zone_lo;
+        if ( op->u.availheap.zone_hi >= 0 && op->u.availheap.zone_hi < NR_ZONES )
+           zone_hi = op->u.availheap.zone_hi;
+
+        op->u.availheap.nr_zones = zone_hi - zone_lo + 1;
+
+        op->u.availheap.nr_nodes =
+            ( op->u.availheap.node < 0 ) ?
+            num_online_nodes() : 1;
+
+        op->u.availheap.pages =
+            avail_heap_pages(zone_lo, zone_hi, op->u.availheap.node);
+
+        if ( copy_to_guest(u_domctl, op, 1) )
+            ret = -EFAULT;
+        else
+            ret = 0;
+    }
+    break;
+
     default:
         ret = arch_do_domctl(op, u_domctl);
         break;
diff -r 48cbb32df526 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Thu Mar 29 21:55:42 2007 -0500
+++ b/xen/common/page_alloc.c   Fri Mar 30 10:00:25 2007 -0500
@@ -310,13 +310,6 @@ unsigned long alloc_boot_pages(
  * BINARY BUDDY ALLOCATOR
  */
 
-#define MEMZONE_XEN 0
-#ifdef PADDR_BITS
-#define NR_ZONES    (PADDR_BITS - PAGE_SHIFT)
-#else
-#define NR_ZONES    (BITS_PER_LONG - PAGE_SHIFT)
-#endif
-
 #define pfn_dom_zone_type(_pfn) (fls(_pfn) - 1)
 
 typedef struct list_head heap_by_zone_and_order_t[NR_ZONES][MAX_ORDER+1];
@@ -544,7 +537,7 @@ void init_heap_pages(
     }
 }
 
-static unsigned long avail_heap_pages(
+unsigned long avail_heap_pages(
     unsigned int zone_lo, unsigned int zone_hi, unsigned int node)
 {
     unsigned int i, zone, num_nodes = num_online_nodes();
diff -r 48cbb32df526 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h       Thu Mar 29 21:55:42 2007 -0500
+++ b/xen/include/public/domctl.h       Thu Mar 29 22:29:43 2007 -0500
@@ -389,6 +389,21 @@ typedef struct xen_domctl_settimeoffset 
 typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t);
 
+
+#define XEN_DOMCTL_availheap        37 
+struct xen_domctl_availheap {
+    /* in  */
+    int zone_lo;             /* starting zone */
+    int zone_hi;             /* ending zone, -1 for zone_lo to NR_ZONES */
+    int node;                /* query available pages in node, -1 for all */
+    /* out */
+    uint32_t nr_zones;    /* number of zones queried */
+    uint32_t nr_nodes;    /* number of nodes queried */
+    uint64_t pages;
+};
+typedef struct xen_domctl_availheap xen_domctl_availheap_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_availheap_t);
+
  
 #define XEN_DOMCTL_gethvmcontext     33
 #define XEN_DOMCTL_sethvmcontext     34
@@ -457,6 +472,7 @@ struct xen_domctl {
         struct xen_domctl_hvmcontext        hvmcontext;
         struct xen_domctl_address_size      address_size;
         struct xen_domctl_sendtrigger       sendtrigger;
+        struct xen_domctl_availheap         availheap;
         uint8_t                             pad[128];
     } u;
 };
diff -r 48cbb32df526 xen/include/xen/mm.h
--- a/xen/include/xen/mm.h      Thu Mar 29 21:55:42 2007 -0500
+++ b/xen/include/xen/mm.h      Fri Mar 30 10:01:02 2007 -0500
@@ -33,6 +33,13 @@
 #include <xen/list.h>
 #include <xen/spinlock.h>
 
+#define MEMZONE_XEN 0
+#ifdef PADDR_BITS
+#define NR_ZONES    (PADDR_BITS - PAGE_SHIFT)
+#else
+#define NR_ZONES    (BITS_PER_LONG - PAGE_SHIFT)
+#endif
+
 struct domain;
 struct page_info;
 
@@ -64,6 +71,9 @@ unsigned long avail_domheap_pages(void);
 unsigned long avail_domheap_pages(void);
 #define alloc_domheap_page(d) (alloc_domheap_pages(d,0,0))
 #define free_domheap_page(p)  (free_domheap_pages(p,0))
+
+unsigned long avail_heap_pages(
+    unsigned int zone_lo, unsigned int zone_hi, unsigned int node);
 
 void scrub_heap_pages(void);
 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

