
[Xen-devel] [DOCSDAY PATCH] xen: rework the comments for struct xen_domctl_vnuma.



In fact, vnode_to_pnode is an array, not a mask; there was a
typo in the comment about vmemrange; and there was no
indication of the data directions.

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Jan Beulich <JBeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Elena Ufimtseva <elena.ufimtseva@xxxxxxxxxx>
---
 xen/include/public/domctl.h |   32 +++++++++++++++++++++-----------
 1 file changed, 21 insertions(+), 11 deletions(-)

diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index b3413a2..ad76853 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -958,27 +958,37 @@ typedef struct xen_domctl_vcpu_msrs xen_domctl_vcpu_msrs_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpu_msrs_t);
 #endif
 
-/*
- * Use in XEN_DOMCTL_setvnumainfo to set
- * vNUMA domain topology.
- */
+/* XEN_DOMCTL_setvnumainfo: specifies a virtual NUMA topology for the guest */
 struct xen_domctl_vnuma {
+    /* IN: number of vNUMA nodes to set up. Shall be greater than 0 */
     uint32_t nr_vnodes;
+    /* IN: number of memory ranges to set up */
     uint32_t nr_vmemranges;
+    /*
+     * IN: number of vCPUs of the domain (used as size of the vcpu_to_vnode
+     * array declared below). Shall be equal to the domain's max_vcpus.
+     */
     uint32_t nr_vcpus;
-    uint32_t pad;
+    uint32_t pad;                                  /* must be zero */
+
+    /*
+     * IN: array for specifying the distances between the vNUMA
+     * nodes. Shall have nr_vnodes*nr_vnodes elements.
+     */
     XEN_GUEST_HANDLE_64(uint) vdistance;
+    /*
+     * IN: array for specifying to what vNUMA node each vCPU
+     * belongs. Shall have nr_vcpus elements.
+     */
     XEN_GUEST_HANDLE_64(uint) vcpu_to_vnode;
-
     /*
-     * vnodes to physical NUMA nodes mask.
-     * This kept on per-domain basis for
-     * interested consumers, such as numa aware ballooning.
+     * IN: array for specifying on what physical NUMA node
+     * each vNUMA node is placed. Shall have nr_vnodes elements.
      */
     XEN_GUEST_HANDLE_64(uint) vnode_to_pnode;
-
     /*
-     * memory rages for each vNUMA node
+     * IN: array for specifying the memory ranges.
+     * Shall have nr_vmemranges elements.
      */
     XEN_GUEST_HANDLE_64(xen_vmemrange_t) vmemrange;
 };
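
For illustration, here is a minimal sketch of how a toolstack could
fill in these arrays for a two-vnode, four-vcpu guest and pass them
down via XEN_DOMCTL_setvnumainfo. It assumes a libxc-style wrapper,
xc_domain_setvnuma(), with the parameter order shown below (check the
actual signature in your tree); the distance values follow the usual
ACPI SLIT convention of 10 for local and 20 for remote.

#include <string.h>
#include <xenctrl.h>

/* Illustrative sketch only, not part of this patch. */
static int set_guest_vnuma(xc_interface *xch, uint32_t domid)
{
    const uint32_t nr_vnodes = 2, nr_vmemranges = 2, nr_vcpus = 4;

    /* Flattened nr_vnodes x nr_vnodes distance matrix. */
    unsigned int vdistance[2 * 2] = {
        10, 20,                    /* from vnode 0: local, remote */
        20, 10,                    /* from vnode 1: remote, local */
    };
    unsigned int vcpu_to_vnode[4]  = { 0, 0, 1, 1 }; /* 2 vcpus per vnode */
    unsigned int vnode_to_pnode[2] = { 0, 1 };       /* vnode i on pnode i */
    xen_vmemrange_t vmemrange[2];

    /* One 1GiB memory range per vnode, contiguous in guest space. */
    memset(vmemrange, 0, sizeof(vmemrange));
    vmemrange[0].start = 0;
    vmemrange[0].end   = 1ULL << 30;
    vmemrange[0].nid   = 0;
    vmemrange[1].start = 1ULL << 30;
    vmemrange[1].end   = 2ULL << 30;
    vmemrange[1].nid   = 1;

    return xc_domain_setvnuma(xch, domid, nr_vnodes, nr_vmemranges,
                              nr_vcpus, vmemrange, vdistance,
                              vcpu_to_vnode, vnode_to_pnode);
}

Since all the handles are IN parameters, the hypervisor copies the
arrays during the hypercall, so they can live in ordinary caller
memory; nr_vcpus has to match the domain's max_vcpus, as per the
comment above.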

