Re: [Xen-devel] [PATCH] vNUMA: validate XEN_DOMCTL_setvnumainfo input

On 03/03/15 14:38, Jan Beulich wrote:
As we get ready to use the information set for a domain here, we should
make sure it is actually valid: both vNode and pNode numbers should be
in range. Do a little bit of other cleanup so the code ends up looking
reasonably consistent in style.

Along with this, we no longer need an array of unsigned int to store the
pNode numbers - a nodeid_t array (a quarter the size) suffices.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
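
For reference, the "quarter the size" figure follows from nodeid_t being a
single byte wide (Xen typedefs it as u8 on x86, which is why the patch adds
the asm/numa.h include below), versus the 4-byte unsigned int used
previously. A minimal standalone sketch of the arithmetic, using an
illustrative vNode count rather than anything from the patch:

#include <stdio.h>
#include <stdint.h>

typedef uint8_t nodeid_t;          /* mirrors Xen's typedef u8 nodeid_t */

int main(void)
{
    unsigned int nr_vnodes = 8;    /* stand-in value for illustration */

    /* Before the patch: one 4-byte unsigned int per vNode. */
    size_t before = nr_vnodes * sizeof(unsigned int);
    /* After the patch: one 1-byte nodeid_t per vNode. */
    size_t after = nr_vnodes * sizeof(nodeid_t);

    printf("before: %zu bytes, after: %zu bytes\n", before, after);
    return 0;
}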


--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -344,7 +344,7 @@ static struct vnuma_info *vnuma_alloc(un
 
     vnuma->vdistance = xmalloc_array(unsigned int, nr_vnodes * nr_vnodes);
     vnuma->vcpu_to_vnode = xmalloc_array(unsigned int, nr_vcpus);
-    vnuma->vnode_to_pnode = xmalloc_array(unsigned int, nr_vnodes);
+    vnuma->vnode_to_pnode = xmalloc_array(nodeid_t, nr_vnodes);
     vnuma->vmemrange = xmalloc_array(xen_vmemrange_t, nr_ranges);
 
     if ( vnuma->vdistance == NULL || vnuma->vmemrange == NULL ||
@@ -382,30 +382,40 @@ static struct vnuma_info *vnuma_init(con
                          nr_vnodes * nr_vnodes) )
         goto vnuma_fail;
 
+    if ( copy_from_guest(info->vmemrange, uinfo->vmemrange,
+                         uinfo->nr_vmemranges) )
+        goto vnuma_fail;
+
     if ( copy_from_guest(info->vcpu_to_vnode, uinfo->vcpu_to_vnode,
                          d->max_vcpus) )
         goto vnuma_fail;
 
-    if ( copy_from_guest(info->vnode_to_pnode, uinfo->vnode_to_pnode,
-                         nr_vnodes) )
-        goto vnuma_fail;
+    ret = -E2BIG;
+    for ( i = 0; i < d->max_vcpus; ++i )
+        if ( info->vcpu_to_vnode[i] >= nr_vnodes )
+            goto vnuma_fail;
 
-    if (copy_from_guest(info->vmemrange, uinfo->vmemrange,
-                        uinfo->nr_vmemranges))
-        goto vnuma_fail;
+    for ( i = 0; i < nr_vnodes; ++i )
+    {
+        unsigned int pnode;
+
+        ret = -EFAULT;
+        if ( copy_from_guest_offset(&pnode, uinfo->vnode_to_pnode, i, 1) )
+            goto vnuma_fail;
+        ret = -E2BIG;
+        if ( pnode >= MAX_NUMNODES )
+            goto vnuma_fail;
+        info->vnode_to_pnode[i] = pnode;
+    }
 
     info->nr_vnodes = nr_vnodes;
     info->nr_vmemranges = uinfo->nr_vmemranges;
 
     /* Check that vmemranges flags are zero. */
+    ret = -EINVAL;
     for ( i = 0; i < info->nr_vmemranges; i++ )
-    {
         if ( info->vmemrange[i].flags != 0 )
-        {
-            ret = -EINVAL;
             goto vnuma_fail;
-        }
-    }
 
     return info;
 
--- a/xen/include/xen/domain.h
+++ b/xen/include/xen/domain.h
@@ -4,6 +4,7 @@
 
 #include <public/xen.h>
 #include <asm/domain.h>
+#include <asm/numa.h>
 
 typedef union {
     struct vcpu_guest_context *nat;
@@ -99,7 +100,7 @@ struct vnuma_info {
     unsigned int nr_vmemranges;
     unsigned int *vdistance;
     unsigned int *vcpu_to_vnode;
-    unsigned int *vnode_to_pnode;
+    nodeid_t *vnode_to_pnode;
     struct xen_vmemrange *vmemrange;
 };
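
To restate the validation the patch introduces: every vCPU must map to an
in-range vNode, and every vNode must map to an in-range pNode, with -E2BIG
returned otherwise. A minimal standalone sketch of the same checks, outside
the Xen tree (the helper name and the MAX_NUMNODES value here are
illustrative assumptions, and the arrays are taken as already copied in
from the guest):

#include <stdbool.h>

#define MAX_NUMNODES 64            /* assumed value, for illustration only */

/* Hypothetical helper: true iff both mappings are fully in range. */
static bool vnuma_input_ok(const unsigned int *vcpu_to_vnode,
                           unsigned int nr_vcpus,
                           const unsigned int *vnode_to_pnode,
                           unsigned int nr_vnodes)
{
    unsigned int i;

    /* Each vCPU's vNode must exist (-E2BIG in the patch). */
    for ( i = 0; i < nr_vcpus; ++i )
        if ( vcpu_to_vnode[i] >= nr_vnodes )
            return false;

    /* Each vNode's pNode must exist (-E2BIG in the patch). */
    for ( i = 0; i < nr_vnodes; ++i )
        if ( vnode_to_pnode[i] >= MAX_NUMNODES )
            return false;

    return true;
}

Note the patch also hoists the error code assignments (ret = -E2BIG,
ret = -EINVAL) ahead of their loops, which is what lets the braces around
the vmemrange flags check be dropped.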
 
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
