
Re: [Xen-devel] [PATCH v6 01/10] xen: vnuma topology and subop hypercalls



On Fri, 2014-07-18 at 01:50 -0400, Elena Ufimtseva wrote:

> diff --git a/xen/common/domain.c b/xen/common/domain.c
> index cd64aea..895584a 100644

> @@ -297,6 +297,144 @@ int vcpuaffinity_params_invalid(const xen_domctl_vcpuaffinity_t *vcpuaff)
>              guest_handle_is_null(vcpuaff->cpumap_soft.bitmap));
>  }
>  
> +/*
> + * Allocates memory for vNUMA, **vnuma should be NULL.
> + * Caller has to make sure that domain has max_pages
> + * and number of vcpus set for domain.
> + * Verifies that single allocation does not exceed
> + * PAGE_SIZE.
> + */
> +static int vnuma_alloc(struct vnuma_info **vnuma,
> +                       unsigned int nr_vnodes,
> +                       unsigned int nr_vcpus,
> +                       unsigned int dist_size)
> +{
> +    struct vnuma_info *v;
> +
> +    if ( vnuma && *vnuma )
> +        return -EINVAL;
> +
> +    v = *vnuma;
>
Do you need this? What for?
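
Something like the below (just an untested sketch) seems enough to me,
with the bonus that a NULL 'vnuma' can no longer be dereferenced by the
'*vnuma = v' at the end:

    /* Refuse a NULL pointer, or an already constructed topology. */
    if ( !vnuma || *vnuma )
        return -EINVAL;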

> +    /*
> +     * Check if any of the xmallocs exceeds PAGE_SIZE.
> +     * If yes, consider it as an error for now.
>
Do you mind elaborating a bit more on the 'for now'? Why 'for now'?
What's the plan for the future, etc. ...
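
FWIW, if I've done the math right, with 4 KiB pages these checks mean
at most PAGE_SIZE/sizeof(unsigned int) = 1024 vcpus and, since
dist_size is nr_vnodes*nr_vnodes, at most 32 virtual nodes (32*32 =
1024), which is presumably stricter than the vmemrange bound. Spelling
that out in the comment would help, I think.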

> +     */
> +    if ( nr_vnodes > PAGE_SIZE / sizeof(nr_vnodes)       ||
> +        nr_vcpus > PAGE_SIZE / sizeof(nr_vcpus)          ||
> +        nr_vnodes > PAGE_SIZE / sizeof(struct vmemrange) ||
> +        dist_size > PAGE_SIZE / sizeof(dist_size) )
> +        return -EINVAL;
> +
> +    v = xzalloc(struct vnuma_info);
> +    if ( !v )
> +        return -ENOMEM;
> +
> +    v->vdistance = xmalloc_array(unsigned int, dist_size);
> +    v->vmemrange = xmalloc_array(vmemrange_t, nr_vnodes);
> +    v->vcpu_to_vnode = xmalloc_array(unsigned int, nr_vcpus);
> +    v->vnode_to_pnode = xmalloc_array(unsigned int, nr_vnodes);
> +
> +    if ( v->vdistance == NULL || v->vmemrange == NULL ||
> +        v->vcpu_to_vnode == NULL || v->vnode_to_pnode == NULL )
> +    {
> +        vnuma_destroy(v);
> +        return -ENOMEM;
> +    }
> +
> +    *vnuma = v;
> +
> +    return 0;
> +}
> +
> +/*
> + * Allocate memory and construct one vNUMA node,
> + * set default parameters, assign all memory and
> + * vcpus to this node, set distance to 10.
> + */
> +static long vnuma_fallback(const struct domain *d,
> +                          struct vnuma_info **vnuma)
> +{
> + 
I think I agree with Wei about this fallback not being necessary.

> +/*
> + * Construct the vNUMA topology from u_vnuma and return it in dst.
> + */
> +long vnuma_init(const struct xen_domctl_vnuma *u_vnuma,
> +                const struct domain *d,
> +                struct vnuma_info **dst)
> +{
> +    unsigned int dist_size, nr_vnodes = 0;
> +    long ret;
> +    struct vnuma_info *v = NULL;
> +
> +    ret = -EINVAL;
> +
Why not initialize 'ret' while defining it?
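
I.e.:

    long ret = -EINVAL;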

> +    /* If vNUMA topology already set, just exit. */
> +    if ( !u_vnuma || *dst )
> +        return ret;
> +
> +    nr_vnodes = u_vnuma->nr_vnodes;
> +
> +    if ( nr_vnodes == 0 )
> +        return ret;
> +
> +    if ( nr_vnodes > (UINT_MAX / nr_vnodes) )
> +        return ret;
> +
Mmmm, do we perhaps want to #define a maximum number of supported
virtual nodes, put it somewhere in a header, and use it for the check? I
mean something like what we have for the host (in that case, it's called
MAX_NUMNODES).

I mean, even with UINT_MAX being 2^32-1, this overflow check still
allows up to 65535 virtual nodes: would it make sense for a guest to be
that big?
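
Something like this, I mean (name and value purely illustrative):

    /* Hypothetical cap on guest virtual nodes, a la MAX_NUMNODES. */
    #define XEN_MAX_VNODES 64

    if ( nr_vnodes == 0 || nr_vnodes > XEN_MAX_VNODES )
        return ret;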

> +    dist_size = nr_vnodes * nr_vnodes;
> +
> +    ret = vnuma_alloc(&v, nr_vnodes, d->max_vcpus, dist_size);
> +    if ( ret )
> +        return ret;
> +
> +    /* On copy failure, fall back to one vNUMA node and return success. */
> +    ret = 0;
> +
> +    if ( copy_from_guest(v->vdistance, u_vnuma->vdistance, dist_size) )
> +        goto vnuma_onenode;
> +    if ( copy_from_guest(v->vmemrange, u_vnuma->vmemrange, nr_vnodes) )
> +        goto vnuma_onenode;
> +    if ( copy_from_guest(v->vcpu_to_vnode, u_vnuma->vcpu_to_vnode,
> +        d->max_vcpus) )
> +        goto vnuma_onenode;
> +    if ( copy_from_guest(v->vnode_to_pnode, u_vnuma->vnode_to_pnode,
> +        nr_vnodes) )
> +        goto vnuma_onenode;
> +
> +    v->nr_vnodes = nr_vnodes;
> +    *dst = v;
> +
> +    return ret;
> +
> +vnuma_onenode:
> +    vnuma_destroy(v);
> +    return vnuma_fallback(d, dst);
>
As said, just report the error and bail in this case.
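
I.e., something along these lines (just a sketch, assuming
vnuma_fallback() goes away):

    ret = -EFAULT;
    if ( copy_from_guest(v->vdistance, u_vnuma->vdistance, dist_size) )
        goto err;
    ...
    v->nr_vnodes = nr_vnodes;
    *dst = v;

    return 0;

 err:
    vnuma_destroy(v);
    return ret;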

> +}
> +
>  long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
>  {
>      long ret = 0;
> @@ -967,6 +1105,35 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
>      }
>      break;
>  
> +    case XEN_DOMCTL_setvnumainfo:
> +    {
> +        struct vnuma_info *v = NULL;
> +
> +        ret = -EFAULT;
> +        if ( guest_handle_is_null(op->u.vnuma.vdistance)     ||
> +            guest_handle_is_null(op->u.vnuma.vmemrange)      ||
> +            guest_handle_is_null(op->u.vnuma.vcpu_to_vnode)  ||
> +            guest_handle_is_null(op->u.vnuma.vnode_to_pnode) )
> +            return ret;
> +
> +        ret = -EINVAL;
> +
> +        ret = vnuma_init(&op->u.vnuma, d, &v);
>
Rather pointless 'ret=-EINVAL', I would say. :-)
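
I.e., just drop it: 'ret' is overwritten with vnuma_init()'s return
value on the very next line anyway.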

> +        if ( ret < 0 || v == NULL )
> +            break;
> +
> +        /* Free any previous vNUMA topology before overwriting it. */
> +        if ( d->vnuma )
> +            vnuma_destroy(d->vnuma);
> +
> +        domain_lock(d);
> +        d->vnuma = v;
> +        domain_unlock(d);
> +
> +        ret = 0;
> +    }
> +    break;
> +
>      default:
>          ret = arch_do_domctl(op, d, u_domctl);
>          break;

Regards,
Dario

-- 
<<This happens because I choose it to happen!>> (Raistlin Majere)
-----------------------------------------------------------------
Dario Faggioli, Ph.D, http://about.me/dario.faggioli
Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK)
