Re: [Xen-devel] [PATCH v4 for Xen 4.6 1/4] xen: enable per-VCPU parameter settings for RTDS scheduler
>>> On 11.07.15 at 06:52, <lichong659@xxxxxxxxx> wrote:
> @@ -1162,8 +1176,82 @@ rt_dom_cntl(
> }
> spin_unlock_irqrestore(&prv->lock, flags);
> break;
> + case XEN_DOMCTL_SCHEDOP_getvcpuinfo:
> + spin_lock_irqsave(&prv->lock, flags);
> + for ( index = 0; index < op->u.v.nr_vcpus; index++ )
> + {
> + if ( copy_from_guest_offset(&local_sched,
> + op->u.v.vcpus, index, 1) )
> + {
> + rc = -EFAULT;
> + break;
> + }
> + if ( local_sched.vcpuid >= d->max_vcpus ||
> + d->vcpu[local_sched.vcpuid] == NULL )
> + {
> + rc = -EINVAL;
> + break;
> + }
> + svc = rt_vcpu(d->vcpu[local_sched.vcpuid]);
> +
> + local_sched.s.rtds.budget = svc->budget / MICROSECS(1);
> + local_sched.s.rtds.period = svc->period / MICROSECS(1);
> +
> + if ( __copy_to_guest_offset(op->u.v.vcpus, index,
> + &local_sched, 1) )
> + {
> + rc = -EFAULT;
> + break;
> + }
> + if( hypercall_preempt_check() )
> + {
> + rc = -ERESTART;
> + break;
> + }
I still don't see how this is supposed to work: you break out with -ERESTART, but nothing records how far the loop got, so the continued/re-issued hypercall would start again from index 0 and redo all the earlier iterations (a possible alternative is sketched right after this case).
> + }
> + spin_unlock_irqrestore(&prv->lock, flags);
> + break;
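To illustrate one possible (rough and untested) alternative, re-using your local variables: don't return -ERESTART at all, take the lock only around the actual scheduler state access, and report back via the copied-back domctl how many entries were processed, so the tool stack can re-issue the call for the rest:

    case XEN_DOMCTL_SCHEDOP_getvcpuinfo:
        while ( index < op->u.v.nr_vcpus )
        {
            if ( copy_from_guest_offset(&local_sched,
                                        op->u.v.vcpus, index, 1) )
            {
                rc = -EFAULT;
                break;
            }
            if ( local_sched.vcpuid >= d->max_vcpus ||
                 d->vcpu[local_sched.vcpuid] == NULL )
            {
                rc = -EINVAL;
                break;
            }

            /* Hold the lock only while reading the scheduler state. */
            spin_lock_irqsave(&prv->lock, flags);
            svc = rt_vcpu(d->vcpu[local_sched.vcpuid]);
            local_sched.s.rtds.budget = svc->budget / MICROSECS(1);
            local_sched.s.rtds.period = svc->period / MICROSECS(1);
            spin_unlock_irqrestore(&prv->lock, flags);

            if ( __copy_to_guest_offset(op->u.v.vcpus, index,
                                        &local_sched, 1) )
            {
                rc = -EFAULT;
                break;
            }

            ++index;

            /* Stop early, with progress recorded, when preemption is due. */
            if ( index < op->u.v.nr_vcpus && hypercall_preempt_check() )
                break;
        }
        if ( !rc )
            /* Tell the caller how many entries were actually processed. */
            op->u.v.nr_vcpus = index;
        break;

The same pattern would then also apply to the put case.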
> + case XEN_DOMCTL_SCHEDOP_putvcpuinfo:
> + spin_lock_irqsave(&prv->lock, flags);
> + for( index = 0; index < op->u.v.nr_vcpus; index++ )
> + {
> + if ( copy_from_guest_offset(&local_sched,
> + op->u.v.vcpus, index, 1) )
> + {
> + rc = -EFAULT;
> + break;
> + }
> + if ( local_sched.vcpuid >= d->max_vcpus ||
> + d->vcpu[local_sched.vcpuid] == NULL )
> + {
> + rc = -EINVAL;
> + break;
> + }
> + svc = rt_vcpu(d->vcpu[local_sched.vcpuid]);
> + period = MICROSECS(local_sched.s.rtds.period);
> + budget = MICROSECS(local_sched.s.rtds.budget);
> + if ( period < MICROSECS(10) || period > RTDS_MAX_PERIOD ||
> + budget < MICROSECS(10) || budget > period )
Apart from numerous coding style issues, I think the first of the checks in this if() is redundant (budget >= MICROSECS(10) together with budget <= period already implies period >= MICROSECS(10)) and hence would better be dropped.
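I.e. something along the lines of (style adjustments aside):

                if ( period > RTDS_MAX_PERIOD ||
                     budget < MICROSECS(10) || budget > period )
                {
                    rc = -EINVAL;
                    break;
                }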
> --- a/xen/common/schedule.c
> +++ b/xen/common/schedule.c
> @@ -1052,10 +1052,22 @@ long sched_adjust(struct domain *d, struct
> xen_domctl_scheduler_op *op)
> if ( ret )
> return ret;
>
> - if ( (op->sched_id != DOM2OP(d)->sched_id) ||
> - ((op->cmd != XEN_DOMCTL_SCHEDOP_putinfo) &&
> - (op->cmd != XEN_DOMCTL_SCHEDOP_getinfo)) )
> + if ( op->sched_id != DOM2OP(d)->sched_id )
> return -EINVAL;
> + else
> + switch ( op->cmd )
> + {
> + case XEN_DOMCTL_SCHEDOP_putinfo:
> + break;
> + case XEN_DOMCTL_SCHEDOP_getinfo:
> + break;
> + case XEN_DOMCTL_SCHEDOP_putvcpuinfo:
> + break;
> + case XEN_DOMCTL_SCHEDOP_getvcpuinfo:
> + break;
Only this break should stay; the three earlier ones should be dropped as redundant, letting the first three cases simply fall through to it.
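I.e. (assuming the unquoted remainder of the switch keeps a default returning -EINVAL, as the original code effectively did):

    switch ( op->cmd )
    {
    case XEN_DOMCTL_SCHEDOP_putinfo:
    case XEN_DOMCTL_SCHEDOP_getinfo:
    case XEN_DOMCTL_SCHEDOP_putvcpuinfo:
    case XEN_DOMCTL_SCHEDOP_getvcpuinfo:
        break;
    default:
        return -EINVAL;
    }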
> --- a/xen/include/public/domctl.h
> +++ b/xen/include/public/domctl.h
> @@ -330,31 +330,56 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
> #define XEN_SCHEDULER_ARINC653 7
> #define XEN_SCHEDULER_RTDS 8
>
> +typedef struct xen_domctl_sched_sedf {
> + uint64_aligned_t period;
> + uint64_aligned_t slice;
> + uint64_aligned_t latency;
> + uint32_t extratime;
> + uint32_t weight;
> +} xen_domctl_sched_sedf_t;
> +
> +typedef struct xen_domctl_sched_credit {
> + uint16_t weight;
> + uint16_t cap;
> +} xen_domctl_sched_credit_t;
> +
> +typedef struct xen_domctl_sched_credit2 {
> + uint16_t weight;
> +} xen_domctl_sched_credit2_t;
> +
> +typedef struct xen_domctl_sched_rtds {
> + uint32_t period;
> + uint32_t budget;
> +} xen_domctl_sched_rtds_t;
> +
> +typedef struct xen_domctl_schedparam_vcpu {
> + union {
> + xen_domctl_sched_credit_t credit;
> + xen_domctl_sched_credit2_t credit2;
> + xen_domctl_sched_rtds_t rtds;
> + } s;
> + uint16_t vcpuid;
> + uint16_t padding;
This pads only to a 32-bit boundary, still leaving another 32-bit hole before the next 64-bit boundary (see the sketch below the quoted structure).
> +} xen_domctl_schedparam_vcpu_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_domctl_schedparam_vcpu_t);
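One way to avoid that (just a sketch; the exact field widths and names are of course up to you) would be to widen vcpuid and pad the structure out to a 64-bit boundary explicitly:

typedef struct xen_domctl_schedparam_vcpu {
    union {
        xen_domctl_sched_credit_t credit;
        xen_domctl_sched_credit2_t credit2;
        xen_domctl_sched_rtds_t rtds;
    } s;
    uint32_t vcpuid;
    uint32_t padding;   /* explicit padding up to a 64-bit boundary */
} xen_domctl_schedparam_vcpu_t;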
> +
> /* Set or get info? */
> #define XEN_DOMCTL_SCHEDOP_putinfo 0
> #define XEN_DOMCTL_SCHEDOP_getinfo 1
> +#define XEN_DOMCTL_SCHEDOP_putvcpuinfo 2
> +#define XEN_DOMCTL_SCHEDOP_getvcpuinfo 3
> struct xen_domctl_scheduler_op {
> uint32_t sched_id; /* XEN_SCHEDULER_* */
> uint32_t cmd; /* XEN_DOMCTL_SCHEDOP_* */
> union {
> - struct xen_domctl_sched_sedf {
> - uint64_aligned_t period;
> - uint64_aligned_t slice;
> - uint64_aligned_t latency;
> - uint32_t extratime;
> - uint32_t weight;
> - } sedf;
> - struct xen_domctl_sched_credit {
> - uint16_t weight;
> - uint16_t cap;
> - } credit;
> - struct xen_domctl_sched_credit2 {
> - uint16_t weight;
> - } credit2;
> - struct xen_domctl_sched_rtds {
> - uint32_t period;
> - uint32_t budget;
> - } rtds;
> + xen_domctl_sched_sedf_t sedf;
> + xen_domctl_sched_credit_t credit;
> + xen_domctl_sched_credit2_t credit2;
> + xen_domctl_sched_rtds_t rtds;
> + struct {
> + XEN_GUEST_HANDLE_64(xen_domctl_schedparam_vcpu_t) vcpus;
> + uint16_t nr_vcpus;
> + } v;
And there's still no explicit padding here at all (nor am I convinced
that uint16_t is really a good choice for nr_vcpus - uint32_t would
seem more natural without causing any problems or structure
growth).
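I.e. something like (again just a sketch, following the uint32_t suggestion and adding explicit padding so this union member has no implicit holes):

        struct {
            XEN_GUEST_HANDLE_64(xen_domctl_schedparam_vcpu_t) vcpus;
            uint32_t nr_vcpus;
            uint32_t padding;
        } v;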
Jan
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel