
Re: [Xen-devel] [PATCH 4/7] xen: sched: get rid of the per domain vCPU list in RTDS



On 08/10/15 13:52, Dario Faggioli wrote:
> As, currently, there is no reason to bother having
> it and keeping it updated.
> 
> In fact, it is only used for dumping and changing
> vCPUs' parameters, but both can be achieved easily
> with for_each_vcpu.
> 
> Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>

With Andrew's comments addressed:

Acked-by: George Dunlap <george.dunlap@xxxxxxxxxx>
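
For anyone reading this in the archive: the pattern the patch adopts is the
generic per-domain vCPU chain that struct domain already maintains, which is
what makes the scheduler's private list redundant. Below is a minimal,
self-contained mock-up of that pattern. The types, the for_each_vcpu()
approximation, and the set_params() helper are simplified stand-ins for
illustration only, not the real Xen definitions:

#include <stdio.h>

/* Stand-ins for the real Xen structures, reduced to what the quoted
 * hunks rely on. */
struct vcpu {
    int vcpu_id;
    void *sched_priv;             /* scheduler shadow: the rt_vcpu */
    struct vcpu *next_in_list;    /* chain that for_each_vcpu() walks */
};

struct domain {
    struct vcpu *vcpu_list;       /* head of the per-domain chain */
};

/* Approximation of Xen's for_each_vcpu(): walk the chain the domain
 * already keeps, so the scheduler needs no second list of its own. */
#define for_each_vcpu(d, v) \
    for ( (v) = (d)->vcpu_list; (v) != NULL; (v) = (v)->next_in_list )

struct rt_vcpu {
    long period, budget;
};

static struct rt_vcpu *rt_vcpu(struct vcpu *vc)
{
    return vc->sched_priv;        /* same lookup trick sched_rt.c uses */
}

/* What the putinfo hunk now does: touch every vCPU via the generic
 * iterator instead of walking sdom->vcpu. */
static void set_params(struct domain *d, long period, long budget)
{
    struct vcpu *vc;

    for_each_vcpu( d, vc )
    {
        struct rt_vcpu *svc = rt_vcpu(vc);
        svc->period = period;
        svc->budget = budget;
    }
}

int main(void)
{
    struct rt_vcpu rv0 = { 0, 0 }, rv1 = { 0, 0 };
    struct vcpu v1 = { 1, &rv1, NULL };
    struct vcpu v0 = { 0, &rv0, &v1 };
    struct domain d = { &v0 };
    struct vcpu *vc;

    set_params(&d, 10000, 4000);
    for_each_vcpu( &d, vc )
        printf("vcpu%d: period=%ld budget=%ld\n", vc->vcpu_id,
               rt_vcpu(vc)->period, rt_vcpu(vc)->budget);
    return 0;
}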

> ---
> Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> Cc: Meng Xu <mengxu@xxxxxxxxxxxxx>
> ---
>  xen/common/sched_rt.c |   36 +++++++++++++-----------------------
>  1 file changed, 13 insertions(+), 23 deletions(-)
> 
> diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
> index 37a32a4..797adc1 100644
> --- a/xen/common/sched_rt.c
> +++ b/xen/common/sched_rt.c
> @@ -160,7 +160,6 @@ struct rt_private {
>   */
>  struct rt_vcpu {
>      struct list_head q_elem;    /* on the runq/depletedq list */
> -    struct list_head sdom_elem; /* on the domain VCPU list */
>  
>      /* Up-pointers */
>      struct rt_dom *sdom;
> @@ -182,7 +181,6 @@ struct rt_vcpu {
>   * Domain
>   */
>  struct rt_dom {
> -    struct list_head vcpu;      /* link its VCPUs */
>      struct list_head sdom_elem; /* link list on rt_priv */
>      struct domain *dom;         /* pointer to upper domain */
>  };
> @@ -290,7 +288,7 @@ rt_dump_pcpu(const struct scheduler *ops, int cpu)
>  static void
>  rt_dump(const struct scheduler *ops)
>  {
> -    struct list_head *iter_sdom, *iter_svc, *runq, *depletedq, *iter;
> +    struct list_head *runq, *depletedq, *iter;
>      struct rt_private *prv = rt_priv(ops);
>      struct rt_vcpu *svc;
>      struct rt_dom *sdom;
> @@ -319,14 +317,16 @@ rt_dump(const struct scheduler *ops)
>      }
>  
>      printk("Domain info:\n");
> -    list_for_each( iter_sdom, &prv->sdom )
> +    list_for_each( iter, &prv->sdom )
>      {
> -        sdom = list_entry(iter_sdom, struct rt_dom, sdom_elem);
> +        struct vcpu *vc;
> +
> +        sdom = list_entry(iter, struct rt_dom, sdom_elem);
>          printk("\tdomain: %d\n", sdom->dom->domain_id);
>  
> -        list_for_each( iter_svc, &sdom->vcpu )
> +        for_each_vcpu( sdom->dom, vc )
>          {
> -            svc = list_entry(iter_svc, struct rt_vcpu, sdom_elem);
> +            svc = rt_vcpu(vc);
>              rt_dump_vcpu(ops, svc);
>          }
>      }
> @@ -527,7 +527,6 @@ rt_alloc_domdata(const struct scheduler *ops, struct domain *dom)
>      if ( sdom == NULL )
>          return NULL;
>  
> -    INIT_LIST_HEAD(&sdom->vcpu);
>      INIT_LIST_HEAD(&sdom->sdom_elem);
>      sdom->dom = dom;
>  
> @@ -587,7 +586,6 @@ rt_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
>          return NULL;
>  
>      INIT_LIST_HEAD(&svc->q_elem);
> -    INIT_LIST_HEAD(&svc->sdom_elem);
>      svc->flags = 0U;
>      svc->sdom = dd;
>      svc->vcpu = vc;
> @@ -614,8 +612,7 @@ rt_free_vdata(const struct scheduler *ops, void *priv)
>   * This function is called in sched_move_domain() in schedule.c
>   * When move a domain to a new cpupool.
>   * It inserts vcpus of moving domain to the scheduler's RunQ in
> - * dest. cpupool; and insert rt_vcpu svc to scheduler-specific
> - * vcpu list of the dom
> + * dest. cpupool.
>   */
>  static void
>  rt_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
> @@ -634,15 +631,11 @@ rt_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
>          __runq_insert(ops, svc);
>      vcpu_schedule_unlock_irq(lock, vc);
>  
> -    /* add rt_vcpu svc to scheduler-specific vcpu list of the dom */
> -    list_add_tail(&svc->sdom_elem, &svc->sdom->vcpu);
> -
>      SCHED_STAT_CRANK(vcpu_insert);
>  }
>  
>  /*
> - * Remove rt_vcpu svc from the old scheduler in source cpupool; and
> - * Remove rt_vcpu svc from scheduler-specific vcpu list of the dom
> + * Remove rt_vcpu svc from the old scheduler in source cpupool.
>   */
>  static void
>  rt_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
> @@ -659,9 +652,6 @@ rt_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
>      if ( __vcpu_on_q(svc) )
>          __q_remove(svc);
>      vcpu_schedule_unlock_irq(lock, vc);
> -
> -    if ( !is_idle_vcpu(vc) )
> -        list_del_init(&svc->sdom_elem);
>  }
>  
>  /*
> @@ -1137,7 +1127,7 @@ rt_dom_cntl(
>      struct rt_private *prv = rt_priv(ops);
>      struct rt_dom * const sdom = rt_dom(d);
>      struct rt_vcpu *svc;
> -    struct list_head *iter;
> +    struct vcpu *vc;
>      unsigned long flags;
>      int rc = 0;
>  
> @@ -1145,7 +1135,7 @@ rt_dom_cntl(
>      {
>      case XEN_DOMCTL_SCHEDOP_getinfo:
>          spin_lock_irqsave(&prv->lock, flags);
> -        svc = list_entry(sdom->vcpu.next, struct rt_vcpu, sdom_elem);
> +        svc = rt_vcpu(sdom->dom->vcpu[0]);
>          op->u.rtds.period = svc->period / MICROSECS(1); /* transfer to us */
>          op->u.rtds.budget = svc->budget / MICROSECS(1);
>          spin_unlock_irqrestore(&prv->lock, flags);
> @@ -1157,9 +1147,9 @@ rt_dom_cntl(
>              break;
>          }
>          spin_lock_irqsave(&prv->lock, flags);
> -        list_for_each( iter, &sdom->vcpu )
> +        for_each_vcpu( sdom->dom, vc )
>          {
> -            struct rt_vcpu * svc = list_entry(iter, struct rt_vcpu, sdom_elem);
> +            svc = rt_vcpu(vc);
> +            svc->period = MICROSECS(op->u.rtds.period); /* transfer to nanosec */
>              svc->budget = MICROSECS(op->u.rtds.budget);
>          }
> 
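
A closing note on the XEN_DOMCTL_SCHEDOP_getinfo hunk, continuing the
mock-up above: since the putinfo path assigns the same period and budget
to every vCPU, reading them back from vCPU 0 (what the new
rt_vcpu(sdom->dom->vcpu[0]) does) yields the domain-wide values, just as
the head of the old sdom->vcpu list did. Sketched with the same stand-in
types (get_params() is a hypothetical illustration, not a Xen function):

/* getinfo counterpart to set_params() above: any one vCPU is
 * representative because set_params() made them all identical, and
 * vCPU 0 (the chain head here) always exists for a live domain. */
static void get_params(struct domain *d, long *period, long *budget)
{
    struct rt_vcpu *svc = rt_vcpu(d->vcpu_list); /* stand-in for vcpu[0] */

    *period = svc->period;
    *budget = svc->budget;
}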

