[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH 03/16] xen: sched: make implementing .alloc_pdata optional



On 18/03/16 20:04, Dario Faggioli wrote:
> The .alloc_pdata scheduler hook must, before this change,
> be implemented by all schedulers --even those ones that
> don't need to allocate anything.
> 
> Make it possible to just use the SCHED_OP(), like for
> the other hooks, by using ERR_PTR() and IS_ERR() for
> error reporting. This:
>  - makes NULL a variant of success;
>  - allows for errors other than ENOMEM to be properly
>    communicated (if ever necessary).
> 
> This, in turn, means that schedulers not needing to
> allocate any per-pCPU data, can avoid implementing the
> hook. In fact, the artificial implementation of
> .alloc_pdata in the ARINC653 is removed (and, while there,
> nuke .free_pdata too, as it is equally useless).
> 
> Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
> ---
> Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> Cc: Robert VanVossen <robert.vanvossen@xxxxxxxxxxxxxxx>
> Cc: Josh Whitehead <josh.whitehead@xxxxxxxxxxxxxxx>
> Cc: Meng Xu <mengxu@xxxxxxxxxxxxx>
> Cc: Jan Beulich <JBeulich@xxxxxxxx>
> Cc: Juergen Gross <jgross@xxxxxxxx>
> ---
> Changes from v1:
>  * use IS_ERR() and friends to deal with the return value
>    of alloc_pdata, as suggested during review.
> ---
>  xen/common/sched_arinc653.c |   31 -------------------------------
>  xen/common/sched_credit.c   |    4 ++--
>  xen/common/sched_credit2.c  |    2 +-
>  xen/common/sched_rt.c       |    7 +++----
>  xen/common/schedule.c       |   18 ++++++++----------
>  xen/include/xen/sched-if.h  |    1 +
>  6 files changed, 15 insertions(+), 48 deletions(-)
> 
> diff --git a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c
> index 8a11a2f..b79fcdf 100644
> --- a/xen/common/sched_arinc653.c
> +++ b/xen/common/sched_arinc653.c
> @@ -456,34 +456,6 @@ a653sched_free_vdata(const struct scheduler *ops, void 
> *priv)
>  }
>  
>  /**
> - * This function allocates scheduler-specific data for a physical CPU
> - *
> - * We do not actually make use of any per-CPU data but the hypervisor expects
> - * a non-NULL return value
> - *
> - * @param ops       Pointer to this instance of the scheduler structure
> - *
> - * @return          Pointer to the allocated data
> - */
> -static void *
> -a653sched_alloc_pdata(const struct scheduler *ops, int cpu)
> -{
> -    /* return a non-NULL value to keep schedule.c happy */
> -    return SCHED_PRIV(ops);
> -}
> -
> -/**
> - * This function frees scheduler-specific data for a physical CPU
> - *
> - * @param ops       Pointer to this instance of the scheduler structure
> - */
> -static void
> -a653sched_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
> -{
> -    /* nop */
> -}
> -
> -/**
>   * This function allocates scheduler-specific data for a domain
>   *
>   * We do not actually make use of any per-domain data but the hypervisor
> @@ -737,9 +709,6 @@ static const struct scheduler sched_arinc653_def = {
>      .free_vdata     = a653sched_free_vdata,
>      .alloc_vdata    = a653sched_alloc_vdata,
>  
> -    .free_pdata     = a653sched_free_pdata,
> -    .alloc_pdata    = a653sched_alloc_pdata,
> -
>      .free_domdata   = a653sched_free_domdata,
>      .alloc_domdata  = a653sched_alloc_domdata,
>  
> diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
> index 305889a..288749f 100644
> --- a/xen/common/sched_credit.c
> +++ b/xen/common/sched_credit.c
> @@ -532,12 +532,12 @@ csched_alloc_pdata(const struct scheduler *ops, int cpu)
>      /* Allocate per-PCPU info */
>      spc = xzalloc(struct csched_pcpu);
>      if ( spc == NULL )
> -        return NULL;
> +        return ERR_PTR(-ENOMEM);
>  
>      if ( !alloc_cpumask_var(&spc->balance_mask) )
>      {
>          xfree(spc);
> -        return NULL;
> +        return ERR_PTR(-ENOMEM);
>      }
>  
>      spin_lock_irqsave(&prv->lock, flags);
> diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
> index 7ddad38..36dc9ee 100644
> --- a/xen/common/sched_credit2.c
> +++ b/xen/common/sched_credit2.c
> @@ -2044,7 +2044,7 @@ csched2_alloc_pdata(const struct scheduler *ops, int 
> cpu)
>          printk("%s: cpu %d not online yet, deferring initializatgion\n",
>                 __func__, cpu);
>  
> -    return (void *)1;
> +    return NULL;
>  }
>  
>  static void
> diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
> index d98bfb6..ac8019f 100644
> --- a/xen/common/sched_rt.c
> +++ b/xen/common/sched_rt.c
> @@ -665,7 +665,7 @@ rt_alloc_pdata(const struct scheduler *ops, int cpu)
>      spin_unlock_irqrestore(old_lock, flags);
>  
>      if ( !alloc_cpumask_var(&_cpumask_scratch[cpu]) )
> -        return NULL;
> +        return ERR_PTR(-ENOMEM);
>  
>      if ( prv->repl_timer == NULL )
>      {
> @@ -673,13 +673,12 @@ rt_alloc_pdata(const struct scheduler *ops, int cpu)
>          prv->repl_timer = xzalloc(struct timer);
>  
>          if ( prv->repl_timer == NULL )
> -            return NULL;
> +            return ERR_PTR(-ENOMEM);
>  
>          init_timer(prv->repl_timer, repl_timer_handler, (void *)ops, cpu);
>      }
>  
> -    /* 1 indicates alloc. succeed in schedule.c */
> -    return (void *)1;
> +    return NULL;
>  }
>  
>  static void
> diff --git a/xen/common/schedule.c b/xen/common/schedule.c
> index 0627eb5..1adc0e2 100644
> --- a/xen/common/schedule.c
> +++ b/xen/common/schedule.c
> @@ -1491,9 +1491,9 @@ static int cpu_schedule_up(unsigned int cpu)
>      if ( idle_vcpu[cpu] == NULL )
>          return -ENOMEM;
>  
> -    if ( (ops.alloc_pdata != NULL) &&
> -         ((sd->sched_priv = ops.alloc_pdata(&ops, cpu)) == NULL) )
> -        return -ENOMEM;
> +    sd->sched_priv = SCHED_OP(&ops, alloc_pdata, cpu);
> +    if ( IS_ERR(sd->sched_priv) )
> +        return PTR_ERR(sd->sched_priv);

Calling xfree() with an IS_ERR() value might be a bad idea.
Either you need to set sd->sched_priv to NULL in the error case, or you
need to modify xfree() to return immediately not only for NULL, but
for IS_ERR() values as well.


Juergen

>  
>      return 0;
>  }
> @@ -1503,8 +1503,7 @@ static void cpu_schedule_down(unsigned int cpu)
>      struct schedule_data *sd = &per_cpu(schedule_data, cpu);
>      struct scheduler *sched = per_cpu(scheduler, cpu);
>  
> -    if ( sd->sched_priv != NULL )
> -        SCHED_OP(sched, free_pdata, sd->sched_priv, cpu);
> +    SCHED_OP(sched, free_pdata, sd->sched_priv, cpu);
>      SCHED_OP(sched, free_vdata, idle_vcpu[cpu]->sched_priv);
>  
>      idle_vcpu[cpu]->sched_priv = NULL;
> @@ -1599,9 +1598,8 @@ void __init scheduler_init(void)
>      idle_domain->max_vcpus = nr_cpu_ids;
>      if ( alloc_vcpu(idle_domain, 0, 0) == NULL )
>          BUG();
> -    if ( ops.alloc_pdata &&
> -         !(this_cpu(schedule_data).sched_priv = ops.alloc_pdata(&ops, 0)) )
> -        BUG();
> +    this_cpu(schedule_data).sched_priv = SCHED_OP(&ops, alloc_pdata, 0);
> +    BUG_ON(IS_ERR(this_cpu(schedule_data).sched_priv));
>      SCHED_OP(&ops, init_pdata, this_cpu(schedule_data).sched_priv, 0);
>  }
>  
> @@ -1644,8 +1642,8 @@ int schedule_cpu_switch(unsigned int cpu, struct 
> cpupool *c)
>  
>      idle = idle_vcpu[cpu];
>      ppriv = SCHED_OP(new_ops, alloc_pdata, cpu);
> -    if ( ppriv == NULL )
> -        return -ENOMEM;
> +    if ( IS_ERR(ppriv) )
> +        return PTR_ERR(ppriv);
>      SCHED_OP(new_ops, init_pdata, ppriv, cpu);
>      vpriv = SCHED_OP(new_ops, alloc_vdata, idle, idle->domain->sched_priv);
>      if ( vpriv == NULL )
> diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
> index 70c08c6..560cba5 100644
> --- a/xen/include/xen/sched-if.h
> +++ b/xen/include/xen/sched-if.h
> @@ -9,6 +9,7 @@
>  #define __XEN_SCHED_IF_H__
>  
>  #include <xen/percpu.h>
> +#include <xen/err.h>
>  
>  /* A global pointer to the initial cpupool (POOL0). */
>  extern struct cpupool *cpupool0;
> 
> 


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.