
Re: [Xen-devel] [PATCH 1/3] arinc: whitespace and formatting fixes



On 18/11/2013 20:16, Nathan Studer wrote:
> From: Nathan Studer <nate.studer@xxxxxxxxxxxxxxx>
>
> Remove the excessive amount of trailing whitespace in the
> arinc653 scheduler file and add a local variables block.
>
> Suggested-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Signed-off-by: Nathan Studer <nate.studer@xxxxxxxxxxxxxxx>

That's much better, thanks.

Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
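
One tangential note for readers skimming the doc comments below: the
<0 / 0 / >0 contract documented on dom_handle_cmp() is simply the
memcmp() ordering over the raw handle bytes.  A minimal standalone
sketch of that idiom (the 16-byte typedef is my stand-in for the
public xen_domain_handle_t, not code from this patch):

    #include <stdint.h>
    #include <string.h>

    /* Stand-in for Xen's public handle type: a 16-byte UUID. */
    typedef uint8_t xen_domain_handle_t[16];

    /* memcmp() already yields the documented <0 / 0 / >0 result. */
    static int dom_handle_cmp(const xen_domain_handle_t h1,
                              const xen_domain_handle_t h2)
    {
        return memcmp(h1, h2, sizeof(xen_domain_handle_t));
    }

(For future patches, note that "git diff --check" will flag this sort
of trailing whitespace automatically.)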

> ---
>  xen/common/sched_arinc653.c |   96 ++++++++++++++++++++++++-------------------
>  1 file changed, 53 insertions(+), 43 deletions(-)
>
> diff --git a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c
> index 7b7b387..f4eb943 100644
> --- a/xen/common/sched_arinc653.c
> +++ b/xen/common/sched_arinc653.c
> @@ -40,9 +40,9 @@
>   * Private Macros                                                         *
>   **************************************************************************/
>  
> -/** 
> - * Retrieve the idle VCPU for a given physical CPU 
> - */ 
> +/**
> + * Retrieve the idle VCPU for a given physical CPU
> + */
>  #define IDLETASK(cpu)  (idle_vcpu[cpu])
>  
>  /**
> @@ -76,7 +76,7 @@ typedef struct arinc653_vcpu_s
>      struct list_head    list;
>  } arinc653_vcpu_t;
>  
> -/**  
> +/**
>   * The sched_entry_t structure holds a single entry of the
>   * ARINC 653 schedule.
>   */
> @@ -101,8 +101,8 @@ typedef struct sched_entry_s
>  typedef struct a653sched_priv_s
>  {
>      /**
> -     * This array holds the active ARINC 653 schedule. 
> -     *  
> +     * This array holds the active ARINC 653 schedule.
> +     *
>       * When the system tries to start a new VCPU, this schedule is scanned
>       * to look for a matching (handle, VCPU #) pair. If both the handle (UUID)
>       * and VCPU number match, then the VCPU is allowed to run. Its run time
> @@ -112,12 +112,12 @@ typedef struct a653sched_priv_s
>  
>      /**
>       * This variable holds the number of entries that are valid in
> -     * the arinc653_schedule table. 
> -     *  
> +     * the arinc653_schedule table.
> +     *
>       * This is not necessarily the same as the number of domains in the
>       * schedule. A domain could be listed multiple times within the schedule,
>       * or a domain with multiple VCPUs could have a different
> -     * schedule entry for each VCPU. 
> +     * schedule entry for each VCPU.
>       */
>      int num_schedule_entries;
>  
> @@ -131,9 +131,9 @@ typedef struct a653sched_priv_s
>       */
>      s_time_t next_major_frame;
>  
> -    /** 
> -     * pointers to all Xen VCPU structures for iterating through 
> -     */ 
> +    /**
> +     * pointers to all Xen VCPU structures for iterating through
> +     */
>      struct list_head vcpu_list;
>  } a653sched_priv_t;
>  
> @@ -143,14 +143,14 @@ typedef struct a653sched_priv_s
>  
>  /**
>   * This function compares two domain handles.
> - * 
> + *
>   * @param h1        Pointer to handle 1
>   * @param h2        Pointer to handle 2
> - * 
> + *
>   * @return          <ul>
> - *                  <li> <0:  handle 1 is less than handle 2   
> - *                  <li>  0:  handle 1 is equal to handle 2    
> - *                  <li> >0:  handle 1 is greater than handle 2 
> + *                  <li> <0:  handle 1 is less than handle 2
> + *                  <li>  0:  handle 1 is equal to handle 2
> + *                  <li> >0:  handle 1 is greater than handle 2
>   *                  </ul>
>   */
>  static int dom_handle_cmp(const xen_domain_handle_t h1,
> @@ -162,11 +162,11 @@ static int dom_handle_cmp(const xen_domain_handle_t h1,
>  /**
>   * This function searches the vcpu list to find a VCPU that matches
>   * the domain handle and VCPU ID specified.
> - * 
> + *
>   * @param ops       Pointer to this instance of the scheduler structure
>   * @param handle    Pointer to handler
>   * @param vcpu_id   VCPU ID
> - * 
> + *
>   * @return          <ul>
>   *                  <li> Pointer to the matching VCPU if one is found
>   *                  <li> NULL otherwise
> @@ -191,7 +191,7 @@ static struct vcpu *find_vcpu(
>  /**
>   * This function updates the pointer to the Xen VCPU structure for each entry
>   * in the ARINC 653 schedule.
> - * 
> + *
>   * @param ops       Pointer to this instance of the scheduler structure
>   * @return          <None>
>   */
> @@ -211,7 +211,7 @@ static void update_schedule_vcpus(const struct scheduler *ops)
>   * in place a new ARINC653 schedule.
>   *
>   * @param ops       Pointer to this instance of the scheduler structure
> - * 
> + *
>   * @return          <ul>
>   *                  <li> 0 = success
>   *                  <li> !0 = error
> @@ -253,10 +253,10 @@ arinc653_sched_set(
>      if ( !found_dom0 )
>          goto fail;
>  
> -    /* 
> +    /*
>       * Error if the major frame is not large enough to run all entries as
>       * indicated by comparing the total run time to the major frame length.
> -     */ 
> +     */
>      if ( total_runtime > schedule->major_frame )
>          goto fail;
>  
> @@ -276,10 +276,10 @@ arinc653_sched_set(
>      update_schedule_vcpus(ops);
>  
>      /*
> -     * The newly-installed schedule takes effect immediately. We do not even 
> +     * The newly-installed schedule takes effect immediately. We do not even
>       * wait for the current major frame to expire.
>       *
> -     * Signal a new major frame to begin. The next major frame is set up by 
> +     * Signal a new major frame to begin. The next major frame is set up by
>       * the do_schedule callback function when it is next invoked.
>       */
>      sched_priv->next_major_frame = NOW();
> @@ -392,8 +392,8 @@ a653sched_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
>  
>      /*
>       * Initialize our ARINC 653 scheduler-specific information for the VCPU.
> -     * The VCPU starts "asleep." When Xen is ready for the VCPU to run, it 
> -     * will call the vcpu_wake scheduler callback function and our scheduler 
> +     * The VCPU starts "asleep." When Xen is ready for the VCPU to run, it
> +     * will call the vcpu_wake scheduler callback function and our scheduler
>       * will mark the VCPU awake.
>       */
>      svc->vc = vc;
> @@ -483,7 +483,7 @@ a653sched_free_domdata(const struct scheduler *ops, void *data)
>  
>  /**
>   * Xen scheduler callback function to sleep a VCPU
> - * 
> + *
>   * @param ops       Pointer to this instance of the scheduler structure
>   * @param vc        Pointer to the VCPU structure for the current domain
>   */
> @@ -503,7 +503,7 @@ a653sched_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
>  
>  /**
>   * Xen scheduler callback function to wake up a VCPU
> - * 
> + *
>   * @param ops       Pointer to this instance of the scheduler structure
>   * @param vc        Pointer to the VCPU structure for the current domain
>   */
> @@ -519,10 +519,10 @@ a653sched_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
>  /**
>   * Xen scheduler callback function to select a VCPU to run.
>   * This is the main scheduler routine.
> - * 
> + *
>   * @param ops       Pointer to this instance of the scheduler structure
>   * @param now       Current time
> - * 
> + *
>   * @return          Address of the VCPU structure scheduled to be run next
>   *                  Amount of time to execute the returned VCPU
>   *                  Flag for whether the VCPU was migrated
> @@ -559,7 +559,7 @@ a653sched_do_schedule(
>          }
>      }
>  
> -    /* 
> +    /*
>       * If we exhausted the domains in the schedule and still have time left
>       * in the major frame then switch next at the next major frame.
>       */
> @@ -567,10 +567,10 @@ a653sched_do_schedule(
>          next_switch_time = sched_priv->next_major_frame;
>  
>      /*
> -     * If there are more domains to run in the current major frame, set 
> -     * new_task equal to the address of next domain's VCPU structure. 
> -     * Otherwise, set new_task equal to the address of the idle task's VCPU 
> -     * structure. 
> +     * If there are more domains to run in the current major frame, set
> +     * new_task equal to the address of next domain's VCPU structure.
> +     * Otherwise, set new_task equal to the address of the idle task's VCPU
> +     * structure.
>       */
>      new_task = (sched_index < sched_priv->num_schedule_entries)
>          ? sched_priv->schedule[sched_index].vc
> @@ -584,10 +584,10 @@ a653sched_do_schedule(
>          new_task = IDLETASK(0);
>      BUG_ON(new_task == NULL);
>  
> -    /* 
> +    /*
>       * Check to make sure we did not miss a major frame.
> -     * This is a good test for robust partitioning. 
> -     */ 
> +     * This is a good test for robust partitioning.
> +     */
>      BUG_ON(now >= sched_priv->next_major_frame);
>  
>      /* Tasklet work (which runs in idle VCPU context) overrides all else. */
> @@ -595,8 +595,8 @@ a653sched_do_schedule(
>          new_task = IDLETASK(0);
>  
>      /*
> -     * Return the amount of time the next domain has to run and the address 
> -     * of the selected task's VCPU structure. 
> +     * Return the amount of time the next domain has to run and the address
> +     * of the selected task's VCPU structure.
>       */
>      ret.time = next_switch_time - now;
>      ret.task = new_task;
> @@ -609,10 +609,10 @@ a653sched_do_schedule(
>  
>  /**
>   * Xen scheduler callback function to select a CPU for the VCPU to run on
> - * 
> + *
>   * @param ops       Pointer to this instance of the scheduler structure
>   * @param v         Pointer to the VCPU structure for the current domain
> - * 
> + *
>   * @return          Number of selected physical CPU
>   */
>  static int
> @@ -709,3 +709,13 @@ const struct scheduler sched_arinc653_def = {
>      .tick_suspend   = NULL,
>      .tick_resume    = NULL,
>  };
> +
> +/*
> + * Local variables:
> + * mode: C
> + * c-file-style: "BSD"
> + * c-basic-offset: 4
> + * tab-width: 4
> + * indent-tabs-mode: nil
> + * End:
> + */
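
As a closing aside for anyone new to this file: the frame-handling
comments above ("if we exhausted the domains in the schedule and still
have time left in the major frame then switch next at the next major
frame") reduce to a walk over a fixed entry table.  A self-contained
toy model of that decision, with invented entries and times rather
than the real scheduler state:

    #include <stdio.h>

    typedef long long s_time_t;       /* stand-in for Xen's s_time_t */

    struct entry { const char *name; s_time_t runtime; };

    /* Invented schedule: entries run back-to-back each major frame. */
    static const struct entry schedule[] = {
        { "dom0.v0", 30 }, { "domU.v0", 20 }, { "domU.v1", 10 },
    };
    static const s_time_t major_frame = 100;  /* > total runtime (60) */

    /* Pick the entry owning time 'now' in the frame beginning at
     * 'frame_start'; idle once the entries are exhausted. */
    static const char *pick(s_time_t now, s_time_t frame_start)
    {
        s_time_t next_switch = frame_start;
        unsigned int i;

        for ( i = 0; i < sizeof(schedule) / sizeof(schedule[0]); i++ )
        {
            next_switch += schedule[i].runtime;
            if ( now < next_switch )
                return schedule[i].name;
        }
        return "idle";  /* rest of the frame is idle time */
    }

    int main(void)
    {
        s_time_t t;

        for ( t = 0; t < major_frame; t += 25 )
            printf("t=%lld -> %s\n", t, pick(t, 0));
        return 0;
    }

The real code additionally routes the idle case through IDLETASK() and
BUG()s if a major frame boundary is ever missed, as the hunks above
show.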

