
Re: [Xen-devel] [PATCH 2 of 4] CONFIG: remove smp barrier definitions



On 09/02/2012 02:49, "Jan Beulich" <JBeulich@xxxxxxxx> wrote:

>>>> On 08.02.12 at 17:45, Andrew Cooper <andrew.cooper3@xxxxxxxxxx> wrote:
>> Now that CONFIG_SMP has been removed, there is no need to define
>> smp_{,r,w}mb()s, which used to be conditionally compiled to different
>> operations (even though those conditionally different operations still
>> ended up as simple barrier()s).
>> 
>> Therefore, remove smp_{,r,w}mb()s and just use the regular {,r,w}mb()s.
> 
> Did you read the Linux-side description and usage guidelines before
> doing this? I don't think doing the adjustment here is a good idea,
> even if the smp_ ones are aliases of the plain ones (which doesn't
> necessarily have to be the case on any future architectures Xen might
> get ported to).
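
(For reference, a sketch of the historical pattern being alluded to, as
in Linux -- from memory, not the exact source: with CONFIG_SMP the
smp_* forms are real CPU-vs-CPU barriers, without it they degrade to
compiler-only barriers, while the plain mb()/rmb()/wmb() stay
unconditional so that ordering against device/MMIO memory is
unaffected.)

    #ifdef CONFIG_SMP
    /* SMP build: CPU-vs-CPU ordering needs the real barrier. */
    #define smp_mb()    mb()
    #define smp_rmb()   rmb()
    #define smp_wmb()   wmb()
    #else
    /* UP build: a compiler barrier suffices; there is no other CPU to
     * order against, but device ordering still needs plain mb() etc. */
    #define smp_mb()    barrier()
    #define smp_rmb()   barrier()
    #define smp_wmb()   barrier()
    #endif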

In that they can document barriers used on shared memory versus I/O memory,
it is perhaps worth keeping the smp_* variants.
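
For example (an illustrative sketch only -- the structure, field and
register names below are invented, not taken from this patch), the
variant chosen tells the reader what the barrier orders against:

    struct ring {
        int payload;
        int ready;
    };

    /* CPU-vs-CPU: publish data to other processors via shared memory. */
    static void publish(struct ring *shared, int value)
    {
        shared->payload = value;
        smp_wmb();      /* order payload before flag; other CPUs only */
        shared->ready = 1;
    }

    /* CPU-vs-device: a mandatory barrier documents that a device, not
     * just another CPU, must observe the stores in order. */
    static void kick_device(struct ring *shared,
                            volatile unsigned int *doorbell)
    {
        shared->payload = 1;
        wmb();          /* also orders against the MMIO doorbell write */
        *doorbell = 1;
    }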

 -- Keir

> Jan
> 
>> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
>> 
>> diff -r 101b0d7ebb00 -r 957b5ac44e32 xen/arch/x86/acpi/cpu_idle.c
>> --- a/xen/arch/x86/acpi/cpu_idle.c
>> +++ b/xen/arch/x86/acpi/cpu_idle.c
>> @@ -260,7 +260,7 @@ static void mwait_idle_with_hints(unsign
>>      s_time_t expires = per_cpu(timer_deadline, cpu);
>>  
>>      __monitor((void *)&mwait_wakeup(cpu), 0, 0);
>> -    smp_mb();
>> +    mb();
>>  
>>      /*
>>       * Timer deadline passing is the event on which we will be woken via
>> diff -r 101b0d7ebb00 -r 957b5ac44e32 xen/arch/x86/cpu/mtrr/main.c
>> --- a/xen/arch/x86/cpu/mtrr/main.c
>> +++ b/xen/arch/x86/cpu/mtrr/main.c
>> @@ -248,7 +248,7 @@ static void set_mtrr(unsigned int reg, u
>>  
>>  	/* ok, reset count and toggle gate */
>>  	atomic_set(&data.count, nr_cpus);
>> -	smp_wmb();
>> +	wmb();
>>  	atomic_set(&data.gate,1);
>>  
>>  	/* do our MTRR business */
>> @@ -271,7 +271,7 @@ static void set_mtrr(unsigned int reg, u
>>  	cpu_relax();
>>  
>>  	atomic_set(&data.count, nr_cpus);
>> -	smp_wmb();
>> +	wmb();
>>  	atomic_set(&data.gate,0);
>>  
>> /*
>> diff -r 101b0d7ebb00 -r 957b5ac44e32 xen/arch/x86/irq.c
>> --- a/xen/arch/x86/irq.c
>> +++ b/xen/arch/x86/irq.c
>> @@ -207,7 +207,7 @@ static void dynamic_irq_cleanup(unsigned
>>      spin_unlock_irqrestore(&desc->lock, flags);
>>  
>>      /* Wait to make sure it's not being used on another CPU */
>> -    do { smp_mb(); } while ( desc->status & IRQ_INPROGRESS );
>> +    do { mb(); } while ( desc->status & IRQ_INPROGRESS );
>>  
>>      if (action)
>>          xfree(action);
>> @@ -931,7 +931,7 @@ void __init release_irq(unsigned int irq
>>      spin_unlock_irqrestore(&desc->lock,flags);
>>  
>>      /* Wait to make sure it's not being used on another CPU */
>> -    do { smp_mb(); } while ( desc->status & IRQ_INPROGRESS );
>> +    do { mb(); } while ( desc->status & IRQ_INPROGRESS );
>>  
>>      if (action && action->free_on_release)
>>          xfree(action);
>> diff -r 101b0d7ebb00 -r 957b5ac44e32 xen/common/domain.c
>> --- a/xen/common/domain.c
>> +++ b/xen/common/domain.c
>> @@ -544,7 +544,7 @@ void domain_shutdown(struct domain *d, u
>>  
>>      d->is_shutting_down = 1;
>>  
>> -    smp_mb(); /* set shutdown status /then/ check for per-cpu deferrals */
>> +    mb(); /* set shutdown status /then/ check for per-cpu deferrals */
>>  
>>      for_each_vcpu ( d, v )
>>      {
>> @@ -594,7 +594,7 @@ int vcpu_start_shutdown_deferral(struct
>>          return 1;
>>  
>>      v->defer_shutdown = 1;
>> -    smp_mb(); /* set deferral status /then/ check for shutdown */
>> +    mb(); /* set deferral status /then/ check for shutdown */
>>      if ( unlikely(v->domain->is_shutting_down) )
>>          vcpu_check_shutdown(v);
>>  
>> @@ -604,7 +604,7 @@ int vcpu_start_shutdown_deferral(struct
>>  void vcpu_end_shutdown_deferral(struct vcpu *v)
>>  {
>>      v->defer_shutdown = 0;
>> -    smp_mb(); /* clear deferral status /then/ check for shutdown */
>> +    mb(); /* clear deferral status /then/ check for shutdown */
>>      if ( unlikely(v->domain->is_shutting_down) )
>>          vcpu_check_shutdown(v);
>>  }
>> diff -r 101b0d7ebb00 -r 957b5ac44e32 xen/common/rcupdate.c
>> --- a/xen/common/rcupdate.c
>> +++ b/xen/common/rcupdate.c
>> @@ -252,7 +252,7 @@ static void rcu_start_batch(struct rcu_c
>>           * next_pending == 0 must be visible in
>>           * __rcu_process_callbacks() before it can see new value of cur.
>>           */
>> -        smp_wmb();
>> +        wmb();
>>          rcp->cur++;
>>  
>>          cpumask_copy(&rcp->cpumask, &cpu_online_map);
>> @@ -340,7 +340,7 @@ static void __rcu_process_callbacks(stru
>>          /* see the comment and corresponding wmb() in
>>           * the rcu_start_batch()
>>           */
>> -        smp_rmb();
>> +        rmb();
>>  
>>          if (!rcp->next_pending) {
>>              /* and start it/schedule start if it's a new batch */
>> diff -r 101b0d7ebb00 -r 957b5ac44e32 xen/common/schedule.c
>> --- a/xen/common/schedule.c
>> +++ b/xen/common/schedule.c
>> @@ -657,7 +657,7 @@ static long do_poll(struct sched_poll *s
>>  
>>  #ifndef CONFIG_X86 /* set_bit() implies mb() on x86 */
>>      /* Check for events /after/ setting flags: avoids wakeup waiting race. */
>> -    smp_mb();
>> +    mb();
>>  
>>      /*
>>       * Someone may have seen we are blocked but not that we are polling, or
>> @@ -1173,12 +1173,12 @@ static void schedule(void)
>>  void context_saved(struct vcpu *prev)
>>  {
>>      /* Clear running flag /after/ writing context to memory. */
>> -    smp_wmb();
>> +    wmb();
>>  
>>      prev->is_running = 0;
>>  
>>      /* Check for migration request /after/ clearing running flag. */
>> -    smp_mb();
>> +    mb();
>>  
>>      SCHED_OP(VCPU2OP(prev), context_saved, prev);
>>  
>> diff -r 101b0d7ebb00 -r 957b5ac44e32 xen/common/stop_machine.c
>> --- a/xen/common/stop_machine.c
>> +++ b/xen/common/stop_machine.c
>> @@ -59,7 +59,7 @@ static DEFINE_SPINLOCK(stopmachine_lock)
>>  static void stopmachine_set_state(enum stopmachine_state state)
>>  {
>>      atomic_set(&stopmachine_data.done, 0);
>> -    smp_wmb();
>> +    wmb();
>>      stopmachine_data.state = state;
>>  }
>>  
>> @@ -99,7 +99,7 @@ int stop_machine_run(int (*fn)(void *),
>>      atomic_set(&stopmachine_data.done, 0);
>>      stopmachine_data.state = STOPMACHINE_START;
>>  
>> -    smp_wmb();
>> +    wmb();
>>  
>>      for_each_cpu ( i, &allbutself )
>>          tasklet_schedule_on_cpu(&per_cpu(stopmachine_tasklet, i), i);
>> @@ -134,7 +134,7 @@ static void stopmachine_action(unsigned
>>  
>>      BUG_ON(cpu != smp_processor_id());
>>  
>> -    smp_mb();
>> +    mb();
>>  
>>      while ( state != STOPMACHINE_EXIT )
>>      {
>> @@ -157,7 +157,7 @@ static void stopmachine_action(unsigned
>>              break;
>>          }
>>  
>> -        smp_mb();
>> +        mb();
>>          atomic_inc(&stopmachine_data.done);
>>      }
>>  
>> diff -r 101b0d7ebb00 -r 957b5ac44e32 xen/include/asm-x86/system.h
>> --- a/xen/include/asm-x86/system.h
>> +++ b/xen/include/asm-x86/system.h
>> @@ -154,10 +154,6 @@ static always_inline unsigned long __cmp
>>  #define rmb()           barrier()
>>  #define wmb()           barrier()
>>  
>> -#define smp_mb()        mb()
>> -#define smp_rmb()       rmb()
>> -#define smp_wmb()       wmb()
>> -
>>  #define set_mb(var, value) do { xchg(&var, value); } while (0)
>>  #define set_wmb(var, value) do { var = value; wmb(); } while (0)
>>  
>> diff -r 101b0d7ebb00 -r 957b5ac44e32 xen/include/xen/list.h
>> --- a/xen/include/xen/list.h
>> +++ b/xen/include/xen/list.h
>> @@ -102,7 +102,7 @@ static inline void __list_add_rcu(struct
>>  {
>>      new->next = next;
>>      new->prev = prev;
>> -    smp_wmb();
>> +    wmb();
>>      next->prev = new;
>>      prev->next = new;
>>  }
>> @@ -244,7 +244,7 @@ static inline void list_replace_rcu(stru
>>  {
>>      new->next = old->next;
>>      new->prev = old->prev;
>> -    smp_wmb();
>> +    wmb();
>>      new->next->prev = new;
>>      new->prev->next = new;
>>      old->prev = LIST_POISON2;
>> @@ -712,7 +712,7 @@ static inline void hlist_replace_rcu(str
>>  
>>      new->next = next;
>>      new->pprev = old->pprev;
>> -    smp_wmb();
>> +    wmb();
>>      if (next)
>>          new->next->pprev = &new->next;
>>      *new->pprev = new;
>> @@ -754,7 +754,7 @@ static inline void hlist_add_head_rcu(st
>>      struct hlist_node *first = h->first;
>>      n->next = first;
>>      n->pprev = &h->first;
>> -    smp_wmb();
>> +    wmb();
>>      if (first)
>>          first->pprev = &n->next;
>>      h->first = n;
>> @@ -804,7 +804,7 @@ static inline void hlist_add_before_rcu(
>>  {
>>      n->pprev = next->pprev;
>>      n->next = next;
>> -    smp_wmb();
>> +    wmb();
>>      next->pprev = &n->next;
>>      *(n->pprev) = n;
>>  }
>> @@ -832,7 +832,7 @@ static inline void hlist_add_after_rcu(s
>>  {
>>      n->next = prev->next;
>>      n->pprev = &prev->next;
>> -    smp_wmb();
>> +    wmb();
>>      prev->next = n;
>>      if (n->next)
>>          n->next->pprev = &n->next;
>> diff -r 101b0d7ebb00 -r 957b5ac44e32 xen/include/xen/rcupdate.h
>> --- a/xen/include/xen/rcupdate.h
>> +++ b/xen/include/xen/rcupdate.h
>> @@ -136,7 +136,7 @@ typedef struct _rcu_read_lock rcu_read_l
>>   * call documents which pointers will be dereferenced by RCU read-side
>>   * code.
>>   */
>> -#define rcu_assign_pointer(p, v) ({ smp_wmb(); (p) = (v); })
>> +#define rcu_assign_pointer(p, v) ({ wmb(); (p) = (v); })
>>  
>>  void rcu_init(void);
>>  void rcu_check_callbacks(int cpu);
>> 
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel