
Re: [Xen-devel] [PATCH v4 1/4] VT-d PI: track the vcpu number on pi blocking list



On Fri, Jul 7, 2017 at 7:48 AM, Chao Gao <chao.gao@xxxxxxxxx> wrote:
> This patch adds a field, counter, in struct vmx_pi_blocking_vcpu to track
> how many entries are on the pi blocking list.
>
> Signed-off-by: Chao Gao <chao.gao@xxxxxxxxx>

Minor nit:  The grammar in the title isn't quite right; "vcpu number"
would be "the number identifying a particular vcpu", not "the number
of vcpus".  It should be, "VT-d PI: Track the number of vcpus on pi
blocking list".

With that:

Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxx>
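
The invariant the patch maintains is simple: counter always matches the
number of entries on the per-cpu blocking list, and both are only
updated while the list lock is held.  Here is a minimal standalone
sketch of that pairing (simplified stand-ins for Xen's list primitives,
not the real ones, and with the spinlock omitted):

#include <assert.h>
#include <stdio.h>

/* Minimal stand-ins for Xen's list primitives, for illustration only. */
struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
    n->prev = h->prev; n->next = h;
    h->prev->next = n; h->prev = n;
}

static void list_del(struct list_head *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
}

/* Per-cpu blocking list as in the patch, minus the real spinlock. */
struct pi_blocking_demo {
    struct list_head list;
    unsigned int counter;   /* invariant: == number of entries on list */
};

int main(void)
{
    struct pi_blocking_demo cpu0;
    struct list_head v1, v2;

    list_init(&cpu0.list);
    cpu0.counter = 0;

    /* Block two vcpus: every list_add_tail() pairs with a counter++. */
    list_add_tail(&v1, &cpu0.list); cpu0.counter++;
    list_add_tail(&v2, &cpu0.list); cpu0.counter++;
    assert(cpu0.counter == 2);

    /* Unblock one: every list_del() pairs with a counter--. */
    list_del(&v1); cpu0.counter--;
    assert(cpu0.counter == 1);

    printf("blocked vcpus: %u\n", cpu0.counter);
    return 0;
}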

> ---
> v4:
>  - non-trace part of Patch 1 in v3
>
> ---
>  xen/arch/x86/hvm/vmx/vmx.c | 14 +++++++++++---
>  1 file changed, 11 insertions(+), 3 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 69ce3aa..ecd6485 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -83,6 +83,7 @@ static int vmx_vmfunc_intercept(struct cpu_user_regs *regs);
>  struct vmx_pi_blocking_vcpu {
>      struct list_head     list;
>      spinlock_t           lock;
> +    unsigned int         counter;
>  };
>
>  /*
> @@ -120,6 +121,7 @@ static void vmx_vcpu_block(struct vcpu *v)
>       */
>      ASSERT(old_lock == NULL);
>
> +    per_cpu(vmx_pi_blocking, v->processor).counter++;
>      list_add_tail(&v->arch.hvm_vmx.pi_blocking.list,
>                    &per_cpu(vmx_pi_blocking, v->processor).list);
>      spin_unlock_irqrestore(pi_blocking_list_lock, flags);
> @@ -187,6 +189,8 @@ static void vmx_pi_unblock_vcpu(struct vcpu *v)
>      {
>          ASSERT(v->arch.hvm_vmx.pi_blocking.lock == pi_blocking_list_lock);
>          list_del(&v->arch.hvm_vmx.pi_blocking.list);
> +        container_of(pi_blocking_list_lock,
> +                     struct vmx_pi_blocking_vcpu, lock)->counter--;
>          v->arch.hvm_vmx.pi_blocking.lock = NULL;
>      }
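
For reference: the container_of() above recovers the enclosing per-cpu
struct vmx_pi_blocking_vcpu from the pointer to its lock member, since
the lock pointer is all the vcpu caches (and, per the ASSERT, the vcpu
may be on another cpu's list, so per_cpu() on the local cpu wouldn't
do).  A standalone illustration of the idiom, with a plain int standing
in for spinlock_t and hypothetical demo names throughout:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified stand-in for the patch's struct vmx_pi_blocking_vcpu. */
struct pi_blocking_cpu_demo {
    int lock;               /* stand-in for spinlock_t */
    unsigned int counter;
};

int main(void)
{
    struct pi_blocking_cpu_demo demo = { .lock = 0, .counter = 3 };
    int *lock_ptr = &demo.lock;   /* all the "vcpu" caches is this pointer */

    /* Recover the enclosing struct and decrement its counter. */
    container_of(lock_ptr, struct pi_blocking_cpu_demo, lock)->counter--;

    printf("counter = %u\n", demo.counter);   /* prints "counter = 2" */
    return 0;
}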
>
> @@ -235,6 +239,7 @@ void vmx_pi_desc_fixup(unsigned int cpu)
>          if ( pi_test_on(&vmx->pi_desc) )
>          {
>              list_del(&vmx->pi_blocking.list);
> +            per_cpu(vmx_pi_blocking, cpu).counter--;
>              vmx->pi_blocking.lock = NULL;
>              vcpu_unblock(container_of(vmx, struct vcpu, arch.hvm_vmx));
>          }
> @@ -259,6 +264,8 @@ void vmx_pi_desc_fixup(unsigned int cpu)
>
>              list_move(&vmx->pi_blocking.list,
>                        &per_cpu(vmx_pi_blocking, new_cpu).list);
> +            per_cpu(vmx_pi_blocking, cpu).counter--;
> +            per_cpu(vmx_pi_blocking, new_cpu).counter++;
>              vmx->pi_blocking.lock = new_lock;
>
>              spin_unlock(new_lock);
> @@ -2358,9 +2365,9 @@ static struct hvm_function_table __initdata vmx_function_table = {
>  static void pi_wakeup_interrupt(struct cpu_user_regs *regs)
>  {
>      struct arch_vmx_struct *vmx, *tmp;
> -    spinlock_t *lock = &per_cpu(vmx_pi_blocking, smp_processor_id()).lock;
> -    struct list_head *blocked_vcpus =
> -               &per_cpu(vmx_pi_blocking, smp_processor_id()).list;
> +    unsigned int cpu = smp_processor_id();
> +    spinlock_t *lock = &per_cpu(vmx_pi_blocking, cpu).lock;
> +    struct list_head *blocked_vcpus = &per_cpu(vmx_pi_blocking, cpu).list;
>
>      ack_APIC_irq();
>      this_cpu(irq_count)++;
> @@ -2377,6 +2384,7 @@ static void pi_wakeup_interrupt(struct cpu_user_regs *regs)
>          if ( pi_test_on(&vmx->pi_desc) )
>          {
>              list_del(&vmx->pi_blocking.list);
> +            per_cpu(vmx_pi_blocking, cpu).counter--;
>              ASSERT(vmx->pi_blocking.lock == lock);
>              vmx->pi_blocking.lock = NULL;
>              vcpu_unblock(container_of(vmx, struct vcpu, arch.hvm_vmx));
> --
> 1.8.3.1
