
Re: [Xen-devel] [PATCH v2 3/3] hvm_has_dm: Do a full check for backing DM



> -----Original Message-----
> From: Don Slutz [mailto:dslutz@xxxxxxxxxxx]
> Sent: 02 February 2015 15:22
> To: xen-devel@xxxxxxxxxxxxx; Jan Beulich; Paul Durrant
> Cc: Andrew Cooper; George Dunlap; Ian Campbell; Ian Jackson; Keir
> (Xen.org); Stefano Stabellini; Wei Liu; Don Slutz
> Subject: [PATCH v2 3/3] hvm_has_dm: Do a full check for backing DM
> 
> This saves a VMENTRY and a VMEXIT since we no longer retry the
> ioport read when the backing DM does not handle a given ioreq.
> 
> To call hvm_select_ioreq_server() only once on this code path, hvm_has_dm()
> now returns the selected ioreq server (s).  Also switch the caller to
> hvm_send_assist_req_to_ioreq_server().
> 
> Signed-off-by: Don Slutz <dslutz@xxxxxxxxxxx>

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx> 
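
For reference (not part of the patch), a minimal stand-alone sketch of the
single-lookup pattern being introduced: the "is there a device model?" check
now returns the selected ioreq server, so the caller can forward the request
without a second hvm_select_ioreq_server() lookup. The names and the toy port
range below are purely illustrative; only the shape mirrors the patch.

    #include <stdio.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct ioreq_server { const char *name; };
    struct ioreq { unsigned long addr; unsigned long data; bool is_read; };

    static struct ioreq_server default_server = { "default-dm" };

    /* Stand-in for hvm_select_ioreq_server(): pick a server, or NULL.
     * The port-range rule here is made up for the example. */
    static struct ioreq_server *select_server(struct ioreq *p)
    {
        return p->addr < 0x10000 ? &default_server : NULL;
    }

    /* The new shape of hvm_has_dm(): one lookup, result handed back to the
     * caller.  If no DM backs the access, complete it locally (reads are
     * modelled here as returning all 1s) and return NULL. */
    static struct ioreq_server *has_dm(struct ioreq *p)
    {
        struct ioreq_server *s = select_server(p);   /* the single lookup */

        if ( s )
            return s;

        if ( p->is_read )
            p->data = ~0ul;
        return NULL;
    }

    static bool send_to_server(struct ioreq_server *s, struct ioreq *p)
    {
        printf("forwarding %s of 0x%lx to %s\n",
               p->is_read ? "read" : "write", p->addr, s->name);
        return true;
    }

    int main(void)
    {
        struct ioreq p = { .addr = 0x20000, .data = 0, .is_read = true };
        struct ioreq_server *s = has_dm(&p);         /* one call, result reused */

        if ( !s )
            printf("no DM backing 0x%lx: read completes as 0x%lx\n",
                   p.addr, p.data);
        else
            send_to_server(s, &p);
        return 0;
    }

In the patched hvmemul_do_io() this corresponds to the X86EMUL_UNHANDLEABLE
path: a single hvm_has_dm() call either hands back the server to forward to,
or completes the access and returns NULL, so no retry is needed.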

> ---
> v2:
>   Paul Durrant:
>     I think the two changes only make sense in combination.
>       folded old #3 and old #5 into #3.
>     Actually that comment is not right. The operation is not binned;
>     it's just already been done.
>       Comment has been dropped.
>     I think you can ASSERT(s) here, as you should never get here ...
>       Dropped call to hvm_complete_assist_req(), added ASSERT.
>     So basically collapses down to just this call.
>       Yes; changed to call hvm_send_assist_req_to_ioreq_server()
>       and removed hvm_send_assist_req(), which had only one caller.
> 
>   Jan Beulich:
>     The change on what to do when hvm_send_assist_req() fails is bad.
>       That is correct.  Made hvm_has_dm() do full checking so
>       that the extra VMEXIT and VMENTRY can be skipped.
>     ... hvm_complete_assist_req() being a separate function yet having only
>     a single caller ...
>       Folded hvm_complete_assist_req() into hvm_has_dm(), which is
>       its only caller.
> 
> 
>  xen/arch/x86/hvm/emulate.c    |  8 ++++++--
>  xen/arch/x86/hvm/hvm.c        | 25 ++++++++-----------------
>  xen/include/asm-x86/hvm/hvm.h |  6 ++++--
>  3 files changed, 18 insertions(+), 21 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> index 2ed4344..dca6d2e 100644
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -218,8 +218,11 @@ static int hvmemul_do_io(
>              vio->io_state = HVMIO_handle_mmio_awaiting_completion;
>          break;
>      case X86EMUL_UNHANDLEABLE:
> +    {
> +        struct hvm_ioreq_server *s = hvm_has_dm(curr->domain, &p);
> +
>          /* If there is no backing DM, just ignore accesses */
> -        if ( !hvm_has_dm(curr->domain) )
> +        if ( !s )
>          {
>              rc = X86EMUL_OKAY;
>              vio->io_state = HVMIO_none;
> @@ -227,11 +230,12 @@ static int hvmemul_do_io(
>          else
>          {
>              rc = X86EMUL_RETRY;
> -            if ( !hvm_send_assist_req(&p) )
> +            if ( !hvm_send_assist_req_to_ioreq_server(s, &p) )
>                  vio->io_state = HVMIO_none;
>              else if ( p_data == NULL )
>                  rc = X86EMUL_OKAY;
>          }
> +    }
>          break;
>      default:
>          BUG();
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 6f7b407..81bdf18 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -2507,11 +2507,6 @@ int hvm_buffered_io_send(ioreq_t *p)
>      return 1;
>  }
> 
> -bool_t hvm_has_dm(struct domain *d)
> -{
> -    return !list_empty(&d->arch.hvm_domain.ioreq_server.list);
> -}
> -
>  bool_t hvm_send_assist_req_to_ioreq_server(struct hvm_ioreq_server *s,
>                                             ioreq_t *proto_p)
>  {
> @@ -2519,6 +2514,7 @@ bool_t hvm_send_assist_req_to_ioreq_server(struct hvm_ioreq_server *s,
>      struct domain *d = curr->domain;
>      struct hvm_ioreq_vcpu *sv;
> 
> +    ASSERT(s);
>      if ( unlikely(!vcpu_start_shutdown_deferral(curr)) )
>          return 0; /* implicitly bins the i/o operation */
> 
> @@ -2571,8 +2567,13 @@ bool_t hvm_send_assist_req_to_ioreq_server(struct hvm_ioreq_server *s,
>      return 0;
>  }
> 
> -static bool_t hvm_complete_assist_req(ioreq_t *p)
> +struct hvm_ioreq_server *hvm_has_dm(struct domain *d, ioreq_t *p)
>  {
> +    struct hvm_ioreq_server *s = hvm_select_ioreq_server(current->domain, p);
> +
> +    if ( s )
> +        return s;
> +
>      ASSERT(p->type != IOREQ_TYPE_PCI_CONFIG);
>      switch ( p->type )
>      {
> @@ -2599,17 +2600,7 @@ static bool_t hvm_complete_assist_req(ioreq_t *p)
>          break;
>      }
> 
> -    return 1;
> -}
> -
> -bool_t hvm_send_assist_req(ioreq_t *p)
> -{
> -    struct hvm_ioreq_server *s = hvm_select_ioreq_server(current->domain, p);
> -
> -    if ( !s )
> -        return hvm_complete_assist_req(p);
> -
> -    return hvm_send_assist_req_to_ioreq_server(s, p);
> +    return NULL;
>  }
> 
>  void hvm_broadcast_assist_req(ioreq_t *p)
> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
> index e3d2d9a..4bcc61c 100644
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -228,7 +228,9 @@ int hvm_vcpu_cacheattr_init(struct vcpu *v);
>  void hvm_vcpu_cacheattr_destroy(struct vcpu *v);
>  void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip);
> 
> -bool_t hvm_send_assist_req(ioreq_t *p);
> +struct hvm_ioreq_server;
> +bool_t hvm_send_assist_req_to_ioreq_server(struct hvm_ioreq_server *s,
> +                                           ioreq_t *p);
>  void hvm_broadcast_assist_req(ioreq_t *p);
> 
>  void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat);
> @@ -359,7 +361,7 @@ void hvm_hypervisor_cpuid_leaf(uint32_t sub_idx,
>  void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
>                                     unsigned int *ecx, unsigned int *edx);
>  void hvm_migrate_timers(struct vcpu *v);
> -bool_t hvm_has_dm(struct domain *d);
> +struct hvm_ioreq_server *hvm_has_dm(struct domain *d, ioreq_t *p);
>  bool_t hvm_io_pending(struct vcpu *v);
>  void hvm_do_resume(struct vcpu *v);
>  void hvm_migrate_pirqs(struct vcpu *v);
> --
> 1.8.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

