Re: [Xen-devel] [RFC PATCH 3/5] ioreq-server: on-demand creation of ioreq server


  • To: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>
  • From: Paul Durrant <Paul.Durrant@xxxxxxxxxx>
  • Date: Thu, 30 Jan 2014 15:32:05 +0000
  • Accept-language: en-GB, en-US
  • Cc: "xen-devel@xxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxx>
  • Delivery-date: Thu, 30 Jan 2014 15:32:19 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xen.org>
  • Thread-index: AQHPHcZanTzHwQ/6o0WY9Jgbf8Cx0JqdUa4AgAASntA=
  • Thread-topic: [Xen-devel] [RFC PATCH 3/5] ioreq-server: on-demand creation of ioreq server

> -----Original Message-----
> From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx]
> Sent: 30 January 2014 15:22
> To: Paul Durrant
> Cc: xen-devel@xxxxxxxxxxxxx
> Subject: Re: [Xen-devel] [RFC PATCH 3/5] ioreq-server: on-demand creation
> of ioreq server
> 
> On 30/01/14 14:19, Paul Durrant wrote:
> > This patch only creates the ioreq server when the legacy HVM parameters
> > are touched by an emulator. It also lays some groundwork for supporting
> > multiple IOREQ servers. For instance, it introduces ioreq server reference
> > counting which is not strictly necessary at this stage but will become so
> > when ioreq servers can be destroyed prior the domain dying.
> >
> > There is a significant change in the layout of the special pages reserved
> > in xc_hvm_build_x86.c. This is so that we can 'grow' them downwards without
> > moving pages such as the xenstore page when building a domain that can
> > support more than one emulator.
> >
> > Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
> > ---
> >  tools/libxc/xc_hvm_build_x86.c   |   41 ++--
> >  xen/arch/x86/hvm/hvm.c           |  409 ++++++++++++++++++++++++++------------
> >  xen/include/asm-x86/hvm/domain.h |    3 +-
> >  3 files changed, 314 insertions(+), 139 deletions(-)
> >
> > diff --git a/tools/libxc/xc_hvm_build_x86.c b/tools/libxc/xc_hvm_build_x86.c
> > index 77bd365..f24f2a1 100644
> > --- a/tools/libxc/xc_hvm_build_x86.c
> > +++ b/tools/libxc/xc_hvm_build_x86.c
> > @@ -41,13 +41,12 @@
> >  #define SPECIALPAGE_PAGING   0
> >  #define SPECIALPAGE_ACCESS   1
> >  #define SPECIALPAGE_SHARING  2
> > -#define SPECIALPAGE_BUFIOREQ 3
> > -#define SPECIALPAGE_XENSTORE 4
> > -#define SPECIALPAGE_IOREQ    5
> > -#define SPECIALPAGE_IDENT_PT 6
> > -#define SPECIALPAGE_CONSOLE  7
> > -#define NR_SPECIAL_PAGES     8
> > -#define special_pfn(x) (0xff000u - NR_SPECIAL_PAGES + (x))
> > +#define SPECIALPAGE_XENSTORE 3
> > +#define SPECIALPAGE_IDENT_PT 4
> > +#define SPECIALPAGE_CONSOLE  5
> > +#define SPECIALPAGE_IOREQ    6
> > +#define NR_SPECIAL_PAGES     SPECIALPAGE_IOREQ + 2 /* ioreq server needs 2 pages */
> > +#define special_pfn(x) (0xff000u - (x))
> >
> >  static int modules_init(struct xc_hvm_build_args *args,
> >                          uint64_t vend, struct elf_binary *elf,
> > @@ -112,7 +111,7 @@ static void build_hvm_info(void *hvm_info_page, uint64_t mem_size,
> >      /* Memory parameters. */
> >      hvm_info->low_mem_pgend = lowmem_end >> PAGE_SHIFT;
> >      hvm_info->high_mem_pgend = highmem_end >> PAGE_SHIFT;
> > -    hvm_info->reserved_mem_pgstart = special_pfn(0);
> > +    hvm_info->reserved_mem_pgstart = special_pfn(0) - NR_SPECIAL_PAGES;
> >
> >      /* Finish with the checksum. */
> >      for ( i = 0, sum = 0; i < hvm_info->length; i++ )
> > @@ -463,6 +462,24 @@ static int setup_guest(xc_interface *xch,
> >      munmap(hvm_info_page, PAGE_SIZE);
> >
> >      /* Allocate and clear special pages. */
> > +
> > +     DPRINTF("%d SPECIAL PAGES:\n"
> > +            "  PAGING:    %"PRI_xen_pfn"\n"
> > +            "  ACCESS:    %"PRI_xen_pfn"\n"
> > +            "  SHARING:   %"PRI_xen_pfn"\n"
> > +            "  STORE:     %"PRI_xen_pfn"\n"
> > +            "  IDENT_PT:  %"PRI_xen_pfn"\n"
> > +            "  CONSOLE:   %"PRI_xen_pfn"\n"
> > +            "  IOREQ:     %"PRI_xen_pfn"\n",
> > +            NR_SPECIAL_PAGES,
> > +            (xen_pfn_t)special_pfn(SPECIALPAGE_PAGING),
> > +            (xen_pfn_t)special_pfn(SPECIALPAGE_ACCESS),
> > +            (xen_pfn_t)special_pfn(SPECIALPAGE_SHARING),
> > +            (xen_pfn_t)special_pfn(SPECIALPAGE_XENSTORE),
> > +            (xen_pfn_t)special_pfn(SPECIALPAGE_IDENT_PT),
> > +            (xen_pfn_t)special_pfn(SPECIALPAGE_CONSOLE),
> > +            (xen_pfn_t)special_pfn(SPECIALPAGE_IOREQ));
> > +
> 
> I realise I am being quite picky here, but from a daemon point of view
> trying to log to facilities like syslog, multi-line single debugging
> messages are a pain.  Would it be possible to do this as 8 DPRINTF()s?
> 

Yes, of course.
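
Something like this, one DPRINTF() per line (a sketch, keeping the same
format strings as above):

    DPRINTF("%d SPECIAL PAGES:\n", NR_SPECIAL_PAGES);
    DPRINTF("  PAGING:    %"PRI_xen_pfn"\n",
            (xen_pfn_t)special_pfn(SPECIALPAGE_PAGING));
    DPRINTF("  ACCESS:    %"PRI_xen_pfn"\n",
            (xen_pfn_t)special_pfn(SPECIALPAGE_ACCESS));
    DPRINTF("  SHARING:   %"PRI_xen_pfn"\n",
            (xen_pfn_t)special_pfn(SPECIALPAGE_SHARING));
    DPRINTF("  STORE:     %"PRI_xen_pfn"\n",
            (xen_pfn_t)special_pfn(SPECIALPAGE_XENSTORE));
    DPRINTF("  IDENT_PT:  %"PRI_xen_pfn"\n",
            (xen_pfn_t)special_pfn(SPECIALPAGE_IDENT_PT));
    DPRINTF("  CONSOLE:   %"PRI_xen_pfn"\n",
            (xen_pfn_t)special_pfn(SPECIALPAGE_CONSOLE));
    DPRINTF("  IOREQ:     %"PRI_xen_pfn"\n",
            (xen_pfn_t)special_pfn(SPECIALPAGE_IOREQ));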

> >      for ( i = 0; i < NR_SPECIAL_PAGES; i++ )
> >      {
> >          xen_pfn_t pfn = special_pfn(i);
> > @@ -478,10 +495,6 @@ static int setup_guest(xc_interface *xch,
> >
> >      xc_set_hvm_param(xch, dom, HVM_PARAM_STORE_PFN,
> >                       special_pfn(SPECIALPAGE_XENSTORE));
> > -    xc_set_hvm_param(xch, dom, HVM_PARAM_BUFIOREQ_PFN,
> > -                     special_pfn(SPECIALPAGE_BUFIOREQ));
> > -    xc_set_hvm_param(xch, dom, HVM_PARAM_IOREQ_PFN,
> > -                     special_pfn(SPECIALPAGE_IOREQ));
> >      xc_set_hvm_param(xch, dom, HVM_PARAM_CONSOLE_PFN,
> >                       special_pfn(SPECIALPAGE_CONSOLE));
> >      xc_set_hvm_param(xch, dom, HVM_PARAM_PAGING_RING_PFN,
> > @@ -490,6 +503,10 @@ static int setup_guest(xc_interface *xch,
> >                       special_pfn(SPECIALPAGE_ACCESS));
> >      xc_set_hvm_param(xch, dom, HVM_PARAM_SHARING_RING_PFN,
> >                       special_pfn(SPECIALPAGE_SHARING));
> > +    xc_set_hvm_param(xch, dom, HVM_PARAM_IOREQ_PFN,
> > +                     special_pfn(SPECIALPAGE_IOREQ));
> > +    xc_set_hvm_param(xch, dom, HVM_PARAM_BUFIOREQ_PFN,
> > +                     special_pfn(SPECIALPAGE_IOREQ) - 1);
> >
> >      /*
> >       * Identity-map page table is required for running with CR0.PG=0 when
> > diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> > index a0eaadb..d9874fb 100644
> > --- a/xen/arch/x86/hvm/hvm.c
> > +++ b/xen/arch/x86/hvm/hvm.c
> > @@ -352,24 +352,9 @@ static ioreq_t *get_ioreq(struct hvm_ioreq_server *s, int id)
> >      return &p->vcpu_ioreq[id];
> >  }
> >
> > -void hvm_do_resume(struct vcpu *v)
> > +static void hvm_wait_on_io(struct domain *d, ioreq_t *p)
> >  {
> > -    struct hvm_ioreq_server *s;
> > -    ioreq_t *p;
> > -
> > -    check_wakeup_from_wait();
> > -
> > -    if ( is_hvm_vcpu(v) )
> > -        pt_restore_timer(v);
> > -
> > -    s = v->arch.hvm_vcpu.ioreq_server;
> > -    v->arch.hvm_vcpu.ioreq_server = NULL;
> > -
> > -    if ( !s )
> > -        goto check_inject_trap;
> > -
> >      /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
> > -    p = get_ioreq(s, v->vcpu_id);
> >      while ( p->state != STATE_IOREQ_NONE )
> >      {
> >          switch ( p->state )
> > @@ -385,12 +370,32 @@ void hvm_do_resume(struct vcpu *v)
> >              break;
> >          default:
> >             gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
> > -            domain_crash(v->domain);
> > +            domain_crash(d);
> >              return; /* bail */
> >          }
> >      }
> > +}
> > +
> > +void hvm_do_resume(struct vcpu *v)
> > +{
> > +    struct domain *d = v->domain;
> > +    struct hvm_ioreq_server *s;
> > +
> > +    check_wakeup_from_wait();
> > +
> > +    if ( is_hvm_vcpu(v) )
> > +        pt_restore_timer(v);
> > +
> > +    s = v->arch.hvm_vcpu.ioreq_server;
> > +    v->arch.hvm_vcpu.ioreq_server = NULL;
> > +
> > +    if ( s )
> > +    {
> > +        ioreq_t *p = get_ioreq(s, v->vcpu_id);
> > +
> > +        hvm_wait_on_io(d, p);
> > +    }
> >
> > - check_inject_trap:
> >      /* Inject pending hw/sw trap */
> >      if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
> >      {
> > @@ -399,11 +404,13 @@ void hvm_do_resume(struct vcpu *v)
> >      }
> >  }
> >
> > -static void hvm_init_ioreq_page(
> > -    struct domain *d, struct hvm_ioreq_page *iorp)
> > +static void hvm_init_ioreq_page(struct hvm_ioreq_server *s, int buf)
> >  {
> > +    struct hvm_ioreq_page *iorp;
> > +
> > +    iorp = ( buf ) ? &s->buf_ioreq : &s->ioreq;
> > +
> 
> Brackets are redundant.
> 

...but good style IMO.
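
That is, I'd keep

    iorp = ( buf ) ? &s->buf_ioreq : &s->ioreq;

rather than

    iorp = buf ? &s->buf_ioreq : &s->ioreq;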

> >      spin_lock_init(&iorp->lock);
> > -    domain_pause(d);
> >  }
> >
> >  void destroy_ring_for_helper(
> > @@ -419,16 +426,13 @@ void destroy_ring_for_helper(
> >      }
> >  }
> >
> > -static void hvm_destroy_ioreq_page(
> > -    struct domain *d, struct hvm_ioreq_page *iorp)
> > +static void hvm_destroy_ioreq_page(struct hvm_ioreq_server *s, int buf)
> >  {
> > -    spin_lock(&iorp->lock);
> > +    struct hvm_ioreq_page *iorp;
> >
> > -    ASSERT(d->is_dying);
> > +    iorp = ( buf ) ? &s->buf_ioreq : &s->ioreq;
> >
> >      destroy_ring_for_helper(&iorp->va, iorp->page);
> > -
> > -    spin_unlock(&iorp->lock);
> >  }
> >
> >  int prepare_ring_for_helper(
> > @@ -476,8 +480,10 @@ int prepare_ring_for_helper(
> >  }
> >
> >  static int hvm_set_ioreq_page(
> > -    struct domain *d, struct hvm_ioreq_page *iorp, unsigned long gmfn)
> > +    struct hvm_ioreq_server *s, int buf, unsigned long gmfn)
> >  {
> > +    struct domain *d = s->domain;
> > +    struct hvm_ioreq_page *iorp;
> >      struct page_info *page;
> >      void *va;
> >      int rc;
> > @@ -485,22 +491,17 @@ static int hvm_set_ioreq_page(
> >      if ( (rc = prepare_ring_for_helper(d, gmfn, &page, &va)) )
> >          return rc;
> >
> > -    spin_lock(&iorp->lock);
> > +    iorp = ( buf ) ? &s->buf_ioreq : &s->ioreq;
> >
> >      if ( (iorp->va != NULL) || d->is_dying )
> >      {
> > -        destroy_ring_for_helper(&iorp->va, iorp->page);
> > -        spin_unlock(&iorp->lock);
> > +        destroy_ring_for_helper(&va, page);
> >          return -EINVAL;
> >      }
> >
> >      iorp->va = va;
> >      iorp->page = page;
> >
> > -    spin_unlock(&iorp->lock);
> > -
> > -    domain_unpause(d);
> > -
> >      return 0;
> >  }
> >
> > @@ -544,38 +545,6 @@ static int handle_pvh_io(
> >      return X86EMUL_OKAY;
> >  }
> >
> > -static int hvm_init_ioreq_server(struct domain *d)
> > -{
> > -    struct hvm_ioreq_server *s;
> > -    int i;
> > -
> > -    s = xzalloc(struct hvm_ioreq_server);
> > -    if ( !s )
> > -        return -ENOMEM;
> > -
> > -    s->domain = d;
> > -
> > -    for ( i = 0; i < MAX_HVM_VCPUS; i++ )
> > -        s->ioreq_evtchn[i] = -1;
> > -    s->buf_ioreq_evtchn = -1;
> > -
> > -    hvm_init_ioreq_page(d, &s->ioreq);
> > -    hvm_init_ioreq_page(d, &s->buf_ioreq);
> > -
> > -    d->arch.hvm_domain.ioreq_server = s;
> > -    return 0;
> > -}
> > -
> > -static void hvm_deinit_ioreq_server(struct domain *d)
> > -{
> > -    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
> > -
> > -    hvm_destroy_ioreq_page(d, &s->ioreq);
> > -    hvm_destroy_ioreq_page(d, &s->buf_ioreq);
> > -
> > -    xfree(s);
> > -}
> > -
> >  static void hvm_update_ioreq_server_evtchn(struct hvm_ioreq_server *s)
> >  {
> >      struct domain *d = s->domain;
> > @@ -637,6 +606,152 @@ static void hvm_ioreq_server_remove_vcpu(struct hvm_ioreq_server *s, struct vcpu
> >      }
> >  }
> >
> > +static int hvm_create_ioreq_server(struct domain *d, domid_t domid)
> > +{
> > +    struct hvm_ioreq_server *s;
> > +    int i;
> > +    unsigned long pfn;
> > +    struct vcpu *v;
> > +    int rc;
> 
> i and rc can be declared together.
> 
> > +
> > +    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
> > +
> > +    rc = -EEXIST;
> > +    if ( d->arch.hvm_domain.ioreq_server != NULL )
> > +        goto fail_exist;
> > +
> > +    gdprintk(XENLOG_INFO, "%s: %d\n", __func__, d->domain_id);
> > +
> > +    rc = -ENOMEM;
> > +    s = xzalloc(struct hvm_ioreq_server);
> > +    if ( !s )
> > +        goto fail_alloc;
> > +
> > +    s->domain = d;
> > +    s->domid = domid;
> > +
> > +    for ( i = 0; i < MAX_HVM_VCPUS; i++ )
> > +        s->ioreq_evtchn[i] = -1;
> > +    s->buf_ioreq_evtchn = -1;
> > +
> > +    /* Initialize shared pages */
> > +    pfn = d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN];
> > +
> > +    hvm_init_ioreq_page(s, 0);
> > +    if ( (rc = hvm_set_ioreq_page(s, 0, pfn)) < 0 )
> > +        goto fail_set_ioreq;
> > +
> > +    pfn = d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN];
> > +
> > +    hvm_init_ioreq_page(s, 1);
> > +    if ( (rc = hvm_set_ioreq_page(s, 1, pfn)) < 0 )
> > +        goto fail_set_buf_ioreq;
> > +
> > +    for_each_vcpu ( d, v )
> > +    {
> > +        if ( (rc = hvm_ioreq_server_add_vcpu(s, v)) < 0 )
> > +            goto fail_add_vcpu;
> > +    }
> > +
> > +    d->arch.hvm_domain.ioreq_server = s;
> > +
> > +    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
> > +
> > +    return 0;
> > +
> > +fail_add_vcpu:
> > +    for_each_vcpu ( d, v )
> > +        hvm_ioreq_server_remove_vcpu(s, v);
> > +    hvm_destroy_ioreq_page(s, 1);
> > +fail_set_buf_ioreq:
> > +    hvm_destroy_ioreq_page(s, 0);
> > +fail_set_ioreq:
> > +    xfree(s);
> > +fail_alloc:
> > +fail_exist:
> > +    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
> > +    return rc;
> > +}
> > +
> > +static void hvm_destroy_ioreq_server(struct domain *d)
> > +{
> > +    struct hvm_ioreq_server *s;
> > +    struct vcpu *v;
> > +
> > +    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
> > +
> > +    gdprintk(XENLOG_INFO, "%s: %d\n", __func__, d->domain_id);
> > +
> > +    s = d->arch.hvm_domain.ioreq_server;
> > +    if ( !s )
> > +        goto done;
> > +
> > +    domain_pause(d);
> > +
> > +    d->arch.hvm_domain.ioreq_server = NULL;
> > +
> > +    for_each_vcpu ( d, v )
> > +        hvm_ioreq_server_remove_vcpu(s, v);
> > +
> > +    hvm_destroy_ioreq_page(s, 1);
> > +    hvm_destroy_ioreq_page(s, 0);
> > +
> > +    xfree(s);
> > +
> > +    domain_unpause(d);
> > +
> > +done:
> > +    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
> > +}
> > +
> > +static int hvm_get_ioreq_server_buf_port(struct domain *d, evtchn_port_t *port)
> > +{
> > +    struct hvm_ioreq_server *s;
> > +    int rc;
> > +
> > +    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
> > +
> > +    s = d->arch.hvm_domain.ioreq_server;
> > +
> > +    rc = -ENOENT;
> > +    if ( !s )
> > +        goto done;
> > +
> > +    *port = s->buf_ioreq_evtchn;
> > +    rc = 0;
> > +
> > +done:
> > +    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
> > +
> > +    return rc;
> > +}
> > +
> > +static int hvm_get_ioreq_server_pfn(struct domain *d, int buf, xen_pfn_t *pfn)
> > +{
> > +    struct hvm_ioreq_server *s;
> > +    int rc;
> > +
> > +    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
> > +
> > +    s = d->arch.hvm_domain.ioreq_server;
> > +
> > +    rc = -ENOENT;
> > +    if ( !s )
> > +        goto done;
> > +
> > +    if ( buf )
> > +        *pfn = d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN];
> > +    else
> > +        *pfn = d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN];
> 
> This can be reduced and use "params[buf ? HVM_PARAM_BUFIOREQ_PFN :
> HVM_PARAM_IOREQ_PFN]", although that is perhaps not as clear.
> 

Indeed. Yuck.
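
For reference, the compacted form would be something like:

    *pfn = d->arch.hvm_domain.params[buf ? HVM_PARAM_BUFIOREQ_PFN
                                         : HVM_PARAM_IOREQ_PFN];

but I think the explicit if/else reads better, so I'll leave it as is.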

> > +
> > +    rc = 0;
> > +
> > +done:
> > +    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
> > +
> > +    return rc;
> > +}
> > +
> >  static int hvm_replace_event_channel(struct vcpu *v, domid_t remote_domid,
> >                                       int *p_port)
> >  {
> > @@ -652,13 +767,24 @@ static int hvm_replace_event_channel(struct vcpu *v, domid_t remote_domid,
> >      return 0;
> >  }
> >
> > -static int hvm_set_ioreq_server_domid(struct hvm_ioreq_server *s, domid_t domid)
> > +static int hvm_set_ioreq_server_domid(struct domain *d, domid_t domid)
> >  {
> > -    struct domain *d = s->domain;
> > +    struct hvm_ioreq_server *s;
> >      struct vcpu *v;
> >      int rc = 0;
> >
> >      domain_pause(d);
> > +    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
> > +
> > +    s = d->arch.hvm_domain.ioreq_server;
> > +
> > +    rc = -ENOENT;
> > +    if ( !s )
> > +        goto done;
> > +
> > +    rc = 0;
> > +    if ( s->domid == domid )
> > +        goto done;
> >
> >      if ( d->vcpu[0] )
> >      {
> > @@ -680,31 +806,11 @@ static int hvm_set_ioreq_server_domid(struct hvm_ioreq_server *s, domid_t domid)
> >
> >  done:
> >      domain_unpause(d);
> > +    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
> 
> Mismatched order of pause/unpause and lock/unlock pairs.  The unlock
> should ideally be before the unpause.
> 

Ok.
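
So the exit path would become something like:

 done:
    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
    domain_unpause(d);

    return rc;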

> >
> >      return rc;
> >  }
> >
> > -static int hvm_set_ioreq_server_pfn(struct hvm_ioreq_server *s, unsigned long pfn)
> > -{
> > -    struct domain *d = s->domain;
> > -    int rc;
> > -
> > -    rc = hvm_set_ioreq_page(d, &s->ioreq, pfn);
> > -    if ( rc < 0 )
> > -        return rc;
> > -
> > -    hvm_update_ioreq_server_evtchn(s);
> > -
> > -    return 0;
> > -}
> > -
> > -static int hvm_set_ioreq_server_buf_pfn(struct hvm_ioreq_server *s, unsigned long pfn)
> > -{
> > -    struct domain *d = s->domain;
> > -
> > -    return  hvm_set_ioreq_page(d, &s->buf_ioreq, pfn);
> > -}
> > -
> >  int hvm_domain_initialise(struct domain *d)
> >  {
> >      int rc;
> > @@ -732,6 +838,7 @@ int hvm_domain_initialise(struct domain *d)
> >
> >      }
> >
> > +    spin_lock_init(&d->arch.hvm_domain.ioreq_server_lock);
> >      spin_lock_init(&d->arch.hvm_domain.irq_lock);
> >      spin_lock_init(&d->arch.hvm_domain.uc_lock);
> >
> > @@ -772,20 +879,14 @@ int hvm_domain_initialise(struct domain *d)
> >
> >      rtc_init(d);
> >
> > -    rc = hvm_init_ioreq_server(d);
> > -    if ( rc != 0 )
> > -        goto fail2;
> > -
> >      register_portio_handler(d, 0xe9, 1, hvm_print_line);
> >
> >      rc = hvm_funcs.domain_initialise(d);
> >      if ( rc != 0 )
> > -        goto fail3;
> > +        goto fail2;
> >
> >      return 0;
> >
> > - fail3:
> > -    hvm_deinit_ioreq_server(d);
> >   fail2:
> >      rtc_deinit(d);
> >      stdvga_deinit(d);
> > @@ -809,7 +910,7 @@ void hvm_domain_relinquish_resources(struct domain *d)
> >      if ( hvm_funcs.nhvm_domain_relinquish_resources )
> >          hvm_funcs.nhvm_domain_relinquish_resources(d);
> >
> > -    hvm_deinit_ioreq_server(d);
> > +    hvm_destroy_ioreq_server(d);
> >
> >      msixtbl_pt_cleanup(d);
> >
> > @@ -1364,11 +1465,16 @@ int hvm_vcpu_initialise(struct vcpu *v)
> >           && (rc = nestedhvm_vcpu_initialise(v)) < 0 ) /* teardown: nestedhvm_vcpu_destroy */
> >          goto fail5;
> >
> > +    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
> >      s = d->arch.hvm_domain.ioreq_server;
> > +    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
> >
> > -    rc = hvm_ioreq_server_add_vcpu(s, v);
> > -    if ( rc < 0 )
> > -        goto fail6;
> > +    if ( s )
> > +    {
> > +        rc = hvm_ioreq_server_add_vcpu(s, v);
> > +        if ( rc < 0 )
> > +            goto fail6;
> > +    }
> >
> >      if ( v->vcpu_id == 0 )
> >      {
> > @@ -1404,9 +1510,14 @@ int hvm_vcpu_initialise(struct vcpu *v)
> >  void hvm_vcpu_destroy(struct vcpu *v)
> >  {
> >      struct domain *d = v->domain;
> > -    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
> > +    struct hvm_ioreq_server *s;
> > +
> > +    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
> > +    s = d->arch.hvm_domain.ioreq_server;
> > +    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
> >
> > -    hvm_ioreq_server_remove_vcpu(s, v);
> > +    if ( s )
> > +        hvm_ioreq_server_remove_vcpu(s, v);
> >
> >      nestedhvm_vcpu_destroy(v);
> >
> > @@ -1459,7 +1570,10 @@ int hvm_buffered_io_send(ioreq_t *p)
> >      /* Ensure buffered_iopage fits in a page */
> >      BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);
> >
> > +    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
> >      s = d->arch.hvm_domain.ioreq_server;
> > +    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
> > +
> >      if ( !s )
> >          return 0;
> >
> > @@ -1532,20 +1646,12 @@ int hvm_buffered_io_send(ioreq_t *p)
> >      return 1;
> >  }
> >
> > -bool_t hvm_send_assist_req(struct vcpu *v, ioreq_t *proto_p)
> > +static bool_t hvm_send_assist_req_to_server(struct hvm_ioreq_server *s,
> > +                                            struct vcpu *v,
> > +                                            ioreq_t *proto_p)
> >  {
> >      struct domain *d = v->domain;
> > -    struct hvm_ioreq_server *s;
> > -    ioreq_t *p;
> > -
> > -    if ( unlikely(!vcpu_start_shutdown_deferral(v)) )
> > -        return 0; /* implicitly bins the i/o operation */
> > -
> > -    s = d->arch.hvm_domain.ioreq_server;
> > -    if ( !s )
> > -        return 0;
> > -
> > -    p = get_ioreq(s, v->vcpu_id);
> > +    ioreq_t *p = get_ioreq(s, v->vcpu_id);
> >
> >      if ( unlikely(p->state != STATE_IOREQ_NONE) )
> >      {
> > @@ -1578,6 +1684,26 @@ bool_t hvm_send_assist_req(struct vcpu *v, ioreq_t *proto_p)
> >      return 1;
> >  }
> >
> > +bool_t hvm_send_assist_req(struct vcpu *v, ioreq_t *p)
> > +{
> > +    struct domain *d = v->domain;
> > +    struct hvm_ioreq_server *s;
> > +
> > +    ASSERT(v->arch.hvm_vcpu.ioreq_server == NULL);
> > +
> > +    if ( unlikely(!vcpu_start_shutdown_deferral(v)) )
> > +        return 0;
> > +
> > +    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
> > +    s = d->arch.hvm_domain.ioreq_server;
> > +    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
> 
> What is the purpose of talking the server lock just to read the
> ioreq_server pointer?
> 

The lock is supposed to be there to eventually wrap a list walk, but as that's
done in a separate function the lock is probably not particularly illustrative
here - I'll ditch it.
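
i.e. just something like:

    s = d->arch.hvm_domain.ioreq_server;
    if ( !s )
        return 0;

    return hvm_send_assist_req_to_server(s, v, p);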

> > +
> > +    if ( !s )
> > +        return 0;
> > +
> > +    return hvm_send_assist_req_to_server(s, v, p);
> > +}
> > +
> >  void hvm_hlt(unsigned long rflags)
> >  {
> >      struct vcpu *curr = current;
> > @@ -4172,7 +4298,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
> >      case HVMOP_get_param:
> >      {
> >          struct xen_hvm_param a;
> > -        struct hvm_ioreq_server *s;
> >          struct domain *d;
> >          struct vcpu *v;
> >
> > @@ -4198,20 +4323,12 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
> >          if ( rc )
> >              goto param_fail;
> >
> > -        s = d->arch.hvm_domain.ioreq_server;
> > -
> >          if ( op == HVMOP_set_param )
> >          {
> >              rc = 0;
> >
> >              switch ( a.index )
> >              {
> > -            case HVM_PARAM_IOREQ_PFN:
> > -                rc = hvm_set_ioreq_server_pfn(s, a.value);
> > -                break;
> > -            case HVM_PARAM_BUFIOREQ_PFN:
> > -                rc = hvm_set_ioreq_server_buf_pfn(s, a.value);
> > -                break;
> >              case HVM_PARAM_CALLBACK_IRQ:
> >                  hvm_set_callback_via(d, a.value);
> >                  hvm_latch_shinfo_size(d);
> > @@ -4265,7 +4382,9 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
> >                  if ( a.value == DOMID_SELF )
> >                      a.value = curr_d->domain_id;
> >
> > -                rc = hvm_set_ioreq_server_domid(s, a.value);
> > +                rc = hvm_create_ioreq_server(d, a.value);
> > +                if ( rc == -EEXIST )
> > +                    rc = hvm_set_ioreq_server_domid(d, a.value);
> >                  break;
> >              case HVM_PARAM_ACPI_S_STATE:
> >                  /* Not reflexive, as we must domain_pause(). */
> > @@ -4360,8 +4479,46 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
> >          {
> >              switch ( a.index )
> >              {
> > +            case HVM_PARAM_IOREQ_PFN:
> > +            case HVM_PARAM_BUFIOREQ_PFN:
> >              case HVM_PARAM_BUFIOREQ_EVTCHN:
> > -                a.value = s->buf_ioreq_evtchn;
> > +                /* May need to create server */
> > +                rc = hvm_create_ioreq_server(d, curr_d->domain_id);
> > +                if ( rc != 0 && rc != -EEXIST )
> > +                    goto param_fail;
> > +
> > +                switch ( a.index )
> > +                {
> > +                case HVM_PARAM_IOREQ_PFN: {
> > +                    xen_pfn_t pfn;
> > +
> > +                    if ( (rc = hvm_get_ioreq_server_pfn(d, 0, &pfn)) < 0 )
> > +                        goto param_fail;
> > +
> > +                    a.value = pfn;
> > +                    break;
> > +                }
> > +                case HVM_PARAM_BUFIOREQ_PFN: {
> > +                    xen_pfn_t pfn;
> > +
> > +                    if ( (rc = hvm_get_ioreq_server_pfn(d, 1, &pfn)) < 0 )
> > +                        goto param_fail;
> > +
> > +                    a.value = pfn;
> > +                    break;
> > +                }
> > +                case HVM_PARAM_BUFIOREQ_EVTCHN: {
> > +                    evtchn_port_t port;
> > +
> > +                    if ( (rc = hvm_get_ioreq_server_buf_port(d, &port)) < 0 )
> > +                        goto param_fail;
> > +
> > +                    a.value = port;
> > +                    break;
> > +                }
> > +                default:
> > +                    BUG();
> > +                }
> >                  break;
> >              case HVM_PARAM_ACPI_S_STATE:
> >                  a.value = d->arch.hvm_domain.is_s3_suspended ? 3 : 0;
> > diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
> > index 4c039f8..e750ef0 100644
> > --- a/xen/include/asm-x86/hvm/domain.h
> > +++ b/xen/include/asm-x86/hvm/domain.h
> > @@ -52,6 +52,8 @@ struct hvm_ioreq_server {
> >
> >  struct hvm_domain {
> >      struct hvm_ioreq_server *ioreq_server;
> > +    spinlock_t              ioreq_server_lock;
> > +
> >      struct pl_time         pl_time;
> >
> >      struct hvm_io_handler *io_handler;
> > @@ -106,4 +108,3 @@ struct hvm_domain {
> >  #define hap_enabled(d)  ((d)->arch.hvm_domain.hap_enabled)
> >
> >  #endif /* __ASM_X86_HVM_DOMAIN_H__ */
> > -
> 
> Spurious whitespace change
> 

Ok.

  Paul

> ~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
