
Re: [Xen-devel] [PATCH v5 5/9] ioreq-server: on-demand creation of ioreq server



> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@xxxxxxxx]
> Sent: 06 May 2014 15:19
> To: Paul Durrant
> Cc: xen-devel@xxxxxxxxxxxxx; Keir (Xen.org)
> Subject: Re: [PATCH v5 5/9] ioreq-server: on-demand creation of ioreq server
> 
> >>> On 01.05.14 at 14:08, <paul.durrant@xxxxxxxxxx> wrote:
> > --- a/xen/arch/x86/hvm/hvm.c
> > +++ b/xen/arch/x86/hvm/hvm.c
> > @@ -389,40 +389,38 @@ void hvm_do_resume(struct vcpu *v)
> >  {
> >      struct domain *d = v->domain;
> >      struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
> > -    ioreq_t *p;
> >
> >      check_wakeup_from_wait();
> >
> >      if ( is_hvm_vcpu(v) )
> >          pt_restore_timer(v);
> >
> > -    if ( !s )
> > -        goto check_inject_trap;
> > -
> > -    /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
> > -    p = get_ioreq(s, v);
> > -    while ( p->state != STATE_IOREQ_NONE )
> > +    if ( s )
> >      {
> > -        switch ( p->state )
> > +        ioreq_t *p = get_ioreq(s, v);
> > +
> > +        while ( p->state != STATE_IOREQ_NONE )
> >          {
> > -        case STATE_IORESP_READY: /* IORESP_READY -> NONE */
> > -            rmb(); /* see IORESP_READY /then/ read contents of ioreq */
> > -            hvm_io_assist(p);
> > -            break;
> > -        case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
> > -        case STATE_IOREQ_INPROCESS:
> > -            wait_on_xen_event_channel(p->vp_eport,
> > -                                      (p->state != STATE_IOREQ_READY) &&
> > -                                      (p->state != STATE_IOREQ_INPROCESS));
> > -            break;
> > -        default:
> > -            gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
> > -            domain_crash(v->domain);
> > -            return; /* bail */
> > +            switch ( p->state )
> > +            {
> > +            case STATE_IORESP_READY: /* IORESP_READY -> NONE */
> > +                rmb(); /* see IORESP_READY /then/ read contents of ioreq */
> > +                hvm_io_assist(p);
> > +                break;
> > +            case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
> > +            case STATE_IOREQ_INPROCESS:
> > +                wait_on_xen_event_channel(p->vp_eport,
> > +                                          (p->state != STATE_IOREQ_READY) &&
> > +                                          (p->state != STATE_IOREQ_INPROCESS));
> > +                break;
> > +            default:
> > +                gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
> > +                domain_crash(d);
> > +                return; /* bail */
> > +            }
> >          }
> >      }
> >
> > - check_inject_trap:
> >      /* Inject pending hw/sw trap */
> >      if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
> >      {
> 
> Isn't this entire hunk just a stylistic change from using goto to the
> alternative if() representation? I would strongly suggest leaving out
> such reformatting from an already large patch.
> 

Ok. I'll pull it out into a pre-series tidy-up patch.

> > -static int hvm_create_ioreq_server(struct domain *d, domid_t domid)
> > +static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
> >  {
> > -    struct hvm_ioreq_server *s;
> > +    struct hvm_ioreq_vcpu *sv, *next;
> >
> > -    s = xzalloc(struct hvm_ioreq_server);
> > -    if ( !s )
> > -        return -ENOMEM;
> > +    spin_lock(&s->lock);
> > +
> > +    list_for_each_entry_safe ( sv,
> > +                               next,
> > +                               &s->ioreq_vcpu_list,
> > +                               list_entry )
> > +    {
> > +        struct vcpu *v = sv->vcpu;
> > +
> > +        list_del_init(&sv->list_entry);
> 
> list_del() - you're freeing the entry below anyway.
> 

Ok.
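For reference, the loop then unlinks and frees in one go - a sketch only,
keeping the field names from the patch (the rest of the loop body is
unchanged):

    list_for_each_entry_safe ( sv,
                               next,
                               &s->ioreq_vcpu_list,
                               list_entry )
    {
        /* The entry is freed straight away, so plain list_del() suffices;
         * re-initialising the node with list_del_init() would be wasted
         * work. */
        list_del(&sv->list_entry);
        ...
        xfree(sv);
    }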

> > +static int hvm_ioreq_server_init(struct hvm_ioreq_server *s, struct domain *d,
> > +                                 domid_t domid)
> > +{
> > +    struct vcpu *v;
> > +    int rc;
> > +
> > +    gdprintk(XENLOG_DEBUG, "%s %d\n", __func__, domid);
> 
> Please don't in a non-RFC patch.
> 

Ok.

> > +static int hvm_create_ioreq_server(struct domain *d, domid_t domid)
> >  {
> > -    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
> > +    struct hvm_ioreq_server *s;
> >      int rc;
> >
> > -    spin_lock(&s->lock);
> > +    rc = -ENOMEM;
> > +    s = xzalloc(struct hvm_ioreq_server);
> > +    if ( !s )
> > +        goto fail1;
> > +
> > +    domain_pause(d);
> > +    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
> > +
> > +    rc = -EEXIST;
> > +    if ( d->arch.hvm_domain.ioreq_server != NULL )
> > +        goto fail2;
> >
> > -    rc = hvm_map_ioreq_page(s, buf, pfn);
> > +    rc = hvm_ioreq_server_init(s, d, domid);
> >      if ( rc )
> > -        goto fail;
> > +        goto fail3;
> >
> > -    if (!buf) {
> > -        struct hvm_ioreq_vcpu *sv;
> > +    d->arch.hvm_domain.ioreq_server = s;
> >
> > -        list_for_each_entry ( sv,
> > -                              &s->ioreq_vcpu_list,
> > -                              list_entry )
> > -            hvm_update_ioreq_evtchn(s, sv);
> > -    }
> > +    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
> > +    domain_unpause(d);
> >
> > -    spin_unlock(&s->lock);
> >      return 0;
> >
> > - fail:
> > -    spin_unlock(&s->lock);
> > + fail3:
> > + fail2:
> 
> Why two successive labels?
> 

Two different jumping-off points - I know you have a hatred of labels and 
gotos, so I'll collapse them into one.
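I.e. fail2 and fail3 become a single label. Roughly (a sketch - the cleanup
body isn't visible in the hunk above, so the unlock/unpause and xfree are
assumed from context; fail1 stays separate, since at that point the lock
isn't held and the domain isn't paused):

    rc = -EEXIST;
    if ( d->arch.hvm_domain.ioreq_server != NULL )
        goto fail;

    rc = hvm_ioreq_server_init(s, d, domid);
    if ( rc )
        goto fail;
    ...
 fail:
    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
    domain_unpause(d);

    xfree(s);
 fail1:
    return rc;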

> >  static int hvm_set_dm_domain(struct domain *d, domid_t domid)
> >  {
> > -    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
> > +    struct hvm_ioreq_server *s;
> >      int rc = 0;
> >
> > +    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
> > +
> > +    s = d->arch.hvm_domain.ioreq_server;
> > +    if ( !s )
> > +        goto done;
> 
> You didn't do what you were asked for if there's no server - how can
> this result in "success" being returned?
> 

Because lack of a server is not a failure! If the HVM param for the emulating 
domain is set prior to server creation then the server is simply created with 
the correct domain later. If it's set subsequently then the guest needs to be 
paused, the emulating domain updated, and the event channels rebound.
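For illustration, the "set subsequently" path looks roughly like this (a
sketch using names from elsewhere in this series; storing the new domid in
an s->domid field is an assumption on my part):

        struct hvm_ioreq_vcpu *sv;

        domain_pause(d);

        s->domid = domid; /* assumed field: the new emulating domain */
        list_for_each_entry ( sv,
                              &s->ioreq_vcpu_list,
                              list_entry )
            hvm_update_ioreq_evtchn(s, sv); /* rebind event channels */

        domain_unpause(d);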

  Paul

> Jan
