
Re: [Xen-devel] [PATCH v3 2/6] ioreq-server: tidy up use of ioreq_t



> -----Original Message-----
> From: dunlapg@xxxxxxxxx [mailto:dunlapg@xxxxxxxxx] On Behalf Of
> George Dunlap
> Sent: 10 March 2014 15:43
> To: Paul Durrant
> Cc: xen-devel@xxxxxxxxxxxxx
> Subject: Re: [Xen-devel] [PATCH v3 2/6] ioreq-server: tidy up use of ioreq_t
> 
> On Wed, Mar 5, 2014 at 2:47 PM, Paul Durrant <paul.durrant@xxxxxxxxxx>
> wrote:
> > This patch tidies up various occurrences of single-element ioreq_t
> > arrays on the stack and improves coding style.
> >
> > Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
> 
> Maybe I missed this in the earlier discussion, but why is most of this
> not integrated into patch 1?
> 

It was a patch that was added after the v1 RFC patch series. I wanted to keep 
it separate to avoid making patch 1 massively different from what it was before.
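
For what it's worth, the tidy-up boils down to replacing single-element
ioreq_t arrays on the stack with plain structs, using designated initialisers
where that helps. A minimal stand-alone sketch of the before/after pattern
(the example_ioreq_t struct and send_req() below are illustrative stand-ins,
not the real ioreq_t or any Xen function):

    /*
     * Illustrative sketch only, not the Xen sources.  The struct and
     * send_req() stand in for ioreq_t and the real dispatch functions,
     * just to show the pattern the patch replaces.
     */
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    typedef struct {
        uint8_t  type;
        uint8_t  dir;
        uint32_t size;
        uint64_t data;
        uint8_t  state;
    } example_ioreq_t;

    static void send_req(const example_ioreq_t *p)
    {
        printf("type=%u size=%u data=%llu\n",
               (unsigned)p->type, (unsigned)p->size,
               (unsigned long long)p->data);
    }

    /* Old style: single-element array on the stack, memset, p->field. */
    static void old_style(uint64_t val)
    {
        example_ioreq_t p[1];

        memset(p, 0, sizeof(*p));
        p->type = 1;
        p->size = 8;
        p->data = val;

        send_req(p);            /* array decays to a pointer */
    }

    /* New style: plain struct with designated initialisers, passed as &p. */
    static void new_style(uint64_t val)
    {
        example_ioreq_t p = {
            .type = 1,
            .size = 8,
            .data = val,
        };                      /* unnamed members (.dir, .state) are zeroed */

        send_req(&p);
    }

    int main(void)
    {
        old_style(42);
        new_style(42);
        return 0;
    }

One nice side effect: designated initialisers zero any members that are not
named, so the explicit memset() in the old form is no longer needed.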

  Paul

>  -George
> 
> > ---
> >  xen/arch/x86/hvm/emulate.c |   36 ++++++++++++++++++------------------
> >  xen/arch/x86/hvm/hvm.c     |    2 ++
> >  xen/arch/x86/hvm/io.c      |   37 +++++++++++++++++--------------------
> >  3 files changed, 37 insertions(+), 38 deletions(-)
> >
> > diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> > index 0ba2020..1c71902 100644
> > --- a/xen/arch/x86/hvm/emulate.c
> > +++ b/xen/arch/x86/hvm/emulate.c
> > @@ -57,7 +57,7 @@ static int hvmemul_do_io(
> >      int value_is_ptr = (p_data == NULL);
> >      struct vcpu *curr = current;
> >      struct hvm_vcpu_io *vio;
> > -    ioreq_t p[1];
> > +    ioreq_t p;
> >      unsigned long ram_gfn = paddr_to_pfn(ram_gpa);
> >      p2m_type_t p2mt;
> >      struct page_info *ram_page;
> > @@ -171,38 +171,38 @@ static int hvmemul_do_io(
> >      if ( vio->mmio_retrying )
> >          *reps = 1;
> >
> > -    p->dir = dir;
> > -    p->data_is_ptr = value_is_ptr;
> > -    p->type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO;
> > -    p->size = size;
> > -    p->addr = addr;
> > -    p->count = *reps;
> > -    p->df = df;
> > -    p->data = value;
> > +    p.dir = dir;
> > +    p.data_is_ptr = value_is_ptr;
> > +    p.type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO;
> > +    p.size = size;
> > +    p.addr = addr;
> > +    p.count = *reps;
> > +    p.df = df;
> > +    p.data = value;
> >
> >      if ( dir == IOREQ_WRITE )
> > -        hvmtrace_io_assist(is_mmio, p);
> > +        hvmtrace_io_assist(is_mmio, &p);
> >
> >      if ( is_mmio )
> >      {
> > -        rc = hvm_mmio_intercept(p);
> > +        rc = hvm_mmio_intercept(&p);
> >          if ( rc == X86EMUL_UNHANDLEABLE )
> > -            rc = hvm_buffered_io_intercept(p);
> > +            rc = hvm_buffered_io_intercept(&p);
> >      }
> >      else
> >      {
> > -        rc = hvm_portio_intercept(p);
> > +        rc = hvm_portio_intercept(&p);
> >      }
> >
> >      switch ( rc )
> >      {
> >      case X86EMUL_OKAY:
> >      case X86EMUL_RETRY:
> > -        *reps = p->count;
> > -        p->state = STATE_IORESP_READY;
> > +        *reps = p.count;
> > +        p.state = STATE_IORESP_READY;
> >          if ( !vio->mmio_retry )
> >          {
> > -            hvm_io_assist(p);
> > +            hvm_io_assist(&p);
> >              vio->io_state = HVMIO_none;
> >          }
> >          else
> > @@ -219,7 +219,7 @@ static int hvmemul_do_io(
> >          else
> >          {
> >              rc = X86EMUL_RETRY;
> > -            if ( !hvm_send_assist_req(curr, p) )
> > +            if ( !hvm_send_assist_req(curr, &p) )
> >                  vio->io_state = HVMIO_none;
> >              else if ( p_data == NULL )
> >                  rc = X86EMUL_OKAY;
> > @@ -238,7 +238,7 @@ static int hvmemul_do_io(
> >
> >   finish_access:
> >      if ( dir == IOREQ_READ )
> > -        hvmtrace_io_assist(is_mmio, p);
> > +        hvmtrace_io_assist(is_mmio, &p);
> >
> >      if ( p_data != NULL )
> >          memcpy(p_data, &vio->io_data, size);
> > diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> > index 0b2e57e..10b8e8c 100644
> > --- a/xen/arch/x86/hvm/hvm.c
> > +++ b/xen/arch/x86/hvm/hvm.c
> > @@ -349,7 +349,9 @@ static ioreq_t *get_ioreq(struct vcpu *v)
> >  {
> >      struct domain *d = v->domain;
> >      shared_iopage_t *p = d->arch.hvm_domain.ioreq.va;
> > +
> >      ASSERT((v == current) || spin_is_locked(&d->arch.hvm_domain.ioreq.lock));
> > +
> >      return p ? &p->vcpu_ioreq[v->vcpu_id] : NULL;
> >  }
> >
> > diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
> > index ba50c53..7aac61d 100644
> > --- a/xen/arch/x86/hvm/io.c
> > +++ b/xen/arch/x86/hvm/io.c
> > @@ -49,22 +49,19 @@
> >  void send_timeoffset_req(unsigned long timeoff)
> >  {
> >      struct vcpu *curr = current;
> > -    ioreq_t p[1];
> > +    ioreq_t p = {
> > +        .type = IOREQ_TYPE_TIMEOFFSET,
> > +        .size = 8,
> > +        .count = 1,
> > +        .dir = IOREQ_WRITE,
> > +        .data = timeoff,
> > +        .state = STATE_IOREQ_READY,
> > +    };
> >
> >      if ( timeoff == 0 )
> >          return;
> >
> > -    memset(p, 0, sizeof(*p));
> > -
> > -    p->type = IOREQ_TYPE_TIMEOFFSET;
> > -    p->size = 8;
> > -    p->count = 1;
> > -    p->dir = IOREQ_WRITE;
> > -    p->data = timeoff;
> > -
> > -    p->state = STATE_IOREQ_READY;
> > -
> > -    if ( !hvm_buffered_io_send(curr->domain, p) )
> > +    if ( !hvm_buffered_io_send(curr->domain, &p) )
> >          printk("Unsuccessful timeoffset update\n");
> >  }
> >
> > @@ -72,14 +69,14 @@ void send_timeoffset_req(unsigned long timeoff)
> >  void send_invalidate_req(void)
> >  {
> >      struct vcpu *curr = current;
> > -    ioreq_t p[1];
> > -
> > -    p->type = IOREQ_TYPE_INVALIDATE;
> > -    p->size = 4;
> > -    p->dir = IOREQ_WRITE;
> > -    p->data = ~0UL; /* flush all */
> > -
> > -    (void)hvm_send_assist_req(curr, p);
> > +    ioreq_t p = {
> > +        .type = IOREQ_TYPE_INVALIDATE,
> > +        .size = 4,
> > +        .dir = IOREQ_WRITE,
> > +        .data = ~0UL, /* flush all */
> > +    };
> > +
> > +    (void)hvm_send_assist_req(curr, &p);
> >  }
> >
> >  int handle_mmio(void)
> > --
> > 1.7.10.4
> >
> >
> > _______________________________________________
> > Xen-devel mailing list
> > Xen-devel@xxxxxxxxxxxxx
> > http://lists.xen.org/xen-devel
