Re: [Xen-devel] [PATCH v4 02/16] xen/arm: make mmio handlers domain specific
On Mon, 26 May 2014, Julien Grall wrote:
> Hi Vijay,
>
> On 26/05/14 11:26, vijay.kilari@xxxxxxxxx wrote:
> > int handle_mmio(mmio_info_t *info)
> > {
> > struct vcpu *v = current;
> > int i;
> > + struct mmio_handler *mmio_handler;
> > + struct io_handler *io_handlers = &v->domain->arch.io_handlers;
>
> NIT: I think mmio_handler and io_handlers can be const.
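For reference, the const-qualified declarations would just be (a minimal sketch,
reusing the names from the hunk above):

    /* Lookup state can be const: handle_mmio() only reads the handlers. */
    const struct mmio_handler *mmio_handler;
    const struct io_handler *io_handlers = &v->domain->arch.io_handlers;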
>
> > +
> > +void register_mmio_handler(struct domain *d,
> > + const struct mmio_handler_ops *handle,
> > + paddr_t addr, paddr_t size)
> > +{
> > + struct io_handler *handler = &d->arch.io_handlers;
> > +
> > + BUG_ON(handler->num_entries >= MAX_IO_HANDLER);
> > +
> > + spin_lock(&handler->lock);
> > +
> > + handler->mmio_handlers[handler->num_entries].mmio_handler_ops = handle;
> > + handler->mmio_handlers[handler->num_entries].addr = addr;
> > + handler->mmio_handlers[handler->num_entries].size = size;
> > + handler->num_entries++;
> > + dsb(sy);
>
> This is wrong. As I said on the previous version, the dsb needs to be called
> before incrementing num_entries.
>
> This is because you don't use a spinlock in handle_mmio: you have to make sure
> the array modification has reached memory before updating num_entries.
>
> At the same time dsb(is) is enough.
Yeah, this needs to be fixed.
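Something like this for the write side, I think (just a sketch, reusing the names
from the hunk above, with the barrier moved before the increment):

    spin_lock(&handler->lock);

    /* Fill in the new entry first... */
    handler->mmio_handlers[handler->num_entries].mmio_handler_ops = handle;
    handler->mmio_handlers[handler->num_entries].addr = addr;
    handler->mmio_handlers[handler->num_entries].size = size;

    /* ...and make sure it has reached memory before publishing it via
     * num_entries, because handle_mmio() walks the array without taking
     * the lock. (An inner-shareable dsb would be sufficient here.) */
    dsb(sy);

    handler->num_entries++;

    spin_unlock(&handler->lock);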
> > diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> > index 4962e70..151ec3e 100644
> > --- a/xen/arch/arm/vgic.c
> > +++ b/xen/arch/arm/vgic.c
> > @@ -73,43 +73,6 @@ static struct vgic_irq_rank *vgic_irq_rank(struct vcpu *v, int b, int n)
> > return NULL;
> > }
> >
> > -int domain_vgic_init(struct domain *d)
> > -{
> > - int i;
> > -
> > - d->arch.vgic.ctlr = 0;
> > -
> > - /* Currently nr_lines in vgic and gic doesn't have the same meanings
> > - * Here nr_lines = number of SPIs
> > - */
> > - if ( is_hardware_domain(d) )
> > - d->arch.vgic.nr_lines = gic_number_lines() - 32;
> > - else
> > - d->arch.vgic.nr_lines = 0; /* We don't need SPIs for the guest */
> > -
> > - d->arch.vgic.shared_irqs =
> > - xzalloc_array(struct vgic_irq_rank, DOMAIN_NR_RANKS(d));
> > - if ( d->arch.vgic.shared_irqs == NULL )
> > - return -ENOMEM;
> > -
> > - d->arch.vgic.pending_irqs =
> > - xzalloc_array(struct pending_irq, d->arch.vgic.nr_lines);
> > - if ( d->arch.vgic.pending_irqs == NULL )
> > - {
> > - xfree(d->arch.vgic.shared_irqs);
> > - return -ENOMEM;
> > - }
> > -
> > - for (i=0; i<d->arch.vgic.nr_lines; i++)
> > - {
> > - INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].inflight);
> > - INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].lr_queue);
> > - }
> > - for (i=0; i<DOMAIN_NR_RANKS(d); i++)
> > - spin_lock_init(&d->arch.vgic.shared_irqs[i].lock);
> > - return 0;
> > -}
> > -
>
> Rather than moving a whole chunk of code, why can't you add forward
> declarations for vgic_distr_mmio_{read,write}?
I think it would be OK either way.
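For completeness, the forward-declaration route would just need something like this
near the top of vgic.c (prototypes assumed here to match the existing static handlers):

    /* Forward declarations so domain_vgic_init() can stay where it is
     * (prototypes assumed; they must match the static definitions below). */
    static int vgic_distr_mmio_read(struct vcpu *v, mmio_info_t *info);
    static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info);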
> > void domain_vgic_free(struct domain *d)
> > {
> > xfree(d->arch.vgic.shared_irqs);
> > @@ -676,15 +639,7 @@ write_ignore:
> > return 1;
> > }
> >
> > -static int vgic_distr_mmio_check(struct vcpu *v, paddr_t addr)
> > -{
> > - struct domain *d = v->domain;
> > -
> > - return (addr >= (d->arch.vgic.dbase)) && (addr < (d->arch.vgic.dbase + PAGE_SIZE));
> > -}
> > -
> > -const struct mmio_handler vgic_distr_mmio_handler = {
> > - .check_handler = vgic_distr_mmio_check,
> > +const struct mmio_handler_ops vgic_distr_mmio_handler = {
> > .read_handler = vgic_distr_mmio_read,
> > .write_handler = vgic_distr_mmio_write,
> > };
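With check_handler gone, the range test presumably moves into the generic lookup in
handle_mmio; roughly (a sketch only, assuming the faulting address is info->gpa and
the field names from the io_handlers hunk above):

    /* Possible generic range match, replacing the per-handler
     * check_handler callbacks. */
    for ( i = 0; i < io_handlers->num_entries; i++ )
    {
        mmio_handler = &io_handlers->mmio_handlers[i];
        if ( info->gpa >= mmio_handler->addr &&
             info->gpa < mmio_handler->addr + mmio_handler->size )
            break;   /* matching handler found */
    }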
> > @@ -766,6 +721,38 @@ out:
> > smp_send_event_check_mask(cpumask_of(v->processor));
> > }
> >
> > +int domain_vgic_init(struct domain *d)
> > +{
> > + int i;
> > +
> > + d->arch.vgic.ctlr = 0;
> > +
> > + /* Currently nr_lines in vgic and gic doesn't have the same meanings
> > + * Here nr_lines = number of SPIs
> > + */
> > + if ( d->domain_id == 0 )
> > + d->arch.vgic.nr_lines = gic_number_lines() - 32;
> > + else
> > + d->arch.vgic.nr_lines = 0; /* We don't need SPIs for the guest */
> > +
> > + d->arch.vgic.shared_irqs =
> > + xzalloc_array(struct vgic_irq_rank, DOMAIN_NR_RANKS(d));
> > + d->arch.vgic.pending_irqs =
> > + xzalloc_array(struct pending_irq, d->arch.vgic.nr_lines);
> > + for (i=0; i<d->arch.vgic.nr_lines; i++)
> > + {
> > + INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].inflight);
> > + INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].lr_queue);
> > + }
> > + for (i=0; i<DOMAIN_NR_RANKS(d); i++)
> > + spin_lock_init(&d->arch.vgic.shared_irqs[i].lock);
> > +
> > + register_mmio_handler(d, &vgic_distr_mmio_handler,
> > + d->arch.vgic.dbase, PAGE_SIZE);
> > +
>
> It sounds a bit strange to call register_mmio_handler here and let gicv_setup
> set dbase. Can you add a comment saying something like "We rely on gicv_setup
> to initialize dbase"?
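E.g. (comment wording taken from the suggestion above; placement is just a sketch):

    /* We rely on gicv_setup() to initialize dbase. */
    register_mmio_handler(d, &vgic_distr_mmio_handler,
                          d->arch.vgic.dbase, PAGE_SIZE);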
>
> > + return 0;
> > +}
> > +
> > /*
> > * Local variables:
> > * mode: C
> > diff --git a/xen/arch/arm/vuart.c b/xen/arch/arm/vuart.c
> > index 953cd46..52f3259 100644
> > --- a/xen/arch/arm/vuart.c
> > +++ b/xen/arch/arm/vuart.c
> > @@ -44,24 +44,6 @@
> >
> > #define domain_has_vuart(d) ((d)->arch.vuart.info != NULL)
> >
> > -int domain_vuart_init(struct domain *d)
> > -{
> > - ASSERT( is_hardware_domain(d) );
> > -
> > - d->arch.vuart.info = serial_vuart_info(SERHND_DTUART);
> > - if ( !d->arch.vuart.info )
> > - return 0;
> > -
> > - spin_lock_init(&d->arch.vuart.lock);
> > - d->arch.vuart.idx = 0;
> > -
> > - d->arch.vuart.buf = xzalloc_array(char, VUART_BUF_SIZE);
> > - if ( !d->arch.vuart.buf )
> > - return -ENOMEM;
> > -
> > - return 0;
> > -}
> > -
>
> Same remark here about forward declaration.
>
> Regards,
>
> --
> Julien Grall
>