
Re: [Xen-devel] [PATCH v9 22/28] ARM: vITS: handle MOVI command



On Thu, 11 May 2017, Andre Przywara wrote:
> The MOVI command moves the interrupt affinity from one redistributor
> (read: VCPU) to another.
> For now migration of "live" LPIs is not yet implemented, but we store
> the changed affinity in the host LPI structure and in our virtual ITTE.
> 
> Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
> ---
>  xen/arch/arm/gic-v3-its.c        | 30 ++++++++++++++++++++
>  xen/arch/arm/gic-v3-lpi.c        | 15 ++++++++++
>  xen/arch/arm/vgic-v3-its.c       | 59 ++++++++++++++++++++++++++++++++++++++++
>  xen/include/asm-arm/gic_v3_its.h |  4 +++
>  4 files changed, 108 insertions(+)
> 
> diff --git a/xen/arch/arm/gic-v3-its.c b/xen/arch/arm/gic-v3-its.c
> index 8a50f7d..f00597e 100644
> --- a/xen/arch/arm/gic-v3-its.c
> +++ b/xen/arch/arm/gic-v3-its.c
> @@ -915,6 +915,36 @@ struct pending_irq *gicv3_assign_guest_event(struct domain *d,
>      return pirq;
>  }
>  
> +/* Changes the target VCPU for a given host LPI assigned to a domain. */
> +int gicv3_lpi_change_vcpu(struct domain *d, paddr_t vdoorbell,
> +                          uint32_t vdevid, uint32_t veventid,
> +                          unsigned int vcpu_id)
> +{
> +    uint32_t host_lpi;
> +    struct its_device *dev;
> +
> +    spin_lock(&d->arch.vgic.its_devices_lock);
> +    dev = get_its_device(d, vdoorbell, vdevid);
> +    if ( dev )
> +        host_lpi = get_host_lpi(dev, veventid);
> +    else
> +        host_lpi = 0;
> +    spin_unlock(&d->arch.vgic.its_devices_lock);
> +
> +    if ( !host_lpi )
> +        return -ENOENT;
> +
> +    /*
> +     * TODO: This just changes the virtual affinity, the physical LPI
> +     * still stays on the same physical CPU.
> +     * Consider to move the physical affinity to the pCPU running the new
> +     * vCPU. However this requires scheduling a host ITS command.
> +     */
> +    gicv3_lpi_update_host_vcpuid(host_lpi, vcpu_id);
> +
> +    return 0;
> +}
> +
>  /* Scan the DT for any ITS nodes and create a list of host ITSes out of it. */
>  void gicv3_its_dt_init(const struct dt_device_node *node)
>  {
> diff --git a/xen/arch/arm/gic-v3-lpi.c b/xen/arch/arm/gic-v3-lpi.c
> index d427539..6af5ad9 100644
> --- a/xen/arch/arm/gic-v3-lpi.c
> +++ b/xen/arch/arm/gic-v3-lpi.c
> @@ -225,6 +225,21 @@ void gicv3_lpi_update_host_entry(uint32_t host_lpi, int domain_id,
>      write_u64_atomic(&hlpip->data, hlpi.data);
>  }
>  
> +int gicv3_lpi_update_host_vcpuid(uint32_t host_lpi, unsigned int vcpu_id)
> +{
> +    union host_lpi *hlpip;
> +
> +    ASSERT(host_lpi >= LPI_OFFSET);
> +
> +    host_lpi -= LPI_OFFSET;
> +
> +    hlpip = &lpi_data.host_lpis[host_lpi / HOST_LPIS_PER_PAGE][host_lpi % HOST_LPIS_PER_PAGE];
> +
> +    write_u16_atomic(&hlpip->vcpu_id, vcpu_id);
> +
> +    return 0;
> +}
> +
>  static int gicv3_lpi_allocate_pendtable(uint64_t *reg)
>  {
>      uint64_t val;
> diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
> index c5c0e5e..ef7c78f 100644
> --- a/xen/arch/arm/vgic-v3-its.c
> +++ b/xen/arch/arm/vgic-v3-its.c
> @@ -670,6 +670,59 @@ out_remove_mapping:
>      return ret;
>  }
>  
> +static int its_handle_movi(struct virt_its *its, uint64_t *cmdptr)
> +{
> +    uint32_t devid = its_cmd_get_deviceid(cmdptr);
> +    uint32_t eventid = its_cmd_get_id(cmdptr);
> +    uint16_t collid = its_cmd_get_collection(cmdptr);
> +    unsigned long flags;
> +    struct pending_irq *p;
> +    struct vcpu *ovcpu, *nvcpu;
> +    uint32_t vlpi;
> +    int ret = -1;
> +
> +    spin_lock(&its->its_lock);
> +    /* Check for a mapped LPI and get the LPI number. */
> +    if ( !read_itte_locked(its, devid, eventid, &ovcpu, &vlpi) )
> +        goto out_unlock;
> +
> +    if ( vlpi == INVALID_LPI )
> +        goto out_unlock;
> +
> +    /* Check the new collection ID and get the new VCPU pointer */
> +    nvcpu = get_vcpu_from_collection(its, collid);
> +    if ( !nvcpu )
> +        goto out_unlock;
> +
> +    p = gicv3_its_get_event_pending_irq(its->d, its->doorbell_address,
> +                                        devid, eventid);
> +    if ( unlikely(!p) )
> +        goto out_unlock;
> +
> +    spin_lock_irqsave(&ovcpu->arch.vgic.lock, flags);
> +
> +    /* Update our cached vcpu_id in the pending_irq. */
> +    p->lpi_vcpu_id = nvcpu->vcpu_id;

I think we need to call gicv3_lpi_update_host_vcpuid here.
gicv3_lpi_update_host_vcpuid and this line change the vcpu target: they
need to be called in a region that is protected by both its_lock and
v->arch.vgic.lock.

In addition, right before calling gicv3_lpi_update_host_vcpuid, we need
to change the target for a possible existing inflight interrupt (see
vgic_migrate_irq). We need to handle two cases. The first is the vLPI
being inflight but not in an LR yet, which corresponds to the

  if ( !list_empty(&p->lr_queue) )

case in vgic_migrate_irq. In that case, we remove the struct pending_irq
from the old vcpu's lr_queue and inflight lists and add it to the new
vcpu's lists. The second, more difficult, case is a vLPI which is both
inflight and in an LR. In the code it corresponds to:

  if ( !list_empty(&p->inflight) )

and we need to set GIC_IRQ_GUEST_MIGRATING. In other words, we need to
call a function that is pretty much like vgic_migrate_irq but without
the irq_set_affinity calls, which we cannot handle for LPIs yet.
Alternatively, you could just call vgic_migrate_irq itself, making sure
that irq_set_affinity does nothing for LPIs for now.

In the past you replied that vgic_migrate_irq starts with:

if ( p->desc == NULL )

so it wouldn't work for LPIs. Of course, we need to change that check,
but overall the function can be made to work for LPIs as long as
irq_set_affinity does something sensible for them.

If you prefer to implement this after the vgic lock rework, add a TODO
comment, and maybe a BUG_ON(!list_empty(&p->inflight)).


> +    spin_unlock_irqrestore(&ovcpu->arch.vgic.lock, flags);
> +
> +    /* Now store the new collection in the translation table. */
> +    if ( !write_itte_locked(its, devid, eventid, collid, vlpi, &nvcpu) )
> +        goto out_unlock;
> +
> +    spin_unlock(&its->its_lock);
> +
> +    /* TODO: lookup currently-in-guest virtual IRQs and migrate them? */
> +
> +    return gicv3_lpi_change_vcpu(its->d, its->doorbell_address,
> +                                 devid, eventid, nvcpu->vcpu_id);
> +
> +out_unlock:
> +    spin_unlock(&its->its_lock);
> +
> +    return ret;
> +}
> +
>  #define ITS_CMD_BUFFER_SIZE(baser)      ((((baser) & 0xff) + 1) << 12)
>  #define ITS_CMD_OFFSET(reg)             ((reg) & GENMASK(19, 5))
>  
> @@ -715,6 +768,12 @@ static int vgic_its_handle_cmds(struct domain *d, struct virt_its *its)
>          case GITS_CMD_MAPTI:
>              ret = its_handle_mapti(its, command);
>              break;
> +        case GITS_CMD_MOVALL:
> +            gdprintk(XENLOG_G_INFO, "vGITS: ignoring MOVALL command\n");
> +            break;
> +        case GITS_CMD_MOVI:
> +            ret = its_handle_movi(its, command);
> +            break;
>          case GITS_CMD_SYNC:
>              /* We handle ITS commands synchronously, so we ignore SYNC. */
>              break;
> diff --git a/xen/include/asm-arm/gic_v3_its.h b/xen/include/asm-arm/gic_v3_its.h
> index 9c08cee..82d788c 100644
> --- a/xen/include/asm-arm/gic_v3_its.h
> +++ b/xen/include/asm-arm/gic_v3_its.h
> @@ -178,8 +178,12 @@ int gicv3_remove_guest_event(struct domain *d, paddr_t vdoorbell_address,
>  struct pending_irq *gicv3_assign_guest_event(struct domain *d, paddr_t doorbell,
>                                               uint32_t devid, uint32_t eventid,
>                                               struct vcpu *v, uint32_t virt_lpi);
> +int gicv3_lpi_change_vcpu(struct domain *d, paddr_t doorbell,
> +                          uint32_t devid, uint32_t eventid,
> +                          unsigned int vcpu_id);
>  void gicv3_lpi_update_host_entry(uint32_t host_lpi, int domain_id,
>                                   unsigned int vcpu_id, uint32_t virt_lpi);
> +int gicv3_lpi_update_host_vcpuid(uint32_t host_lpi, unsigned int vcpu_id);
>  
>  #else
>  
> -- 
> 2.9.0
> 
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 

