Re: [Xen-devel] [v3 11/15] Update IRTE according to guest interrupt config changes
On 24/06/15 06:18, Feng Wu wrote:
> When the guest changes its interrupt configuration (such as the vector)
> for direct-assigned devices, we need to update the associated IRTE
> with the new guest vector, so external interrupts from the assigned
> devices can be injected into guests without a VM-Exit.
>
> For lowest-priority interrupts, we use the vector-hashing mechanism to find
> the destination vCPU. This follows the hardware behavior, since modern
> Intel CPUs use vector hashing to handle lowest-priority interrupts.
>
> For multicast/broadcast destinations, we cannot handle them via interrupt
> posting, so interrupt remapping is still used.
>
> Signed-off-by: Feng Wu <feng.wu@xxxxxxxxx>
> ---
> v3:
> - Use a bitmap to store all the possible destination vCPUs of an
> interrupt, then try to find the right destination from the bitmap
> - Typo fixes and some small changes
>
> xen/drivers/passthrough/io.c | 96 +++++++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 95 insertions(+), 1 deletion(-)
>
> diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
> index 9b77334..18e24e1 100644
> --- a/xen/drivers/passthrough/io.c
> +++ b/xen/drivers/passthrough/io.c
> @@ -26,6 +26,7 @@
> #include <asm/hvm/iommu.h>
> #include <asm/hvm/support.h>
> #include <xen/hvm/irq.h>
> +#include <asm/io_apic.h>
>
> static DEFINE_PER_CPU(struct list_head, dpci_list);
>
> @@ -199,6 +200,78 @@ void free_hvm_irq_dpci(struct hvm_irq_dpci *dpci)
> xfree(dpci);
> }
>
> +/*
> + * The purpose of this routine is to find the right destination vCPU for
> + * an interrupt which will be delivered by VT-d posted-interrupt. There
> + * are several cases as below:
> + *
> + * - For lowest-priority interrupts, we find the destination vCPU from the
> + * guest vector using the vector-hashing mechanism and return true. This follows
> + * the hardware behavior, since modern Intel CPUs use vector hashing to
> + * handle the lowest-priority interrupt.
> + * - Otherwise, for a single-destination interrupt, it is straightforward to
> + * find the destination vCPU and return true.
> + * - For multicast/broadcast vCPU, we cannot handle it via interrupt posting,
> + * so return false.
s/false/NULL/ ?
> + *
> + * Here are the details of the vector-hashing mechanism:
> + * 1. For lowest-priority interrupts, store all the possible destination
> + * vCPUs in an array.
> + * 2. Use "gvec % max number of destination vCPUs" to find the right
> + * destination vCPU in the array for the lowest-priority interrupt.
> + */
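For reference, the selection loop below boils down to picking set bit
number (gvec % dest_vcpu_num) in the bitmap.  A standalone sketch of
that selection (illustrative only; pick_hashed_dest() is a made-up
name, find_next_bit() as in Xen's bitops):

    /* Return the index of set bit number (gvec % nr_dest) in bitmap[]. */
    static unsigned int pick_hashed_dest(const unsigned long *bitmap,
                                         unsigned int nr_bits,
                                         unsigned int nr_dest, uint8_t gvec)
    {
        unsigned int i, idx = 0;

        for ( i = 0; i <= gvec % nr_dest; i++ )
            idx = find_next_bit(bitmap, nr_bits, idx) + 1;

        return idx - 1;
    }

E.g. with 4 candidate vCPUs and gvec == 0x31, 0x31 % 4 == 1, so the
second set bit is chosen.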
> +static struct vcpu *pi_find_dest_vcpu(struct domain *d, uint8_t dest_id,
dest_id should clearly be 32bits rather than 8.
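i.e. something like (a sketch, other parameters unchanged):

    static struct vcpu *pi_find_dest_vcpu(struct domain *d, uint32_t dest_id,
                                          uint8_t dest_mode,
                                          uint8_t delivery_mode, uint8_t gvec)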
> + uint8_t dest_mode, uint8_t delivery_mode,
> + uint8_t gvec)
> +{
> + unsigned long *dest_vcpu_bitmap = NULL;
> + unsigned int dest_vcpu_num = 0, idx = 0;
> + int size = (d->max_vcpus + BITS_PER_LONG - 1) / BITS_PER_LONG;
unsigned int, and "size" is far too generic of a name.
> + struct vcpu *v, *dest = NULL;
> + int i;
Also unsigned.
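Taken together, the declarations might become (a sketch, assuming
BITS_TO_LONGS() is in scope in this file):

    unsigned int bitmap_longs = BITS_TO_LONGS(d->max_vcpus);
    struct vcpu *v, *dest = NULL;
    unsigned int i;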
> +
> + dest_vcpu_bitmap = xzalloc_array(unsigned long, size);
> + if ( !dest_vcpu_bitmap )
> + {
> + dprintk(XENLOG_G_INFO,
> + "dom%d: failed to allocate memory\n", d->domain_id);
> + return NULL;
> + }
> +
> + for_each_vcpu ( d, v )
> + {
> + if ( !vlapic_match_dest(vcpu_vlapic(v), NULL, 0,
> + dest_id, dest_mode) )
> + continue;
> +
> + __set_bit(v->vcpu_id, dest_vcpu_bitmap);
> + dest_vcpu_num++;
> + }
> +
> + if ( delivery_mode == dest_LowestPrio )
> + {
> + if ( dest_vcpu_num != 0 )
Too many spaces inside the brackets.
> + {
> + for ( i = 0; i <= gvec % dest_vcpu_num; i++)
> + idx = find_next_bit(dest_vcpu_bitmap, d->max_vcpus, idx) + 1;
> + idx--;
> +
> + BUG_ON(idx >= d->max_vcpus || idx < 0);
> + dest = d->vcpu[idx];
> + }
> + }
> + else if ( dest_vcpu_num == 1 )
> + {
> + idx = find_first_bit(dest_vcpu_bitmap, d->max_vcpus);
> + BUG_ON(idx >= d->max_vcpus || idx < 0);
find_first_bit() is unsigned, so can never be less than 0.
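i.e. the check can shrink to:

    BUG_ON(idx >= d->max_vcpus);

(The same applies to the earlier BUG_ON() in the lowest-priority path,
since idx is unsigned int there too.)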
> + dest = d->vcpu[idx];
> + }
> +
> + xfree(dest_vcpu_bitmap);
> +
> + return dest;
> +}
> +
> int pt_irq_create_bind(
> struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
> {
> @@ -257,7 +330,7 @@ int pt_irq_create_bind(
> {
> case PT_IRQ_TYPE_MSI:
> {
> - uint8_t dest, dest_mode;
> + uint8_t dest, dest_mode, delivery_mode;
> int dest_vcpu_id;
>
> if ( !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) )
> @@ -330,11 +403,32 @@ int pt_irq_create_bind(
> /* Calculate dest_vcpu_id for MSI-type pirq migration. */
> dest = pirq_dpci->gmsi.gflags & VMSI_DEST_ID_MASK;
> dest_mode = !!(pirq_dpci->gmsi.gflags & VMSI_DM_MASK);
> + delivery_mode = (pirq_dpci->gmsi.gflags >> GFLAGS_SHIFT_DELIV_MODE) &
> + VMSI_DELIV_MASK;
> dest_vcpu_id = hvm_girq_dest_2_vcpu_id(d, dest, dest_mode);
> pirq_dpci->gmsi.dest_vcpu_id = dest_vcpu_id;
> spin_unlock(&d->event_lock);
> if ( dest_vcpu_id >= 0 )
> hvm_migrate_pirqs(d->vcpu[dest_vcpu_id]);
> +
> + /* Use interrupt posting if it is supported */
> + if ( iommu_intpost )
> + {
> + struct vcpu *vcpu = pi_find_dest_vcpu(d, dest, dest_mode,
> + delivery_mode, pirq_dpci->gmsi.gvec);
> +
> + if ( !vcpu )
> + dprintk(XENLOG_G_WARNING,
> + "dom%u: failed to find the dest vCPU for PI, guest "
> + "vector:0x%x use software way to deliver the "
> + " interrupts.\n", d->domain_id,
> pirq_dpci->gmsi.gvec);
If this is normal for a multicast interrupt, it must not be a WARNING
level error message. It probably shouldn't even be a message at all.
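If a fallback message is kept at all, something along these lines seems
more appropriate (a sketch):

    dprintk(XENLOG_G_DEBUG,
            "dom%d: no single destination vCPU for PI (gvec 0x%x), "
            "falling back to remapped delivery\n",
            d->domain_id, pirq_dpci->gmsi.gvec);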
~Andrew
> + else if ( pi_update_irte( vcpu, info, pirq_dpci->gmsi.gvec ) != 0 )
> + dprintk(XENLOG_G_WARNING,
> + "%pv: failed to update PI IRTE, guest vector:0x%x "
> + "use software way to deliver the interrupts.\n",
> + vcpu, pirq_dpci->gmsi.gvec);
> + }
> +
> break;
> }
>