
[Xen-devel] [RFC PATCH V2 14/26] X86/vvtd: Process interrupt remapping request



From: Chao Gao <chao.gao@xxxxxxxxx>

When a remappable interrupt request arrives, the remapping hardware computes
the interrupt_index per the algorithm described in the VT-d spec section
"Interrupt Remapping Table", fetches and interprets the referenced IRTE, and
generates the remapped interrupt request.

This patch introduces viommu_handle_irq_request() to emulate how remapping
hardware handles a remapping interrupt request; a standalone sketch of the
index computation follows below.
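
For reference, here is a minimal standalone sketch (not part of the patch) of
that index computation. It mirrors the MSI_REMAP_ENTRY_INDEX() and
IOAPIC_REMAP_ENTRY_INDEX() macros added to vtd.h below; the bit positions
follow the VT-d spec's remappable-format MSI address/data and IO-APIC RTE
layouts:

    #include <stdint.h>

    /* Remappable MSI: handle[14:0] = addr[19:5], handle[15] = addr[2],
     * SHV = addr[3]; when SHV is set, the subhandle is data[15:0]. */
    static uint32_t msi_interrupt_index(uint32_t addr, uint32_t data)
    {
        uint32_t handle = ((addr >> 5) & 0x7fff) | (((addr >> 2) & 1) << 15);

        return handle + (((addr >> 3) & 1) ? (uint16_t)data : 0);
    }

    /* Remappable IO-APIC RTE: index[14:0] = rte[63:49], index[15] = rte[11]. */
    static uint32_t ioapic_interrupt_index(uint64_t rte)
    {
        return ((rte >> 49) & 0x7fff) | (((rte >> 11) & 1) << 15);
    }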

Signed-off-by: Chao Gao <chao.gao@xxxxxxxxx>
Signed-off-by: Lan Tianyu <tianyu.lan@xxxxxxxxx>
---
 xen/arch/x86/hvm/vvtd.c             | 279 +++++++++++++++++++++++++++++++++++-
 xen/drivers/passthrough/vtd/iommu.h |  21 +++
 xen/drivers/passthrough/vtd/vtd.h   |   6 +
 3 files changed, 305 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/hvm/vvtd.c b/xen/arch/x86/hvm/vvtd.c
index b6fd34b..c993a15 100644
--- a/xen/arch/x86/hvm/vvtd.c
+++ b/xen/arch/x86/hvm/vvtd.c
@@ -23,12 +23,17 @@
 #include <xen/types.h>
 #include <xen/viommu.h>
 #include <xen/xmalloc.h>
+#include <asm/apic.h>
 #include <asm/current.h>
+#include <asm/event.h>
 #include <asm/hvm/domain.h>
+#include <asm/io_apic.h>
 #include <asm/page.h>
+#include <asm/p2m.h>
 #include <public/viommu.h>
 
 #include "../../../drivers/passthrough/vtd/iommu.h"
+#include "../../../drivers/passthrough/vtd/vtd.h"
 
 struct hvm_hw_vvtd_regs {
     uint8_t data[1024];
@@ -38,6 +43,9 @@ struct hvm_hw_vvtd_regs {
 #define VIOMMU_STATUS_IRQ_REMAPPING_ENABLED     (1 << 0)
 #define VIOMMU_STATUS_DMA_REMAPPING_ENABLED     (1 << 1)
 
+#define vvtd_irq_remapping_enabled(vvtd) \
+            ((vvtd)->status & VIOMMU_STATUS_IRQ_REMAPPING_ENABLED)
+
 struct vvtd {
     /* VIOMMU_STATUS_XXX_REMAPPING_ENABLED */
     int status;
@@ -120,6 +128,140 @@ static inline uint8_t vvtd_get_reg_byte(struct vvtd *vtd, uint32_t reg)
     vvtd_set_reg(vvtd, (reg) + 4, (val) >> 32); \
 } while(0)
 
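+/*
+ * Map a guest frame so vvtd can access a page of the guest's interrupt
+ * remapping table; takes a writable page-type reference that is dropped
+ * by unmap_guest_page().
+ */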
+static int map_guest_page(struct domain *d, uint64_t gfn, void **virt)
+{
+    struct page_info *p;
+
+    p = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
+    if ( !p )
+        return -EINVAL;
+
+    if ( !get_page_type(p, PGT_writable_page) )
+    {
+        put_page(p);
+        return -EINVAL;
+    }
+
+    *virt = __map_domain_page_global(p);
+    if ( !*virt )
+    {
+        put_page_and_type(p);
+        return -ENOMEM;
+    }
+    return 0;
+}
+
+static void unmap_guest_page(void *virt)
+{
+    struct page_info *page;
+
+    if ( !virt )
+        return;
+
+    virt = (void *)((unsigned long)virt & PAGE_MASK);
+    page = mfn_to_page(domain_page_map_to_mfn(virt));
+
+    unmap_domain_page_global(virt);
+    put_page_and_type(page);
+}
+
+static void vvtd_inj_irq(
+    struct vlapic *target,
+    uint8_t vector,
+    uint8_t trig_mode,
+    uint8_t delivery_mode)
+{
+    VVTD_DEBUG(VVTD_DBG_INFO, "dest=v%d, delivery_mode=%x vector=%d "
+               "trig_mode=%d.",
+               vlapic_vcpu(target)->vcpu_id, delivery_mode,
+               vector, trig_mode);
+
+    ASSERT((delivery_mode == dest_Fixed) ||
+           (delivery_mode == dest_LowestPrio));
+
+    vlapic_set_irq(target, vector, trig_mode);
+}
+
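+/*
+ * Deliver the remapped interrupt to the guest's virtual LAPIC(s); the
+ * delivery logic follows vmsi_deliver() for the fixed, lowest-priority
+ * and NMI modes.
+ */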
+static int vvtd_delivery(
+    struct domain *d, int vector,
+    uint32_t dest, uint8_t dest_mode,
+    uint8_t delivery_mode, uint8_t trig_mode)
+{
+    struct vlapic *target;
+    struct vcpu *v;
+
+    switch ( delivery_mode )
+    {
+    case dest_LowestPrio:
+        target = vlapic_lowest_prio(d, NULL, 0, dest, dest_mode);
+        if ( target != NULL )
+        {
+            vvtd_inj_irq(target, vector, trig_mode, delivery_mode);
+            break;
+        }
+        VVTD_DEBUG(VVTD_DBG_INFO, "null round robin: vector=%02x\n", vector);
+        break;
+
+    case dest_Fixed:
+        for_each_vcpu ( d, v )
+            if ( vlapic_match_dest(vcpu_vlapic(v), NULL, 0, dest,
+                                   dest_mode) )
+                vvtd_inj_irq(vcpu_vlapic(v), vector,
+                             trig_mode, delivery_mode);
+        break;
+
+    case dest_NMI:
+        for_each_vcpu ( d, v )
+            if ( vlapic_match_dest(vcpu_vlapic(v), NULL, 0, dest, dest_mode)
+                 && !test_and_set_bool(v->nmi_pending) )
+                vcpu_kick(v);
+        break;
+
+    default:
+        printk(XENLOG_G_WARNING
+               "%pv: Unsupported VTD delivery mode %d for Dom%d\n",
+               current, delivery_mode, d->domain_id);
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
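+/*
+ * Extract the interrupt_index encoded in a remappable request, either from
+ * the MSI address/data pair or from the remappable IO-APIC RTE.
+ */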
+static uint32_t irq_remapping_request_index(struct irq_remapping_request *irq)
+{
+    if ( irq->type == VIOMMU_REQUEST_IRQ_MSI )
+    {
+        struct msi_msg_remap_entry msi_msg = { { irq->msg.msi.addr }, 0,
+                                               irq->msg.msi.data };
+
+        return MSI_REMAP_ENTRY_INDEX(msi_msg);
+    }
+    else if ( irq->type == VIOMMU_REQUEST_IRQ_APIC )
+    {
+        struct IO_APIC_route_remap_entry remap_rte = { { irq->msg.rte } };
+
+        return IOAPIC_REMAP_ENTRY_INDEX(remap_rte);
+    }
+    BUG();
+    return 0;
+}
+
+static inline uint32_t irte_dest(struct vvtd *vvtd, uint32_t dest)
+{
+    uint64_t irta;
+
+    vvtd_get_reg_quad(vvtd, DMAR_IRTA_REG, irta);
+    /* In xAPIC mode, only 8 bits ([15:8]) of the destination are valid. */
+    return DMA_IRTA_EIME(irta) ? dest : MASK_EXTR(dest, IRTE_xAPIC_DEST_MASK);
+}
+
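+/* Stub: recording faults in the guest-visible fault registers is not
+ * implemented yet. */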
+static int vvtd_record_fault(struct vvtd *vvtd,
+                             struct irq_remapping_request *irq,
+                             int reason)
+{
+    return 0;
+}
+
 static int vvtd_handle_gcmd_sirtp(struct vvtd *vvtd, uint32_t val)
 {
     uint64_t irta;
@@ -269,6 +411,140 @@ static const struct hvm_mmio_ops vvtd_mmio_ops = {
     .write = vvtd_write
 };
 
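+/* Source-id validation (SVT/SQ/SID checks) is not implemented yet. */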
+static bool ir_sid_valid(struct iremap_entry *irte, uint32_t source_id)
+{
+    return true;
+}
+
+/*
+ * @record_fault: whether to record a fault that occurs while fetching the
+ * vIRTE (true means record it, false means ignore it). record_fault = false
+ * is used during parsing, when only the attributes encoded in the entry
+ * are of interest.
+ */
+static int vvtd_get_entry(struct vvtd *vvtd,
+                          struct irq_remapping_request *irq,
+                          struct iremap_entry *dest,
+                          bool record_fault)
+{
+    int ret;
+    uint32_t entry = irq_remapping_request_index(irq);
+    struct iremap_entry  *irte, *irt_page;
+
+    VVTD_DEBUG(VVTD_DBG_TRANS, "interpret a request with index %x", entry);
+
+    if ( entry > vvtd->irt_max_entry )
+    {
+        ret = VTD_FR_IR_INDEX_OVER;
+        goto handle_fault;
+    }
+
+    ret = map_guest_page(vvtd->domain, vvtd->irt + (entry >> IREMAP_ENTRY_ORDER),
+                         (void**)&irt_page);
+    if ( ret )
+    {
+        ret = VTD_FR_IR_ROOT_INVAL;
+        goto handle_fault;
+    }
+
+    irte = irt_page + (entry % (1 << IREMAP_ENTRY_ORDER));
+    dest->val = irte->val;
+    if ( !qinval_present(*irte) )
+    {
+        ret = VTD_FR_IR_ENTRY_P;
+        goto unmap_handle_fault;
+    }
+
+    /* Check reserved bits */
+    if ( (irte->remap.res_1 || irte->remap.res_2 || irte->remap.res_3 ||
+          irte->remap.res_4) )
+    {
+        ret = VTD_FR_IR_IRTE_RSVD;
+        goto unmap_handle_fault;
+    }
+
+    if ( !ir_sid_valid(irte, irq->source_id) )
+    {
+        ret = VTD_FR_IR_SID_ERR;
+        goto unmap_handle_fault;
+    }
+    unmap_guest_page(irt_page);
+    return 0;
+
+ unmap_handle_fault:
+    unmap_guest_page(irt_page);
+ handle_fault:
+    if ( !record_fault )
+        return ret;
+
+    switch ( ret )
+    {
+    case VTD_FR_IR_SID_ERR:
+    case VTD_FR_IR_IRTE_RSVD:
+    case VTD_FR_IR_ENTRY_P:
+        /* Use the local copy in dest: the page backing irte is unmapped. */
+        if ( qinval_fault_disable(*dest) )
+            break;
+    /* fall through */
+    case VTD_FR_IR_INDEX_OVER:
+    case VTD_FR_IR_ROOT_INVAL:
+        vvtd_record_fault(vvtd, irq, ret);
+        break;
+
+    default:
+        gdprintk(XENLOG_G_INFO, "Can't handle VT-d fault %x\n", ret);
+    }
+    return ret;
+}
+
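+/*
+ * Sanity-check a remappable-format request: callers guarantee the request
+ * is remappable (asserted below); reject IO-APIC RTEs with reserved bits set.
+ */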
+static int vvtd_irq_request_sanity_check(struct vvtd *vvtd,
+                                         struct irq_remapping_request *irq)
+{
+    if ( irq->type == VIOMMU_REQUEST_IRQ_APIC )
+    {
+        struct IO_APIC_route_remap_entry rte = { { irq->msg.rte } };
+
+        ASSERT(rte.format);
+        return (!rte.reserved) ? 0 : VTD_FR_IR_REQ_RSVD;
+    }
+    else if ( irq->type == VIOMMU_REQUEST_IRQ_MSI )
+    {
+        struct msi_msg_remap_entry msi_msg = { { irq->msg.msi.addr } };
+
+        ASSERT(msi_msg.address_lo.format);
+        ASSERT(msi_msg.address_lo.addr_id_val == 0xfee);
+        return 0;
+    }
+    BUG();
+    return 0;
+}
+
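+/*
+ * Emulate hardware handling of a remappable interrupt request: sanity-check
+ * the request, fetch and validate the vIRTE, then deliver the interrupt.
+ */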
+static int vvtd_handle_irq_request(struct domain *d,
+                                   struct irq_remapping_request *irq)
+{
+    struct iremap_entry irte;
+    int ret;
+    struct vvtd *vvtd = domain_vvtd(d);
+
+    if ( !vvtd || !vvtd_irq_remapping_enabled(vvtd) )
+        return -EINVAL;
+
+    ret = vvtd_irq_request_sanity_check(vvtd, irq);
+    if ( ret )
+    {
+        vvtd_record_fault(vvtd, irq, ret);
+        return ret;
+    }
+
+    if ( !vvtd_get_entry(vvtd, irq, &irte, true) )
+    {
+        vvtd_delivery(vvtd->domain, irte.remap.vector,
+                      irte_dest(vvtd, irte.remap.dst), irte.remap.dm,
+                      irte.remap.dlm, irte.remap.tm);
+        return 0;
+    }
+    return -EFAULT;
+}
+
 static void vvtd_reset(struct vvtd *vvtd, uint64_t capability)
 {
     uint64_t cap, ecap;
@@ -362,7 +638,8 @@ static int vvtd_destroy(struct viommu *viommu)
 struct viommu_ops vvtd_hvm_vmx_ops = {
     .query_caps = vvtd_query_caps,
     .create = vvtd_create,
-    .destroy = vvtd_destroy
+    .destroy = vvtd_destroy,
+    .handle_irq_request = vvtd_handle_irq_request
 };
 
 static int vvtd_register(void)
diff --git a/xen/drivers/passthrough/vtd/iommu.h b/xen/drivers/passthrough/vtd/iommu.h
index fd040d0..1c53d22 100644
--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -247,6 +247,21 @@
 #define dma_frcd_source_id(c) (c & 0xffff)
 #define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
 
+enum VTD_FAULT_TYPE
+{
+    /* Interrupt remapping transition faults */
+    VTD_FR_IR_REQ_RSVD = 0x20,   /* One or more IR request reserved
+                                  * fields set */
+    VTD_FR_IR_INDEX_OVER = 0x21, /* Index value greater than max */
+    VTD_FR_IR_ENTRY_P = 0x22,    /* Present (P) not set in IRTE */
+    VTD_FR_IR_ROOT_INVAL = 0x23, /* IR Root table invalid */
+    VTD_FR_IR_IRTE_RSVD = 0x24,  /* IRTE Rsvd field non-zero with
+                                  * Present flag set */
+    VTD_FR_IR_REQ_COMPAT = 0x25, /* Encountered compatible IR
+                                  * request while disabled */
+    VTD_FR_IR_SID_ERR = 0x26,    /* Invalid Source-ID */
+};
+
 /*
  * 0: Present
  * 1-11: Reserved
@@ -387,6 +402,12 @@ struct iremap_entry {
 };
 
 /*
+ * When VT-d doesn't enable Extended Interrupt Mode, hardware interprets
+ * only 8 bits ([15:8]) of the Destination-ID field in the IRTEs.
+ */
+#define IRTE_xAPIC_DEST_MASK 0xff00
+
+/*
  * Posted-interrupt descriptor address is 64 bits with 64-byte aligned, only
  * the upper 26 bits of lest significiant 32 bits is available.
  */
diff --git a/xen/drivers/passthrough/vtd/vtd.h b/xen/drivers/passthrough/vtd/vtd.h
index bb8889f..1032b46 100644
--- a/xen/drivers/passthrough/vtd/vtd.h
+++ b/xen/drivers/passthrough/vtd/vtd.h
@@ -47,6 +47,8 @@ struct IO_APIC_route_remap_entry {
     };
 };
 
+#define IOAPIC_REMAP_ENTRY_INDEX(x) ((x.index_15 << 15) + x.index_0_14)
+
 struct msi_msg_remap_entry {
     union {
         u32 val;
@@ -65,4 +67,8 @@ struct msi_msg_remap_entry {
     u32        data;           /* msi message data */
 };
 
+#define MSI_REMAP_ENTRY_INDEX(x) ((x.address_lo.index_15 << 15) + \
+                                  x.address_lo.index_0_14 + \
+                                  (x.address_lo.SHV ? (uint16_t)x.data : 0))
+
 #endif // _VTD_H_
-- 
1.8.3.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 

