
[Xen-devel] [PATCH 1 of 6 V6] amd iommu: Add 2 hypercalls for libxc



# HG changeset patch
# User Wei Wang <wei.wang2@xxxxxxx>
# Date 1331210211 -3600
# Node ID 810296995a893c1bec898a366f08520fbae755fb
# Parent  f611200469159a960c3f0b975aa0f91ec768e29f
amd iommu: Add 2 hypercalls for libxc

iommu_set_msi: used by qemu to tell the hypervisor which MSI vector the guest
has assigned to its virtual IOMMU. The hypervisor needs this vector to inject
an MSI into the guest after writing PPR log entries back into guest memory.
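
Purely as an illustration (not part of this patch), a minimal libxc-style
wrapper for this sub-op could look like the sketch below. The function name
xc_guest_iommu_set_msi is hypothetical; DECLARE_DOMCTL and do_domctl are the
usual libxc domctl plumbing from xc_private.h, and only the domctl fields
themselves are introduced by this patch:

    /* Hypothetical libxc wrapper, illustration only - not in this patch. */
    static int xc_guest_iommu_set_msi(xc_interface *xch, uint32_t domid,
                                      uint8_t vector, uint8_t dest,
                                      uint8_t dest_mode, uint8_t delivery_mode,
                                      uint8_t trig_mode)
    {
        DECLARE_DOMCTL;

        domctl.cmd = XEN_DOMCTL_guest_iommu_op;
        domctl.domain = (domid_t)domid;
        domctl.u.guest_iommu_op.op = XEN_DOMCTL_GUEST_IOMMU_OP_SET_MSI;
        domctl.u.guest_iommu_op.u.msi.vector = vector;
        domctl.u.guest_iommu_op.u.msi.dest = dest;
        domctl.u.guest_iommu_op.u.msi.dest_mode = dest_mode;
        domctl.u.guest_iommu_op.u.msi.delivery_mode = delivery_mode;
        domctl.u.guest_iommu_op.u.msi.trig_mode = trig_mode;

        return do_domctl(xch, &domctl);
    }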

iommu_bind_bdf: used by xl to bind a virtual BDF to the machine BDF of a
passthrough device. The IOMMU emulator receives IOMMU commands from the guest
OS and forwards them to the host IOMMU; the virtual device IDs carried in
guest IOMMU commands must be translated into physical device IDs before the
commands are sent to the real hardware.
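
Again as an illustration only (hypothetical helper name, not included in this
patch), xl could reach this sub-op through a libxc wrapper along these lines:

    /* Hypothetical libxc wrapper, illustration only - not in this patch. */
    static int xc_guest_iommu_bind_bdf(xc_interface *xch, uint32_t domid,
                                       uint16_t gseg, uint16_t gbdf,
                                       uint16_t mseg, uint16_t mbdf)
    {
        DECLARE_DOMCTL;

        domctl.cmd = XEN_DOMCTL_guest_iommu_op;
        domctl.domain = (domid_t)domid;
        domctl.u.guest_iommu_op.op = XEN_DOMCTL_GUEST_IOMMU_OP_BIND_BDF;
        domctl.u.guest_iommu_op.u.bdf_bind.g_seg = gseg;
        domctl.u.guest_iommu_op.u.bdf_bind.g_bdf = gbdf;
        domctl.u.guest_iommu_op.u.bdf_bind.m_seg = mseg;
        domctl.u.guest_iommu_op.u.bdf_bind.m_bdf = mbdf;

        return do_domctl(xch, &domctl);
    }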

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>

diff -r f61120046915 -r 810296995a89 xen/drivers/passthrough/amd/iommu_guest.c
--- a/xen/drivers/passthrough/amd/iommu_guest.c Wed Mar 07 11:50:31 2012 +0100
+++ b/xen/drivers/passthrough/amd/iommu_guest.c Thu Mar 08 13:36:51 2012 +0100
@@ -48,14 +48,31 @@
         (reg)->hi = (val) >> 32; \
     } while (0)
 
-static unsigned int machine_bdf(struct domain *d, uint16_t guest_bdf)
+static unsigned int machine_bdf(struct domain *d, uint16_t guest_seg,
+                                uint16_t guest_bdf)
 {
-    return guest_bdf;
+    struct pci_dev *pdev;
+    uint16_t mbdf = 0;
+
+    for_each_pdev( d, pdev )
+    {
+        if ( (pdev->gbdf == guest_bdf) && (pdev->gseg == guest_seg) )
+        {
+            mbdf = PCI_BDF2(pdev->bus, pdev->devfn);
+            break;
+        }
+    }
+    return mbdf;
 }
 
-static uint16_t guest_bdf(struct domain *d, uint16_t machine_bdf)
+static uint16_t guest_bdf(struct domain *d, uint16_t machine_seg,
+                          uint16_t machine_bdf)
 {
-    return machine_bdf;
+    struct pci_dev *pdev;
+
+    pdev = pci_get_pdev_by_domain(d, machine_seg, PCI_BUS(machine_bdf),
+                                  PCI_DEVFN2(machine_bdf));
+    return pdev ? pdev->gbdf : 0;
 }
 
 static inline struct guest_iommu *domain_iommu(struct domain *d)
@@ -207,7 +224,7 @@ void guest_iommu_add_ppr_log(struct doma
     log = log_base + tail % (PAGE_SIZE / sizeof(ppr_entry_t));
 
     /* Convert physical device id back into virtual device id */
-    gdev_id = guest_bdf(d, iommu_get_devid_from_cmd(entry[0]));
+    gdev_id = guest_bdf(d, 0, iommu_get_devid_from_cmd(entry[0]));
     iommu_set_devid_to_cmd(&entry[0], gdev_id);
 
     memcpy(log, entry, sizeof(ppr_entry_t));
@@ -256,7 +273,7 @@ void guest_iommu_add_event_log(struct do
     log = log_base + tail % (PAGE_SIZE / sizeof(event_entry_t));
 
     /* re-write physical device id into virtual device id */
-    dev_id = guest_bdf(d, iommu_get_devid_from_cmd(entry[0]));
+    dev_id = guest_bdf(d, 0, iommu_get_devid_from_cmd(entry[0]));
     iommu_set_devid_to_cmd(&entry[0], dev_id);
     memcpy(log, entry, sizeof(event_entry_t));
 
@@ -278,7 +295,7 @@ static int do_complete_ppr_request(struc
     uint16_t dev_id;
     struct amd_iommu *iommu;
 
-    dev_id = machine_bdf(d, iommu_get_devid_from_cmd(cmd->data[0]));
+    dev_id = machine_bdf(d, 0, iommu_get_devid_from_cmd(cmd->data[0]));
     iommu = find_iommu_for_device(0, dev_id);
 
     if ( !iommu )
@@ -330,7 +347,7 @@ static int do_invalidate_iotlb_pages(str
     struct amd_iommu *iommu;
     uint16_t dev_id;
 
-    dev_id = machine_bdf(d, iommu_get_devid_from_cmd(cmd->data[0]));
+    dev_id = machine_bdf(d, 0, iommu_get_devid_from_cmd(cmd->data[0]));
 
     iommu = find_iommu_for_device(0, dev_id);
     if ( !iommu )
@@ -409,7 +426,7 @@ static int do_invalidate_dte(struct doma
 
     g_iommu = domain_iommu(d);
     gbdf = iommu_get_devid_from_cmd(cmd->data[0]);
-    mbdf = machine_bdf(d, gbdf);
+    mbdf = machine_bdf(d, 0, gbdf);
 
     /* Guest can only update DTEs for its passthru devices */
     if ( mbdf == 0 || gbdf == 0 )
@@ -916,3 +933,45 @@ const struct hvm_mmio_handler iommu_mmio
     .read_handler = guest_iommu_mmio_read,
     .write_handler = guest_iommu_mmio_write
 };
+
+/* iommu hypercall handler */
+int iommu_bind_bdf(struct domain *d, uint16_t gseg, uint16_t gbdf,
+                   uint16_t mseg, uint16_t mbdf)
+{
+    struct pci_dev *pdev;
+    int ret = -ENODEV;
+
+    if ( !iommu_found() || !iommu_enabled || !iommuv2_enabled )
+        return 0;
+
+    spin_lock(&pcidevs_lock);
+
+    for_each_pdev( d, pdev )
+    {
+        if ( (pdev->seg != mseg) || (pdev->bus != PCI_BUS(mbdf)) ||
+             (pdev->devfn != PCI_DEVFN2(mbdf)) )
+            continue;
+
+        pdev->gseg = gseg;
+        pdev->gbdf = gbdf;
+        ret = 0;
+    }
+
+    spin_unlock(&pcidevs_lock);
+    return ret;
+}
+
+void iommu_set_msi(struct domain *d, uint8_t vector, uint8_t dest,
+                   uint8_t dest_mode, uint8_t delivery_mode, uint8_t trig_mode)
+{
+    struct guest_iommu *iommu = domain_iommu(d);
+
+    if ( !iommu )
+        return;
+
+    iommu->msi.vector = vector;
+    iommu->msi.dest = dest;
+    iommu->msi.dest_mode = dest_mode;
+    iommu->msi.delivery_mode = delivery_mode;
+    iommu->msi.trig_mode = trig_mode;
+}
diff -r f61120046915 -r 810296995a89 xen/drivers/passthrough/iommu.c
--- a/xen/drivers/passthrough/iommu.c   Wed Mar 07 11:50:31 2012 +0100
+++ b/xen/drivers/passthrough/iommu.c   Thu Mar 08 13:36:51 2012 +0100
@@ -653,6 +653,43 @@ int iommu_do_domctl(
         put_domain(d);
         break;
 
+    case XEN_DOMCTL_guest_iommu_op:
+    {
+        xen_domctl_guest_iommu_op_t *guest_op;
+
+        if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
+        {
+            gdprintk(XENLOG_ERR,
+                    "XEN_DOMCTL_guest_iommu_op: get_domain_by_id() failed\n");
+            ret = -EINVAL;
+            break;
+        }
+
+        guest_op = &(domctl->u.guest_iommu_op);
+        switch ( guest_op->op )
+        {
+        case XEN_DOMCTL_GUEST_IOMMU_OP_SET_MSI:
+             iommu_set_msi(d, guest_op->u.msi.vector, 
+                           guest_op->u.msi.dest,
+                           guest_op->u.msi.dest_mode, 
+                           guest_op->u.msi.delivery_mode,
+                           guest_op->u.msi.trig_mode);
+             ret = 0;
+             break;
+        case XEN_DOMCTL_GUEST_IOMMU_OP_BIND_BDF:
+             ret = iommu_bind_bdf(d, guest_op->u.bdf_bind.g_seg,
+                                  guest_op->u.bdf_bind.g_bdf,
+                                  guest_op->u.bdf_bind.m_seg,
+                                  guest_op->u.bdf_bind.m_bdf);
+             break;
+        default:
+             ret = -ENOSYS;
+             break;
+        }
+        put_domain(d);
+        break;
+    }
+
     default:
         ret = -ENOSYS;
         break;
diff -r f61120046915 -r 810296995a89 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h       Wed Mar 07 11:50:31 2012 +0100
+++ b/xen/include/public/domctl.h       Thu Mar 08 13:36:51 2012 +0100
@@ -820,6 +820,31 @@ struct xen_domctl_set_access_required {
 typedef struct xen_domctl_set_access_required xen_domctl_set_access_required_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_access_required_t);
 
+/* Support for guest iommu emulation */
+struct xen_domctl_guest_iommu_op {
+    /* XEN_DOMCTL_GUEST_IOMMU_OP_* */
+#define XEN_DOMCTL_GUEST_IOMMU_OP_SET_MSI               0
+#define XEN_DOMCTL_GUEST_IOMMU_OP_BIND_BDF              1
+    uint8_t op;
+    union {
+        struct iommu_msi {
+            uint8_t     vector;
+            uint8_t     dest;
+            uint8_t     dest_mode;
+            uint8_t     delivery_mode;
+            uint8_t     trig_mode;
+        } msi;
+        struct bdf_bind {
+            uint16_t    g_seg;
+            uint16_t    g_bdf;
+            uint16_t    m_seg;
+            uint16_t    m_bdf;
+        } bdf_bind;
+    } u;
+};
+typedef struct xen_domctl_guest_iommu_op xen_domctl_guest_iommu_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_guest_iommu_op_t);
+
 struct xen_domctl {
     uint32_t cmd;
 #define XEN_DOMCTL_createdomain                   1
@@ -885,6 +910,7 @@ struct xen_domctl {
 #define XEN_DOMCTL_set_access_required           64
 #define XEN_DOMCTL_audit_p2m                     65
 #define XEN_DOMCTL_set_virq_handler              66
+#define XEN_DOMCTL_guest_iommu_op                67
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -933,6 +959,7 @@ struct xen_domctl {
         struct xen_domctl_debug_op          debug_op;
         struct xen_domctl_mem_event_op      mem_event_op;
         struct xen_domctl_mem_sharing_op    mem_sharing_op;
+        struct xen_domctl_guest_iommu_op    guest_iommu_op;
 #if defined(__i386__) || defined(__x86_64__)
         struct xen_domctl_cpuid             cpuid;
         struct xen_domctl_vcpuextstate      vcpuextstate;
diff -r f61120046915 -r 810296995a89 xen/include/xen/iommu.h
--- a/xen/include/xen/iommu.h   Wed Mar 07 11:50:31 2012 +0100
+++ b/xen/include/xen/iommu.h   Thu Mar 08 13:36:51 2012 +0100
@@ -164,6 +164,11 @@ int iommu_do_domctl(struct xen_domctl *,
 void iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count);
 void iommu_iotlb_flush_all(struct domain *d);
 
+/* Only used by AMD IOMMU so far */
+void iommu_set_msi(struct domain *d, uint8_t vector, uint8_t dest,
+                   uint8_t dest_mode, uint8_t delivery_mode, uint8_t trig_mode);
+int iommu_bind_bdf(struct domain *d, uint16_t gseg, uint16_t gbdf,
+                   uint16_t mseg, uint16_t mbdf);
 /*
  * The purpose of the iommu_dont_flush_iotlb optional cpu flag is to
  * avoid unecessary iotlb_flush in the low level IOMMU code.
diff -r f61120046915 -r 810296995a89 xen/include/xen/pci.h
--- a/xen/include/xen/pci.h     Wed Mar 07 11:50:31 2012 +0100
+++ b/xen/include/xen/pci.h     Thu Mar 08 13:36:51 2012 +0100
@@ -62,6 +62,11 @@ struct pci_dev {
     const u16 seg;
     const u8 bus;
     const u8 devfn;
+
+    /* Used by iommu to represent virtual seg and bdf value in guest space */
+    u16 gseg;
+    u16 gbdf;
+
     struct pci_dev_info info;
     struct arch_pci_dev arch;
     u64 vf_rlen[6];

