[Xen-devel] [PATCH 1 of 7 V4] amd iommu: Add 2 hypercalls for libxc



# HG changeset patch
# User Wei Wang <wei.wang2@xxxxxxx>
# Date 1327066811 -3600
# Node ID 978e61814be49ec544151803be3e3b2717551316
# Parent  3d58058fc7a2ebda9c2add654c917b9f2c1b12e6
amd iommu: Add 2 hypercalls for libxc

iommu_set_msi: used by qemu to inform the hypervisor of the IOMMU MSI
vector number in guest space. The hypervisor needs this vector to
inject an MSI into the guest after writing a PPR log entry into the
guest buffer.

iommu_bind_bdf: used by xl to bind the virtual BDF of a passthrough
device to its machine BDF. The IOMMU emulator receives IOMMU commands
from the guest OS and forwards them to the host IOMMU, but the virtual
device IDs in guest commands must be translated into machine device
IDs before the commands are sent to the real hardware.
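
Again as a hedged sketch (wrapper name illustrative, same assumptions
as above), the BIND_BDF caller would look like:

    /* Hypothetical libxc wrapper for the BIND_BDF sub-op. */
    int xc_iommu_bind_bdf(xc_interface *xch, uint32_t domid,
                          uint16_t gseg, uint16_t gbdf,
                          uint16_t mseg, uint16_t mbdf)
    {
        DECLARE_DOMCTL;

        domctl.cmd = XEN_DOMCTL_guest_iommu_op;
        domctl.domain = (domid_t)domid;
        domctl.u.guest_iommu_op.op = XEN_DOMCTL_GUEST_IOMMU_OP_BIND_BDF;
        domctl.u.guest_iommu_op.u.bdf_bind.g_seg = gseg;
        domctl.u.guest_iommu_op.u.bdf_bind.g_bdf = gbdf;
        domctl.u.guest_iommu_op.u.bdf_bind.m_seg = mseg;
        domctl.u.guest_iommu_op.u.bdf_bind.m_bdf = mbdf;

        return do_domctl(xch, &domctl);
    }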

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>

diff -r 3d58058fc7a2 -r 978e61814be4 xen/drivers/passthrough/amd/iommu_guest.c
--- a/xen/drivers/passthrough/amd/iommu_guest.c Fri Jan 20 10:26:57 2012 +0000
+++ b/xen/drivers/passthrough/amd/iommu_guest.c Fri Jan 20 14:40:11 2012 +0100
@@ -48,14 +48,31 @@
         (reg)->hi = (val) >> 32; \
     } while (0)

-static unsigned int machine_bdf(struct domain *d, uint16_t guest_bdf)
+static unsigned int machine_bdf(struct domain *d, uint16_t guest_seg,
+                                uint16_t guest_bdf)
 {
-    return guest_bdf;
+    struct pci_dev *pdev;
+    uint16_t mbdf = 0;
+
+    for_each_pdev( d, pdev )
+    {
+        if ( (pdev->gbdf == guest_bdf) && (pdev->gseg == guest_seg) )
+        {
+            mbdf = PCI_BDF2(pdev->bus, pdev->devfn);
+            break;
+        }
+    }
+    return mbdf;
 }

-static uint16_t guest_bdf(struct domain *d, uint16_t machine_bdf)
+static uint16_t guest_bdf(struct domain *d, uint16_t machine_seg,
+                          uint16_t machine_bdf)
 {
-    return machine_bdf;
+    struct pci_dev *pdev;
+
+    pdev = pci_get_pdev_by_domain(d, machine_seg, PCI_BUS(machine_bdf),
+                                  PCI_DEVFN2(machine_bdf));
+    return pdev ? pdev->gbdf : 0;
 }

 static inline struct guest_iommu *domain_iommu(struct domain *d)
@@ -207,7 +224,7 @@ void guest_iommu_add_ppr_log(struct doma
     log = log_base + tail % (PAGE_SIZE / sizeof(ppr_entry_t));

     /* Convert physical device id back into virtual device id */
-    gdev_id = guest_bdf(d, iommu_get_devid_from_cmd(entry[0]));
+    gdev_id = guest_bdf(d, 0, iommu_get_devid_from_cmd(entry[0]));
     iommu_set_devid_to_cmd(&entry[0], gdev_id);

     memcpy(log, entry, sizeof(ppr_entry_t));
@@ -256,7 +273,7 @@ void guest_iommu_add_event_log(struct do
     log = log_base + tail % (PAGE_SIZE / sizeof(event_entry_t));

     /* re-write physical device id into virtual device id */
-    dev_id = guest_bdf(d, iommu_get_devid_from_cmd(entry[0]));
+    dev_id = guest_bdf(d, 0, iommu_get_devid_from_cmd(entry[0]));
     iommu_set_devid_to_cmd(&entry[0], dev_id);
     memcpy(log, entry, sizeof(event_entry_t));

@@ -278,7 +295,7 @@ static int do_complete_ppr_request(struc
     uint16_t dev_id;
     struct amd_iommu *iommu;

-    dev_id = machine_bdf(d, iommu_get_devid_from_cmd(cmd->data[0]));
+    dev_id = machine_bdf(d, 0, iommu_get_devid_from_cmd(cmd->data[0]));
     iommu = find_iommu_for_device(0, dev_id);

     if ( !iommu )
@@ -330,7 +347,7 @@ static int do_invalidate_iotlb_pages(str
     struct amd_iommu *iommu;
     uint16_t dev_id;

-    dev_id = machine_bdf(d, iommu_get_devid_from_cmd(cmd->data[0]));
+    dev_id = machine_bdf(d, 0, iommu_get_devid_from_cmd(cmd->data[0]));

     iommu = find_iommu_for_device(0, dev_id);
     if ( !iommu )
@@ -409,7 +426,7 @@ static int do_invalidate_dte(struct doma

     g_iommu = domain_iommu(d);
     gbdf = iommu_get_devid_from_cmd(cmd->data[0]);
-    mbdf = machine_bdf(d, gbdf);
+    mbdf = machine_bdf(d, 0, gbdf);

     /* Guest can only update DTEs for its passthru devices */
     if ( mbdf == 0 || gbdf == 0 )
@@ -919,3 +936,46 @@ const struct hvm_mmio_handler iommu_mmio
     .read_handler = guest_iommu_mmio_read,
     .write_handler = guest_iommu_mmio_write
 };
+
+/* Handlers for the XEN_DOMCTL_guest_iommu_op sub-ops */
+int iommu_bind_bdf(struct domain *d, uint16_t gseg, uint16_t gbdf,
+                   uint16_t mseg, uint16_t mbdf)
+{
+    struct pci_dev *pdev;
+    int ret = -ENODEV;
+
+    if ( !iommu_found() )
+        return 0;
+
+    spin_lock(&pcidevs_lock);
+
+    for_each_pdev( d, pdev )
+    {
+        if ( (pdev->seg != mseg) || (pdev->bus != PCI_BUS(mbdf)) ||
+             (pdev->devfn != PCI_DEVFN2(mbdf)) )
+            continue;
+
+        pdev->gseg = gseg;
+        pdev->gbdf = gbdf;
+        ret = 0;
+    }
+
+    spin_unlock(&pcidevs_lock);
+    return ret;
+}
+
+void iommu_set_msi(struct domain *d, uint16_t vector, uint16_t dest,
+                   uint16_t dest_mode, uint16_t delivery_mode,
+                   uint16_t trig_mode)
+{
+    struct guest_iommu *iommu = domain_iommu(d);
+
+    if ( !iommu_found() )
+        return;
+
+    iommu->msi.vector = vector;
+    iommu->msi.dest = dest;
+    iommu->msi.dest_mode = dest_mode;
+    iommu->msi.delivery_mode = delivery_mode;
+    iommu->msi.trig_mode = trig_mode;
+}
diff -r 3d58058fc7a2 -r 978e61814be4 xen/drivers/passthrough/iommu.c
--- a/xen/drivers/passthrough/iommu.c   Fri Jan 20 10:26:57 2012 +0000
+++ b/xen/drivers/passthrough/iommu.c   Fri Jan 20 14:40:11 2012 +0100
@@ -648,6 +648,43 @@ int iommu_do_domctl(
         put_domain(d);
         break;

+    case XEN_DOMCTL_guest_iommu_op:
+    {
+        xen_domctl_guest_iommu_op_t *guest_op;
+
+        if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
+        {
+            gdprintk(XENLOG_ERR,
+                     "XEN_DOMCTL_guest_iommu_op: get_domain_by_id() failed\n");
+            ret = -EINVAL;
+            break;
+        }
+
+        guest_op = &(domctl->u.guest_iommu_op);
+        switch ( guest_op->op )
+        {
+        case XEN_DOMCTL_GUEST_IOMMU_OP_SET_MSI:
+            iommu_set_msi(d, guest_op->u.msi.vector,
+                          guest_op->u.msi.dest,
+                          guest_op->u.msi.dest_mode,
+                          guest_op->u.msi.delivery_mode,
+                          guest_op->u.msi.trig_mode);
+            ret = 0;
+            break;
+        case XEN_DOMCTL_GUEST_IOMMU_OP_BIND_BDF:
+            ret = iommu_bind_bdf(d, guest_op->u.bdf_bind.g_seg,
+                                 guest_op->u.bdf_bind.g_bdf,
+                                 guest_op->u.bdf_bind.m_seg,
+                                 guest_op->u.bdf_bind.m_bdf);
+            break;
+        default:
+            ret = -ENOSYS;
+            break;
+        }
+        put_domain(d);
+        break;
+    }
+
     default:
         ret = -ENOSYS;
         break;
diff -r 3d58058fc7a2 -r 978e61814be4 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h       Fri Jan 20 10:26:57 2012 +0000
+++ b/xen/include/public/domctl.h       Fri Jan 20 14:40:11 2012 +0100
@@ -848,6 +848,31 @@ struct xen_domctl_set_access_required {
 typedef struct xen_domctl_set_access_required xen_domctl_set_access_required_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_access_required_t);

+/* Support for guest iommu emulation */
+struct xen_domctl_guest_iommu_op {
+    /* XEN_DOMCTL_GUEST_IOMMU_OP_* */
+#define XEN_DOMCTL_GUEST_IOMMU_OP_SET_MSI               0
+#define XEN_DOMCTL_GUEST_IOMMU_OP_BIND_BDF              1
+    uint8_t op;
+    union {
+        struct iommu_msi {
+            uint8_t  vector;
+            uint8_t  dest;
+            uint8_t  dest_mode;
+            uint8_t  delivery_mode;
+            uint8_t  trig_mode;
+        } msi;
+        struct bdf_bind {
+            uint16_t            g_seg;
+            uint16_t            g_bdf;
+            uint16_t            m_seg;
+            uint16_t            m_bdf;
+        } bdf_bind;
+    } u;
+};
+typedef struct xen_domctl_guest_iommu_op xen_domctl_guest_iommu_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_guest_iommu_op_t);
+
 struct xen_domctl {
     uint32_t cmd;
 #define XEN_DOMCTL_createdomain                   1
@@ -912,6 +937,7 @@ struct xen_domctl {
 #define XEN_DOMCTL_getvcpuextstate               63
 #define XEN_DOMCTL_set_access_required           64
 #define XEN_DOMCTL_audit_p2m                     65
+#define XEN_DOMCTL_guest_iommu_op                66
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -960,6 +986,7 @@ struct xen_domctl {
         struct xen_domctl_debug_op          debug_op;
         struct xen_domctl_mem_event_op      mem_event_op;
         struct xen_domctl_mem_sharing_op    mem_sharing_op;
+        struct xen_domctl_guest_iommu_op    guest_iommu_op;
 #if defined(__i386__) || defined(__x86_64__)
         struct xen_domctl_cpuid             cpuid;
         struct xen_domctl_vcpuextstate      vcpuextstate;
diff -r 3d58058fc7a2 -r 978e61814be4 xen/include/xen/iommu.h
--- a/xen/include/xen/iommu.h   Fri Jan 20 10:26:57 2012 +0000
+++ b/xen/include/xen/iommu.h   Fri Jan 20 14:40:11 2012 +0100
@@ -164,6 +164,12 @@ int iommu_do_domctl(struct xen_domctl *,
 void iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count);
 void iommu_iotlb_flush_all(struct domain *d);

+/* Only used by AMD IOMMU so far */
+void iommu_set_msi(struct domain *d, uint16_t vector, uint16_t dest,
+                   uint16_t dest_mode, uint16_t delivery_mode,
+                   uint16_t trig_mode);
+int iommu_bind_bdf(struct domain *d, uint16_t gseg, uint16_t gbdf,
+                   uint16_t mseg, uint16_t mbdf);
 /*
  * The purpose of the iommu_dont_flush_iotlb optional cpu flag is to
  * avoid unecessary iotlb_flush in the low level IOMMU code.
diff -r 3d58058fc7a2 -r 978e61814be4 xen/include/xen/pci.h
--- a/xen/include/xen/pci.h     Fri Jan 20 10:26:57 2012 +0000
+++ b/xen/include/xen/pci.h     Fri Jan 20 14:40:11 2012 +0100
@@ -58,6 +58,11 @@ struct pci_dev {
     const u16 seg;
     const u8 bus;
     const u8 devfn;
+
+    /* Used by iommu to represent virtual seg and bdf values in guest space */
+    u16 gseg;
+    u16 gbdf;
+
     struct pci_dev_info info;
     struct arch_pci_dev arch;
     u64 vf_rlen[6];