[Xen-devel] [PATCH v4 12/12] IOMMU: patch certain indirect calls to direct ones



Introduce iommu_call() / iommu_vcall() wrappers and use them for hooks
invoked frequently during the lifetime of a VM. When CONFIG_IOMMU_MIXED
is disabled, these expand to alternative_call() / alternative_vcall(),
allowing the indirect calls through iommu_ops to be patched into direct
ones. This intentionally doesn't touch hooks used rarely (or not at
all) during the lifetime of a VM.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v4: New.
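
As an illustration of what the change buys: below is a minimal,
standalone sketch (not Xen code; all names and the demo main() are
made up) of the transformation. A call through a caller-provided
ops-table pointer becomes a call that names the single, statically
known iommu_ops instance; Xen's alternative_call() then goes one step
further and patches the call instruction itself during boot-time
alternatives patching, once the hook is known.

#include <stdio.h>

struct iommu_ops {
    int (*map_page)(unsigned long gfn, unsigned long mfn, unsigned int flags);
};

static int my_map_page(unsigned long gfn, unsigned long mfn, unsigned int flags)
{
    printf("map gfn %#lx -> mfn %#lx (flags %#x)\n", gfn, mfn, flags);
    return 0;
}

/* The one statically known instance, mirroring Xen's iommu_ops. */
static const struct iommu_ops iommu_ops = { .map_page = my_map_page };

/* Before: an indirect call; the target is loaded from memory each time. */
#define iommu_call_indirect(ops, fn, args...) ((ops)->fn(args))

/*
 * After: the macro names iommu_ops directly (the ops argument is kept
 * only for interface compatibility).  In Xen, alternative_call() takes
 * this same iommu_ops.fn expression and rewrites the call site into a
 * direct call at patch time.
 */
#define iommu_call(ops, fn, args...) (iommu_ops.fn(args))

int main(void)
{
    const struct iommu_ops *platform_ops = &iommu_ops;

    iommu_call_indirect(platform_ops, map_page, 0x1000, 0x2000, 1);
    iommu_call(platform_ops, map_page, 0x1000, 0x2000, 1);
    return 0;
}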

--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -228,7 +228,8 @@ void __hwdom_init iommu_hwdom_init(struc
                   == PGT_writable_page) )
                 mapping |= IOMMUF_writable;
 
-            ret = hd->platform_ops->map_page(d, gfn, mfn, mapping);
+            ret = iommu_call(hd->platform_ops, map_page,
+                             d, gfn, mfn, mapping);
             if ( !rc )
                 rc = ret;
 
@@ -300,7 +301,7 @@ int iommu_map_page(struct domain *d, uns
     if ( !iommu_enabled || !hd->platform_ops )
         return 0;
 
-    rc = hd->platform_ops->map_page(d, gfn, mfn, flags);
+    rc = iommu_call(hd->platform_ops, map_page, d, gfn, mfn, flags);
     if ( unlikely(rc) )
     {
         if ( !d->is_shutting_down && printk_ratelimit() )
@@ -323,7 +324,7 @@ int iommu_unmap_page(struct domain *d, u
     if ( !iommu_enabled || !hd->platform_ops )
         return 0;
 
-    rc = hd->platform_ops->unmap_page(d, gfn);
+    rc = iommu_call(hd->platform_ops, unmap_page, d, gfn);
     if ( unlikely(rc) )
     {
         if ( !d->is_shutting_down && printk_ratelimit() )
@@ -349,7 +350,7 @@ static void iommu_free_pagetables(unsign
         spin_unlock(&iommu_pt_cleanup_lock);
         if ( !pg )
             return;
-        iommu_get_ops()->free_page_table(pg);
+        iommu_vcall(iommu_get_ops(), free_page_table, pg);
     } while ( !softirq_pending(smp_processor_id()) );
 
     tasklet_schedule_on_cpu(&iommu_pt_cleanup_tasklet,
@@ -366,7 +367,7 @@ int iommu_iotlb_flush(struct domain *d,
     if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush )
         return 0;
 
-    rc = hd->platform_ops->iotlb_flush(d, gfn, page_count);
+    rc = iommu_call(hd->platform_ops, iotlb_flush, d, gfn, page_count);
     if ( unlikely(rc) )
     {
         if ( !d->is_shutting_down && printk_ratelimit() )
@@ -389,7 +390,7 @@ int iommu_iotlb_flush_all(struct domain
     if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush_all )
         return 0;
 
-    rc = hd->platform_ops->iotlb_flush_all(d);
+    rc = iommu_call(hd->platform_ops, iotlb_flush_all, d);
     if ( unlikely(rc) )
     {
         if ( !d->is_shutting_down && printk_ratelimit() )
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -1301,14 +1301,14 @@ int iommu_update_ire_from_msi(
     struct msi_desc *msi_desc, struct msi_msg *msg)
 {
     return iommu_intremap
-           ? iommu_get_ops()->update_ire_from_msi(msi_desc, msg) : 0;
+           ? iommu_call(iommu_ops, update_ire_from_msi, msi_desc, msg) : 0;
 }
 
 void iommu_read_msi_from_ire(
     struct msi_desc *msi_desc, struct msi_msg *msg)
 {
     if ( iommu_intremap )
-        iommu_get_ops()->read_msi_from_ire(msi_desc, msg);
+        iommu_vcall(iommu_ops, read_msi_from_ire, msi_desc, msg);
 }
 
 static int iommu_add_device(struct pci_dev *pdev)
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -26,14 +26,12 @@
 void iommu_update_ire_from_apic(
     unsigned int apic, unsigned int reg, unsigned int value)
 {
-    const struct iommu_ops *ops = iommu_get_ops();
-    ops->update_ire_from_apic(apic, reg, value);
+    iommu_vcall(iommu_ops, update_ire_from_apic, apic, reg, value);
 }
 
 unsigned int iommu_read_apic_from_ire(unsigned int apic, unsigned int reg)
 {
-    const struct iommu_ops *ops = iommu_get_ops();
-    return ops->read_apic_from_ire(apic, reg);
+    return iommu_call(iommu_ops, read_apic_from_ire, apic, reg);
 }
 
 int __init iommu_setup_hpet_msi(struct msi_desc *msi)
@@ -44,7 +42,6 @@ int __init iommu_setup_hpet_msi(struct m
 
 int arch_iommu_populate_page_table(struct domain *d)
 {
-    const struct domain_iommu *hd = dom_iommu(d);
     struct page_info *page;
     int rc = 0, n = 0;
 
@@ -68,9 +65,8 @@ int arch_iommu_populate_page_table(struc
             {
                 ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
                 BUG_ON(SHARED_M2P(gfn));
-                rc = hd->platform_ops->map_page(d, gfn, mfn,
-                                                IOMMUF_readable |
-                                                IOMMUF_writable);
+                rc = iommu_call(iommu_ops, map_page, d, gfn, mfn,
+                                IOMMUF_readable | IOMMUF_writable);
             }
             if ( rc )
             {
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -176,9 +176,17 @@ struct iommu_ops {
     void (*dump_p2m_table)(struct domain *d);
 };
 
-#ifndef CONFIG_IOMMU_MIXED
+#ifdef CONFIG_IOMMU_MIXED
+# define iommu_call(ops, fn, args...) ((ops)->fn(args))
+# define iommu_vcall iommu_call
+#else
+# include <asm/alternative.h>
+
 extern struct iommu_ops iommu_ops;
 
+# define iommu_call(ops, fn, args...)  alternative_call(iommu_ops.fn, ## args)
+# define iommu_vcall(ops, fn, args...) alternative_vcall(iommu_ops.fn, ## args)
+
 static inline const struct iommu_ops *iommu_get_ops(void)
 {
     BUG_ON(!iommu_ops.init);
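
A usage note on the two wrappers: iommu_call() is used in the hunks
above where the hook's return value is consumed (map_page, unmap_page,
the iotlb_flush hooks, read_apic_from_ire, update_ire_from_msi), while
iommu_vcall() is used for void hooks (free_page_table,
read_msi_from_ire, update_ire_from_apic). Below is a standalone mock,
assuming simplified stand-ins for alternative_call() /
alternative_vcall() (the real ones live in asm/alternative.h and do
the boot-time call patching), just to show how the macro shapes
compose:

#include <stdio.h>

/*
 * Hypothetical stand-ins for Xen's patching primitives, only to show
 * the call shapes: one for calls whose result is used, one for calls
 * whose result is discarded.
 */
#define alternative_call(func, args...)  (func(args))
#define alternative_vcall(func, args...) ((void)(func(args)))

struct iommu_ops {
    int (*unmap_page)(unsigned long gfn);
    void (*read_msi_from_ire)(void *desc, void *msg);
};

static int my_unmap_page(unsigned long gfn)
{
    return gfn ? 0 : -1;
}

static void my_read_msi_from_ire(void *desc, void *msg)
{
    (void)desc;
    (void)msg;
}

struct iommu_ops iommu_ops = {
    .unmap_page = my_unmap_page,
    .read_msi_from_ire = my_read_msi_from_ire,
};

/* As in the patch: the ops argument is accepted but unused, since the
 * non-MIXED build has exactly one iommu_ops instance to call through. */
#define iommu_call(ops, fn, args...)  alternative_call(iommu_ops.fn, ## args)
#define iommu_vcall(ops, fn, args...) alternative_vcall(iommu_ops.fn, ## args)

int main(void)
{
    int rc = iommu_call(&iommu_ops, unmap_page, 0x1000UL);

    iommu_vcall(&iommu_ops, read_msi_from_ire, NULL, NULL);
    printf("rc=%d\n", rc);
    return 0;
}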



