
[Xen-devel] [RFC PATCH 6/7] common/pv-iommu: Add foreign ops to PV-IOMMU interface



Currently the ops are X86-only due to a dependency on the X86-only M2B
implementation.

Foreign ops conform to draft D of PV-IOMMU design.
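
For reference, a caller (e.g. a device model acting as an IOREQ server)
would drive the new subops roughly as sketched below. This is a minimal,
untested sketch: it assumes the HYPERVISOR_iommu_op() wrapper and the
IOMMU_OP_readable/IOMMU_OP_writeable flags introduced earlier in this
series; the bfn/gfn/domid/ioserver values are illustrative only.

    struct pv_iommu_op op = { 0 };

    op.subop_id = IOMMUOP_map_foreign_page;
    op.flags = IOMMU_OP_readable | IOMMU_OP_writeable;
    op.u.map_foreign_page.bfn = bfn;           /* bus frame to map at */
    op.u.map_foreign_page.gfn = gfn;           /* frame in the remote domain */
    op.u.map_foreign_page.domid = domid;       /* remote (HVM) domain */
    op.u.map_foreign_page.ioserver = ioserver; /* IOREQ server id */

    rc = HYPERVISOR_iommu_op(&op, 1);          /* second argument: op count */
    if ( rc || op.status )
        /* op.status is -EPERM, -ENXIO, -EIO or -ENOMEM on failure */;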

XSM control has been implemented to allow full security control of
these privileged operations.
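
The dummy policy below restricts the new ops to device model privilege
(XSM_DM_PRIV) over the remote domain. An enabled XSM module would supply
its own iommu_control hook; a hypothetical FLASK-style implementation
could look like the sketch below (no FLASK hook is added by this patch,
and the permission name is made up for illustration):

    static int flask_iommu_control(struct domain *d, unsigned long op)
    {
        /* HVM__IOMMU_CONTROL is an illustrative permission name. */
        return current_has_perm(d, SECCLASS_HVM, HVM__IOMMU_CONTROL);
    }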

Page references and page locking are taken before the M2B interface is
used, as mandated by the M2B interface itself.
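
In outline, every M2B update below follows the same pattern (the page
reference is taken first, then the page lock is held across the M2B
change; error paths drop the reference again):

    if ( get_paged_frame(gfn, &mfn, &page, 0, rd) ) /* takes a page ref */
        return -ENXIO;

    locked = page_lock(page);
    ret = add_m2b_entry(page, d, ioserver, bfn);    /* needs the page lock */
    if ( locked )
        page_unlock(page);

    if ( ret )
        put_page(page);                             /* drop ref on failure */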

Signed-off-by: Malcolm Crossley <malcolm.crossley@xxxxxxxxxx>
--
Cc: dgdegra@xxxxxxxxxxxxx
Cc: jbeulich@xxxxxxxx
Cc: ian.campbell@xxxxxxxxxx
Cc: keir@xxxxxxx
Cc: tim@xxxxxxx
Cc: andrew.cooper3@xxxxxxxxxx
Cc: xen-devel@xxxxxxxxxxxxx
---
 xen/common/pv_iommu.c         | 272 ++++++++++++++++++++++++++++++++++++++++++
 xen/include/public/pv-iommu.h |  22 ++++
 xen/include/xsm/dummy.h       |   6 +
 xen/include/xsm/xsm.h         |   6 +
 xen/xsm/dummy.c               |   1 +
 5 files changed, 307 insertions(+)

diff --git a/xen/common/pv_iommu.c b/xen/common/pv_iommu.c
index 91485f3..bbf1a21 100644
--- a/xen/common/pv_iommu.c
+++ b/xen/common/pv_iommu.c
@@ -21,7 +21,12 @@
 #include <asm/event.h>
 #include <xen/guest_access.h>
 #include <public/pv-iommu.h>
+#include <xsm/xsm.h>
 
+#ifdef CONFIG_X86
+#include <asm/m2b.h>
+#include <asm/setup.h>
+#endif
 #define ret_t long
 
 static int get_paged_frame(unsigned long gfn, unsigned long *frame,
@@ -79,6 +84,7 @@ void do_iommu_sub_op(struct pv_iommu_op *op)
 {
     struct domain *d = current->domain;
     struct domain *rd = NULL;
+    int ret;
 
     /* Only order 0 pages supported */
     if ( IOMMU_get_page_order(op->flags) != 0 )
@@ -183,7 +189,273 @@ void do_iommu_sub_op(struct pv_iommu_op *op)
             op->status = 0;
             break;
         }
+#ifdef CONFIG_X86
+        case IOMMUOP_map_foreign_page:
+        {
+            unsigned long mfn, tmp;
+            unsigned int flags = 0;
+            struct page_info *page = NULL;
+            struct m2b_entry *m2b_e;
+            int locked;
+
+            /* Check if calling domain can create IOMMU mappings */
+            if ( !can_use_iommu_check(d) )
+            {
+                op->status = -EPERM;
+                goto finish;
+            }
+
+            rd = rcu_lock_domain_by_any_id(op->u.map_foreign_page.domid);
+            if ( !rd )
+            {
+                op->status = -ENXIO;
+                goto finish;
+            }
+
+            /* Only HVM domains can have their pages foreign mapped */
+            if ( is_pv_domain(rd) )
+            {
+                op->status = -EPERM;
+                goto finish;
+            }
+
+            if ( d->domain_id == op->u.map_foreign_page.domid ||
+                 op->u.map_foreign_page.domid == DOMID_SELF )
+            {
+                op->status = -EPERM;
+                goto finish;
+            }
+
+            /* Check for privilege over remote domain */
+            if ( xsm_iommu_control(XSM_DM_PRIV, rd, op->subop_id) )
+            {
+                op->status = -EPERM;
+                goto finish;
+            }
+
+            /* Lookup page struct backing gfn */
+            if ( get_paged_frame(op->u.map_foreign_page.gfn, &mfn, &page, 0,
+                                 rd) )
+            {
+                op->status = -ENXIO;
+                goto finish;
+            }
+            /* Check M2B for existing mapping */
+            m2b_e = lookup_m2b_entry(page, d,
+                                     op->u.map_foreign_page.ioserver,
+                                     op->u.map_foreign_page.bfn);
+
+            /* M2B already exists for domid, gfn, ioserver combination */
+            if ( m2b_e )
+            {
+                put_page(page);
+                op->status = 0;
+                goto finish;
+            }
+
+            if ( !mfn_valid(mfn) || xen_in_range(mfn) ||
+                 is_xen_heap_page(page) ||
+                 (page->count_info & PGC_allocated) ||
+                 (page->count_info & PGC_count_mask) < 2 )
+            {
+                put_page(page);
+                op->status = -EPERM;
+                goto finish;
+            }
+
+            /* Check for conflict with existing BFN mapping */
+            if ( !iommu_lookup_page(d, op->u.map_foreign_page.bfn, &tmp) )
+            {
+                put_page(page);
+                op->status = -EPERM;
+                goto finish;
+            }
+
+            if ( op->flags & IOMMU_OP_readable )
+                flags |= IOMMUF_readable;
+
+            if ( op->flags & IOMMU_OP_writeable )
+                flags |= IOMMUF_writable;
+
+            if ( iommu_map_page(d, op->u.map_foreign_page.bfn, mfn, flags) )
+            {
+                put_page(page);
+                op->status = -EIO;
+                goto finish;
+            }
+            /* Add M2B entry */
+            locked = page_lock(page);
+            ret = add_m2b_entry(page, d,
+                                op->u.map_foreign_page.ioserver,
+                                op->u.map_foreign_page.bfn);
+            atomic_inc(&rd->m2b_count);
+            if ( locked )
+                page_unlock(page);
+            if ( ret )
+            {
+                if ( iommu_unmap_page(d, op->u.map_foreign_page.bfn) )
+                    domain_crash(d);
+
+                put_page(page);
+                op->status = -ENOMEM;
+                goto finish;
+            }
+
+            op->status = 0;
+            break;
+        }
+        case IOMMUOP_lookup_foreign_page:
+        {
+            unsigned long mfn;
+            struct page_info *page = NULL;
+            struct m2b_entry *m2b_e;
+            int rc;
+            int locked;
 
+            if ( d->domain_id == op->u.lookup_foreign_page.domid ||
+                 op->u.lookup_foreign_page.domid == DOMID_SELF )
+            {
+                op->status = -EPERM;
+                goto finish;
+            }
+
+            rd = rcu_lock_domain_by_any_id(op->u.lookup_foreign_page.domid);
+
+            if ( !rd )
+            {
+                op->status = -ENXIO;
+                goto finish;
+            }
+
+            /* Only HVM domains can have their pages foreign mapped */
+            if ( is_pv_domain(rd) )
+            {
+                op->status = -EPERM;
+                goto finish;
+            }
+
+            /* Check for privilege */
+            if ( xsm_iommu_control(XSM_DM_PRIV, rd, op->subop_id) )
+            {
+                op->status = -EPERM;
+                goto finish;
+            }
+
+            /* Lookup page struct backing gfn */
+            if ( (rc = get_paged_frame(op->u.lookup_foreign_page.gfn, &mfn,
+                                       &page, 0, rd)) )
+            {
+                op->status = -ENXIO; /* Should this be something else? */
+                goto finish;
+            }
+
+            /* Check M2B for existing mapping */
+            m2b_e = lookup_m2b_entry(page, d,
+                                     op->u.lookup_foreign_page.ioserver,
+                                     BFN_ANY);
+
+            /* M2B already exists for domid, gfn, ioserver combination */
+            if ( m2b_e )
+            {
+                put_page(page);
+                op->u.lookup_foreign_page.bfn = m2b_e->bfn;
+                op->status = 0;
+                goto finish;
+            }
+
+            /* Only create BFN mappings for guest mapped memory */
+            if ( !(page->count_info & PGC_allocated) ||
+                 ( (page->count_info & PGC_count_mask) < 2 ))
+            {
+                put_page(page);
+                op->status = -EPERM;
+                goto finish;
+            }
+
+            /* Check if IOMMU is disabled/bypassed */
+            if ( !can_use_iommu_check(d) )
+            {
+                /* Add M2B entry using MFN */
+                locked = page_lock(page);
+                ret = add_m2b_entry(page, d,
+                                    op->u.lookup_foreign_page.ioserver, mfn);
+                atomic_inc(&rd->m2b_count);
+                if ( locked )
+                    page_unlock(page);
+                if ( ret )
+                {
+                    put_page(page);
+                    op->status = -ENOMEM;
+                    goto finish;
+                }
+                op->u.lookup_foreign_page.bfn = mfn;
+            }
+            op->status = 0;
+            break;
+        }
+        case IOMMUOP_unmap_foreign_page:
+        {
+            struct m2b_entry *m2b_e;
+            struct page_info *page;
+            unsigned long mfn;
+            int locked;
+
+            if ( !can_use_iommu_check(d) )
+            {
+                page = mfn_to_page(op->u.unmap_foreign_page.bfn);
+            }
+            else
+            {
+                /* Check if there is a valid BFN mapping for this domain */
+                if ( iommu_lookup_page(d, op->u.unmap_foreign_page.bfn, &mfn) )
+                {
+                    op->status = -ENOENT;
+                    goto finish;
+                }
+                /* Use MFN from B2M mapping to lookup page */
+                page = mfn_to_page(mfn);
+            }
+
+            if ( !page )
+            {
+                op->status = -ENOENT;
+                goto finish;
+            }
+
+            /* Try to remove the M2B mapping */
+            locked = page_lock(page);
+            ret = del_m2b_entry(page, d,
+                                op->u.unmap_foreign_page.ioserver,
+                                op->u.unmap_foreign_page.bfn);
+            atomic_dec(&page_get_owner(page)->m2b_count);
+            if ( locked )
+                page_unlock(page);
+            if ( ret )
+            {
+                op->status = -ENOENT;
+                goto finish;
+            }
+
+            if ( can_use_iommu_check(d) )
+            {
+                /* Check if there are any M2B mappings left for this domain */
+                m2b_e = lookup_m2b_entry(page, d,
+                                         IOSERVER_ANY,
+                                         op->u.unmap_foreign_page.bfn);
+
+                /* No M2B left for this bfn so IOMMU unmap it */
+                if ( !m2b_e )
+                {
+                    if ( iommu_unmap_page(d, op->u.unmap_foreign_page.bfn) )
+                        domain_crash(d);
+                }
+            }
+
+            op->status = 0;
+            break;
+        }
+#endif
         default:
             op->status = -ENODEV;
             break;
diff --git a/xen/include/public/pv-iommu.h b/xen/include/public/pv-iommu.h
index 0f19f96..366037c 100644
--- a/xen/include/public/pv-iommu.h
+++ b/xen/include/public/pv-iommu.h
@@ -26,6 +26,9 @@
 #define IOMMUOP_query_caps            1
 #define IOMMUOP_map_page              2
 #define IOMMUOP_unmap_page            3
+#define IOMMUOP_map_foreign_page      4
+#define IOMMUOP_lookup_foreign_page   5
+#define IOMMUOP_unmap_foreign_page    6
 
 struct pv_iommu_op {
     uint16_t subop_id;
@@ -49,6 +52,25 @@ struct pv_iommu_op {
         struct {
             uint64_t bfn;
         } unmap_page;
+
+        struct {
+            uint64_t bfn;
+            uint64_t gfn;
+            uint16_t domid;
+            uint16_t ioserver;
+        } map_foreign_page;
+
+        struct {
+            uint64_t bfn;
+            uint64_t gfn;
+            uint16_t domid;
+            uint16_t ioserver;
+        } lookup_foreign_page;
+
+        struct {
+            uint64_t bfn;
+            uint16_t ioserver;
+        } unmap_foreign_page;
     } u;
 };
 
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index 1d13826..d0be2fe 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -566,6 +566,12 @@ static XSM_INLINE int xsm_vm_event_control(XSM_DEFAULT_ARG struct domain *d, int
     return xsm_default_action(action, current->domain, d);
 }
 
+static XSM_INLINE int xsm_iommu_control(XSM_DEFAULT_ARG struct domain *d, unsigned long op)
+{
+    XSM_ASSERT_ACTION(XSM_DM_PRIV);
+    return xsm_default_action(action, current->domain, d);
+}
+
 #ifdef CONFIG_HAS_MEM_ACCESS
 static XSM_INLINE int xsm_mem_access(XSM_DEFAULT_ARG struct domain *d)
 {
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 3afed70..92e6e33 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -114,6 +114,7 @@ struct xsm_operations {
     int (*iomem_permission) (struct domain *d, uint64_t s, uint64_t e, uint8_t allow);
     int (*iomem_mapping) (struct domain *d, uint64_t s, uint64_t e, uint8_t allow);
     int (*pci_config_permission) (struct domain *d, uint32_t machine_bdf, uint16_t start, uint16_t end, uint8_t access);
+    int (*iommu_control) (struct domain *d, unsigned long op);
 
 #if defined(CONFIG_HAS_PASSTHROUGH) && defined(CONFIG_HAS_PCI)
     int (*get_device_group) (uint32_t machine_bdf);
@@ -605,6 +606,11 @@ static inline int xsm_vm_event_control (xsm_default_t def, struct domain *d, int
     return xsm_ops->vm_event_control(d, mode, op);
 }
 
+static inline int xsm_iommu_control(xsm_default_t def, struct domain *d, unsigned long op)
+{
+    return xsm_ops->iommu_control(d, op);
+}
+
 #ifdef CONFIG_HAS_MEM_ACCESS
 static inline int xsm_mem_access (xsm_default_t def, struct domain *d)
 {
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index 0f32636..e18bb3b 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -119,6 +119,7 @@ void xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, hvm_altp2mhvm_op);
 
     set_to_dummy_if_null(ops, do_xsm_op);
+    set_to_dummy_if_null(ops, iommu_control);
 #ifdef CONFIG_COMPAT
     set_to_dummy_if_null(ops, do_compat_op);
 #endif
-- 
1.7.12.4

