
Re: [Xen-devel] [V2 PATCH 7/8] pvh dom0: Add and remove foreign pages



On Tue, 26 Nov 2013 10:03:52 -0500
Daniel De Graaf <dgdegra@xxxxxxxxxxxxx> wrote:

> On 11/25/2013 07:32 PM, Mukesh Rathor wrote:
> > On Mon, 25 Nov 2013 14:00:35 -0500
> > Daniel De Graaf <dgdegra@xxxxxxxxxxxxx> wrote:
...........
> the xsm_add_to_physmap hook to add a new parameter for the foreign
> domain; the latter seems the simplest change, passing NULL for pg_src
> when not using XENMAPSPACE_gmfn_foreign. The hook would look
> something like this:
> 
> int xsm_add_to_physmap(XSM_DEFAULT_ARG struct domain *curr,
>               struct domain *target, struct domain *pg_src)
> {
>      int rc;
>      XSM_ASSERT_ACTION(XSM_TARGET);
>      rc = xsm_default_action(action, curr, target);
>      if ( pg_src && !rc )
>          rc = xsm_default_action(action, curr, pg_src);
>      return rc;
> }
> with the corresponding FLASK hook:
> { ...
>       rc = domain_has_perm(curr, target, SECCLASS_MMU, MMU__PHYSMAP);
>       if ( pg_src && !rc )
>           rc = domain_has_perm(curr, pg_src, SECCLASS_MMU,
>                                MMU__MAP_READ|MMU__MAP_WRITE);
> }
> 
> This will require pulling the get_pg_owner(foreign_domid) up a few
> levels in order to have the struct domain* available instead of the
> domid, but that doesn't seem like it would cause any issues.


Thanks a lot for the detailed help. Please let me know if the following looks ok:
Mukesh


diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index fc8dded..11c9a89 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4733,7 +4733,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( d == NULL )
             return -ESRCH;
 
-        if ( xsm_add_to_physmap(XSM_TARGET, current->domain, d) )
+        if ( xsm_add_to_physmap(XSM_TARGET, current->domain, d, NULL) )
         {
             rcu_unlock_domain(d);
             return -EPERM;
@@ -4759,7 +4759,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
     case XENMEM_add_to_physmap_range:
     {
         struct xen_add_to_physmap_range xatpr;
-        struct domain *d;
+        struct domain *d, *fd = NULL;
 
         if ( copy_from_guest(&xatpr, arg, 1) )
             return -EFAULT;
@@ -4772,7 +4772,17 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( d == NULL )
             return -ESRCH;
 
-        if ( (rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d)) )
+        if ( xatpr.foreign_domid )
+        {
+            if ( (fd = rcu_lock_domain_by_any_id(xatpr.foreign_domid)) == NULL )
+            {
+                rcu_unlock_domain(d);
+                return -ESRCH;
+            }
+            rcu_unlock_domain(fd);
+        }
+
+        if ( (rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, fd)) )
         {
             rcu_unlock_domain(d);
             return rc;
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index eb9e1a1..34c097d 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -467,10 +467,16 @@ static XSM_INLINE int xsm_pci_config_permission(XSM_DEFAULT_ARG struct domain *d
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_add_to_physmap(XSM_DEFAULT_ARG struct domain *d1, struct domain *d2)
+static XSM_INLINE int xsm_add_to_physmap(XSM_DEFAULT_ARG struct domain *d1, struct domain *d2, struct domain *d3)
 {
+    int rc;
+
     XSM_ASSERT_ACTION(XSM_TARGET);
-    return xsm_default_action(action, d1, d2);
+    rc = xsm_default_action(action, d1, d2);
+    if ( d3 && !rc )
+        rc = xsm_default_action(action, d1, d3);
+
+    return rc;
 }
 
 static XSM_INLINE int xsm_remove_from_physmap(XSM_DEFAULT_ARG struct domain *d1, struct domain *d2)
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 1939453..2d29a2f 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -90,7 +90,7 @@ struct xsm_operations {
     int (*memory_adjust_reservation) (struct domain *d1, struct domain *d2);
     int (*memory_stat_reservation) (struct domain *d1, struct domain *d2);
     int (*memory_pin_page) (struct domain *d1, struct domain *d2, struct page_info *page);
-    int (*add_to_physmap) (struct domain *d1, struct domain *d2);
+    int (*add_to_physmap) (struct domain *d1, struct domain *d2, struct domain *d3);
     int (*remove_from_physmap) (struct domain *d1, struct domain *d2);
     int (*claim_pages) (struct domain *d);
 
@@ -344,9 +344,9 @@ static inline int xsm_memory_pin_page(xsm_default_t def, struct domain *d1, stru
     return xsm_ops->memory_pin_page(d1, d2, page);
 }
 
-static inline int xsm_add_to_physmap(xsm_default_t def, struct domain *d1, struct domain *d2)
+static inline int xsm_add_to_physmap(xsm_default_t def, struct domain *d1, struct domain *d2, struct domain *d3)
 {
-    return xsm_ops->add_to_physmap(d1, d2);
+    return xsm_ops->add_to_physmap(d1, d2, d3);
 }
 
 static inline int xsm_remove_from_physmap(xsm_default_t def, struct domain *d1, struct domain *d2)
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index b1e2593..e541dd3 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1068,9 +1068,15 @@ static inline int flask_tmem_control(void)
     return domain_has_xen(current->domain, XEN__TMEM_CONTROL);
 }
 
-static int flask_add_to_physmap(struct domain *d1, struct domain *d2)
+static int flask_add_to_physmap(struct domain *d1, struct domain *d2, struct domain *d3)
 {
-    return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__PHYSMAP);
+    int rc;
+
+    rc = domain_has_perm(d1, d2, SECCLASS_MMU, MMU__PHYSMAP);
+    if ( d3 && !rc )
+        rc = domain_has_perm(d1, d3, SECCLASS_MMU, 
+                             MMU__MAP_READ|MMU__MAP_WRITE);
+    return rc;
 }
 
 static int flask_remove_from_physmap(struct domain *d1, struct domain *d2)
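
For testing on my end, the path that exercises the new two-domain check
is XENMEM_add_to_physmap_range with XENMAPSPACE_gmfn_foreign. A rough
sketch of such a caller (field names per xen/include/public/memory.h;
guest_domid, nr_frames, and the idxs/gpfns/errs arrays are placeholders,
illustrative only):

    struct xen_add_to_physmap_range xatpr = {
        .domid         = DOMID_SELF,               /* target domain (d) */
        .space         = XENMAPSPACE_gmfn_foreign, /* source is foreign */
        .foreign_domid = guest_domid,              /* becomes fd in Xen */
        .size          = nr_frames,
    };

    /* idxs: frame indexes in the foreign guest; gpfns: where to map
     * them in the caller's physmap; errs: per-frame error codes (OUT). */
    set_xen_guest_handle(xatpr.idxs, idxs);
    set_xen_guest_handle(xatpr.gpfns, gpfns);
    set_xen_guest_handle(xatpr.errs, errs);

    rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatpr);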

