[Xen-devel] [PATCH 14/20] tmem: Add access control check

The privilege check in do_tmem_control was commented out because dom0
would sometimes fail it unexpectedly. Replace it with an XSM hook,
xsm_tmem_control: the dummy module restores (and now actually enforces)
the IS_PRIV check, while FLASK checks a new tmem_op permission in the
xen class. The now-unused tmh_current_is_privileged wrapper is removed.

Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
Cc: Dan Magenheimer <dan.magenheimer@xxxxxxxxxx>
---
 tools/flask/policy/policy/flask/access_vectors |  1 +
 xen/common/tmem.c                              | 10 +++++-----
 xen/include/xen/tmem_xen.h                     |  5 -----
 xen/include/xsm/dummy.h                        |  7 +++++++
 xen/include/xsm/xsm.h                          |  6 ++++++
 xen/xsm/dummy.c                                |  1 +
 xen/xsm/flask/hooks.c                          |  6 ++++++
 xen/xsm/flask/include/av_perm_to_string.h      |  1 +
 xen/xsm/flask/include/av_permissions.h         |  1 +
 9 files changed, 28 insertions(+), 10 deletions(-)
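
Note for testers: with FLASK enabled, a domain needs the new tmem_op
permission before TMEM control operations will succeed. Assuming the
dom0_t and xen_t types from the example policy, a rule along these
lines would grant it:

    allow dom0_t xen_t:xen tmem_op;

With XSM compiled out, the dummy hook keeps the IS_PRIV behaviour, so
unprivileged callers of do_tmem_control now get -EPERM instead of the
check being skipped.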

diff --git a/tools/flask/policy/policy/flask/access_vectors b/tools/flask/policy/policy/flask/access_vectors
index 28b8ada..2986b40 100644
--- a/tools/flask/policy/policy/flask/access_vectors
+++ b/tools/flask/policy/policy/flask/access_vectors
@@ -35,6 +35,7 @@ class xen
        lockprof
        cpupool_op
        sched_op
+       tmem_op
 }
 
 class domain
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 1a8777c..fe2db45 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -23,6 +23,7 @@
 #include <xen/radix-tree.h>
 #include <xen/list.h>
 #include <xen/init.h>
+#include <xsm/xsm.h>
 
 #define EXPORT /* indicates code other modules are dependent upon */
 #define FORWARD
@@ -2540,11 +2541,10 @@ static NOINLINE int do_tmem_control(struct tmem_op *op)
     uint32_t subop = op->u.ctrl.subop;
     OID *oidp = (OID *)(&op->u.ctrl.oid[0]);
 
-    if (!tmh_current_is_privileged())
-    {
-        /* don't fail... mystery: sometimes dom0 fails here */
-        /* return -EPERM; */
-    }
+    ret = xsm_tmem_control(subop);
+    if ( ret )
+        return ret;
+
     switch(subop)
     {
     case TMEMC_THAW:
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index 4a35760..f248128 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -344,11 +344,6 @@ static inline bool_t tmh_set_client_from_id(
     return rc;
 }
 
-static inline bool_t tmh_current_is_privileged(void)
-{
-    return IS_PRIV(current->domain);
-}
-
 static inline uint8_t tmh_get_first_byte(pfp_t *pfp)
 {
     void *p = __map_domain_page(pfp);
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index afbc504..b26de57 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -500,6 +500,13 @@ static XSM_DEFAULT(int, sched_op) (void)
     return 0;
 }
 
+static XSM_DEFAULT(int, tmem_control) (uint32_t subop)
+{
+    if ( !IS_PRIV(current->domain) )
+        return -EPERM;
+    return 0;
+}
+
 static XSM_DEFAULT(long, __do_xsm_op)(XEN_GUEST_HANDLE(xsm_op_t) op)
 {
     return -ENOSYS;
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 6483ec6..ff76cae 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -137,6 +137,7 @@ struct xsm_operations {
     int (*lockprof)(void);
     int (*cpupool_op)(void);
     int (*sched_op)(void);
+    int (*tmem_control)(uint32_t subop);
 
     long (*__do_xsm_op) (XEN_GUEST_HANDLE(xsm_op_t) op);
 
@@ -605,6 +606,11 @@ static inline int xsm_sched_op(void)
     return xsm_call(sched_op());
 }
 
+static inline int xsm_tmem_control(uint32_t subop)
+{
+    return xsm_call(tmem_control(subop));
+}
+
 static inline long xsm___do_xsm_op (XEN_GUEST_HANDLE(xsm_op_t) op)
 {
     return xsm_ops->__do_xsm_op(op);
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index 055071a..0a18d50 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -119,6 +119,7 @@ void xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, lockprof);
     set_to_dummy_if_null(ops, cpupool_op);
     set_to_dummy_if_null(ops, sched_op);
+    set_to_dummy_if_null(ops, tmem_control);
 
     set_to_dummy_if_null(ops, __do_xsm_op);
 
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index fc89ebc..f6ec7bd 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -962,6 +962,11 @@ static inline int flask_sched_op(void)
     return domain_has_xen(current->domain, XEN__SCHED_OP);
 }
 
+static inline int flask_tmem_control(uint32_t subop)
+{
+    return domain_has_xen(current->domain, XEN__TMEM_OP);
+}
+
 static int flask_perfcontrol(void)
 {
     return domain_has_xen(current->domain, XEN__PERFCONTROL);
@@ -1599,6 +1604,7 @@ static struct xsm_operations flask_ops = {
     .lockprof = flask_lockprof,
     .cpupool_op = flask_cpupool_op,
     .sched_op = flask_sched_op,
+    .tmem_control = flask_tmem_control,
 
     .__do_xsm_op = do_flask_op,
 
diff --git a/xen/xsm/flask/include/av_perm_to_string.h b/xen/xsm/flask/include/av_perm_to_string.h
index 997f098..5d5a45a 100644
--- a/xen/xsm/flask/include/av_perm_to_string.h
+++ b/xen/xsm/flask/include/av_perm_to_string.h
@@ -29,6 +29,7 @@
    S_(SECCLASS_XEN, XEN__LOCKPROF, "lockprof")
    S_(SECCLASS_XEN, XEN__CPUPOOL_OP, "cpupool_op")
    S_(SECCLASS_XEN, XEN__SCHED_OP, "sched_op")
+   S_(SECCLASS_XEN, XEN__TMEM_OP, "tmem_op")
    S_(SECCLASS_DOMAIN, DOMAIN__SETVCPUCONTEXT, "setvcpucontext")
    S_(SECCLASS_DOMAIN, DOMAIN__PAUSE, "pause")
    S_(SECCLASS_DOMAIN, DOMAIN__UNPAUSE, "unpause")
diff --git a/xen/xsm/flask/include/av_permissions.h b/xen/xsm/flask/include/av_permissions.h
index 8596a55..e6d6a6d 100644
--- a/xen/xsm/flask/include/av_permissions.h
+++ b/xen/xsm/flask/include/av_permissions.h
@@ -29,6 +29,7 @@
 #define XEN__LOCKPROF                             0x08000000UL
 #define XEN__CPUPOOL_OP                           0x10000000UL
 #define XEN__SCHED_OP                             0x20000000UL
+#define XEN__TMEM_OP                              0x40000000UL
 
 #define DOMAIN__SETVCPUCONTEXT                    0x00000001UL
 #define DOMAIN__PAUSE                             0x00000002UL
-- 
1.7.11.4