[Xen-devel] [PATCH V9 5/6] xen/xsm: Split vm_event_op into three separate labels
The XSM label vm_event_op has been used to control the three memops
governing mem_access, mem_paging and mem_sharing. While these systems
rely on vm_event, they are not vm_event operations themselves. Thus,
this patch introduces a separate label for each of these memops.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Acked-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/mm/mem_paging.c        |  2 +-
 xen/arch/x86/mm/mem_sharing.c       |  2 +-
 xen/common/mem_access.c             |  2 +-
 xen/include/xsm/dummy.h             | 20 +++++++++++++++++++-
 xen/include/xsm/xsm.h               | 33 ++++++++++++++++++++++++++++++---
 xen/xsm/dummy.c                     | 13 ++++++++++++-
 xen/xsm/flask/hooks.c               | 33 ++++++++++++++++++++++++++++++---
 xen/xsm/flask/policy/access_vectors |  6 ++++++
 8 files changed, 100 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/mm/mem_paging.c b/xen/arch/x86/mm/mem_paging.c
index 17d2319..9ee3aba 100644
--- a/xen/arch/x86/mm/mem_paging.c
+++ b/xen/arch/x86/mm/mem_paging.c
@@ -39,7 +39,7 @@ int mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg)
     if ( rc )
         return rc;
 
-    rc = xsm_vm_event_op(XSM_DM_PRIV, d, XENMEM_paging_op);
+    rc = xsm_mem_paging(XSM_DM_PRIV, d);
     if ( rc )
         goto out;
 
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index ff01378..78fb013 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1311,7 +1311,7 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
     if ( rc )
         return rc;
 
-    rc = xsm_vm_event_op(XSM_DM_PRIV, d, XENMEM_sharing_op);
+    rc = xsm_mem_sharing(XSM_DM_PRIV, d);
     if ( rc )
         goto out;
 
diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
index 511c8c5..aa00513 100644
--- a/xen/common/mem_access.c
+++ b/xen/common/mem_access.c
@@ -48,7 +48,7 @@ int mem_access_memop(unsigned long cmd,
     if ( !p2m_mem_access_sanity_check(d) )
         goto out;
 
-    rc = xsm_vm_event_op(XSM_DM_PRIV, d, XENMEM_access_op);
+    rc = xsm_mem_access(XSM_DM_PRIV, d);
     if ( rc )
         goto out;
 
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index 50ee929..16967ed 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -519,11 +519,29 @@ static XSM_INLINE int xsm_vm_event_control(XSM_DEFAULT_ARG struct domain *d, int
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_vm_event_op(XSM_DEFAULT_ARG struct domain *d, int op)
+#ifdef HAS_MEM_ACCESS
+static XSM_INLINE int xsm_mem_access(XSM_DEFAULT_ARG struct domain *d)
 {
     XSM_ASSERT_ACTION(XSM_DM_PRIV);
     return xsm_default_action(action, current->domain, d);
 }
+#endif
+
+#ifdef HAS_MEM_PAGING
+static XSM_INLINE int xsm_mem_paging(XSM_DEFAULT_ARG struct domain *d)
+{
+    XSM_ASSERT_ACTION(XSM_DM_PRIV);
+    return xsm_default_action(action, current->domain, d);
+}
+#endif
+
+#ifdef HAS_MEM_SHARING
+static XSM_INLINE int xsm_mem_sharing(XSM_DEFAULT_ARG struct domain *d)
+{
+    XSM_ASSERT_ACTION(XSM_DM_PRIV);
+    return xsm_default_action(action, current->domain, d);
+}
+#endif
 
 #ifdef CONFIG_X86
 static XSM_INLINE int xsm_do_mca(XSM_DEFAULT_VOID)
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index ca8371c..49f06c9 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -142,7 +142,18 @@ struct xsm_operations {
     int (*get_vnumainfo) (struct domain *d);
 
     int (*vm_event_control) (struct domain *d, int mode, int op);
-    int (*vm_event_op) (struct domain *d, int op);
+
+#ifdef HAS_MEM_ACCESS
+    int (*mem_access) (struct domain *d);
+#endif
+
+#ifdef HAS_MEM_PAGING
+    int (*mem_paging) (struct domain *d);
+#endif
+
+#ifdef HAS_MEM_SHARING
+    int (*mem_sharing) (struct domain *d);
+#endif
 
 #ifdef CONFIG_X86
     int (*do_mca) (void);
@@ -546,10 +557,26 @@ static inline int xsm_vm_event_control (xsm_default_t def, struct domain *d, int
     return xsm_ops->vm_event_control(d, mode, op);
 }
 
-static inline int xsm_vm_event_op (xsm_default_t def, struct domain *d, int op)
+#ifdef HAS_MEM_ACCESS
+static inline int xsm_mem_access (xsm_default_t def, struct domain *d)
 {
-    return xsm_ops->vm_event_op(d, op);
+    return xsm_ops->mem_access(d);
 }
+#endif
+
+#ifdef HAS_MEM_PAGING
+static inline int xsm_mem_paging (xsm_default_t def, struct domain *d)
+{
+    return xsm_ops->mem_paging(d);
+}
+#endif
+
+#ifdef HAS_MEM_SHARING
+static inline int xsm_mem_sharing (xsm_default_t def, struct domain *d)
+{
+    return xsm_ops->mem_sharing(d);
+}
+#endif
 
 #ifdef CONFIG_X86
 static inline int xsm_do_mca(xsm_default_t def)
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index 6d12d32..3ddb4f6 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -119,7 +119,18 @@ void xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, map_gmfn_foreign);
 
     set_to_dummy_if_null(ops, vm_event_control);
-    set_to_dummy_if_null(ops, vm_event_op);
+
+#ifdef HAS_MEM_ACCESS
+    set_to_dummy_if_null(ops, mem_access);
+#endif
+
+#ifdef HAS_MEM_PAGING
+    set_to_dummy_if_null(ops, mem_paging);
+#endif
+
+#ifdef HAS_MEM_SHARING
+    set_to_dummy_if_null(ops, mem_sharing);
+#endif
 
 #ifdef CONFIG_X86
     set_to_dummy_if_null(ops, do_mca);
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 061d897..6215001 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1139,10 +1139,26 @@ static int flask_vm_event_control(struct domain *d, int mode, int op)
     return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__VM_EVENT);
 }
 
-static int flask_vm_event_op(struct domain *d, int op)
+#ifdef HAS_MEM_ACCESS
+static int flask_mem_access(struct domain *d)
 {
-    return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__VM_EVENT);
+    return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__MEM_ACCESS);
+}
+#endif
+
+#ifdef HAS_MEM_PAGING
+static int flask_mem_paging(struct domain *d)
+{
+    return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__MEM_PAGING);
+}
+#endif
+
+#ifdef HAS_MEM_SHARING
+static int flask_mem_sharing(struct domain *d)
+{
+    return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__MEM_SHARING);
 }
+#endif
 
 #if defined(HAS_PASSTHROUGH) && defined(HAS_PCI)
 static int flask_get_device_group(uint32_t machine_bdf)
@@ -1579,7 +1595,18 @@ static struct xsm_operations flask_ops = {
     .get_vnumainfo = flask_get_vnumainfo,
 
     .vm_event_control = flask_vm_event_control,
-    .vm_event_op = flask_vm_event_op,
+
+#ifdef HAS_MEM_ACCESS
+    .mem_access = flask_mem_access,
+#endif
+
+#ifdef HAS_MEM_PAGING
+    .mem_paging = flask_mem_paging,
+#endif
+
+#ifdef HAS_MEM_SHARING
+    .mem_sharing = flask_mem_sharing,
+#endif
 
 #ifdef CONFIG_COMPAT
     .do_compat_op = compat_flask_op,
diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
index 9a9d1c5..af4a6ae 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -223,6 +223,12 @@ class domain2
 # XEN_DOMCTL_monitor_op
 # XEN_DOMCTL_vm_event_op
     vm_event
+# XENMEM_access_op
+    mem_access
+# XENMEM_paging_op
+    mem_paging
+# XENMEM_sharing_op
+    mem_sharing
 }
 
 # Similar to class domain, but primarily contains domctls related to HVM domains
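For illustration only (not part of the patch): with the hooks split, an XSM
module can grant each of the three memops independently, which the single
vm_event_op label could not express. Below is a minimal sketch of a
hypothetical out-of-tree module that would permit XENMEM_access_op but refuse
paging and sharing; the my_* names are invented for this example and only the
hooks touched by this patch are shown.

#include <xen/errno.h>
#include <xen/sched.h>
#include <xsm/xsm.h>

#ifdef HAS_MEM_ACCESS
static int my_mem_access(struct domain *d)
{
    return 0;       /* allow XENMEM_access_op on any domain */
}
#endif

#ifdef HAS_MEM_PAGING
static int my_mem_paging(struct domain *d)
{
    return -EPERM;  /* deny XENMEM_paging_op */
}
#endif

#ifdef HAS_MEM_SHARING
static int my_mem_sharing(struct domain *d)
{
    return -EPERM;  /* deny XENMEM_sharing_op */
}
#endif

static struct xsm_operations my_xsm_ops = {
#ifdef HAS_MEM_ACCESS
    .mem_access  = my_mem_access,
#endif
#ifdef HAS_MEM_PAGING
    .mem_paging  = my_mem_paging,
#endif
#ifdef HAS_MEM_SHARING
    .mem_sharing = my_mem_sharing,
#endif
    /* Hooks left NULL are filled in with the dummy defaults
     * by xsm_fixup_ops(), per xen/xsm/dummy.c above. */
};

With the previous single vm_event_op hook, all three memops were necessarily
granted or denied together.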
--
2.1.4

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel