Re: [Xen-devel] [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
On 12/16/2013 09:38 PM, Mukesh Rathor wrote:

In preparation for the next patch, we update xsm_add_to_physmap to
allow for checking of a foreign domain. Thus, the current domain must
have the right to update the mappings of the target domain with pages
from the foreign domain.

Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>

Acked-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>

For some reason I only have v3 and cannot find the v4-v7 patches in my
inbox, although I do have the copies that came via the xen-devel list.
I recall having some transient email issues during that time, so if
Oracle's mail servers have a more aggressive retry policy, the messages
could have been lost due to that; I wasn't intending to ignore your
patches.

---
 xen/arch/arm/mm.c       |  4 ++--
 xen/arch/x86/mm.c       | 18 +++++++++++++++---
 xen/include/xsm/dummy.h | 10 ++++++++--
 xen/include/xsm/xsm.h   |  6 +++---
 xen/xsm/flask/hooks.c   |  9 +++++++--
 5 files changed, 35 insertions(+), 12 deletions(-)

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 654281a..3515526 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -1134,7 +1134,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( d == NULL )
             return -ESRCH;
 
-        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d);
+        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, NULL);
         if ( rc )
         {
             rcu_unlock_domain(d);
@@ -1165,7 +1165,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( d == NULL )
             return -ESRCH;
 
-        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d);
+        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, NULL);
         if ( rc )
         {
             rcu_unlock_domain(d);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index ae332f4..0cae437 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4730,7 +4730,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( d == NULL )
             return -ESRCH;
 
-        if ( xsm_add_to_physmap(XSM_TARGET, current->domain, d) )
+        if ( xsm_add_to_physmap(XSM_TARGET, current->domain, d, NULL) )
         {
             rcu_unlock_domain(d);
             return -EPERM;
@@ -4756,7 +4756,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
     case XENMEM_add_to_physmap_range:
     {
         struct xen_add_to_physmap_range xatpr;
-        struct domain *d;
+        struct domain *d, *fd = NULL;
 
         if ( copy_from_guest(&xatpr, arg, 1) )
             return -EFAULT;
@@ -4769,10 +4769,22 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( d == NULL )
             return -ESRCH;
 
-        if ( (rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d)) == 0 )
+        if ( xatpr.space == XENMAPSPACE_gmfn_foreign )
+        {
+            fd = get_pg_owner(xatpr.foreign_domid);
+            if ( fd == NULL )
+            {
+                rcu_unlock_domain(d);
+                return -ESRCH;
+            }
+        }
+        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, fd);
+        if ( rc == 0 )
             rc = xenmem_add_to_physmap_range(d, &xatpr);
 
         rcu_unlock_domain(d);
+        if ( fd )
+            put_pg_owner(fd);
 
         if ( rc == -EAGAIN )
             rc = hypercall_create_continuation(
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index eb9e1a1..1228e52 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -467,10 +467,16 @@ static XSM_INLINE int xsm_pci_config_permission(XSM_DEFAULT_ARG struct domain *d
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_add_to_physmap(XSM_DEFAULT_ARG struct domain *d1, struct domain *d2)
+static XSM_INLINE int xsm_add_to_physmap(XSM_DEFAULT_ARG struct domain *d, struct domain *t, struct domain *f)
 {
+    int rc;
+
     XSM_ASSERT_ACTION(XSM_TARGET);
-    return xsm_default_action(action, d1, d2);
+    rc = xsm_default_action(action, d, t);
+    if ( f && !rc )
+        rc = xsm_default_action(action, d, f);
+
+    return rc;
 }
 
 static XSM_INLINE int xsm_remove_from_physmap(XSM_DEFAULT_ARG struct domain *d1, struct domain *d2)
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 1939453..9ee9543 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -90,7 +90,7 @@ struct xsm_operations {
     int (*memory_adjust_reservation) (struct domain *d1, struct domain *d2);
     int (*memory_stat_reservation) (struct domain *d1, struct domain *d2);
     int (*memory_pin_page) (struct domain *d1, struct domain *d2, struct page_info *page);
-    int (*add_to_physmap) (struct domain *d1, struct domain *d2);
+    int (*add_to_physmap) (struct domain *d, struct domain *t, struct domain *f);
     int (*remove_from_physmap) (struct domain *d1, struct domain *d2);
     int (*claim_pages) (struct domain *d);
 
@@ -344,9 +344,9 @@ static inline int xsm_memory_pin_page(xsm_default_t def, struct domain *d1, stru
     return xsm_ops->memory_pin_page(d1, d2, page);
 }
 
-static inline int xsm_add_to_physmap(xsm_default_t def, struct domain *d1, struct domain *d2)
+static inline int xsm_add_to_physmap(xsm_default_t def, struct domain *d, struct domain *t, struct domain *f)
 {
-    return xsm_ops->add_to_physmap(d1, d2);
+    return xsm_ops->add_to_physmap(d, t, f);
 }
 
 static inline int xsm_remove_from_physmap(xsm_default_t def, struct domain *d1, struct domain *d2)
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 7cdef04..81294b1 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1068,9 +1068,14 @@ static inline int flask_tmem_control(void)
     return domain_has_xen(current->domain, XEN__TMEM_CONTROL);
 }
 
-static int flask_add_to_physmap(struct domain *d1, struct domain *d2)
+static int flask_add_to_physmap(struct domain *d, struct domain *t, struct domain *f)
 {
-    return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__PHYSMAP);
+    int rc;
+
+    rc = domain_has_perm(d, t, SECCLASS_MMU, MMU__PHYSMAP);
+    if ( f && !rc )
+        rc = domain_has_perm(d, f, SECCLASS_MMU, MMU__MAP_READ|MMU__MAP_WRITE);
+    return rc;
 }
 
 static int flask_remove_from_physmap(struct domain *d1, struct domain *d2)
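For illustration, here is a minimal standalone sketch of the check
ordering the updated hook performs: the usual target check runs first,
and the foreign-domain check runs only when a foreign domain was
actually supplied. The domain type and privilege test below are
simplified stand-ins, not Xen code.

/* sketch.c - simplified model of the dual check; not Xen code. */
#include <stdio.h>
#include <stdbool.h>

struct fake_domain {
    int id;
    bool privileged;            /* stand-in for the real privilege test */
};

/* Stand-in for xsm_default_action(XSM_TARGET, src, tgt). */
static int default_action(const struct fake_domain *src,
                          const struct fake_domain *tgt)
{
    return (src == tgt || src->privileged) ? 0 : -1 /* think -EPERM */;
}

/*
 * Mirrors the shape of the updated dummy hook: check (d, t) first,
 * then (d, f) only if a foreign domain was supplied and the first
 * check passed.
 */
static int add_to_physmap_check(const struct fake_domain *d,
                                const struct fake_domain *t,
                                const struct fake_domain *f)
{
    int rc = default_action(d, t);

    if ( f && !rc )
        rc = default_action(d, f);

    return rc;
}

int main(void)
{
    struct fake_domain dom0 = { 0, true };
    struct fake_domain domU = { 1, false };
    struct fake_domain domF = { 2, false };

    /* Privileged caller mapping a foreign page into a guest: allowed (0). */
    printf("dom0 maps domF page into domU: %d\n",
           add_to_physmap_check(&dom0, &domU, &domF));

    /* Unprivileged caller touching another domain: refused (-1). */
    printf("domU maps into dom0: %d\n",
           add_to_physmap_check(&domU, &dom0, NULL));

    return 0;
}

The foreign check is purely additive: callers that pass NULL for the
foreign domain, as the existing arm and x86 call sites do, keep the
previous behaviour.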
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel