On 20/12/13 10:41, Jan Beulich wrote:
There's really nothing architecture specific here; the
architecture specific handling is limited to
xenmem_add_to_physmap_one().
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Tim Deegan <tim@xxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
---
v3: The restriction that XENMAPSPACE_gmfn_foreign is usable only via
XENMEM_add_to_physmap_range is no longer dropped.
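
For anyone following along: after this patch the common code owns the
XENMEM_add_to_physmap dispatch and the gmfn_range loop, and each
architecture only supplies xenmem_add_to_physmap_one(). A minimal
standalone mock of that layering (all types, constants and values below
are stand-ins, not the real Xen definitions):

/* Reviewer sketch, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

#define XENMAPSPACE_gmfn_range 3     /* illustrative value */
#define DOMID_INVALID 0x7FF4U        /* stand-in */

struct domain { int domain_id; };

/* The only piece that remains per-architecture after this patch. */
static int xenmem_add_to_physmap_one(struct domain *d, uint16_t space,
                                     uint16_t foreign_domid,
                                     unsigned long idx, uint64_t gpfn)
{
    printf("d%d: space %u, idx %#lx -> gpfn %#llx\n", d->domain_id,
           (unsigned)space, idx, (unsigned long long)gpfn);
    return 0;
}

/* Common code: single mappings go straight through, ranges iterate. */
static int xenmem_add_to_physmap(struct domain *d, uint16_t space,
                                 unsigned long idx, uint64_t gpfn,
                                 unsigned int size)
{
    int rc = 0;

    if ( space != XENMAPSPACE_gmfn_range )
        return xenmem_add_to_physmap_one(d, space, DOMID_INVALID, idx, gpfn);

    for ( ; size; --size )
        if ( (rc = xenmem_add_to_physmap_one(d, space, DOMID_INVALID,
                                             idx++, gpfn++)) < 0 )
            break;

    return rc;
}

int main(void)
{
    struct domain d = { .domain_id = 1 };

    return xenmem_add_to_physmap(&d, XENMAPSPACE_gmfn_range, 0x100, 0x200, 4);
}
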
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -968,7 +968,7 @@ void share_xen_page_with_privileged_gues
share_xen_page_with_guest(page, dom_xen, readonly);
}
-static int xenmem_add_to_physmap_one(
+int xenmem_add_to_physmap_one(
struct domain *d,
uint16_t space,
domid_t foreign_domid,
@@ -1118,37 +1118,6 @@ long arch_memory_op(int op, XEN_GUEST_HA
switch ( op )
{
- case XENMEM_add_to_physmap:
- {
- struct xen_add_to_physmap xatp;
- struct domain *d;
-
- if ( copy_from_guest(&xatp, arg, 1) )
- return -EFAULT;
-
- /* Foreign mapping is only supported by add_to_physmap_range */
- if ( xatp.space == XENMAPSPACE_gmfn_foreign )
- return -EINVAL;
-
- d = rcu_lock_domain_by_any_id(xatp.domid);
- if ( d == NULL )
- return -ESRCH;
-
- rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d);
- if ( rc )
- {
- rcu_unlock_domain(d);
- return rc;
- }
-
- rc = xenmem_add_to_physmap_one(d, xatp.space, DOMID_INVALID,
- xatp.idx, xatp.gpfn);
-
- rcu_unlock_domain(d);
-
- return rc;
- }
-
case XENMEM_add_to_physmap_range:
{
struct xen_add_to_physmap_range xatpr;
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4520,20 +4520,23 @@ static int handle_iomem_range(unsigned l
return 0;
}
-static int xenmem_add_to_physmap_once(
+int xenmem_add_to_physmap_one(
struct domain *d,
- const struct xen_add_to_physmap *xatp)
+ uint16_t space,
+ domid_t foreign_domid,
+ unsigned long idx,
+ xen_pfn_t gpfn)
{
struct page_info *page = NULL;
unsigned long gfn = 0; /* gcc ... */
- unsigned long prev_mfn, mfn = 0, gpfn, idx;
+ unsigned long prev_mfn, mfn = 0, old_gpfn;
int rc;
p2m_type_t p2mt;
- switch ( xatp->space )
+ switch ( space )
{
case XENMAPSPACE_shared_info:
- if ( xatp->idx == 0 )
+ if ( idx == 0 )
mfn = virt_to_mfn(d->shared_info);
break;
case XENMAPSPACE_grant_table:
@@ -4542,9 +4545,8 @@ static int xenmem_add_to_physmap_once(
if ( d->grant_table->gt_version == 0 )
d->grant_table->gt_version = 1;
- idx = xatp->idx;
if ( d->grant_table->gt_version == 2 &&
- (xatp->idx & XENMAPIDX_grant_table_status) )
+ (idx & XENMAPIDX_grant_table_status) )
{
idx &= ~XENMAPIDX_grant_table_status;
if ( idx < nr_status_frames(d->grant_table) )
@@ -4566,9 +4568,9 @@ static int xenmem_add_to_physmap_once(
case XENMAPSPACE_gmfn:
{
p2m_type_t p2mt;
- gfn = xatp->idx;
- idx = mfn_x(get_gfn_unshare(d, xatp->idx, &p2mt));
+ gfn = idx;
+ idx = mfn_x(get_gfn_unshare(d, idx, &p2mt));
/* If the page is still shared, exit early */
if ( p2m_is_shared(p2mt) )
{
@@ -4589,41 +4591,38 @@ static int xenmem_add_to_physmap_once(
{
if ( page )
put_page(page);
- if ( xatp->space == XENMAPSPACE_gmfn ||
- xatp->space == XENMAPSPACE_gmfn_range )
+ if ( space == XENMAPSPACE_gmfn || space == XENMAPSPACE_gmfn_range )
put_gfn(d, gfn);
return -EINVAL;
}
/* Remove previously mapped page if it was present. */
- prev_mfn = mfn_x(get_gfn(d, xatp->gpfn, &p2mt));
+ prev_mfn = mfn_x(get_gfn(d, gpfn, &p2mt));
if ( mfn_valid(prev_mfn) )
{
if ( is_xen_heap_mfn(prev_mfn) )
/* Xen heap frames are simply unhooked from this phys slot. */
- guest_physmap_remove_page(d, xatp->gpfn, prev_mfn, PAGE_ORDER_4K);
+ guest_physmap_remove_page(d, gpfn, prev_mfn, PAGE_ORDER_4K);
else
/* Normal domain memory is freed, to avoid leaking memory. */
- guest_remove_page(d, xatp->gpfn);
+ guest_remove_page(d, gpfn);
}
/* In the XENMAPSPACE_gmfn case we still hold a ref on the old page. */
- put_gfn(d, xatp->gpfn);
+ put_gfn(d, gpfn);
/* Unmap from old location, if any. */
- gpfn = get_gpfn_from_mfn(mfn);
- ASSERT( gpfn != SHARED_M2P_ENTRY );
- if ( xatp->space == XENMAPSPACE_gmfn ||
- xatp->space == XENMAPSPACE_gmfn_range )
- ASSERT( gpfn == gfn );
- if ( gpfn != INVALID_M2P_ENTRY )
- guest_physmap_remove_page(d, gpfn, mfn, PAGE_ORDER_4K);
+ old_gpfn = get_gpfn_from_mfn(mfn);
+ ASSERT( old_gpfn != SHARED_M2P_ENTRY );
+ if ( space == XENMAPSPACE_gmfn || space == XENMAPSPACE_gmfn_range )
+ ASSERT( old_gpfn == gfn );
+ if ( old_gpfn != INVALID_M2P_ENTRY )
+ guest_physmap_remove_page(d, old_gpfn, mfn, PAGE_ORDER_4K);
/* Map at new location. */
- rc = guest_physmap_add_page(d, xatp->gpfn, mfn, PAGE_ORDER_4K);
+ rc = guest_physmap_add_page(d, gpfn, mfn, PAGE_ORDER_4K);
/* In the XENMAPSPACE_gmfn, we took a ref of the gfn at the top */
- if ( xatp->space == XENMAPSPACE_gmfn ||
- xatp->space == XENMAPSPACE_gmfn_range )
+ if ( space == XENMAPSPACE_gmfn || space == XENMAPSPACE_gmfn_range )
put_gfn(d, gfn);
if ( page )
@@ -4632,90 +4631,12 @@ static int xenmem_add_to_physmap_once(
return rc;
}
-static int xenmem_add_to_physmap(struct domain *d,
- struct xen_add_to_physmap *xatp)
-{
- struct xen_add_to_physmap start_xatp;
- int rc = 0;
-
- if ( xatp->space == XENMAPSPACE_gmfn_range )
- {
- if ( need_iommu(d) )
- this_cpu(iommu_dont_flush_iotlb) = 1;
-
- start_xatp = *xatp;
- while ( xatp->size > 0 )
- {
- rc = xenmem_add_to_physmap_once(d, xatp);
- if ( rc < 0 )
- break;
-
- xatp->idx++;
- xatp->gpfn++;
- xatp->size--;
-
- /* Check for continuation if it's not the last interation */
- if ( xatp->size > 0 && hypercall_preempt_check() )
- {
- rc = -EAGAIN;
- break;
- }
- }
-
- if ( need_iommu(d) )
- {
- this_cpu(iommu_dont_flush_iotlb) = 0;
- iommu_iotlb_flush(d, start_xatp.idx, start_xatp.size - xatp->size);
- iommu_iotlb_flush(d, start_xatp.gpfn, start_xatp.size - xatp->size);
- }
-
- return rc;
- }
-
- return xenmem_add_to_physmap_once(d, xatp);
-}
-
long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
{
int rc;
switch ( op )
{
- case XENMEM_add_to_physmap:
- {
- struct xen_add_to_physmap xatp;
- struct domain *d;
-
- if ( copy_from_guest(&xatp, arg, 1) )
- return -EFAULT;
-
- d = rcu_lock_domain_by_any_id(xatp.domid);
- if ( d == NULL )
- return -ESRCH;
-
- if ( xsm_add_to_physmap(XSM_TARGET, current->domain, d) )
- {
- rcu_unlock_domain(d);
- return -EPERM;
- }
-
- rc = xenmem_add_to_physmap(d, &xatp);
-
- rcu_unlock_domain(d);
-
- if ( xatp.space == XENMAPSPACE_gmfn_range )
- {
- if ( rc && __copy_to_guest(arg, &xatp, 1) )
- rc = -EFAULT;
-
- if ( rc == -EAGAIN )
- rc = hypercall_create_continuation(
- __HYPERVISOR_memory_op, "ih", op, arg);
- }
-
- return rc;
- }
-
case XENMEM_set_memory_map:
{
struct xen_foreign_memory_map fmap;
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -55,34 +55,6 @@ int compat_arch_memory_op(int op, XEN_GU
switch ( op )
{
- case XENMEM_add_to_physmap:
- {
- struct compat_add_to_physmap cmp;
- struct xen_add_to_physmap *nat = COMPAT_ARG_XLAT_VIRT_BASE;
-
- if ( copy_from_guest(&cmp, arg, 1) )
- return -EFAULT;
-
- XLAT_add_to_physmap(nat, &cmp);
- rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));
-
- if ( !rc || cmp.space != XENMAPSPACE_gmfn_range )
- break;
-
- XLAT_add_to_physmap(&cmp, nat);
- if ( __copy_to_guest(arg, &cmp, 1) )
- {
- if ( rc == __HYPERVISOR_memory_op )
- hypercall_cancel_continuation();
- return -EFAULT;
- }
-
- if ( rc == __HYPERVISOR_memory_op )
- hypercall_xlat_continuation(NULL, 0x2, nat, arg);
-
- break;
- }
-
case XENMEM_set_memory_map:
{
struct compat_foreign_memory_map cmp;
--- a/xen/common/compat/memory.c
+++ b/xen/common/compat/memory.c
@@ -26,11 +26,13 @@ int compat_memory_op(unsigned int cmd, X
XEN_GUEST_HANDLE_PARAM(void) hnd;
struct xen_memory_reservation *rsrv;
struct xen_memory_exchange *xchg;
+ struct xen_add_to_physmap *atp;
struct xen_remove_from_physmap *xrfp;
} nat;
union {
struct compat_memory_reservation rsrv;
struct compat_memory_exchange xchg;
+ struct compat_add_to_physmap atp;
} cmp;
set_xen_guest_handle(nat.hnd, COMPAT_ARG_XLAT_VIRT_BASE);
@@ -187,6 +189,14 @@ int compat_memory_op(unsigned int cmd, X
nat.hnd = compat;
break;
+ case XENMEM_add_to_physmap:
+ if ( copy_from_guest(&cmp.atp, compat, 1) )
+ return -EFAULT;
+
+ XLAT_add_to_physmap(nat.atp, &cmp.atp);
+
+ break;
+
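
For readers not steeped in the compat machinery, the pattern this hunk
plugs into is: copy the 32-bit guest struct in, widen it into the native
struct in the translation area, run the common handler, and narrow it
back on the copy-out path further down. A standalone mock of that
round-trip (struct layouts are illustrative only; the real ones come
from the generated compat headers):

#include <stdint.h>
#include <stdio.h>

struct compat_add_to_physmap {        /* 32-bit guest view */
    uint16_t domid;
    uint16_t size;
    uint32_t space;
    uint32_t idx;                     /* guest "unsigned long" is 32 bits */
    uint32_t gpfn;
};

struct xen_add_to_physmap {           /* native 64-bit view */
    uint16_t domid;
    uint16_t size;
    uint32_t space;
    uint64_t idx;
    uint64_t gpfn;
};

/* Roughly what XLAT_add_to_physmap(nat, &cmp) expands to: widen. */
static void xlat_to_native(struct xen_add_to_physmap *nat,
                           const struct compat_add_to_physmap *cmp)
{
    nat->domid = cmp->domid;
    nat->size  = cmp->size;
    nat->space = cmp->space;
    nat->idx   = cmp->idx;
    nat->gpfn  = cmp->gpfn;
}

/* The reverse direction, for the copy-back on continuation. */
static void xlat_to_compat(struct compat_add_to_physmap *cmp,
                           const struct xen_add_to_physmap *nat)
{
    cmp->size = nat->size;
    cmp->idx  = (uint32_t)nat->idx;
    cmp->gpfn = (uint32_t)nat->gpfn;
}

int main(void)
{
    struct compat_add_to_physmap cmp = { 0, 8, 3, 0x100, 0x200 };
    struct xen_add_to_physmap nat;

    xlat_to_native(&nat, &cmp);
    nat.idx += 3; nat.gpfn += 3; nat.size -= 3;   /* handler made progress */
    xlat_to_compat(&cmp, &nat);
    printf("resume at idx %#x, %u left\n", cmp.idx, (unsigned)cmp.size);
    return 0;
}
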
case XENMEM_remove_from_physmap:
{
struct compat_remove_from_physmap cmp;
@@ -315,6 +325,16 @@ int compat_memory_op(unsigned int cmd, X
case XENMEM_remove_from_physmap:
break;
+ case XENMEM_add_to_physmap:
+ if ( !rc || cmp.atp.space != XENMAPSPACE_gmfn_range )
+ break;

Why is this check here in the compat layer? Surely the -ENOSYS from
do_memory_op is sufficient to be handled by the !rc check?

+
+ XLAT_add_to_physmap(&cmp.atp, nat.atp);
+ if ( __copy_to_guest(compat, &cmp.atp, 1) )
+ rc = -EFAULT;
+
+ break;
+
default:
domain_crash(current->domain);
split = 0;
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -542,6 +542,53 @@ static long memory_exchange(XEN_GUEST_HA
return rc;
}
+static int xenmem_add_to_physmap(struct domain *d,
+ struct xen_add_to_physmap *xatp)
+{
+ struct xen_add_to_physmap start_xatp;
+ int rc = 0;
+
+ if ( xatp->space != XENMAPSPACE_gmfn_range )
+ return xenmem_add_to_physmap_one(d, xatp->space, DOMID_INVALID,
+ xatp->idx, xatp->gpfn);
+
+#ifdef HAS_PASSTHROUGH
+ if ( need_iommu(d) )
+ this_cpu(iommu_dont_flush_iotlb) = 1;
+#endif
+
+ start_xatp = *xatp;
+ while ( xatp->size > 0 )
+ {
+ rc = xenmem_add_to_physmap_one(d, xatp->space, DOMID_INVALID,
+ xatp->idx, xatp->gpfn);
+ if ( rc < 0 )
+ break;
+
+ xatp->idx++;
+ xatp->gpfn++;
+ xatp->size--;
+
+ /* Check for continuation if it's not the last iteration. */
+ if ( xatp->size > 0 && hypercall_preempt_check() )
+ {
+ rc = -EAGAIN;
+ break;
+ }
+ }
+
+#ifdef HAS_PASSTHROUGH
+ if ( need_iommu(d) )
+ {
+ this_cpu(iommu_dont_flush_iotlb) = 0;
+ iommu_iotlb_flush(d, start_xatp.idx, start_xatp.size - xatp->size);
+ iommu_iotlb_flush(d, start_xatp.gpfn, start_xatp.size - xatp->size);
+ }
+#endif
+
+ return rc;
+}
+
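
One thing worth calling out in the hunk above: per-page IOTLB flushes
are suppressed for the duration of the loop and replaced by two batched
flushes covering only the part of the range actually processed
(start_xatp.size - xatp->size), so even an early -EAGAIN exit flushes
exactly what got mapped. A standalone sketch of that pattern, with
stubbed iommu_* calls (not the real Xen API):

#include <stdio.h>

static int dont_flush;                /* stands in for the per-CPU flag */

static void iommu_iotlb_flush(unsigned long first, unsigned long count)
{
    printf("flush %lu page(s) from %#lx\n", count, first);
}

static int map_one(unsigned long idx, unsigned long gpfn)
{
    if ( !dont_flush )
        iommu_iotlb_flush(gpfn, 1);   /* the per-page flush being batched */
    (void)idx;
    return 0;
}

static int map_range(unsigned long idx, unsigned long gpfn,
                     unsigned long size)
{
    unsigned long done = 0;
    int rc = 0;

    dont_flush = 1;                   /* suppress per-page flushes */

    for ( ; done < size; ++done )
        if ( (rc = map_one(idx + done, gpfn + done)) < 0 )
            break;

    dont_flush = 0;

    /* Flush only what was actually processed - source and destination. */
    iommu_iotlb_flush(idx, done);
    iommu_iotlb_flush(gpfn, done);

    return rc;
}

int main(void)
{
    return map_range(0x100, 0x200, 8);
}
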
long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
{
struct domain *d;
@@ -673,6 +720,44 @@ long do_memory_op(unsigned long cmd, XEN
break;
+ case XENMEM_add_to_physmap:
+ {
+ struct xen_add_to_physmap xatp;
+
+ if ( copy_from_guest(&xatp, arg, 1) )
+ return -EFAULT;
+
+ /* Foreign mapping is only possible via add_to_physmap_range. */
+ if ( xatp.space == XENMAPSPACE_gmfn_foreign )
+ return -ENOSYS;
+
+ d = rcu_lock_domain_by_any_id(xatp.domid);
+ if ( d == NULL )
+ return -ESRCH;
+
+ rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d);
+ if ( rc )
+ {
+ rcu_unlock_domain(d);
+ return rc;
+ }
+
+ rc = xenmem_add_to_physmap(d, &xatp);
+
+ rcu_unlock_domain(d);
+
+ if ( xatp.space == XENMAPSPACE_gmfn_range && rc == -EAGAIN )
+ {
+ if ( !__copy_to_guest(arg, &xatp, 1) )
+ rc = hypercall_create_continuation(
+ __HYPERVISOR_memory_op, "ih", op, arg);
+ else
+ rc = -EFAULT;
+ }
+
+ return rc;
+ }
+
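
The continuation flow above is the standard one: on preemption the
(by now advanced) xatp is copied back into the guest's buffer, so
re-executing the hypercall resumes where the loop stopped. A standalone
mock of that control flow (the guest copy and the continuation
machinery are stand-ins):

#include <stdio.h>

#define EAGAIN 11                     /* illustrative errno value */

struct atp { unsigned long idx, gpfn; unsigned int size; };

/* Process up to 'budget' pages, then ask to be re-invoked. */
static int add_to_physmap(struct atp *a, unsigned int budget)
{
    while ( a->size > 0 )
    {
        /* ... map a->idx at a->gpfn ... */
        a->idx++; a->gpfn++; a->size--;

        if ( a->size > 0 && --budget == 0 )
            return -EAGAIN;           /* hypercall_preempt_check() fired */
    }
    return 0;
}

int main(void)
{
    struct atp guest_arg = { 0x100, 0x200, 10 };
    int rc;

    /* Each -EAGAIN models __copy_to_guest() of the updated struct plus
     * hypercall_create_continuation(); the guest then re-executes the
     * hypercall and resumes from the stored idx/gpfn/size. */
    while ( (rc = add_to_physmap(&guest_arg, 3)) == -EAGAIN )
        printf("continue at idx %#lx, %u page(s) left\n",
               guest_arg.idx, guest_arg.size);

    return rc;
}
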
case XENMEM_remove_from_physmap:
{
struct xen_remove_from_physmap xrfp;
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -356,6 +356,10 @@ static inline unsigned int get_order_fro
void scrub_one_page(struct page_info *);
+int xenmem_add_to_physmap_one(struct domain *d, uint16_t space,
+ domid_t foreign_domid,
+ unsigned long idx, xen_pfn_t gpfn);
+
/* Returns 1 on success, 0 on error, negative if the ring
* for event propagation is full in the presence of paging */
int guest_remove_page(struct domain *d, unsigned long gmfn);