|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v5 1/4] xen: XENMEM_exchange should only be used/compiled for arch supporting PV guest
From: Julien Grall <jgrall@xxxxxxxxxx>
XENMEM_exchange can only be used by a PV guest, but the check is well
hidden in steal_page(). This is because paging_mode_external() will
return false only for a PV domain.
To make it clearer that this is PV-only, add a check at the beginning
of the implementation.
In a follow-up patch, mfn_to_gfn() will be completely removed for
arch not supporting M2P as it is a call for trouble to use it.
Take the opportunity to compile out the code if CONFIG_PV is not set.
Ideally, we would want to move the hypercall implementation into
arch/x86/pv/mm.c. But this is incredibly tangled.
Signed-off-by: Julien Grall <jgrall@xxxxxxxxxx>
---
Ideally we would want to move the hypercall implementation in
arch/x86/pv/mm.c. But this is a bit messy. So for now just #ifdef it.
Changes in v5:
- Removed the #ifdef CONFIG_X86 as they are not necessary anymore
- Used paging_mode_translate() rather than is_pv_domain()
- Reword the commit message to explain why the #ifdef rather than
implementing mfn_to_gfn() using a BUG_ON() or moving the code
to arch/x86/pv.
Changes in v4:
- Patch added
---
xen/common/memory.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/xen/common/memory.c b/xen/common/memory.c
index e07bd9a5ea4b..9bc78aae35db 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -522,6 +522,7 @@ static bool propagate_node(unsigned int xmf, unsigned int
*memflags)
static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
{
+#ifdef CONFIG_PV
struct xen_memory_exchange exch;
PAGE_LIST_HEAD(in_chunk_list);
PAGE_LIST_HEAD(out_chunk_list);
@@ -609,6 +610,13 @@ static long
memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
goto fail_early;
}
+ if ( paging_mode_translate(d) )
+ {
+ rc = -EOPNOTSUPP;
+ rcu_unlock_domain(d);
+ goto fail_early;
+ }
+
rc = xsm_memory_exchange(XSM_TARGET, d);
if ( rc )
{
@@ -648,7 +656,6 @@ static long
memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
for ( k = 0; k < (1UL << exch.in.extent_order); k++ )
{
-#ifdef CONFIG_X86
p2m_type_t p2mt;
/* Shared pages cannot be exchanged */
@@ -659,14 +666,9 @@ static long
memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
rc = -ENOMEM;
goto fail;
}
-#else /* !CONFIG_X86 */
- mfn = gfn_to_mfn(d, _gfn(gmfn + k));
-#endif
if ( unlikely(!mfn_valid(mfn)) )
{
-#ifdef CONFIG_X86
put_gfn(d, gmfn + k);
-#endif
rc = -EINVAL;
goto fail;
}
@@ -676,16 +678,12 @@ static long
memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
rc = steal_page(d, page, MEMF_no_refcount);
if ( unlikely(rc) )
{
-#ifdef CONFIG_X86
put_gfn(d, gmfn + k);
-#endif
goto fail;
}
page_list_add(page, &in_chunk_list);
-#ifdef CONFIG_X86
put_gfn(d, gmfn + k);
-#endif
}
}
@@ -768,8 +766,7 @@ static long
memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
guest_physmap_add_page(d, _gfn(gpfn), mfn,
exch.out.extent_order);
- if ( !paging_mode_translate(d) &&
- __copy_mfn_to_guest_offset(exch.out.extent_start,
+ if ( __copy_mfn_to_guest_offset(exch.out.extent_start,
(i << out_chunk_order) + j,
mfn) )
rc = -EFAULT;
@@ -815,6 +812,9 @@ static long
memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
if ( __copy_field_to_guest(arg, &exch, nr_exchanged) )
rc = -EFAULT;
return rc;
+#else /* !CONFIG_PV */
+ return -EOPNOTSUPP;
+#endif
}
int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap *xatp,
--
2.17.1
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |