[Xen-devel] [PATCH V2] tools/libxc, xen/x86: Added xc_set_mem_access_multi()
Currently it is only possible to set mem_access restrictions for a
contiguous range of GFNs (or, as a particular case, for a single GFN).
This patch introduces a new libxc function taking an array of GFNs.
The alternative would be to set each page in turn, incurring a
userspace/hypervisor round trip for each call and triggering a TLB flush
per page set.
Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
---
Changes since V1 / RFC:
- Renamed xc_set_mem_access_sparse() to xc_set_mem_access_multi(),
and XENMEM_access_op_set_access_sparse to
XENMEM_access_op_set_access_multi.
- Renamed the 'nr' parameter to 'size'.
- Wrapped long line in the implementation of xc_set_mem_access_multi().
- Factored out common code in _p2m_set_mem_access() (and modified
p2m_set_altp2m_mem_access() in the process, to take an unsigned
long argument instead of a gfn_t).
- Factored out xenmem_access_t to p2m_access_t conversion code in
p2m_xenmem_access_to_p2m_access().
- Added hypercall continuation support (an illustrative note follows the
  diffstat below).
- Added compat translation support.
- No longer allocating buffers (now using copy_from_guest_offset()).
- Added support for setting an array of access restrictions, as
suggested by Tamas Lengyel.
- This patch incorporates Jan Beulich's "memory: fix compat handling
of XENMEM_access_op".
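
For illustration only (not part of the patch): a caller, e.g. an
introspection agent, could batch scattered GFNs into a single call roughly
as below. xch, domid, the GFN values and the error handling are
placeholder assumptions; the snippet assumes the usual xenctrl.h context.

    /* Restrict three scattered pages with one hypercall instead of three. */
    uint64_t pages[3]  = { 0x1000, 0x23456, 0x78901 };
    uint8_t  access[3] = { XENMEM_access_r, XENMEM_access_rx, XENMEM_access_n };

    if ( xc_set_mem_access_multi(xch, domid, access, pages, 3) < 0 )
        fprintf(stderr, "xc_set_mem_access_multi() failed: %s\n",
                strerror(errno));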
---
tools/libxc/include/xenctrl.h | 4 ++
tools/libxc/xc_mem_access.c | 38 ++++++++++
xen/arch/x86/mm/p2m.c | 161 ++++++++++++++++++++++++++++++++----------
xen/common/compat/memory.c | 24 +++++--
xen/common/mem_access.c | 11 +++
xen/include/public/memory.h | 14 +++-
xen/include/xen/p2m-common.h | 6 ++
xen/include/xlat.lst | 2 +-
8 files changed, 215 insertions(+), 45 deletions(-)
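
A brief note on the continuation handling (illustrative sketch only, not
new code): when p2m_set_mem_access_multi() is preempted it returns the next
start index, which is always a multiple of MEMOP_CMD_MASK + 1 and therefore
cannot clobber the op bits, and mem_access_memop() folds that index into
the memop command word before re-issuing the hypercall. On re-entry the
resume point is recovered by the pre-existing decode at the top of
mem_access_memop(), roughly:

    /* Sketch of the continuation encoding; names follow the existing code. */
    unsigned long start_iter = cmd & ~MEMOP_CMD_MASK;  /* resume point */
    unsigned int  op         = cmd &  MEMOP_CMD_MASK;  /* low bits: XENMEM_access_op */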
diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 560ce7b..c2f14a6 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -2125,6 +2125,10 @@ int xc_set_mem_access(xc_interface *xch, domid_t domain_id,
xenmem_access_t access, uint64_t first_pfn,
uint32_t nr);
+int xc_set_mem_access_multi(xc_interface *xch, domid_t domain_id,
+ uint8_t *access, uint64_t *pages,
+ uint32_t size);
+
/*
* Gets the mem access for the given page (returned in access on success)
*/
diff --git a/tools/libxc/xc_mem_access.c b/tools/libxc/xc_mem_access.c
index eee088c..d69d4f9 100644
--- a/tools/libxc/xc_mem_access.c
+++ b/tools/libxc/xc_mem_access.c
@@ -41,6 +41,44 @@ int xc_set_mem_access(xc_interface *xch,
return do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
}
+int xc_set_mem_access_multi(xc_interface *xch,
+ domid_t domain_id,
+ uint8_t *access,
+ uint64_t *pages,
+ uint32_t size)
+{
+ DECLARE_HYPERCALL_BOUNCE(access, size, XC_HYPERCALL_BUFFER_BOUNCE_IN);
+ DECLARE_HYPERCALL_BOUNCE(pages, size * sizeof(uint64_t),
+ XC_HYPERCALL_BUFFER_BOUNCE_IN);
+ int rc;
+
+ xen_mem_access_op_t mao =
+ {
+ .op = XENMEM_access_op_set_access_multi,
+ .domid = domain_id,
+ .access = XENMEM_access_default + 1, /* Invalid value */
+ .pfn = ~0UL, /* Invalid GFN */
+ .nr = size,
+ };
+
+ if ( xc_hypercall_bounce_pre(xch, pages) ||
+ xc_hypercall_bounce_pre(xch, access) )
+ {
+ PERROR("Could not bounce memory for
XENMEM_access_op_set_access_multi");
+ return -1;
+ }
+
+ set_xen_guest_handle(mao.pfn_list, pages);
+ set_xen_guest_handle(mao.access_list, access);
+
+ rc = do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
+
+ xc_hypercall_bounce_post(xch, access);
+ xc_hypercall_bounce_post(xch, pages);
+
+ return rc;
+}
+
int xc_get_mem_access(xc_interface *xch,
domid_t domain_id,
uint64_t pfn,
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 812dbf6..ee6438b 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -28,6 +28,7 @@
#include <xen/event.h>
#include <public/vm_event.h>
#include <asm/domain.h>
+#include <xen/guest_access.h> /* copy_from_guest() */
#include <asm/page.h>
#include <asm/paging.h>
#include <asm/p2m.h>
@@ -1772,13 +1773,12 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
static inline
int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
struct p2m_domain *ap2m, p2m_access_t a,
- gfn_t gfn)
+ unsigned long gfn_l)
{
mfn_t mfn;
p2m_type_t t;
p2m_access_t old_a;
unsigned int page_order;
- unsigned long gfn_l = gfn_x(gfn);
int rc;
mfn = ap2m->get_entry(ap2m, gfn_l, &t, &old_a, 0, NULL, NULL);
@@ -1811,21 +1811,39 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
(current->domain != d));
}
-/*
- * Set access type for a region of gfns.
- * If gfn == INVALID_GFN, sets the default access type.
- */
-long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
- uint32_t start, uint32_t mask, xenmem_access_t access,
- unsigned int altp2m_idx)
+static inline
+int _p2m_set_mem_access(struct domain *d, struct p2m_domain *p2m,
+ struct p2m_domain *ap2m, p2m_access_t a,
+ unsigned long gfn_l)
{
- struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
- p2m_access_t a, _a;
- p2m_type_t t;
- mfn_t mfn;
- unsigned long gfn_l;
- long rc = 0;
+ int rc;
+ if ( ap2m )
+ {
+ rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, gfn_l);
+ /* If the corresponding mfn is invalid we will want to just skip it */
+ if ( rc && rc != -ESRCH )
+ return rc;
+ }
+ else
+ {
+ mfn_t mfn;
+ p2m_access_t _a;
+ p2m_type_t t;
+
+ mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL);
+ rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1);
+ return rc;
+ }
+
+ return 0;
+}
+
+static inline
+int p2m_xenmem_access_to_p2m_access(struct p2m_domain *p2m,
+ xenmem_access_t xaccess,
+ p2m_access_t *paccess)
+{
static const p2m_access_t memaccess[] = {
#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
ACCESS(n),
@@ -1841,6 +1859,34 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
#undef ACCESS
};
+ switch ( xaccess )
+ {
+ case 0 ... ARRAY_SIZE(memaccess) - 1:
+ *paccess = memaccess[xaccess];
+ break;
+ case XENMEM_access_default:
+ *paccess = p2m->default_access;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Set access type for a region of gfns.
+ * If gfn == INVALID_GFN, sets the default access type.
+ */
+long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
+ uint32_t start, uint32_t mask, xenmem_access_t access,
+ unsigned int altp2m_idx)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
+ p2m_access_t a;
+ unsigned long gfn_l;
+ long rc = 0;
+
/* altp2m view 0 is treated as the hostp2m */
if ( altp2m_idx )
{
@@ -1851,17 +1897,8 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
ap2m = d->arch.altp2m_p2m[altp2m_idx];
}
- switch ( access )
- {
- case 0 ... ARRAY_SIZE(memaccess) - 1:
- a = memaccess[access];
- break;
- case XENMEM_access_default:
- a = p2m->default_access;
- break;
- default:
+ if ( p2m_xenmem_access_to_p2m_access(p2m, access, &a) )
return -EINVAL;
- }
/* If request to set default access. */
if ( gfn_eq(gfn, INVALID_GFN) )
@@ -1876,23 +1913,71 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
for ( gfn_l = gfn_x(gfn) + start; nr > start; ++gfn_l )
{
- if ( ap2m )
- {
- rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));
- /* If the corresponding mfn is invalid we will just skip it */
- if ( rc && rc != -ESRCH )
- break;
- }
- else
+ rc = _p2m_set_mem_access(d, p2m, ap2m, a, gfn_l);
+
+ if ( rc )
+ break;
+
+ /* Check for continuation if it's not the last iteration. */
+ if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
{
- mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL);
- rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1);
- if ( rc )
- break;
+ rc = start;
+ break;
}
+ }
+
+ if ( ap2m )
+ p2m_unlock(ap2m);
+ p2m_unlock(p2m);
+
+ return rc;
+}
+
+long p2m_set_mem_access_multi(struct domain *d,
+ XEN_GUEST_HANDLE(uint64_t) pfn_list,
+ XEN_GUEST_HANDLE(uint8) access_list,
+ uint32_t size, uint32_t start, uint32_t mask,
+ unsigned int altp2m_idx)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
+ long rc = 0;
+
+ /* altp2m view 0 is treated as the hostp2m */
+ if ( altp2m_idx )
+ {
+ if ( altp2m_idx >= MAX_ALTP2M ||
+ d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
+ return -EINVAL;
+
+ ap2m = d->arch.altp2m_p2m[altp2m_idx];
+ }
+
+ p2m_lock(p2m);
+ if ( ap2m )
+ p2m_lock(ap2m);
+
+ while ( start < size )
+ {
+ p2m_access_t a;
+
+ uint8_t access;
+ uint64_t gfn_l;
+
+ copy_from_guest_offset(&gfn_l, pfn_list, start, 1);
+ copy_from_guest_offset(&access, access_list, start, 1);
+
+ rc = p2m_xenmem_access_to_p2m_access(p2m, access, &a);
+
+ if ( rc )
+ break;
+
+ rc = _p2m_set_mem_access(d, p2m, ap2m, a, gfn_l);
+
+ if ( rc )
+ break;
/* Check for continuation if it's not the last iteration. */
- if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
+ if ( size > ++start && !(start & mask) && hypercall_preempt_check() )
{
rc = start;
break;
diff --git a/xen/common/compat/memory.c b/xen/common/compat/memory.c
index 20c7671..e9732df 100644
--- a/xen/common/compat/memory.c
+++ b/xen/common/compat/memory.c
@@ -15,7 +15,6 @@ CHECK_TYPE(domid);
#undef compat_domid_t
#undef xen_domid_t
-CHECK_mem_access_op;
CHECK_vmemrange;
#ifdef CONFIG_HAS_PASSTHROUGH
@@ -71,6 +70,7 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
struct xen_add_to_physmap_batch *atpb;
struct xen_remove_from_physmap *xrfp;
struct xen_vnuma_topology_info *vnuma;
+ struct xen_mem_access_op *mao;
} nat;
union {
struct compat_memory_reservation rsrv;
@@ -78,6 +78,7 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
struct compat_add_to_physmap atp;
struct compat_add_to_physmap_batch atpb;
struct compat_vnuma_topology_info vnuma;
+ struct compat_mem_access_op mao;
} cmp;
set_xen_guest_handle(nat.hnd, COMPAT_ARG_XLAT_VIRT_BASE);
@@ -320,6 +321,23 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
break;
}
+ case XENMEM_access_op:
+ {
+#define XLAT_mem_access_op_HNDL_pfn_list(_d_, _s_) \
+ guest_from_compat_handle((_d_)->pfn_list, (_s_)->pfn_list)
+#define XLAT_mem_access_op_HNDL_access_list(_d_, _s_) \
+ guest_from_compat_handle((_d_)->access_list, (_s_)->access_list)
+
+ XLAT_mem_access_op(nat.mao, &cmp.mao);
+
+#undef XLAT_mem_access_op_HNDL_pfn_list
+#undef XLAT_mem_access_op_HNDL_access_list
+
+ return mem_access_memop(cmd,
+ guest_handle_cast(compat,
+ xen_mem_access_op_t));
+ }
+
case XENMEM_get_vnumainfo:
{
enum XLAT_vnuma_topology_info_vdistance vdistance =
@@ -495,10 +513,6 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
break;
}
- case XENMEM_access_op:
- rc = mem_access_memop(cmd, guest_handle_cast(compat, xen_mem_access_op_t));
- break;
-
case XENMEM_add_to_physmap_batch:
start_extent = end_extent;
break;
diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
index b4033f0..18b908f 100644
--- a/xen/common/mem_access.c
+++ b/xen/common/mem_access.c
@@ -76,6 +76,17 @@ int mem_access_memop(unsigned long cmd,
}
break;
+ case XENMEM_access_op_set_access_multi:
+ rc = p2m_set_mem_access_multi(d, mao.pfn_list, mao.access_list, mao.nr,
+ start_iter, MEMOP_CMD_MASK, 0);
+ if ( rc > 0 )
+ {
+ ASSERT(!(rc & MEMOP_CMD_MASK));
+ rc = hypercall_create_continuation(__HYPERVISOR_memory_op, "lh",
+ XENMEM_access_op | rc, arg);
+ }
+ break;
+
case XENMEM_access_op_get_access:
{
xenmem_access_t access;
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 3badfb9..9cb2568 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -410,6 +410,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_paging_op_t);
* #define XENMEM_access_op_enable_emulate 2
* #define XENMEM_access_op_disable_emulate 3
*/
+#define XENMEM_access_op_set_access_multi 4
typedef enum {
XENMEM_access_n,
@@ -442,7 +443,8 @@ struct xen_mem_access_op {
uint8_t access;
domid_t domid;
/*
- * Number of pages for set op
+ * Number of pages for set op (or size of pfn_list for
+ * XENMEM_access_op_set_access_multi)
* Ignored on setting default access and other ops
*/
uint32_t nr;
@@ -452,6 +454,16 @@ struct xen_mem_access_op {
* ~0ull is used to set and get the default access for pages
*/
uint64_aligned_t pfn;
+ /*
+ * List of pfns to set access for
+ * Used only with XENMEM_access_op_set_access_multi
+ */
+ XEN_GUEST_HANDLE(uint64_t) pfn_list;
+ /*
+ * Corresponding list of access settings for pfn_list
+ * Used only with XENMEM_access_op_set_access_multi
+ */
+ XEN_GUEST_HANDLE(uint8) access_list;
};
typedef struct xen_mem_access_op xen_mem_access_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);
diff --git a/xen/include/xen/p2m-common.h b/xen/include/xen/p2m-common.h
index b4f9077..384d2bf 100644
--- a/xen/include/xen/p2m-common.h
+++ b/xen/include/xen/p2m-common.h
@@ -53,6 +53,12 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
uint32_t start, uint32_t mask, xenmem_access_t access,
unsigned int altp2m_idx);
+long p2m_set_mem_access_multi(struct domain *d,
+ XEN_GUEST_HANDLE(uint64_t) pfn_list,
+ XEN_GUEST_HANDLE(uint8) access,
+ uint32_t size, uint32_t start, uint32_t mask,
+ unsigned int altp2m_idx);
+
/*
* Get access type for a gfn.
* If gfn == INVALID_GFN, gets the default access type.
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index 801a1c1..bdf1d05 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -68,7 +68,7 @@
! memory_exchange memory.h
! memory_map memory.h
! memory_reservation memory.h
-? mem_access_op memory.h
+! mem_access_op memory.h
! pod_target memory.h
! remove_from_physmap memory.h
! reserved_device_memory_map memory.h
--
1.9.1