[Xen-devel] [PATCH RFC] tools/libxc, xen/x86: Added xc_set_mem_access_sparse()
Currently it is only possible to set mem_access restrictions for a
contiguous range of GFNs (or, as a special case, for a single GFN).
This patch introduces a new libxc function, xc_set_mem_access_sparse(),
which takes an array of GFNs and applies the requested access type to
all of them in a single hypercall. The alternative would be to set each
page in turn, paying a userspace/hypervisor round trip and triggering a
TLB flush for every page set.
Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
---
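For illustration only (not part of the patch), a minimal sketch of how a
consumer might use the new call once this series is applied. The helper
name, domain ID and GFN values are invented for the example;
xc_interface_open()/xc_interface_close() and XENMEM_access_r are existing
libxc/Xen symbols.

#include <xenctrl.h>

/* Hypothetical helper: mark a scattered set of GFNs read-only for domid,
 * using one XENMEM_access_op hypercall instead of one call per page. */
static int restrict_gfns(xc_interface *xch, domid_t domid)
{
    /* Made-up, non-contiguous GFNs of interest. */
    xen_pfn_t gfns[] = { 0x1000, 0x1f3, 0x52a7 };

    return xc_set_mem_access_sparse(xch, domid, XENMEM_access_r, gfns,
                                    sizeof(gfns) / sizeof(gfns[0]));
}

(The caller is expected to have obtained xch via xc_interface_open() and to
check the return value, as with xc_set_mem_access().)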
tools/libxc/include/xenctrl.h | 4 +++
tools/libxc/xc_mem_access.c | 32 +++++++++++++++++++++++
xen/arch/x86/hvm/hvm.c | 2 +-
xen/arch/x86/mm/p2m.c | 59 ++++++++++++++++++++++++++++++-------------
xen/common/compat/memory.c | 1 -
xen/common/mem_access.c | 13 +++++++++-
xen/include/public/memory.h | 6 +++++
xen/include/xen/p2m-common.h | 6 ++---
8 files changed, 100 insertions(+), 23 deletions(-)
diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 560ce7b..ac84908 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -2125,6 +2125,10 @@ int xc_set_mem_access(xc_interface *xch, domid_t domain_id,
xenmem_access_t access, uint64_t first_pfn,
uint32_t nr);
+int xc_set_mem_access_sparse(xc_interface *xch, domid_t domain_id,
+ xenmem_access_t access, xen_pfn_t *pages,
+ uint32_t nr);
+
/*
* Gets the mem access for the given page (returned in access on success)
*/
diff --git a/tools/libxc/xc_mem_access.c b/tools/libxc/xc_mem_access.c
index eee088c..73b1caa 100644
--- a/tools/libxc/xc_mem_access.c
+++ b/tools/libxc/xc_mem_access.c
@@ -41,6 +41,38 @@ int xc_set_mem_access(xc_interface *xch,
return do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
}
+int xc_set_mem_access_sparse(xc_interface *xch,
+ domid_t domain_id,
+ xenmem_access_t access,
+ xen_pfn_t *pages,
+ uint32_t nr)
+{
+    DECLARE_HYPERCALL_BOUNCE(pages, nr * sizeof(xen_pfn_t), XC_HYPERCALL_BUFFER_BOUNCE_IN);
+ int rc;
+
+ xen_mem_access_op_t mao =
+ {
+ .op = XENMEM_access_op_set_access_sparse,
+ .domid = domain_id,
+ .access = access,
+ .nr = nr
+ };
+
+ if ( xc_hypercall_bounce_pre(xch, pages) )
+ {
+ PERROR("Could not bounce memory for
XENMEM_access_op_set_access_sparse");
+ return -1;
+ }
+
+ set_xen_guest_handle(mao.pfn_list, pages);
+
+ rc = do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
+
+ xc_hypercall_bounce_post(xch, pages);
+
+ return rc;
+}
+
int xc_get_mem_access(xc_interface *xch,
domid_t domain_id,
uint64_t pfn,
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 0180f26..03461e5 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5323,7 +5323,7 @@ static int do_altp2m_op(
if ( a.u.set_mem_access.pad )
rc = -EINVAL;
else
- rc = p2m_set_mem_access(d, _gfn(a.u.set_mem_access.gfn), 1, 0, 0,
+            rc = p2m_set_mem_access(d, _gfn(a.u.set_mem_access.gfn), NULL, 1, 0, 0,
a.u.set_mem_access.hvmmem_access,
a.u.set_mem_access.view);
break;
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 812dbf6..2c45cc6 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1815,7 +1815,7 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
* Set access type for a region of gfns.
* If gfn == INVALID_GFN, sets the default access type.
*/
-long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
+long p2m_set_mem_access(struct domain *d, gfn_t gfn, xen_pfn_t *arr, uint32_t nr,
uint32_t start, uint32_t mask, xenmem_access_t access,
unsigned int altp2m_idx)
{
@@ -1874,28 +1874,53 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
if ( ap2m )
p2m_lock(ap2m);
- for ( gfn_l = gfn_x(gfn) + start; nr > start; ++gfn_l )
+ if ( !arr )
{
- if ( ap2m )
+ for ( gfn_l = gfn_x(gfn) + start; nr > start; ++gfn_l )
{
- rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));
- /* If the corresponding mfn is invalid we will just skip it */
- if ( rc && rc != -ESRCH )
- break;
- }
- else
- {
- mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL);
- rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1);
- if ( rc )
+ if ( ap2m )
+ {
+ rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));
+ /* If the corresponding mfn is invalid we will just skip it */
+ if ( rc && rc != -ESRCH )
+ break;
+ }
+ else
+ {
+ mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL);
+ rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1);
+ if ( rc )
+ break;
+ }
+
+ /* Check for continuation if it's not the last iteration. */
+ if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
+ {
+ rc = start;
break;
+ }
}
+ }
+ else
+ {
+ uint32_t i;
- /* Check for continuation if it's not the last iteration. */
- if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
+ for ( i = 0; i < nr; ++i )
{
- rc = start;
- break;
+ if ( ap2m )
+ {
+ rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, _gfn(arr[i]));
+ /* If the corresponding mfn is invalid we will just skip it */
+ if ( rc && rc != -ESRCH )
+ break;
+ }
+ else
+ {
+ mfn = p2m->get_entry(p2m, arr[i], &t, &_a, 0, NULL, NULL);
+ rc = p2m->set_entry(p2m, arr[i], mfn, PAGE_ORDER_4K, t, a, -1);
+ if ( rc )
+ break;
+ }
}
}
diff --git a/xen/common/compat/memory.c b/xen/common/compat/memory.c
index 20c7671..664b8fe 100644
--- a/xen/common/compat/memory.c
+++ b/xen/common/compat/memory.c
@@ -15,7 +15,6 @@ CHECK_TYPE(domid);
#undef compat_domid_t
#undef xen_domid_t
-CHECK_mem_access_op;
CHECK_vmemrange;
#ifdef CONFIG_HAS_PASSTHROUGH
diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
index b4033f0..1768020 100644
--- a/xen/common/mem_access.c
+++ b/xen/common/mem_access.c
@@ -66,7 +66,7 @@ int mem_access_memop(unsigned long cmd,
((mao.pfn + mao.nr - 1) > domain_get_maximum_gpfn(d))) )
break;
- rc = p2m_set_mem_access(d, _gfn(mao.pfn), mao.nr, start_iter,
+ rc = p2m_set_mem_access(d, _gfn(mao.pfn), NULL, mao.nr, start_iter,
MEMOP_CMD_MASK, mao.access, 0);
if ( rc > 0 )
{
@@ -76,6 +76,17 @@ int mem_access_memop(unsigned long cmd,
}
break;
+    case XENMEM_access_op_set_access_sparse:
+    {
+        xen_pfn_t *arr = xmalloc_bytes(sizeof(xen_pfn_t) * mao.nr);
+        rc = arr ? -EFAULT : -ENOMEM;
+        if ( arr && !copy_from_guest(arr, mao.pfn_list, mao.nr) )
+            rc = p2m_set_mem_access(d, _gfn(mao.pfn), arr, mao.nr, start_iter,
+                                    MEMOP_CMD_MASK, mao.access, 0);
+        xfree(arr);
+        break;
+    }
+
case XENMEM_access_op_get_access:
{
xenmem_access_t access;
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 3badfb9..2e224e3 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -410,6 +410,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_paging_op_t);
* #define XENMEM_access_op_enable_emulate 2
* #define XENMEM_access_op_disable_emulate 3
*/
+#define XENMEM_access_op_set_access_sparse 4
typedef enum {
XENMEM_access_n,
@@ -452,6 +453,11 @@ struct xen_mem_access_op {
* ~0ull is used to set and get the default access for pages
*/
uint64_aligned_t pfn;
+ /*
+ * List of pfns to set access for
+ * Used only with XENMEM_access_op_set_access_sparse
+ */
+ XEN_GUEST_HANDLE(xen_pfn_t) pfn_list;
};
typedef struct xen_mem_access_op xen_mem_access_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);
diff --git a/xen/include/xen/p2m-common.h b/xen/include/xen/p2m-common.h
index b4f9077..c6723b6 100644
--- a/xen/include/xen/p2m-common.h
+++ b/xen/include/xen/p2m-common.h
@@ -49,9 +49,9 @@ int unmap_mmio_regions(struct domain *d,
* Set access type for a region of gfns.
* If gfn == INVALID_GFN, sets the default access type.
*/
-long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
- uint32_t start, uint32_t mask, xenmem_access_t access,
- unsigned int altp2m_idx);
+long p2m_set_mem_access(struct domain *d, gfn_t gfn, xen_pfn_t *arr,
+ uint32_t nr, uint32_t start, uint32_t mask,
+ xenmem_access_t access, unsigned int altp2m_idx);
/*
* Get access type for a gfn.
--
1.9.1