[Xen-devel] [PATCH V4] tools/libxc, xen/x86: Added xc_set_mem_access_multi()
Currently it is only possible to set mem_access restrictions for a
contiguous range of GFNs (or, as a particular case, for a single GFN).
This patch introduces a new libxc function taking an array of GFNs.
The alternative would be to set each page in turn, requiring a
userspace-HV round trip for each call and triggering a TLB flush per page set.
Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
Acked-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
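For reference, a minimal usage sketch (not part of the patch) of how a
toolstack client might use the new call. The GFNs, access values and the
restrict_pages() helper below are purely hypothetical, and error handling
is elided:

#include <xenctrl.h>

static int restrict_pages(xc_interface *xch, domid_t domid)
{
    /* Hypothetical GFNs to restrict, with their desired access types. */
    uint64_t pages[3]  = { 0x1000, 0x1f00, 0x2a00 };
    uint8_t  access[3] = { XENMEM_access_r,   /* deny write/execute */
                           XENMEM_access_rx,  /* deny write         */
                           XENMEM_access_n }; /* deny all access    */

    /*
     * One hypercall covers all three pages, instead of one
     * userspace-HV round trip (and TLB flush) per page.
     */
    return xc_set_mem_access_multi(xch, domid, access, pages, 3);
}
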
Changes since V3:
- Fixed ARM compilation (replaced ENOTSUP with EOPNOTSUPP, which is
#defined in the in-tree errno.h). The multi code remains
unimplemented for ARM (it depends on " [RFC 21/22] xen/arm: p2m:
Re-implement p2m_set_mem_access using p2m_{set, get}_entry", and
Julien Grall has graciously agreed to defer the implementation
until after both patches go in).
- Reordered the xen/guest_access.h #include in p2m.c.
- Now passing a gfn_t to set_mem_access() instead of unsigned long.
- Removed the p2m prefix from p2m_xenmem_access_to_p2m_access().
- Switched from bool_t to bool.
- Moved the XENMEM_access_op case up with the other do-nothing
XENMEM_* cases.
---
tools/libxc/include/xenctrl.h | 9 +++
tools/libxc/xc_mem_access.c | 38 +++++++++++
xen/arch/arm/p2m.c | 10 +++
xen/arch/x86/mm/p2m.c | 150 ++++++++++++++++++++++++++++++++----------
xen/common/compat/memory.c | 23 +++++--
xen/common/mem_access.c | 11 ++++
xen/include/public/memory.h | 14 +++-
xen/include/xen/p2m-common.h | 6 ++
xen/include/xlat.lst | 2 +-
9 files changed, 224 insertions(+), 39 deletions(-)
diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 560ce7b..5e685a6 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -2126,6 +2126,15 @@ int xc_set_mem_access(xc_interface *xch, domid_t domain_id,
uint32_t nr);
/*
+ * Set an array of pages to their respective access types, as given in the
+ * access array.
+ * The nr parameter specifies the size of the pages and access arrays.
+ * The same allowed access types as for xc_set_mem_access() apply.
+ */
+int xc_set_mem_access_multi(xc_interface *xch, domid_t domain_id,
+ uint8_t *access, uint64_t *pages,
+ uint32_t nr);
+
+/*
* Gets the mem access for the given page (returned in access on success)
*/
int xc_get_mem_access(xc_interface *xch, domid_t domain_id,
diff --git a/tools/libxc/xc_mem_access.c b/tools/libxc/xc_mem_access.c
index eee088c..9536635 100644
--- a/tools/libxc/xc_mem_access.c
+++ b/tools/libxc/xc_mem_access.c
@@ -41,6 +41,44 @@ int xc_set_mem_access(xc_interface *xch,
return do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
}
+int xc_set_mem_access_multi(xc_interface *xch,
+ domid_t domain_id,
+ uint8_t *access,
+ uint64_t *pages,
+ uint32_t nr)
+{
+ DECLARE_HYPERCALL_BOUNCE(access, nr, XC_HYPERCALL_BUFFER_BOUNCE_IN);
+ DECLARE_HYPERCALL_BOUNCE(pages, nr * sizeof(uint64_t),
+ XC_HYPERCALL_BUFFER_BOUNCE_IN);
+ int rc;
+
+ xen_mem_access_op_t mao =
+ {
+ .op = XENMEM_access_op_set_access_multi,
+ .domid = domain_id,
+ .access = XENMEM_access_default + 1, /* Invalid value */
+ .pfn = ~0UL, /* Invalid GFN */
+ .nr = nr,
+ };
+
+ if ( xc_hypercall_bounce_pre(xch, pages) ||
+ xc_hypercall_bounce_pre(xch, access) )
+ {
+ PERROR("Could not bounce memory for
XENMEM_access_op_set_access_multi");
+ return -1;
+ }
+
+ set_xen_guest_handle(mao.pfn_list, pages);
+ set_xen_guest_handle(mao.access_list, access);
+
+ rc = do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
+
+ xc_hypercall_bounce_post(xch, access);
+ xc_hypercall_bounce_post(xch, pages);
+
+ return rc;
+}
+
int xc_get_mem_access(xc_interface *xch,
domid_t domain_id,
uint64_t pfn,
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index b648a9d..5c5049f 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1836,6 +1836,16 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
return 0;
}
+long p2m_set_mem_access_multi(struct domain *d,
+ const XEN_GUEST_HANDLE(const_uint64) pfn_list,
+ const XEN_GUEST_HANDLE(const_uint8) access_list,
+ uint32_t nr, uint32_t start, uint32_t mask,
+ unsigned int altp2m_idx)
+{
+ /* Not yet implemented on ARM. */
+ return -EOPNOTSUPP;
+}
+
int p2m_get_mem_access(struct domain *d, gfn_t gfn,
xenmem_access_t *access)
{
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 27f9d26..97c7cfd 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -23,6 +23,7 @@
* along with this program; If not, see <http://www.gnu.org/licenses/>.
*/
+#include <xen/guest_access.h> /* copy_from_guest() */
#include <xen/iommu.h>
#include <xen/vm_event.h>
#include <xen/event.h>
@@ -1793,21 +1794,37 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
(current->domain != d));
}
-/*
- * Set access type for a region of gfns.
- * If gfn == INVALID_GFN, sets the default access type.
- */
-long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
- uint32_t start, uint32_t mask, xenmem_access_t access,
- unsigned int altp2m_idx)
+static int set_mem_access(struct domain *d, struct p2m_domain *p2m,
+ struct p2m_domain *ap2m, p2m_access_t a,
+ gfn_t gfn)
{
- struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
- p2m_access_t a, _a;
- p2m_type_t t;
- mfn_t mfn;
- unsigned long gfn_l;
- long rc = 0;
+ int rc = 0;
+ if ( ap2m )
+ {
+ rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, gfn);
+ /* If the corresponding mfn is invalid we will want to just skip it */
+ if ( rc == -ESRCH )
+ rc = 0;
+ }
+ else
+ {
+ mfn_t mfn;
+ p2m_access_t _a;
+ p2m_type_t t;
+ unsigned long gfn_l = gfn_x(gfn);
+
+ mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL);
+ rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1);
+ }
+
+ return rc;
+}
+
+static bool xenmem_access_to_p2m_access(struct p2m_domain *p2m,
+ xenmem_access_t xaccess,
+ p2m_access_t *paccess)
+{
static const p2m_access_t memaccess[] = {
#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
ACCESS(n),
@@ -1823,6 +1840,34 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
#undef ACCESS
};
+ switch ( xaccess )
+ {
+ case 0 ... ARRAY_SIZE(memaccess) - 1:
+ *paccess = memaccess[xaccess];
+ break;
+ case XENMEM_access_default:
+ *paccess = p2m->default_access;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Set access type for a region of gfns.
+ * If gfn == INVALID_GFN, sets the default access type.
+ */
+long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
+ uint32_t start, uint32_t mask, xenmem_access_t access,
+ unsigned int altp2m_idx)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
+ p2m_access_t a;
+ unsigned long gfn_l;
+ long rc = 0;
+
/* altp2m view 0 is treated as the hostp2m */
if ( altp2m_idx )
{
@@ -1833,17 +1878,8 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
ap2m = d->arch.altp2m_p2m[altp2m_idx];
}
- switch ( access )
- {
- case 0 ... ARRAY_SIZE(memaccess) - 1:
- a = memaccess[access];
- break;
- case XENMEM_access_default:
- a = p2m->default_access;
- break;
- default:
+ if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
return -EINVAL;
- }
/* If request to set default access. */
if ( gfn_eq(gfn, INVALID_GFN) )
@@ -1858,21 +1894,69 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
for ( gfn_l = gfn_x(gfn) + start; nr > start; ++gfn_l )
{
- if ( ap2m )
+ rc = set_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));
+
+ if ( rc )
+ break;
+
+ /* Check for continuation if it's not the last iteration. */
+ if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
{
- rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));
- /* If the corresponding mfn is invalid we will just skip it */
- if ( rc && rc != -ESRCH )
- break;
+ rc = start;
+ break;
}
- else
+ }
+
+ if ( ap2m )
+ p2m_unlock(ap2m);
+ p2m_unlock(p2m);
+
+ return rc;
+}
+
+long p2m_set_mem_access_multi(struct domain *d,
+ const XEN_GUEST_HANDLE(const_uint64) pfn_list,
+ const XEN_GUEST_HANDLE(const_uint8) access_list,
+ uint32_t nr, uint32_t start, uint32_t mask,
+ unsigned int altp2m_idx)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
+ long rc = 0;
+
+ /* altp2m view 0 is treated as the hostp2m */
+ if ( altp2m_idx )
+ {
+ if ( altp2m_idx >= MAX_ALTP2M ||
+ d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
+ return -EINVAL;
+
+ ap2m = d->arch.altp2m_p2m[altp2m_idx];
+ }
+
+ p2m_lock(p2m);
+ if ( ap2m )
+ p2m_lock(ap2m);
+
+ while ( start < nr )
+ {
+ p2m_access_t a;
+ uint8_t access;
+ uint64_t gfn_l;
+
+ copy_from_guest_offset(&gfn_l, pfn_list, start, 1);
+ copy_from_guest_offset(&access, access_list, start, 1);
+
+ if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
{
- mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL);
- rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1);
- if ( rc )
- break;
+ rc = -EINVAL;
+ break;
}
+ rc = set_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));
+
+ if ( rc )
+ break;
+
/* Check for continuation if it's not the last iteration. */
if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
{
diff --git a/xen/common/compat/memory.c b/xen/common/compat/memory.c
index 579040e..017a709 100644
--- a/xen/common/compat/memory.c
+++ b/xen/common/compat/memory.c
@@ -15,7 +15,6 @@ CHECK_TYPE(domid);
#undef compat_domid_t
#undef xen_domid_t
-CHECK_mem_access_op;
CHECK_vmemrange;
#ifdef CONFIG_HAS_PASSTHROUGH
@@ -71,6 +70,7 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
struct xen_add_to_physmap_batch *atpb;
struct xen_remove_from_physmap *xrfp;
struct xen_vnuma_topology_info *vnuma;
+ struct xen_mem_access_op *mao;
} nat;
union {
struct compat_memory_reservation rsrv;
@@ -78,6 +78,7 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
struct compat_add_to_physmap atp;
struct compat_add_to_physmap_batch atpb;
struct compat_vnuma_topology_info vnuma;
+ struct compat_mem_access_op mao;
} cmp;
set_xen_guest_handle(nat.hnd, COMPAT_ARG_XLAT_VIRT_BASE);
@@ -321,9 +322,22 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
}
case XENMEM_access_op:
- return mem_access_memop(cmd,
- guest_handle_cast(compat,
- xen_mem_access_op_t));
+ {
+ if ( copy_from_guest(&cmp.mao, compat, 1) )
+ return -EFAULT;
+
+#define XLAT_mem_access_op_HNDL_pfn_list(_d_, _s_) \
+ guest_from_compat_handle((_d_)->pfn_list, (_s_)->pfn_list)
+#define XLAT_mem_access_op_HNDL_access_list(_d_, _s_) \
+ guest_from_compat_handle((_d_)->access_list, (_s_)->access_list)
+
+ XLAT_mem_access_op(nat.mao, &cmp.mao);
+
+#undef XLAT_mem_access_op_HNDL_pfn_list
+#undef XLAT_mem_access_op_HNDL_access_list
+
+ break;
+ }
case XENMEM_get_vnumainfo:
{
@@ -510,6 +524,7 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
case XENMEM_maximum_gpfn:
case XENMEM_add_to_physmap:
case XENMEM_remove_from_physmap:
+ case XENMEM_access_op:
break;
case XENMEM_get_vnumainfo:
diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
index 82f4bad..565a320 100644
--- a/xen/common/mem_access.c
+++ b/xen/common/mem_access.c
@@ -76,6 +76,17 @@ int mem_access_memop(unsigned long cmd,
}
break;
+ case XENMEM_access_op_set_access_multi:
+ rc = p2m_set_mem_access_multi(d, mao.pfn_list, mao.access_list, mao.nr,
+ start_iter, MEMOP_CMD_MASK, 0);
+ if ( rc > 0 )
+ {
+ ASSERT(!(rc & MEMOP_CMD_MASK));
+ rc = hypercall_create_continuation(__HYPERVISOR_memory_op, "lh",
+ XENMEM_access_op | rc, arg);
+ }
+ break;
+
case XENMEM_access_op_get_access:
{
xenmem_access_t access;
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 3badfb9..a5547a9 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -410,6 +410,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_paging_op_t);
* #define XENMEM_access_op_enable_emulate 2
* #define XENMEM_access_op_disable_emulate 3
*/
+#define XENMEM_access_op_set_access_multi 4
typedef enum {
XENMEM_access_n,
@@ -442,7 +443,8 @@ struct xen_mem_access_op {
uint8_t access;
domid_t domid;
/*
- * Number of pages for set op
+ * Number of pages for set op (or size of pfn_list for
+ * XENMEM_access_op_set_access_multi)
* Ignored on setting default access and other ops
*/
uint32_t nr;
@@ -452,6 +454,16 @@ struct xen_mem_access_op {
* ~0ull is used to set and get the default access for pages
*/
uint64_aligned_t pfn;
+ /*
+ * List of pfns to set access for
+ * Used only with XENMEM_access_op_set_access_multi
+ */
+ XEN_GUEST_HANDLE(const_uint64) pfn_list;
+ /*
+ * Corresponding list of access settings for pfn_list
+ * Used only with XENMEM_access_op_set_access_multi
+ */
+ XEN_GUEST_HANDLE(const_uint8) access_list;
};
typedef struct xen_mem_access_op xen_mem_access_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);
diff --git a/xen/include/xen/p2m-common.h b/xen/include/xen/p2m-common.h
index b4f9077..3be1e91 100644
--- a/xen/include/xen/p2m-common.h
+++ b/xen/include/xen/p2m-common.h
@@ -53,6 +53,12 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
uint32_t start, uint32_t mask, xenmem_access_t access,
unsigned int altp2m_idx);
+long p2m_set_mem_access_multi(struct domain *d,
+ const XEN_GUEST_HANDLE(const_uint64) pfn_list,
+ const XEN_GUEST_HANDLE(const_uint8) access_list,
+ uint32_t nr, uint32_t start, uint32_t mask,
+ unsigned int altp2m_idx);
+
/*
* Get access type for a gfn.
* If gfn == INVALID_GFN, gets the default access type.
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index 801a1c1..bdf1d05 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -68,7 +68,7 @@
! memory_exchange memory.h
! memory_map memory.h
! memory_reservation memory.h
-? mem_access_op memory.h
+! mem_access_op memory.h
! pod_target memory.h
! remove_from_physmap memory.h
! reserved_device_memory_map memory.h
--
1.9.1
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel