[Xen-devel] [PATCH 2 of 2] mem_access: Add xc_hvm_set_mem_access_bulk() API
tools/libxc/xc_misc.c | 51 ++++++++++++++++++++++++++++++++++
tools/libxc/xenctrl.h | 11 +++++++
xen/arch/x86/hvm/hvm.c | 45 ++++++++++++++++++++++++++++++
xen/arch/x86/mm/p2m-ept.c | 61 +++++++++++++++++++++++++++++++++++++++++
xen/arch/x86/mm/p2m-pt.c | 1 +
xen/arch/x86/mm/p2m.c | 36 ++++++++++++++++++++++++
xen/include/asm-x86/p2m.h | 18 +++++++++++-
xen/include/public/hvm/hvm_op.h | 18 ++++++++++++
8 files changed, 240 insertions(+), 1 deletions(-)
int xc_hvm_set_mem_access_bulk(
xc_interface *xch, domid_t dom, hvmmem_access_t memaccess,
xen_pfn_t *arr, int *err, uint64_t nr);
Set an array of guest physical addresses to a specific mem_access type.
Allowed types are HVMMEM_access_default, HVMMEM_access_n, any combination of
HVMMEM_access_ + (rwx), and HVMMEM_access_rx2rw.
When a gfn cannot be set to the specified access, its respective field in
@err is set to the corresponding errno value.
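
As an illustration (not part of the patch): a minimal libxc caller could look
like the sketch below. The domid and pfn values are placeholders, and error
handling is reduced to printing the per-pfn errno values reported in @err.

/* Minimal usage sketch (illustrative only, not part of this patch).
 * Marks a few guest pfns read/execute-only in one hypercall and then
 * reports any per-pfn failures via the err array. */
#include <stdio.h>
#include <string.h>
#include <xenctrl.h>

static int demo_set_rx_bulk(xc_interface *xch, domid_t domid)
{
    xen_pfn_t arr[] = { 0x1000, 0x1001, 0x2000 }; /* placeholder pfns */
    int err[3];
    uint64_t i, nr = sizeof(arr) / sizeof(arr[0]);

    memset(err, 0, sizeof(err));

    /* One hypercall covers the whole batch instead of nr separate calls. */
    if ( xc_hvm_set_mem_access_bulk(xch, domid, HVMMEM_access_rx,
                                    arr, err, nr) < 0 )
        return -1; /* the hypercall itself failed */

    /* Per-pfn failures are reported as negative errno values in err[]. */
    for ( i = 0; i < nr; i++ )
        if ( err[i] )
            fprintf(stderr, "pfn 0x%llx: error %d\n",
                    (unsigned long long)arr[i], err[i]);

    return 0;
}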
Signed-off-by: Aravindh Puthiyaparambil <aravindh@xxxxxxxxxxxx>
diff -r f12cf785738f -r 2f695c43ce22 tools/libxc/xc_misc.c
--- a/tools/libxc/xc_misc.c Wed Apr 25 19:29:54 2012 -0700
+++ b/tools/libxc/xc_misc.c Thu Apr 26 11:25:50 2012 -0700
@@ -570,6 +570,57 @@ int xc_hvm_set_mem_access(
return rc;
}
+int xc_hvm_set_mem_access_bulk(
+ xc_interface *xch, domid_t dom, hvmmem_access_t mem_access,
+ xen_pfn_t *arr, int *err, uint64_t nr)
+{
+ DECLARE_HYPERCALL;
+ DECLARE_HYPERCALL_BUFFER(struct xen_hvm_set_mem_access_bulk, arg);
+ DECLARE_HYPERCALL_BOUNCE(arr, sizeof(xen_pfn_t) * nr,
+ XC_HYPERCALL_BUFFER_BOUNCE_IN);
+ DECLARE_HYPERCALL_BOUNCE(err, sizeof(int) * nr,
+ XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+ int rc;
+
+ arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
+ if ( arg == NULL )
+ {
+ PERROR("Could not allocate memory for xc_hvm_set_mem_access_bulk
hypercall");
+ return -1;
+ }
+
+ if ( xc_hypercall_bounce_pre(xch, arr) )
+ {
+ PERROR("Could not bounce arr for xc_hvm_set_mem_access_bulk hypercall");
+ rc = -1;
+ goto out;
+ }
+
+ if ( xc_hypercall_bounce_pre(xch, err) )
+ {
+ PERROR("Could not bounce err for xc_hvm_set_mem_access_bulk hypercall");
+ rc = -1;
+ goto out;
+ }
+
+ arg->domid = dom;
+ arg->hvmmem_access = mem_access;
+ arg->nr = nr;
+ set_xen_guest_handle(arg->arr, arr);
+ set_xen_guest_handle(arg->err, err);
+
+ hypercall.op = __HYPERVISOR_hvm_op;
+ hypercall.arg[0] = HVMOP_set_mem_access_bulk;
+ hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
+
+ rc = do_xen_hypercall(xch, &hypercall);
+
+out:
+ xc_hypercall_buffer_free(xch, arg);
+ xc_hypercall_bounce_post(xch, arr);
+ xc_hypercall_bounce_post(xch, err);
+
+ return rc;
+}
+
int xc_hvm_get_mem_access(
xc_interface *xch, domid_t dom, uint64_t pfn, hvmmem_access_t* mem_access)
{
diff -r f12cf785738f -r 2f695c43ce22 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h Wed Apr 25 19:29:54 2012 -0700
+++ b/tools/libxc/xenctrl.h Thu Apr 26 11:25:50 2012 -0700
@@ -1568,6 +1568,17 @@ int xc_hvm_set_mem_access(
xc_interface *xch, domid_t dom, hvmmem_access_t memaccess, uint64_t first_pfn, uint64_t nr);
/*
+ * Set the array of pfns to a specific access type.
+ * When a pfn cannot be set to the specified access, its respective field in
+ * @err is set to the corresponding errno value.
+ * Allowed types are HVMMEM_access_default, HVMMEM_access_n, any combination of
+ * HVMMEM_access_ + (rwx), and HVMMEM_access_rx2rw.
+ */
+int xc_hvm_set_mem_access_bulk(
+ xc_interface *xch, domid_t dom, hvmmem_access_t memaccess,
+ xen_pfn_t *arr, int *err, uint64_t nr);
+
+/*
* Gets the mem access for the given page (returned in memaccess on success)
*/
int xc_hvm_get_mem_access(
diff -r f12cf785738f -r 2f695c43ce22 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Wed Apr 25 19:29:54 2012 -0700
+++ b/xen/arch/x86/hvm/hvm.c Thu Apr 26 11:25:50 2012 -0700
@@ -4197,6 +4197,51 @@ long do_hvm_op(unsigned long op, XEN_GUE
break;
}
+ case HVMOP_set_mem_access_bulk:
+ {
+ struct xen_hvm_set_mem_access_bulk a;
+ struct domain *d;
+ xen_pfn_t *arr = NULL;
+ int *err = NULL;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ rc = rcu_lock_remote_target_domain_by_id(a.domid, &d);
+ if ( rc != 0 )
+ return rc;
+
+ rc = -EINVAL;
+ if ( !is_hvm_domain(d) )
+ goto param_fail9;
+
+ rc = -ENOMEM;
+ arr = xmalloc_array(xen_pfn_t, a.nr);
+ if ( !arr )
+ goto param_fail9;
+
+ err = xmalloc_array(int, a.nr);
+ if ( !err )
+ goto param_fail9;
+
+ rc = -EFAULT;
+ if ( copy_from_guest(arr, a.arr, a.nr) )
+ goto param_fail9;
+
+ rc = p2m_set_access_bulk(d, arr, err, a.nr, a.hvmmem_access);
+
+ if ( copy_to_guest(a.err, err, a.nr) )
+ rc = -EFAULT;
+
+ param_fail9:
+ rcu_unlock_domain(d);
+ /* xfree() tolerates NULL, so the pointers need no separate checks. */
+ xfree(arr);
+ xfree(err);
+ break;
+ }
+
case HVMOP_get_mem_access:
{
struct xen_hvm_get_mem_access a;
diff -r f12cf785738f -r 2f695c43ce22 xen/arch/x86/mm/p2m-ept.c
--- a/xen/arch/x86/mm/p2m-ept.c Wed Apr 25 19:29:54 2012 -0700
+++ b/xen/arch/x86/mm/p2m-ept.c Thu Apr 26 11:25:50 2012 -0700
@@ -597,6 +597,66 @@ out:
return mfn;
}
+static int
+ept_set_entry_bulk(struct p2m_domain *p2m, xen_pfn_t *arr, int *err,
+ uint64_t nr, unsigned int order, p2m_access_t p2ma)
+{
+ unsigned long pfn;
+ p2m_access_t a;
+ p2m_type_t t;
+ mfn_t mfn;
+ ept_entry_t *table;
+ int target = order / EPT_TABLE_ORDER;
+ int rc;
+ bool_t needs_sync, bulk_needs_sync = 0;
+ struct domain *d = p2m->domain;
+ ept_entry_t *old_entries = NULL;
+
+ ASSERT((target == 2 && hvm_hap_has_1gb(d)) ||
+ (target == 1 && hvm_hap_has_2mb(d)) ||
+ (target == 0));
+
+ old_entries = xzalloc_array(ept_entry_t, nr);
+ if ( !old_entries )
+ return 0;
+
+ memset(err, 0, nr * sizeof(int));
+ table = map_domain_page(ept_get_asr(d));
+ for ( pfn = 0; pfn < nr; pfn++ )
+ {
+ if ( arr[pfn] > domain_get_maximum_gpfn(d) )
+ {
+ err[pfn] = -EINVAL;
+ continue;
+ }
+
+ needs_sync = 1;
+ mfn = ept_get_entry(p2m, arr[pfn], &t, &a, 0, NULL);
+ rc = __ept_set_entry(p2m, arr[pfn], mfn, order, t, p2ma, table,
+ &old_entries[pfn], &needs_sync);
+ if ( rc == 0 )
+ err[pfn] = -ENOMEM;
+
+ if ( needs_sync )
+ bulk_needs_sync = 1;
+ }
+ unmap_domain_page(table);
+
+ if ( bulk_needs_sync )
+ ept_sync_domain(p2m->domain);
+
+ /* Release the old intermediate tables, if any. This has to be the
+ last thing we do, after the ept_sync_domain() and removal
+ from the iommu tables, so as to avoid a potential
+ use-after-free. */
+ for ( pfn = 0; pfn < nr; pfn++ )
+ if ( is_epte_present(&old_entries[pfn]) )
+ ept_free_entry(p2m, &old_entries[pfn], target);
+
+ xfree(old_entries);
+
+ /* Success */
+ return 1;
+}
+
/* WARNING: Only caller doesn't care about PoD pages. So this function will
* always return 0 for PoD pages, not populate them. If that becomes necessary,
* pass a p2m_query_t type along to distinguish. */
@@ -808,6 +868,7 @@ static void ept_change_entry_type_global
void ept_p2m_init(struct p2m_domain *p2m)
{
p2m->set_entry = ept_set_entry;
+ p2m->set_entry_bulk = ept_set_entry_bulk;
p2m->get_entry = ept_get_entry;
p2m->change_entry_type_global = ept_change_entry_type_global;
p2m->audit_p2m = NULL;
diff -r f12cf785738f -r 2f695c43ce22 xen/arch/x86/mm/p2m-pt.c
--- a/xen/arch/x86/mm/p2m-pt.c Wed Apr 25 19:29:54 2012 -0700
+++ b/xen/arch/x86/mm/p2m-pt.c Thu Apr 26 11:25:50 2012 -0700
@@ -1139,6 +1139,7 @@ long p2m_pt_audit_p2m(struct p2m_domain
void p2m_pt_init(struct p2m_domain *p2m)
{
p2m->set_entry = p2m_set_entry;
+ p2m->set_entry_bulk = NULL;
p2m->get_entry = p2m_gfn_to_mfn;
p2m->change_entry_type_global = p2m_change_type_global;
p2m->write_p2m_entry = paging_write_p2m_entry;
diff -r f12cf785738f -r 2f695c43ce22 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Wed Apr 25 19:29:54 2012 -0700
+++ b/xen/arch/x86/mm/p2m.c Thu Apr 26 11:25:50 2012 -0700
@@ -1334,6 +1334,42 @@ int p2m_set_mem_access(struct domain *d,
return rc;
}
+int p2m_set_access_bulk(struct domain *d, xen_pfn_t *arr, int *err,
+ uint64_t nr, hvmmem_access_t access)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+ p2m_access_t a;
+ p2m_access_t memaccess[] = {
+ p2m_access_n,
+ p2m_access_r,
+ p2m_access_w,
+ p2m_access_rw,
+ p2m_access_x,
+ p2m_access_rx,
+ p2m_access_wx,
+ p2m_access_rwx,
+ p2m_access_rx2rw,
+ p2m_access_n2rwx,
+ p2m->default_access,
+ };
+ int rc = 0;
+
+ ASSERT(p2m->set_entry_bulk);
+
+ if ( (unsigned) access > HVMMEM_access_default )
+ return -EINVAL;
+
+ a = memaccess[access];
+
+ p2m_lock(p2m);
+ if ( p2m->set_entry_bulk(p2m, arr, err, nr, PAGE_ORDER_4K, a) == 0 )
+ rc = -ENOMEM;
+ p2m_unlock(p2m);
+
+ return rc;
+}
+
/* Get access type for a pfn
* If pfn == -1ul, gets the default access type */
int p2m_get_mem_access(struct domain *d, unsigned long pfn,
diff -r f12cf785738f -r 2f695c43ce22 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Wed Apr 25 19:29:54 2012 -0700
+++ b/xen/include/asm-x86/p2m.h Thu Apr 26 11:25:50 2012 -0700
@@ -232,6 +232,13 @@ struct p2m_domain {
mfn_t mfn, unsigned int page_order,
p2m_type_t p2mt,
p2m_access_t p2ma);
+ int (*set_entry_bulk)(struct p2m_domain *p2m,
+ xen_pfn_t *arr,
+ int *err,
+ uint64_t nr,
+ unsigned int order,
+ p2m_access_t p2ma);
+
mfn_t (*get_entry )(struct p2m_domain *p2m,
unsigned long gfn,
p2m_type_t *p2mt,
@@ -568,6 +575,11 @@ void p2m_mem_access_resume(struct domain
int p2m_set_mem_access(struct domain *d, unsigned long start_pfn,
uint32_t nr, hvmmem_access_t access);
+/* Set access type for an array of pfns. Per-pfn success or failure is
+ * returned in the @err array. */
+int p2m_set_access_bulk(struct domain *d, xen_pfn_t *arr, int *err,
+ uint64_t nr, hvmmem_access_t access);
+
/* Get access type for a pfn
* If pfn == -1ul, gets the default access type */
int p2m_get_mem_access(struct domain *d, unsigned long pfn,
@@ -583,7 +595,11 @@ static inline int p2m_set_mem_access(str
unsigned long start_pfn,
uint32_t nr, hvmmem_access_t access)
{ return -EINVAL; }
-static inline int p2m_get_mem_access(struct domain *d, unsigned long pfn,
+static inline int p2m_set_access_bulk(struct domain *d, xen_pfn_t *arr,
+ int *err, uint64_t nr,
+ hvmmem_access_t access)
+{ return -EINVAL; }
+static inline int p2m_get_mem_access(struct domain *d, unsigned long pfn,
hvmmem_access_t *access)
{ return -EINVAL; }
#endif
diff -r f12cf785738f -r 2f695c43ce22 xen/include/public/hvm/hvm_op.h
--- a/xen/include/public/hvm/hvm_op.h Wed Apr 25 19:29:54 2012 -0700
+++ b/xen/include/public/hvm/hvm_op.h Thu Apr 26 11:25:50 2012 -0700
@@ -261,4 +261,22 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_m
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+#define HVMOP_set_mem_access_bulk 17
+/* Notify that an array of pfns is to have specific access types */
+struct xen_hvm_set_mem_access_bulk {
+ /* Domain to be updated. */
+ domid_t domid;
+ /* Memory type */
+ uint16_t hvmmem_access; /* hvmmem_access_t */
+ /* Array of pfns */
+ XEN_GUEST_HANDLE_64(xen_pfn_t) arr;
+ /* Array of per-pfn results: 0 on success, -errno on failure */
+ XEN_GUEST_HANDLE_64(int) err;
+ /* Number of entries */
+ uint64_t nr;
+};
+typedef struct xen_hvm_set_mem_access_bulk xen_hvm_set_mem_access_bulk_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_access_bulk_t);
+#endif
+
#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
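
For completeness, a hypothetical fallback helper (again not part of the patch,
and assuming <xenctrl.h> is already included) shows one way a toolstack might
consume the per-pfn error reporting: retry any rejected pfns individually
through the existing xc_hvm_set_mem_access() range call, whose signature
appears in the xenctrl.h hunk above.

/* Hypothetical helper: retry pfns the bulk call could not set, one at a
 * time, via the existing single-range API. Returns the number of pfns
 * that still failed. */
static int retry_failed_pfns(xc_interface *xch, domid_t domid,
                             hvmmem_access_t access,
                             const xen_pfn_t *arr, const int *err,
                             uint64_t nr)
{
    uint64_t i;
    int failures = 0;

    for ( i = 0; i < nr; i++ )
    {
        if ( err[i] == 0 )
            continue; /* the bulk call already handled this pfn */
        if ( xc_hvm_set_mem_access(xch, domid, access, arr[i], 1) < 0 )
            failures++;
    }

    return failures;
}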