[Xen-devel] [PATCH-for-4.9 v1 6/8] dm_op: convert HVMOP_set_mem_type
This patch also removes the need for handling HVMOP restarts, so that
infrastructure is also removed.
Suggested-by: Jan Beulich <jbeulich@xxxxxxxx>
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
---
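For reviewers who want to see the converted call path end to end, here is a
minimal usage sketch (not part of the patch): it drives the reworked libxc
wrapper from a toolstack-style caller. The helper name
mark_region_ioreq_server and its error handling are illustrative assumptions;
only xc_hvm_set_mem_type(), xc_interface_open() and xc_interface_close() come
from the existing libxc API.

#include <stdio.h>
#include <xenctrl.h>

/* Illustrative helper: mark a guest-physical range as HVMMEM_ioreq_server. */
static int mark_region_ioreq_server(domid_t domid, uint64_t first_pfn,
                                    uint64_t nr)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    int rc;

    if ( !xch )
        return -1;

    /*
     * With this patch the wrapper marshals a single xen_dm_op buffer and
     * issues the dm_op hypercall instead of HVMOP_set_mem_type. Note that
     * nr is truncated to 32 bits by the new interface.
     */
    rc = xc_hvm_set_mem_type(xch, domid, HVMMEM_ioreq_server, first_pfn, nr);
    if ( rc < 0 )
        fprintf(stderr, "xc_hvm_set_mem_type failed (rc %d)\n", rc);

    xc_interface_close(xch);
    return rc;
}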
tools/libxc/xc_misc.c | 28 +++-----
xen/arch/x86/hvm/dm.c | 92 ++++++++++++++++++++++++
xen/arch/x86/hvm/hvm.c | 136 +-----------------------------------
xen/include/public/hvm/dm_op.h | 16 +++++
xen/include/public/hvm/hvm_op.h | 4 +-
xen/xsm/flask/policy/access_vectors | 2 +-
6 files changed, 121 insertions(+), 157 deletions(-)
diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c
index a97864e..607cf80 100644
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -584,28 +584,18 @@ int xc_hvm_modified_memory(
int xc_hvm_set_mem_type(
xc_interface *xch, domid_t dom, hvmmem_type_t mem_type, uint64_t first_pfn, uint64_t nr)
{
- DECLARE_HYPERCALL_BUFFER(struct xen_hvm_set_mem_type, arg);
- int rc;
-
- arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
- if ( arg == NULL )
- {
- PERROR("Could not allocate memory for xc_hvm_set_mem_type hypercall");
- return -1;
- }
+ struct xen_dm_op op;
+ struct xen_dm_op_set_mem_type *data;
- arg->domid = dom;
- arg->hvmmem_type = mem_type;
- arg->first_pfn = first_pfn;
- arg->nr = nr;
+ op.op = DMOP_set_mem_type;
+ data = &op.u.set_mem_type;
- rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
- HVMOP_set_mem_type,
- HYPERCALL_BUFFER_AS_ARG(arg));
-
- xc_hypercall_buffer_free(xch, arg);
+ data->mem_type = mem_type;
+ data->first_pfn = first_pfn;
+ /* NOTE: The following assignment truncates nr to 32 bits */
+ data->nr = nr;
- return rc;
+ return do_dm_op(xch, dom, 1, &op, sizeof(op));
}
int xc_hvm_inject_trap(
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 0dcd454..969b68c 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -160,6 +160,16 @@ static int dm_op_set_pci_link_route(struct domain *d, uint8_t link,
return 0;
}
+static bool_t dm_op_allow_p2m_type_change(p2m_type_t old, p2m_type_t new)
+{
+ if ( p2m_is_ram(old) ||
+ (p2m_is_hole(old) && new == p2m_mmio_dm) ||
+ (old == p2m_ioreq_server && new == p2m_ram_rw) )
+ return 1;
+
+ return 0;
+}
+
static int dm_op_modified_memory(struct domain *d, xen_pfn_t *first_pfn,
unsigned int *nr)
{
@@ -205,6 +215,79 @@ static int dm_op_modified_memory(struct domain *d, xen_pfn_t *first_pfn,
return rc;
}
+
+static int dm_op_set_mem_type(struct domain *d, hvmmem_type_t mem_type,
+ xen_pfn_t *first_pfn, unsigned int *nr)
+{
+ xen_pfn_t last_pfn = *first_pfn + *nr - 1;
+ unsigned int iter;
+ int rc;
+
+ /* Interface types to internal p2m types */
+ static const p2m_type_t memtype[] = {
+ [HVMMEM_ram_rw] = p2m_ram_rw,
+ [HVMMEM_ram_ro] = p2m_ram_ro,
+ [HVMMEM_mmio_dm] = p2m_mmio_dm,
+ [HVMMEM_unused] = p2m_invalid,
+ [HVMMEM_ioreq_server] = p2m_ioreq_server
+ };
+
+ if ( (*first_pfn > last_pfn) ||
+ (last_pfn > domain_get_maximum_gpfn(d)) )
+ return -EINVAL;
+
+ if ( mem_type >= ARRAY_SIZE(memtype) ||
+ unlikely(mem_type == HVMMEM_unused) )
+ return -EINVAL;
+
+ iter = 0;
+ rc = 0;
+ while ( iter < *nr )
+ {
+ unsigned long pfn = *first_pfn + iter;
+ p2m_type_t t;
+
+ get_gfn_unshare(d, pfn, &t);
+ if ( p2m_is_paging(t) )
+ {
+ put_gfn(d, pfn);
+ p2m_mem_paging_populate(d, pfn);
+ rc = -EAGAIN;
+ break;
+ }
+ if ( p2m_is_shared(t) )
+ {
+ put_gfn(d, pfn);
+ rc = -EAGAIN;
+ break;
+ }
+ if ( !dm_op_allow_p2m_type_change(t, memtype[mem_type]) )
+ {
+ put_gfn(d, pfn);
+ rc = -EINVAL;
+ break;
+ }
+
+ rc = p2m_change_type_one(d, pfn, t, memtype[mem_type]);
+ put_gfn(d, pfn);
+
+ if ( rc )
+ break;
+
+ /* Check for continuation if it's not the last iteration */
+ if ( (++iter < *nr) && hypercall_preempt_check() )
+ {
+ rc = -ERESTART;
+ break;
+ }
+ }
+
+ *first_pfn += iter;
+ *nr -= iter;
+
+ return rc;
+}
+
long do_dm_op(domid_t domid,
unsigned int nr_bufs,
XEN_GUEST_HANDLE_PARAM(xen_dm_op_buf_t) bufs)
@@ -331,6 +414,15 @@ long do_dm_op(domid_t domid,
rc = dm_op_modified_memory(d, &data->first_pfn, &data->nr);
break;
}
+ case DMOP_set_mem_type:
+ {
+ struct xen_dm_op_set_mem_type *data =
+ &op.u.set_mem_type;
+
+ rc = dm_op_set_mem_type(d, data->mem_type, &data->first_pfn,
+ &data->nr);
+ break;
+ }
default:
rc = -EOPNOTSUPP;
break;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 3b2e9d5..83c4063 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5249,132 +5249,11 @@ static int hvmop_get_mem_type(
return rc;
}
-/*
- * Note that this value is effectively part of the ABI, even if we don't need
- * to make it a formal part of it: A guest suspended for migration in the
- * middle of a continuation would fail to work if resumed on a hypervisor
- * using a different value.
- */
-#define HVMOP_op_mask 0xff
-
-static bool_t hvm_allow_p2m_type_change(p2m_type_t old, p2m_type_t new)
-{
- if ( p2m_is_ram(old) ||
- (p2m_is_hole(old) && new == p2m_mmio_dm) ||
- (old == p2m_ioreq_server && new == p2m_ram_rw) )
- return 1;
-
- return 0;
-}
-
-static int hvmop_set_mem_type(
- XEN_GUEST_HANDLE_PARAM(xen_hvm_set_mem_type_t) arg,
- unsigned long *iter)
-{
- unsigned long start_iter = *iter;
- struct xen_hvm_set_mem_type a;
- struct domain *d;
- int rc;
-
- /* Interface types to internal p2m types */
- static const p2m_type_t memtype[] = {
- [HVMMEM_ram_rw] = p2m_ram_rw,
- [HVMMEM_ram_ro] = p2m_ram_ro,
- [HVMMEM_mmio_dm] = p2m_mmio_dm,
- [HVMMEM_unused] = p2m_invalid,
- [HVMMEM_ioreq_server] = p2m_ioreq_server
- };
-
- if ( copy_from_guest(&a, arg, 1) )
- return -EFAULT;
-
- rc = rcu_lock_remote_domain_by_id(a.domid, &d);
- if ( rc != 0 )
- return rc;
-
- rc = -EINVAL;
- if ( !is_hvm_domain(d) )
- goto out;
-
- rc = xsm_hvm_control(XSM_DM_PRIV, d, HVMOP_set_mem_type);
- if ( rc )
- goto out;
-
- rc = -EINVAL;
- if ( a.nr < start_iter ||
- ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
- ((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d)) )
- goto out;
-
- if ( a.hvmmem_type >= ARRAY_SIZE(memtype) ||
- unlikely(a.hvmmem_type == HVMMEM_unused) )
- goto out;
-
- while ( a.nr > start_iter )
- {
- unsigned long pfn = a.first_pfn + start_iter;
- p2m_type_t t;
-
- get_gfn_unshare(d, pfn, &t);
- if ( p2m_is_paging(t) )
- {
- put_gfn(d, pfn);
- p2m_mem_paging_populate(d, pfn);
- rc = -EAGAIN;
- goto out;
- }
- if ( p2m_is_shared(t) )
- {
- put_gfn(d, pfn);
- rc = -EAGAIN;
- goto out;
- }
- if ( !hvm_allow_p2m_type_change(t, memtype[a.hvmmem_type]) )
- {
- put_gfn(d, pfn);
- goto out;
- }
-
- rc = p2m_change_type_one(d, pfn, t, memtype[a.hvmmem_type]);
- put_gfn(d, pfn);
-
- if ( rc )
- goto out;
-
- /* Check for continuation if it's not the last interation */
- if ( a.nr > ++start_iter && !(start_iter & HVMOP_op_mask) &&
- hypercall_preempt_check() )
- {
- rc = -ERESTART;
- goto out;
- }
- }
- rc = 0;
-
- out:
- rcu_unlock_domain(d);
- *iter = start_iter;
-
- return rc;
-}
-
long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
{
- unsigned long start_iter, mask;
long rc = 0;
- switch ( op & HVMOP_op_mask )
- {
- default:
- mask = ~0UL;
- break;
- case HVMOP_set_mem_type:
- mask = HVMOP_op_mask;
- break;
- }
-
- start_iter = op & ~mask;
- switch ( op &= mask )
+ switch ( op )
{
case HVMOP_set_evtchn_upcall_vector:
rc = hvmop_set_evtchn_upcall_vector(
@@ -5405,12 +5284,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
guest_handle_cast(arg, xen_hvm_get_mem_type_t));
break;
- case HVMOP_set_mem_type:
- rc = hvmop_set_mem_type(
- guest_handle_cast(arg, xen_hvm_set_mem_type_t),
- &start_iter);
- break;
-
case HVMOP_pagetable_dying:
{
struct xen_hvm_pagetable_dying a;
@@ -5519,13 +5392,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
}
}
- if ( rc == -ERESTART )
- {
- ASSERT(!(start_iter & mask));
- rc = hypercall_create_continuation(__HYPERVISOR_hvm_op, "lh",
- op | start_iter, arg);
- }
-
return rc;
}
diff --git a/xen/include/public/hvm/dm_op.h b/xen/include/public/hvm/dm_op.h
index d2065f2..247cac6 100644
--- a/xen/include/public/hvm/dm_op.h
+++ b/xen/include/public/hvm/dm_op.h
@@ -245,6 +245,21 @@ struct xen_dm_op_modified_memory {
uint64_t first_pfn;
};
+/*
+ * DMOP_set_mem_type: Notify that a region of memory is to be treated in a
+ * specific way. (See definition of hvmmem_type_t).
+ */
+#define DMOP_set_mem_type 12
+
+struct xen_dm_op_set_mem_type {
+ /* IN - number of contiguous pages */
+ uint32_t nr;
+ /* IN - first pfn in region */
+ uint64_t first_pfn;
+ /* IN - new hvmmem_type_t of region */
+ uint16_t mem_type;
+};
+
struct xen_dm_op {
uint32_t op;
@@ -260,6 +275,7 @@ struct xen_dm_op {
struct xen_dm_op_set_isa_irq_level set_isa_irq_level;
struct xen_dm_op_set_pci_link_route set_pci_link_route;
struct xen_dm_op_modified_memory modified_memory;
+ struct xen_dm_op_set_mem_type set_mem_type;
} u;
};
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index 45879cf..2e9a1f6 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -131,8 +131,6 @@ struct xen_hvm_modified_memory {
typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
-#endif /* __XEN_INTERFACE_VERSION__ < 0x00040900 */
-
#define HVMOP_set_mem_type 8
/* Notify that a region of memory is to be treated in a specific way. */
struct xen_hvm_set_mem_type {
@@ -148,6 +146,8 @@ struct xen_hvm_set_mem_type {
typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t);
+#endif /* __XEN_INTERFACE_VERSION__ < 0x00040900 */
+
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
/* Hint from PV drivers for pagetable destruction. */
diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
index 2041ca5..125210b 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -260,7 +260,7 @@ class hvm
bind_irq
# XEN_DOMCTL_pin_mem_cacheattr
cacheattr
-# HVMOP_get_mem_type, HVMOP_set_mem_type,
+# HVMOP_get_mem_type,
# HVMOP_set_mem_access, HVMOP_get_mem_access, HVMOP_pagetable_dying,
# HVMOP_inject_trap
hvmctl
--
2.1.4