[Xen-devel] [PATCH v8 05/11] x86/mm: add HYPERVISOR_memory_op to acquire guest resources
Certain memory resources associated with a guest are not necessarily
present in the guest P2M.
This patch adds the boilerplate for a new memory op to allow such a resource
to be priv-mapped directly by either a PV or HVM tools domain.
NOTE: Whilst the new op is not intrinsically specific to the x86 architecture,
I have no means to test it on an ARM platform and so cannot verify
that it functions correctly. Hence it is currently only implemented
for x86.
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
v8:
- Move the code into common as requested by Jan.
- Make the gmfn_list handle a 64-bit type to avoid limiting the MFN
range for a 32-bit tools domain.
- Add missing pad.
- Add compat code.
- Make this patch deal purely with boilerplate.
- Drop George's A-b and Wei's R-b because the changes are non-trivial,
and update the Cc list now that the boilerplate is common.
v5:
- Switched __copy_to/from_guest_offset() to copy_to/from_guest_offset().
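
For reference only (not part of the patch): a minimal sketch of how a PV
tools domain might invoke the new op once concrete resource types are
defined. The HYPERVISOR_memory_op() wrapper and set_xen_guest_handle()
macro are assumed to be the usual guest-side helpers, and the type value
is a hypothetical placeholder, since this patch adds no concrete types.

/*
 * Illustrative sketch only -- not part of this patch.
 */
static int acquire_frames(domid_t domid, unsigned int nr,
                          xen_ulong_t *gmfn_list)
{
    struct xen_mem_acquire_resource xmar = {
        .domid = domid,
        .type = 0,          /* hypothetical resource type */
        .id = 0,
        .nr_frames = nr,
        .frame = 0,         /* start from the first frame */
    };

    /*
     * PV caller: gmfn_list is an OUT parameter, filled with the MFNs
     * of the resource on success.
     */
    set_xen_guest_handle(xmar.gmfn_list, gmfn_list);

    return HYPERVISOR_memory_op(XENMEM_acquire_resource, &xmar);
}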
---
xen/arch/x86/mm/p2m.c | 3 +-
xen/common/compat/memory.c | 52 ++++++++++++++++++++++++++++++
xen/common/memory.c | 77 +++++++++++++++++++++++++++++++++++++++++++++
xen/include/asm-x86/p2m.h | 3 ++
xen/include/public/memory.h | 32 ++++++++++++++++++-
xen/include/xlat.lst | 1 +
6 files changed, 165 insertions(+), 3 deletions(-)
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 0b479105b9..d0f8fc249b 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1121,8 +1121,7 @@ static int set_typed_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
}
/* Set foreign mfn in the given guest's p2m table. */
-static int set_foreign_p2m_entry(struct domain *d, unsigned long gfn,
- mfn_t mfn)
+int set_foreign_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
{
return set_typed_p2m_entry(d, gfn, mfn, PAGE_ORDER_4K, p2m_map_foreign,
p2m_get_hostp2m(d)->default_access);
diff --git a/xen/common/compat/memory.c b/xen/common/compat/memory.c
index 35bb259808..3af7922e59 100644
--- a/xen/common/compat/memory.c
+++ b/xen/common/compat/memory.c
@@ -71,6 +71,7 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
struct xen_remove_from_physmap *xrfp;
struct xen_vnuma_topology_info *vnuma;
struct xen_mem_access_op *mao;
+ struct xen_mem_acquire_resource *mar;
} nat;
union {
struct compat_memory_reservation rsrv;
@@ -79,6 +80,7 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
struct compat_add_to_physmap_batch atpb;
struct compat_vnuma_topology_info vnuma;
struct compat_mem_access_op mao;
+ struct compat_mem_acquire_resource mar;
} cmp;
set_xen_guest_handle(nat.hnd, COMPAT_ARG_XLAT_VIRT_BASE);
@@ -395,6 +397,39 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
}
#endif
+ case XENMEM_acquire_resource:
+ {
+ xen_ulong_t *gmfn_list = (xen_ulong_t *)(nat.mar + 1);
+
+ if ( copy_from_guest(&cmp.mar, compat, 1) ||
+ !compat_handle_okay(cmp.mar.gmfn_list,
+ cmp.mar.nr_frames) )
+ return -EFAULT;
+
+ if ( sizeof(*gmfn_list) * cmp.mar.nr_frames >
+ COMPAT_ARG_XLAT_SIZE - sizeof(*nat.mar) )
+ return -E2BIG;
+
+ for ( i = 0; i < cmp.mar.nr_frames; i++ )
+ {
+ compat_ulong_t gmfn;
+
+ if ( __copy_from_compat_offset(&gmfn, cmp.mar.gmfn_list,
+ i, 1) )
+ return -EFAULT;
+
+ gmfn_list[i] = gmfn;
+ }
+
+#define XLAT_mem_acquire_resource_HNDL_gmfn_list(_d_, _s_) \
+ set_xen_guest_handle((_d_)->gmfn_list, gmfn_list)
+
+ XLAT_mem_acquire_resource(nat.mar, &cmp.mar);
+
+#undef XLAT_mem_acquire_resource_HNDL_gmfn_list
+
+ break;
+ }
default:
return compat_arch_memory_op(cmd, compat);
}
@@ -535,6 +570,23 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
rc = -EFAULT;
break;
+ case XENMEM_acquire_resource:
+ {
+ xen_ulong_t *gmfn_list = (xen_ulong_t *)(nat.mar + 1);
+
+ for ( i = 0; i < cmp.mar.nr_frames; i++ )
+ {
+ compat_ulong_t gmfn = gmfn_list[i];
+
+ if ( gmfn != gmfn_list[i] )
+ return -ERANGE;
+
+ if ( __copy_to_compat_offset(cmp.mar.gmfn_list, i,
+ &gmfn, 1) )
+ return -EFAULT;
+ }
+ break;
+ }
default:
domain_crash(current->domain);
split = 0;
diff --git a/xen/common/memory.c b/xen/common/memory.c
index a2abf554e3..a56bfcc88f 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -964,6 +964,67 @@ static long xatp_permission_check(struct domain *d, unsigned int space)
return xsm_add_to_physmap(XSM_TARGET, current->domain, d);
}
+#ifdef CONFIG_X86
+static int acquire_resource(const xen_mem_acquire_resource_t *xmar)
+{
+ struct domain *d, *currd = current->domain;
+ unsigned long mfn_list[2];
+ int rc;
+
+ if ( xmar->nr_frames == 0 || xmar->pad != 0 )
+ return -EINVAL;
+
+ if ( xmar->nr_frames > ARRAY_SIZE(mfn_list) )
+ return -E2BIG;
+
+ d = rcu_lock_domain_by_any_id(xmar->domid);
+ if ( d == NULL )
+ return -ESRCH;
+
+ rc = xsm_domain_memory_map(XSM_TARGET, d);
+ if ( rc )
+ goto out;
+
+ switch ( xmar->type )
+ {
+ default:
+ rc = -EOPNOTSUPP;
+ break;
+ }
+
+ if ( rc )
+ goto out;
+
+ if ( !paging_mode_translate(currd) )
+ {
+ if ( copy_to_guest_offset(xmar->gmfn_list, 0, mfn_list,
+ xmar->nr_frames) )
+ rc = -EFAULT;
+ }
+ else
+ {
+ unsigned int i;
+
+ for ( i = 0; i < xmar->nr_frames; i++ )
+ {
+ xen_pfn_t gfn;
+
+ rc = -EFAULT;
+ if ( copy_from_guest_offset(&gfn, xmar->gmfn_list, i, 1) )
+ goto out;
+
+ rc = set_foreign_p2m_entry(currd, gfn, _mfn(mfn_list[i]));
+ if ( rc )
+ goto out;
+ }
+ }
+
+ out:
+ rcu_unlock_domain(d);
+ return rc;
+}
+#endif /* CONFIG_X86 */
+
long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
{
struct domain *d, *curr_d = current->domain;
@@ -1405,6 +1466,22 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
}
#endif
+ case XENMEM_acquire_resource:
+#ifdef CONFIG_X86
+ {
+ xen_mem_acquire_resource_t xmar;
+
+ if ( copy_from_guest(&xmar, arg, 1) )
+ return -EFAULT;
+
+ rc = acquire_resource(&xmar);
+ break;
+ }
+#else
+ rc = -EOPNOTSUPP;
+ break;
+#endif
+
default:
rc = arch_memory_op(cmd, arg);
break;
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 10cdfc09a9..4eff0458bc 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -613,6 +613,9 @@ void p2m_memory_type_changed(struct domain *d);
int p2m_is_logdirty_range(struct p2m_domain *, unsigned long start,
unsigned long end);
+/* Set foreign entry in the p2m table (for priv-mapping) */
+int set_foreign_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
+
/* Set mmio addresses in the p2m table (for pass-through) */
int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
unsigned int order, p2m_access_t access);
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 29386df98b..3aa8fb2fe1 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -599,6 +599,36 @@ struct xen_reserved_device_memory_map {
typedef struct xen_reserved_device_memory_map xen_reserved_device_memory_map_t;
DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_map_t);
+/*
+ * Get the pages for a particular guest resource, so that they can be
+ * mapped directly by a tools domain.
+ */
+#define XENMEM_acquire_resource 28
+struct xen_mem_acquire_resource {
+ /* IN - the domain whose resource is to be mapped */
+ domid_t domid;
+ /* IN - the type of resource */
+ uint16_t type;
+ /*
+ * IN - a type-specific resource identifier, which must be zero
+ * unless stated otherwise.
+ */
+ uint32_t id;
+ /* IN - number of (4K) frames of the resource to be mapped */
+ uint32_t nr_frames;
+ uint32_t pad;
+ /* IN - the index of the initial frame to be mapped */
+ uint64_aligned_t frame;
+ /* IN/OUT - If the tools domain is PV then, upon return, gmfn_list
+ * will be populated with the MFNs of the resource.
+ * If the tools domain is HVM then it is expected that, on
+ * entry, gmfn_list will be populated with a list of GFNs
+ * that will be mapped to the MFNs of the resource.
+ */
+ XEN_GUEST_HANDLE(xen_ulong_t) gmfn_list;
+};
+typedef struct xen_mem_acquire_resource xen_mem_acquire_resource_t;
+
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
/*
@@ -650,7 +680,7 @@ struct xen_vnuma_topology_info {
typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t;
DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t);
-/* Next available subop number is 28 */
+/* Next available subop number is 29 */
#endif /* __XEN_PUBLIC_MEMORY_H__ */
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index 0f17000ea7..5835872334 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -83,6 +83,7 @@
! memory_map memory.h
! memory_reservation memory.h
! mem_access_op memory.h
+! mem_acquire_resource memory.h
! pod_target memory.h
! remove_from_physmap memory.h
! reserved_device_memory_map memory.h
--
2.11.0
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel