|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 4 of 8] x86/mm: wire up sharing ring
xen/arch/x86/mm/mem_event.c | 41 +++++++++++++++++++++++++++++++++++++++++
xen/include/public/domctl.h | 20 +++++++++++++++++++-
xen/include/xen/sched.h | 3 +++
3 files changed, 63 insertions(+), 1 deletions(-)
Now that we have an interface close to finalizing, do the necessary plumbing to
set up a ring for reporting failed allocations in the unshare path.
Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
diff -r 39e3ee550391 -r d03aa37a0288 xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -432,6 +432,13 @@ static void mem_access_notification(stru
p2m_mem_access_resume(v->domain);
}
+/* Registered with Xen-bound event channel for incoming notifications. */
+static void mem_sharing_notification(struct vcpu *v, unsigned int port)
+{
+ if ( likely(v->domain->mem_event->share.ring_page != NULL) )
+ mem_sharing_sharing_resume(v->domain);
+}
+
struct domain *get_mem_event_op_target(uint32_t domain, int *rc)
{
struct domain *d;
@@ -599,6 +606,40 @@ int mem_event_domctl(struct domain *d, x
}
break;
+ case XEN_DOMCTL_MEM_EVENT_OP_SHARING:
+ {
+ struct mem_event_domain *med = &d->mem_event->share;
+ rc = -EINVAL;
+
+ switch( mec->op )
+ {
+ case XEN_DOMCTL_MEM_EVENT_OP_SHARING_ENABLE:
+ {
+ rc = -ENODEV;
+ /* Only HAP is supported */
+ if ( !hap_enabled(d) )
+ break;
+
+ rc = mem_event_enable(d, mec, med, _VPF_mem_sharing,
+ HVM_PARAM_SHARING_RING_PFN,
+ mem_sharing_notification);
+ }
+ break;
+
+ case XEN_DOMCTL_MEM_EVENT_OP_SHARING_DISABLE:
+ {
+ if ( med->ring_page )
+ rc = mem_event_disable(d, med);
+ }
+ break;
+
+ default:
+ rc = -ENOSYS;
+ break;
+ }
+ }
+ break;
+
default:
rc = -ENOSYS;
}
diff -r 39e3ee550391 -r d03aa37a0288 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -710,7 +710,7 @@ struct xen_domctl_gdbsx_domstatus {
/* XEN_DOMCTL_mem_event_op */
/*
-* Domain memory paging
+ * Domain memory paging
* Page memory in and out.
* Domctl interface to set up and tear down the
* pager<->hypervisor interface. Use XENMEM_paging_op*
@@ -740,6 +740,24 @@ struct xen_domctl_gdbsx_domstatus {
#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE 0
#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE 1
+/*
+ * Sharing ENOMEM helper.
+ *
+ * As with paging, use the domctl for teardown/setup of the
+ * helper<->hypervisor interface.
+ *
+ * If set up, this ring is used to communicate failed allocations
+ * in the unshare path. XENMEM_sharing_op_resume is used to wake up
+ * vcpus that could not unshare.
+ *
+ * Note that sharing can be turned on (as per the domctl below)
+ * *without* this ring being setup.
+ */
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING 3
+
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_ENABLE 0
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DISABLE 1
+
/* Use for teardown/setup of helper<->hypervisor interface for paging,
* access and sharing.*/
struct xen_domctl_mem_event_op {
diff -r 39e3ee550391 -r d03aa37a0288 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -643,6 +643,9 @@ static inline struct domain *next_domain
/* VCPU is blocked due to missing mem_access ring. */
#define _VPF_mem_access 5
#define VPF_mem_access (1UL<<_VPF_mem_access)
+ /* VCPU is blocked due to missing mem_sharing ring. */
+#define _VPF_mem_sharing 6
+#define VPF_mem_sharing (1UL<<_VPF_mem_sharing)
static inline int vcpu_runnable(struct vcpu *v)
{
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |