[Xen-devel] [RFC PATCH 3/6] vm_event: Refactor vm_event_domain implementation
Decouple the VM event interface from the ring implementation.

Introduce an abstract struct vm_event_domain that carries only the per-instance
ops (check, claim_slot, release_slot, put_request, get_response, disable), a
back-pointer to the owning domain and the lock, and move all ring-specific
state into the new struct vm_event_domain_ring. Callers switch from
vm_event_check_ring() to vm_event_check() and no longer pass the struct domain
to vm_event_claim_slot(), vm_event_cancel_slot() and vm_event_put_request();
vm_event_get_response() and vm_event_resume() become internal to
xen/common/vm_event.c.
---
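Note (illustration only, not part of the patch): after this change the generic
vm_event helpers are thin wrappers that check for a registered implementation
and forward to its per-instance ops; the ring code becomes just one such
implementation. A simplified sketch of the dispatch pattern, using only names
introduced by the patch below:

    bool vm_event_check(struct vm_event_domain *ved)
    {
        /* No implementation registered means no subscriber. */
        return ved && ved->check(ved);
    }

    void vm_event_put_request(struct vm_event_domain *ved,
                              vm_event_request_t *req)
    {
        if ( !vm_event_check(ved) )
            return;

        /* Forward to the registered implementation (ring-backed today). */
        ved->put_request(ved, req);
    }

vm_event_ring_enable() fills in these ops with the ring-backed functions
(vm_event_ring_check, vm_event_ring_put_request, ...).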
xen/arch/arm/mem_access.c | 2 +-
xen/arch/x86/mm/mem_access.c | 4 +-
xen/arch/x86/mm/mem_paging.c | 2 +-
xen/arch/x86/mm/mem_sharing.c | 5 +-
xen/arch/x86/mm/p2m.c | 10 +-
xen/common/mem_access.c | 2 +-
xen/common/monitor.c | 4 +-
xen/common/vm_event.c | 503 ++++++++++++++++++++++++------------------
xen/drivers/passthrough/pci.c | 2 +-
xen/include/xen/sched.h | 25 +--
xen/include/xen/vm_event.h | 26 +--
11 files changed, 312 insertions(+), 273 deletions(-)
diff --git a/xen/arch/arm/mem_access.c b/xen/arch/arm/mem_access.c
index db49372..ba0114a 100644
--- a/xen/arch/arm/mem_access.c
+++ b/xen/arch/arm/mem_access.c
@@ -290,7 +290,7 @@ bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
}
/* Otherwise, check if there is a vm_event monitor subscriber */
- if ( !vm_event_check_ring(v->domain->vm_event_monitor) )
+ if ( !vm_event_check(v->domain->vm_event_monitor) )
{
/* No listener */
if ( p2m->access_required )
diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index 56c06a4..57aeda7 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -182,7 +182,7 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
gfn_unlock(p2m, gfn, 0);
/* Otherwise, check if there is a memory event listener, and send the
 * message along */
- if ( !vm_event_check_ring(d->vm_event_monitor) || !req_ptr )
+ if ( !vm_event_check(d->vm_event_monitor) || !req_ptr )
{
/* No listener */
if ( p2m->access_required )
@@ -210,7 +210,7 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
return true;
}
}
- if ( vm_event_check_ring(d->vm_event_monitor) &&
+ if ( vm_event_check(d->vm_event_monitor) &&
d->arch.monitor.inguest_pagefault_disabled &&
npfec.kind != npfec_kind_with_gla ) /* don't send a mem_event */
{
diff --git a/xen/arch/x86/mm/mem_paging.c b/xen/arch/x86/mm/mem_paging.c
index 54a94fa..dc2a59a 100644
--- a/xen/arch/x86/mm/mem_paging.c
+++ b/xen/arch/x86/mm/mem_paging.c
@@ -44,7 +44,7 @@ int mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg)
goto out;
rc = -ENODEV;
- if ( unlikely(!vm_event_check_ring(d->vm_event_paging)) )
+ if ( unlikely(!vm_event_check(d->vm_event_paging)) )
goto out;
switch( mpo.op )
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 5ac9d8f..91e92a7 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -557,8 +557,7 @@ int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
.u.mem_sharing.p2mt = p2m_ram_shared
};
- if ( (rc = __vm_event_claim_slot(d,
- d->vm_event_share, allow_sleep)) < 0 )
+ if ( (rc = __vm_event_claim_slot(d->vm_event_share, allow_sleep)) < 0 )
return rc;
if ( v->domain == d )
@@ -567,7 +566,7 @@ int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
vm_event_vcpu_pause(v);
}
- vm_event_put_request(d, d->vm_event_share, &req);
+ vm_event_put_request(d->vm_event_share, &req);
return 0;
}
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index fea4497..3876dda 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1462,7 +1462,7 @@ void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn,
* correctness of the guest execution at this point. If this is the only
* page that happens to be paged-out, we'll be okay.. but it's likely the
* guest will crash shortly anyways. */
- int rc = vm_event_claim_slot(d, d->vm_event_paging);
+ int rc = vm_event_claim_slot(d->vm_event_paging);
if ( rc < 0 )
return;
@@ -1476,7 +1476,7 @@ void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn,
/* Evict will fail now, tag this request for pager */
req.u.mem_paging.flags |= MEM_PAGING_EVICT_FAIL;
- vm_event_put_request(d, d->vm_event_paging, &req);
+ vm_event_put_request(d->vm_event_paging, &req);
}
/**
@@ -1514,7 +1514,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn_l)
struct p2m_domain *p2m = p2m_get_hostp2m(d);
/* We're paging. There should be a ring */
- int rc = vm_event_claim_slot(d, d->vm_event_paging);
+ int rc = vm_event_claim_slot(d->vm_event_paging);
if ( rc == -ENOSYS )
{
gdprintk(XENLOG_ERR, "Domain %hu paging gfn %lx yet no ring "
@@ -1555,7 +1555,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn_l)
{
/* gfn is already on its way back and vcpu is not paused */
out_cancel:
- vm_event_cancel_slot(d, d->vm_event_paging);
+ vm_event_cancel_slot(d->vm_event_paging);
return;
}
@@ -1563,7 +1563,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn_l)
req.u.mem_paging.p2mt = p2mt;
req.vcpu_id = v->vcpu_id;
- vm_event_put_request(d, d->vm_event_paging, &req);
+ vm_event_put_request(d->vm_event_paging, &req);
}
/**
diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
index 010e6f8..51e4e2b 100644
--- a/xen/common/mem_access.c
+++ b/xen/common/mem_access.c
@@ -52,7 +52,7 @@ int mem_access_memop(unsigned long cmd,
goto out;
rc = -ENODEV;
- if ( unlikely(!vm_event_check_ring(d->vm_event_monitor)) )
+ if ( unlikely(!vm_event_check(d->vm_event_monitor)) )
goto out;
switch ( mao.op )
diff --git a/xen/common/monitor.c b/xen/common/monitor.c
index c606683..fdf7b23 100644
--- a/xen/common/monitor.c
+++ b/xen/common/monitor.c
@@ -93,7 +93,7 @@ int monitor_traps(struct vcpu *v, bool sync, vm_event_request_t *req)
int rc;
struct domain *d = v->domain;
- rc = vm_event_claim_slot(d, d->vm_event_monitor);
+ rc = vm_event_claim_slot(d->vm_event_monitor);
switch ( rc )
{
case 0:
@@ -124,7 +124,7 @@ int monitor_traps(struct vcpu *v, bool sync, vm_event_request_t *req)
}
vm_event_fill_regs(req);
- vm_event_put_request(d, d->vm_event_monitor, req);
+ vm_event_put_request(d->vm_event_monitor, req);
return rc;
}
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index dddc2d4..77da41b 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -35,86 +35,66 @@
#define xen_rmb() smp_rmb()
#define xen_wmb() smp_wmb()
-#define vm_event_ring_lock_init(_ved) spin_lock_init(&(_ved)->ring_lock)
-#define vm_event_ring_lock(_ved) spin_lock(&(_ved)->ring_lock)
-#define vm_event_ring_unlock(_ved) spin_unlock(&(_ved)->ring_lock)
+#define vm_event_lock_init(_ved) spin_lock_init(&(_ved)->lock)
+#define vm_event_lock(_ved) spin_lock(&(_ved)->lock)
+#define vm_event_unlock(_ved) spin_unlock(&(_ved)->lock)
-static int vm_event_enable(
- struct domain *d,
- struct xen_domctl_vm_event_op *vec,
- struct vm_event_domain **ved,
- int pause_flag,
- int param,
- xen_event_channel_notification_t notification_fn)
-{
- int rc;
- unsigned long ring_gfn = d->arch.hvm.params[param];
-
- if ( !*ved )
- *ved = xzalloc(struct vm_event_domain);
- if ( !*ved )
- return -ENOMEM;
-
- /* Only one helper at a time. If the helper crashed,
- * the ring is in an undefined state and so is the guest.
- */
- if ( (*ved)->ring_page )
- return -EBUSY;;
-
- /* The parameter defaults to zero, and it should be
- * set to something */
- if ( ring_gfn == 0 )
- return -ENOSYS;
-
- vm_event_ring_lock_init(*ved);
- vm_event_ring_lock(*ved);
-
- rc = vm_event_init_domain(d);
-
- if ( rc < 0 )
- goto err;
-
- rc = prepare_ring_for_helper(d, ring_gfn, &(*ved)->ring_pg_struct,
- &(*ved)->ring_page);
- if ( rc < 0 )
- goto err;
-
- /* Set the number of currently blocked vCPUs to 0. */
- (*ved)->blocked = 0;
-
- /* Allocate event channel */
- rc = alloc_unbound_xen_event_channel(d, 0, current->domain->domain_id,
- notification_fn);
- if ( rc < 0 )
- goto err;
-
- (*ved)->xen_port = vec->port = rc;
-
- /* Prepare ring buffer */
- FRONT_RING_INIT(&(*ved)->front_ring,
- (vm_event_sring_t *)(*ved)->ring_page,
- PAGE_SIZE);
-
- /* Save the pause flag for this particular ring. */
- (*ved)->pause_flag = pause_flag;
-
- /* Initialize the last-chance wait queue. */
- init_waitqueue_head(&(*ved)->wq);
-
- vm_event_ring_unlock(*ved);
- return 0;
+#define to_vm_event_domain_ring(_ved) container_of(_ved, struct vm_event_domain_ring, ved)
- err:
- destroy_ring_for_helper(&(*ved)->ring_page,
- (*ved)->ring_pg_struct);
- vm_event_ring_unlock(*ved);
- xfree(*ved);
- *ved = NULL;
+struct vm_event_domain
+{
+ /* VM event ops */
+ bool (*check)(struct vm_event_domain *ved);
+ int (*claim_slot)(struct vm_event_domain *ved, bool allow_sleep);
+ void (*release_slot)(struct vm_event_domain *ved);
+ void (*put_request)(struct vm_event_domain *ved, vm_event_request_t *req);
+ int (*get_response)(struct vm_event_domain *ved, vm_event_response_t *rsp);
+ int (*disable)(struct vm_event_domain **_ved);
+
+ /* The domain associated with the VM event */
+ struct domain *d;
+
+ /* ring lock */
+ spinlock_t lock;
+};
+
+bool vm_event_check(struct vm_event_domain *ved)
+{
+ return (ved && ved->check(ved));
+}
- return rc;
+/* VM event domain ring implementation */
+struct vm_event_domain_ring
+{
+ /* VM event domain */
+ struct vm_event_domain ved;
+ /* The ring has 64 entries */
+ unsigned char foreign_producers;
+ unsigned char target_producers;
+ /* shared ring page */
+ void *ring_page;
+ struct page_info *ring_pg_struct;
+ /* front-end ring */
+ vm_event_front_ring_t front_ring;
+ /* event channel port (vcpu0 only) */
+ int xen_port;
+ /* vm_event bit for vcpu->pause_flags */
+ int pause_flag;
+ /* list of vcpus waiting for room in the ring */
+ struct waitqueue_head wq;
+ /* the number of vCPUs blocked */
+ unsigned int blocked;
+ /* The last vcpu woken up */
+ unsigned int last_vcpu_wake_up;
+};
+
+static bool vm_event_ring_check(struct vm_event_domain *ved)
+{
+ struct vm_event_domain_ring *impl = to_vm_event_domain_ring(ved);
+ return impl->ring_page != NULL;
}
-static unsigned int vm_event_ring_available(struct vm_event_domain *ved)
+static unsigned int vm_event_ring_available(struct vm_event_domain_ring *ved)
{
int avail_req = RING_FREE_REQUESTS(&ved->front_ring);
avail_req -= ved->target_producers;
@@ -126,15 +106,16 @@ static unsigned int vm_event_ring_available(struct vm_event_domain *ved)
}
/*
- * vm_event_wake_blocked() will wakeup vcpus waiting for room in the
+ * vm_event_ring_wake_blocked() will wakeup vcpus waiting for room in the
* ring. These vCPUs were paused on their way out after placing an event,
* but need to be resumed where the ring is capable of processing at least
* one event from them.
*/
-static void vm_event_wake_blocked(struct domain *d, struct vm_event_domain *ved)
+static void vm_event_ring_wake_blocked(struct vm_event_domain_ring *ved)
{
struct vcpu *v;
unsigned int avail_req = vm_event_ring_available(ved);
+ struct domain *d = ved->ved.d;
if ( avail_req == 0 || ved->blocked == 0 )
return;
@@ -171,7 +152,7 @@ static void vm_event_wake_blocked(struct domain *d, struct vm_event_domain *ved)
* was unable to do so, it is queued on a wait queue. These are woken as
* needed, and take precedence over the blocked vCPUs.
*/
-static void vm_event_wake_queued(struct domain *d, struct vm_event_domain *ved)
+static void vm_event_ring_wake_queued(struct vm_event_domain_ring *ved)
{
unsigned int avail_req = vm_event_ring_available(ved);
@@ -180,79 +161,84 @@ static void vm_event_wake_queued(struct domain *d, struct vm_event_domain *ved)
}
/*
- * vm_event_wake() will wakeup all vcpus waiting for the ring to
+ * vm_event_ring_wake() will wakeup all vcpus waiting for the ring to
* become available. If we have queued vCPUs, they get top priority. We
* are guaranteed that they will go through code paths that will eventually
- * call vm_event_wake() again, ensuring that any blocked vCPUs will get
+ * call vm_event_ring_wake() again, ensuring that any blocked vCPUs will get
* unpaused once all the queued vCPUs have made it through.
*/
-void vm_event_wake(struct domain *d, struct vm_event_domain *ved)
+static void vm_event_ring_wake(struct vm_event_domain_ring *ved)
{
if (!list_empty(&ved->wq.list))
- vm_event_wake_queued(d, ved);
+ vm_event_ring_wake_queued(ved);
else
- vm_event_wake_blocked(d, ved);
+ vm_event_ring_wake_blocked(ved);
}
-static int vm_event_disable(struct domain *d, struct vm_event_domain **ved)
+static int vm_event_disable(struct vm_event_domain **_ved)
{
- if ( vm_event_check_ring(*ved) )
- {
- struct vcpu *v;
+ return ( vm_event_check(*_ved) ) ? (*_ved)->disable(_ved) : 0;
+}
- vm_event_ring_lock(*ved);
+static int vm_event_ring_disable(struct vm_event_domain **_ved)
+{
+ struct vcpu *v;
+ struct vm_event_domain_ring *ved = to_vm_event_domain_ring(*_ved);
+ struct domain *d = ved->ved.d;
- if ( !list_empty(&(*ved)->wq.list) )
- {
- vm_event_ring_unlock(*ved);
- return -EBUSY;
- }
+ vm_event_lock(&ved->ved);
+
+ if ( !list_empty(&ved->wq.list) )
+ {
+ vm_event_unlock(&ved->ved);
+ return -EBUSY;
+ }
- /* Free domU's event channel and leave the other one unbound */
- free_xen_event_channel(d, (*ved)->xen_port);
+ /* Free domU's event channel and leave the other one unbound */
+ free_xen_event_channel(d, ved->xen_port);
- /* Unblock all vCPUs */
- for_each_vcpu ( d, v )
+ /* Unblock all vCPUs */
+ for_each_vcpu ( d, v )
+ {
+ if ( test_and_clear_bit(ved->pause_flag, &v->pause_flags) )
{
- if ( test_and_clear_bit((*ved)->pause_flag, &v->pause_flags) )
- {
- vcpu_unpause(v);
- (*ved)->blocked--;
- }
+ vcpu_unpause(v);
+ ved->blocked--;
}
+ }
- destroy_ring_for_helper(&(*ved)->ring_page,
- (*ved)->ring_pg_struct);
+ destroy_ring_for_helper(&ved->ring_page,
+ ved->ring_pg_struct);
- vm_event_cleanup_domain(d);
+ vm_event_cleanup_domain(d);
- vm_event_ring_unlock(*ved);
- }
+ vm_event_unlock(&ved->ved);
- xfree(*ved);
- *ved = NULL;
+ XFREE(*_ved);
return 0;
}
-static inline void vm_event_release_slot(struct domain *d,
- struct vm_event_domain *ved)
+static inline void vm_event_ring_release_slot(struct vm_event_domain *ved)
{
+ struct vm_event_domain_ring *impl = to_vm_event_domain_ring(ved);
+
/* Update the accounting */
- if ( current->domain == d )
- ved->target_producers--;
+ if ( current->domain == ved->d )
+ impl->target_producers--;
else
- ved->foreign_producers--;
+ impl->foreign_producers--;
/* Kick any waiters */
- vm_event_wake(d, ved);
+ vm_event_ring_wake(impl);
}
/*
- * vm_event_mark_and_pause() tags vcpu and put it to sleep.
- * The vcpu will resume execution in vm_event_wake_blocked().
+ * vm_event_ring_mark_and_pause() tags vcpu and put it to sleep.
+ * The vcpu will resume execution in vm_event_ring_wake_blocked().
*/
-void vm_event_mark_and_pause(struct vcpu *v, struct vm_event_domain *ved)
+static void vm_event_ring_mark_and_pause(struct vcpu *v,
+ struct vm_event_domain_ring *ved)
{
if ( !test_and_set_bit(ved->pause_flag, &v->pause_flags) )
{
@@ -261,24 +247,31 @@ void vm_event_mark_and_pause(struct vcpu *v, struct vm_event_domain *ved)
}
}
+void vm_event_put_request(struct vm_event_domain *ved,
+ vm_event_request_t *req)
+{
+ if( !vm_event_check(ved))
+ return;
+
+ ved->put_request(ved, req);
+}
+
/*
* This must be preceded by a call to claim_slot(), and is guaranteed to
* succeed. As a side-effect however, the vCPU may be paused if the ring is
* overly full and its continued execution would cause stalling and excessive
* waiting. The vCPU will be automatically unpaused when the ring clears.
*/
-void vm_event_put_request(struct domain *d,
- struct vm_event_domain *ved,
- vm_event_request_t *req)
+static void vm_event_ring_put_request(struct vm_event_domain *ved,
+ vm_event_request_t *req)
{
vm_event_front_ring_t *front_ring;
int free_req;
unsigned int avail_req;
RING_IDX req_prod;
struct vcpu *curr = current;
-
- if( !vm_event_check_ring(ved))
- return;
+ struct domain *d = ved->d;
+ struct vm_event_domain_ring *impl = to_vm_event_domain_ring(ved);
if ( curr->domain != d )
{
@@ -286,16 +279,16 @@ void vm_event_put_request(struct domain *d,
#ifndef NDEBUG
if ( !(req->flags & VM_EVENT_FLAG_VCPU_PAUSED) )
gdprintk(XENLOG_G_WARNING, "d%dv%d was not paused.\n",
- d->domain_id, req->vcpu_id);
+ ved->d->domain_id, req->vcpu_id);
#endif
}
req->version = VM_EVENT_INTERFACE_VERSION;
- vm_event_ring_lock(ved);
+ vm_event_lock(ved);
/* Due to the reservations, this step must succeed. */
- front_ring = &ved->front_ring;
+ front_ring = &impl->front_ring;
free_req = RING_FREE_REQUESTS(front_ring);
ASSERT(free_req > 0);
@@ -309,35 +302,36 @@ void vm_event_put_request(struct domain *d,
RING_PUSH_REQUESTS(front_ring);
/* We've actually *used* our reservation, so release the slot. */
- vm_event_release_slot(d, ved);
+ vm_event_ring_release_slot(ved);
/* Give this vCPU a black eye if necessary, on the way out.
* See the comments above wake_blocked() for more information
* on how this mechanism works to avoid waiting. */
- avail_req = vm_event_ring_available(ved);
+ avail_req = vm_event_ring_available(impl);
if( curr->domain == d && avail_req < d->max_vcpus &&
!atomic_read(&curr->vm_event_pause_count) )
- vm_event_mark_and_pause(curr, ved);
+ vm_event_ring_mark_and_pause(curr, impl);
- vm_event_ring_unlock(ved);
+ vm_event_unlock(ved);
- notify_via_xen_event_channel(d, ved->xen_port);
+ notify_via_xen_event_channel(d, impl->xen_port);
}
-int vm_event_get_response(struct domain *d, struct vm_event_domain *ved,
- vm_event_response_t *rsp)
+static int vm_event_ring_get_response(struct vm_event_domain *ved,
+ vm_event_response_t *rsp)
{
vm_event_front_ring_t *front_ring;
RING_IDX rsp_cons;
+ struct vm_event_domain_ring *impl = (struct vm_event_domain_ring *)ved;
- vm_event_ring_lock(ved);
+ vm_event_lock(ved);
- front_ring = &ved->front_ring;
+ front_ring = &impl->front_ring;
rsp_cons = front_ring->rsp_cons;
if ( !RING_HAS_UNCONSUMED_RESPONSES(front_ring) )
{
- vm_event_ring_unlock(ved);
+ vm_event_unlock(ved);
return 0;
}
@@ -351,9 +345,9 @@ int vm_event_get_response(struct domain *d, struct vm_event_domain *ved,
/* Kick any waiters -- since we've just consumed an event,
* there may be additional space available in the ring. */
- vm_event_wake(d, ved);
+ vm_event_ring_wake(impl);
- vm_event_ring_unlock(ved);
+ vm_event_unlock(ved);
return 1;
}
@@ -366,9 +360,15 @@ int vm_event_get_response(struct domain *d, struct vm_event_domain *ved,
* Note: responses are handled the same way regardless of which ring they
* arrive on.
*/
-void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
+static int vm_event_resume(struct vm_event_domain *ved)
{
vm_event_response_t rsp;
+ struct domain *d;
+
+ if (! vm_event_check(ved))
+ return -ENODEV;
+
+ d = ved->d;
/*
* vm_event_resume() runs in either XEN_VM_EVENT_* domctls, or
@@ -381,7 +381,7 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
ASSERT(d != current->domain);
/* Pull all responses off the ring. */
- while ( vm_event_get_response(d, ved, &rsp) )
+ while ( ved->get_response(ved, &rsp) )
{
struct vcpu *v;
@@ -443,31 +443,36 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
vm_event_vcpu_unpause(v);
}
}
+
+ return 0;
}
-void vm_event_cancel_slot(struct domain *d, struct vm_event_domain *ved)
+void vm_event_cancel_slot(struct vm_event_domain *ved)
{
- if( !vm_event_check_ring(ved) )
+ if( !vm_event_check(ved) )
return;
- vm_event_ring_lock(ved);
- vm_event_release_slot(d, ved);
- vm_event_ring_unlock(ved);
+ if (ved->release_slot)
+ {
+ vm_event_lock(ved);
+ ved->release_slot(ved);
+ vm_event_unlock(ved);
+ }
}
-static int vm_event_grab_slot(struct vm_event_domain *ved, int foreign)
+static int vm_event_ring_grab_slot(struct vm_event_domain_ring *ved, int
foreign)
{
unsigned int avail_req;
if ( !ved->ring_page )
return -ENOSYS;
- vm_event_ring_lock(ved);
+ vm_event_lock(&ved->ved);
avail_req = vm_event_ring_available(ved);
if ( avail_req == 0 )
{
- vm_event_ring_unlock(ved);
+ vm_event_unlock(&ved->ved);
return -EBUSY;
}
@@ -476,31 +481,26 @@ static int vm_event_grab_slot(struct vm_event_domain *ved, int foreign)
else
ved->foreign_producers++;
- vm_event_ring_unlock(ved);
+ vm_event_unlock(&ved->ved);
return 0;
}
/* Simple try_grab wrapper for use in the wait_event() macro. */
-static int vm_event_wait_try_grab(struct vm_event_domain *ved, int *rc)
+static int vm_event_ring_wait_try_grab(struct vm_event_domain_ring *ved, int *rc)
{
- *rc = vm_event_grab_slot(ved, 0);
+ *rc = vm_event_ring_grab_slot(ved, 0);
return *rc;
}
-/* Call vm_event_grab_slot() until the ring doesn't exist, or is available. */
-static int vm_event_wait_slot(struct vm_event_domain *ved)
+/* Call vm_event_ring_grab_slot() until the ring doesn't exist, or is available. */
+static int vm_event_ring_wait_slot(struct vm_event_domain_ring *ved)
{
int rc = -EBUSY;
- wait_event(ved->wq, vm_event_wait_try_grab(ved, &rc) != -EBUSY);
+ wait_event(ved->wq, vm_event_ring_wait_try_grab(ved, &rc) != -EBUSY);
return rc;
}
-bool vm_event_check_ring(struct vm_event_domain *ved)
-{
- return (ved && ved->ring_page);
-}
-
/*
* Determines whether or not the current vCPU belongs to the target domain,
* and calls the appropriate wait function. If it is a guest vCPU, then we
@@ -513,46 +513,42 @@ bool vm_event_check_ring(struct vm_event_domain *ved)
* 0: a spot has been reserved
*
*/
-int __vm_event_claim_slot(struct domain *d, struct vm_event_domain *ved,
- bool allow_sleep)
+static int vm_event_ring_claim_slot(struct vm_event_domain *ved, bool allow_sleep)
+{
+ if ( (current->domain == ved->d) && allow_sleep )
+ return vm_event_ring_wait_slot(to_vm_event_domain_ring(ved));
+ else
+ return vm_event_ring_grab_slot(to_vm_event_domain_ring(ved),
+ current->domain != ved->d);
+}
+
+int __vm_event_claim_slot(struct vm_event_domain *ved, bool allow_sleep)
{
- if ( !vm_event_check_ring(ved) )
+ if ( !vm_event_check(ved) )
return -EOPNOTSUPP;
- if ( (current->domain == d) && allow_sleep )
- return vm_event_wait_slot(ved);
- else
- return vm_event_grab_slot(ved, (current->domain != d));
+ return ved->claim_slot(ved, allow_sleep);
}
#ifdef CONFIG_HAS_MEM_PAGING
/* Registered with Xen-bound event channel for incoming notifications. */
static void mem_paging_notification(struct vcpu *v, unsigned int port)
{
- struct domain *domain = v->domain;
-
- if ( likely(vm_event_check_ring(domain->vm_event_paging)) )
- vm_event_resume(domain, domain->vm_event_paging);
+ vm_event_resume(v->domain->vm_event_paging);
}
#endif
/* Registered with Xen-bound event channel for incoming notifications. */
static void monitor_notification(struct vcpu *v, unsigned int port)
{
- struct domain *domain = v->domain;
-
- if ( likely(vm_event_check_ring(domain->vm_event_monitor)) )
- vm_event_resume(domain, domain->vm_event_monitor);
+ vm_event_resume(v->domain->vm_event_monitor);
}
#ifdef CONFIG_HAS_MEM_SHARING
/* Registered with Xen-bound event channel for incoming notifications. */
static void mem_sharing_notification(struct vcpu *v, unsigned int port)
{
- struct domain *domain = v->domain;
-
- if ( likely(vm_event_check_ring(domain->vm_event_share)) )
- vm_event_resume(domain, domain->vm_event_share);
+ vm_event_resume(v->domain->vm_event_share);
}
#endif
@@ -560,7 +556,7 @@ static void mem_sharing_notification(struct vcpu *v, unsigned int port)
void vm_event_cleanup(struct domain *d)
{
#ifdef CONFIG_HAS_MEM_PAGING
- if ( vm_event_check_ring(d->vm_event_paging) )
+ if ( vm_event_check(d->vm_event_paging) )
{
/* Destroying the wait queue head means waking up all
* queued vcpus. This will drain the list, allowing
@@ -569,24 +565,109 @@ void vm_event_cleanup(struct domain *d)
* Finally, because this code path involves previously
* pausing the domain (domain_kill), unpausing the
* vcpus causes no harm. */
- destroy_waitqueue_head(&d->vm_event_paging->wq);
- (void)vm_event_disable(d, &d->vm_event_paging);
+ destroy_waitqueue_head(&to_vm_event_domain_ring(d->vm_event_paging)->wq);
+ (void)vm_event_disable(&d->vm_event_paging);
}
#endif
- if ( vm_event_check_ring(d->vm_event_monitor) )
+ if ( vm_event_check(d->vm_event_monitor) )
{
- destroy_waitqueue_head(&d->vm_event_monitor->wq);
- (void)vm_event_disable(d, &d->vm_event_monitor);
+ destroy_waitqueue_head(&to_vm_event_domain_ring(d->vm_event_monitor)->wq);
+ (void)vm_event_disable(&d->vm_event_monitor);
}
#ifdef CONFIG_HAS_MEM_SHARING
- if ( vm_event_check_ring(d->vm_event_share) )
+ if ( vm_event_check(d->vm_event_share) )
{
- destroy_waitqueue_head(&d->vm_event_share->wq);
- (void)vm_event_disable(d, &d->vm_event_share);
+ destroy_waitqueue_head(&to_vm_event_domain_ring(d->vm_event_share)->wq);
+ (void)vm_event_disable(&d->vm_event_share);
}
#endif
}
+static int vm_event_ring_enable(
+ struct domain *d,
+ struct xen_domctl_vm_event_op *vec,
+ struct vm_event_domain **ved,
+ int pause_flag,
+ int param,
+ xen_event_channel_notification_t notification_fn)
+{
+ int rc;
+ unsigned long ring_gfn = d->arch.hvm.params[param];
+ struct vm_event_domain_ring *impl;
+
+ impl = (*ved) ? (struct vm_event_domain_ring* )(*ved) :
+ xzalloc(struct vm_event_domain_ring);
+
+ if ( !impl )
+ return -ENOMEM;
+
+ impl->ved.d = d;
+ impl->ved.check = vm_event_ring_check;
+ impl->ved.claim_slot = vm_event_ring_claim_slot;
+ impl->ved.release_slot = vm_event_ring_release_slot;
+ impl->ved.put_request = vm_event_ring_put_request;
+ impl->ved.get_response = vm_event_ring_get_response;
+ impl->ved.disable = vm_event_ring_disable;
+
+ /* Only one helper at a time. If the helper crashed,
+ * the ring is in an undefined state and so is the guest.
+ */
+ if ( impl->ring_page )
+ return -EBUSY;
+
+ /* The parameter defaults to zero, and it should be
+ * set to something */
+ if ( ring_gfn == 0 )
+ return -ENOSYS;
+
+ vm_event_lock_init(&impl->ved);
+ vm_event_lock(&impl->ved);
+
+ rc = vm_event_init_domain(d);
+ if ( rc < 0 )
+ goto err;
+
+ rc = prepare_ring_for_helper(d, ring_gfn, &impl->ring_pg_struct,
+ &impl->ring_page);
+ if ( rc < 0 )
+ goto err;
+
+ /* Set the number of currently blocked vCPUs to 0. */
+ impl->blocked = 0;
+
+ /* Allocate event channel */
+ rc = alloc_unbound_xen_event_channel(d, 0, current->domain->domain_id,
+ notification_fn);
+ if ( rc < 0 )
+ goto err;
+
+ impl->xen_port = vec->port = rc;
+
+ /* Prepare ring buffer */
+ FRONT_RING_INIT(&impl->front_ring,
+ (vm_event_sring_t *)impl->ring_page,
+ PAGE_SIZE);
+
+ /* Save the pause flag for this particular ring. */
+ impl->pause_flag = pause_flag;
+
+ /* Initialize the last-chance wait queue. */
+ init_waitqueue_head(&impl->wq);
+
+ vm_event_unlock(&impl->ved);
+
+ *ved = &impl->ved;
+ return 0;
+
+ err:
+ destroy_ring_for_helper(&impl->ring_page,
+ impl->ring_pg_struct);
+ vm_event_unlock(&impl->ved);
+ XFREE(impl);
+
+ return rc;
+}
+
int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
XEN_GUEST_HANDLE_PARAM(void) u_domctl)
{
@@ -651,26 +732,23 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
break;
/* domain_pause() not required here, see XSA-99 */
- rc = vm_event_enable(d, vec, &d->vm_event_paging, _VPF_mem_paging,
- HVM_PARAM_PAGING_RING_PFN,
- mem_paging_notification);
+ rc = vm_event_ring_enable(d, vec, &d->vm_event_paging, _VPF_mem_paging,
+ HVM_PARAM_PAGING_RING_PFN,
+ mem_paging_notification);
}
break;
case XEN_VM_EVENT_DISABLE:
- if ( vm_event_check_ring(d->vm_event_paging) )
+ if ( vm_event_check(d->vm_event_paging) )
{
domain_pause(d);
- rc = vm_event_disable(d, &d->vm_event_paging);
+ rc = vm_event_disable(&d->vm_event_paging);
domain_unpause(d);
}
break;
case XEN_VM_EVENT_RESUME:
- if ( vm_event_check_ring(d->vm_event_paging) )
- vm_event_resume(d, d->vm_event_paging);
- else
- rc = -ENODEV;
+ rc = vm_event_resume(d->vm_event_paging);
break;
default:
@@ -692,26 +770,23 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
rc = arch_monitor_init_domain(d);
if ( rc )
break;
- rc = vm_event_enable(d, vec, &d->vm_event_monitor, _VPF_mem_access,
- HVM_PARAM_MONITOR_RING_PFN,
- monitor_notification);
+ rc = vm_event_ring_enable(d, vec, &d->vm_event_monitor, _VPF_mem_access,
+ HVM_PARAM_MONITOR_RING_PFN,
+ monitor_notification);
break;
case XEN_VM_EVENT_DISABLE:
- if ( vm_event_check_ring(d->vm_event_monitor) )
+ if ( vm_event_check(d->vm_event_monitor) )
{
domain_pause(d);
- rc = vm_event_disable(d, &d->vm_event_monitor);
+ rc = vm_event_disable(&d->vm_event_monitor);
arch_monitor_cleanup_domain(d);
domain_unpause(d);
}
break;
case XEN_VM_EVENT_RESUME:
- if ( vm_event_check_ring(d->vm_event_monitor) )
- vm_event_resume(d, d->vm_event_monitor);
- else
- rc = -ENODEV;
+ rc = vm_event_resume(d->vm_event_monitor);
break;
default:
@@ -740,26 +815,22 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
break;
/* domain_pause() not required here, see XSA-99 */
- rc = vm_event_enable(d, vec, &d->vm_event_share, _VPF_mem_sharing,
- HVM_PARAM_SHARING_RING_PFN,
- mem_sharing_notification);
+ rc = vm_event_ring_enable(d, vec, &d->vm_event_share, _VPF_mem_sharing,
+ HVM_PARAM_SHARING_RING_PFN,
+ mem_sharing_notification);
break;
case XEN_VM_EVENT_DISABLE:
- if ( vm_event_check_ring(d->vm_event_share) )
+ if ( vm_event_check(d->vm_event_share) )
{
domain_pause(d);
- rc = vm_event_disable(d, &d->vm_event_share);
+ rc = vm_event_disable(&d->vm_event_share);
domain_unpause(d);
}
break;
case XEN_VM_EVENT_RESUME:
- if ( vm_event_check_ring(d->vm_event_share) )
- vm_event_resume(d, d->vm_event_share);
- else
- rc = -ENODEV;
- break;
+ rc = vm_event_resume(d->vm_event_share);
default:
rc = -ENOSYS;
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index 1277ce2..a9593e7 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -1465,7 +1465,7 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
/* Prevent device assign if mem paging or mem sharing have been
* enabled for this domain */
if ( unlikely(d->arch.hvm.mem_sharing_enabled ||
- vm_event_check_ring(d->vm_event_paging) ||
+ vm_event_check(d->vm_event_paging) ||
p2m_get_hostp2m(d)->global_logdirty) )
return -EXDEV;
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 0309c1f..d840e03 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -278,30 +278,7 @@ struct vcpu
#define domain_lock(d) spin_lock_recursive(&(d)->domain_lock)
#define domain_unlock(d) spin_unlock_recursive(&(d)->domain_lock)
-/* VM event */
-struct vm_event_domain
-{
- /* ring lock */
- spinlock_t ring_lock;
- /* The ring has 64 entries */
- unsigned char foreign_producers;
- unsigned char target_producers;
- /* shared ring page */
- void *ring_page;
- struct page_info *ring_pg_struct;
- /* front-end ring */
- vm_event_front_ring_t front_ring;
- /* event channel port (vcpu0 only) */
- int xen_port;
- /* vm_event bit for vcpu->pause_flags */
- int pause_flag;
- /* list of vcpus waiting for room in the ring */
- struct waitqueue_head wq;
- /* the number of vCPUs blocked */
- unsigned int blocked;
- /* The last vcpu woken up */
- unsigned int last_vcpu_wake_up;
-};
+struct vm_event_domain;
struct evtchn_port_ops;
diff --git a/xen/include/xen/vm_event.h b/xen/include/xen/vm_event.h
index 5302ee5..a5c82d6 100644
--- a/xen/include/xen/vm_event.h
+++ b/xen/include/xen/vm_event.h
@@ -29,8 +29,8 @@
/* Clean up on domain destruction */
void vm_event_cleanup(struct domain *d);
-/* Returns whether a ring has been set up */
-bool vm_event_check_ring(struct vm_event_domain *ved);
+/* Returns whether the VM event domain has been set up */
+bool vm_event_check(struct vm_event_domain *ved);
/* Returns 0 on success, -ENOSYS if there is no ring, -EBUSY if there is no
* available space and the caller is a foreign domain. If the guest itself
@@ -45,30 +45,22 @@ bool vm_event_check_ring(struct vm_event_domain *ved);
* cancel_slot(), both of which are guaranteed to
* succeed.
*/
-int __vm_event_claim_slot(struct domain *d, struct vm_event_domain *ved,
- bool allow_sleep);
-static inline int vm_event_claim_slot(struct domain *d,
- struct vm_event_domain *ved)
+int __vm_event_claim_slot(struct vm_event_domain *ved, bool allow_sleep);
+static inline int vm_event_claim_slot(struct vm_event_domain *ved)
{
- return __vm_event_claim_slot(d, ved, true);
+ return __vm_event_claim_slot(ved, true);
}
-static inline int vm_event_claim_slot_nosleep(struct domain *d,
- struct vm_event_domain *ved)
+static inline int vm_event_claim_slot_nosleep(struct vm_event_domain *ved)
{
- return __vm_event_claim_slot(d, ved, false);
+ return __vm_event_claim_slot(ved, false);
}
-void vm_event_cancel_slot(struct domain *d, struct vm_event_domain *ved);
+void vm_event_cancel_slot(struct vm_event_domain *ved);
-void vm_event_put_request(struct domain *d, struct vm_event_domain *ved,
+void vm_event_put_request(struct vm_event_domain *ved,
vm_event_request_t *req);
-int vm_event_get_response(struct domain *d, struct vm_event_domain *ved,
- vm_event_response_t *rsp);
-
-void vm_event_resume(struct domain *d, struct vm_event_domain *ved);
-
int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
XEN_GUEST_HANDLE_PARAM(void) u_domctl);
--
2.7.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel