[Xen-devel] [RFC PATCH V3 07/12] xen: Remove mem_event
This is the third and final patch in the mem_event -> vm_event renaming
process. It removes all remaining references to mem_event, and the
mem_event code itself, from the Xen source tree.
Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>
---
MAINTAINERS | 1 -
docs/misc/xsm-flask.txt | 1 -
tools/libxc/Makefile | 1 -
tools/libxc/xc_mem_access.c | 16 +-
tools/libxc/xc_mem_event.c | 162 ---------
tools/libxc/xc_private.h | 12 -
xen/common/Makefile | 1 -
xen/common/mem_event.c | 738 -----------------------------------------
xen/include/public/domctl.h | 90 +----
xen/include/public/mem_event.h | 192 -----------
xen/include/xen/mem_event.h | 143 --------
xen/include/xen/sched.h | 41 ---
xen/include/xsm/dummy.h | 12 -
xen/include/xsm/xsm.h | 12 -
xen/xsm/dummy.c | 2 -
xen/xsm/flask/hooks.c | 12 -
16 files changed, 14 insertions(+), 1422 deletions(-)
delete mode 100644 tools/libxc/xc_mem_event.c
delete mode 100644 xen/common/mem_event.c
delete mode 100644 xen/include/public/mem_event.h
delete mode 100644 xen/include/xen/mem_event.h
diff --git a/MAINTAINERS b/MAINTAINERS
index b86ab83..4c445bb 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -365,7 +365,6 @@ VM EVENT AND ACCESS
M: Tim Deegan <tim@xxxxxxx>
S: Supported
F: xen/common/vm_event.c
-F: xen/common/mem_event.c
F: xen/common/mem_access.c
XENTRACE
diff --git a/docs/misc/xsm-flask.txt b/docs/misc/xsm-flask.txt
index 9eead61..13ce498 100644
--- a/docs/misc/xsm-flask.txt
+++ b/docs/misc/xsm-flask.txt
@@ -88,7 +88,6 @@ __HYPERVISOR_domctl (xen/include/public/domctl.h)
* XEN_DOMCTL_debug_op
* XEN_DOMCTL_gethvmcontext_partial
* XEN_DOMCTL_vm_event_op
- * XEN_DOMCTL_mem_event_op
* XEN_DOMCTL_mem_sharing_op
* XEN_DOMCTL_setvcpuextstate
* XEN_DOMCTL_getvcpuextstate
diff --git a/tools/libxc/Makefile b/tools/libxc/Makefile
index b97e535..6ef17ec 100644
--- a/tools/libxc/Makefile
+++ b/tools/libxc/Makefile
@@ -27,7 +27,6 @@ CTRL_SRCS-y += xc_cpu_hotplug.c
CTRL_SRCS-y += xc_resume.c
CTRL_SRCS-y += xc_tmem.c
CTRL_SRCS-y += xc_vm_event.c
-CTRL_SRCS-y += xc_mem_event.c
CTRL_SRCS-y += xc_mem_paging.c
CTRL_SRCS-y += xc_mem_access.c
CTRL_SRCS-y += xc_memshr.c
diff --git a/tools/libxc/xc_mem_access.c b/tools/libxc/xc_mem_access.c
index 1c979ed..aa6e777 100644
--- a/tools/libxc/xc_mem_access.c
+++ b/tools/libxc/xc_mem_access.c
@@ -26,23 +26,23 @@
void *xc_mem_access_enable(xc_interface *xch, domid_t domain_id, uint32_t *port)
{
- return xc_mem_event_enable(xch, domain_id, HVM_PARAM_MONITOR_RING_PFN,
- port, 0);
+ return xc_vm_event_enable(xch, domain_id, HVM_PARAM_MONITOR_RING_PFN,
+ port, 0);
}
void *xc_mem_access_enable_introspection(xc_interface *xch, domid_t domain_id,
uint32_t *port)
{
- return xc_mem_event_enable(xch, domain_id, HVM_PARAM_MONITOR_RING_PFN,
- port, 1);
+ return xc_vm_event_enable(xch, domain_id, HVM_PARAM_MONITOR_RING_PFN,
+ port, 1);
}
int xc_mem_access_disable(xc_interface *xch, domid_t domain_id)
{
- return xc_mem_event_control(xch, domain_id,
- XEN_DOMCTL_MEM_EVENT_OP_MONITOR_DISABLE,
- XEN_DOMCTL_MEM_EVENT_OP_MONITOR,
- NULL);
+ return xc_vm_event_control(xch, domain_id,
+ XEN_DOMCTL_VM_EVENT_OP_MONITOR_DISABLE,
+ XEN_DOMCTL_VM_EVENT_OP_MONITOR,
+ NULL);
}
int xc_mem_access_resume(xc_interface *xch, domid_t domain_id)
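[Illustrative note, not part of the patch: the public libxc entry points
keep their names and signatures; only the internal plumbing moves to the
xc_vm_event_* helpers. A minimal consumer sketch using just the calls
visible above plus the standard xc_interface_open/close pair:]

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>
    #include <xenctrl.h>

    int main(int argc, char *argv[])
    {
        xc_interface *xch;
        uint32_t port;
        void *ring_page;
        domid_t domid;

        if ( argc < 2 )
            return 1;
        domid = (domid_t)atoi(argv[1]);      /* target domain id */

        xch = xc_interface_open(NULL, NULL, 0);
        if ( !xch )
            return 1;

        /* Internally this now routes through xc_vm_event_enable(). */
        ring_page = xc_mem_access_enable(xch, domid, &port);
        if ( !ring_page )
        {
            perror("xc_mem_access_enable");
            xc_interface_close(xch);
            return 1;
        }

        /* ... bind the event channel on `port' and consume events ... */

        munmap(ring_page, XC_PAGE_SIZE);
        xc_mem_access_disable(xch, domid);
        xc_interface_close(xch);
        return 0;
    }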
diff --git a/tools/libxc/xc_mem_event.c b/tools/libxc/xc_mem_event.c
deleted file mode 100644
index 319cc00..0000000
--- a/tools/libxc/xc_mem_event.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/******************************************************************************
- *
- * xc_mem_event.c
- *
- * Interface to low-level memory event functionality.
- *
- * Copyright (c) 2009 Citrix Systems, Inc. (Patrick Colp)
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "xc_private.h"
-
-int xc_mem_event_control(xc_interface *xch, domid_t domain_id, unsigned int op,
- unsigned int mode, uint32_t *port)
-{
- DECLARE_DOMCTL;
- int rc;
-
- domctl.cmd = XEN_DOMCTL_vm_event_op;
- domctl.domain = domain_id;
- domctl.u.vm_event_op.op = op;
- domctl.u.vm_event_op.mode = mode;
-
- rc = do_domctl(xch, &domctl);
- if ( !rc && port )
- *port = domctl.u.vm_event_op.port;
- return rc;
-}
-
-void *xc_mem_event_enable(xc_interface *xch, domid_t domain_id, int param,
- uint32_t *port, int enable_introspection)
-{
- void *ring_page = NULL;
- uint64_t pfn;
- xen_pfn_t ring_pfn, mmap_pfn;
- unsigned int op, mode;
- int rc1, rc2, saved_errno;
-
- if ( !port )
- {
- errno = EINVAL;
- return NULL;
- }
-
- /* Pause the domain for ring page setup */
- rc1 = xc_domain_pause(xch, domain_id);
- if ( rc1 != 0 )
- {
- PERROR("Unable to pause domain\n");
- return NULL;
- }
-
- /* Get the pfn of the ring page */
- rc1 = xc_hvm_param_get(xch, domain_id, param, &pfn);
- if ( rc1 != 0 )
- {
- PERROR("Failed to get pfn of ring page\n");
- goto out;
- }
-
- ring_pfn = pfn;
- mmap_pfn = pfn;
- ring_page = xc_map_foreign_batch(xch, domain_id, PROT_READ | PROT_WRITE,
- &mmap_pfn, 1);
- if ( mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
- {
- /* Map failed, populate ring page */
- rc1 = xc_domain_populate_physmap_exact(xch, domain_id, 1, 0, 0,
- &ring_pfn);
- if ( rc1 != 0 )
- {
- PERROR("Failed to populate ring pfn\n");
- goto out;
- }
-
- mmap_pfn = ring_pfn;
- ring_page = xc_map_foreign_batch(xch, domain_id, PROT_READ | PROT_WRITE,
- &mmap_pfn, 1);
- if ( mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
- {
- PERROR("Could not map the ring page\n");
- goto out;
- }
- }
-
- switch ( param )
- {
- case HVM_PARAM_PAGING_RING_PFN:
- op = XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE;
- mode = XEN_DOMCTL_MEM_EVENT_OP_PAGING;
- break;
-
- case HVM_PARAM_MONITOR_RING_PFN:
- if ( enable_introspection )
- op = XEN_DOMCTL_MEM_EVENT_OP_MONITOR_ENABLE_INTROSPECTION;
- else
- op = XEN_DOMCTL_MEM_EVENT_OP_MONITOR_ENABLE;
- mode = XEN_DOMCTL_MEM_EVENT_OP_MONITOR;
- break;
-
- case HVM_PARAM_SHARING_RING_PFN:
- op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_ENABLE;
- mode = XEN_DOMCTL_MEM_EVENT_OP_SHARING;
- break;
-
- /*
- * This is for the outside chance that the HVM_PARAM is valid but is invalid
- * as far as mem_event goes.
- */
- default:
- errno = EINVAL;
- rc1 = -1;
- goto out;
- }
-
- rc1 = xc_mem_event_control(xch, domain_id, op, mode, port);
- if ( rc1 != 0 )
- {
- PERROR("Failed to enable mem_event\n");
- goto out;
- }
-
- /* Remove the ring_pfn from the guest's physmap */
- rc1 = xc_domain_decrease_reservation_exact(xch, domain_id, 1, 0, &ring_pfn);
- if ( rc1 != 0 )
- PERROR("Failed to remove ring page from guest physmap");
-
- out:
- saved_errno = errno;
-
- rc2 = xc_domain_unpause(xch, domain_id);
- if ( rc1 != 0 || rc2 != 0 )
- {
- if ( rc2 != 0 )
- {
- if ( rc1 == 0 )
- saved_errno = errno;
- PERROR("Unable to unpause domain");
- }
-
- if ( ring_page )
- munmap(ring_page, XC_PAGE_SIZE);
- ring_page = NULL;
-
- errno = saved_errno;
- }
-
- return ring_page;
-}
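[Illustrative note, not part of the patch: the setup sequence deleted
here survives unchanged in xc_vm_event_enable() (tools/libxc/xc_vm_event.c,
added earlier in this series); in outline:]

    /*
     * Ring setup as performed by xc_vm_event_enable():
     *
     *   xc_domain_pause(xch, domid);                  quiesce the guest
     *   xc_hvm_param_get(xch, domid, param, &pfn);    look up the ring gfn
     *   xc_map_foreign_batch(xch, domid, ...);        map it; if the gfn is
     *       unpopulated, populate it and map again
     *   xc_vm_event_control(xch, domid, op, mode, port);  enable in Xen
     *   xc_domain_decrease_reservation_exact(...);    hide the ring page
     *                                                 from the guest physmap
     *   xc_domain_unpause(xch, domid);
     */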
diff --git a/tools/libxc/xc_private.h b/tools/libxc/xc_private.h
index 58db86d..843540c 100644
--- a/tools/libxc/xc_private.h
+++ b/tools/libxc/xc_private.h
@@ -421,18 +421,6 @@ int xc_ffs64(uint64_t x);
#define DOMPRINTF_CALLED(xch) xc_dom_printf((xch), "%s: called", __FUNCTION__)
/**
- * mem_event operations. Internal use only.
- */
-int xc_mem_event_control(xc_interface *xch, domid_t domain_id, unsigned int op,
- unsigned int mode, uint32_t *port);
-/*
- * Enables mem_event and returns the mapped ring page indicated by param.
- * param can be HVM_PARAM_PAGING/ACCESS/SHARING_RING_PFN
- */
-void *xc_mem_event_enable(xc_interface *xch, domid_t domain_id, int param,
- uint32_t *port, int enable_introspection);
-
-/**
* vm_event operations. Internal use only.
*/
int xc_vm_event_control(xc_interface *xch, domid_t domain_id, unsigned int op,
diff --git a/xen/common/Makefile b/xen/common/Makefile
index 0db6967..e5bd75b 100644
--- a/xen/common/Makefile
+++ b/xen/common/Makefile
@@ -54,7 +54,6 @@ obj-y += rbtree.o
obj-y += lzo.o
obj-$(HAS_PDX) += pdx.o
obj-$(HAS_MEM_ACCESS) += mem_access.o
-obj-$(HAS_MEM_ACCESS) += mem_event.o
obj-$(HAS_MEM_ACCESS) += vm_event.o
obj-bin-$(CONFIG_X86) += $(foreach n,decompress bunzip2 unxz unlzma unlzo unlz4 earlycpio,$(n).init.o)
diff --git a/xen/common/mem_event.c b/xen/common/mem_event.c
deleted file mode 100644
index c6ee505..0000000
--- a/xen/common/mem_event.c
+++ /dev/null
@@ -1,738 +0,0 @@
-/******************************************************************************
- * mem_event.c
- *
- * Memory event support.
- *
- * Copyright (c) 2009 Citrix Systems, Inc. (Patrick Colp)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-
-#include <xen/sched.h>
-#include <xen/event.h>
-#include <xen/wait.h>
-#include <xen/mem_event.h>
-#include <xen/mem_access.h>
-#include <asm/p2m.h>
-
-#ifdef HAS_MEM_PAGING
-#include <asm/mem_paging.h>
-#endif
-
-#ifdef HAS_MEM_SHARING
-#include <asm/mem_sharing.h>
-#endif
-
-#include <xsm/xsm.h>
-
-/* for public/io/ring.h macros */
-#define xen_mb() mb()
-#define xen_rmb() rmb()
-#define xen_wmb() wmb()
-
-#define mem_event_ring_lock_init(_med) spin_lock_init(&(_med)->ring_lock)
-#define mem_event_ring_lock(_med) spin_lock(&(_med)->ring_lock)
-#define mem_event_ring_unlock(_med) spin_unlock(&(_med)->ring_lock)
-
-static int mem_event_enable(
- struct domain *d,
- xen_domctl_mem_event_op_t *mec,
- struct mem_event_domain *med,
- int pause_flag,
- int param,
- xen_event_channel_notification_t notification_fn)
-{
- int rc;
- unsigned long ring_gfn = d->arch.hvm_domain.params[param];
-
- /* Only one helper at a time. If the helper crashed,
- * the ring is in an undefined state and so is the guest.
- */
- if ( med->ring_page )
- return -EBUSY;
-
- /* The parameter defaults to zero, and it should be
- * set to something */
- if ( ring_gfn == 0 )
- return -ENOSYS;
-
- mem_event_ring_lock_init(med);
- mem_event_ring_lock(med);
-
- rc = prepare_ring_for_helper(d, ring_gfn, &med->ring_pg_struct,
- &med->ring_page);
- if ( rc < 0 )
- goto err;
-
- /* Set the number of currently blocked vCPUs to 0. */
- med->blocked = 0;
-
- /* Allocate event channel */
- rc = alloc_unbound_xen_event_channel(d->vcpu[0],
- current->domain->domain_id,
- notification_fn);
- if ( rc < 0 )
- goto err;
-
- med->xen_port = mec->port = rc;
-
- /* Prepare ring buffer */
- FRONT_RING_INIT(&med->front_ring,
- (mem_event_sring_t *)med->ring_page,
- PAGE_SIZE);
-
- /* Save the pause flag for this particular ring. */
- med->pause_flag = pause_flag;
-
- /* Initialize the last-chance wait queue. */
- init_waitqueue_head(&med->wq);
-
- mem_event_ring_unlock(med);
- return 0;
-
- err:
- destroy_ring_for_helper(&med->ring_page,
- med->ring_pg_struct);
- mem_event_ring_unlock(med);
-
- return rc;
-}
-
-static unsigned int mem_event_ring_available(struct mem_event_domain *med)
-{
- int avail_req = RING_FREE_REQUESTS(&med->front_ring);
- avail_req -= med->target_producers;
- avail_req -= med->foreign_producers;
-
- BUG_ON(avail_req < 0);
-
- return avail_req;
-}
-
-/*
- * mem_event_wake_blocked() will wakeup vcpus waiting for room in the
- * ring. These vCPUs were paused on their way out after placing an event,
- * but need to be resumed where the ring is capable of processing at least
- * one event from them.
- */
-static void mem_event_wake_blocked(struct domain *d, struct mem_event_domain *med)
-{
- struct vcpu *v;
- int online = d->max_vcpus;
- unsigned int avail_req = mem_event_ring_available(med);
-
- if ( avail_req == 0 || med->blocked == 0 )
- return;
-
- /*
- * We ensure that we only have vCPUs online if there are enough free slots
- * for their memory events to be processed. This will ensure that no
- * memory events are lost (due to the fact that certain types of events
- * cannot be replayed, we need to ensure that there is space in the ring
- * for when they are hit).
- * See comment below in mem_event_put_request().
- */
- for_each_vcpu ( d, v )
- if ( test_bit(med->pause_flag, &v->pause_flags) )
- online--;
-
- ASSERT(online == (d->max_vcpus - med->blocked));
-
- /* We remember which vcpu last woke up to avoid scanning always linearly
- * from zero and starving higher-numbered vcpus under high load */
- if ( d->vcpu )
- {
- int i, j, k;
-
- for (i = med->last_vcpu_wake_up + 1, j = 0; j < d->max_vcpus; i++, j++)
- {
- k = i % d->max_vcpus;
- v = d->vcpu[k];
- if ( !v )
- continue;
-
- if ( !(med->blocked) || online >= avail_req )
- break;
-
- if ( test_and_clear_bit(med->pause_flag, &v->pause_flags) )
- {
- vcpu_unpause(v);
- online++;
- med->blocked--;
- med->last_vcpu_wake_up = k;
- }
- }
- }
-}
-
-/*
- * In the event that a vCPU attempted to place an event in the ring and
- * was unable to do so, it is queued on a wait queue. These are woken as
- * needed, and take precedence over the blocked vCPUs.
- */
-static void mem_event_wake_queued(struct domain *d, struct mem_event_domain *med)
-{
- unsigned int avail_req = mem_event_ring_available(med);
-
- if ( avail_req > 0 )
- wake_up_nr(&med->wq, avail_req);
-}
-
-/*
- * mem_event_wake() will wakeup all vcpus waiting for the ring to
- * become available. If we have queued vCPUs, they get top priority. We
- * are guaranteed that they will go through code paths that will eventually
- * call mem_event_wake() again, ensuring that any blocked vCPUs will get
- * unpaused once all the queued vCPUs have made it through.
- */
-void mem_event_wake(struct domain *d, struct mem_event_domain *med)
-{
- if (!list_empty(&med->wq.list))
- mem_event_wake_queued(d, med);
- else
- mem_event_wake_blocked(d, med);
-}
-
-static int mem_event_disable(struct domain *d, struct mem_event_domain *med)
-{
- if ( med->ring_page )
- {
- struct vcpu *v;
-
- mem_event_ring_lock(med);
-
- if ( !list_empty(&med->wq.list) )
- {
- mem_event_ring_unlock(med);
- return -EBUSY;
- }
-
- /* Free domU's event channel and leave the other one unbound */
- free_xen_event_channel(d->vcpu[0], med->xen_port);
-
- /* Unblock all vCPUs */
- for_each_vcpu ( d, v )
- {
- if ( test_and_clear_bit(med->pause_flag, &v->pause_flags) )
- {
- vcpu_unpause(v);
- med->blocked--;
- }
- }
-
- destroy_ring_for_helper(&med->ring_page,
- med->ring_pg_struct);
- mem_event_ring_unlock(med);
- }
-
- return 0;
-}
-
-static inline void mem_event_release_slot(struct domain *d,
- struct mem_event_domain *med)
-{
- /* Update the accounting */
- if ( current->domain == d )
- med->target_producers--;
- else
- med->foreign_producers--;
-
- /* Kick any waiters */
- mem_event_wake(d, med);
-}
-
-/*
- * mem_event_mark_and_pause() tags vcpu and put it to sleep.
- * The vcpu will resume execution in mem_event_wake_waiters().
- */
-void mem_event_mark_and_pause(struct vcpu *v, struct mem_event_domain *med)
-{
- if ( !test_and_set_bit(med->pause_flag, &v->pause_flags) )
- {
- vcpu_pause_nosync(v);
- med->blocked++;
- }
-}
-
-/*
- * This must be preceded by a call to claim_slot(), and is guaranteed to
- * succeed. As a side-effect however, the vCPU may be paused if the ring is
- * overly full and its continued execution would cause stalling and excessive
- * waiting. The vCPU will be automatically unpaused when the ring clears.
- */
-void mem_event_put_request(struct domain *d,
- struct mem_event_domain *med,
- mem_event_request_t *req)
-{
- mem_event_front_ring_t *front_ring;
- int free_req;
- unsigned int avail_req;
- RING_IDX req_prod;
-
- if ( current->domain != d )
- {
- req->flags |= MEM_EVENT_FLAG_FOREIGN;
-#ifndef NDEBUG
- if ( !(req->flags & MEM_EVENT_FLAG_VCPU_PAUSED) )
- gdprintk(XENLOG_G_WARNING, "d%dv%d was not paused.\n",
- d->domain_id, req->vcpu_id);
-#endif
- }
-
- mem_event_ring_lock(med);
-
- /* Due to the reservations, this step must succeed. */
- front_ring = &med->front_ring;
- free_req = RING_FREE_REQUESTS(front_ring);
- ASSERT(free_req > 0);
-
- /* Copy request */
- req_prod = front_ring->req_prod_pvt;
- memcpy(RING_GET_REQUEST(front_ring, req_prod), req, sizeof(*req));
- req_prod++;
-
- /* Update ring */
- front_ring->req_prod_pvt = req_prod;
- RING_PUSH_REQUESTS(front_ring);
-
- /* We've actually *used* our reservation, so release the slot. */
- mem_event_release_slot(d, med);
-
- /* Give this vCPU a black eye if necessary, on the way out.
- * See the comments above wake_blocked() for more information
- * on how this mechanism works to avoid waiting. */
- avail_req = mem_event_ring_available(med);
- if( current->domain == d && avail_req < d->max_vcpus )
- mem_event_mark_and_pause(current, med);
-
- mem_event_ring_unlock(med);
-
- notify_via_xen_event_channel(d, med->xen_port);
-}
-
-int mem_event_get_response(struct domain *d, struct mem_event_domain *med, mem_event_response_t *rsp)
-{
- mem_event_front_ring_t *front_ring;
- RING_IDX rsp_cons;
-
- mem_event_ring_lock(med);
-
- front_ring = &med->front_ring;
- rsp_cons = front_ring->rsp_cons;
-
- if ( !RING_HAS_UNCONSUMED_RESPONSES(front_ring) )
- {
- mem_event_ring_unlock(med);
- return 0;
- }
-
- /* Copy response */
- memcpy(rsp, RING_GET_RESPONSE(front_ring, rsp_cons), sizeof(*rsp));
- rsp_cons++;
-
- /* Update ring */
- front_ring->rsp_cons = rsp_cons;
- front_ring->sring->rsp_event = rsp_cons + 1;
-
- /* Kick any waiters -- since we've just consumed an event,
- * there may be additional space available in the ring. */
- mem_event_wake(d, med);
-
- mem_event_ring_unlock(med);
-
- return 1;
-}
-
-void mem_event_cancel_slot(struct domain *d, struct mem_event_domain *med)
-{
- mem_event_ring_lock(med);
- mem_event_release_slot(d, med);
- mem_event_ring_unlock(med);
-}
-
-static int mem_event_grab_slot(struct mem_event_domain *med, int foreign)
-{
- unsigned int avail_req;
-
- if ( !med->ring_page )
- return -ENOSYS;
-
- mem_event_ring_lock(med);
-
- avail_req = mem_event_ring_available(med);
- if ( avail_req == 0 )
- {
- mem_event_ring_unlock(med);
- return -EBUSY;
- }
-
- if ( !foreign )
- med->target_producers++;
- else
- med->foreign_producers++;
-
- mem_event_ring_unlock(med);
-
- return 0;
-}
-
-/* Simple try_grab wrapper for use in the wait_event() macro. */
-static int mem_event_wait_try_grab(struct mem_event_domain *med, int *rc)
-{
- *rc = mem_event_grab_slot(med, 0);
- return *rc;
-}
-
-/* Call mem_event_grab_slot() until the ring doesn't exist, or is available. */
-static int mem_event_wait_slot(struct mem_event_domain *med)
-{
- int rc = -EBUSY;
- wait_event(med->wq, mem_event_wait_try_grab(med, &rc) != -EBUSY);
- return rc;
-}
-
-bool_t mem_event_check_ring(struct mem_event_domain *med)
-{
- return (med->ring_page != NULL);
-}
-
-/*
- * Determines whether or not the current vCPU belongs to the target domain,
- * and calls the appropriate wait function. If it is a guest vCPU, then we
- * use mem_event_wait_slot() to reserve a slot. As long as there is a ring,
- * this function will always return 0 for a guest. For a non-guest, we check
- * for space and return -EBUSY if the ring is not available.
- *
- * Return codes: -ENOSYS: the ring is not yet configured
- * -EBUSY: the ring is busy
- * 0: a spot has been reserved
- *
- */
-int __mem_event_claim_slot(struct domain *d, struct mem_event_domain *med,
- bool_t allow_sleep)
-{
- if ( (current->domain == d) && allow_sleep )
- return mem_event_wait_slot(med);
- else
- return mem_event_grab_slot(med, (current->domain != d));
-}
-
-#ifdef HAS_MEM_PAGING
-/* Registered with Xen-bound event channel for incoming notifications. */
-static void mem_paging_notification(struct vcpu *v, unsigned int port)
-{
- if ( likely(v->domain->mem_event->paging.ring_page != NULL) )
- p2m_mem_paging_resume(v->domain);
-}
-#endif
-
-#ifdef HAS_MEM_ACCESS
-/* Registered with Xen-bound event channel for incoming notifications. */
-static void mem_access_notification(struct vcpu *v, unsigned int port)
-{
- if ( likely(v->domain->mem_event->monitor.ring_page != NULL) )
- mem_access_resume(v->domain);
-}
-#endif
-
-#ifdef HAS_MEM_SHARING
-/* Registered with Xen-bound event channel for incoming notifications. */
-static void mem_sharing_notification(struct vcpu *v, unsigned int port)
-{
- if ( likely(v->domain->mem_event->share.ring_page != NULL) )
- mem_sharing_sharing_resume(v->domain);
-}
-#endif
-
-int do_mem_event_op(int op, uint32_t domain, void *arg)
-{
- int ret;
- struct domain *d;
-
- ret = rcu_lock_live_remote_domain_by_id(domain, &d);
- if ( ret )
- return ret;
-
- ret = xsm_mem_event_op(XSM_DM_PRIV, d, op);
- if ( ret )
- goto out;
-
- switch (op)
- {
-#ifdef HAS_MEM_PAGING
- case XENMEM_paging_op:
- ret = mem_paging_memop(d, arg);
- break;
-#endif
-#ifdef HAS_MEM_SHARING
- case XENMEM_sharing_op:
- ret = mem_sharing_memop(d, arg);
- break;
-#endif
- default:
- ret = -ENOSYS;
- }
-
- out:
- rcu_unlock_domain(d);
- return ret;
-}
-
-/* Clean up on domain destruction */
-void mem_event_cleanup(struct domain *d)
-{
-#ifdef HAS_MEM_PAGING
- if ( d->mem_event->paging.ring_page ) {
- /* Destroying the wait queue head means waking up all
- * queued vcpus. This will drain the list, allowing
- * the disable routine to complete. It will also drop
- * all domain refs the wait-queued vcpus are holding.
- * Finally, because this code path involves previously
- * pausing the domain (domain_kill), unpausing the
- * vcpus causes no harm. */
- destroy_waitqueue_head(&d->mem_event->paging.wq);
- (void)mem_event_disable(d, &d->mem_event->paging);
- }
-#endif
-#ifdef HAS_MEM_ACCESS
- if ( d->mem_event->monitor.ring_page ) {
- destroy_waitqueue_head(&d->mem_event->monitor.wq);
- (void)mem_event_disable(d, &d->mem_event->monitor);
- }
-#endif
-#ifdef HAS_MEM_SHARING
- if ( d->mem_event->share.ring_page ) {
- destroy_waitqueue_head(&d->mem_event->share.wq);
- (void)mem_event_disable(d, &d->mem_event->share);
- }
-#endif
-}
-
-int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
- XEN_GUEST_HANDLE_PARAM(void) u_domctl)
-{
- int rc;
-
- rc = xsm_mem_event_control(XSM_PRIV, d, mec->mode, mec->op);
- if ( rc )
- return rc;
-
- if ( unlikely(d == current->domain) )
- {
- gdprintk(XENLOG_INFO, "Tried to do a memory event op on itself.\n");
- return -EINVAL;
- }
-
- if ( unlikely(d->is_dying) )
- {
- gdprintk(XENLOG_INFO, "Ignoring memory event op on dying domain %u\n",
- d->domain_id);
- return 0;
- }
-
- if ( unlikely(d->vcpu == NULL) || unlikely(d->vcpu[0] == NULL) )
- {
- gdprintk(XENLOG_INFO,
- "Memory event op on a domain (%u) with no vcpus\n",
- d->domain_id);
- return -EINVAL;
- }
-
- rc = -ENOSYS;
-
- switch ( mec->mode )
- {
-#ifdef HAS_MEM_PAGING
- case XEN_DOMCTL_MEM_EVENT_OP_PAGING:
- {
- struct mem_event_domain *med = &d->mem_event->paging;
- rc = -EINVAL;
-
- switch( mec->op )
- {
- case XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE:
- {
- struct p2m_domain *p2m = p2m_get_hostp2m(d);
-
- rc = -EOPNOTSUPP;
- /* pvh fixme: p2m_is_foreign types need addressing */
- if ( is_pvh_vcpu(current) || is_pvh_domain(hardware_domain) )
- break;
-
- rc = -ENODEV;
- /* Only HAP is supported */
- if ( !hap_enabled(d) )
- break;
-
- /* No paging if iommu is used */
- rc = -EMLINK;
- if ( unlikely(need_iommu(d)) )
- break;
-
- rc = -EXDEV;
- /* Disallow paging in a PoD guest */
- if ( p2m->pod.entry_count )
- break;
-
- rc = mem_event_enable(d, mec, med, _VPF_mem_paging,
- HVM_PARAM_PAGING_RING_PFN,
- mem_paging_notification);
- }
- break;
-
- case XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE:
- {
- if ( med->ring_page )
- rc = mem_event_disable(d, med);
- }
- break;
-
- default:
- rc = -ENOSYS;
- break;
- }
- }
- break;
-#endif
-
-#ifdef HAS_MEM_ACCESS
- case XEN_DOMCTL_MEM_EVENT_OP_MONITOR:
- {
- struct mem_event_domain *med = &d->mem_event->monitor;
- rc = -EINVAL;
-
- switch( mec->op )
- {
- case XEN_DOMCTL_MEM_EVENT_OP_MONITOR_ENABLE:
- case XEN_DOMCTL_MEM_EVENT_OP_MONITOR_ENABLE_INTROSPECTION:
- {
- rc = mem_event_enable(d, mec, med, _VPF_mem_access,
- HVM_PARAM_MONITOR_RING_PFN,
- mem_access_notification);
-
- if ( mec->op == XEN_DOMCTL_MEM_EVENT_OP_MONITOR_ENABLE_INTROSPECTION
- && !rc )
- p2m_setup_introspection(d);
-
- }
- break;
-
- case XEN_DOMCTL_MEM_EVENT_OP_MONITOR_DISABLE:
- {
- if ( med->ring_page )
- {
- rc = mem_event_disable(d, med);
- d->arch.hvm_domain.introspection_enabled = 0;
- }
- }
- break;
-
- default:
- rc = -ENOSYS;
- break;
- }
- }
- break;
-#endif
-
-#ifdef HAS_MEM_SHARING
- case XEN_DOMCTL_MEM_EVENT_OP_SHARING:
- {
- struct mem_event_domain *med = &d->mem_event->share;
- rc = -EINVAL;
-
- switch( mec->op )
- {
- case XEN_DOMCTL_MEM_EVENT_OP_SHARING_ENABLE:
- {
- rc = -EOPNOTSUPP;
- /* pvh fixme: p2m_is_foreign types need addressing */
- if ( is_pvh_vcpu(current) || is_pvh_domain(hardware_domain) )
- break;
-
- rc = -ENODEV;
- /* Only HAP is supported */
- if ( !hap_enabled(d) )
- break;
-
- rc = mem_event_enable(d, mec, med, _VPF_mem_sharing,
- HVM_PARAM_SHARING_RING_PFN,
- mem_sharing_notification);
- }
- break;
-
- case XEN_DOMCTL_MEM_EVENT_OP_SHARING_DISABLE:
- {
- if ( med->ring_page )
- rc = mem_event_disable(d, med);
- }
- break;
-
- default:
- rc = -ENOSYS;
- break;
- }
- }
- break;
-#endif
-
- default:
- rc = -ENOSYS;
- }
-
- return rc;
-}
-
-void mem_event_vcpu_pause(struct vcpu *v)
-{
- ASSERT(v == current);
-
- atomic_inc(&v->mem_event_pause_count);
- vcpu_pause_nosync(v);
-}
-
-void mem_event_vcpu_unpause(struct vcpu *v)
-{
- int old, new, prev = v->mem_event_pause_count.counter;
-
- /* All unpause requests as a result of toolstack responses. Prevent
- * underflow of the vcpu pause count. */
- do
- {
- old = prev;
- new = old - 1;
-
- if ( new < 0 )
- {
- printk(XENLOG_G_WARNING
- "%pv mem_event: Too many unpause attempts\n", v);
- return;
- }
-
- prev = cmpxchg(&v->mem_event_pause_count.counter, old, new);
- } while ( prev != old );
-
- vcpu_unpause(v);
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * indent-tabs-mode: nil
- * End:
- */
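[Illustrative note, not part of the patch: mem_event_vcpu_unpause()
above, like its vm_event twin, guards the pause count against underflow
with a cmpxchg loop. A standalone sketch of the same guard, expressed
with GCC atomic builtins since Xen's cmpxchg() is hypervisor-internal:]

    #include <stdio.h>

    static int pause_count = 1;

    /* Decrement pause_count unless that would drop it below zero;
     * returns 0 on success, -1 when the count is already exhausted. */
    static int try_unpause(void)
    {
        int old, new;

        do {
            old = __atomic_load_n(&pause_count, __ATOMIC_SEQ_CST);
            new = old - 1;
            if ( new < 0 )
                return -1;      /* too many unpause attempts */
        } while ( !__atomic_compare_exchange_n(&pause_count, &old, new,
                                               0, __ATOMIC_SEQ_CST,
                                               __ATOMIC_SEQ_CST) );
        return 0;
    }

    int main(void)
    {
        printf("%d\n", try_unpause());   /* 0: count goes 1 -> 0   */
        printf("%d\n", try_unpause());   /* -1: underflow refused  */
        return 0;
    }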
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 6396f4c..5d48a4d 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -750,15 +750,15 @@ struct xen_domctl_gdbsx_domstatus {
};
/*
- * Memory event operations
+ * VM event operations
*/
-/* XEN_DOMCTL_mem_event_op */
+/* XEN_DOMCTL_vm_event_op */
/*
* Domain memory paging
* Page memory in and out.
- * Domctl interface to set up and tear down the
+ * Domctl interface to set up and tear down the
* pager<->hypervisor interface. Use XENMEM_paging_op*
* to perform per-page operations.
*
@@ -769,10 +769,10 @@ struct xen_domctl_gdbsx_domstatus {
* EXDEV - guest has PoD enabled
* EBUSY - guest has or had paging enabled, ring buffer still active
*/
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING 1
+#define XEN_DOMCTL_VM_EVENT_OP_PAGING 1
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE 0
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE 1
+#define XEN_DOMCTL_VM_EVENT_OP_PAGING_ENABLE 0
+#define XEN_DOMCTL_VM_EVENT_OP_PAGING_DISABLE 1
/*
* Monitor helper.
@@ -799,84 +799,6 @@ struct xen_domctl_gdbsx_domstatus {
* EBUSY - guest has or had access enabled, ring buffer still active
*
*/
-#define XEN_DOMCTL_MEM_EVENT_OP_MONITOR 2
-
-#define XEN_DOMCTL_MEM_EVENT_OP_MONITOR_ENABLE 0
-#define XEN_DOMCTL_MEM_EVENT_OP_MONITOR_DISABLE 1
-#define XEN_DOMCTL_MEM_EVENT_OP_MONITOR_ENABLE_INTROSPECTION 2
-
-/*
- * Sharing ENOMEM helper.
- *
- * As with paging, use the domctl for teardown/setup of the
- * helper<->hypervisor interface.
- *
- * If setup, this ring is used to communicate failed allocations
- * in the unshare path. XENMEM_sharing_op_resume is used to wake up
- * vcpus that could not unshare.
- *
- * Note that shring can be turned on (as per the domctl below)
- * *without* this ring being setup.
- */
-#define XEN_DOMCTL_MEM_EVENT_OP_SHARING 3
-
-#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_ENABLE 0
-#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DISABLE 1
-
-/* Use for teardown/setup of helper<->hypervisor interface for paging,
- * access and sharing.*/
-struct xen_domctl_mem_event_op {
- uint32_t op; /* XEN_DOMCTL_MEM_EVENT_OP_*_* */
- uint32_t mode; /* XEN_DOMCTL_MEM_EVENT_OP_* */
-
- uint32_t port; /* OUT: event channel for ring */
-};
-typedef struct xen_domctl_mem_event_op xen_domctl_mem_event_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_event_op_t);
-
-/*
- * VM event operations
- */
-
-/* XEN_DOMCTL_vm_event_op */
-
-/*
- * Domain memory paging
- * Page memory in and out.
- * Domctl interface to set up and tear down the
- * pager<->hypervisor interface. Use XENMEM_paging_op*
- * to perform per-page operations.
- *
- * The XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE domctl returns several
- * non-standard error codes to indicate why paging could not be enabled:
- * ENODEV - host lacks HAP support (EPT/NPT) or HAP is disabled in guest
- * EMLINK - guest has iommu passthrough enabled
- * EXDEV - guest has PoD enabled
- * EBUSY - guest has or had paging enabled, ring buffer still active
- */
-#define XEN_DOMCTL_VM_EVENT_OP_PAGING 1
-
-#define XEN_DOMCTL_VM_EVENT_OP_PAGING_ENABLE 0
-#define XEN_DOMCTL_VM_EVENT_OP_PAGING_DISABLE 1
-
-/*
- * Monitor permissions.
- *
- * As with paging, use the domctl for teardown/setup of the
- * helper<->hypervisor interface.
- *
- * There are HVM hypercalls to set the per-page access permissions of every
- * page in a domain. When one of these permissions--independent, read,
- * write, and execute--is violated, the VCPU is paused and a memory event
- * is sent with what happened. (See public/vm_event.h) .
- *
- * The memory event handler can then resume the VCPU and redo the access
- * with a XENMEM_access_op_resume hypercall.
- *
- * The XEN_DOMCTL_VM_EVENT_OP_MONITOR_ENABLE domctl returns several
- * non-standard error codes to indicate why access could not be enabled:
- * EBUSY - guest has or had access enabled, ring buffer still active
- */
#define XEN_DOMCTL_VM_EVENT_OP_MONITOR 2
#define XEN_DOMCTL_VM_EVENT_OP_MONITOR_ENABLE 0
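[Illustrative note, not part of the patch: the errno values documented
above carry over unchanged to the renamed interface. A hedged sketch of
handling them when enabling paging, assuming the libxc-internal
xc_vm_event_enable() helper introduced earlier in this series:]

    #include <errno.h>
    #include <xenctrl.h>
    #include "xc_private.h"    /* xc_vm_event_enable() is internal */

    static void *paging_enable_example(xc_interface *xch, domid_t domid,
                                       uint32_t *port)
    {
        void *ring = xc_vm_event_enable(xch, domid,
                                        HVM_PARAM_PAGING_RING_PFN,
                                        port, 0);
        if ( !ring )
            switch ( errno )
            {
            case ENODEV: break; /* no HAP (EPT/NPT) or HAP disabled */
            case EMLINK: break; /* IOMMU passthrough enabled        */
            case EXDEV:  break; /* guest has PoD enabled            */
            case EBUSY:  break; /* ring already/still active        */
            }
        return ring;
    }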
diff --git a/xen/include/public/mem_event.h b/xen/include/public/mem_event.h
deleted file mode 100644
index 0f36b33..0000000
--- a/xen/include/public/mem_event.h
+++ /dev/null
@@ -1,192 +0,0 @@
-/******************************************************************************
- * mem_event.h
- *
- * Memory event common structures.
- *
- * Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp)
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _XEN_PUBLIC_MEM_EVENT_H
-#define _XEN_PUBLIC_MEM_EVENT_H
-
-#if defined(__XEN__) || defined(__XEN_TOOLS__)
-
-#include "xen.h"
-#include "io/ring.h"
-
-#define MEM_EVENT_INTERFACE_VERSION 0x00000001
-
-/* Memory event flags */
-#define MEM_EVENT_FLAG_VCPU_PAUSED (1 << 0)
-#define MEM_EVENT_FLAG_DROP_PAGE (1 << 1)
-#define MEM_EVENT_FLAG_EVICT_FAIL (1 << 2)
-#define MEM_EVENT_FLAG_FOREIGN (1 << 3)
-#define MEM_EVENT_FLAG_DUMMY (1 << 4)
-/*
- * Emulate the fault-causing instruction (if set in the event response flags).
- * This will allow the guest to continue execution without lifting the page
- * access restrictions.
- */
-#define MEM_EVENT_FLAG_EMULATE (1 << 5)
-/*
- * Same as MEM_EVENT_FLAG_EMULATE, but with write operations or operations
- * potentially having side effects (like memory mapped or port I/O) disabled.
- */
-#define MEM_EVENT_FLAG_EMULATE_NOWRITE (1 << 6)
-
-/* Reasons for the vm event request */
-/* Default case */
-#define MEM_EVENT_REASON_UNKNOWN 0
-/* Memory access violation */
-#define MEM_EVENT_REASON_MEM_ACCESS 1
-/* Memory sharing event */
-#define MEM_EVENT_REASON_MEM_SHARING 2
-/* Memory paging event */
-#define MEM_EVENT_REASON_MEM_PAGING 3
-/* CR0 was updated */
-#define MEM_EVENT_REASON_MOV_TO_CR0 4
-/* CR3 was updated */
-#define MEM_EVENT_REASON_MOV_TO_CR3 5
-/* CR4 was updated */
-#define MEM_EVENT_REASON_MOV_TO_CR4 6
-/* An MSR was updated. Does NOT honour HVMPME_onchangeonly */
-#define MEM_EVENT_REASON_MOV_TO_MSR 9
-/* Debug operation executed (int3) */
-#define MEM_EVENT_REASON_SOFTWARE_BREAKPOINT 7
-/* Single-step (MTF) */
-#define MEM_EVENT_REASON_SINGLESTEP 8
-
-/* Using a custom struct (not hvm_hw_cpu) so as to not fill
- * the mem_event ring buffer too quickly. */
-struct mem_event_regs_x86 {
- uint64_t rax;
- uint64_t rcx;
- uint64_t rdx;
- uint64_t rbx;
- uint64_t rsp;
- uint64_t rbp;
- uint64_t rsi;
- uint64_t rdi;
- uint64_t r8;
- uint64_t r9;
- uint64_t r10;
- uint64_t r11;
- uint64_t r12;
- uint64_t r13;
- uint64_t r14;
- uint64_t r15;
- uint64_t rflags;
- uint64_t dr7;
- uint64_t rip;
- uint64_t cr0;
- uint64_t cr2;
- uint64_t cr3;
- uint64_t cr4;
- uint64_t sysenter_cs;
- uint64_t sysenter_esp;
- uint64_t sysenter_eip;
- uint64_t msr_efer;
- uint64_t msr_star;
- uint64_t msr_lstar;
- uint64_t fs_base;
- uint64_t gs_base;
- uint32_t cs_arbytes;
- uint32_t _pad;
-};
-
-struct mem_event_mem_access_data {
- uint64_t gfn;
- uint64_t offset;
- uint64_t gla; /* if gla_valid */
- uint8_t access_r;
- uint8_t access_w;
- uint8_t access_x;
- uint8_t gla_valid;
- uint8_t fault_with_gla;
- uint8_t fault_in_gpt;
- uint16_t _pad;
-};
-
-struct mem_event_mov_to_cr_data {
- uint64_t new_value;
- uint64_t old_value;
-};
-
-struct mem_event_software_breakpoint_data {
- uint64_t gfn;
-};
-
-struct mem_event_singlestep_data {
- uint64_t gfn;
-};
-
-struct mem_event_mov_to_msr_data {
- uint64_t msr;
- uint64_t value;
-};
-
-struct mem_event_paging_data {
- uint64_t gfn;
- uint32_t p2mt;
- uint32_t _pad;
-};
-
-struct mem_event_sharing_data {
- uint64_t gfn;
- uint32_t p2mt;
- uint32_t _pad;
-};
-
-typedef struct mem_event_st {
- uint32_t version; /* MEM_EVENT_INTERFACE_VERSION */
- uint32_t flags;
- uint32_t vcpu_id;
- uint32_t reason; /* MEM_EVENT_REASON_* */
-
- union {
- struct mem_event_paging_data mem_paging;
- struct mem_event_sharing_data mem_sharing;
- struct mem_event_mem_access_data mem_access;
- struct mem_event_mov_to_cr_data mov_to_cr;
- struct mem_event_mov_to_msr_data mov_to_msr;
- struct mem_event_software_breakpoint_data software_breakpoint;
- struct mem_event_singlestep_data singlestep;
- } data;
-
- union {
- struct mem_event_regs_x86 x86;
- } regs;
-} mem_event_request_t, mem_event_response_t;
-
-DEFINE_RING_TYPES(mem_event, mem_event_request_t, mem_event_response_t);
-
-#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
-#endif /* _XEN_PUBLIC_MEM_EVENT_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
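[Illustrative note, not part of the patch: consumers switch to the
vm_event equivalents of the ring types deleted above. A sketch of the
helper-side ring handling, modelled on tools/tests/xen-access and
assuming the vm_event_* names from the replacement public header:]

    #include <string.h>
    #include <xenctrl.h>
    #include <xen/vm_event.h>    /* replacement for mem_event.h */

    static vm_event_back_ring_t back_ring;

    /* The helper drives the *back* ring of the shared page mapped by
     * xc_mem_access_enable(). */
    static void ring_init(void *ring_page)
    {
        SHARED_RING_INIT((vm_event_sring_t *)ring_page);
        BACK_RING_INIT(&back_ring, (vm_event_sring_t *)ring_page,
                       XC_PAGE_SIZE);
    }

    /* Returns 1 and fills *req if a request was pending, else 0. */
    static int get_request(vm_event_request_t *req)
    {
        RING_IDX cons = back_ring.req_cons;

        if ( !RING_HAS_UNCONSUMED_REQUESTS(&back_ring) )
            return 0;

        memcpy(req, RING_GET_REQUEST(&back_ring, cons), sizeof(*req));
        back_ring.req_cons = ++cons;
        back_ring.sring->req_event = cons + 1;
        return 1;
    }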
diff --git a/xen/include/xen/mem_event.h b/xen/include/xen/mem_event.h
deleted file mode 100644
index 4f3ad8e..0000000
--- a/xen/include/xen/mem_event.h
+++ /dev/null
@@ -1,143 +0,0 @@
-/******************************************************************************
- * mem_event.h
- *
- * Common interface for memory event support.
- *
- * Copyright (c) 2009 Citrix Systems, Inc. (Patrick Colp)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-
-#ifndef __MEM_EVENT_H__
-#define __MEM_EVENT_H__
-
-#include <xen/sched.h>
-
-#ifdef HAS_MEM_ACCESS
-
-/* Clean up on domain destruction */
-void mem_event_cleanup(struct domain *d);
-
-/* Returns whether a ring has been set up */
-bool_t mem_event_check_ring(struct mem_event_domain *med);
-
-/* Returns 0 on success, -ENOSYS if there is no ring, -EBUSY if there is no
- * available space and the caller is a foreign domain. If the guest itself
- * is the caller, -EBUSY is avoided by sleeping on a wait queue to ensure
- * that the ring does not lose future events.
- *
- * However, the allow_sleep flag can be set to false in cases in which it is ok
- * to lose future events, and thus -EBUSY can be returned to guest vcpus
- * (handle with care!).
- *
- * In general, you must follow a claim_slot() call with either put_request() or
- * cancel_slot(), both of which are guaranteed to
- * succeed.
- */
-int __mem_event_claim_slot(struct domain *d, struct mem_event_domain *med,
- bool_t allow_sleep);
-static inline int mem_event_claim_slot(struct domain *d,
- struct mem_event_domain *med)
-{
- return __mem_event_claim_slot(d, med, 1);
-}
-
-static inline int mem_event_claim_slot_nosleep(struct domain *d,
- struct mem_event_domain *med)
-{
- return __mem_event_claim_slot(d, med, 0);
-}
-
-void mem_event_cancel_slot(struct domain *d, struct mem_event_domain *med);
-
-void mem_event_put_request(struct domain *d, struct mem_event_domain *med,
- mem_event_request_t *req);
-
-int mem_event_get_response(struct domain *d, struct mem_event_domain *med,
- mem_event_response_t *rsp);
-
-int do_mem_event_op(int op, uint32_t domain, void *arg);
-int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
- XEN_GUEST_HANDLE_PARAM(void) u_domctl);
-
-void mem_event_vcpu_pause(struct vcpu *v);
-void mem_event_vcpu_unpause(struct vcpu *v);
-
-#else
-
-static inline void mem_event_cleanup(struct domain *d) {}
-
-static inline bool_t mem_event_check_ring(struct mem_event_domain *med)
-{
- return 0;
-}
-
-static inline int mem_event_claim_slot(struct domain *d,
- struct mem_event_domain *med)
-{
- return -ENOSYS;
-}
-
-static inline int mem_event_claim_slot_nosleep(struct domain *d,
- struct mem_event_domain *med)
-{
- return -ENOSYS;
-}
-
-static inline
-void mem_event_cancel_slot(struct domain *d, struct mem_event_domain *med)
-{}
-
-static inline
-void mem_event_put_request(struct domain *d, struct mem_event_domain *med,
- mem_event_request_t *req)
-{}
-
-static inline
-int mem_event_get_response(struct domain *d, struct mem_event_domain *med,
- mem_event_response_t *rsp)
-{
- return -ENOSYS;
-}
-
-static inline int do_mem_event_op(int op, uint32_t domain, void *arg)
-{
- return -ENOSYS;
-}
-
-static inline
-int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
- XEN_GUEST_HANDLE_PARAM(void) u_domctl)
-{
- return -ENOSYS;
-}
-
-static inline void mem_event_vcpu_pause(struct vcpu *v) {}
-static inline void mem_event_vcpu_unpause(struct vcpu *v) {}
-
-#endif /* HAS_MEM_ACCESS */
-
-#endif /* __MEM_EVENT_H__ */
-
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * indent-tabs-mode: nil
- * End:
- */
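[Illustrative note, not part of the patch: the claim/put/cancel
protocol described in the deleted comments is unchanged under the
vm_event names. A hypervisor-side sketch, assuming the parallel
vm_event_* signatures introduced earlier in this series:]

    /* Send one monitor event, observing the slot protocol: a
     * successful claim *must* be followed by put or cancel. */
    static void send_monitor_event(struct domain *d,
                                   vm_event_request_t *req)
    {
        /* Guest vcpus may sleep here; foreign callers get -EBUSY. */
        if ( vm_event_claim_slot(d, &d->vm_event->monitor) )
            return;

        vm_event_put_request(d, &d->vm_event->monitor, req);
    }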
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 681efa9..33283b5 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -23,7 +23,6 @@
#include <public/domctl.h>
#include <public/sysctl.h>
#include <public/vcpu.h>
-#include <public/mem_event.h>
#include <public/vm_event.h>
#include <public/event_channel.h>
@@ -215,8 +214,6 @@ struct vcpu
unsigned long pause_flags;
atomic_t pause_count;
- /* VCPU paused for mem_event replies. */
- atomic_t mem_event_pause_count;
/* VCPU paused for vm_event replies. */
atomic_t vm_event_pause_count;
/* VCPU paused by system controller. */
@@ -260,41 +257,6 @@ struct vcpu
#define domain_unlock(d) spin_unlock_recursive(&(d)->domain_lock)
#define domain_is_locked(d) spin_is_locked(&(d)->domain_lock)
-/* Memory event */
-struct mem_event_domain
-{
- /* ring lock */
- spinlock_t ring_lock;
- /* The ring has 64 entries */
- unsigned char foreign_producers;
- unsigned char target_producers;
- /* shared ring page */
- void *ring_page;
- struct page_info *ring_pg_struct;
- /* front-end ring */
- mem_event_front_ring_t front_ring;
- /* event channel port (vcpu0 only) */
- int xen_port;
- /* mem_event bit for vcpu->pause_flags */
- int pause_flag;
- /* list of vcpus waiting for room in the ring */
- struct waitqueue_head wq;
- /* the number of vCPUs blocked */
- unsigned int blocked;
- /* The last vcpu woken up */
- unsigned int last_vcpu_wake_up;
-};
-
-struct mem_event_per_domain
-{
- /* Memory sharing support */
- struct mem_event_domain share;
- /* Memory paging support */
- struct mem_event_domain paging;
- /* VM event monitor support */
- struct mem_event_domain monitor;
-};
-
/* VM event */
struct vm_event_domain
{
@@ -480,9 +442,6 @@ struct domain
struct lock_profile_qhead profile_head;
- /* Various mem_events */
- struct mem_event_per_domain *mem_event;
-
/* Various vm_events */
struct vm_event_per_domain *vm_event;
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index d6d403a..4227093 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -514,18 +514,6 @@ static XSM_INLINE int xsm_hvm_param_nested(XSM_DEFAULT_ARG struct domain *d)
}
#ifdef HAS_MEM_ACCESS
-static XSM_INLINE int xsm_mem_event_control(XSM_DEFAULT_ARG struct domain *d, int mode, int op)
-{
- XSM_ASSERT_ACTION(XSM_PRIV);
- return xsm_default_action(action, current->domain, d);
-}
-
-static XSM_INLINE int xsm_mem_event_op(XSM_DEFAULT_ARG struct domain *d, int op)
-{
- XSM_ASSERT_ACTION(XSM_DM_PRIV);
- return xsm_default_action(action, current->domain, d);
-}
-
static XSM_INLINE int xsm_vm_event_control(XSM_DEFAULT_ARG struct domain *d, int mode, int op)
{
XSM_ASSERT_ACTION(XSM_PRIV);
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 581e712..cff9d35 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -142,8 +142,6 @@ struct xsm_operations {
int (*get_vnumainfo) (struct domain *d);
#ifdef HAS_MEM_ACCESS
- int (*mem_event_control) (struct domain *d, int mode, int op);
- int (*mem_event_op) (struct domain *d, int op);
int (*vm_event_control) (struct domain *d, int mode, int op);
int (*vm_event_op) (struct domain *d, int op);
#endif
@@ -546,16 +544,6 @@ static inline int xsm_get_vnumainfo (xsm_default_t def, struct domain *d)
}
#ifdef HAS_MEM_ACCESS
-static inline int xsm_mem_event_control (xsm_default_t def, struct domain *d, int mode, int op)
-{
- return xsm_ops->mem_event_control(d, mode, op);
-}
-
-static inline int xsm_mem_event_op (xsm_default_t def, struct domain *d, int op)
-{
- return xsm_ops->mem_event_op(d, op);
-}
-
static inline int xsm_vm_event_control (xsm_default_t def, struct domain *d, int mode, int op)
{
return xsm_ops->vm_event_control(d, mode, op);
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index 3cf5126..25fca68 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -119,8 +119,6 @@ void xsm_fixup_ops (struct xsm_operations *ops)
set_to_dummy_if_null(ops, map_gmfn_foreign);
#ifdef HAS_MEM_ACCESS
- set_to_dummy_if_null(ops, mem_event_control);
- set_to_dummy_if_null(ops, mem_event_op);
set_to_dummy_if_null(ops, vm_event_control);
set_to_dummy_if_null(ops, vm_event_op);
#endif
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 1debd31..c419543 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1203,16 +1203,6 @@ static int flask_deassign_device(struct domain *d, uint32_t machine_bdf)
#endif /* HAS_PASSTHROUGH && HAS_PCI */
#ifdef HAS_MEM_ACCESS
-static int flask_mem_event_control(struct domain *d, int mode, int op)
-{
- return current_has_perm(d, SECCLASS_HVM, HVM__MEM_EVENT);
-}
-
-static int flask_mem_event_op(struct domain *d, int op)
-{
- return current_has_perm(d, SECCLASS_HVM, HVM__MEM_EVENT);
-}
-
static int flask_vm_event_control(struct domain *d, int mode, int op)
{
return current_has_perm(d, SECCLASS_HVM, HVM__VM_EVENT);
@@ -1607,8 +1597,6 @@ static struct xsm_operations flask_ops = {
#endif
#ifdef HAS_MEM_ACCESS
- .mem_event_control = flask_mem_event_control,
- .mem_event_op = flask_mem_event_op,
.vm_event_control = flask_vm_event_control,
.vm_event_op = flask_vm_event_op,
#endif
--
2.1.4