Re: [Xen-devel] [PATCH v2] arm/monitor vm-events: Implement guest-request support
On Fri, 26 Feb 2016, Corneliu ZUZU wrote:
> This patch adds ARM support for guest-request monitor vm-events.
> Note: on ARM, hypercall instruction skipping must be done manually
> by the caller. This will probably be changed in a future patch.
>
> Summary of changes:
> == Moved to common-side:
> * XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST handling (moved from X86
> arch_monitor_domctl_event to common monitor_domctl)
> * hvm_event_guest_request->vm_event_monitor_guest_request
> * hvm_event_traps->vm_event_monitor_traps (also added target vcpu as param)
> * guest-request bits from X86 'struct arch_domain' (to common 'struct
> domain')
> == ARM implementations:
> * do_hvm_op now handles HVMOP_guest_request_vm_event => calls
> vm_event_monitor_guest_request (as on X86)
> * arch_monitor_get_capabilities->vm_event_monitor_get_capabilities,
> updated to reflect support for XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST
> * vm_event_init_domain (does nothing), vm_event_cleanup_domain
> == Misc:
> * vm_event_fill_regs, no longer X86-specific. The ARM-side implementation of
> this function currently does nothing; that will be added in a separate patch.
>
> Signed-off-by: Corneliu ZUZU <czuzu@xxxxxxxxxxxxxxx>
ARM and common bits:
Acked-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
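
For anyone wanting to exercise the new ARM support from dom0, the consumer side
is unchanged from x86: map the monitor ring, enable the guest-request event on
the target domain, then wait for VM_EVENT_REASON_GUEST_REQUEST requests. Below
is a minimal sketch, assuming the existing libxc helpers xc_monitor_enable()
and xc_monitor_guest_request(); error handling and ring consumption are elided
(see tools/tests/xen-access for a complete consumer). The sync flag here ends
up in d->monitor.guest_request_sync via the common monitor_domctl() hunk below.

    /* Illustrative toolstack-side setup; not part of this patch. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <xenctrl.h>

    static int enable_guest_request(domid_t domid, bool sync)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        uint32_t evtchn_port;
        void *ring_page;

        if ( !xch )
            return -1;

        /* Map the monitor ring page and retrieve its event channel port. */
        ring_page = xc_monitor_enable(xch, domid, &evtchn_port);
        if ( !ring_page )
        {
            xc_interface_close(xch);
            return -1;
        }

        /* Ask Xen to deliver guest-request events (sync => vCPU is paused). */
        if ( xc_monitor_guest_request(xch, domid, true, sync) )
            fprintf(stderr, "enabling guest-request events failed\n");

        /* ... poll the ring, process requests, signal the event channel ... */
        return 0;
    }
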
> Changed since v1:
> * hvm_event_traps, hvm_event_guest_request moved to common/vm_event.c and
> renamed to vm_event_monitor_traps and vm_event_monitor_guest_request
> * arch_monitor_get_capabilities moved to vm_event.h and renamed to
> vm_event_monitor_get_capabilities
> * arch_hvm_event_fill_regs replaced w/ existing vm_event_fill_regs
> (see commit adc75eba8b15c7103a010f736fe62e3fb2383964)
> * defined altp2m_active for ARM and altp2m_vcpu_idx to remove #ifdef in
> vm_event_monitor_traps
> * changed bitfield member types to unsigned int
> ---
> xen/arch/arm/hvm.c | 8 ++++
> xen/arch/x86/hvm/event.c | 82 ++++++++---------------------------------
> xen/arch/x86/hvm/hvm.c | 3 +-
> xen/arch/x86/monitor.c | 18 +--------
> xen/arch/x86/vm_event.c | 1 +
> xen/common/monitor.c | 36 +++++++++++++++---
> xen/common/vm_event.c | 60 ++++++++++++++++++++++++++++++
> xen/include/asm-arm/altp2m.h | 39 ++++++++++++++++++++
> xen/include/asm-arm/monitor.h | 8 +---
> xen/include/asm-arm/vm_event.h | 19 +++++++++-
> xen/include/asm-x86/altp2m.h | 10 +++--
> xen/include/asm-x86/domain.h | 4 +-
> xen/include/asm-x86/hvm/event.h | 13 +++++--
> xen/include/asm-x86/monitor.h | 23 ------------
> xen/include/asm-x86/vm_event.h | 23 ++++++++++++
> xen/include/xen/sched.h | 6 +++
> xen/include/xen/vm_event.h | 9 +++++
> 17 files changed, 232 insertions(+), 130 deletions(-)
> create mode 100644 xen/include/asm-arm/altp2m.h
>
> diff --git a/xen/arch/arm/hvm.c b/xen/arch/arm/hvm.c
> index 056db1a..c01123a 100644
> --- a/xen/arch/arm/hvm.c
> +++ b/xen/arch/arm/hvm.c
> @@ -22,6 +22,7 @@
> #include <xen/errno.h>
> #include <xen/guest_access.h>
> #include <xen/sched.h>
> +#include <xen/vm_event.h>
>
> #include <xsm/xsm.h>
>
> @@ -72,6 +73,13 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
> break;
> }
>
> + case HVMOP_guest_request_vm_event:
> + if ( guest_handle_is_null(arg) )
> + vm_event_monitor_guest_request();
> + else
> + rc = -EINVAL;
> + break;
> +
> default:
> {
> gdprintk(XENLOG_DEBUG, "HVMOP op=%lu: not implemented\n", op);
> diff --git a/xen/arch/x86/hvm/event.c b/xen/arch/x86/hvm/event.c
> index d0b7d90..56c5514 100644
> --- a/xen/arch/x86/hvm/event.c
> +++ b/xen/arch/x86/hvm/event.c
> @@ -6,6 +6,7 @@
> * Copyright (c) 2004, Intel Corporation.
> * Copyright (c) 2005, International Business Machines Corporation.
> * Copyright (c) 2008, Citrix Systems, Inc.
> + * Copyright (c) 2016, Bitdefender S.R.L.
> *
> * This program is free software; you can redistribute it and/or modify it
> * under the terms and conditions of the GNU General Public License,
> @@ -21,71 +22,32 @@
> */
>
> #include <xen/vm_event.h>
> -#include <xen/paging.h>
> #include <asm/hvm/event.h>
> #include <asm/monitor.h>
> -#include <asm/altp2m.h>
> #include <asm/vm_event.h>
> #include <public/vm_event.h>
>
> -static int hvm_event_traps(uint8_t sync, vm_event_request_t *req)
> -{
> - int rc;
> - struct vcpu *curr = current;
> - struct domain *currd = curr->domain;
> -
> - rc = vm_event_claim_slot(currd, &currd->vm_event->monitor);
> - switch ( rc )
> - {
> - case 0:
> - break;
> - case -ENOSYS:
> - /*
> - * If there was no ring to handle the event, then
> - * simply continue executing normally.
> - */
> - return 1;
> - default:
> - return rc;
> - };
> -
> - if ( sync )
> - {
> - req->flags |= VM_EVENT_FLAG_VCPU_PAUSED;
> - vm_event_vcpu_pause(curr);
> - }
> -
> - if ( altp2m_active(currd) )
> - {
> - req->flags |= VM_EVENT_FLAG_ALTERNATE_P2M;
> - req->altp2m_idx = vcpu_altp2m(curr).p2midx;
> - }
> -
> - vm_event_fill_regs(req);
> - vm_event_put_request(currd, &currd->vm_event->monitor, req);
> -
> - return 1;
> -}
> -
> bool_t hvm_event_cr(unsigned int index, unsigned long value, unsigned long old)
> {
> - struct arch_domain *currad = &current->domain->arch;
> + struct vcpu *curr = current;
> + struct arch_domain *ad = &curr->domain->arch;
> unsigned int ctrlreg_bitmask = monitor_ctrlreg_bitmask(index);
>
> - if ( (currad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask) &&
> - (!(currad->monitor.write_ctrlreg_onchangeonly & ctrlreg_bitmask) ||
> + if ( (ad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask) &&
> + (!(ad->monitor.write_ctrlreg_onchangeonly & ctrlreg_bitmask) ||
> value != old) )
> {
> + bool_t sync = !!(ad->monitor.write_ctrlreg_sync & ctrlreg_bitmask);
> +
> vm_event_request_t req = {
> .reason = VM_EVENT_REASON_WRITE_CTRLREG,
> - .vcpu_id = current->vcpu_id,
> + .vcpu_id = curr->vcpu_id,
> .u.write_ctrlreg.index = index,
> .u.write_ctrlreg.new_value = value,
> .u.write_ctrlreg.old_value = old
> };
>
> - hvm_event_traps(currad->monitor.write_ctrlreg_sync & ctrlreg_bitmask,
> - &req);
> + vm_event_monitor_traps(curr, sync, &req);
> return 1;
> }
>
> @@ -95,30 +57,18 @@ bool_t hvm_event_cr(unsigned int index, unsigned long value, unsigned long old)
> void hvm_event_msr(unsigned int msr, uint64_t value)
> {
> struct vcpu *curr = current;
> - vm_event_request_t req = {
> - .reason = VM_EVENT_REASON_MOV_TO_MSR,
> - .vcpu_id = curr->vcpu_id,
> - .u.mov_to_msr.msr = msr,
> - .u.mov_to_msr.value = value,
> - };
> -
> - if ( curr->domain->arch.monitor.mov_to_msr_enabled )
> - hvm_event_traps(1, &req);
> -}
> -
> -void hvm_event_guest_request(void)
> -{
> - struct vcpu *curr = current;
> - struct arch_domain *currad = &curr->domain->arch;
> + struct arch_domain *ad = &curr->domain->arch;
>
> - if ( currad->monitor.guest_request_enabled )
> + if ( ad->monitor.mov_to_msr_enabled )
> {
> vm_event_request_t req = {
> - .reason = VM_EVENT_REASON_GUEST_REQUEST,
> + .reason = VM_EVENT_REASON_MOV_TO_MSR,
> .vcpu_id = curr->vcpu_id,
> + .u.mov_to_msr.msr = msr,
> + .u.mov_to_msr.value = value,
> };
>
> - hvm_event_traps(currad->monitor.guest_request_sync, &req);
> + vm_event_monitor_traps(curr, 1, &req);
> }
> }
>
> @@ -166,7 +116,7 @@ int hvm_event_breakpoint(unsigned long rip,
>
> req.vcpu_id = curr->vcpu_id;
>
> - return hvm_event_traps(1, &req);
> + return vm_event_monitor_traps(curr, 1, &req);
> }
>
> /*
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index f46d53c..41f3792 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -36,6 +36,7 @@
> #include <xen/wait.h>
> #include <xen/mem_access.h>
> #include <xen/rangeset.h>
> +#include <xen/vm_event.h>
> #include <asm/shadow.h>
> #include <asm/hap.h>
> #include <asm/current.h>
> @@ -6896,7 +6897,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
>
> case HVMOP_guest_request_vm_event:
> if ( guest_handle_is_null(arg) )
> - hvm_event_guest_request();
> + vm_event_monitor_guest_request();
> else
> rc = -EINVAL;
> break;
> diff --git a/xen/arch/x86/monitor.c b/xen/arch/x86/monitor.c
> index b4bd008..1fec412 100644
> --- a/xen/arch/x86/monitor.c
> +++ b/xen/arch/x86/monitor.c
> @@ -124,24 +124,10 @@ int arch_monitor_domctl_event(struct domain *d,
> break;
> }
>
> - case XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST:
> - {
> - bool_t old_status = ad->monitor.guest_request_enabled;
> -
> - if ( unlikely(old_status == requested_status) )
> - return -EEXIST;
> -
> - domain_pause(d);
> - ad->monitor.guest_request_sync = mop->u.guest_request.sync;
> - ad->monitor.guest_request_enabled = requested_status;
> - domain_unpause(d);
> - break;
> - }
> -
> default:
> /*
> - * Should not be reached unless arch_monitor_get_capabilities() is not
> - * properly implemented.
> + * Should not be reached unless vm_event_monitor_get_capabilities() is
> + * not properly implemented.
> */
> ASSERT_UNREACHABLE();
> return -EOPNOTSUPP;
> diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
> index 2a2abd7..5635603 100644
> --- a/xen/arch/x86/vm_event.c
> +++ b/xen/arch/x86/vm_event.c
> @@ -57,6 +57,7 @@ void vm_event_cleanup_domain(struct domain *d)
>
> d->arch.mem_access_emulate_each_rep = 0;
> memset(&d->arch.monitor, 0, sizeof(d->arch.monitor));
> + memset(&d->monitor, 0, sizeof(d->monitor));
> }
>
> void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
> diff --git a/xen/common/monitor.c b/xen/common/monitor.c
> index 2a99a04..d950a7c 100644
> --- a/xen/common/monitor.c
> +++ b/xen/common/monitor.c
> @@ -24,10 +24,12 @@
> #include <xsm/xsm.h>
> #include <public/domctl.h>
> #include <asm/monitor.h>
> +#include <asm/vm_event.h>
>
> int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop)
> {
> int rc;
> + bool_t requested_status = 0;
>
> if ( unlikely(current->domain == d) ) /* no domain_pause() */
> return -EPERM;
> @@ -39,24 +41,48 @@ int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop)
> switch ( mop->op )
> {
> case XEN_DOMCTL_MONITOR_OP_ENABLE:
> + requested_status = 1;
> + /* fallthrough */
> case XEN_DOMCTL_MONITOR_OP_DISABLE:
> - /* Check if event type is available. */
> /* sanity check: avoid left-shift undefined behavior */
> if ( unlikely(mop->event > 31) )
> return -EINVAL;
> - if ( unlikely(!(arch_monitor_get_capabilities(d) & (1U << mop->event))) )
> + /* Check if event type is available. */
> + if ( unlikely(!(vm_event_monitor_get_capabilities(d) & (1U << mop->event))) )
> return -EOPNOTSUPP;
> - /* Arch-side handles enable/disable ops. */
> - return arch_monitor_domctl_event(d, mop);
> + break;
>
> case XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES:
> - mop->event = arch_monitor_get_capabilities(d);
> + mop->event = vm_event_monitor_get_capabilities(d);
> return 0;
>
> default:
> /* The monitor op is probably handled on the arch-side. */
> return arch_monitor_domctl_op(d, mop);
> }
> +
> + switch ( mop->event )
> + {
> + case XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST:
> + {
> + bool_t old_status = d->monitor.guest_request_enabled;
> +
> + if ( unlikely(old_status == requested_status) )
> + return -EEXIST;
> +
> + domain_pause(d);
> + d->monitor.guest_request_sync = mop->u.guest_request.sync;
> + d->monitor.guest_request_enabled = requested_status;
> + domain_unpause(d);
> + break;
> + }
> +
> + default:
> + /* Give arch-side the chance to handle this event */
> + return arch_monitor_domctl_event(d, mop);
> + }
> +
> + return 0;
> }
>
> /*
> diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
> index 9f43a76..2906407 100644
> --- a/xen/common/vm_event.c
> +++ b/xen/common/vm_event.c
> @@ -26,6 +26,7 @@
> #include <xen/vm_event.h>
> #include <xen/mem_access.h>
> #include <asm/p2m.h>
> +#include <asm/altp2m.h>
> #include <asm/vm_event.h>
> #include <xsm/xsm.h>
>
> @@ -781,6 +782,65 @@ void vm_event_vcpu_unpause(struct vcpu *v)
> }
>
> /*
> + * Monitor vm-events
> + */
> +
> +int vm_event_monitor_traps(struct vcpu *v, uint8_t sync,
> + vm_event_request_t *req)
> +{
> + int rc;
> + struct domain *d = v->domain;
> +
> + rc = vm_event_claim_slot(d, &d->vm_event->monitor);
> + switch ( rc )
> + {
> + case 0:
> + break;
> + case -ENOSYS:
> + /*
> + * If there was no ring to handle the event, then
> + * simply continue executing normally.
> + */
> + return 1;
> + default:
> + return rc;
> + };
> +
> + if ( sync )
> + {
> + req->flags |= VM_EVENT_FLAG_VCPU_PAUSED;
> + vm_event_vcpu_pause(v);
> + }
> +
> + if ( altp2m_active(d) )
> + {
> + req->flags |= VM_EVENT_FLAG_ALTERNATE_P2M;
> + req->altp2m_idx = altp2m_vcpu_idx(v);
> + }
> +
> + vm_event_fill_regs(req);
> + vm_event_put_request(d, &d->vm_event->monitor, req);
> +
> + return 1;
> +}
> +
> +void vm_event_monitor_guest_request(void)
> +{
> + struct vcpu *curr = current;
> + struct domain *d = curr->domain;
> +
> + if ( d->monitor.guest_request_enabled )
> + {
> + vm_event_request_t req = {
> + .reason = VM_EVENT_REASON_GUEST_REQUEST,
> + .vcpu_id = curr->vcpu_id,
> + };
> +
> + vm_event_monitor_traps(curr, d->monitor.guest_request_sync, &req);
> + }
> +}
> +
> +/*
> * Local variables:
> * mode: C
> * c-file-style: "BSD"
> diff --git a/xen/include/asm-arm/altp2m.h b/xen/include/asm-arm/altp2m.h
> new file mode 100644
> index 0000000..7b0d5c1
> --- /dev/null
> +++ b/xen/include/asm-arm/altp2m.h
> @@ -0,0 +1,39 @@
> +/*
> + * Alternate p2m
> + *
> + * Copyright (c) 2014, Intel Corporation.
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms and conditions of the GNU General Public License,
> + * version 2, as published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program; If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#ifndef __ASM_ARM_ALTP2M_H
> +#define __ASM_ARM_ALTP2M_H
> +
> +#include <xen/sched.h>
> +
> +/* Alternate p2m on/off per domain */
> +static inline bool_t altp2m_active(const struct domain *d)
> +{
> + /* Not implemented on ARM. */
> + return 0;
> +}
> +
> +/* Alternate p2m VCPU */
> +static inline uint16_t altp2m_vcpu_idx(struct vcpu *v)
> +{
> + /* Not implemented on ARM, should not be reached. */
> + BUG();
> + return 0;
> +}
> +
> +#endif /* __ASM_ARM_ALTP2M_H */
> diff --git a/xen/include/asm-arm/monitor.h b/xen/include/asm-arm/monitor.h
> index 82a4a66..6e36e99 100644
> --- a/xen/include/asm-arm/monitor.h
> +++ b/xen/include/asm-arm/monitor.h
> @@ -25,12 +25,6 @@
> #include <xen/sched.h>
> #include <public/domctl.h>
>
> -static inline uint32_t arch_monitor_get_capabilities(struct domain *d)
> -{
> - /* No monitor vm-events implemented on ARM. */
> - return 0;
> -}
> -
> static inline
> int arch_monitor_domctl_op(struct domain *d, struct xen_domctl_monitor_op *mop)
> {
> @@ -45,7 +39,7 @@ int arch_monitor_domctl_event(struct domain *d,
> /*
> * No arch-specific monitor vm-events on ARM.
> *
> - * Should not be reached unless arch_monitor_get_capabilities() is not
> + * Should not be reached unless vm_event_monitor_get_capabilities() is not
> * properly implemented.
> */
> ASSERT_UNREACHABLE();
> diff --git a/xen/include/asm-arm/vm_event.h b/xen/include/asm-arm/vm_event.h
> index 4d0fbf7..014d9ba 100644
> --- a/xen/include/asm-arm/vm_event.h
> +++ b/xen/include/asm-arm/vm_event.h
> @@ -21,18 +21,19 @@
>
> #include <xen/sched.h>
> #include <xen/vm_event.h>
> +#include <public/domctl.h>
>
> static inline
> int vm_event_init_domain(struct domain *d)
> {
> - /* Not supported on ARM. */
> + /* Nothing to do. */
> return 0;
> }
>
> static inline
> void vm_event_cleanup_domain(struct domain *d)
> {
> - /* Not supported on ARM. */
> + memset(&d->monitor, 0, sizeof(d->monitor));
> }
>
> static inline
> @@ -53,4 +54,18 @@ void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp)
> /* Not supported on ARM. */
> }
>
> +static inline void vm_event_fill_regs(vm_event_request_t *req)
> +{
> + /* Not supported on ARM. */
> +}
> +
> +static inline uint32_t vm_event_monitor_get_capabilities(struct domain *d)
> +{
> + uint32_t capabilities = 0;
> +
> + capabilities = (1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST);
> +
> + return capabilities;
> +}
> +
> #endif /* __ASM_ARM_VM_EVENT_H__ */
> diff --git a/xen/include/asm-x86/altp2m.h b/xen/include/asm-x86/altp2m.h
> index fc82c1b..383b970 100644
> --- a/xen/include/asm-x86/altp2m.h
> +++ b/xen/include/asm-x86/altp2m.h
> @@ -15,8 +15,8 @@
> * this program; If not, see <http://www.gnu.org/licenses/>.
> */
>
> -#ifndef _X86_ALTP2M_H
> -#define _X86_ALTP2M_H
> +#ifndef __ASM_X86_ALTP2M_H
> +#define __ASM_X86_ALTP2M_H
>
> #include <xen/types.h>
> #include <xen/sched.h> /* for struct vcpu, struct domain */
> @@ -33,5 +33,9 @@ void altp2m_vcpu_initialise(struct vcpu *v);
> void altp2m_vcpu_destroy(struct vcpu *v);
> void altp2m_vcpu_reset(struct vcpu *v);
>
> -#endif /* _X86_ALTP2M_H */
> +static inline uint16_t altp2m_vcpu_idx(struct vcpu *v)
> +{
> + return vcpu_altp2m(v).p2midx;
> +}
>
> +#endif /* __ASM_X86_ALTP2M_H */
> diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
> index 4fad638..a976642 100644
> --- a/xen/include/asm-x86/domain.h
> +++ b/xen/include/asm-x86/domain.h
> @@ -376,7 +376,7 @@ struct arch_domain
> unsigned long *pirq_eoi_map;
> unsigned long pirq_eoi_map_mfn;
>
> - /* Monitor options */
> + /* Arch-specific monitor options */
> struct {
> unsigned int write_ctrlreg_enabled : 4;
> unsigned int write_ctrlreg_sync : 4;
> @@ -385,8 +385,6 @@ struct arch_domain
> unsigned int mov_to_msr_extended : 1;
> unsigned int singlestep_enabled : 1;
> unsigned int software_breakpoint_enabled : 1;
> - unsigned int guest_request_enabled : 1;
> - unsigned int guest_request_sync : 1;
> } monitor;
>
> /* Mem_access emulation control */
> diff --git a/xen/include/asm-x86/hvm/event.h b/xen/include/asm-x86/hvm/event.h
> index 7a83b45..03f7fee 100644
> --- a/xen/include/asm-x86/hvm/event.h
> +++ b/xen/include/asm-x86/hvm/event.h
> @@ -1,5 +1,7 @@
> /*
> - * event.h: Hardware virtual machine assist events.
> + * include/asm-x86/hvm/event.h
> + *
> + * Arch-specific hardware virtual machine event abstractions.
> *
> * This program is free software; you can redistribute it and/or modify it
> * under the terms and conditions of the GNU General Public License,
> @@ -17,6 +19,10 @@
> #ifndef __ASM_X86_HVM_EVENT_H__
> #define __ASM_X86_HVM_EVENT_H__
>
> +#include <xen/sched.h>
> +#include <xen/paging.h>
> +#include <public/vm_event.h>
> +
> enum hvm_event_breakpoint_type
> {
> HVM_EVENT_SOFTWARE_BREAKPOINT,
> @@ -30,12 +36,11 @@ enum hvm_event_breakpoint_type
> */
> bool_t hvm_event_cr(unsigned int index, unsigned long value,
> unsigned long old);
> -#define hvm_event_crX(what, new, old) \
> - hvm_event_cr(VM_EVENT_X86_##what, new, old)
> +#define hvm_event_crX(cr, new, old) \
> + hvm_event_cr(VM_EVENT_X86_##cr, new, old)
> void hvm_event_msr(unsigned int msr, uint64_t value);
> int hvm_event_breakpoint(unsigned long rip,
> enum hvm_event_breakpoint_type type);
> -void hvm_event_guest_request(void);
>
> #endif /* __ASM_X86_HVM_EVENT_H__ */
>
> diff --git a/xen/include/asm-x86/monitor.h b/xen/include/asm-x86/monitor.h
> index f1bf4bd..0954b59 100644
> --- a/xen/include/asm-x86/monitor.h
> +++ b/xen/include/asm-x86/monitor.h
> @@ -29,29 +29,6 @@
>
> #define monitor_ctrlreg_bitmask(ctrlreg_index) (1U << (ctrlreg_index))
>
> -static inline uint32_t arch_monitor_get_capabilities(struct domain *d)
> -{
> - uint32_t capabilities = 0;
> -
> - /*
> - * At the moment only Intel HVM domains are supported. However, event
> - * delivery could be extended to AMD and PV domains.
> - */
> - if ( !is_hvm_domain(d) || !cpu_has_vmx )
> - return capabilities;
> -
> - capabilities = (1U << XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG) |
> - (1U << XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR) |
> - (1U << XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT) |
> - (1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST);
> -
> - /* Since we know this is on VMX, we can just call the hvm func */
> - if ( hvm_is_singlestep_supported() )
> - capabilities |= (1U << XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP);
> -
> - return capabilities;
> -}
> -
> static inline
> int arch_monitor_domctl_op(struct domain *d, struct xen_domctl_monitor_op *mop)
> {
> diff --git a/xen/include/asm-x86/vm_event.h b/xen/include/asm-x86/vm_event.h
> index 026f42e..0470240 100644
> --- a/xen/include/asm-x86/vm_event.h
> +++ b/xen/include/asm-x86/vm_event.h
> @@ -44,4 +44,27 @@ void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp);
>
> void vm_event_fill_regs(vm_event_request_t *req);
>
> +static inline uint32_t vm_event_monitor_get_capabilities(struct domain *d)
> +{
> + uint32_t capabilities = 0;
> +
> + /*
> + * At the moment only Intel HVM domains are supported. However, event
> + * delivery could be extended to AMD and PV domains.
> + */
> + if ( !is_hvm_domain(d) || !cpu_has_vmx )
> + return capabilities;
> +
> + capabilities = (1U << XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG) |
> + (1U << XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR) |
> + (1U << XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT) |
> + (1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST);
> +
> + /* Since we know this is on VMX, we can just call the hvm func */
> + if ( hvm_is_singlestep_supported() )
> + capabilities |= (1U << XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP);
> +
> + return capabilities;
> +}
> +
> #endif /* __ASM_X86_VM_EVENT_H__ */
> diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
> index b47a3fe..cc77d70 100644
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -464,6 +464,12 @@ struct domain
> /* vNUMA topology accesses are protected by rwlock. */
> rwlock_t vnuma_rwlock;
> struct vnuma_info *vnuma;
> +
> + /* Common monitor options */
> + struct {
> + unsigned int guest_request_enabled : 1;
> + unsigned int guest_request_sync : 1;
> + } monitor;
> };
>
> /* Protect updates/reads (resp.) of domain_list and domain_hash. */
> diff --git a/xen/include/xen/vm_event.h b/xen/include/xen/vm_event.h
> index 92c75ff..beda9fe 100644
> --- a/xen/include/xen/vm_event.h
> +++ b/xen/include/xen/vm_event.h
> @@ -24,6 +24,7 @@
> #define __VM_EVENT_H__
>
> #include <xen/sched.h>
> +#include <public/vm_event.h>
>
> /* Clean up on domain destruction */
> void vm_event_cleanup(struct domain *d);
> @@ -74,6 +75,14 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
> void vm_event_vcpu_pause(struct vcpu *v);
> void vm_event_vcpu_unpause(struct vcpu *v);
>
> +/*
> + * Monitor vm-events
> + */
> +int vm_event_monitor_traps(struct vcpu *v, uint8_t sync,
> + vm_event_request_t *req);
> +
> +void vm_event_monitor_guest_request(void);
> +
> #endif /* __VM_EVENT_H__ */
>
>
> --
> 2.5.0
>
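
As on x86, the guest raises the event itself via the hvm_op hypercall with a
NULL argument (a non-NULL handle gets -EINVAL, per the do_hvm_op hunk above).
A hedged in-guest sketch for a Linux guest built with Xen support follows;
HYPERVISOR_hvm_op() is the kernel's hypercall wrapper, the module boilerplate
is purely illustrative, and the hardcoded HVMOP_guest_request_vm_event value
is taken from Xen's public/hvm/hvm_op.h and should be checked against your
headers. Keep in mind the note in the commit message: on ARM the hypercall
instruction is not skipped automatically, so the monitor application must
advance the guest PC itself when it replies.

    /* Illustrative in-guest trigger; not part of this patch. */
    #include <linux/module.h>
    #include <asm/xen/hypercall.h>

    #ifndef HVMOP_guest_request_vm_event
    #define HVMOP_guest_request_vm_event 24  /* verify against public/hvm/hvm_op.h */
    #endif

    static int __init guest_request_init(void)
    {
        /* NULL argument, as required by do_hvm_op() on both architectures. */
        int rc = HYPERVISOR_hvm_op(HVMOP_guest_request_vm_event, NULL);

        pr_info("guest_request_vm_event: rc=%d\n", rc);
        return rc < 0 ? rc : 0;
    }

    static void __exit guest_request_exit(void) { }

    module_init(guest_request_init);
    module_exit(guest_request_exit);
    MODULE_LICENSE("GPL");
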
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel