[Xen-devel] [PATCH V2] vm_event: make sure the domain is paused in key domctls
This patch pauses the domain for all writes through the 'ad' pointer in
monitor_domctl(), defers the domain_unpause() call until after the CRs
have been updated in the MONITOR_EVENT_WRITE_CTRLREG case, and makes
sure that the domain is paused for both the vm_event enable and disable
cases in vm_event_domctl().

Thanks go to Andrew Cooper for his review and suggestions.

Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
---
Changes since V1:
 - Annotated the domctl d != current->domain checks with
   /* no domain_pause() */.
 - Moved the XEN_DOMCTL_monitor_op current->domain check into
   monitor_domctl().
 - Reworked the return -EOPNOTSUPP statement so that it can no longer
   leave the domain accidentally paused.
 - Found an additional instance of a missing domain_pause() at
   XEN_DOMCTL_set_access_required.
 - Replaced the vm_event_enable() domain_pause() call with a comment
   pointing to XSA-99, which made sure that the domain is paused in
   libxc for the enable case (at Tamas K Lengyel's suggestion).
---
 xen/arch/x86/monitor.c | 32 +++++++++++++++++++-------------
 xen/common/domctl.c    | 12 ++++++------
 xen/common/vm_event.c  | 17 ++++++++++++++++-
 3 files changed, 41 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/monitor.c b/xen/arch/x86/monitor.c
index 7611f7b..1d43880 100644
--- a/xen/arch/x86/monitor.c
+++ b/xen/arch/x86/monitor.c
@@ -69,6 +69,9 @@ int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop)
     struct arch_domain *ad = &d->arch;
     uint32_t capabilities = get_capabilities(d);
 
+    if ( current->domain == d ) /* no domain_pause() */
+        return -EPERM;
+
     rc = xsm_vm_event_control(XSM_PRIV, d, mop->op, mop->event);
     if ( rc )
         return rc;
@@ -80,7 +83,9 @@ int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop)
         return 0;
 
     case XEN_DOMCTL_MONITOR_OP_EMULATE_EACH_REP:
-        d->arch.mem_access_emulate_each_rep = !!mop->event;
+        domain_pause(d);
+        ad->mem_access_emulate_each_rep = !!mop->event;
+        domain_unpause(d);
         return 0;
     }
 
@@ -109,6 +114,8 @@ int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop)
         if ( rc )
             return rc;
 
+        domain_pause(d);
+
         if ( mop->u.mov_to_cr.sync )
             ad->monitor.write_ctrlreg_sync |= ctrlreg_bitmask;
         else
@@ -119,20 +126,18 @@ int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop)
         else
             ad->monitor.write_ctrlreg_onchangeonly &= ~ctrlreg_bitmask;
 
-        domain_pause(d);
-
         if ( !status )
             ad->monitor.write_ctrlreg_enabled |= ctrlreg_bitmask;
         else
             ad->monitor.write_ctrlreg_enabled &= ~ctrlreg_bitmask;
 
-        domain_unpause(d);
-
         if ( mop->u.mov_to_cr.index == VM_EVENT_X86_CR3 )
             /* Latches new CR3 mask through CR0 code */
             for_each_vcpu ( d, v )
                 hvm_update_guest_cr(v, 0);
 
+        domain_unpause(d);
+
         break;
     }
 
@@ -145,16 +150,18 @@ int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop)
             return rc;
 
         if ( mop->op == XEN_DOMCTL_MONITOR_OP_ENABLE &&
+             mop->u.mov_to_msr.extended_capture &&
+             !hvm_enable_msr_exit_interception(d) )
+            return -EOPNOTSUPP;
+
+        domain_pause(d);
+
+        if ( mop->op == XEN_DOMCTL_MONITOR_OP_ENABLE &&
              mop->u.mov_to_msr.extended_capture )
-        {
-            if ( hvm_enable_msr_exit_interception(d) )
                 ad->monitor.mov_to_msr_extended = 1;
-            else
-                return -EOPNOTSUPP;
-        } else
+        else
             ad->monitor.mov_to_msr_extended = 0;
 
-        domain_pause(d);
         ad->monitor.mov_to_msr_enabled = !status;
         domain_unpause(d);
         break;
@@ -196,9 +203,8 @@ int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop)
         if ( rc )
             return rc;
 
-        ad->monitor.guest_request_sync = mop->u.guest_request.sync;
-
         domain_pause(d);
+        ad->monitor.guest_request_sync = mop->u.guest_request.sync;
         ad->monitor.guest_request_enabled = !status;
         domain_unpause(d);
         break;
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index dc1ea06..121a34a 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -709,7 +709,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
     }
 
     case XEN_DOMCTL_soft_reset:
-        if ( d == current->domain )
+        if ( d == current->domain ) /* no domain_pause() */
         {
             ret = -EINVAL;
             break;
@@ -1136,11 +1136,15 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
 
 #ifdef CONFIG_HAS_MEM_ACCESS
     case XEN_DOMCTL_set_access_required:
-        if ( unlikely(current->domain == d) )
+        if ( unlikely(current->domain == d) ) /* no domain_pause() */
             ret = -EPERM;
         else
+        {
+            domain_pause(d);
             p2m_get_hostp2m(d)->access_required =
                 op->u.access_required.access_required;
+            domain_unpause(d);
+        }
         break;
 #endif
 
@@ -1175,10 +1179,6 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
     }
 
     case XEN_DOMCTL_monitor_op:
-        ret = -EPERM;
-        if ( current->domain == d )
-            break;
-
         ret = monitor_domctl(d, &op->u.monitor_op);
         if ( !ret )
             copyback = 1;
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 28a7add..9f43a76 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -567,7 +567,7 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
     if ( rc )
         return rc;
 
-    if ( unlikely(d == current->domain) )
+    if ( unlikely(d == current->domain) ) /* no domain_pause() */
     {
         gdprintk(XENLOG_INFO, "Tried to do a memory event op on itself.\n");
         return -EINVAL;
@@ -624,6 +624,7 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
             if ( p2m->pod.entry_count )
                 break;
 
+            /* domain_pause() not required here, see XSA-99 */
             rc = vm_event_enable(d, vec, ved, _VPF_mem_paging,
                                  HVM_PARAM_PAGING_RING_PFN,
                                  mem_paging_notification);
@@ -632,7 +633,11 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
 
         case XEN_VM_EVENT_DISABLE:
             if ( ved->ring_page )
+            {
+                domain_pause(d);
                 rc = vm_event_disable(d, ved);
+                domain_unpause(d);
+            }
             break;
 
         case XEN_VM_EVENT_RESUME:
@@ -658,6 +663,7 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
         switch( vec->op )
         {
         case XEN_VM_EVENT_ENABLE:
+            /* domain_pause() not required here, see XSA-99 */
            rc = vm_event_enable(d, vec, ved, _VPF_mem_access,
                                 HVM_PARAM_MONITOR_RING_PFN,
                                 monitor_notification);
@@ -665,7 +671,11 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
 
         case XEN_VM_EVENT_DISABLE:
             if ( ved->ring_page )
+            {
+                domain_pause(d);
                 rc = vm_event_disable(d, ved);
+                domain_unpause(d);
+            }
             break;
 
         case XEN_VM_EVENT_RESUME:
@@ -701,6 +711,7 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
             if ( !hap_enabled(d) )
                 break;
 
+            /* domain_pause() not required here, see XSA-99 */
            rc = vm_event_enable(d, vec, ved, _VPF_mem_sharing,
                                 HVM_PARAM_SHARING_RING_PFN,
                                 mem_sharing_notification);
@@ -708,7 +719,11 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
 
        case XEN_VM_EVENT_DISABLE:
            if ( ved->ring_page )
+            {
+                domain_pause(d);
                rc = vm_event_disable(d, ved);
+                domain_unpause(d);
+            }
            break;
 
        case XEN_VM_EVENT_RESUME:
--
1.9.1
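
For readers unfamiliar with the idiom the patch applies throughout, a
minimal sketch of the pattern follows. This is a hypothetical helper for
illustration only, not part of the patch; it assumes Xen's real
domain_pause()/domain_unpause() primitives, the 'current' macro, and the
struct arch_domain monitor fields touched in the hunks above.

/*
 * Illustrative sketch only (hypothetical helper): the bracket this
 * patch enforces around every toggle of guest-visible monitor state.
 */
static int monitor_toggle_example(struct domain *d, bool_t enable)
{
    struct arch_domain *ad = &d->arch;

    /* A domain cannot pause itself, so reject requests targeting self. */
    if ( current->domain == d ) /* no domain_pause() */
        return -EPERM;

    /*
     * Anything that may fail happens *before* the pause, so no error
     * path can return with the domain still paused (cf. the reworked
     * -EOPNOTSUPP return in the mov-to-MSR case).
     */

    domain_pause(d); /* no vCPU runs while the state is inconsistent */

    ad->monitor.mov_to_msr_enabled = enable;

    /*
     * Any per-vCPU state that depends on the new setting is refreshed
     * here, before the unpause (cf. the hvm_update_guest_cr() loop in
     * the write-ctrlreg case).
     */

    domain_unpause(d);

    return 0;
}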