|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH 1/2] vm_event: Sanitize vm_event response handling
On 09/13/2016 09:12 PM, Tamas K Lengyel wrote:
> Setting response flags in vm_event are only ever safe if the vCPUs are paused.
> To reflect this we move all checks within the if block that already checks
> whether this is the case. For checks that are only supported on one
> architecture, we relocate the bitmask operations to the arch-specific handlers
> to avoid the overhead on architectures that don't support them.
>
> Furthermore, we clean up the emulation checks so they more clearly represent
> the decision logic for when emulation should take place. As part of this we
> also set the stage to allow emulation in response to other types of events,
> not just mem_access violations.
>
> Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>
> ---
> Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Cc: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
> Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
> Cc: Julien Grall <julien.grall@xxxxxxx>
> ---
> xen/arch/x86/mm/p2m.c | 81
> +++++++++++++++++++-----------------------
> xen/arch/x86/vm_event.c | 35 +++++++++++++++++-
> xen/common/vm_event.c | 53 ++++++++++++++-------------
> xen/include/asm-arm/p2m.h | 4 +--
> xen/include/asm-arm/vm_event.h | 9 ++++-
> xen/include/asm-x86/p2m.h | 4 +--
> xen/include/asm-x86/vm_event.h | 5 ++-
> xen/include/xen/mem_access.h | 12 -------
> 8 files changed, 113 insertions(+), 90 deletions(-)
>
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index 7d14c3b..6c01868 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -1588,62 +1588,55 @@ void p2m_mem_paging_resume(struct domain *d,
> vm_event_response_t *rsp)
> }
> }
>
> -void p2m_mem_access_emulate_check(struct vcpu *v,
> - const vm_event_response_t *rsp)
> +bool_t p2m_mem_access_emulate_check(struct vcpu *v,
Judging by the reviews for my pending patch, I believe you'll be asked
to switch to bool / true / false here.
> + const vm_event_response_t *rsp)
> {
> - /* Mark vcpu for skipping one instruction upon rescheduling. */
> - if ( rsp->flags & VM_EVENT_FLAG_EMULATE )
> - {
> - xenmem_access_t access;
> - bool_t violation = 1;
> - const struct vm_event_mem_access *data = &rsp->u.mem_access;
> + xenmem_access_t access;
> + bool_t violation = 1;
> + const struct vm_event_mem_access *data = &rsp->u.mem_access;
>
> - if ( p2m_get_mem_access(v->domain, _gfn(data->gfn), &access) == 0 )
> + if ( p2m_get_mem_access(v->domain, _gfn(data->gfn), &access) == 0 )
> + {
> + switch ( access )
> {
> - switch ( access )
> - {
> - case XENMEM_access_n:
> - case XENMEM_access_n2rwx:
> - default:
> - violation = data->flags & MEM_ACCESS_RWX;
> - break;
> + case XENMEM_access_n:
> + case XENMEM_access_n2rwx:
> + default:
> + violation = data->flags & MEM_ACCESS_RWX;
> + break;
>
> - case XENMEM_access_r:
> - violation = data->flags & MEM_ACCESS_WX;
> - break;
> + case XENMEM_access_r:
> + violation = data->flags & MEM_ACCESS_WX;
> + break;
>
> - case XENMEM_access_w:
> - violation = data->flags & MEM_ACCESS_RX;
> - break;
> + case XENMEM_access_w:
> + violation = data->flags & MEM_ACCESS_RX;
> + break;
>
> - case XENMEM_access_x:
> - violation = data->flags & MEM_ACCESS_RW;
> - break;
> + case XENMEM_access_x:
> + violation = data->flags & MEM_ACCESS_RW;
> + break;
>
> - case XENMEM_access_rx:
> - case XENMEM_access_rx2rw:
> - violation = data->flags & MEM_ACCESS_W;
> - break;
> + case XENMEM_access_rx:
> + case XENMEM_access_rx2rw:
> + violation = data->flags & MEM_ACCESS_W;
> + break;
>
> - case XENMEM_access_wx:
> - violation = data->flags & MEM_ACCESS_R;
> - break;
> + case XENMEM_access_wx:
> + violation = data->flags & MEM_ACCESS_R;
> + break;
>
> - case XENMEM_access_rw:
> - violation = data->flags & MEM_ACCESS_X;
> - break;
> + case XENMEM_access_rw:
> + violation = data->flags & MEM_ACCESS_X;
> + break;
>
> - case XENMEM_access_rwx:
> - violation = 0;
> - break;
> - }
> + case XENMEM_access_rwx:
> + violation = 0;
> + break;
> }
> -
> - v->arch.vm_event->emulate_flags = violation ? rsp->flags : 0;
> -
> - if ( (rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA) )
> - v->arch.vm_event->emul_read_data = rsp->data.emul_read_data;
> }
> +
> + return violation;
> }
>
> void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
> diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
> index e938ca3..343b9c8 100644
> --- a/xen/arch/x86/vm_event.c
> +++ b/xen/arch/x86/vm_event.c
> @@ -18,6 +18,7 @@
> * License along with this program; If not, see
> <http://www.gnu.org/licenses/>.
> */
>
> +#include <asm/p2m.h>
> #include <asm/vm_event.h>
>
> /* Implicitly serialized by the domctl lock. */
> @@ -56,8 +57,12 @@ void vm_event_cleanup_domain(struct domain *d)
> d->arch.mem_access_emulate_each_rep = 0;
> }
>
> -void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
> +void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v,
> + vm_event_response_t *rsp)
> {
> + if ( !(rsp->flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP) )
> + return;
> +
> if ( !is_hvm_domain(d) )
> return;
>
> @@ -186,6 +191,34 @@ void vm_event_fill_regs(vm_event_request_t *req)
> req->data.regs.x86.cs_arbytes = seg.attr.bytes;
> }
>
> +void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
> +{
> + if ( !(rsp->flags & VM_EVENT_FLAG_EMULATE) )
> + {
> + v->arch.vm_event->emulate_flags = 0;
> + return;
> + }
> +
> + switch ( rsp->reason )
> + {
> + case VM_EVENT_REASON_MEM_ACCESS:
> + /*
> + * Emulate iff this is a response to a mem_access violation and there
> + * are still conflicting mem_access permissions in-place.
> + */
> + if ( p2m_mem_access_emulate_check(v, rsp) )
> + {
> + if ( rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA )
> + v->arch.vm_event->emul_read_data = rsp->data.emul_read_data;
> +
> + v->arch.vm_event->emulate_flags = rsp->flags;
> + }
> + break;
> + default:
> + break;
> + };
> +}
> +
> /*
> * Local variables:
> * mode: C
> diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
> index 8398af7..907ab40 100644
> --- a/xen/common/vm_event.c
> +++ b/xen/common/vm_event.c
> @@ -398,42 +398,41 @@ void vm_event_resume(struct domain *d, struct
> vm_event_domain *ved)
> * In some cases the response type needs extra handling, so here
> * we call the appropriate handlers.
> */
> - switch ( rsp.reason )
> - {
> -#ifdef CONFIG_X86
> - case VM_EVENT_REASON_MOV_TO_MSR:
> -#endif
> - case VM_EVENT_REASON_WRITE_CTRLREG:
> - vm_event_register_write_resume(v, &rsp);
> - break;
> -
> -#ifdef CONFIG_HAS_MEM_ACCESS
> - case VM_EVENT_REASON_MEM_ACCESS:
> - mem_access_resume(v, &rsp);
> - break;
> -#endif
>
> + /* Check flags which apply only when the vCPU is paused */
> + if ( atomic_read(&v->vm_event_pause_count) )
> + {
> #ifdef CONFIG_HAS_MEM_PAGING
> - case VM_EVENT_REASON_MEM_PAGING:
> - p2m_mem_paging_resume(d, &rsp);
> - break;
> + if ( rsp.reason == VM_EVENT_REASON_MEM_PAGING )
> + p2m_mem_paging_resume(d, &rsp);
> #endif
>
> - };
> + /*
> + * Check emulation flags in the arch-specific handler only, as it
> + * has to set arch-specific flags when supported, and to avoid
> + * bitmask overhead when it isn't supported.
> + */
> + vm_event_emulate_check(v, &rsp);
> +
> + /*
> + * Check in arch-specific handler to avoid bitmask overhead when
> + * not supported.
> + */
> + vm_event_register_write_resume(v, &rsp);
>
> - /* Check for altp2m switch */
> - if ( rsp.flags & VM_EVENT_FLAG_ALTERNATE_P2M )
> - p2m_altp2m_check(v, rsp.altp2m_idx);
> + /*
> + * Check in arch-specific handler to avoid bitmask overhead when
> + * not supported.
> + */
> + vm_event_toggle_singlestep(d, v, &rsp);
> +
> + /* Check for altp2m switch */
> + if ( rsp.flags & VM_EVENT_FLAG_ALTERNATE_P2M )
> + p2m_altp2m_check(v, rsp.altp2m_idx);
>
> - /* Check flags which apply only when the vCPU is paused */
> - if ( atomic_read(&v->vm_event_pause_count) )
> - {
> if ( rsp.flags & VM_EVENT_FLAG_SET_REGISTERS )
> vm_event_set_registers(v, &rsp);
>
> - if ( rsp.flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP )
> - vm_event_toggle_singlestep(d, v);
> -
> if ( rsp.flags & VM_EVENT_FLAG_VCPU_PAUSED )
> vm_event_vcpu_unpause(v);
> }
> diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
> index 53c4d78..5e9bc54 100644
> --- a/xen/include/asm-arm/p2m.h
> +++ b/xen/include/asm-arm/p2m.h
> @@ -121,10 +121,10 @@ typedef enum {
> p2m_to_mask(p2m_map_foreign)))
>
> static inline
> -void p2m_mem_access_emulate_check(struct vcpu *v,
> +bool_t p2m_mem_access_emulate_check(struct vcpu *v,
> const vm_event_response_t *rsp)
This returns bool_t ...
> {
> - /* Not supported on ARM. */
> + return false;
> }
... but you return false. Please switch to bool.
>
> static inline
> diff --git a/xen/include/asm-arm/vm_event.h b/xen/include/asm-arm/vm_event.h
> index 9482636..66f2474 100644
> --- a/xen/include/asm-arm/vm_event.h
> +++ b/xen/include/asm-arm/vm_event.h
> @@ -34,7 +34,8 @@ static inline void vm_event_cleanup_domain(struct domain *d)
> memset(&d->monitor, 0, sizeof(d->monitor));
> }
>
> -static inline void vm_event_toggle_singlestep(struct domain *d, struct vcpu
> *v)
> +static inline void vm_event_toggle_singlestep(struct domain *d, struct vcpu
> *v,
> + vm_event_response_t *rsp)
> {
> /* Not supported on ARM. */
> }
> @@ -45,4 +46,10 @@ void vm_event_register_write_resume(struct vcpu *v,
> vm_event_response_t *rsp)
> /* Not supported on ARM. */
> }
>
> +static inline
> +void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
> +{
> + /* Not supported on ARM. */
> +}
> +
> #endif /* __ASM_ARM_VM_EVENT_H__ */
> diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
> index 9fc9ead..1897def 100644
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -677,8 +677,8 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long
> gla,
>
> /* Check for emulation and mark vcpu for skipping one instruction
> * upon rescheduling if required. */
> -void p2m_mem_access_emulate_check(struct vcpu *v,
> - const vm_event_response_t *rsp);
> +bool_t p2m_mem_access_emulate_check(struct vcpu *v,
> + const vm_event_response_t *rsp);
Bool.
Thanks,
Razvan
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |