Re: [Xen-devel] [PATCH 1/2] x86/hvm: Always do SMAP check when updating runstate_guest(v)
On 08/07/14 00:18, Feng Wu wrote:
> In the current implementation, we honor the guest's CPL and AC
> to determine whether to do the SMAP check for runstate_guest(v).
> However, this doesn't work: the VMCS field is invalid when we try
> to get the guest's SS via hvm_get_segment_register(), since the
> right VMCS has not been loaded for the current VCPU.
>
> In this patch, we always do the SMAP check when updating
> runstate_guest(v), provided the guest has SMAP enabled.
>
> Reported-by: Sander Eikelenboom <linux@xxxxxxxxxxxxxx>
> Signed-off-by: Feng Wu <feng.wu@xxxxxxxxx>
Why can't the VMCS be reloaded in vmx_ctxt_switch_to() rather than
vmx_do_resume()? The problem is not with updating the runstate area
per se, but with using guest_walk_tables() during a context switch
before the VMCS has been loaded.
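
A minimal sketch of that alternative, assuming vmx_load_vmcs() (currently a
static helper in vmcs.c) or an equivalent wrapper were callable from the
context-switch path; this is an illustration of the idea, not a tested patch:

    static void vmx_ctxt_switch_to(struct vcpu *v)
    {
        /* ... existing restore of guest register/MSR state ... */

        /*
         * Make v's VMCS current here, so that code reached later in the
         * context switch (e.g. guest_walk_tables() via
         * update_runstate_area()) can safely read guest segment state
         * with hvm_get_segment_register().
         */
        vmx_load_vmcs(v);
    }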
> ---
> xen/arch/x86/domain.c | 15 ++++++++++++---
> xen/arch/x86/mm/guest_walk.c | 41 ++++++++++++++++++++++++++++-------------
> xen/include/asm-x86/domain.h | 15 ++++++++++++++-
> 3 files changed, 54 insertions(+), 17 deletions(-)
>
> diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
> index e896210..b0c8810 100644
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -1349,22 +1349,31 @@ static void paravirt_ctxt_switch_to(struct vcpu *v)
> }
>
> /* Update per-VCPU guest runstate shared memory area (if registered). */
> -bool_t update_runstate_area(const struct vcpu *v)
> +bool_t update_runstate_area(struct vcpu *v)
> {
> + bool_t rc;
> +
> if ( guest_handle_is_null(runstate_guest(v)) )
> return 1;
>
> + v->arch.smap_check_policy = SMAP_CHECK_ENABLED;
> +
> if ( has_32bit_shinfo(v->domain) )
> {
> struct compat_vcpu_runstate_info info;
>
> XLAT_vcpu_runstate_info(&info, &v->runstate);
> __copy_to_guest(v->runstate_guest.compat, &info, 1);
> - return 1;
> + rc = 1;
> + goto out;
> }
>
> - return __copy_to_guest(runstate_guest(v), &v->runstate, 1) !=
> + rc = __copy_to_guest(runstate_guest(v), &v->runstate, 1) !=
> sizeof(v->runstate);
> +
> +out:
> + v->arch.smap_check_policy = SMAP_CHECK_HONOR_CPL_AC;
> + return rc;
> }
>
> static void _update_runstate_area(struct vcpu *v)
> diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
> index bb38fda..1afa7fd 100644
> --- a/xen/arch/x86/mm/guest_walk.c
> +++ b/xen/arch/x86/mm/guest_walk.c
> @@ -164,25 +164,40 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
> struct segment_register seg;
> const struct cpu_user_regs *regs = guest_cpu_user_regs();
>
> - hvm_get_segment_register(v, x86_seg_ss, &seg);
> -
> /* SMEP: kernel-mode instruction fetches from user-mode mappings
> * should fault. Unlike NX or invalid bits, we're looking for _all_
> * entries in the walk to have _PAGE_USER set, so we need to do the
> * whole walk as if it were a user-mode one and then invert the answer. */
> smep = hvm_smep_enabled(v) && (pfec & PFEC_insn_fetch);
>
> - /*
> - * SMAP: kernel-mode data accesses from user-mode mappings should fault
> - * A fault is considered as a SMAP violation if the following
> - * conditions come true:
> - * - X86_CR4_SMAP is set in CR4
> - * - A user page is accessed
> - * - CPL = 3 or X86_EFLAGS_AC is clear
> - * - Page fault in kernel mode
> - */
> - smap = hvm_smap_enabled(v) &&
> - ((seg.attr.fields.dpl == 3) || !(regs->eflags & X86_EFLAGS_AC));
> + switch ( v->arch.smap_check_policy )
> + {
> + case SMAP_CHECK_HONOR_CPL_AC:
> + hvm_get_segment_register(v, x86_seg_ss, &seg);
> +
> + /*
> + * SMAP: kernel-mode data accesses from user-mode mappings
> + * should fault.
> + * A fault is considered as a SMAP violation if the following
> + * conditions come true:
> + * - X86_CR4_SMAP is set in CR4
> + * - A user page is accessed
> + * - CPL = 3 or X86_EFLAGS_AC is clear
> + * - Page fault in kernel mode
> + */
> + smap = hvm_smap_enabled(v) &&
> + ((seg.attr.fields.dpl == 3) ||
> + !(regs->eflags & X86_EFLAGS_AC));
> + break;
> + case SMAP_CHECK_ENABLED:
> + smap = hvm_smap_enabled(v);
Can you describe what behavioural change this will cause? From my
understanding, for a guest which has enabled CR4.SMAP, it will
unconditionally cause an -EFAULT for runstate areas in user pagetables?
~Andrew
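
(For reference, a guest registers its runstate area roughly as below, using
the public VCPUOP interface from xen/include/public/vcpu.h; the variable name
per_cpu_runstate is made up. If the address handed over lies in a user-mode
mapping, an unconditional SMAP check on the hypervisor's copy-out would make
that copy fault, which is the behaviour being questioned above.)

    /* Hypothetical guest-side registration of the runstate area. */
    struct vcpu_register_runstate_memory_area area = {
        .addr.v = &per_cpu_runstate,      /* made-up guest variable */
    };

    rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
                            smp_processor_id(), &area);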
> + break;
> + case SMAP_CHECK_DISABLED:
> + break;
> + default:
> + printk(XENLOG_INFO "Invalid SMAP check type!\n");
> + break;
> + }
> }
>
> if ( smep || smap )
> diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
> index abf55fb..d7cac4f 100644
> --- a/xen/include/asm-x86/domain.h
> +++ b/xen/include/asm-x86/domain.h
> @@ -446,13 +446,26 @@ struct arch_vcpu
>
> /* A secondary copy of the vcpu time info. */
> XEN_GUEST_HANDLE(vcpu_time_info_t) time_info_guest;
> +
> + /*
> + * The SMAP check policy when updating runstate_guest(v) and the
> + * secondary system time.
> + * SMAP_CHECK_HONOR_CPL_AC - honor the guest's CPL and AC
> + * SMAP_CHECK_ENABLED - enable the check
> + * SMAP_CHECK_DISABLED - disable the check
> + */
> + uint8_t smap_check_policy;
> } __cacheline_aligned;
>
> +#define SMAP_CHECK_HONOR_CPL_AC 0
> +#define SMAP_CHECK_ENABLED 1
> +#define SMAP_CHECK_DISABLED 2
> +
> /* Shorthands to improve code legibility. */
> #define hvm_vmx hvm_vcpu.u.vmx
> #define hvm_svm hvm_vcpu.u.svm
>
> -bool_t update_runstate_area(const struct vcpu *);
> +bool_t update_runstate_area(struct vcpu *);
> bool_t update_secondary_system_time(const struct vcpu *,
> struct vcpu_time_info *);
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel