
Re: [Xen-devel] [PATCH v3] x86/HVM: make hvm_efer_valid() honor guest features



On 15/01/15 15:24, Jan Beulich wrote:
> Following the earlier similar change validating CR4 modifications.
>
> Note that this requires a non-obvious adjustment to hvm_cpuid(): On
> 32-bit hosts/tool stacks, the SYSCALL feature flag will be seen clear
> on Intel CPUs, yet 64-bit guests legitimately expect the feature to
> be available. Mimic hardware behavior: When executed from a 64-bit
> code segment, force the feature flag on.
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
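
For reference, the hardware behaviour being mimicked here: Intel CPUs
report CPUID.80000001H:EDX[11] (SYSCALL) as clear unless CPUID is
executed from a 64-bit code segment, even though SYSCALL/SYSRET work
there. A hypothetical guest-side probe (illustration only, not part of
the patch) showing what the forced flag guarantees to a 64-bit guest:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical probe (not part of the patch): reads CPUID leaf
 * 0x80000001 and tests EDX bit 11, the SYSCALL feature flag that
 * the patched hvm_cpuid() forces on for guests currently executing
 * in 64-bit mode. */
static int has_syscall(void)
{
    uint32_t eax, ebx, ecx, edx;

    __asm__ volatile ( "cpuid"
                       : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                       : "a" (0x80000001u) );

    return !!(edx & (1u << 11));
}

int main(void)
{
    printf("SYSCALL %savailable\n", has_syscall() ? "" : "not ");
    return 0;
}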

> ---
> v3: Drop cr0_pg > 0 test for LMA/LME check: This would need to be >= 0,
>     which is then redundant with the check for EFER_LMA (which gets
>     cleared when a negative cr0_pg is passed). Force SYSCALL
>     feature flag on when guest is in 64-bit mode.
> v2: consider CR0.PG during restore when checking EFER.LMA
>
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -1672,20 +1672,53 @@ static int hvm_save_cpu_ctxt(struct doma
>      return 0;
>  }
>  
> -static bool_t hvm_efer_valid(struct domain *d,
> -                             uint64_t value, uint64_t efer_validbits)
> +static bool_t hvm_efer_valid(const struct vcpu *v, uint64_t value,
> +                             signed int cr0_pg)
>  {
> -    if ( nestedhvm_enabled(d) && cpu_has_svm )
> -        efer_validbits |= EFER_SVME;
> +    unsigned int ext1_ecx = 0, ext1_edx = 0;
>  
> -    return !((value & ~efer_validbits) ||
> -             ((sizeof(long) != 8) && (value & EFER_LME)) ||
> -             (!cpu_has_svm && (value & EFER_SVME)) ||
> -             (!cpu_has_nx && (value & EFER_NX)) ||
> -             (!cpu_has_syscall && (value & EFER_SCE)) ||
> -             (!cpu_has_lmsl && (value & EFER_LMSLE)) ||
> -             (!cpu_has_ffxsr && (value & EFER_FFXSE)) ||
> -             ((value & (EFER_LME|EFER_LMA)) == EFER_LMA));
> +    if ( cr0_pg < 0 && !is_hardware_domain(v->domain) )
> +    {
> +        unsigned int level;
> +
> +        ASSERT(v == current);
> +        hvm_cpuid(0x80000000, &level, NULL, NULL, NULL);
> +        if ( level >= 0x80000001 )
> +            hvm_cpuid(0x80000001, NULL, NULL, &ext1_ecx, &ext1_edx);
> +    }
> +    else
> +    {
> +        ext1_edx = boot_cpu_data.x86_capability[X86_FEATURE_LM / 32];
> +        ext1_ecx = boot_cpu_data.x86_capability[X86_FEATURE_SVM / 32];
> +    }
> +
> +    if ( (value & EFER_SCE) &&
> +         !(ext1_edx & cpufeat_mask(X86_FEATURE_SYSCALL)) )
> +        return 0;
> +
> +    if ( (value & (EFER_LME | EFER_LMA)) &&
> +         !(ext1_edx & cpufeat_mask(X86_FEATURE_LM)) )
> +        return 0;
> +
> +    if ( (value & EFER_LMA) && (!(value & EFER_LME) || !cr0_pg) )
> +        return 0;
> +
> +    if ( (value & EFER_NX) && !(ext1_edx & cpufeat_mask(X86_FEATURE_NX)) )
> +        return 0;
> +
> +    if ( (value & EFER_SVME) &&
> +         (!(ext1_ecx & cpufeat_mask(X86_FEATURE_SVM)) ||
> +          !nestedhvm_enabled(v->domain)) )
> +        return 0;
> +
> +    if ( (value & EFER_LMSLE) && !cpu_has_lmsl )
> +        return 0;
> +
> +    if ( (value & EFER_FFXSE) &&
> +         !(ext1_edx & cpufeat_mask(X86_FEATURE_FFXSR)) )
> +        return 0;
> +
> +    return 1;
>  }
>  
>  /* These reserved bits in lower 32 remain 0 after any load of CR0 */
> @@ -1763,7 +1796,6 @@ static int hvm_load_cpu_ctxt(struct doma
>      struct vcpu *v;
>      struct hvm_hw_cpu ctxt;
>      struct segment_register seg;
> -    uint64_t efer_validbits;
>  
>      /* Which vcpu is this? */
>      vcpuid = hvm_load_instance(h);
> @@ -1794,9 +1826,7 @@ static int hvm_load_cpu_ctxt(struct doma
>          return -EINVAL;
>      }
>  
> -    efer_validbits = EFER_FFXSE | EFER_LMSLE | EFER_LME | EFER_LMA
> -                   | EFER_NX | EFER_SCE;
> -    if ( !hvm_efer_valid(d, ctxt.msr_efer, efer_validbits) )
> +    if ( !hvm_efer_valid(v, ctxt.msr_efer, MASK_EXTR(ctxt.cr0, X86_CR0_PG)) )
>      {
>          printk(XENLOG_G_ERR "HVM%d restore: bad EFER %#" PRIx64 "\n",
>                 d->domain_id, ctxt.msr_efer);
> @@ -2936,12 +2966,10 @@ err:
>  int hvm_set_efer(uint64_t value)
>  {
>      struct vcpu *v = current;
> -    uint64_t efer_validbits;
>  
>      value &= ~EFER_LMA;
>  
> -    efer_validbits = EFER_FFXSE | EFER_LMSLE | EFER_LME | EFER_NX | EFER_SCE;
> -    if ( !hvm_efer_valid(v->domain, value, efer_validbits) )
> +    if ( !hvm_efer_valid(v, value, -1) )
>      {
>          gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
>                   "EFER: %#"PRIx64"\n", value);
> @@ -4286,6 +4314,9 @@ void hvm_cpuid(unsigned int input, unsig
>          /* Only provide PSE36 when guest runs in 32bit PAE or in long mode */
>          if ( !(hvm_pae_enabled(v) || hvm_long_mode_enabled(v)) )
>              *edx &= ~cpufeat_mask(X86_FEATURE_PSE36);
> +        /* Force SYSCALL when guest is in 64-bit mode. */
> +        if ( hvm_guest_x86_mode(v) == 8 )
> +            *edx |= cpufeat_mask(X86_FEATURE_SYSCALL);
>          /* Hide data breakpoint extensions if the hardware has no support. */
>          if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
>              *ecx &= ~cpufeat_mask(X86_FEATURE_DBEXT);
>
>
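
One note for readers on the new cr0_pg parameter: it is effectively
tri-state. The restore path passes the saved CR0.PG bit (0 or 1)
extracted with MASK_EXTR(), while hvm_set_efer() passes -1, which makes
hvm_efer_valid() query the guest's visible CPUID via hvm_cpuid()
instead of raw host features. A minimal, self-contained sketch of the
extraction, assuming MASK_EXTR() has Xen's usual field-extraction
semantics (extract the field of v selected by mask m, shifted down to
bit 0):

#include <stdint.h>
#include <stdio.h>

/* Assumed to match Xen's MASK_EXTR() definition. */
#define MASK_EXTR(v, m) (((v) & (m)) / ((m) & -(m)))

#define X86_CR0_PG 0x80000000U   /* CR0.PG is bit 31 */

int main(void)
{
    uint32_t cr0 = 0x80050033U;  /* example CR0 value, paging enabled */

    /* The restore call site, hvm_efer_valid(v, ctxt.msr_efer,
     * MASK_EXTR(ctxt.cr0, X86_CR0_PG)), receives 1 here; with paging
     * off it would receive 0. The runtime hvm_set_efer() path passes
     * -1 instead. */
    printf("cr0_pg = %u\n", MASK_EXTR(cr0, X86_CR0_PG));
    return 0;
}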



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel