
Re: [Xen-devel] [Patch 6/6] Xen/MCE: Cleanup guest vMCE check



>>> On 23.07.12 at 11:45, "Liu, Jinsong" <jinsong.liu@xxxxxxxxx> wrote:
> Xen/MCE: Cleanup guest vMCE check
> 
> This patch simplifies the vMCE logic by removing the guest vMCE check,
> since the hypervisor should be agnostic to the guest.
> 
> With the guest vMCE check, the hypervisor actively kills the guest when
> the guest is not ready for vMCE.
> Without the check, the hypervisor always injects the vMCE into the guest:
> if the guest is ready it handles the vMCE; if it is not, it kills itself.

How can the hypervisor know whether the guest would kill itself
in such a case? Removing the check here is valid only if correctness
of operation is still guaranteed from the hypervisor's perspective,
and the description suggests this isn't the case.

Jan

> Signed-off-by: Liu, Jinsong <jinsong.liu@xxxxxxxxx>
> 
> diff -r 3a1957059e65 xen/arch/x86/cpu/mcheck/mce_intel.c
> --- a/xen/arch/x86/cpu/mcheck/mce_intel.c     Mon Jul 16 21:14:20 2012 +0800
> +++ b/xen/arch/x86/cpu/mcheck/mce_intel.c     Mon Jul 16 21:35:55 2012 +0800
> @@ -671,12 +671,6 @@
>                  ASSERT(d);
>                  gfn = get_gpfn_from_mfn((bank->mc_addr) >> PAGE_SHIFT);
>  
> -                if ( !is_vmce_ready(bank, d) )
> -                {
> -                    printk("DOM%d not ready for vMCE\n", d->domain_id);
> -                    goto vmce_failed;
> -                }
> -
>                  if ( unmmap_broken_page(d, _mfn(mfn), gfn) )
>                  {
>                      printk("Unmap broken memory %lx for DOM%d failed\n",
> diff -r 3a1957059e65 xen/arch/x86/cpu/mcheck/vmce.c
> --- a/xen/arch/x86/cpu/mcheck/vmce.c  Mon Jul 16 21:14:20 2012 +0800
> +++ b/xen/arch/x86/cpu/mcheck/vmce.c  Mon Jul 16 21:35:55 2012 +0800
> @@ -406,74 +406,6 @@
>      return 0;
>  }
>  
> -static int is_hvm_vmce_ready(struct mcinfo_bank *bank, struct domain *d)
> -{
> -    struct vcpu *v;
> -    int no_vmce = 0, i;
> -
> -    if (!is_hvm_domain(d))
> -        return 0;
> -
> -    /* kill guest if not enabled vMCE */
> -    for_each_vcpu(d, v)
> -    {
> -        if (!(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_MCE))
> -        {
> -            no_vmce = 1;
> -            break;
> -        }
> -
> -        if (!mce_broadcast)
> -            break;
> -    }
> -
> -    if (no_vmce)
> -        return 0;
> -
> -    /* Guest has virtualized family/model information */
> -    for ( i = 0; i < MAX_CPUID_INPUT; i++ )
> -    {
> -        if (d->arch.cpuids[i].input[0] == 0x1)
> -        {
> -            uint32_t veax = d->arch.cpuids[i].eax, vfam, vmod;
> -
> -            vfam = (veax >> 8) & 15;
> -            vmod = (veax >> 4) & 15;
> -
> -            if (vfam == 0x6 || vfam == 0xf)
> -                vmod += ((veax >> 16) & 0xF) << 4;
> -            if (vfam == 0xf)
> -                vfam += (veax >> 20) & 0xff;
> -
> -            if ( ( vfam != boot_cpu_data.x86 ) ||
> -                 (vmod != boot_cpu_data.x86_model) )
> -            {
> -                dprintk(XENLOG_WARNING,
> -                    "No vmce for different virtual family/model cpuid\n");
> -                no_vmce = 1;
> -            }
> -            break;
> -        }
> -    }
> -
> -    if (no_vmce)
> -        return 0;
> -
> -    return 1;
> -}
> -
> -int is_vmce_ready(struct mcinfo_bank *bank, struct domain *d)
> -{
> -    if ( d == dom0)
> -        return dom0_vmce_enabled();
> -
> -    /* No vMCE to HVM guest now */
> -    if ( is_hvm_domain(d) )
> -        return is_hvm_vmce_ready(bank, d);
> -
> -    return 0;
> -}
> -
>  /* It's said some ram is setup as mmio_direct for UC cache attribute */
>  #define P2M_UNMAP_TYPES (p2m_to_mask(p2m_ram_rw) \
>                                  | p2m_to_mask(p2m_ram_logdirty) \
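
For reference, the vfam/vmod computation in the removed is_hvm_vmce_ready()
follows the standard CPUID leaf 1 EAX layout: the extended model field
applies for families 0x6 and 0xf, and the extended family field for family
0xf. A standalone sketch of that decoding (the input value is just an
illustrative example):

#include <stdint.h>
#include <stdio.h>

/* Decode x86 family/model from CPUID leaf 1 EAX, mirroring the removed
 * check's arithmetic (standard Intel SDM encoding). */
static void decode_family_model(uint32_t eax, uint32_t *fam, uint32_t *mod)
{
    *fam = (eax >> 8) & 0xf;
    *mod = (eax >> 4) & 0xf;

    if ( *fam == 0x6 || *fam == 0xf )      /* extended model */
        *mod += ((eax >> 16) & 0xf) << 4;

    if ( *fam == 0xf )                     /* extended family */
        *fam += (eax >> 20) & 0xff;
}

int main(void)
{
    uint32_t fam, mod;

    decode_family_model(0x000206a7, &fam, &mod); /* a Sandy Bridge value */
    printf("family 0x%x model 0x%x\n", fam, mod); /* family 0x6 model 0x2a */
    return 0;
}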



