Re: [Xen-devel] [PATCH] x86: fix CMCI injection
On 26.02.13 07:04, Keir Fraser wrote:
> On 25/02/2013 16:49, "Jan Beulich" <JBeulich@xxxxxxxx> wrote:
>> This fixes the wrong use of literal vector 0xF7 with an "int"
>> instruction (invalidated by 25113:14609be41f36) and the fact that
>> doing the injection via a software interrupt was never valid anyway
>> (because cmci_interrupt() acks the LAPIC, which does the wrong thing
>> if the interrupt didn't get delivered through it). In order to do the
>> latter, the patch introduces send_IPI_self(), at once removing two
>> open-coded uses of "genapic" in the IRQ handling code.
>>
>> Reported-by: Yongjie Ren <yongjie.ren@xxxxxxxxx>
>> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
>
> Acked-by: Keir Fraser <keir@xxxxxxx>

One nit inline.
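Why the LAPIC ack makes software-interrupt injection invalid: when the
local APIC delivers an interrupt, the CPU sets the vector's bit in the
APIC in-service register (ISR), and the EOI written by the handler
retires that bit. A software "int" enters the handler through the IDT
alone, so no ISR bit is ever set, and the handler's EOI instead retires
whatever unrelated interrupt happens to be in service. A minimal C
sketch of the two paths; the handler shape and the ack_APIC_irq() /
send_IPI_self() declarations are assumptions mirroring the patch, not
verbatim Xen code:

    #include <stdint.h>

    /* Illustrative sketch only; declarations assumed from Xen internals. */
    extern uint8_t cmci_apic_vector;  /* vector assigned by intel_init_cmci() */
    void send_IPI_self(int vector);   /* helper introduced by this patch */
    void ack_APIC_irq(void);          /* writes the local APIC EOI register */

    /* Old path: enters the handler via the IDT alone. The LAPIC never
     * delivered the vector, so no in-service (ISR) bit is set for it. */
    static void inject_via_software_int(void)
    {
        __asm__ __volatile__("int $0xf7"); /* literal vector, stale since 25113 */
    }

    /* The CMCI handler acks the LAPIC unconditionally. After the old
     * path, this EOI has no matching ISR bit and retires some other
     * in-service interrupt (or nothing), corrupting interrupt state. */
    void cmci_handler_sketch(void)
    {
        ack_APIC_irq();
        /* ... poll the CMCI-capable banks ... */
    }

    /* New path: a self-IPI travels through the local APIC, the ISR bit
     * for cmci_apic_vector is set on acceptance, and the handler's EOI
     * therefore acknowledges exactly this interrupt. */
    static void inject_via_ipi(void)
    {
        send_IPI_self(cmci_apic_vector);
    }

The separate send_IPI_self() call for the current CPU in do_mca() is
presumably there because not every genapic's send_IPI_mask() is
guaranteed to cover the sending CPU itself.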
>> --- a/xen/arch/x86/cpu/mcheck/mce.c
>> +++ b/xen/arch/x86/cpu/mcheck/mce.c
>> @@ -34,6 +34,7 @@ bool_t __read_mostly mce_broadcast = 0;
>>  bool_t is_mc_panic;
>>  unsigned int __read_mostly nr_mce_banks;
>>  unsigned int __read_mostly firstbank;
>> +uint8_t __read_mostly cmci_apic_vector;
>>
>>  DEFINE_PER_CPU_READ_MOSTLY(struct mca_banks *, poll_bankmask);
>>  DEFINE_PER_CPU_READ_MOSTLY(struct mca_banks *, no_cmci_banks);
>> @@ -1198,12 +1199,6 @@ static void x86_mc_mceinject(void *data)
>>      __asm__ __volatile__("int $0x12");
>>  }
>>
>> -static void x86_cmci_inject(void *data)
>> -{
>> -    printk("Simulating CMCI on cpu %d\n", smp_processor_id());
>> -    __asm__ __volatile__("int $0xf7");
>> -}
>> -
>>  #if BITS_PER_LONG == 64
>>
>>  #define ID2COOKIE(id) ((mctelem_cookie_t)(id))
>> @@ -1479,11 +1474,15 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_m
>>              on_selected_cpus(cpumap, x86_mc_mceinject, NULL, 1);
>>              break;
>>          case XEN_MC_INJECT_TYPE_CMCI:
>> -            if ( !cmci_support )
>> +            if ( !cmci_apic_vector )

cmci_apic_vector is uninitialized here when the platform does not
support CMCI.

Christoph

>>                  ret = x86_mcerr(
>>                      "No CMCI supported in platform\n", -EINVAL);
>>              else
>> -                on_selected_cpus(cpumap, x86_cmci_inject, NULL, 1);
>> +            {
>> +                if ( cpumask_test_cpu(smp_processor_id(), cpumap) )
>> +                    send_IPI_self(cmci_apic_vector);
>> +                send_IPI_mask(cpumap, cmci_apic_vector);
>> +            }
>>              break;
>>          default:
>>              ret = x86_mcerr("Wrong mca type\n", -EINVAL);
>> --- a/xen/arch/x86/cpu/mcheck/mce.h
>> +++ b/xen/arch/x86/cpu/mcheck/mce.h
>> @@ -37,6 +37,8 @@ enum mcheck_type {
>>      mcheck_intel
>>  };
>>
>> +extern uint8_t cmci_apic_vector;
>> +
>>  /* Init functions */
>>  enum mcheck_type amd_mcheck_init(struct cpuinfo_x86 *c);
>>  enum mcheck_type intel_mcheck_init(struct cpuinfo_x86 *c, bool_t bsp);
>> --- a/xen/arch/x86/cpu/mcheck/mce_intel.c
>> +++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
>> @@ -644,7 +644,6 @@ static void intel_init_cmci(struct cpuin
>>  {
>>      u32 l, apic;
>>      int cpu = smp_processor_id();
>> -    static uint8_t cmci_apic_vector;
>>
>>      if (!mce_available(c) || !cmci_support) {
>>          if (opt_cpu_info)
>> --- a/xen/arch/x86/irq.c
>> +++ b/xen/arch/x86/irq.c
>> @@ -646,7 +646,7 @@ void irq_move_cleanup_interrupt(struct c
>>           * to myself.
>>           */
>>          if (irr & (1 << (vector % 32))) {
>> -            genapic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
>> +            send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
>>              TRACE_3D(TRC_HW_IRQ_MOVE_CLEANUP_DELAY,
>>                       irq, vector, smp_processor_id());
>>              goto unlock;
>> @@ -692,7 +692,7 @@ static void send_cleanup_vector(struct i
>>
>>      cpumask_and(&cleanup_mask, desc->arch.old_cpu_mask, &cpu_online_map);
>>      desc->arch.move_cleanup_count = cpumask_weight(&cleanup_mask);
>> -    genapic->send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
>> +    send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
>>
>>      desc->arch.move_in_progress = 0;
>>  }
>> --- a/xen/arch/x86/smp.c
>> +++ b/xen/arch/x86/smp.c
>> @@ -38,6 +38,11 @@ void send_IPI_mask(const cpumask_t *mask
>>      genapic->send_IPI_mask(mask, vector);
>>  }
>>
>> +void send_IPI_self(int vector)
>> +{
>> +    genapic->send_IPI_self(vector);
>> +}
>> +
>>  /*
>>   * Some notes on x86 processor bugs affecting SMP operation:
>>   *
>> --- a/xen/include/asm-x86/smp.h
>> +++ b/xen/include/asm-x86/smp.h
>> @@ -29,7 +29,8 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
>>
>>  void smp_send_nmi_allbutself(void);
>>
>> -void send_IPI_mask(const cpumask_t *mask, int vector);
>> +void send_IPI_mask(const cpumask_t *, int vector);
>> +void send_IPI_self(int vector);
>>
>>  extern void (*mtrr_hook) (void);
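Regarding the inline nit above: the new `!cmci_apic_vector` test relies
on the C rule that a file-scope object without an initializer has static
storage duration and is therefore zero-initialized, so the value stays 0
whenever intel_init_cmci() never assigns a vector. A small,
self-contained sketch of that property (the name mirrors the patch, but
the program is illustrative only):

    #include <stdio.h>
    #include <stdint.h>

    /* File scope, no initializer: static storage duration, hence
     * zero-initialized before main() runs (C99 6.7.8p10). */
    uint8_t cmci_apic_vector;

    int main(void)
    {
        /* Mirrors the XEN_MC_INJECT_TYPE_CMCI check in do_mca(): a zero
         * vector means no CMCI vector was ever set up. */
        if ( !cmci_apic_vector )
            printf("No CMCI vector set up (value %u)\n",
                   (unsigned)cmci_apic_vector);
        return 0;
    }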
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel