# HG changeset patch
# User Christoph Egger
# Date 1347276743 -7200

Factor common mc code out of intel specific code and move it into
common files. No functional changes.

Signed-off-by: Christoph Egger

diff -r 06f42b46c057 -r c45743df3686 xen/arch/x86/cpu/mcheck/Makefile
--- a/xen/arch/x86/cpu/mcheck/Makefile
+++ b/xen/arch/x86/cpu/mcheck/Makefile
@@ -2,10 +2,12 @@ obj-y += amd_nonfatal.o
 obj-y += k7.o
 obj-y += amd_k8.o
 obj-y += amd_f10.o
+obj-y += mcbarrier.o
 obj-y += mctelem.o
 obj-y += mce.o
 obj-y += mce-apei.o
 obj-y += mce_intel.o
 obj-y += mce_amd_quirks.o
+obj-y += mcutil.o
 obj-y += non-fatal.o
 obj-y += vmce.o
diff -r 06f42b46c057 -r c45743df3686 xen/arch/x86/cpu/mcheck/mcbarrier.c
--- /dev/null
+++ b/xen/arch/x86/cpu/mcheck/mcbarrier.c
@@ -0,0 +1,57 @@
+#include "mcbarrier.h"
+#include "mcutil.h"
+#include "mce.h"
+
+void mce_barrier_init(struct mce_softirq_barrier *bar)
+{
+    atomic_set(&bar->val, 0);
+    atomic_set(&bar->ingen, 0);
+    atomic_set(&bar->outgen, 0);
+}
+
+void mce_barrier_dec(struct mce_softirq_barrier *bar)
+{
+    atomic_inc(&bar->outgen);
+    wmb();
+    atomic_dec(&bar->val);
+}
+
+void mce_barrier_enter(struct mce_softirq_barrier *bar)
+{
+    int gen;
+
+    if (!mce_broadcast)
+        return;
+    atomic_inc(&bar->ingen);
+    gen = atomic_read(&bar->outgen);
+    mb();
+    atomic_inc(&bar->val);
+    while ( atomic_read(&bar->val) != num_online_cpus() &&
+            atomic_read(&bar->outgen) == gen ) {
+        mb();
+        mce_panic_check();
+    }
+}
+
+void mce_barrier_exit(struct mce_softirq_barrier *bar)
+{
+    int gen;
+
+    if (!mce_broadcast)
+        return;
+    atomic_inc(&bar->outgen);
+    gen = atomic_read(&bar->ingen);
+    mb();
+    atomic_dec(&bar->val);
+    while ( atomic_read(&bar->val) != 0 &&
+            atomic_read(&bar->ingen) == gen ) {
+        mb();
+        mce_panic_check();
+    }
+}
+
+void mce_barrier(struct mce_softirq_barrier *bar)
+{
+    mce_barrier_enter(bar);
+    mce_barrier_exit(bar);
+}
diff -r 06f42b46c057 -r c45743df3686 xen/arch/x86/cpu/mcheck/mcbarrier.h
--- /dev/null
+++ b/xen/arch/x86/cpu/mcheck/mcbarrier.h
@@ -0,0 +1,44 @@
+#ifndef _MCHECK_BARRIER_H
+#define _MCHECK_BARRIER_H
+
+#include <asm/atomic.h>
+
+/* MCE handling */
+struct mce_softirq_barrier {
+    atomic_t val;
+    atomic_t ingen;
+    atomic_t outgen;
+};
+
+/*
+ * Initialize a barrier. Just set it to 0.
+ */
+void mce_barrier_init(struct mce_softirq_barrier *);
+
+/*
+ * This function will need to be used when offlining a CPU in the
+ * recovery actions.
+ *
+ * Decrement a barrier only. Needed for cases where the CPU
+ * in question can't do it itself (e.g. it is being offlined).
+ */
+void mce_barrier_dec(struct mce_softirq_barrier *);
+
+/*
+ * Increment the generation number and the value. The generation number
+ * is incremented when entering a barrier. This way, it can be checked
+ * on exit if a CPU is trying to re-enter the barrier. This can happen
+ * if the first CPU to make it out immediately exits or re-enters, while
+ * another CPU that is still in the loop becomes otherwise occupied
+ * (e.g. it needs to service an interrupt, etc), missing the value
+ * it's waiting for.
+ *
+ * These barrier functions should always be paired, so that the
+ * counter value will reach 0 again after all CPUs have exited.
+ */
+void mce_barrier_enter(struct mce_softirq_barrier *);
+void mce_barrier_exit(struct mce_softirq_barrier *);
+
+void mce_barrier(struct mce_softirq_barrier *);
+
+#endif /* _MCHECK_BARRIER_H */
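To make the pairing requirement concrete, here is a minimal usage sketch. It is illustrative only and not part of the patch: example_bar and example_broadcast_round are invented names, and a real caller must first initialize the barrier with mce_barrier_init(), as mce_intel.c does from mce_handler_init().

    #include "mcbarrier.h"

    /* Hypothetical barrier instance; mce_intel.c keeps its own static
     * instances (mce_inside_bar, mce_severity_bar, mce_trap_bar). */
    static struct mce_softirq_barrier example_bar;

    /* One lock-step round, executed by every online CPU. */
    static void example_broadcast_round(void)
    {
        /* Spin until all online CPUs have arrived, the barrier
         * generation turns over, or a fatal MCE is declared. */
        mce_barrier_enter(&example_bar);

        /* ... work that must not start before all CPUs rendezvous,
         * e.g. collecting this CPU's MCA bank state ... */

        /* Spin until every CPU is done, so val drops back to 0 and
         * the barrier can be reused for the next round. */
        mce_barrier_exit(&example_bar);
    }
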
diff -r 06f42b46c057 -r c45743df3686 xen/arch/x86/cpu/mcheck/mce_intel.c
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
@@ -16,6 +16,8 @@
 #include <asm/apic.h>
 #include "mce.h"
 #include "x86_mca.h"
+#include "mcbarrier.h"
+#include "mcutil.h"
 
 DEFINE_PER_CPU(struct mca_banks *, mce_banks_owned);
 DEFINE_PER_CPU(struct mca_banks *, no_cmci_banks);
@@ -164,13 +166,6 @@ static void intel_init_thermal(struct cp
 }
 #endif /* CONFIG_X86_MCE_THERMAL */
 
-/* MCE handling */
-struct mce_softirq_barrier {
-    atomic_t val;
-    atomic_t ingen;
-    atomic_t outgen;
-};
-
 static struct mce_softirq_barrier mce_inside_bar, mce_severity_bar;
 static struct mce_softirq_barrier mce_trap_bar;
@@ -186,9 +181,6 @@ static atomic_t severity_cpu = ATOMIC_IN
 static atomic_t found_error = ATOMIC_INIT(0);
 static cpumask_t mce_fatal_cpus;
 
-static void mce_barrier_enter(struct mce_softirq_barrier *);
-static void mce_barrier_exit(struct mce_softirq_barrier *);
-
 static const struct mca_error_handler *__read_mostly mce_dhandlers;
 static const struct mca_error_handler *__read_mostly mce_uhandlers;
 static unsigned int __read_mostly mce_dhandler_num;
@@ -385,25 +377,6 @@ static int mce_urgent_action(struct cpu_
  * Round2: Do all MCE processing logic as normal.
  */
 
-static void mce_panic_check(void)
-{
-    if (is_mc_panic) {
-        local_irq_enable();
-        for ( ; ; )
-            halt();
-    }
-}
-
-/*
- * Initialize a barrier. Just set it to 0.
- */
-static void mce_barrier_init(struct mce_softirq_barrier *bar)
-{
-    atomic_set(&bar->val, 0);
-    atomic_set(&bar->ingen, 0);
-    atomic_set(&bar->outgen, 0);
-}
-
 static void mce_handler_init(void)
 {
     if (smp_processor_id() != 0)
@@ -417,21 +390,6 @@ static void mce_handler_init(void)
     spin_lock_init(&mce_logout_lock);
     open_softirq(MACHINE_CHECK_SOFTIRQ, mce_softirq);
 }
-#if 0
-/*
- * This function will need to be used when offlining a CPU in the
- * recovery actions.
- *
- * Decrement a barrier only. Needed for cases where the CPU
- * in question can't do it itself (e.g. it is being offlined).
- */
-static void mce_barrier_dec(struct mce_softirq_barrier *bar)
-{
-    atomic_inc(&bar->outgen);
-    wmb();
-    atomic_dec(&bar->val);
-}
-#endif
 
 static void mce_spin_lock(spinlock_t *lk)
 {
@@ -446,60 +404,6 @@ static void mce_spin_unlock(spinlock_t *
     spin_unlock(lk);
 }
 
-/*
- * Increment the generation number and the value. The generation number
- * is incremented when entering a barrier. This way, it can be checked
- * on exit if a CPU is trying to re-enter the barrier. This can happen
- * if the first CPU to make it out immediately exits or re-enters, while
- * another CPU that is still in the loop becomes otherwise occupied
- * (e.g. it needs to service an interrupt, etc), missing the value
- * it's waiting for.
- *
- * These barrier functions should always be paired, so that the
- * counter value will reach 0 again after all CPUs have exited.
- */
-static void mce_barrier_enter(struct mce_softirq_barrier *bar)
-{
-    int gen;
-
-    if (!mce_broadcast)
-        return;
-    atomic_inc(&bar->ingen);
-    gen = atomic_read(&bar->outgen);
-    mb();
-    atomic_inc(&bar->val);
-    while ( atomic_read(&bar->val) != num_online_cpus() &&
-            atomic_read(&bar->outgen) == gen ) {
-        mb();
-        mce_panic_check();
-    }
-}
-
-static void mce_barrier_exit(struct mce_softirq_barrier *bar)
-{
-    int gen;
-
-    if (!mce_broadcast)
-        return;
-    atomic_inc(&bar->outgen);
-    gen = atomic_read(&bar->ingen);
-    mb();
-    atomic_dec(&bar->val);
-    while ( atomic_read(&bar->val) != 0 &&
-            atomic_read(&bar->ingen) == gen ) {
-        mb();
-        mce_panic_check();
-    }
-}
-
-#if 0
-static void mce_barrier(struct mce_softirq_barrier *bar)
-{
-    mce_barrier_enter(bar);
-    mce_barrier_exit(bar);
-}
-#endif
-
 /* Intel MCE handler */
 static inline void intel_get_extended_msr(struct mcinfo_extended *ext, u32 msr)
 {
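Note that mce_barrier_dec(), previously dead code under #if 0 in mce_intel.c, is now built unconditionally in mcbarrier.c even though it still has no callers; per its comment, the intended user is CPU-offline recovery code. A hedged sketch of what such a caller could look like (example_drop_offlined_cpu is an invented name, not part of the patch):

    #include "mcbarrier.h"

    /* Hypothetical recovery-path helper: a CPU being offlined can no
     * longer call mce_barrier_exit() for itself, so a surviving CPU
     * bumps the exit generation and drops the count on its behalf. */
    static void example_drop_offlined_cpu(struct mce_softirq_barrier *bar)
    {
        /* ... runs after the victim CPU has left the online map, so
         * num_online_cpus() no longer counts it ... */
        mce_barrier_dec(bar);
    }
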
diff -r 06f42b46c057 -r c45743df3686 xen/arch/x86/cpu/mcheck/mcutil.c
--- /dev/null
+++ b/xen/arch/x86/cpu/mcheck/mcutil.c
@@ -0,0 +1,13 @@
+
+#include <asm/system.h>
+#include "mcutil.h"
+#include "mce.h"
+
+void mce_panic_check(void)
+{
+    if (is_mc_panic) {
+        local_irq_enable();
+        for ( ; ; )
+            halt();
+    }
+}
diff -r 06f42b46c057 -r c45743df3686 xen/arch/x86/cpu/mcheck/mcutil.h
--- /dev/null
+++ b/xen/arch/x86/cpu/mcheck/mcutil.h
@@ -0,0 +1,6 @@
+#ifndef _MCHECK_UTIL_H
+#define _MCHECK_UTIL_H
+
+void mce_panic_check(void);
+
+#endif /* _MCHECK_UTIL_H */
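Finally, mce_panic_check() is the bail-out polled inside every barrier spin loop: once a fatal error path sets is_mc_panic, a spinning CPU enables interrupts and halts forever instead of deadlocking the machine. A sketch of the pattern, illustrative only (example_spin_until_zero and its pending counter are invented; the includes assume mb() and atomic_t come from asm/system.h and asm/atomic.h):

    #include <asm/system.h>
    #include <asm/atomic.h>
    #include "mcutil.h"

    /* Hypothetical wait loop following the same pattern as the loops
     * in mcbarrier.c: poll mce_panic_check() on every iteration so
     * this CPU parks itself once is_mc_panic becomes set. */
    static void example_spin_until_zero(atomic_t *pending)
    {
        while ( atomic_read(pending) != 0 ) {
            mb();                /* force a fresh read of *pending */
            mce_panic_check();   /* does not return once is_mc_panic is set */
        }
    }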