[Xen-changelog] [xen-unstable] x86: refactor mce code
# HG changeset patch
# User Christoph Egger <Christoph.Egger@xxxxxxx>
# Date 1347359312 -7200
# Node ID fc24482c07d2e69049ad729ca80ec6bdf1a0fa16
# Parent d9d4c7ed2fd2683d1928b36a2a07c6a519273d13
x86: refactor mce code
Factor common MCE code out of the Intel-specific code and move it into
common files. No functional changes.
Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx>
Committed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
diff -r d9d4c7ed2fd2 -r fc24482c07d2 xen/arch/x86/cpu/mcheck/Makefile
--- a/xen/arch/x86/cpu/mcheck/Makefile Tue Sep 11 12:26:25 2012 +0200
+++ b/xen/arch/x86/cpu/mcheck/Makefile Tue Sep 11 12:28:32 2012 +0200
@@ -2,10 +2,12 @@ obj-y += amd_nonfatal.o
obj-y += k7.o
obj-y += amd_k8.o
obj-y += amd_f10.o
+obj-y += barrier.o
obj-y += mctelem.o
obj-y += mce.o
obj-y += mce-apei.o
obj-y += mce_intel.o
obj-y += mce_amd_quirks.o
obj-y += non-fatal.o
+obj-y += util.o
obj-y += vmce.o
diff -r d9d4c7ed2fd2 -r fc24482c07d2 xen/arch/x86/cpu/mcheck/barrier.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/cpu/mcheck/barrier.c Tue Sep 11 12:28:32 2012 +0200
@@ -0,0 +1,59 @@
+#include "barrier.h"
+#include "util.h"
+#include "mce.h"
+
+void mce_barrier_init(struct mce_softirq_barrier *bar)
+{
+    atomic_set(&bar->val, 0);
+    atomic_set(&bar->ingen, 0);
+    atomic_set(&bar->outgen, 0);
+}
+
+void mce_barrier_dec(struct mce_softirq_barrier *bar)
+{
+    atomic_inc(&bar->outgen);
+    wmb();
+    atomic_dec(&bar->val);
+}
+
+void mce_barrier_enter(struct mce_softirq_barrier *bar)
+{
+    int gen;
+
+    if ( !mce_broadcast )
+        return;
+    atomic_inc(&bar->ingen);
+    gen = atomic_read(&bar->outgen);
+    mb();
+    atomic_inc(&bar->val);
+    while ( atomic_read(&bar->val) != num_online_cpus() &&
+            atomic_read(&bar->outgen) == gen )
+    {
+        mb();
+        mce_panic_check();
+    }
+}
+
+void mce_barrier_exit(struct mce_softirq_barrier *bar)
+{
+    int gen;
+
+    if ( !mce_broadcast )
+        return;
+    atomic_inc(&bar->outgen);
+    gen = atomic_read(&bar->ingen);
+    mb();
+    atomic_dec(&bar->val);
+    while ( atomic_read(&bar->val) != 0 &&
+            atomic_read(&bar->ingen) == gen )
+    {
+        mb();
+        mce_panic_check();
+    }
+}
+
+void mce_barrier(struct mce_softirq_barrier *bar)
+{
+    mce_barrier_enter(bar);
+    mce_barrier_exit(bar);
+}
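mce_barrier_dec() above arrives here still unused -- the old copy in
mce_intel.c below was "#if 0"-ed out. Per its comment, the intended use
is for a surviving CPU to perform the exit step on behalf of a CPU that
entered the barrier but is being offlined as a recovery action. A
hypothetical caller (illustrative only; the function name is invented
here, not part of the patch) might look like:

    /* Hypothetical sketch -- not part of this patch.  When a CPU that
     * has entered 'bar' is being offlined, some other CPU exits the
     * barrier on its behalf so the waiter count still drains to zero
     * and the remaining CPUs do not spin forever. */
    static void mce_cpu_offline_cleanup(struct mce_softirq_barrier *bar)
    {
        /* ... remove the dying CPU from MCE bookkeeping ... */
        mce_barrier_dec(bar);  /* the enter this CPU can no longer undo */
    }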
diff -r d9d4c7ed2fd2 -r fc24482c07d2 xen/arch/x86/cpu/mcheck/barrier.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/cpu/mcheck/barrier.h Tue Sep 11 12:28:32 2012 +0200
@@ -0,0 +1,44 @@
+#ifndef _MCHECK_BARRIER_H
+#define _MCHECK_BARRIER_H
+
+#include <asm/atomic.h>
+
+/* MCE handling */
+struct mce_softirq_barrier {
+    atomic_t val;
+    atomic_t ingen;
+    atomic_t outgen;
+};
+
+/*
+ * Initialize a barrier. Just set it to 0.
+ */
+void mce_barrier_init(struct mce_softirq_barrier *);
+
+/*
+ * This function will need to be used when offlining a CPU in the
+ * recovery actions.
+ *
+ * Decrement a barrier only. Needed for cases where the CPU
+ * in question can't do it itself (e.g. it is being offlined).
+ */
+void mce_barrier_dec(struct mce_softirq_barrier *);
+
+/*
+ * Increment the generation number and the value. The generation number
+ * is incremented when entering a barrier. This way, it can be checked
+ * on exit if a CPU is trying to re-enter the barrier. This can happen
+ * if the first CPU to make it out immediately exits or re-enters, while
+ * another CPU that is still in the loop becomes otherwise occupied
+ * (e.g. it needs to service an interrupt, etc), missing the value
+ * it's waiting for.
+ *
+ * These barrier functions should always be paired, so that the
+ * counter value will reach 0 again after all CPUs have exited.
+ */
+void mce_barrier_enter(struct mce_softirq_barrier *);
+void mce_barrier_exit(struct mce_softirq_barrier *);
+
+void mce_barrier(struct mce_softirq_barrier *);
+
+#endif /* _MCHECK_BARRIER_H */
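To make the generation scheme described above concrete, here is a
standalone user-space model of the paired enter/exit calls -- an
illustration only, not part of the patch: C11 atomics and pthreads
stand in for Xen's atomic_t, mb() and num_online_cpus(), with the
default seq_cst ordering subsuming the explicit fences. Build with
cc -std=c11 -pthread; each thread prints one line once all of them
have rendezvoused:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NTHREADS 4                /* stands in for num_online_cpus() */

    struct sketch_barrier {
        atomic_int val;               /* threads currently inside */
        atomic_int ingen;             /* bumped on every enter */
        atomic_int outgen;            /* bumped on every exit */
    };

    static struct sketch_barrier bar; /* zero-initialised, as mce_barrier_init() does */

    static void sketch_enter(struct sketch_barrier *b)
    {
        atomic_fetch_add(&b->ingen, 1);
        int gen = atomic_load(&b->outgen);
        atomic_fetch_add(&b->val, 1);
        /* Wait for everyone -- unless outgen moved, i.e. the rendezvous
         * already happened and an early thread exited (or re-entered)
         * before we could observe val == NTHREADS. */
        while (atomic_load(&b->val) != NTHREADS &&
               atomic_load(&b->outgen) == gen)
            ;
    }

    static void sketch_exit(struct sketch_barrier *b)
    {
        atomic_fetch_add(&b->outgen, 1);
        int gen = atomic_load(&b->ingen);
        atomic_fetch_sub(&b->val, 1);
        /* Wait for everyone to leave -- unless ingen moved, i.e. some
         * thread is already entering the next rendezvous. */
        while (atomic_load(&b->val) != 0 &&
               atomic_load(&b->ingen) == gen)
            ;
    }

    static void *cpu_thread(void *arg)
    {
        sketch_enter(&bar);
        printf("thread %ld: rendezvous reached\n", (long)arg);
        sketch_exit(&bar);
        return NULL;
    }

    int main(void)
    {
        pthread_t t[NTHREADS];

        for (long i = 0; i < NTHREADS; i++)
            pthread_create(&t[i], NULL, cpu_thread, (void *)i);
        for (int i = 0; i < NTHREADS; i++)
            pthread_join(t[i], NULL);
        return 0;
    }

The outgen check in sketch_enter() is what prevents the re-entry hang
the header comment describes: a late arrival that misses seeing
val == NTHREADS still breaks out as soon as the first thread exits.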
diff -r d9d4c7ed2fd2 -r fc24482c07d2 xen/arch/x86/cpu/mcheck/mce_intel.c
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c Tue Sep 11 12:26:25 2012 +0200
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c Tue Sep 11 12:28:32 2012 +0200
@@ -16,6 +16,8 @@
#include <asm/apic.h>
#include "mce.h"
#include "x86_mca.h"
+#include "barrier.h"
+#include "util.h"
DEFINE_PER_CPU(struct mca_banks *, mce_banks_owned);
DEFINE_PER_CPU(struct mca_banks *, no_cmci_banks);
@@ -164,13 +166,6 @@ static void intel_init_thermal(struct cp
}
#endif /* CONFIG_X86_MCE_THERMAL */
-/* MCE handling */
-struct mce_softirq_barrier {
-    atomic_t val;
-    atomic_t ingen;
-    atomic_t outgen;
-};
-
static struct mce_softirq_barrier mce_inside_bar, mce_severity_bar;
static struct mce_softirq_barrier mce_trap_bar;
@@ -186,9 +181,6 @@ static atomic_t severity_cpu = ATOMIC_IN
static atomic_t found_error = ATOMIC_INIT(0);
static cpumask_t mce_fatal_cpus;
-static void mce_barrier_enter(struct mce_softirq_barrier *);
-static void mce_barrier_exit(struct mce_softirq_barrier *);
-
static const struct mca_error_handler *__read_mostly mce_dhandlers;
static const struct mca_error_handler *__read_mostly mce_uhandlers;
static unsigned int __read_mostly mce_dhandler_num;
@@ -385,25 +377,6 @@ static int mce_urgent_action(struct cpu_
* Round2: Do all MCE processing logic as normal.
*/
-static void mce_panic_check(void)
-{
-    if (is_mc_panic) {
-        local_irq_enable();
-        for ( ; ; )
-            halt();
-    }
-}
-
-/*
- * Initialize a barrier. Just set it to 0.
- */
-static void mce_barrier_init(struct mce_softirq_barrier *bar)
-{
-    atomic_set(&bar->val, 0);
-    atomic_set(&bar->ingen, 0);
-    atomic_set(&bar->outgen, 0);
-}
-
static void mce_handler_init(void)
{
    if (smp_processor_id() != 0)
@@ -417,21 +390,6 @@ static void mce_handler_init(void)
    spin_lock_init(&mce_logout_lock);
    open_softirq(MACHINE_CHECK_SOFTIRQ, mce_softirq);
}
-#if 0
-/*
- * This function will need to be used when offlining a CPU in the
- * recovery actions.
- *
- * Decrement a barrier only. Needed for cases where the CPU
- * in question can't do it itself (e.g. it is being offlined).
- */
-static void mce_barrier_dec(struct mce_softirq_barrier *bar)
-{
-    atomic_inc(&bar->outgen);
-    wmb();
-    atomic_dec(&bar->val);
-}
-#endif
static void mce_spin_lock(spinlock_t *lk)
{
@@ -446,60 +404,6 @@ static void mce_spin_unlock(spinlock_t *
    spin_unlock(lk);
}
-/*
- * Increment the generation number and the value. The generation number
- * is incremented when entering a barrier. This way, it can be checked
- * on exit if a CPU is trying to re-enter the barrier. This can happen
- * if the first CPU to make it out immediately exits or re-enters, while
- * another CPU that is still in the loop becomes otherwise occupied
- * (e.g. it needs to service an interrupt, etc), missing the value
- * it's waiting for.
- *
- * These barrier functions should always be paired, so that the
- * counter value will reach 0 again after all CPUs have exited.
- */
-static void mce_barrier_enter(struct mce_softirq_barrier *bar)
-{
-    int gen;
-
-    if (!mce_broadcast)
-        return;
-    atomic_inc(&bar->ingen);
-    gen = atomic_read(&bar->outgen);
-    mb();
-    atomic_inc(&bar->val);
-    while ( atomic_read(&bar->val) != num_online_cpus() &&
-            atomic_read(&bar->outgen) == gen) {
-        mb();
-        mce_panic_check();
-    }
-}
-
-static void mce_barrier_exit(struct mce_softirq_barrier *bar)
-{
-    int gen;
-
-    if (!mce_broadcast)
-        return;
-    atomic_inc(&bar->outgen);
-    gen = atomic_read(&bar->ingen);
-    mb();
-    atomic_dec(&bar->val);
-    while ( atomic_read(&bar->val) != 0 &&
-            atomic_read(&bar->ingen) == gen ) {
-        mb();
-        mce_panic_check();
-    }
-}
-
-#if 0
-static void mce_barrier(struct mce_softirq_barrier *bar)
-{
-    mce_barrier_enter(bar);
-    mce_barrier_exit(bar);
-}
-#endif
-
/* Intel MCE handler */
static inline void intel_get_extended_msr(struct mcinfo_extended *ext, u32 msr)
{
diff -r d9d4c7ed2fd2 -r fc24482c07d2 xen/arch/x86/cpu/mcheck/util.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/cpu/mcheck/util.c Tue Sep 11 12:28:32 2012 +0200
@@ -0,0 +1,14 @@
+
+#include <asm/system.h>
+#include "util.h"
+#include "mce.h"
+
+void mce_panic_check(void)
+{
+    if ( is_mc_panic )
+    {
+        local_irq_enable();
+        for ( ; ; )
+            halt();
+    }
+}
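The reason this helper is polled from the barrier loops in barrier.c
above: once any CPU has declared a fatal MCE (is_mc_panic set), a CPU
spinning at a rendezvous must park itself rather than wait for a
rendezvous that can no longer complete. A rough user-space analogue
(an assumption for illustration; the sketch_* names are invented and
sched_yield() stands in for local_irq_enable(); halt()):

    #include <sched.h>       /* sched_yield() */
    #include <stdatomic.h>

    static atomic_bool sketch_is_mc_panic;

    /* Analogue of mce_panic_check(): once the panic flag is set this
     * never returns, so a barrier loop that polls it parks the
     * calling thread instead of spinning indefinitely. */
    static void sketch_panic_check(void)
    {
        while (atomic_load(&sketch_is_mc_panic))
            sched_yield();
    }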
diff -r d9d4c7ed2fd2 -r fc24482c07d2 xen/arch/x86/cpu/mcheck/util.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/cpu/mcheck/util.h Tue Sep 11 12:28:32 2012 +0200
@@ -0,0 +1,6 @@
+#ifndef _MCHECK_UTIL_H
+#define _MCHECK_UTIL_H
+
+void mce_panic_check(void);
+
+#endif
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog