[Xen-changelog] [xen-unstable] x86/MCE: Implement clearbank callback for AMD
# HG changeset patch
# User Christoph Egger <Christoph.Egger@xxxxxxx>
# Date 1351168089 -7200
# Node ID 1883c1d29de97454df8f3b6723f0a853a09f4af6
# Parent 5fb601e96d0fcabfd19605aa7a314cbb3d321e7d
x86/MCE: Implement clearbank callback for AMD
Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx>
Move initialization of mce_clear_banks into common code (would not get
initialized on AMD CPUs otherwise). Mark per-CPU struct mca_banks
pointers read-mostly.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Committed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
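[Note, not part of the commit message: the new k8_need_clearbank_scan() callback in the amd_k8.c hunk below decides, per bank, whether the MCE scan may clear MCi_STATUS. The self-contained sketch that follows only illustrates that decision rule; it is not the Xen code, and the helper name need_clearbank() plus the standalone test harness are made up for illustration. The UC/PCC bit positions used are the architectural MCi_STATUS bits 61 and 57.

#include <stdio.h>
#include <stdint.h>

#define MCi_STATUS_UC  (1ULL << 61)   /* uncorrected error */
#define MCi_STATUS_PCC (1ULL << 57)   /* processor context corrupt */

enum mca_source { MCA_MCE_SCAN, MCA_POLLER };

/* Same rule as k8_need_clearbank_scan(): only the MCE scan skips
 * clearing, and only for fatal (UC && PCC) banks. */
static int need_clearbank(enum mca_source who, uint64_t status)
{
    if (who != MCA_MCE_SCAN)
        return 1;
    if ((status & MCi_STATUS_UC) && (status & MCi_STATUS_PCC))
        return 0;
    return 1;
}

int main(void)
{
    uint64_t corrected = 0;
    uint64_t fatal = MCi_STATUS_UC | MCi_STATUS_PCC;

    printf("corrected bank -> clear? %d\n", need_clearbank(MCA_MCE_SCAN, corrected)); /* 1 */
    printf("fatal bank     -> clear? %d\n", need_clearbank(MCA_MCE_SCAN, fatal));     /* 0 */
    printf("fatal, poller  -> clear? %d\n", need_clearbank(MCA_POLLER, fatal));       /* 1 */
    return 0;
}

Banks the callback approves are expected to be marked in the per-CPU mce_clear_banks mask that k8_machine_check() now passes to mcheck_cmn_handler(), so the common code can clear them after logging; banks it rejects stay sticky for post-reboot polling.]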
diff -r 5fb601e96d0f -r 1883c1d29de9 xen/arch/x86/cpu/mcheck/amd_k8.c
--- a/xen/arch/x86/cpu/mcheck/amd_k8.c Thu Oct 25 14:26:08 2012 +0200
+++ b/xen/arch/x86/cpu/mcheck/amd_k8.c Thu Oct 25 14:28:09 2012 +0200
@@ -72,7 +72,23 @@
/* Machine Check Handler for AMD K8 family series */
static void k8_machine_check(struct cpu_user_regs *regs, long error_code)
{
- mcheck_cmn_handler(regs, error_code, mca_allbanks, NULL);
+ mcheck_cmn_handler(regs, error_code, mca_allbanks,
+ __get_cpu_var(mce_clear_banks));
+}
+
+static int k8_need_clearbank_scan(enum mca_source who, uint64_t status)
+{
+ if (who != MCA_MCE_SCAN)
+ return 1;
+
+ /*
+ * For a fatal error, the bank shouldn't be cleared, so that the sticky
+ * bank has a chance to be handled after reboot by polling.
+ */
+ if ((status & MCi_STATUS_UC) && (status & MCi_STATUS_PCC))
+ return 0;
+
+ return 1;
}
/* AMD K8 machine check */
@@ -85,6 +101,7 @@ enum mcheck_type amd_k8_mcheck_init(stru
mce_handler_init();
x86_mce_vector_register(k8_machine_check);
+ mce_need_clearbank_register(k8_need_clearbank_scan);
for (i = 0; i < nr_mce_banks; i++) {
if (quirkflag == MCEQUIRK_K8_GART && i == 4) {
diff -r 5fb601e96d0f -r 1883c1d29de9 xen/arch/x86/cpu/mcheck/mce.c
--- a/xen/arch/x86/cpu/mcheck/mce.c Thu Oct 25 14:26:08 2012 +0200
+++ b/xen/arch/x86/cpu/mcheck/mce.c Thu Oct 25 14:28:09 2012 +0200
@@ -35,6 +35,10 @@ bool_t is_mc_panic;
unsigned int __read_mostly nr_mce_banks;
unsigned int __read_mostly firstbank;
+DEFINE_PER_CPU_READ_MOSTLY(struct mca_banks *, poll_bankmask);
+DEFINE_PER_CPU_READ_MOSTLY(struct mca_banks *, no_cmci_banks);
+DEFINE_PER_CPU_READ_MOSTLY(struct mca_banks *, mce_clear_banks);
+
static void intpose_init(void);
static void mcinfo_clear(struct mc_info *);
struct mca_banks *mca_allbanks;
@@ -691,22 +695,29 @@ int mca_cap_init(void)
return mca_allbanks ? 0:-ENOMEM;
}
-static void cpu_poll_bankmask_free(unsigned int cpu)
+static void cpu_bank_free(unsigned int cpu)
{
- struct mca_banks *mb = per_cpu(poll_bankmask, cpu);
+ struct mca_banks *poll = per_cpu(poll_bankmask, cpu);
+ struct mca_banks *clr = per_cpu(mce_clear_banks, cpu);
- mcabanks_free(mb);
+ mcabanks_free(poll);
+ mcabanks_free(clr);
}
-static int cpu_poll_bankmask_alloc(unsigned int cpu)
+static int cpu_bank_alloc(unsigned int cpu)
{
- struct mca_banks *mb;
+ struct mca_banks *poll = mcabanks_alloc();
+ struct mca_banks *clr = mcabanks_alloc();
- mb = mcabanks_alloc();
- if ( !mb )
+ if ( !poll || !clr )
+ {
+ mcabanks_free(poll);
+ mcabanks_free(clr);
return -ENOMEM;
+ }
- per_cpu(poll_bankmask, cpu) = mb;
+ per_cpu(poll_bankmask, cpu) = poll;
+ per_cpu(mce_clear_banks, cpu) = clr;
return 0;
}
@@ -719,11 +730,11 @@ static int cpu_callback(
switch ( action )
{
case CPU_UP_PREPARE:
- rc = cpu_poll_bankmask_alloc(cpu);
+ rc = cpu_bank_alloc(cpu);
break;
case CPU_UP_CANCELED:
case CPU_DEAD:
- cpu_poll_bankmask_free(cpu);
+ cpu_bank_free(cpu);
break;
default:
break;
@@ -757,6 +768,10 @@ void mcheck_init(struct cpuinfo_x86 *c,
if (mca_cap_init())
return;
+ /* Early MCE initialisation for BSP. */
+ if ( bsp && cpu_bank_alloc(smp_processor_id()) )
+ BUG();
+
switch (c->x86_vendor) {
case X86_VENDOR_AMD:
inited = amd_mcheck_init(c);
@@ -787,18 +802,14 @@ void mcheck_init(struct cpuinfo_x86 *c,
set_in_cr4(X86_CR4_MCE);
if ( bsp )
- {
- /* Early MCE initialisation for BSP. */
- if ( cpu_poll_bankmask_alloc(0) )
- BUG();
register_cpu_notifier(&cpu_nfb);
- }
set_poll_bankmask(c);
return;
out:
- if (smp_processor_id() == 0)
+ if ( bsp )
{
+ cpu_bank_free(smp_processor_id());
mcabanks_free(mca_allbanks);
mca_allbanks = NULL;
}
diff -r 5fb601e96d0f -r 1883c1d29de9 xen/arch/x86/cpu/mcheck/mce.h
--- a/xen/arch/x86/cpu/mcheck/mce.h Thu Oct 25 14:26:08 2012 +0200
+++ b/xen/arch/x86/cpu/mcheck/mce.h Thu Oct 25 14:28:09 2012 +0200
@@ -122,6 +122,7 @@ struct mca_summary {
DECLARE_PER_CPU(struct mca_banks *, poll_bankmask);
DECLARE_PER_CPU(struct mca_banks *, no_cmci_banks);
+DECLARE_PER_CPU(struct mca_banks *, mce_clear_banks);
extern bool_t cmci_support;
extern bool_t is_mc_panic;
diff -r 5fb601e96d0f -r 1883c1d29de9 xen/arch/x86/cpu/mcheck/mce_intel.c
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c Thu Oct 25 14:26:08 2012 +0200
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c Thu Oct 25 14:28:09 2012 +0200
@@ -21,9 +21,7 @@
#include "vmce.h"
#include "mcaction.h"
-DEFINE_PER_CPU(struct mca_banks *, mce_banks_owned);
-DEFINE_PER_CPU(struct mca_banks *, no_cmci_banks);
-DEFINE_PER_CPU(struct mca_banks *, mce_clear_banks);
+static DEFINE_PER_CPU_READ_MOSTLY(struct mca_banks *, mce_banks_owned);
bool_t __read_mostly cmci_support = 0;
static bool_t __read_mostly ser_support = 0;
static bool_t __read_mostly mce_force_broadcast;
@@ -789,36 +787,28 @@ static void intel_init_mce(void)
static void cpu_mcabank_free(unsigned int cpu)
{
- struct mca_banks *mb1, *mb2, *mb3;
+ struct mca_banks *cmci = per_cpu(no_cmci_banks, cpu);
+ struct mca_banks *owned = per_cpu(mce_banks_owned, cpu);
- mb1 = per_cpu(mce_clear_banks, cpu);
- mb2 = per_cpu(no_cmci_banks, cpu);
- mb3 = per_cpu(mce_banks_owned, cpu);
-
- mcabanks_free(mb1);
- mcabanks_free(mb2);
- mcabanks_free(mb3);
+ mcabanks_free(cmci);
+ mcabanks_free(owned);
}
static int cpu_mcabank_alloc(unsigned int cpu)
{
- struct mca_banks *mb1, *mb2, *mb3;
+ struct mca_banks *cmci = mcabanks_alloc();
+ struct mca_banks *owned = mcabanks_alloc();
- mb1 = mcabanks_alloc();
- mb2 = mcabanks_alloc();
- mb3 = mcabanks_alloc();
- if (!mb1 || !mb2 || !mb3)
+ if (!cmci || !owned)
goto out;
- per_cpu(mce_clear_banks, cpu) = mb1;
- per_cpu(no_cmci_banks, cpu) = mb2;
- per_cpu(mce_banks_owned, cpu) = mb3;
+ per_cpu(no_cmci_banks, cpu) = cmci;
+ per_cpu(mce_banks_owned, cpu) = owned;
return 0;
out:
- mcabanks_free(mb1);
- mcabanks_free(mb2);
- mcabanks_free(mb3);
+ mcabanks_free(cmci);
+ mcabanks_free(owned);
return -ENOMEM;
}
diff -r 5fb601e96d0f -r 1883c1d29de9 xen/arch/x86/cpu/mcheck/mctelem.h
--- a/xen/arch/x86/cpu/mcheck/mctelem.h Thu Oct 25 14:26:08 2012 +0200
+++ b/xen/arch/x86/cpu/mcheck/mctelem.h Thu Oct 25 14:28:09 2012 +0200
@@ -23,7 +23,7 @@
* urgent uses, intended for use from machine check exception handlers,
* and non-urgent uses intended for use from error pollers.
* Associated with each logout entry of whatever class is a data area
- * sized per the single argument to mctelem_init. mcelem_init should be
+ * sized per the single argument to mctelem_init. mctelem_init should be
* called from MCA init code before anybody has the chance to change the
* machine check vector with mcheck_mca_logout or to use mcheck_mca_logout.
*
@@ -45,7 +45,7 @@
* which will return a cookie referencing the oldest (first committed)
* entry of the requested class. Access the associated data using
* mctelem_dataptr and when finished use mctelem_consume_oldest_end - in the
- * begin .. end bracket you are guaranteed that the entry canot be freed
+ * begin .. end bracket you are guaranteed that the entry can't be freed
* even if it is ack'd elsewhere). Once the ultimate consumer of the
* telemetry has processed it to stable storage it should acknowledge
* the telemetry quoting the cookie id, at which point we will free
diff -r 5fb601e96d0f -r 1883c1d29de9 xen/arch/x86/cpu/mcheck/non-fatal.c
--- a/xen/arch/x86/cpu/mcheck/non-fatal.c Thu Oct 25 14:26:08 2012 +0200
+++ b/xen/arch/x86/cpu/mcheck/non-fatal.c Thu Oct 25 14:28:09 2012 +0200
@@ -23,7 +23,6 @@
#include "mce.h"
#include "vmce.h"
-DEFINE_PER_CPU(struct mca_banks *, poll_bankmask);
static struct timer mce_timer;
#define MCE_PERIOD MILLISECS(8000)
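[Note, not part of the patch: the mcabanks_alloc()/mcabanks_free() calls used throughout the hunks above operate on struct mca_banks, essentially a bitmap with one bit per MCA bank. The sketch below is a simplified illustration of that idea; the field names and the *_sketch helpers are assumptions made for readability, not the Xen definitions. One point the reworked cpu_bank_alloc() error path relies on is that freeing a NULL mask is a no-op.

#include <stdlib.h>
#include <limits.h>

struct mca_banks {
    unsigned int num;        /* number of banks covered */
    unsigned long *bank_map; /* one bit per bank */
};

static struct mca_banks *mcabanks_alloc_sketch(unsigned int nr_banks)
{
    size_t bits_per_word = sizeof(unsigned long) * CHAR_BIT;
    size_t words = (nr_banks + bits_per_word - 1) / bits_per_word;
    struct mca_banks *mb = malloc(sizeof(*mb));

    if (!mb)
        return NULL;
    mb->num = nr_banks;
    mb->bank_map = calloc(words, sizeof(unsigned long));
    if (!mb->bank_map) {
        free(mb);
        return NULL;
    }
    return mb;
}

/* Freeing NULL is a no-op, so an error path may free both pointers
 * unconditionally, as cpu_bank_alloc() does above. */
static void mcabanks_free_sketch(struct mca_banks *mb)
{
    if (!mb)
        return;
    free(mb->bank_map);
    free(mb);
}

static void mcabanks_set_sketch(unsigned int bank, struct mca_banks *mb)
{
    size_t bits_per_word = sizeof(unsigned long) * CHAR_BIT;

    if (mb && bank < mb->num)
        mb->bank_map[bank / bits_per_word] |= 1UL << (bank % bits_per_word);
}

int main(void)
{
    struct mca_banks *clr = mcabanks_alloc_sketch(6);

    mcabanks_set_sketch(4, clr);    /* e.g. mark bank 4 for clearing */
    mcabanks_free_sketch(clr);
    mcabanks_free_sketch(NULL);     /* safe: mirrors the alloc error path */
    return 0;
}]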