
Re: [Xen-ia64-devel] [PATCH 0/12]MCA handler support for Xen/ia64 TAKE 2



Hi Alex,

   I have attached the updated patches, described below.

[mca-mca.patch]
   It turned out that mod_timer() must not simply be defined as
set_timer(): the two interfaces use incompatible time resolutions.
I therefore defined jiffies and HZ in terms of the set_timer()
resolution.
   Please check whether this patch solves the hang problem.
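
   For reference, the idea in mca-mca.patch is just a nanosecond-based
redefinition (this restates the hunk further below): Linux's
mod_timer() takes an expiry in jiffies (units of 1/HZ seconds),
whereas Xen's set_timer() takes an absolute expiry in nanoseconds as
returned by NOW(). Redefining jiffies and HZ before the interval
macros are evaluated keeps the existing Linux expressions valid:

#ifdef XEN
#define jiffies                        NOW()   /* current time, in ns */
#undef HZ
#define HZ                     ((unsigned long)(1000*1000*1000)) /* 1 s in ns */
#define mod_timer(timer, expires)      set_timer(timer, expires)
#endif

   With these definitions, an expression such as
"jiffies + CMC_POLL_INTERVAL" (that is, NOW() + 1*60*HZ) is already an
absolute nanosecond expiry suitable for set_timer().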

> xencomm_init_desc: could only translate 49152 of 130000 bytes
> xencomm_init_desc: could only translate 49152 of 170000 bytes

> Seems like we need a solution for handling larger error logs.  

[mca-sal_h.patch]
   I changed xencomm_create_mini() to xencomm_create() in sal.h so
that larger error logs can be handled.
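
   For background, my understanding of the two xencomm variants is the
following (please correct me if I am wrong): xencomm_create_mini()
builds a small, fixed-size descriptor that can only cover a few pages
of the buffer, which matches the 49152-byte limit in the messages
above, while xencomm_create() allocates a descriptor large enough for
the whole buffer and must be freed afterwards. The call sequence in
ia64_sal_get_state_info() therefore becomes (excerpted from the sal.h
hunk below):

	struct xencomm_handle *desc;

	/* describe the whole log buffer, not just the first few pages */
	if (xencomm_create(sal_info,
		ia64_sal_get_state_info_size(sal_info_type),
		&desc, GFP_KERNEL))
		return 0;

	SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO, sal_info_type, 0,
			   desc, 0, 0, 0, 0);
	xencomm_free(desc);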

> I think
> it may also be wrong that SAL_GET/CLEAR_STATE_INFO are exposed to
> non-privileged domains.  Thanks,

[mca-fw_emul.patch]
   I added a check that current->domain is dom0; if it is not, a
warning message is printed instead.
   In this implementation, MCA information is delivered only to dom0.
If we later want to deliver it to non-privileged domains as well, we
only need to remove this check.
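
   Concretely, both SAL_GET_STATE_INFO and SAL_CLEAR_STATE_INFO in
sal_emulator() now take the form below (excerpted from the fw_emul.c
hunk), so dropping the outer test is all that would be needed later:

	case SAL_GET_STATE_INFO:
		if (current->domain == dom0) {
			/* dequeue the SAL record and copy it to dom0 */
			...
		} else
			printk("NON-PRIV DOMAIN CALLED SAL_GET_STATE_INFO\n");
		break;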

Thanks,
KAZ

Signed-off-by: Yutaka Ezaki <yutaka.ezaki@xxxxxxxxxxxxxx>
Signed-off-by: Masaki Kanno <kanno.masaki@xxxxxxxxxxxxxx>
Signed-off-by: Kazuhiro Suzuki <kaz@xxxxxxxxxxxxxx>


From: Alex Williamson <alex.williamson@xxxxxx>
Subject: Re: [Xen-ia64-devel] [PATCH 0/12]MCA handler support for Xen/ia64 TAKE 2
Date: Sun, 29 Oct 2006 10:54:14 -0700

> Hi Kaz,
> 
>    I'm still having several problems with the MCA support patches.  I've
> published my tree here:
> 
> http://free.linux.hp.com/~awilliam/xen-ia64-mca
> 
> Could you take a look and make sure I didn't break anything when
> applying the patches or updating them to the latest tree?  I'm seeing
> hangs during dom0 bootup.  The system has several error logs, so I
> thought there might be a race.  However, if I deconfigure the 2nd CPU
> (2-way Madison system), it hangs even before launching dom0.  Maybe it's
> trying to retrieve an error log from the offline CPU?  On another system
> I have that did boot up, I saw several of these errors:
> 
> xencomm_init_desc: could only translate 49152 of 130000 bytes
> xencomm_init_desc: could only translate 49152 of 170000 bytes
> 
> Seems like we need a solution for handling larger error logs.  I think
> it may also be wrong that SAL_GET/CLEAR_STATE_INFO are exposed to
> non-privileged domains.  Thanks,
> 
>       Alex
> 
> -- 
> Alex Williamson                             HP Open Source & Linux Org.
> 
diff -r 11b718eb22c9 xen/arch/ia64/linux-xen/mca.c
--- a/xen/arch/ia64/linux-xen/mca.c     Thu Nov 02 12:43:04 2006 -0700
+++ b/xen/arch/ia64/linux-xen/mca.c     Wed Nov 08 17:06:07 2006 +0900
@@ -81,6 +81,9 @@
 #include <xen/symbols.h>
 #include <xen/mm.h>
 #include <xen/console.h>
+#include <xen/event.h>
+#include <xen/softirq.h>
+#include <asm/xenmca.h>
 #endif
 
 #if defined(IA64_MCA_DEBUG_INFO)
@@ -108,18 +111,33 @@ unsigned long __per_cpu_mca[NR_CPUS];
 /* In mca_asm.S */
 extern void                    ia64_monarch_init_handler (void);
 extern void                    ia64_slave_init_handler (void);
+#ifdef XEN
+extern void setup_vector (unsigned int vec, struct irqaction *action);
+#define setup_irq(irq, action) setup_vector(irq, action)
+#endif
 
 static ia64_mc_info_t          ia64_mc_info;
 
-#ifndef XEN
+#ifdef XEN
+#define jiffies                        NOW()
+#undef HZ
+#define HZ                     ((unsigned long)(1000*1000*1000))
+#endif
+
 #define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
 #define MIN_CPE_POLL_INTERVAL (2*60*HZ)  /* 2 minutes */
 #define CMC_POLL_INTERVAL     (1*60*HZ)  /* 1 minute */
 #define CPE_HISTORY_LENGTH    5
 #define CMC_HISTORY_LENGTH    5
 
+#ifndef XEN 
 static struct timer_list cpe_poll_timer;
 static struct timer_list cmc_poll_timer;
+#else
+#define mod_timer(timer, expires)      set_timer(timer, expires)
+static struct timer cpe_poll_timer;
+static struct timer cmc_poll_timer;
+#endif
 /*
  * This variable tells whether we are currently in polling mode.
  * Start with this in the wrong state so we won't play w/ timers
@@ -136,11 +154,9 @@ static int cpe_poll_enabled = 1;
 static int cpe_poll_enabled = 1;
 
 extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
-#endif /* !XEN */
 
 static int mca_init;
 
-#ifndef XEN
 /*
  * IA64_MCA log support
  */
@@ -157,11 +173,24 @@ typedef struct ia64_state_log_s
 
 static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
 
+#ifndef XEN
 #define IA64_LOG_ALLOCATE(it, size) \
        {ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
                (ia64_err_rec_t *)alloc_bootmem(size); \
        ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
                (ia64_err_rec_t *)alloc_bootmem(size);}
+#else
+#define IA64_LOG_ALLOCATE(it, size) \
+       do { \
+               unsigned int pageorder; \
+               pageorder  = get_order_from_bytes(sizeof(struct ia64_mca_cpu)); \
+               ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
+                 (ia64_err_rec_t *)alloc_xenheap_pages(pageorder); \
+               ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
+                 (ia64_err_rec_t *)alloc_xenheap_pages(pageorder); \
+       } while(0)
+#endif
+
 #define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
 #define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
 #define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
@@ -176,6 +205,12 @@ static ia64_state_log_t ia64_state_log[I
 #define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
 #define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
 
+#ifdef XEN
+struct list_head sal_queue[IA64_MAX_LOG_TYPES];
+sal_log_record_header_t *sal_record = NULL;
+DEFINE_SPINLOCK(sal_queue_lock);
+#endif
+
 /*
  * ia64_log_init
  *     Reset the OS ia64 log buffer
@@ -200,8 +235,19 @@ ia64_log_init(int sal_info_type)
        IA64_LOG_ALLOCATE(sal_info_type, max_size);
        memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
        memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
-}
-
+
+#ifdef XEN
+       if (sal_record == NULL) {
+               unsigned int pageorder;
+               pageorder  = get_order_from_bytes(max_size);
+               sal_record = (sal_log_record_header_t *)
+                            alloc_xenheap_pages(pageorder);
+               BUG_ON(sal_record == NULL);
+       }
+#endif
+}
+
+#ifndef XEN
 /*
  * ia64_log_get
  *
@@ -277,15 +323,159 @@ ia64_mca_log_sal_error_record(int sal_in
        if (rh->severity == sal_log_severity_corrected)
                ia64_sal_clear_state_info(sal_info_type);
 }
+#else /* !XEN */
+/*
+ * ia64_log_queue
+ *
+ *     Get the current MCA log from SAL and copy it into the OS log buffer.
+ *
+ *  Inputs  :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
+ *  Outputs :   size        (total record length)
+ *              *buffer     (ptr to error record)
+ *
+ */
+static u64
+ia64_log_queue(int sal_info_type, int virq)
+{
+       sal_log_record_header_t     *log_buffer;
+       u64                         total_len = 0;
+       int                         s;
+       sal_queue_entry_t           *e;
+       unsigned long               flags;
+
+       IA64_LOG_LOCK(sal_info_type);
+
+       /* Get the process state information */
+       log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);
+
+       total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);
+
+       if (total_len) {
+               int queue_type;
+
+               spin_lock_irqsave(&sal_queue_lock, flags);
+
+               if (sal_info_type == SAL_INFO_TYPE_MCA && virq == VIRQ_MCA_CMC)
+                       queue_type = SAL_INFO_TYPE_CMC;
+               else
+                       queue_type = sal_info_type;
+
+               e = xmalloc(sal_queue_entry_t);
+               BUG_ON(e == NULL);
+               e->cpuid = smp_processor_id();
+               e->sal_info_type = sal_info_type;
+               e->vector = IA64_CMC_VECTOR;
+               e->virq = virq;
+               e->length = total_len;
+
+               list_add_tail(&e->list, &sal_queue[queue_type]);
+               spin_unlock_irqrestore(&sal_queue_lock, flags);
+
+               IA64_LOG_INDEX_INC(sal_info_type);
+               IA64_LOG_UNLOCK(sal_info_type);
+               if (sal_info_type != SAL_INFO_TYPE_MCA &&
+                   sal_info_type != SAL_INFO_TYPE_INIT) {
+                       IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
+                                      "Record length = %ld\n", __FUNCTION__,
+                                      sal_info_type, total_len);
+               }
+               return total_len;
+       } else {
+               IA64_LOG_UNLOCK(sal_info_type);
+               return 0;
+       }
+}
+#endif /* !XEN */
 
 /*
  * platform dependent error handling
  */
-#endif /* !XEN */
 #ifndef PLATFORM_MCA_HANDLERS
-#ifndef XEN
 
 #ifdef CONFIG_ACPI
+
+#ifdef XEN
+/**
+ *     Copy from linux/include/asm-generic/bug.h
+ */
+#define WARN_ON(condition) do { \
+       if (unlikely((condition)!=0)) { \
+               printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
+               dump_stack(); \
+       } \
+} while (0)
+
+/**
+ *     Copy from linux/kernel/irq/manage.c
+ *
+ *     disable_irq_nosync - disable an irq without waiting
+ *     @irq: Interrupt to disable
+ *
+ *     Disable the selected interrupt line.  Disables and Enables are
+ *     nested.
+ *     Unlike disable_irq(), this function does not ensure existing
+ *     instances of the IRQ handler have completed before returning.
+ *
+ *     This function may be called from IRQ context.
+ */
+void disable_irq_nosync(unsigned int irq)
+{
+       irq_desc_t *desc = irq_desc + irq;
+       unsigned long flags;
+
+       if (irq >= NR_IRQS)
+               return;
+
+       spin_lock_irqsave(&desc->lock, flags);
+       if (!desc->depth++) {
+               desc->status |= IRQ_DISABLED;
+               desc->handler->disable(irq);
+       }
+       spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+/**
+ *     Copy from linux/kernel/irq/manage.c
+ *
+ *     enable_irq - enable handling of an irq
+ *     @irq: Interrupt to enable
+ *
+ *     Undoes the effect of one call to disable_irq().  If this
+ *     matches the last disable, processing of interrupts on this
+ *     IRQ line is re-enabled.
+ *
+ *     This function may be called from IRQ context.
+ */
+void enable_irq(unsigned int irq)
+{
+       irq_desc_t *desc = irq_desc + irq;
+       unsigned long flags;
+
+       if (irq >= NR_IRQS)
+               return;
+
+       spin_lock_irqsave(&desc->lock, flags);
+       switch (desc->depth) {
+       case 0:
+               WARN_ON(1);
+               break;
+       case 1: {
+               unsigned int status = desc->status & ~IRQ_DISABLED;
+
+               desc->status = status;
+               if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
+                       desc->status = status | IRQ_REPLAY;
+                       hw_resend_irq(desc->handler,irq);
+               }
+               desc->handler->enable(irq);
+               /* fall-through */
+       }
+       default:
+               desc->depth--;
+       }
+       spin_unlock_irqrestore(&desc->lock, flags);
+}
+#endif /* XEN */
 
 int cpe_vector = -1;
 
@@ -302,8 +492,15 @@ ia64_mca_cpe_int_handler (int cpe_irq, v
        /* SAL spec states this should run w/ interrupts enabled */
        local_irq_enable();
 
+#ifndef XEN
        /* Get the CPE error record and log it */
        ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
+#else
+       /* CPE error does not inform to dom0 but the following codes are 
+          reserved for future implementation */
+/*     ia64_log_queue(SAL_INFO_TYPE_CPE, VIRQ_MCA_CPE); */
+/*     send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CPE); */
+#endif
 
        spin_lock(&cpe_history_lock);
        if (!cpe_poll_enabled && cpe_vector >= 0) {
@@ -345,7 +542,6 @@ ia64_mca_cpe_int_handler (int cpe_irq, v
 }
 
 #endif /* CONFIG_ACPI */
-#endif /* !XEN */
 
 static void
 show_min_state (pal_min_state_area_t *minstate)
@@ -593,7 +789,6 @@ init_handler_platform (pal_min_state_are
        while (1);                      /* hang city if no debugger */
 }
 
-#ifndef XEN
 #ifdef CONFIG_ACPI
 /*
  * ia64_mca_register_cpev
@@ -624,9 +819,7 @@ ia64_mca_register_cpev (int cpev)
 }
 #endif /* CONFIG_ACPI */
 
-#endif /* !XEN */
 #endif /* PLATFORM_MCA_HANDLERS */
-#ifndef XEN
 
 /*
  * ia64_mca_cmc_vector_setup
@@ -713,6 +906,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
                       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
 }
 
+#ifndef XEN
 /*
  * ia64_mca_cmc_vector_disable_keventd
  *
@@ -736,6 +930,7 @@ ia64_mca_cmc_vector_enable_keventd(void 
 {
        on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
 }
+#endif /* !XEN */
 
 /*
  * ia64_mca_wakeup_ipi_wait
@@ -887,15 +1082,26 @@ static void
 static void
 ia64_return_to_sal_check(int recover)
 {
+#ifdef XEN
+       int cpu = smp_processor_id();
+#endif
 
        /* Copy over some relevant stuff from the sal_to_os_mca_handoff
         * so that it can be used at the time of os_mca_to_sal_handoff
         */
+#ifdef XEN
+       ia64_os_to_sal_handoff_state.imots_sal_gp =
+               ia64_sal_to_os_handoff_state[cpu].imsto_sal_gp;
+
+       ia64_os_to_sal_handoff_state.imots_sal_check_ra =
+               ia64_sal_to_os_handoff_state[cpu].imsto_sal_check_ra;
+#else
        ia64_os_to_sal_handoff_state.imots_sal_gp =
                ia64_sal_to_os_handoff_state.imsto_sal_gp;
 
        ia64_os_to_sal_handoff_state.imots_sal_check_ra =
                ia64_sal_to_os_handoff_state.imsto_sal_check_ra;
+#endif
 
        if (recover)
                ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED;
@@ -905,8 +1111,13 @@ ia64_return_to_sal_check(int recover)
        /* Default = tell SAL to return to same context */
        ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT;
 
+#ifdef XEN
+       ia64_os_to_sal_handoff_state.imots_new_min_state =
+               (u64 *)ia64_sal_to_os_handoff_state[cpu].pal_min_state;
+#else
        ia64_os_to_sal_handoff_state.imots_new_min_state =
                (u64 *)ia64_sal_to_os_handoff_state.pal_min_state;
+#endif
 
 }
 
@@ -954,27 +1165,44 @@ void
 void
 ia64_mca_ucmc_handler(void)
 {
+#ifdef XEN
+       int cpu = smp_processor_id();
+       pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
+               &ia64_sal_to_os_handoff_state[cpu].proc_state_param;
+#else
        pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
                &ia64_sal_to_os_handoff_state.proc_state_param;
+#endif
        int recover; 
 
+#ifndef XEN
        /* Get the MCA error record and log it */
        ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
+#else
+       ia64_log_queue(SAL_INFO_TYPE_MCA, VIRQ_MCA_CMC);
+       send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CMC);
+#endif
 
        /* TLB error is only exist in this SAL error record */
        recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
        /* other error recovery */
+#ifndef XEN
           || (ia64_mca_ucmc_extension 
                && ia64_mca_ucmc_extension(
                        IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
                        &ia64_sal_to_os_handoff_state,
                        &ia64_os_to_sal_handoff_state)); 
-
+#else
+       ;
+#endif
+
+#ifndef XEN
        if (recover) {
                sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
                rh->severity = sal_log_severity_corrected;
                ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
        }
+#endif
        /*
         *  Wakeup all the processors which are spinning in the rendezvous
         *  loop.
@@ -985,8 +1213,10 @@ ia64_mca_ucmc_handler(void)
        ia64_return_to_sal_check(recover);
 }
 
+#ifndef XEN
 static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
 static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
+#endif
 
 /*
  * ia64_mca_cmc_int_handler
@@ -1016,8 +1246,13 @@ ia64_mca_cmc_int_handler(int cmc_irq, vo
        /* SAL spec states this should run w/ interrupts enabled */
        local_irq_enable();
 
+#ifndef XEN    
        /* Get the CMC error record and log it */
        ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
+#else
+       ia64_log_queue(SAL_INFO_TYPE_CMC, VIRQ_MCA_CMC);
+       send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CMC);
+#endif
 
        spin_lock(&cmc_history_lock);
        if (!cmc_polling_enabled) {
@@ -1034,7 +1269,12 @@ ia64_mca_cmc_int_handler(int cmc_irq, vo
 
                        cmc_polling_enabled = 1;
                        spin_unlock(&cmc_history_lock);
+#ifndef XEN    /* XXX FIXME */
                        schedule_work(&cmc_disable_work);
+#else
+                       cpumask_raise_softirq(cpu_online_map,
+                                             CMC_DISABLE_SOFTIRQ);
+#endif
 
                        /*
                         * Corrected errors will still be corrected, but
@@ -1083,7 +1323,9 @@ ia64_mca_cmc_int_caller(int cmc_irq, voi
        if (start_count == -1)
                start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);
 
+#ifndef XEN
        ia64_mca_cmc_int_handler(cmc_irq, arg, ptregs);
+#endif
 
        for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
 
@@ -1094,7 +1336,12 @@ ia64_mca_cmc_int_caller(int cmc_irq, voi
                if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {
 
                        printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
+#ifndef XEN    /* XXX FIXME */
                        schedule_work(&cmc_enable_work);
+#else
+                       cpumask_raise_softirq(cpu_online_map,
+                                             CMC_ENABLE_SOFTIRQ);
+#endif
                        cmc_polling_enabled = 0;
 
                } else {
@@ -1104,7 +1351,6 @@ ia64_mca_cmc_int_caller(int cmc_irq, voi
 
                start_count = -1;
        }
-
        return IRQ_HANDLED;
 }
 
@@ -1118,7 +1364,11 @@ ia64_mca_cmc_int_caller(int cmc_irq, voi
  *
  */
 static void
+#ifndef XEN
 ia64_mca_cmc_poll (unsigned long dummy)
+#else
+ia64_mca_cmc_poll (void *dummy)
+#endif
 {
        /* Trigger a CMC interrupt cascade  */
        platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
@@ -1153,7 +1403,9 @@ ia64_mca_cpe_int_caller(int cpe_irq, voi
        if (start_count == -1)
                start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);
 
+#ifndef XEN
        ia64_mca_cpe_int_handler(cpe_irq, arg, ptregs);
+#endif
 
        for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
 
@@ -1180,7 +1432,6 @@ ia64_mca_cpe_int_caller(int cpe_irq, voi
                        mod_timer(&cpe_poll_timer, jiffies + poll_time);
                start_count = -1;
        }
-
        return IRQ_HANDLED;
 }
 
@@ -1195,14 +1446,17 @@ ia64_mca_cpe_int_caller(int cpe_irq, voi
  *
  */
 static void
+#ifndef XEN
 ia64_mca_cpe_poll (unsigned long dummy)
+#else
+ia64_mca_cpe_poll (void *dummy)
+#endif
 {
        /* Trigger a CPE interrupt cascade  */
        platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
 }
 
 #endif /* CONFIG_ACPI */
-#endif /* !XEN */
 
 /*
  * C portion of the OS INIT handler
@@ -1248,7 +1502,6 @@ ia64_init_handler (struct pt_regs *pt, s
        init_handler_platform(ms, pt, sw);      /* call platform specific routines */
 }
 
-#ifndef XEN
 static int __init
 ia64_mca_disable_cpe_polling(char *str)
 {
@@ -1260,42 +1513,53 @@ __setup("disable_cpe_poll", ia64_mca_dis
 
 static struct irqaction cmci_irqaction = {
        .handler =      ia64_mca_cmc_int_handler,
+#ifndef XEN
        .flags =        SA_INTERRUPT,
+#endif
        .name =         "cmc_hndlr"
 };
 
 static struct irqaction cmcp_irqaction = {
        .handler =      ia64_mca_cmc_int_caller,
+#ifndef XEN
        .flags =        SA_INTERRUPT,
+#endif
        .name =         "cmc_poll"
 };
 
 static struct irqaction mca_rdzv_irqaction = {
        .handler =      ia64_mca_rendez_int_handler,
+#ifndef XEN
        .flags =        SA_INTERRUPT,
+#endif
        .name =         "mca_rdzv"
 };
 
 static struct irqaction mca_wkup_irqaction = {
        .handler =      ia64_mca_wakeup_int_handler,
+#ifndef XEN
        .flags =        SA_INTERRUPT,
+#endif
        .name =         "mca_wkup"
 };
 
 #ifdef CONFIG_ACPI
 static struct irqaction mca_cpe_irqaction = {
        .handler =      ia64_mca_cpe_int_handler,
+#ifndef XEN
        .flags =        SA_INTERRUPT,
+#endif
        .name =         "cpe_hndlr"
 };
 
 static struct irqaction mca_cpep_irqaction = {
        .handler =      ia64_mca_cpe_int_caller,
+#ifndef XEN
        .flags =        SA_INTERRUPT,
+#endif
        .name =         "cpe_poll"
 };
 #endif /* CONFIG_ACPI */
-#endif /* !XEN */
 
 /* Do per-CPU MCA-related initialization.  */
 
@@ -1329,6 +1593,13 @@ ia64_mca_cpu_init(void *cpu_data)
 #endif
                }
        }
+#ifdef XEN
+       else {
+               int i;
+               for (i = 0; i < IA64_MAX_LOG_TYPES; i++)
+                       ia64_log_queue(i, 0);
+       }
+#endif
 
         /*
          * The MCA info structure was allocated earlier and its
@@ -1395,17 +1666,14 @@ ia64_mca_init(void)
        ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
        ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler;
        ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
-#ifdef XEN
-       s64 rc;
-
-       slave_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
-
-       IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
-#else
        int i;
        s64 rc;
        struct ia64_sal_retval isrv;
        u64 timeout = IA64_MCA_RENDEZ_TIMEOUT;  /* platform specific */
+
+#ifdef XEN
+       slave_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
+#endif
 
        IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
 
@@ -1451,7 +1719,6 @@ ia64_mca_init(void)
        }
 
        IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup 
mech.\n", __FUNCTION__);
-#endif /* !XEN */
 
        ia64_mc_info.imi_mca_handler        = ia64_tpa(mca_hldlr_ptr->fp);
        /*
@@ -1503,7 +1770,6 @@ ia64_mca_init(void)
 
        IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", 
__FUNCTION__);
 
-#ifndef XEN
        /*
         *  Configure the CMCI/P vector and handler. Interrupts for CMC are
         *  per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
@@ -1531,13 +1797,26 @@ ia64_mca_init(void)
        ia64_log_init(SAL_INFO_TYPE_INIT);
        ia64_log_init(SAL_INFO_TYPE_CMC);
        ia64_log_init(SAL_INFO_TYPE_CPE);
-#endif /* !XEN */
+
+#ifdef XEN
+       INIT_LIST_HEAD(&sal_queue[SAL_INFO_TYPE_MCA]);
+       INIT_LIST_HEAD(&sal_queue[SAL_INFO_TYPE_INIT]);
+       INIT_LIST_HEAD(&sal_queue[SAL_INFO_TYPE_CMC]);
+       INIT_LIST_HEAD(&sal_queue[SAL_INFO_TYPE_CPE]);
+
+       open_softirq(CMC_DISABLE_SOFTIRQ,
+                    (softirq_handler)ia64_mca_cmc_vector_disable);
+       open_softirq(CMC_ENABLE_SOFTIRQ,
+                    (softirq_handler)ia64_mca_cmc_vector_enable);
+
+       for (i = 0; i < IA64_MAX_LOG_TYPES; i++)
+               ia64_log_queue(i, 0);
+#endif
 
        mca_init = 1;
        printk(KERN_INFO "MCA related initialization done\n");
 }
 
-#ifndef XEN
 /*
  * ia64_mca_late_init
  *
@@ -1555,20 +1834,34 @@ ia64_mca_late_init(void)
                return 0;
 
        /* Setup the CMCI/P vector and handler */
+#ifndef XEN
        init_timer(&cmc_poll_timer);
        cmc_poll_timer.function = ia64_mca_cmc_poll;
+#else
+       init_timer(&cmc_poll_timer, ia64_mca_cmc_poll, NULL, smp_processor_id());
+       printk("INIT_TIMER(cmc_poll_timer): on cpu%d\n", smp_processor_id());
+#endif
 
        /* Unmask/enable the vector */
        cmc_polling_enabled = 0;
+#ifndef XEN    /* XXX FIXME */
        schedule_work(&cmc_enable_work);
+#else
+       cpumask_raise_softirq(cpu_online_map, CMC_ENABLE_SOFTIRQ);
+#endif
 
        IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);
 
 #ifdef CONFIG_ACPI
        /* Setup the CPEI/P vector and handler */
        cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
+#ifndef        XEN
        init_timer(&cpe_poll_timer);
        cpe_poll_timer.function = ia64_mca_cpe_poll;
+#else
+       init_timer(&cpe_poll_timer, ia64_mca_cpe_poll, NULL, smp_processor_id());
+       printk("INIT_TIMER(cpe_poll_timer): on cpu%d\n", smp_processor_id());
+#endif
 
        {
                irq_desc_t *desc;
@@ -1598,5 +1891,8 @@ ia64_mca_late_init(void)
        return 0;
 }
 
+#ifndef XEN
 device_initcall(ia64_mca_late_init);
-#endif /* !XEN */
+#else
+__initcall(ia64_mca_late_init);
+#endif
--- a/linux-2.6-xen-sparse/include/asm-ia64/sal.h       2006-09-13 03:02:10.000000000 +0900
+++ b/linux-2.6-xen-sparse/include/asm-ia64/sal.h       2006-11-08 16:02:49.000000000 +0900
@@ -42,6 +42,9 @@
 #include <asm/pal.h>
 #include <asm/system.h>
 #include <asm/fpu.h>
+#ifdef CONFIG_XEN
+#include <asm/xen/xencomm.h>
+#endif
 
 extern spinlock_t sal_lock;
 
@@ -686,10 +689,28 @@
 /* Get the processor and platform information logged by SAL with respect to the machine
  * state at the time of the MCAs, INITs, CMCs, or CPEs.
  */
+#ifdef CONFIG_XEN
+static inline u64 ia64_sal_get_state_info_size (u64 sal_info_type);
+#endif
+
 static inline u64
 ia64_sal_get_state_info (u64 sal_info_type, u64 *sal_info)
 {
        struct ia64_sal_retval isrv;
+#ifdef CONFIG_XEN
+       if (is_running_on_xen()) {
+               struct xencomm_handle *desc;
+
+               if (xencomm_create(sal_info,
+                       ia64_sal_get_state_info_size(sal_info_type),
+                       &desc, GFP_KERNEL))
+                       return 0;
+
+               SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO, sal_info_type, 0,
+                                  desc, 0, 0, 0, 0);
+               xencomm_free(desc);
+       } else
+#endif
        SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO, sal_info_type, 0,
                      sal_info, 0, 0, 0, 0);
        if (isrv.status)
diff -r 11b718eb22c9 xen/arch/ia64/xen/fw_emul.c
--- a/xen/arch/ia64/xen/fw_emul.c       Thu Nov 02 12:43:04 2006 -0700
+++ b/xen/arch/ia64/xen/fw_emul.c       Wed Nov 08 17:23:36 2006 +0900
@@ -22,6 +22,7 @@
 #include <linux/efi.h>
 #include <asm/pal.h>
 #include <asm/sal.h>
+#include <asm/xenmca.h>
 
 #include <public/sched.h>
 #include "hpsim_ssc.h"
@@ -34,6 +35,93 @@
 
 extern unsigned long running_on_sim;
 
+struct sal_mc_params {
+       u64 param_type;
+       u64 i_or_m;
+       u64 i_or_m_val;
+       u64 timeout;
+       u64 rz_always;
+} sal_mc_params[SAL_MC_PARAM_CPE_INT + 1];
+
+struct sal_vectors {
+       u64 vector_type;
+       u64 handler_addr1;
+       u64 gp1;
+       u64 handler_len1;
+       u64 handler_addr2;
+       u64 gp2;
+       u64 handler_len2;
+} sal_vectors[SAL_VECTOR_OS_BOOT_RENDEZ + 1];
+
+struct smp_call_args_t {
+       u64 type;
+       u64 ret;
+       u64 target;
+       struct domain *domain;
+       int corrected;
+       int status;
+       void *data;
+}; 
+
+extern sal_log_record_header_t *sal_record;
+DEFINE_SPINLOCK(sal_record_lock);
+
+extern spinlock_t sal_queue_lock;
+
+#define IA64_SAL_NO_INFORMATION_AVAILABLE      -5
+
+#if defined(IA64_SAL_DEBUG_INFO)
+static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
+
+# define IA64_SAL_DEBUG(fmt...)        printk("sal_emulator: " fmt)
+#else
+# define IA64_SAL_DEBUG(fmt...)
+#endif
+
+void get_state_info_on(void *data) {
+       struct smp_call_args_t *arg = data;
+       int flags;
+
+       spin_lock_irqsave(&sal_record_lock, flags);
+       memset(sal_record, 0, ia64_sal_get_state_info_size(arg->type));
+       arg->ret = ia64_sal_get_state_info(arg->type, (u64 *)sal_record);
+       IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s) on CPU#%d returns %ld.\n",
+                      rec_name[arg->type], smp_processor_id(), arg->ret);
+       if (arg->corrected) {
+               sal_record->severity = sal_log_severity_corrected;
+               IA64_SAL_DEBUG("%s: IA64_SAL_CLEAR_STATE_INFO(SAL_INFO_TYPE_MCA)"
+                              " force\n", __FUNCTION__);
+       }
+       if (arg->ret > 0) {
+               /*
+                * Save current->domain and set to local(caller) domain for
+                * xencomm_paddr_to_maddr() which calculates maddr from
+                * paddr using mpa value of current->domain.
+                */
+               struct domain *save;
+               save = current->domain;
+               current->domain = arg->domain;
+               if (xencomm_copy_to_guest((void*)arg->target,
+                                         sal_record, arg->ret, 0)) {
+                       printk("SAL_GET_STATE_INFO can't copy to user!!!!\n");
+                       arg->status = IA64_SAL_NO_INFORMATION_AVAILABLE;
+                       arg->ret = 0;
+               }
+               /* Restore current->domain to saved value. */
+               current->domain = save;
+       }
+       spin_unlock_irqrestore(&sal_record_lock, flags);
+}
+
+void clear_state_info_on(void *data) {
+       struct smp_call_args_t *arg = data;
+
+       arg->ret = ia64_sal_clear_state_info(arg->type);
+       IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s) on CPU#%d returns %ld.\n",
+                      rec_name[arg->type], smp_processor_id(), arg->ret);
+
+}
+  
 struct sal_ret_values
 sal_emulator (long index, unsigned long in1, unsigned long in2,
              unsigned long in3, unsigned long in4, unsigned long in5,
@@ -104,27 +192,159 @@ sal_emulator (long index, unsigned long 
                        }
                }
                else
-                       printk("*** CALLED SAL_SET_VECTORS %lu.  IGNORED...\n",
-                              in1);
+               {
+                       if (in1 > sizeof(sal_vectors)/sizeof(sal_vectors[0])-1)
+                               BUG();
+                       sal_vectors[in1].vector_type    = in1;
+                       sal_vectors[in1].handler_addr1  = in2;
+                       sal_vectors[in1].gp1            = in3;
+                       sal_vectors[in1].handler_len1   = in4;
+                       sal_vectors[in1].handler_addr2  = in5;
+                       sal_vectors[in1].gp2            = in6;
+                       sal_vectors[in1].handler_len2   = in7;
+               }
                break;
            case SAL_GET_STATE_INFO:
-               /* No more info.  */
-               status = -5;
-               r9 = 0;
+               if (current->domain == dom0) {
+                       sal_queue_entry_t *e;
+                       unsigned long flags;
+                       struct smp_call_args_t arg;
+
+                       spin_lock_irqsave(&sal_queue_lock, flags);
+                       if (list_empty(&sal_queue[in1])) {
+                               sal_log_record_header_t header;
+                               XEN_GUEST_HANDLE(void) handle =
+                                       *(XEN_GUEST_HANDLE(void)*)&in3;
+
+                               IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s) "
+                                              "no sal_queue entry found.\n",
+                                              rec_name[in1]);
+                               memset(&header, 0, sizeof(header));
+
+                               if (copy_to_guest(handle, &header, 1)) {
+                                       printk("sal_emulator: "
+                                              "SAL_GET_STATE_INFO can't copy "
+                                              "empty header to user: 0x%lx\n",
+                                              in3);
+                               }
+                               status = IA64_SAL_NO_INFORMATION_AVAILABLE;
+                               r9 = 0;
+                               spin_unlock_irqrestore(&sal_queue_lock, flags);
+                               break;
+                       }
+                       e = list_entry(sal_queue[in1].next,
+                                      sal_queue_entry_t, list);
+                       spin_unlock_irqrestore(&sal_queue_lock, flags);
+
+                       IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s <= %s) "
+                                      "on CPU#%d.\n",
+                                      rec_name[e->sal_info_type],
+                                      rec_name[in1], e->cpuid);
+
+                       arg.type = e->sal_info_type;
+                       arg.target = in3;
+                       arg.corrected = !!((in1 != e->sal_info_type) && 
+                                       (e->sal_info_type == SAL_INFO_TYPE_MCA));
+                       arg.domain = current->domain;
+                       arg.status = 0;
+
+                       if (e->cpuid == smp_processor_id()) {
+                               IA64_SAL_DEBUG("SAL_GET_STATE_INFO: local\n");
+                               get_state_info_on(&arg);
+                       } else {
+                               int ret;
+                               IA64_SAL_DEBUG("SAL_GET_STATE_INFO: remote\n");
+                               ret = smp_call_function_single(e->cpuid,
+                                                              get_state_info_on,
+                                                              &arg, 0, 1);
+                               if (ret < 0) {
+                                       printk("SAL_GET_STATE_INFO "
+                                              "smp_call_function_single error:"
+                                              " %d\n", ret);
+                                       arg.ret = 0;
+                                       arg.status =
+                                            IA64_SAL_NO_INFORMATION_AVAILABLE;
+                               }
+                       }
+                       r9 = arg.ret;
+                       status = arg.status;
+                       if (r9 == 0) {
+                               spin_lock_irqsave(&sal_queue_lock, flags);
+                               list_del(&e->list);
+                               spin_unlock_irqrestore(&sal_queue_lock, flags);
+                               xfree(e);
+                       }
+               } else
+                    printk("NON-PRIV DOMAIN CALLED SAL_GET_STATE_INFO\n");
                break;
            case SAL_GET_STATE_INFO_SIZE:
-               /* Return a dummy size.  */
-               status = 0;
-               r9 = 128;
+               r9 = ia64_sal_get_state_info_size(in1);
                break;
            case SAL_CLEAR_STATE_INFO:
-               /* Noop.  */
+               if (current->domain == dom0) {
+                       sal_queue_entry_t *e;
+                       unsigned long flags;
+                       struct smp_call_args_t arg;
+
+                       spin_lock_irqsave(&sal_queue_lock, flags);
+                       if (list_empty(&sal_queue[in1])) {
+                               IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s) "
+                                              "no sal_queue entry found.\n",
+                                              rec_name[in1]);
+                               status = IA64_SAL_NO_INFORMATION_AVAILABLE;
+                               r9 = 0;
+                               spin_unlock_irqrestore(&sal_queue_lock, flags);
+                               break;
+                       }
+                       e = list_entry(sal_queue[in1].next,
+                                      sal_queue_entry_t, list);
+
+                       list_del(&e->list);
+                       spin_unlock_irqrestore(&sal_queue_lock, flags);
+
+                       IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s <= %s) "
+                                      "on CPU#%d.\n",
+                                      rec_name[e->sal_info_type],
+                                      rec_name[in1], e->cpuid);
+                       
+
+                       arg.type = e->sal_info_type;
+                       arg.status = 0;
+                       if (e->cpuid == smp_processor_id()) {
+                               IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: local\n");
+                               clear_state_info_on(&arg);
+                       } else {
+                               int ret;
+                               IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: remote\n");
+                               ret = smp_call_function_single(e->cpuid,
+                                       clear_state_info_on, &arg, 0, 1);
+                               if (ret < 0) {
+                                       printk("sal_emulator: "
+                                              "SAL_CLEAR_STATE_INFO "
+                                              "smp_call_function_single error:"
+                                              " %d\n", ret);
+                                       arg.ret = 0;
+                                       arg.status =
+                                            IA64_SAL_NO_INFORMATION_AVAILABLE;
+                               }
+                       }
+                       r9 = arg.ret;
+                       status = arg.status;
+                       xfree(e);
+               } else
+                    printk("NON-PRIV DOMAIN CALLED SAL_CLEAR_STATE_INFO\n");
                break;
            case SAL_MC_RENDEZ:
                printk("*** CALLED SAL_MC_RENDEZ.  IGNORED...\n");
                break;
            case SAL_MC_SET_PARAMS:
-               printk("*** CALLED SAL_MC_SET_PARAMS.  IGNORED...\n");
+               if (in1 > sizeof(sal_mc_params)/sizeof(sal_mc_params[0]))
+                       BUG();
+               sal_mc_params[in1].param_type   = in1;
+               sal_mc_params[in1].i_or_m       = in2;
+               sal_mc_params[in1].i_or_m_val   = in3;
+               sal_mc_params[in1].timeout      = in4;
+               sal_mc_params[in1].rz_always    = in5;
                break;
            case SAL_CACHE_FLUSH:
                if (1) {
_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel

 

