--- sle11sp3.orig/arch/x86/include/mach-xen/asm/hypervisor.h	2012-10-19 16:11:54.000000000 +0200
+++ sle11sp3/arch/x86/include/mach-xen/asm/hypervisor.h	2014-01-15 13:02:20.000000000 +0100
@@ -235,6 +235,9 @@ static inline int gnttab_post_map_adjust
 #ifdef CONFIG_XEN
 #define is_running_on_xen() 1
 extern char hypercall_page[PAGE_SIZE];
+#define in_hypercall(regs) (!user_mode_vm(regs) && \
+	(regs)->ip >= (unsigned long)hypercall_page && \
+	(regs)->ip < (unsigned long)hypercall_page + PAGE_SIZE)
 #else
 extern char *hypercall_stubs;
 #define is_running_on_xen() (!!hypercall_stubs)
--- sle11sp3.orig/arch/x86/kernel/entry_32-xen.S	2012-10-19 16:10:09.000000000 +0200
+++ sle11sp3/arch/x86/kernel/entry_32-xen.S	2014-01-16 10:49:07.000000000 +0100
@@ -980,6 +980,20 @@ ENTRY(hypervisor_callback)
 	call evtchn_do_upcall
 	add $4,%esp
 	CFI_ADJUST_CFA_OFFSET -4
+#ifndef CONFIG_PREEMPT
+	test %al,%al
+	jz ret_from_intr
+	GET_THREAD_INFO(%edx)
+	cmpl $0,TI_preempt_count(%edx)
+	jnz ret_from_intr
+	testl $_TIF_NEED_RESCHED,TI_flags(%edx)
+	jz ret_from_intr
+	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
+	jz ret_from_intr
+	movb $0,PER_CPU_VAR(privcmd_hcall)
+	call preempt_schedule_irq
+	movb $1,PER_CPU_VAR(privcmd_hcall)
+#endif
 	jmp ret_from_intr
 	CFI_ENDPROC
 
--- sle11sp3.orig/arch/x86/kernel/entry_64-xen.S	2011-10-06 13:06:38.000000000 +0200
+++ sle11sp3/arch/x86/kernel/entry_64-xen.S	2014-01-16 10:52:27.000000000 +0100
@@ -982,6 +982,20 @@ ENTRY(do_hypervisor_callback)	# do_hyp
 	popq %rsp
 	CFI_DEF_CFA_REGISTER rsp
 	decl PER_CPU_VAR(irq_count)
+#ifndef CONFIG_PREEMPT
+	test %al,%al
+	jz error_exit
+	GET_THREAD_INFO(%rdx)
+	cmpl $0,TI_preempt_count(%rdx)
+	jnz error_exit
+	bt $TIF_NEED_RESCHED,TI_flags(%rdx)
+	jnc error_exit
+	bt $9,EFLAGS-ARGOFFSET(%rsp)
+	jnc error_exit
+	movb $0,PER_CPU_VAR(privcmd_hcall)
+	call preempt_schedule_irq
+	movb $1,PER_CPU_VAR(privcmd_hcall)
+#endif
 	jmp error_exit
 	CFI_ENDPROC
 END(do_hypervisor_callback)
--- sle11sp3.orig/drivers/xen/core/evtchn.c	2013-02-05 17:47:43.000000000 +0100
+++ sle11sp3/drivers/xen/core/evtchn.c	2014-01-15 13:42:02.000000000 +0100
@@ -379,7 +379,14 @@ static DEFINE_PER_CPU(unsigned int, curr
 #endif
 
 /* NB. Interrupts are disabled on entry. */
-asmlinkage void __irq_entry evtchn_do_upcall(struct pt_regs *regs)
+asmlinkage
+#ifdef CONFIG_PREEMPT
+void
+#define return(x) return
+#else
+bool
+#endif
+__irq_entry evtchn_do_upcall(struct pt_regs *regs)
 {
 	unsigned long l1, l2;
 	unsigned long masked_l1, masked_l2;
@@ -393,7 +400,7 @@ asmlinkage void __irq_entry evtchn_do_up
 		__this_cpu_or(upcall_state, UPC_NESTED_LATCH);
 		/* Avoid a callback storm when we reenable delivery. */
 		vcpu_info_write(evtchn_upcall_pending, 0);
-		return;
+		return(false);
 	}
 
 	old_regs = set_irq_regs(regs);
@@ -511,6 +518,9 @@ asmlinkage void __irq_entry evtchn_do_up
 	irq_exit();
 	xen_spin_irq_exit();
 	set_irq_regs(old_regs);
+
+	return(__this_cpu_read(privcmd_hcall) && in_hypercall(regs));
+#undef return
 }
 
 static int find_unbound_irq(unsigned int node, struct irq_cfg **pcfg,
--- sle11sp3.orig/drivers/xen/privcmd/privcmd.c	2012-12-12 12:05:51.000000000 +0100
+++ sle11sp3/drivers/xen/privcmd/privcmd.c	2014-01-16 10:01:23.000000000 +0100
@@ -23,6 +23,18 @@
 #include <xen/public/privcmd.h>
 #include <xen/xen_proc.h>
 #include <xen/features.h>
+#include <xen/evtchn.h>
+
+#ifndef CONFIG_PREEMPT
+DEFINE_PER_CPU(bool, privcmd_hcall);
+#endif
+
+static inline void _privcmd_hcall(bool state)
+{
+#ifndef CONFIG_PREEMPT
+	this_cpu_write(privcmd_hcall, state);
+#endif
+}
 
 static struct proc_dir_entry *privcmd_intf;
 static struct proc_dir_entry *capabilities_intf;
@@ -97,6 +109,7 @@ static long privcmd_ioctl(struct file *f
 		ret = -ENOSYS;
 		if (hypercall.op >= (PAGE_SIZE >> 5))
 			break;
+		_privcmd_hcall(true);
 		ret = _hypercall(long, (unsigned int)hypercall.op,
 				 (unsigned long)hypercall.arg[0],
 				 (unsigned long)hypercall.arg[1],
@@ -104,8 +117,10 @@ static long privcmd_ioctl(struct file *f
 				 (unsigned long)hypercall.arg[3],
 				 (unsigned long)hypercall.arg[4]);
 #else
+		_privcmd_hcall(true);
 		ret = privcmd_hypercall(&hypercall);
 #endif
+		_privcmd_hcall(false);
 	}
 	break;
 
--- sle11sp3.orig/include/xen/evtchn.h	2011-12-09 15:38:45.000000000 +0100
+++ sle11sp3/include/xen/evtchn.h	2014-01-15 14:32:14.000000000 +0100
@@ -143,7 +143,13 @@ void irq_resume(void);
 #endif
 
 /* Entry point for notifications into Linux subsystems. */
-asmlinkage void evtchn_do_upcall(struct pt_regs *regs);
+asmlinkage
+#ifdef CONFIG_PREEMPT
+void
+#else
+bool
+#endif
+evtchn_do_upcall(struct pt_regs *regs);
 
 /* Mark a PIRQ as unavailable for dynamic allocation. */
 void evtchn_register_pirq(int irq);
@@ -221,6 +227,8 @@ void notify_remote_via_ipi(unsigned int
 void clear_ipi_evtchn(void);
 #endif
 
+DECLARE_PER_CPU(bool, privcmd_hcall);
+
 #if defined(CONFIG_XEN_SPINLOCK_ACQUIRE_NESTING) \
     && CONFIG_XEN_SPINLOCK_ACQUIRE_NESTING
 void xen_spin_irq_enter(void);
--- sle11sp3.orig/kernel/sched.c	2014-01-10 14:11:39.000000000 +0100
+++ sle11sp3/kernel/sched.c	2014-01-16 11:05:05.000000000 +0100
@@ -4690,6 +4690,9 @@ asmlinkage void __sched notrace preempt_
 }
 EXPORT_SYMBOL(preempt_schedule);
 
+#endif
+#if defined(CONFIG_PREEMPT) || defined(CONFIG_XEN)
+
 /*
  * this is the entry point to schedule() from kernel preemption
  * off of irq context.
@@ -4699,6 +4702,14 @@ EXPORT_SYMBOL(preempt_schedule);
 asmlinkage void __sched preempt_schedule_irq(void)
 {
 	struct thread_info *ti = current_thread_info();
+#ifdef CONFIG_XEN//temp
+static DEFINE_PER_CPU(unsigned long, cnt);
+static DEFINE_PER_CPU(unsigned long, thr);
+if(__this_cpu_inc_return(cnt) > __this_cpu_read(thr)) {
+	__this_cpu_or(thr, __this_cpu_read(cnt));
+	printk("psi[%02u] %08x:%d #%lx\n", raw_smp_processor_id(), ti->preempt_count, need_resched(), __this_cpu_read(cnt));
+}
+#endif
 	/* Catch callers which need to be fixed */
 	BUG_ON(ti->preempt_count || !irqs_disabled());
 