diff --git a/xen/arch/x86/acpi/cpu_idle.c b/xen/arch/x86/acpi/cpu_idle.c
--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -367,6 +367,28 @@
     return atomic_read(&this_cpu(schedule_data).urgent_count);
 }
 
+static int cpu_has_isr_pending(void)
+{
+    int i;
+
+    for ( i = 1; i < 8; i++ )
+        if ( apic_read(APIC_ISR + (i << 4)) != 0 )
+            return 1;
+    return 0;
+}
+
+int errata_c6_eoi_fix_needed(void)
+{
+    int model = boot_cpu_data.x86_model;
+    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+         boot_cpu_data.x86 == 6 &&
+         ((model == 0x1a || model == 0x1e || model == 0x1f ||
+           model == 0x25 || model == 0x2c || model == 0x2f) &&
+          !directed_eoi_enabled) )
+        return 1;
+    return 0;
+}
+
 static void acpi_processor_idle(void)
 {
     struct acpi_processor_power *power = processor_powers[smp_processor_id()];
@@ -417,6 +439,16 @@
         return;
     }
 
+    /*
+     * An erratum on some Core i7 processors means that the EOI
+     * transaction may not be sent if software enters core C6 during an
+     * interrupt service routine.  So don't enter a deep Cx state while
+     * an ISR is still pending.
+     */
+    if ( cpu_has_apic && errata_c6_eoi_fix_needed() &&
+         cx->type == ACPI_STATE_C3 && cpu_has_isr_pending() )
+        cx = power->safe_state;
+
     power->last_state = cx;
 
     /*
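
For reference, the models gated by errata_c6_eoi_fix_needed() are the Nehalem-era (0x1a/0x1e/0x1f) and Westmere-era (0x25/0x2c/0x2f) family-6 Intel parts, and the workaround only matters when directed EOI is not in use. The following is a hypothetical stand-alone sketch, not part of the patch: it mirrors only the vendor/family/model test in user space via CPUID leaf 1, assuming GCC/Clang's <cpuid.h>; the APIC ISR scan and the directed_eoi_enabled check require hypervisor (ring-0) context and are not reproduced here.

/*
 * Hypothetical user-space sketch (not part of the patch): report whether
 * the running CPU falls in the model list checked by the C6/EOI erratum
 * workaround, using CPUID the same way boot_cpu_data is populated.
 */
#include <stdio.h>
#include <string.h>
#include <cpuid.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx, family, model;
    char vendor[13] = "";

    /* Leaf 0: the vendor string is stored across EBX, EDX, ECX. */
    if ( !__get_cpuid(0, &eax, &ebx, &ecx, &edx) )
        return 1;
    memcpy(vendor + 0, &ebx, 4);
    memcpy(vendor + 4, &edx, 4);
    memcpy(vendor + 8, &ecx, 4);

    /* Leaf 1: base family/model plus the extended model bits [19:16]. */
    if ( !__get_cpuid(1, &eax, &ebx, &ecx, &edx) )
        return 1;
    family = (eax >> 8) & 0xf;            /* family 6 needs no ext. family */
    model  = ((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0);

    /* Nehalem (0x1a/0x1e/0x1f) and Westmere (0x25/0x2c/0x2f) models. */
    if ( !strcmp(vendor, "GenuineIntel") && family == 6 &&
         (model == 0x1a || model == 0x1e || model == 0x1f ||
          model == 0x25 || model == 0x2c || model == 0x2f) )
        printf("family 0x%x model 0x%x: C6/EOI erratum check applies\n",
               family, model);
    else
        printf("family 0x%x model 0x%x: not in the affected model list\n",
               family, model);
    return 0;
}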