[Xen-devel] [PATCH] x86: support newer Intel CPU models
This just follows what the January 2015 edition of the SDM documents,
with additional clarification from Intel:
- Broadwell models 0x4f and 0x56 don't cross-reference other tables,
  but should be treated like the other Broadwell model (0x3d),
- Xeon Phi model 0x57 lists LASTBRANCH_TOS but not where the actual
  LBR stack is. Being told it's Silvermont based, attach it there.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
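Note (not part of the patch): the model numbers used throughout, e.g. 0x3d,
0x4f, 0x56, 0x57 and 0x4c, are the CPUID display-model values of family 6
parts. The sketch below is illustrative only (the program name and output
format are made up for this note) and shows how such a value is derived from
CPUID leaf 1:

/* cpu-model.c (name made up): print the CPUID display family/model.
 * Build with: gcc -O2 -o cpu-model cpu-model.c   (x86/x86-64 only) */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;
    unsigned int family, model;

    if ( !__get_cpuid(1, &eax, &ebx, &ecx, &edx) )
        return 1;

    family = (eax >> 8) & 0xf;
    model  = (eax >> 4) & 0xf;

    /* For families 0x6 and 0xf the extended-model field extends the model;
     * this is how values such as 0x3d, 0x4f or 0x57 arise. */
    if ( family == 0x6 || family == 0xf )
        model |= ((eax >> 16) & 0xf) << 4;
    if ( family == 0xf )
        family += (eax >> 20) & 0xff;

    printf("family 0x%x model 0x%02x\n", family, model);
    return 0;
}

On a Broadwell-DE part, for instance, this would be expected to report
family 0x6 model 0x56, i.e. one of the models the patch below handles.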
--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -62,6 +62,7 @@
 #define GET_HW_RES_IN_NS(msr, val) \
     do { rdmsrl(msr, val); val = tsc_ticks2ns(val); } while( 0 )
+#define GET_MC6_RES(val) GET_HW_RES_IN_NS(0x664, val) /* Atom E3000 only */
 #define GET_PC2_RES(val) GET_HW_RES_IN_NS(0x60D, val) /* SNB onwards */
 #define GET_PC3_RES(val) GET_HW_RES_IN_NS(0x3F8, val)
 #define GET_PC6_RES(val) GET_HW_RES_IN_NS(0x3F9, val)
@@ -73,6 +74,7 @@
 #define GET_CC3_RES(val) GET_HW_RES_IN_NS(0x3FC, val)
 #define GET_CC6_RES(val) GET_HW_RES_IN_NS(0x3FD, val)
 #define GET_CC7_RES(val) GET_HW_RES_IN_NS(0x3FE, val) /* SNB onwards */
+#define PHI_CC6_RES(val) GET_HW_RES_IN_NS(0x3FF, val) /* Xeon Phi only */
 static void lapic_timer_nop(void) { }
 void (*__read_mostly lapic_timer_off)(void);
@@ -122,6 +124,8 @@ struct acpi_processor_power *__read_most
 struct hw_residencies
 {
+    uint64_t mc0;
+    uint64_t mc6;
     uint64_t pc2;
     uint64_t pc3;
     uint64_t pc4;
@@ -162,8 +166,11 @@ static void do_get_hw_residencies(void *
     case 0x3C:
     case 0x3F:
     case 0x46:
-    /* future */
+    /* Broadwell */
     case 0x3D:
+    case 0x4F:
+    case 0x56:
+    /* future */
     case 0x4E:
         GET_PC2_RES(hw_res->pc2);
         GET_CC7_RES(hw_res->cc7);
@@ -183,6 +190,16 @@ static void do_get_hw_residencies(void *
         GET_CC3_RES(hw_res->cc3);
         GET_CC6_RES(hw_res->cc6);
         break;
+    /* next gen Xeon Phi */
+    case 0x57:
+        GET_CC3_RES(hw_res->mc0); /* abusing GET_CC3_RES */
+        GET_CC6_RES(hw_res->mc6); /* abusing GET_CC6_RES */
+        GET_PC2_RES(hw_res->pc2);
+        GET_PC3_RES(hw_res->pc3);
+        GET_PC6_RES(hw_res->pc6);
+        GET_PC7_RES(hw_res->pc7);
+        PHI_CC6_RES(hw_res->cc6);
+        break;
     /* various Atoms */
     case 0x27:
         GET_PC3_RES(hw_res->pc2); /* abusing GET_PC3_RES */
@@ -191,10 +208,13 @@ static void do_get_hw_residencies(void *
         break;
     /* Silvermont */
     case 0x37:
+        GET_MC6_RES(hw_res->mc6);
     case 0x4A:
     case 0x4D:
     case 0x5A:
     case 0x5D:
+    /* Airmont */
+    case 0x4C:
         GET_PC7_RES(hw_res->pc6); /* abusing GET_PC7_RES */
         GET_CC1_RES(hw_res->cc1);
         GET_CC6_RES(hw_res->cc6);
@@ -218,6 +238,9 @@ static void print_hw_residencies(uint32_
     get_hw_residencies(cpu, &hw_res);
+    if ( hw_res.mc0 | hw_res.mc6 )
+        printk("MC0[%"PRIu64"] MC6[%"PRIu64"]\n",
+               hw_res.mc0, hw_res.mc6);
     printk("PC2[%"PRIu64"] PC%d[%"PRIu64"] PC6[%"PRIu64"] PC7[%"PRIu64"]\n",
            hw_res.pc2,
            hw_res.pc4 ? 4 : 3, hw_res.pc4 ?: hw_res.pc3,
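Note (not part of the patch): the residency MSRs sampled above count in TSC
ticks, which is why GET_HW_RES_IN_NS() feeds them through tsc_ticks2ns().
For comparison, on bare-metal Linux with the msr driver loaded, one of these
counters can be read from user space roughly as below; the MSR index 0x3F9
is the one GET_PC6_RES() uses, the rest of the program is illustrative:

/* read-pc6.c (name made up): dump the raw PC6 residency counter.
 * Needs root and the Linux msr driver (modprobe msr); bare metal only. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
    uint64_t val;
    int fd = open("/dev/cpu/0/msr", O_RDONLY);

    if ( fd < 0 )
    {
        perror("open /dev/cpu/0/msr");
        return 1;
    }

    /* The msr driver uses the file offset as the MSR index;
     * 0x3F9 is the package C6 residency MSR read by GET_PC6_RES(). */
    if ( pread(fd, &val, sizeof(val), 0x3F9) != sizeof(val) )
    {
        perror("pread");
        close(fd);
        return 1;
    }

    printf("PC6 residency: %llu TSC ticks\n", (unsigned long long)val);
    close(fd);
    return 0;
}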
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2055,14 +2055,20 @@ static const struct lbr_info *last_branc
         case 58: case 62:
         /* Haswell */
         case 60: case 63: case 69: case 70:
+        /* Broadwell */
+        case 61: case 79: case 86:
         /* future */
-        case 61: case 78:
+        case 78:
             return nh_lbr;
             break;
         /* Atom */
         case 28: case 38: case 39: case 53: case 54:
         /* Silvermont */
         case 55: case 74: case 77: case 90: case 93:
+        /* next gen Xeon Phi */
+        case 87:
+        /* Airmont */
+        case 76:
             return at_lbr;
             break;
         }
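Note (not part of the patch): nh_lbr and at_lbr selected above are tables
describing which last-branch-record MSRs get exposed to the guest for a
given model; routing model 87 (0x57) to at_lbr is the "attach it there"
from the description. A rough sketch of the shape of such a table follows;
the struct and its field names are made up for this note, and the MSR
indexes follow the SDM's Nehalem-and-later 16-deep layout (the
Silvermont/Atom layout is analogous, just with a different base and depth):

/* Illustration only: the rough shape of a per-model LBR MSR table. */
#include <stdint.h>

struct lbr_range {
    uint32_t base;   /* first MSR index of the range */
    uint32_t count;  /* number of consecutive MSRs   */
};

static const struct lbr_range nehalem_style_lbr[] = {
    { 0x1c9,  1 },   /* MSR_LASTBRANCH_TOS: index of the newest entry */
    { 0x680, 16 },   /* MSR_LASTBRANCH_0_FROM_IP ... _15_FROM_IP      */
    { 0x6c0, 16 },   /* MSR_LASTBRANCH_0_TO_IP   ... _15_TO_IP        */
    { 0, 0 }         /* terminator */
};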
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -859,9 +859,16 @@ int vmx_vpmu_initialise(struct vcpu *v,
         case 0x45:
         case 0x46:
-        /* future: */
+        /* Broadwell */
         case 0x3d:
+        case 0x4f:
+        case 0x56:
+
+        /* future: */
         case 0x4e:
+
+        /* next gen Xeon Phi */
+        case 0x57:
             ret = core2_vpmu_initialise(v, vpmu_flags);
             if ( !ret )
                 vpmu->arch_vpmu_ops = &core2_vpmu_ops;
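Note (not part of the patch): core2_vpmu_initialise() relies on CPUID leaf
0xA (architectural performance monitoring) for the counter layout, so the
newly listed models only benefit here if they report that leaf. For
reference, it can be inspected from user space with a sketch like the
following (illustrative only):

/* pmu-caps.c (name made up): dump the architectural PMU enumeration. */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    if ( !__get_cpuid(0xa, &eax, &ebx, &ecx, &edx) )
        return 1;

    printf("arch PMU version:     %u\n", eax & 0xff);
    printf("general counters:     %u\n", (eax >> 8) & 0xff);
    printf("counter width (bits): %u\n", (eax >> 16) & 0xff);
    printf("fixed counters:       %u\n", edx & 0x1f);
    return 0;
}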
Attachment: x86-Intel-CPU-models.patch