[Xen-changelog] [xen-unstable] Merge
# HG changeset patch
# User Ian Jackson <Ian.Jackson@xxxxxxxxxxxxx>
# Date 1277477633 -3600
# Node ID 342431ea89f417c802cbaaf85d38d845335f407d
# Parent  8e0a20f050a14feba1068ce9a0df9cb0effd5452
# Parent  4001ab0d578520c6836c6e8fea066b3a8eba32eb
Merge
---
 xen/arch/ia64/xen/xensetup.c         |    2 -
 xen/arch/x86/acpi/cpufreq/cpufreq.c  |   18 ++++-----
 xen/arch/x86/acpi/cpufreq/powernow.c |   23 ++++-----
 xen/arch/x86/apic.c                  |   56 ++++++++++++++----------------
 xen/arch/x86/cpu/centaur.c           |   19 ++++-----
 xen/arch/x86/cpu/common.c            |    7 +--
 xen/arch/x86/cpu/mcheck/mce.c        |   20 ++++------
 xen/arch/x86/cpu/mcheck/mce_intel.c  |   68 +++++++++++++++++------------------
 xen/arch/x86/cpu/mcheck/vmce.c       |    4 --
 xen/arch/x86/domain.c                |   12 +----
 xen/arch/x86/microcode_amd.c         |    7 +--
 xen/arch/x86/microcode_intel.c       |   25 ++++------
 xen/arch/x86/nmi.c                   |   14 +++---
 xen/arch/x86/oprofile/nmi_int.c      |   16 ++------
 xen/arch/x86/oprofile/op_x86_model.h |    7 ---
 xen/include/asm-x86/msr.h            |    6 +--
 16 files changed, 140 insertions(+), 164 deletions(-)
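The bulk of this changeset mechanically replaces the split 32-bit MSR accessors (rdmsr/wrmsr, which move the EDX:EAX register halves separately) with the 64-bit rdmsrl/wrmsrl. A minimal sketch of the equivalence the conversion relies on; the helper names below are illustrative only, not code from this patch or from Xen's headers:

#include <stdint.h>

/* rdmsr(msr, lo, hi) reads EAX into lo and EDX into hi;
 * rdmsrl(msr, val) reads the same MSR as one 64-bit value. */
static inline uint64_t msr_compose(uint32_t lo, uint32_t hi)
{
    return ((uint64_t)hi << 32) | lo;   /* EDX:EAX -> whole value */
}

/* wrmsrl(msr, val) writes what wrmsr(msr, lo, hi) would, given: */
static inline void msr_split(uint64_t whole, uint32_t *lo, uint32_t *hi)
{
    *lo = (uint32_t)whole;              /* EAX half */
    *hi = (uint32_t)(whole >> 32);      /* EDX half */
}

The accompanying constant changes (0xffff to 0xffffull, 0x02000000 to 0x02000000ULL) serve the same end: once a mask is applied to a uint64_t value, a 64-bit-wide literal makes the width of ~mask explicit rather than relying on integer promotion and sign extension.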
diff -r 8e0a20f050a1 -r 342431ea89f4 xen/arch/ia64/xen/xensetup.c
--- a/xen/arch/ia64/xen/xensetup.c      Fri Jun 25 15:43:50 2010 +0100
+++ b/xen/arch/ia64/xen/xensetup.c      Fri Jun 25 15:53:53 2010 +0100
@@ -559,6 +559,7 @@ skip_move:
 
     late_setup_arch(&cmdline);
 
+    timer_init();
     idle_vcpu[0] = (struct vcpu*) ia64_r13;
     scheduler_init();
 
@@ -569,7 +570,6 @@ skip_move:
     local_irq_disable();
     init_IRQ ();
     init_xen_time(); /* initialise the time */
-    timer_init();
 
     rcu_init();
 
diff -r 8e0a20f050a1 -r 342431ea89f4 xen/arch/x86/acpi/cpufreq/cpufreq.c
--- a/xen/arch/x86/acpi/cpufreq/cpufreq.c       Fri Jun 25 15:43:50 2010 +0100
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c       Fri Jun 25 15:53:53 2010 +0100
@@ -51,7 +51,7 @@ enum {
     SYSTEM_IO_CAPABLE,
 };
 
-#define INTEL_MSR_RANGE         (0xffff)
+#define INTEL_MSR_RANGE         (0xffffull)
 #define CPUID_6_ECX_APERFMPERF_CAPABILITY       (0x1)
 
 static struct acpi_cpufreq_data *drv_data[NR_CPUS];
@@ -137,13 +137,12 @@ static void do_drv_read(void *drvcmd)
 static void do_drv_read(void *drvcmd)
 {
     struct drv_cmd *cmd;
-    u32 h;
 
     cmd = (struct drv_cmd *)drvcmd;
 
     switch (cmd->type) {
     case SYSTEM_INTEL_MSR_CAPABLE:
-        rdmsr(cmd->addr.msr.reg, cmd->val, h);
+        rdmsrl(cmd->addr.msr.reg, cmd->val);
         break;
     case SYSTEM_IO_CAPABLE:
         acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
@@ -157,15 +156,16 @@ static void do_drv_write(void *drvcmd)
 static void do_drv_write(void *drvcmd)
 {
     struct drv_cmd *cmd;
-    u32 lo, hi;
+    uint64_t msr_content;
 
     cmd = (struct drv_cmd *)drvcmd;
 
     switch (cmd->type) {
     case SYSTEM_INTEL_MSR_CAPABLE:
-        rdmsr(cmd->addr.msr.reg, lo, hi);
-        lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
-        wrmsr(cmd->addr.msr.reg, lo, hi);
+        rdmsrl(cmd->addr.msr.reg, msr_content);
+        msr_content = (msr_content & ~INTEL_MSR_RANGE)
+            | (cmd->val & INTEL_MSR_RANGE);
+        wrmsrl(cmd->addr.msr.reg, msr_content);
         break;
     case SYSTEM_IO_CAPABLE:
         acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
@@ -252,8 +252,8 @@ static void read_measured_perf_ctrs(void
 {
     struct perf_pair *readin = _readin;
 
-    rdmsr(MSR_IA32_APERF, readin->aperf.split.lo, readin->aperf.split.hi);
-    rdmsr(MSR_IA32_MPERF, readin->mperf.split.lo, readin->mperf.split.hi);
+    rdmsrl(MSR_IA32_APERF, readin->aperf.whole);
+    rdmsrl(MSR_IA32_MPERF, readin->mperf.whole);
 }
 
 /*
diff -r 8e0a20f050a1 -r 342431ea89f4 xen/arch/x86/acpi/cpufreq/powernow.c
--- a/xen/arch/x86/acpi/cpufreq/powernow.c      Fri Jun 25 15:43:50 2010 +0100
+++ b/xen/arch/x86/acpi/cpufreq/powernow.c      Fri Jun 25 15:53:53 2010 +0100
@@ -50,7 +50,7 @@
 #define MSR_PSTATE_STATUS       0xc0010063 /* Pstate Status MSR */
 #define MSR_PSTATE_CTRL         0xc0010062 /* Pstate control MSR */
 #define MSR_PSTATE_CUR_LIMIT    0xc0010061 /* pstate current limit MSR */
-#define MSR_HWCR_CPBDIS_MASK    0x02000000
+#define MSR_HWCR_CPBDIS_MASK    0x02000000ULL
 
 struct powernow_cpufreq_data {
     struct processor_performance *acpi_data;
@@ -77,15 +77,15 @@ static void transition_pstate(void *drvc
 
     cmd = (struct drv_cmd *) drvcmd;
     if (cmd->turbo != CPUFREQ_TURBO_UNSUPPORTED) {
-        u32 lo, hi;
-        rdmsr(MSR_K8_HWCR, lo, hi);
+        uint64_t msr_content;
+        rdmsrl(MSR_K8_HWCR, msr_content);
         if (cmd->turbo == CPUFREQ_TURBO_ENABLED)
-            lo &= ~MSR_HWCR_CPBDIS_MASK;
+            msr_content &= ~MSR_HWCR_CPBDIS_MASK;
         else
-            lo |= MSR_HWCR_CPBDIS_MASK;
-        wrmsr(MSR_K8_HWCR, lo, hi);
-    }
-    wrmsr(MSR_PSTATE_CTRL, cmd->val, 0);
+            msr_content |= MSR_HWCR_CPBDIS_MASK;
+        wrmsrl(MSR_K8_HWCR, msr_content);
+    }
+    wrmsrl(MSR_PSTATE_CTRL, cmd->val);
 }
 
 static int powernow_cpufreq_target(struct cpufreq_policy *policy,
@@ -194,7 +194,8 @@ static int powernow_cpufreq_cpu_init(str
     struct powernow_cpufreq_data *data;
     unsigned int result = 0;
     struct processor_performance *perf;
-    u32 max_hw_pstate, hi = 0, lo = 0;
+    u32 max_hw_pstate;
+    uint64_t msr_content;
     struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
 
     data = xmalloc(struct powernow_cpufreq_data);
@@ -226,8 +227,8 @@ static int powernow_cpufreq_cpu_init(str
         result = -ENODEV;
         goto err_unreg;
     }
-    rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo);
-    max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
+    rdmsrl(MSR_PSTATE_CUR_LIMIT, msr_content);
+    max_hw_pstate = (msr_content & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
 
     if (perf->control_register.space_id != perf->status_register.space_id) {
         result = -ENODEV;
diff -r 8e0a20f050a1 -r 342431ea89f4 xen/arch/x86/apic.c
--- a/xen/arch/x86/apic.c       Fri Jun 25 15:43:50 2010 +0100
+++ b/xen/arch/x86/apic.c       Fri Jun 25 15:53:53 2010 +0100
@@ -332,23 +332,19 @@ void disconnect_bsp_APIC(int virt_wire_s
 
 void disable_local_APIC(void)
 {
-    unsigned long value;
-
     clear_local_APIC();
 
     /*
      * Disable APIC (implies clearing of registers
      * for 82489DX!).
      */
-    value = apic_read(APIC_SPIV);
-    value &= ~APIC_SPIV_APIC_ENABLED;
-    apic_write_around(APIC_SPIV, value);
+    apic_write_around(APIC_SPIV,
+                      apic_read(APIC_SPIV) & ~APIC_SPIV_APIC_ENABLED);
 
     if (enabled_via_apicbase) {
-        unsigned int l, h;
-        rdmsr(MSR_IA32_APICBASE, l, h);
-        l &= ~MSR_IA32_APICBASE_ENABLE;
-        wrmsr(MSR_IA32_APICBASE, l, h);
+        uint64_t msr_content;
+        rdmsrl(MSR_IA32_APICBASE, msr_content);
+        wrmsrl(MSR_IA32_APICBASE, msr_content & ~MSR_IA32_APICBASE_ENABLE);
     }
 }
 
@@ -708,7 +704,7 @@ int lapic_suspend(void)
 
 int lapic_resume(void)
 {
-    unsigned int l, h;
+    uint64_t msr_content;
     unsigned long flags;
     int maxlvt;
 
@@ -725,10 +721,10 @@ int lapic_resume(void)
      */
     if ( !x2apic_enabled )
     {
-        rdmsr(MSR_IA32_APICBASE, l, h);
-        l &= ~MSR_IA32_APICBASE_BASE;
-        l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
-        wrmsr(MSR_IA32_APICBASE, l, h);
+        rdmsrl(MSR_IA32_APICBASE, msr_content);
+        msr_content &= ~MSR_IA32_APICBASE_BASE;
+        wrmsrl(MSR_IA32_APICBASE,
+               msr_content | MSR_IA32_APICBASE_ENABLE | mp_lapic_addr);
     }
     else
         enable_x2apic();
@@ -817,7 +813,8 @@ custom_param("apic_verbosity", apic_set_
 
 static int __init detect_init_APIC (void)
 {
-    u32 h, l, features;
+    uint64_t msr_content;
+    u32 features;
 
     /* Disabled by kernel option? */
     if (enable_local_apic < 0)
@@ -854,12 +851,14 @@ static int __init detect_init_APIC (void
      * software for Intel P6 or later and AMD K7
      * (Model > 1) or later.
      */
-    rdmsr(MSR_IA32_APICBASE, l, h);
-    if (!(l & MSR_IA32_APICBASE_ENABLE)) {
+    rdmsrl(MSR_IA32_APICBASE, msr_content);
+    if (!(msr_content & MSR_IA32_APICBASE_ENABLE)) {
         printk("Local APIC disabled by BIOS -- reenabling.\n");
-        l &= ~MSR_IA32_APICBASE_BASE;
-        l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
-        wrmsr(MSR_IA32_APICBASE, l, h);
+        msr_content &= ~MSR_IA32_APICBASE_BASE;
+        msr_content |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
+        wrmsrl(MSR_IA32_APICBASE,
+               msr_content | MSR_IA32_APICBASE_ENABLE
+               | APIC_DEFAULT_PHYS_BASE);
         enabled_via_apicbase = 1;
     }
 }
@@ -877,9 +876,9 @@ static int __init detect_init_APIC (void
         mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
 
     /* The BIOS may have set up the APIC at some other address */
-    rdmsr(MSR_IA32_APICBASE, l, h);
-    if (l & MSR_IA32_APICBASE_ENABLE)
-        mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
+    rdmsrl(MSR_IA32_APICBASE, msr_content);
+    if (msr_content & MSR_IA32_APICBASE_ENABLE)
+        mp_lapic_addr = msr_content & MSR_IA32_APICBASE_BASE;
 
     if (nmi_watchdog != NMI_NONE)
         nmi_watchdog = NMI_LOCAL_APIC;
@@ -897,7 +896,7 @@ no_apic:
 
 void enable_x2apic(void)
 {
-    u32 lo, hi;
+    uint64_t msr_content;
 
     if ( smp_processor_id() == 0 )
     {
@@ -925,11 +924,12 @@ void enable_x2apic(void)
         BUG_ON(!x2apic_enabled); /* APs only enable x2apic when BSP did so. */
     }
 
-    rdmsr(MSR_IA32_APICBASE, lo, hi);
-    if ( !(lo & MSR_IA32_APICBASE_EXTD) )
+    rdmsrl(MSR_IA32_APICBASE, msr_content);
+    if ( !(msr_content & MSR_IA32_APICBASE_EXTD) )
     {
-        lo |= MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD;
-        wrmsr(MSR_IA32_APICBASE, lo, 0);
+        msr_content |= MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD;
+        msr_content = (uint32_t)msr_content;
+        wrmsrl(MSR_IA32_APICBASE, msr_content);
         printk("x2APIC mode enabled.\n");
     }
     else
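For reference while reading the apic.c hunks above, these are the MSR_IA32_APICBASE fields being tested and set. The bit positions follow the Intel SDM; the constants and the helper below are an editor's sketch, restated here as assumptions rather than Xen's definitions:

#include <stdint.h>

#define APICBASE_BSP     (1u << 8)    /* this CPU is the bootstrap CPU */
#define APICBASE_EXTD    (1u << 10)   /* x2APIC mode enable            */
#define APICBASE_ENABLE  (1u << 11)   /* xAPIC global enable           */
#define APICBASE_BASE    0xfffff000u  /* physical base, 4K aligned
                                         (bits above 31, up to
                                         MAXPHYADDR, omitted here)     */

/* The pattern used by lapic_resume() and detect_init_APIC() above:
 * drop the old base, then OR in the enable flag and the new base. */
static uint64_t apicbase_enable_at(uint64_t msr_content, uint32_t phys_base)
{
    msr_content &= ~(uint64_t)APICBASE_BASE;
    return msr_content | APICBASE_ENABLE | phys_base;
}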
diff -r 8e0a20f050a1 -r 342431ea89f4 xen/arch/x86/cpu/centaur.c
--- a/xen/arch/x86/cpu/centaur.c        Fri Jun 25 15:43:50 2010 +0100
+++ b/xen/arch/x86/cpu/centaur.c        Fri Jun 25 15:53:53 2010 +0100
@@ -17,7 +17,7 @@
 
 static void __init init_c3(struct cpuinfo_x86 *c)
 {
-    u32 lo, hi;
+    uint64_t msr_content;
 
     /* Test for Centaur Extended Feature Flags presence */
     if (cpuid_eax(0xC0000000) >= 0xC0000001) {
@@ -25,17 +25,17 @@ static void __init init_c3(struct cpuinf
 
         /* enable ACE unit, if present and disabled */
         if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
-            rdmsr (MSR_VIA_FCR, lo, hi);
-            lo |= ACE_FCR;        /* enable ACE unit */
-            wrmsr (MSR_VIA_FCR, lo, hi);
+            rdmsrl(MSR_VIA_FCR, msr_content);
+            /* enable ACE unit */
+            wrmsrl(MSR_VIA_FCR, msr_content | ACE_FCR);
             printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
         }
 
         /* enable RNG unit, if present and disabled */
         if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
-            rdmsr (MSR_VIA_RNG, lo, hi);
-            lo |= RNG_ENABLE;    /* enable RNG unit */
-            wrmsr (MSR_VIA_RNG, lo, hi);
+            rdmsrl(MSR_VIA_RNG, msr_content);
+            /* enable RNG unit */
+            wrmsrl(MSR_VIA_RNG, msr_content | RNG_ENABLE);
             printk(KERN_INFO "CPU: Enabled h/w RNG\n");
         }
 
@@ -47,9 +47,8 @@ static void __init init_c3(struct cpuinf
 
     /* Cyrix III family needs CX8 & PGE explicity enabled. */
     if (c->x86_model >=6 && c->x86_model <= 9) {
-        rdmsr (MSR_VIA_FCR, lo, hi);
-        lo |= (1<<1 | 1<<7);
-        wrmsr (MSR_VIA_FCR, lo, hi);
+        rdmsrl(MSR_VIA_FCR, msr_content);
+        wrmsrl(MSR_VIA_FCR, msr_content | (1ULL << 1 | 1ULL << 7));
         set_bit(X86_FEATURE_CX8, c->x86_capability);
     }
 
diff -r 8e0a20f050a1 -r 342431ea89f4 xen/arch/x86/cpu/common.c
--- a/xen/arch/x86/cpu/common.c Fri Jun 25 15:43:50 2010 +0100
+++ b/xen/arch/x86/cpu/common.c Fri Jun 25 15:53:53 2010 +0100
@@ -324,10 +324,9 @@ static void __cpuinit squash_the_stupid_
 {
     if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
         /* Disable processor serial number */
-        unsigned long lo,hi;
-        rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
-        lo |= 0x200000;
-        wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
+        uint64_t msr_content;
+        rdmsrl(MSR_IA32_BBL_CR_CTL,msr_content);
+        wrmsrl(MSR_IA32_BBL_CR_CTL, msr_content | 0x200000);
         printk(KERN_NOTICE "CPU serial number disabled.\n");
         clear_bit(X86_FEATURE_PN, c->x86_capability);
 
diff -r 8e0a20f050a1 -r 342431ea89f4 xen/arch/x86/cpu/mcheck/mce.c
--- a/xen/arch/x86/cpu/mcheck/mce.c     Fri Jun 25 15:43:50 2010 +0100
+++ b/xen/arch/x86/cpu/mcheck/mce.c     Fri Jun 25 15:53:53 2010 +0100
@@ -723,22 +723,20 @@ int set_poll_bankmask(struct cpuinfo_x86
 /* The perbank ctl/status init is platform specific because of AMD's quirk */
 int mca_cap_init(void)
 {
-    u32 l, h;
-    u64 value;
-
-    rdmsr(MSR_IA32_MCG_CAP, l, h);
-    value = ((u64)h << 32) | l;
-
-    if (l & MCG_CTL_P) /* Control register present ? */
-        wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
-
-    if (nr_mce_banks && (l & MCG_CAP_COUNT) != nr_mce_banks)
+    uint64_t msr_content;
+
+    rdmsrl(MSR_IA32_MCG_CAP, msr_content);
+
+    if (msr_content & MCG_CTL_P) /* Control register present ? */
+        wrmsrl(MSR_IA32_MCG_CTL, 0xffffffffffffffffULL);
+
+    if (nr_mce_banks && (msr_content & MCG_CAP_COUNT) != nr_mce_banks)
     {
         dprintk(XENLOG_WARNING, "Different bank number on cpu %x\n",
                 smp_processor_id());
         return -ENODEV;
     }
-    nr_mce_banks = l & MCG_CAP_COUNT;
+    nr_mce_banks = msr_content & MCG_CAP_COUNT;
 
     /* mcabanks_alloc depends on nr_mcebanks */
     if (!mca_allbanks)
diff -r 8e0a20f050a1 -r 342431ea89f4 xen/arch/x86/cpu/mcheck/mce_intel.c
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c       Fri Jun 25 15:43:50 2010 +0100
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c       Fri Jun 25 15:53:53 2010 +0100
@@ -39,7 +39,7 @@ static void unexpected_thermal_interrupt
 /* P4/Xeon Thermal transition interrupt handler */
 static void intel_thermal_interrupt(struct cpu_user_regs *regs)
 {
-    u32 l, h;
+    uint64_t msr_content;
     unsigned int cpu = smp_processor_id();
     static s_time_t next[NR_CPUS];
 
@@ -48,8 +48,8 @@ static void intel_thermal_interrupt(stru
         return;
 
     next[cpu] = NOW() + MILLISECS(5000);
-    rdmsr(MSR_IA32_THERM_STATUS, l, h);
-    if (l & 0x1) {
+    rdmsrl(MSR_IA32_THERM_STATUS, msr_content);
+    if (msr_content & 0x1) {
         printk(KERN_EMERG "CPU%d: Temperature above threshold\n", cpu);
         printk(KERN_EMERG "CPU%d: Running in modulated clock mode\n",
                cpu);
@@ -75,7 +75,8 @@ fastcall void smp_thermal_interrupt(stru
 /* P4/Xeon Thermal regulation detect and init */
 static void intel_init_thermal(struct cpuinfo_x86 *c)
 {
-    u32 l, h;
+    uint64_t msr_content;
+    uint32_t val;
     int tm2 = 0;
     unsigned int cpu = smp_processor_id();
 
@@ -91,39 +92,38 @@ static void intel_init_thermal(struct cp
      * be some SMM goo which handles it, so we can't even put a handler
      * since it might be delivered via SMI already -zwanem.
      */
-    rdmsr (MSR_IA32_MISC_ENABLE, l, h);
-    h = apic_read(APIC_LVTTHMR);
-    if ((l & (1<<3)) && (h & APIC_DM_SMI)) {
+    rdmsrl(MSR_IA32_MISC_ENABLE, msr_content);
+    val = apic_read(APIC_LVTTHMR);
+    if ((msr_content & (1ULL<<3)) && (val & APIC_DM_SMI)) {
         printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n",cpu);
         return; /* -EBUSY */
     }
 
-    if (cpu_has(c, X86_FEATURE_TM2) && (l & (1 << 13)))
+    if (cpu_has(c, X86_FEATURE_TM2) && (msr_content & (1ULL << 13)))
         tm2 = 1;
 
     /* check whether a vector already exists, temporarily masked? */
-    if (h & APIC_VECTOR_MASK) {
+    if (val & APIC_VECTOR_MASK) {
         printk(KERN_DEBUG "CPU%d: Thermal LVT vector (%#x) already installed\n",
-               cpu, (h & APIC_VECTOR_MASK));
+               cpu, (val & APIC_VECTOR_MASK));
         return; /* -EBUSY */
     }
 
     /* The temperature transition interrupt handler setup */
-    h = THERMAL_APIC_VECTOR;    /* our delivery vector */
-    h |= (APIC_DM_FIXED | APIC_LVT_MASKED);  /* we'll mask till we're ready */
-    apic_write_around(APIC_LVTTHMR, h);
-
-    rdmsr (MSR_IA32_THERM_INTERRUPT, l, h);
-    wrmsr (MSR_IA32_THERM_INTERRUPT, l | 0x03 , h);
+    val = THERMAL_APIC_VECTOR;    /* our delivery vector */
+    val |= (APIC_DM_FIXED | APIC_LVT_MASKED);  /* we'll mask till we're ready */
+    apic_write_around(APIC_LVTTHMR, val);
+
+    rdmsrl(MSR_IA32_THERM_INTERRUPT, msr_content);
+    wrmsrl(MSR_IA32_THERM_INTERRUPT, msr_content | 0x03);
 
     /* ok we're good to go... */
     vendor_thermal_interrupt = intel_thermal_interrupt;
 
-    rdmsr (MSR_IA32_MISC_ENABLE, l, h);
-    wrmsr (MSR_IA32_MISC_ENABLE, l | (1<<3), h);
-
-    l = apic_read (APIC_LVTTHMR);
-    apic_write_around (APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
+    rdmsrl(MSR_IA32_MISC_ENABLE, msr_content);
+    wrmsrl(MSR_IA32_MISC_ENABLE, msr_content | (1ULL<<3));
+
+    apic_write_around(APIC_LVTTHMR, apic_read(APIC_LVTTHMR) & ~APIC_LVT_MASKED);
     if (opt_cpu_info)
         printk(KERN_INFO "CPU%u: Thermal monitoring enabled (%s)\n",
                cpu, tm2 ? "TM2" : "TM1");
@@ -1128,21 +1128,21 @@ static void intel_init_mca(struct cpuinf
 static void intel_init_mca(struct cpuinfo_x86 *c)
 {
     int broadcast, cmci=0, ser=0, ext_num = 0, first;
-    u32 l, h;
+    uint64_t msr_content;
 
     broadcast = mce_is_broadcast(c);
 
-    rdmsr(MSR_IA32_MCG_CAP, l, h);
-
-    if ((l & MCG_CMCI_P) && cpu_has_apic)
+    rdmsrl(MSR_IA32_MCG_CAP, msr_content);
+
+    if ((msr_content & MCG_CMCI_P) && cpu_has_apic)
         cmci = 1;
 
     /* Support Software Error Recovery */
-    if (l & MCG_SER_P)
+    if (msr_content & MCG_SER_P)
         ser = 1;
 
-    if (l & MCG_EXT_P)
-        ext_num = (l >> MCG_EXT_CNT) & 0xff;
+    if (msr_content & MCG_EXT_P)
+        ext_num = (msr_content >> MCG_EXT_CNT) & 0xff;
 
     first = mce_firstbank(c);
@@ -1186,7 +1186,7 @@ static void intel_mce_post_reset(void)
 
 static void intel_init_mce(void)
 {
-    u32 l, h;
+    uint64_t msr_content;
     int i;
 
     intel_mce_post_reset();
@@ -1196,17 +1196,17 @@ static void intel_init_mce(void)
     {
         /* Some banks are shared across cores, use MCi_CTRL to judge whether
          * this bank has been initialized by other cores already.
         */
-        rdmsr(MSR_IA32_MCx_CTL(i), l, h);
-        if (!(l | h))
+        rdmsrl(MSR_IA32_MCx_CTL(i), msr_content);
+        if (!msr_content)
         {
             /* if ctl is 0, this bank is never initialized */
             mce_printk(MCE_VERBOSE, "mce_init: init bank%d\n", i);
-            wrmsr (MSR_IA32_MCx_CTL(i), 0xffffffff, 0xffffffff);
-            wrmsr (MSR_IA32_MCx_STATUS(i), 0x0, 0x0);
+            wrmsrl(MSR_IA32_MCx_CTL(i), 0xffffffffffffffffULL);
+            wrmsrl(MSR_IA32_MCx_STATUS(i), 0x0ULL);
         }
     }
 
     if (firstbank) /* if cmci enabled, firstbank = 0 */
-        wrmsr (MSR_IA32_MC0_STATUS, 0x0, 0x0);
+        wrmsrl(MSR_IA32_MC0_STATUS, 0x0ULL);
 
     x86_mce_vector_register(intel_machine_check);
     mce_recoverable_register(intel_recoverable_scan);
diff -r 8e0a20f050a1 -r 342431ea89f4 xen/arch/x86/cpu/mcheck/vmce.c
--- a/xen/arch/x86/cpu/mcheck/vmce.c    Fri Jun 25 15:43:50 2010 +0100
+++ b/xen/arch/x86/cpu/mcheck/vmce.c    Fri Jun 25 15:53:53 2010 +0100
@@ -439,7 +439,6 @@ int vmce_domain_inject(
 
 int vmce_init(struct cpuinfo_x86 *c)
 {
-    u32 l, h;
     u64 value;
     int i;
 
@@ -460,8 +459,7 @@ int vmce_init(struct cpuinfo_x86 *c)
     if (g_mcg_cap & MCG_CTL_P)
         rdmsrl(MSR_IA32_MCG_CTL, h_mcg_ctl);
 
-    rdmsr(MSR_IA32_MCG_CAP, l, h);
-    value = ((u64)h << 32) | l;
+    rdmsrl(MSR_IA32_MCG_CAP, value);
 
     /* For Guest vMCE usage */
     g_mcg_cap = value & ~MCG_CMCI_P;
diff -r 8e0a20f050a1 -r 342431ea89f4 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Fri Jun 25 15:43:50 2010 +0100
+++ b/xen/arch/x86/domain.c     Fri Jun 25 15:53:53 2010 +0100
@@ -1088,21 +1088,15 @@ static void load_segments(struct vcpu *n
     {
         /* This can only be non-zero if selector is NULL. */
         if ( nctxt->fs_base )
-            wrmsr(MSR_FS_BASE,
-                  nctxt->fs_base,
-                  nctxt->fs_base>>32);
+            wrmsrl(MSR_FS_BASE, nctxt->fs_base);
 
         /* Most kernels have non-zero GS base, so don't bother testing. */
         /* (This is also a serialising instruction, avoiding AMD erratum #88.) */
-        wrmsr(MSR_SHADOW_GS_BASE,
-              nctxt->gs_base_kernel,
-              nctxt->gs_base_kernel>>32);
+        wrmsrl(MSR_SHADOW_GS_BASE, nctxt->gs_base_kernel);
 
         /* This can only be non-zero if selector is NULL. */
         if ( nctxt->gs_base_user )
-            wrmsr(MSR_GS_BASE,
-                  nctxt->gs_base_user,
-                  nctxt->gs_base_user>>32);
+            wrmsrl(MSR_GS_BASE, nctxt->gs_base_user);
 
         /* If in kernel mode then switch the GS bases around. */
         if ( (n->arch.flags & TF_kernel_mode) )
diff -r 8e0a20f050a1 -r 342431ea89f4 xen/arch/x86/microcode_amd.c
--- a/xen/arch/x86/microcode_amd.c      Fri Jun 25 15:43:50 2010 +0100
+++ b/xen/arch/x86/microcode_amd.c      Fri Jun 25 15:53:53 2010 +0100
@@ -47,7 +47,6 @@ static int collect_cpu_info(int cpu, str
 static int collect_cpu_info(int cpu, struct cpu_signature *csig)
 {
     struct cpuinfo_x86 *c = &cpu_data[cpu];
-    uint32_t dummy;
 
     memset(csig, 0, sizeof(*csig));
 
@@ -58,7 +57,7 @@ static int collect_cpu_info(int cpu, str
         return -EINVAL;
     }
 
-    rdmsr(MSR_AMD_PATCHLEVEL, csig->rev, dummy);
+    rdmsrl(MSR_AMD_PATCHLEVEL, csig->rev);
 
     printk(KERN_INFO "microcode: collect_cpu_info: patch_id=0x%x\n",
            csig->rev);
@@ -126,7 +125,7 @@ static int apply_microcode(int cpu)
 {
     unsigned long flags;
     struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu);
-    uint32_t rev, dummy;
+    uint32_t rev;
     struct microcode_amd *mc_amd = uci->mc.mc_amd;
 
     /* We should bind the task to the CPU */
@@ -140,7 +139,7 @@ static int apply_microcode(int cpu)
     wrmsrl(MSR_AMD_PATCHLOADER, (unsigned long)&mc_amd->hdr.data_code);
 
     /* get patch id after patching */
-    rdmsr(MSR_AMD_PATCHLEVEL, rev, dummy);
+    rdmsrl(MSR_AMD_PATCHLEVEL, rev);
 
     spin_unlock_irqrestore(&microcode_update_lock, flags);
diff -r 8e0a20f050a1 -r 342431ea89f4 xen/arch/x86/microcode_intel.c
--- a/xen/arch/x86/microcode_intel.c    Fri Jun 25 15:43:50 2010 +0100
+++ b/xen/arch/x86/microcode_intel.c    Fri Jun 25 15:53:53 2010 +0100
@@ -62,7 +62,7 @@ static int collect_cpu_info(int cpu_num,
 static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
 {
     struct cpuinfo_x86 *c = &cpu_data[cpu_num];
-    unsigned int val[2];
+    uint64_t msr_content;
 
     BUG_ON(cpu_num != smp_processor_id());
 
@@ -81,15 +81,16 @@ static int collect_cpu_info(int cpu_num,
     if ( (c->x86_model >= 5) || (c->x86 > 6) )
     {
         /* get processor flags from MSR 0x17 */
-        rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
-        csig->pf = 1 << ((val[1] >> 18) & 7);
-    }
-
-    wrmsr(MSR_IA32_UCODE_REV, 0, 0);
+        rdmsrl(MSR_IA32_PLATFORM_ID, msr_content);
+        csig->pf = 1 << ((msr_content >> 50) & 7);
+    }
+
+    wrmsrl(MSR_IA32_UCODE_REV, 0x0ULL);
     /* see notes above for revision 1.07.  Apparent chip bug */
     sync_core();
     /* get the current revision from MSR 0x8B */
-    rdmsr(MSR_IA32_UCODE_REV, val[0], csig->rev);
+    rdmsrl(MSR_IA32_UCODE_REV, msr_content);
+    csig->rev = (uint32_t)(msr_content >> 32);
 
     pr_debug("microcode: collect_cpu_info : sig=0x%x, pf=0x%x, rev=0x%x\n",
              csig->sig, csig->pf, csig->rev);
@@ -249,6 +250,7 @@ static int apply_microcode(int cpu)
 static int apply_microcode(int cpu)
 {
     unsigned long flags;
+    uint64_t msr_content;
     unsigned int val[2];
     int cpu_num = raw_smp_processor_id();
     struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu_num);
@@ -263,16 +265,15 @@ static int apply_microcode(int cpu)
     spin_lock_irqsave(&microcode_update_lock, flags);
 
     /* write microcode via MSR 0x79 */
-    wrmsr(MSR_IA32_UCODE_WRITE,
-          (unsigned long) uci->mc.mc_intel->bits,
-          (unsigned long) uci->mc.mc_intel->bits >> 16 >> 16);
-    wrmsr(MSR_IA32_UCODE_REV, 0, 0);
+    wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)uci->mc.mc_intel->bits);
+    wrmsrl(MSR_IA32_UCODE_REV, 0x0ULL);
 
     /* see notes above for revision 1.07.  Apparent chip bug */
     sync_core();
 
     /* get the current revision from MSR 0x8B */
-    rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
+    rdmsrl(MSR_IA32_UCODE_REV, msr_content);
+    val[1] = (uint32_t)(msr_content >> 32);
 
     spin_unlock_irqrestore(&microcode_update_lock, flags);
 
     if ( val[1] != uci->mc.mc_intel->hdr.rev )
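One non-mechanical detail in the microcode_intel.c hunks: the processor-flags field moves from bit 18 of the old high word to bit 50 of the whole MSR value, since 18 + 32 = 50. A standalone sketch of the equivalence (editor's illustration; only the field position within MSR_IA32_PLATFORM_ID, per the Intel SDM, is assumed):

#include <assert.h>
#include <stdint.h>

/* Old form: pf = 1 << ((hi >> 18) & 7), hi being EDX (bits 63:32).
 * New form: pf = 1 << ((whole >> 50) & 7).  These agree because bit
 * (32 + 18) of the whole value is bit 18 of the high half. */
static unsigned int pf_old(uint32_t hi)    { return 1u << ((hi >> 18) & 7); }
static unsigned int pf_new(uint64_t whole) { return 1u << ((whole >> 50) & 7); }

int main(void)
{
    uint64_t msr_content = (uint64_t)5 << 50;   /* platform id 5 */
    assert(pf_old((uint32_t)(msr_content >> 32)) == pf_new(msr_content));
    return 0;
}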
diff -r 8e0a20f050a1 -r 342431ea89f4 xen/arch/x86/nmi.c
--- a/xen/arch/x86/nmi.c        Fri Jun 25 15:43:50 2010 +0100
+++ b/xen/arch/x86/nmi.c        Fri Jun 25 15:53:53 2010 +0100
@@ -276,9 +276,9 @@ static void __pminit setup_p6_watchdog(u
 
 static int __pminit setup_p4_watchdog(void)
 {
-    unsigned int misc_enable, dummy;
-
-    rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
+    uint64_t misc_enable;
+
+    rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
     if (!(misc_enable & MSR_IA32_MISC_ENABLE_PERF_AVAIL))
         return 0;
 
@@ -304,11 +304,11 @@ static int __pminit setup_p4_watchdog(vo
     clear_msr_range(MSR_P4_BPU_CCCR0, 18);
     clear_msr_range(MSR_P4_BPU_PERFCTR0, 18);
 
-    wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
-    wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
+    wrmsrl(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0);
+    wrmsrl(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE);
     write_watchdog_counter("P4_IQ_COUNTER0");
     apic_write(APIC_LVTPC, APIC_DM_NMI);
-    wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
+    wrmsrl(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val);
     return 1;
 }
 
@@ -442,7 +442,7 @@ void nmi_watchdog_tick(struct cpu_user_r
              *   - LVTPC is masked on interrupt and must be
              *     unmasked by the LVTPC handler.
              */
-            wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
+            wrmsrl(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val);
             apic_write(APIC_LVTPC, APIC_DM_NMI);
         }
         else if ( nmi_perfctr_msr == MSR_P6_PERFCTR0 )
diff -r 8e0a20f050a1 -r 342431ea89f4 xen/arch/x86/oprofile/nmi_int.c
--- a/xen/arch/x86/oprofile/nmi_int.c   Fri Jun 25 15:43:50 2010 +0100
+++ b/xen/arch/x86/oprofile/nmi_int.c   Fri Jun 25 15:53:53 2010 +0100
@@ -104,15 +104,11 @@ static void nmi_cpu_save_registers(struc
     unsigned int i;
 
     for (i = 0; i < nr_ctrs; ++i) {
-        rdmsr(counters[i].addr,
-              counters[i].saved.low,
-              counters[i].saved.high);
+        rdmsrl(counters[i].addr, counters[i].value);
     }
 
     for (i = 0; i < nr_ctrls; ++i) {
-        rdmsr(controls[i].addr,
-              controls[i].saved.low,
-              controls[i].saved.high);
+        rdmsrl(controls[i].addr, controls[i].value);
     }
 }
 
@@ -222,15 +218,11 @@ static void nmi_restore_registers(struct
     unsigned int i;
 
     for (i = 0; i < nr_ctrls; ++i) {
-        wrmsr(controls[i].addr,
-              controls[i].saved.low,
-              controls[i].saved.high);
+        wrmsrl(controls[i].addr, controls[i].value);
     }
 
     for (i = 0; i < nr_ctrs; ++i) {
-        wrmsr(counters[i].addr,
-              counters[i].saved.low,
-              counters[i].saved.high);
+        wrmsrl(counters[i].addr, counters[i].value);
     }
 }
diff -r 8e0a20f050a1 -r 342431ea89f4 xen/arch/x86/oprofile/op_x86_model.h
--- a/xen/arch/x86/oprofile/op_x86_model.h      Fri Jun 25 15:43:50 2010 +0100
+++ b/xen/arch/x86/oprofile/op_x86_model.h      Fri Jun 25 15:53:53 2010 +0100
@@ -11,14 +11,9 @@
 #ifndef OP_X86_MODEL_H
 #define OP_X86_MODEL_H
 
-struct op_saved_msr {
-    unsigned int high;
-    unsigned int low;
-};
-
 struct op_msr {
     unsigned long addr;
-    struct op_saved_msr saved;
+    uint64_t value;
 };
 
 struct op_msrs {
diff -r 8e0a20f050a1 -r 342431ea89f4 xen/include/asm-x86/msr.h
--- a/xen/include/asm-x86/msr.h Fri Jun 25 15:43:50 2010 +0100
+++ b/xen/include/asm-x86/msr.h Fri Jun 25 15:53:53 2010 +0100
@@ -37,7 +37,7 @@ static inline void wrmsrl(unsigned int m
 /* rdmsr with exception handling */
 #define rdmsr_safe(msr,val) ({\
     int _rc; \
-    uint32_t val1, val2; \
+    uint32_t lo, hi; \
    __asm__ __volatile__( \
        "1: rdmsr\n2:\n" \
        ".section .fixup,\"ax\"\n" \
@@ -47,9 +47,9 @@ static inline void wrmsrl(unsigned int m
        "   "__FIXUP_ALIGN"\n" \
        "   "__FIXUP_WORD" 1b,3b\n" \
        ".previous\n" \
-       : "=a" (val1), "=d" (val2), "=&r" (_rc) \
+       : "=a" (lo), "=d" (hi), "=&r" (_rc) \
        : "c" (msr), "2" (0), "i" (-EFAULT)); \
-    val = val2 | ((uint64_t)val1 << 32); \
+    val = lo | ((uint64_t)hi << 32); \
    _rc; })
 
 /* wrmsr with exception handling */
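The rdmsr_safe hunk above is more than a rename: rdmsr returns the low 32 bits in EAX ("=a") and the high 32 bits in EDX ("=d"), so the pre-patch expression val2 | ((uint64_t)val1 << 32) shifted the EAX half into the top bits, composing the halves the wrong way around. A standalone sketch of the corrected composition (editor's illustration, not Xen code):

#include <assert.h>
#include <stdint.h>

/* EAX holds bits 31:0 and EDX holds bits 63:32 of the MSR value. */
static uint64_t compose(uint32_t lo, uint32_t hi)
{
    return lo | ((uint64_t)hi << 32);   /* the post-patch expression */
}

int main(void)
{
    /* EDX:EAX = 0x01234567:0x89abcdef composes to 0x0123456789abcdef. */
    assert(compose(0x89abcdefu, 0x01234567u) == 0x0123456789abcdefULL);
    return 0;
}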
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog