[RFC PATCH v1 02/15] x86/msr: Replace __rdmsr() with native_rdmsrl()
__rdmsr() is the lowest level primitive MSR read API, and its direct
use is NOT preferred.  Use its wrapper function native_rdmsrl()
instead.

No functional change intended.

Signed-off-by: Xin Li (Intel) <xin@xxxxxxxxx>
---
 arch/x86/coco/sev/core.c                  | 2 +-
 arch/x86/events/amd/brs.c                 | 2 +-
 arch/x86/hyperv/hv_vtl.c                  | 4 ++--
 arch/x86/hyperv/ivm.c                     | 2 +-
 arch/x86/include/asm/mshyperv.h           | 2 +-
 arch/x86/include/asm/msr.h                | 5 +++++
 arch/x86/kernel/cpu/common.c              | 2 +-
 arch/x86/kernel/cpu/mce/core.c            | 4 ++--
 arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 2 +-
 arch/x86/kvm/vmx/vmx.c                    | 4 ++--
 arch/x86/mm/mem_encrypt_identity.c        | 4 ++--
 11 files changed, 19 insertions(+), 14 deletions(-)

diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index b0c1a7a57497..d38e6f0ff9c4 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -276,7 +276,7 @@ static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
 
 static inline u64 sev_es_rd_ghcb_msr(void)
 {
-	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
+	return native_rdmsrl(MSR_AMD64_SEV_ES_GHCB);
 }
 
 static __always_inline void sev_es_wr_ghcb_msr(u64 val)
diff --git a/arch/x86/events/amd/brs.c b/arch/x86/events/amd/brs.c
index 4a47f3c108de..3ad7d87b5403 100644
--- a/arch/x86/events/amd/brs.c
+++ b/arch/x86/events/amd/brs.c
@@ -49,7 +49,7 @@ static __always_inline void set_debug_extn_cfg(u64 val)
 
 static __always_inline u64 get_debug_extn_cfg(void)
 {
-	return __rdmsr(MSR_AMD_DBG_EXTN_CFG);
+	return native_rdmsrl(MSR_AMD_DBG_EXTN_CFG);
 }
 
 static bool __init amd_brs_detect(void)
diff --git a/arch/x86/hyperv/hv_vtl.c b/arch/x86/hyperv/hv_vtl.c
index 13242ed8ff16..4a27e475d35f 100644
--- a/arch/x86/hyperv/hv_vtl.c
+++ b/arch/x86/hyperv/hv_vtl.c
@@ -149,11 +149,11 @@ static int hv_vtl_bringup_vcpu(u32 target_vp_index, int cpu, u64 eip_ignored)
 	input->vp_context.rip = rip;
 	input->vp_context.rsp = rsp;
 	input->vp_context.rflags = 0x0000000000000002;
-	input->vp_context.efer = __rdmsr(MSR_EFER);
+	input->vp_context.efer = native_rdmsrl(MSR_EFER);
 	input->vp_context.cr0 = native_read_cr0();
 	input->vp_context.cr3 = __native_read_cr3();
 	input->vp_context.cr4 = native_read_cr4();
-	input->vp_context.msr_cr_pat = __rdmsr(MSR_IA32_CR_PAT);
+	input->vp_context.msr_cr_pat = native_rdmsrl(MSR_IA32_CR_PAT);
 	input->vp_context.idtr.limit = idt_ptr.size;
 	input->vp_context.idtr.base = idt_ptr.address;
 	input->vp_context.gdtr.limit = gdt_ptr.size;
diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
index 77bf05f06b9e..95cf2113a72a 100644
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -110,7 +110,7 @@ u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
 
 static inline u64 rd_ghcb_msr(void)
 {
-	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
+	return native_rdmsrl(MSR_AMD64_SEV_ES_GHCB);
 }
 
 static inline void wr_ghcb_msr(u64 val)
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index bab5ccfc60a7..2ca6ef89530d 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -304,7 +304,7 @@ void hv_set_non_nested_msr(unsigned int reg, u64 value);
 
 static __always_inline u64 hv_raw_get_msr(unsigned int reg)
 {
-	return __rdmsr(reg);
+	return native_rdmsrl(reg);
 }
 
 #else /* CONFIG_HYPERV */
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 27ea8793705d..fb3d7c4cb774 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -106,6 +106,11 @@ do {						\
 	(void)((val2) = (u32)(__val >> 32));	\
 } while (0)
 
+static __always_inline u64 native_rdmsrl(const u32 msr)
+{
+	return __rdmsr(msr);
+}
+
 #define native_wrmsr(msr, low, high)		\
 	__wrmsr(msr, low, high)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 12126adbc3a9..a268db71d944 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -164,7 +164,7 @@ static void ppin_init(struct cpuinfo_x86 *c)
 
 	/* Is the enable bit set? */
 	if (val & 2UL) {
-		c->ppin = __rdmsr(info->msr_ppin);
+		c->ppin = native_rdmsrl(info->msr_ppin);
 		set_cpu_cap(c, info->feature);
 		return;
 	}
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 0eaeaba12df2..0e050af723f5 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -121,7 +121,7 @@ void mce_prep_record_common(struct mce *m)
 {
 	m->cpuid = cpuid_eax(1);
 	m->cpuvendor = boot_cpu_data.x86_vendor;
-	m->mcgcap = __rdmsr(MSR_IA32_MCG_CAP);
+	m->mcgcap = native_rdmsrl(MSR_IA32_MCG_CAP);
 	/* need the internal __ version to avoid deadlocks */
 	m->time = __ktime_get_real_seconds();
 }
@@ -1298,7 +1298,7 @@ static noinstr bool mce_check_crashing_cpu(void)
 	    (crashing_cpu != -1 && crashing_cpu != cpu)) {
 		u64 mcgstatus;
 
-		mcgstatus = __rdmsr(MSR_IA32_MCG_STATUS);
+		mcgstatus = native_rdmsrl(MSR_IA32_MCG_STATUS);
 
 		if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) {
 			if (mcgstatus & MCG_STATUS_LMCES)
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index 55536120c8d1..675fd9f93e33 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -480,7 +480,7 @@ int resctrl_arch_pseudo_lock_fn(void *_plr)
 	 * the buffer and evict pseudo-locked memory read earlier from the
 	 * cache.
 	 */
-	saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL);
+	saved_msr = native_rdmsrl(MSR_MISC_FEATURE_CONTROL);
 	native_wrmsrl(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits);
 	closid_p = this_cpu_read(pqr_state.cur_closid);
 	rmid_p = this_cpu_read(pqr_state.cur_rmid);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 5c5766467a61..2a24060397cd 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -380,7 +380,7 @@ static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
 	if (!vmx->disable_fb_clear)
 		return;
 
-	msr = __rdmsr(MSR_IA32_MCU_OPT_CTRL);
+	msr = native_rdmsrl(MSR_IA32_MCU_OPT_CTRL);
 	msr |= FB_CLEAR_DIS;
 	native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
 	/* Cache the MSR value to avoid reading it later */
@@ -7307,7 +7307,7 @@ void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
 		return;
 
 	if (flags & VMX_RUN_SAVE_SPEC_CTRL)
-		vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL);
+		vmx->spec_ctrl = native_rdmsrl(MSR_IA32_SPEC_CTRL);
 
 	/*
 	 * If the guest/host SPEC_CTRL values differ, restore the host value.
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index 5eecdd92da10..3005b07a0016 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -526,7 +526,7 @@ void __head sme_enable(struct boot_params *bp)
 	me_mask = 1UL << (ebx & 0x3f);
 
 	/* Check the SEV MSR whether SEV or SME is enabled */
-	RIP_REL_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV);
+	RIP_REL_REF(sev_status) = msr = native_rdmsrl(MSR_AMD64_SEV);
 	feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
 
 	/*
@@ -557,7 +557,7 @@ void __head sme_enable(struct boot_params *bp)
 		return;
 
 	/* For SME, check the SYSCFG MSR */
-	msr = __rdmsr(MSR_AMD64_SYSCFG);
+	msr = native_rdmsrl(MSR_AMD64_SYSCFG);
 	if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
 		return;
 	}
-- 
2.49.0
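
As background for readers following along without the kernel tree, here is a
minimal, self-contained user-space sketch of the conversion this patch
performs: call sites move from the lowest-level __rdmsr() primitive to the
typed native_rdmsrl() wrapper that the asm/msr.h hunk above introduces. The
stubbed __rdmsr() body and the value it returns are illustrative assumptions
only; in the kernel the primitive is inline asm around the RDMSR instruction.

/* sketch.c - illustrates the __rdmsr() -> native_rdmsrl() call pattern */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

/*
 * Stand-in for the kernel's lowest-level MSR read primitive. The real
 * __rdmsr() executes the RDMSR instruction; here it simply echoes the
 * MSR index so the example can run in user space.
 */
static inline u64 __rdmsr(const u32 msr)
{
	return (u64)msr;
}

/*
 * The wrapper added to <asm/msr.h> by this patch: the preferred way to
 * read an MSR natively as a single u64, instead of calling __rdmsr()
 * directly.
 */
static inline u64 native_rdmsrl(const u32 msr)
{
	return __rdmsr(msr);
}

int main(void)
{
	const u32 msr_ia32_spec_ctrl = 0x48; /* real MSR index; value below is simulated */

	/* Before this patch a caller would write: val = __rdmsr(msr_ia32_spec_ctrl); */
	u64 val = native_rdmsrl(msr_ia32_spec_ctrl);

	printf("MSR 0x%x -> 0x%llx\n", (unsigned)msr_ia32_spec_ctrl,
	       (unsigned long long)val);
	return 0;
}

Because native_rdmsrl() is declared __always_inline in the header, the wrapper
adds no run-time cost; call sites simply gain a single, consistently named
entry point for native MSR reads.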