[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [RFC PATCH v1 07/15] x86/msr: Remove pmu_msr_{read,write}()
Now pmu_msr_{read,write}() just do pmu_msr_chk_emulated(), so remove them and call pmu_msr_chk_emulated() directly. Suggested-by: H. Peter Anvin (Intel) <hpa@xxxxxxxxx> Signed-off-by: Xin Li (Intel) <xin@xxxxxxxxx> --- arch/x86/xen/enlighten_pv.c | 17 ++++++++++------- arch/x86/xen/pmu.c | 24 ++++-------------------- arch/x86/xen/xen-ops.h | 3 +-- 3 files changed, 15 insertions(+), 29 deletions(-) diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 7401cce19939..a047dadf4511 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -1090,8 +1090,9 @@ static void xen_write_cr4(unsigned long cr4) static u64 xen_do_read_msr(unsigned int msr, int *err) { u64 val = 0; /* Avoid uninitialized value for safe variant. */ + bool emulated; - if (pmu_msr_read(msr, &val, err)) + if (pmu_msr_chk_emulated(msr, &val, true, &emulated) && emulated) return val; if (err) @@ -1134,6 +1135,7 @@ static void xen_do_write_msr(unsigned int msr, unsigned int low, unsigned int high, int *err) { u64 val; + bool emulated; switch (msr) { case MSR_FS_BASE: @@ -1163,12 +1165,13 @@ static void xen_do_write_msr(unsigned int msr, unsigned int low, default: val = (u64)high << 32 | low; - if (!pmu_msr_write(msr, val)) { - if (err) - *err = native_write_msr_safe(msr, val); - else - native_write_msr(msr, val); - } + if (pmu_msr_chk_emulated(msr, &val, false, &emulated) && emulated) + return; + + if (err) + *err = native_write_msr_safe(msr, val); + else + native_write_msr(msr, val); } } diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c index 1364cd3fb3ef..4d20503430dd 100644 --- a/arch/x86/xen/pmu.c +++ b/arch/x86/xen/pmu.c @@ -128,7 +128,7 @@ static inline uint32_t get_fam15h_addr(u32 addr) return addr; } -static inline bool is_amd_pmu_msr(unsigned int msr) +static bool is_amd_pmu_msr(u32 msr) { if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) @@ -194,8 +194,7 @@ static bool is_intel_pmu_msr(u32 msr_index, int 
*type, int *index) } } -static bool xen_intel_pmu_emulate(unsigned int msr, u64 *val, int type, - int index, bool is_read) +static bool xen_intel_pmu_emulate(u32 msr, u64 *val, int type, int index, bool is_read) { uint64_t *reg = NULL; struct xen_pmu_intel_ctxt *ctxt; @@ -257,7 +256,7 @@ static bool xen_intel_pmu_emulate(unsigned int msr, u64 *val, int type, return false; } -static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read) +static bool xen_amd_pmu_emulate(u32 msr, u64 *val, bool is_read) { uint64_t *reg = NULL; int i, off = 0; @@ -298,8 +297,7 @@ static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read) return false; } -static bool pmu_msr_chk_emulated(unsigned int msr, uint64_t *val, bool is_read, - bool *emul) +bool pmu_msr_chk_emulated(u32 msr, u64 *val, bool is_read, bool *emul) { int type, index = 0; @@ -313,20 +311,6 @@ static bool pmu_msr_chk_emulated(unsigned int msr, uint64_t *val, bool is_read, return true; } -bool pmu_msr_read(u32 msr, u64 *val, int *err) -{ - bool emulated; - - return pmu_msr_chk_emulated(msr, val, true, &emulated) && emulated; -} - -bool pmu_msr_write(u32 msr, u64 val) -{ - bool emulated; - - return pmu_msr_chk_emulated(msr, &val, false, &emulated) && emulated; -} - static unsigned long long xen_amd_read_pmc(int counter) { struct xen_pmu_amd_ctxt *ctxt; diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 4a0a1d73d8b8..6545661010ce 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -274,8 +274,7 @@ void xen_pmu_finish(int cpu); static inline void xen_pmu_init(int cpu) {} static inline void xen_pmu_finish(int cpu) {} #endif -bool pmu_msr_read(u32 msr, u64 *val, int *err); -bool pmu_msr_write(u32 msr, u64 val); +bool pmu_msr_chk_emulated(u32 msr, u64 *val, bool is_read, bool *emul); int pmu_apic_update(uint32_t reg); unsigned long long xen_read_pmc(int counter); -- 2.49.0
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.