[RFC PATCH v2 16/34] x86/msr: Change function type of native_read_msr_safe()

Change the function type of native_read_msr_safe() to

    int native_read_msr_safe(u32 msr, u64 *val)

so that it matches the type of native_write_msr_safe(): the error code
becomes the return value and the MSR value is returned through a
pointer argument.  This also allows rdmsrq_safe() to simply tail-call
native_read_msr_safe() instead of shuffling the error code through a
local variable.
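
For illustration, a call site now checks the returned error code and
reads the MSR value through the pointer argument, a minimal sketch
mirroring the svm_init_erratum_383() hunk below:

    u64 val;

    if (native_read_msr_safe(MSR_AMD64_DC_CFG, &val))
            return;         /* the RDMSR faulted, error is -EIO */

    val |= (1ULL << 47);
    native_write_msr_safe(MSR_AMD64_DC_CFG, val);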

Signed-off-by: Xin Li (Intel) <xin@xxxxxxxxx>
---
 arch/x86/include/asm/msr.h            | 21 +++++++++++----------
 arch/x86/include/asm/paravirt_types.h |  4 ++--
 arch/x86/kvm/svm/svm.c                | 19 +++++++------------
 arch/x86/xen/enlighten_pv.c           |  9 ++++++---
 arch/x86/xen/pmu.c                    | 14 ++++++++------
 5 files changed, 34 insertions(+), 33 deletions(-)

diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index dd1114053173..c955339be9c9 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -135,18 +135,22 @@ static inline u64 native_read_msr(u32 msr)
        return val;
 }
 
-static inline u64 native_read_msr_safe(u32 msr, int *err)
+static inline int native_read_msr_safe(u32 msr, u64 *p)
 {
+       int err;
        DECLARE_ARGS(val, low, high);
 
        asm volatile("1: rdmsr ; xor %[err],%[err]\n"
                     "2:\n\t"
                     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_RDMSR_SAFE, %[err])
-                    : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
+                    : [err] "=r" (err), EAX_EDX_RET(val, low, high)
                     : "c" (msr));
        if (tracepoint_enabled(read_msr))
-               do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
-       return EAX_EDX_VAL(val, low, high);
+               do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), err);
+
+       *p = EAX_EDX_VAL(val, low, high);
+
+       return err;
 }
 
 /* Can be uninlined because referenced by paravirt */
@@ -242,8 +246,8 @@ static inline int wrmsrq_safe(u32 msr, u64 val)
 /* rdmsr with exception handling */
 #define rdmsr_safe(msr, low, high)                             \
 ({                                                             \
-       int __err;                                              \
-       u64 __val = native_read_msr_safe((msr), &__err);        \
+       u64 __val;                                              \
+       int __err = native_read_msr_safe((msr), &__val);        \
        (*low) = (u32)__val;                                    \
        (*high) = (u32)(__val >> 32);                           \
        __err;                                                  \
@@ -251,10 +255,7 @@ static inline int wrmsrq_safe(u32 msr, u64 val)
 
 static inline int rdmsrq_safe(u32 msr, u64 *p)
 {
-       int err;
-
-       *p = native_read_msr_safe(msr, &err);
-       return err;
+       return native_read_msr_safe(msr, p);
 }
 #endif /* !CONFIG_PARAVIRT_XXL */
 
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 91b3423d36ce..d2db38c32bc5 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -96,9 +96,9 @@ struct pv_cpu_ops {
 
        /*
         * Safe MSR operations.
-        * read sets err to 0 or -EIO.  write returns 0 or -EIO.
+        * Returns 0 or -EIO.
         */
-       u64 (*read_msr_safe)(unsigned int msr, int *err);
+       int (*read_msr_safe)(unsigned int msr, u64 *val);
        int (*write_msr_safe)(u32 msr, u64 val);
 
        void (*start_context_switch)(struct task_struct *prev);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 4ef9978dce70..838606f784c9 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -475,15 +475,13 @@ static void svm_inject_exception(struct kvm_vcpu *vcpu)
 
 static void svm_init_erratum_383(void)
 {
-       int err;
        u64 val;
 
        if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
                return;
 
        /* Use _safe variants to not break nested virtualization */
-       val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
-       if (err)
+       if (native_read_msr_safe(MSR_AMD64_DC_CFG, &val))
                return;
 
        val |= (1ULL << 47);
@@ -648,13 +646,12 @@ static int svm_enable_virtualization_cpu(void)
         * erratum is present everywhere).
         */
        if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
-               uint64_t len, status = 0;
+               u64 len, status = 0;
                int err;
 
-               len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
+               err = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &len);
                if (!err)
-                       status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
-                                                     &err);
+                       err = native_read_msr_safe(MSR_AMD64_OSVW_STATUS, &status);
 
                if (err)
                        osvw_status = osvw_len = 0;
@@ -2145,14 +2142,13 @@ static int ac_interception(struct kvm_vcpu *vcpu)
 
 static bool is_erratum_383(void)
 {
-       int err, i;
+       int i;
        u64 value;
 
        if (!erratum_383_found)
                return false;
 
-       value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
-       if (err)
+       if (native_read_msr_safe(MSR_IA32_MC0_STATUS, &value))
                return false;
 
        /* Bit 62 may or may not be set for this mce */
@@ -2165,8 +2161,7 @@ static bool is_erratum_383(void)
        for (i = 0; i < 6; ++i)
                native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0);
 
-       value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
-       if (!err) {
+       if (!native_read_msr_safe(MSR_IA32_MCG_STATUS, &value)) {
                value &= ~(1ULL << 2);
                native_write_msr_safe(MSR_IA32_MCG_STATUS, value);
        }
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 052f68c92111..195e6501a000 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -1095,7 +1095,7 @@ static u64 xen_do_read_msr(unsigned int msr, int *err)
                return val;
 
        if (err)
-               val = native_read_msr_safe(msr, err);
+               *err = native_read_msr_safe(msr, &val);
        else
                val = native_read_msr(msr);
 
@@ -1162,9 +1162,12 @@ static void xen_do_write_msr(u32 msr, u64 val, int *err)
        }
 }
 
-static u64 xen_read_msr_safe(unsigned int msr, int *err)
+static int xen_read_msr_safe(unsigned int msr, u64 *val)
 {
-       return xen_do_read_msr(msr, err);
+       int err;
+
+       *val = xen_do_read_msr(msr, &err);
+       return err;
 }
 
 static int xen_write_msr_safe(u32 msr, u64 val)
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index afb02f43ee3f..ee908dfcff48 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -319,11 +319,12 @@ static u64 xen_amd_read_pmc(int counter)
        uint8_t xenpmu_flags = get_xenpmu_flags();
 
        if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) {
-               uint32_t msr;
-               int err;
+               u32 msr;
+               u64 val;
 
                msr = amd_counters_base + (counter * amd_msr_step);
-               return native_read_msr_safe(msr, &err);
+               native_read_msr_safe(msr, &val);
+               return val;
        }
 
        ctxt = &xenpmu_data->pmu.c.amd;
@@ -340,15 +341,16 @@ static u64 xen_intel_read_pmc(int counter)
        uint8_t xenpmu_flags = get_xenpmu_flags();
 
        if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) {
-               uint32_t msr;
-               int err;
+               u32 msr;
+               u64 val;
 
                if (counter & (1 << INTEL_PMC_TYPE_SHIFT))
                        msr = MSR_CORE_PERF_FIXED_CTR0 + (counter & 0xffff);
                else
                        msr = MSR_IA32_PERFCTR0 + counter;
 
-               return native_read_msr_safe(msr, &err);
+               native_read_msr_safe(msr, &val);
+               return val;
        }
 
        ctxt = &xenpmu_data->pmu.c.intel;
-- 
2.49.0