[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[RFC PATCH v2 04/34] x86/msr: Convert rdpmcq() into a function
Convert the rdpmcq() macro, which writes the PMC value through an output
argument, into an __always_inline function that returns the value, so
callers read naturally as "val = rdpmcq(counter)".  No functional change
intended.

Signed-off-by: Xin Li (Intel) <xin@xxxxxxxxx>
---
 arch/x86/events/amd/uncore.c              |  2 +-
 arch/x86/events/core.c                    |  2 +-
 arch/x86/events/intel/core.c              |  4 ++--
 arch/x86/events/intel/ds.c                |  2 +-
 arch/x86/include/asm/msr.h                |  5 ++++-
 arch/x86/include/asm/paravirt.h           |  4 +---
 arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 12 ++++++------
 7 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index b9933ab3116c..f2601c662783 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -152,7 +152,7 @@ static void amd_uncore_read(struct perf_event *event)
        if (hwc->event_base_rdpmc < 0)
                rdmsrq(hwc->event_base, new);
        else
-               rdpmcq(hwc->event_base_rdpmc, new);
+               new = rdpmcq(hwc->event_base_rdpmc);
 
        local64_set(&hwc->prev_count, new);
        delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 3da1f0b3446c..0a3939b9965e 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -139,7 +139,7 @@ u64 x86_perf_event_update(struct perf_event *event)
         */
        prev_raw_count = local64_read(&hwc->prev_count);
        do {
-               rdpmcq(hwc->event_base_rdpmc, new_raw_count);
+               new_raw_count = rdpmcq(hwc->event_base_rdpmc);
        } while (!local64_try_cmpxchg(&hwc->prev_count,
                                      &prev_raw_count, new_raw_count));
 
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index ba623e6cae1b..4370d0d86013 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2739,12 +2739,12 @@ static u64 intel_update_topdown_event(struct perf_event *event, int metric_end,
 
        if (!val) {
                /* read Fixed counter 3 */
-               rdpmcq((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots);
+               slots = rdpmcq(3 | INTEL_PMC_FIXED_RDPMC_BASE);
                if (!slots)
                        return 0;
 
                /* read PERF_METRICS */
-               rdpmcq(INTEL_PMC_FIXED_RDPMC_METRICS, metrics);
+               metrics = rdpmcq(INTEL_PMC_FIXED_RDPMC_METRICS);
        } else {
                slots = val[0];
                metrics = val[1];
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 4074567219de..845439fd9c03 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -2279,7 +2279,7 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
        WARN_ON(this_cpu_read(cpu_hw_events.enabled));
 
        prev_raw_count = local64_read(&hwc->prev_count);
-       rdpmcq(hwc->event_base_rdpmc, new_raw_count);
+       new_raw_count = rdpmcq(hwc->event_base_rdpmc);
        local64_set(&hwc->prev_count, new_raw_count);
 
        /*
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index ed32637b1df6..01dc8e61ef97 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -234,7 +234,10 @@ static inline int rdmsrq_safe(u32 msr, u64 *p)
        return err;
 }
 
-#define rdpmcq(counter, val) ((val) = native_read_pmc(counter))
+static __always_inline u64 rdpmcq(int counter)
+{
+       return native_read_pmc(counter);
+}
 
 #endif /* !CONFIG_PARAVIRT_XXL */
 
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 63ca099f8368..590824916394 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -239,13 +239,11 @@ static inline int rdmsrq_safe(unsigned msr, u64 *p)
        return err;
 }
 
-static inline u64 paravirt_read_pmc(int counter)
+static __always_inline u64 rdpmcq(int counter)
 {
        return PVOP_CALL1(u64, cpu.read_pmc, counter);
 }
 
-#define rdpmcq(counter, val) ((val) = paravirt_read_pmc(counter))
-
 static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
 {
        PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index a5e21f44b0ca..276ffab194f6 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -1019,8 +1019,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
         * used in L1 cache, second to capture accurate value that does not
         * include cache misses incurred because of instruction loads.
         */
-       rdpmcq(hit_pmcnum, hits_before);
-       rdpmcq(miss_pmcnum, miss_before);
+       hits_before = rdpmcq(hit_pmcnum);
+       miss_before = rdpmcq(miss_pmcnum);
        /*
         * From SDM: Performing back-to-back fast reads are not guaranteed
         * to be monotonic.
@@ -1028,8 +1028,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
         * before proceeding.
         */
        rmb();
-       rdpmcq(hit_pmcnum, hits_before);
-       rdpmcq(miss_pmcnum, miss_before);
+       hits_before = rdpmcq(hit_pmcnum);
+       miss_before = rdpmcq(miss_pmcnum);
        /*
         * Use LFENCE to ensure all previous instructions are retired
         * before proceeding.
@@ -1051,8 +1051,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
         * before proceeding.
         */
        rmb();
-       rdpmcq(hit_pmcnum, hits_after);
-       rdpmcq(miss_pmcnum, miss_after);
+       hits_after = rdpmcq(hit_pmcnum);
+       miss_after = rdpmcq(miss_pmcnum);
        /*
         * Use LFENCE to ensure all previous instructions are retired
         * before proceeding.
-- 
2.49.0




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.