
[RFC PATCH 1/2] x86, lib, xenpv: Add WBNOINVD helper functions



In line with WBINVD usage, add WBNOINVD helper functions, accounting
for kernels built with and without CONFIG_PARAVIRT_XXL.
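
The intended usage mirrors the existing WBINVD helpers: wbnoinvd()
writes back the local CPU's modified cache lines without invalidating
them, and wbnoinvd_on_all_cpus() does the same on every online CPU
(falling back to a single local wbnoinvd() on !CONFIG_SMP builds). As a
purely illustrative sketch (not part of this patch; the caller name is
hypothetical), a user that needs dirty lines written back to memory but
wants to keep the cached copies valid could do:

	/*
	 * Hypothetical example: write back all modified cache lines on
	 * every CPU, leaving the cache contents valid.  Returns 0,
	 * matching the wbinvd_on_all_cpus() convention.
	 */
	static int example_writeback_all_caches(void)
	{
		return wbnoinvd_on_all_cpus();
	}

On CPUs that do not implement WBNOINVD, the instruction's F3 prefix is
ignored and it executes as a plain WBINVD, so the helpers remain safe
(if more expensive) on older hardware.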

Signed-off-by: Kevin Loughlin <kevinloughlin@xxxxxxxxxx>
---
 arch/x86/include/asm/paravirt.h       |  7 +++++++
 arch/x86/include/asm/paravirt_types.h |  1 +
 arch/x86/include/asm/smp.h            |  7 +++++++
 arch/x86/include/asm/special_insns.h  | 12 +++++++++++-
 arch/x86/kernel/paravirt.c            |  6 ++++++
 arch/x86/lib/cache-smp.c              | 12 ++++++++++++
 arch/x86/xen/enlighten_pv.c           |  1 +
 7 files changed, 45 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index d4eb9e1d61b8..c040af2d8eff 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -187,6 +187,13 @@ static __always_inline void wbinvd(void)
        PVOP_ALT_VCALL0(cpu.wbinvd, "wbinvd", ALT_NOT_XEN);
 }
 
+extern noinstr void pv_native_wbnoinvd(void);
+
+static __always_inline void wbnoinvd(void)
+{
+       PVOP_ALT_VCALL0(cpu.wbnoinvd, "wbnoinvd", ALT_NOT_XEN);
+}
+
 static inline u64 paravirt_read_msr(unsigned msr)
 {
        return PVOP_CALL1(u64, cpu.read_msr, msr);
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 8d4fbe1be489..9a3f38ad1958 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -87,6 +87,7 @@ struct pv_cpu_ops {
 #endif
 
        void (*wbinvd)(void);
+       void (*wbnoinvd)(void);
 
        /* cpuid emulation, mostly so that caps bits can be disabled */
        void (*cpuid)(unsigned int *eax, unsigned int *ebx,
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index ca073f40698f..ecf93a243b83 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -112,6 +112,7 @@ void native_play_dead(void);
 void play_dead_common(void);
 void wbinvd_on_cpu(int cpu);
 int wbinvd_on_all_cpus(void);
+int wbnoinvd_on_all_cpus(void);
 
 void smp_kick_mwait_play_dead(void);
 
@@ -160,6 +161,12 @@ static inline int wbinvd_on_all_cpus(void)
        return 0;
 }
 
+static inline int wbnoinvd_on_all_cpus(void)
+{
+       wbnoinvd();
+       return 0;
+}
+
 static inline struct cpumask *cpu_llc_shared_mask(int cpu)
 {
        return (struct cpumask *)cpumask_of(0);
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index aec6e2d3aa1d..c2d16ddcd79b 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -117,7 +117,12 @@ static inline void wrpkru(u32 pkru)
 
 static __always_inline void native_wbinvd(void)
 {
-       asm volatile("wbinvd": : :"memory");
+       asm volatile("wbinvd" : : : "memory");
+}
+
+static __always_inline void native_wbnoinvd(void)
+{
+       asm volatile("wbnoinvd" : : : "memory");
 }
 
 static inline unsigned long __read_cr4(void)
@@ -173,6 +178,11 @@ static __always_inline void wbinvd(void)
        native_wbinvd();
 }
 
+static __always_inline void wbnoinvd(void)
+{
+       native_wbnoinvd();
+}
+
 #endif /* CONFIG_PARAVIRT_XXL */
 
 static __always_inline void clflush(volatile void *__p)
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index fec381533555..a66b708d8a1e 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -121,6 +121,11 @@ noinstr void pv_native_wbinvd(void)
        native_wbinvd();
 }
 
+noinstr void pv_native_wbnoinvd(void)
+{
+       native_wbnoinvd();
+}
+
 static noinstr void pv_native_safe_halt(void)
 {
        native_safe_halt();
@@ -149,6 +154,7 @@ struct paravirt_patch_template pv_ops = {
        .cpu.write_cr0          = native_write_cr0,
        .cpu.write_cr4          = native_write_cr4,
        .cpu.wbinvd             = pv_native_wbinvd,
+       .cpu.wbnoinvd           = pv_native_wbnoinvd,
        .cpu.read_msr           = native_read_msr,
        .cpu.write_msr          = native_write_msr,
        .cpu.read_msr_safe      = native_read_msr_safe,
diff --git a/arch/x86/lib/cache-smp.c b/arch/x86/lib/cache-smp.c
index 7af743bd3b13..7ac5cca53031 100644
--- a/arch/x86/lib/cache-smp.c
+++ b/arch/x86/lib/cache-smp.c
@@ -20,3 +20,15 @@ int wbinvd_on_all_cpus(void)
        return 0;
 }
 EXPORT_SYMBOL(wbinvd_on_all_cpus);
+
+static void __wbnoinvd(void *dummy)
+{
+       wbnoinvd();
+}
+
+int wbnoinvd_on_all_cpus(void)
+{
+       on_each_cpu(__wbnoinvd, NULL, 1);
+       return 0;
+}
+EXPORT_SYMBOL(wbnoinvd_on_all_cpus);
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index d6818c6cafda..a5c76a6f8976 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -1162,6 +1162,7 @@ static const typeof(pv_ops) xen_cpu_ops __initconst = {
                .write_cr4 = xen_write_cr4,
 
                .wbinvd = pv_native_wbinvd,
+               .wbnoinvd = pv_native_wbnoinvd,
 
                .read_msr = xen_read_msr,
                .write_msr = xen_write_msr,
-- 
2.47.0.338.g60cca15819-goog