[PATCH for-4.19] xen/arch: Centralise __read_mostly and __ro_after_init

Having these in cache.h is inherited from Linux, but cache.h is an
inappropriate place for them to live.

__read_mostly is an optimisation related to data placement in order to avoid
having shared data in cachelines that are likely to be written to, but it
really is just a section of the linked image separating data by usage
patterns; it has nothing to do with cache sizes or flushing logic.
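
As a purely illustrative sketch (the variable name is hypothetical, not part
of this patch), data which is written once during setup and then only read on
hot paths would be annotated like:

    static unsigned int __read_mostly foo_batch_size = 32;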

Worse, __ro_after_init was only in xen/cache.h because __read_mostly was in
arch/cache.h, and has literally nothing whatsoever to do with caches.
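
Again as a hypothetical sketch, a value chosen while parsing the command line
during boot, and which must never change afterwards, would be annotated like:

    static unsigned int __ro_after_init opt_foo_limit = 8;

On architectures which wire up the section, a post-boot write to opt_foo_limit
takes a runtime access fault rather than silently changing the value.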

Move the definitions into xen/sections.h, which in particular means that
RISC-V doesn't need to repeat the problematic pattern.  Take the opportunity
to provide short descriptions of what these are used for.

For now, leave TODO comments next to the other identical definitions.  It
turns out that unpicking cache.h is more complicated than it appears because a
number of files use it for transitive dependencies.
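
As a hypothetical sketch of that follow-up work, a file which currently pulls
in cache.h purely for the annotation would eventually include the new header
instead:

    #include <xen/sections.h>

    static unsigned int __read_mostly foo;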

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
CC: Julien Grall <julien@xxxxxxx>
CC: Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>
CC: Bertrand Marquis <bertrand.marquis@xxxxxxx>
CC: Michal Orzel <michal.orzel@xxxxxxx>
CC: Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
CC: Shawn Anastasio <sanastasio@xxxxxxxxxxxxxxxxxxxxx>

For 4.19.  This is to help the RISC-V "full build of Xen" effort without
introducing a pattern that's going to require extra effort to undo after the
fact.
---
 xen/arch/arm/include/asm/cache.h |  1 +
 xen/arch/ppc/include/asm/cache.h |  1 +
 xen/arch/x86/include/asm/cache.h |  1 +
 xen/include/xen/cache.h          |  1 +
 xen/include/xen/sections.h       | 21 +++++++++++++++++++++
 5 files changed, 25 insertions(+)

diff --git a/xen/arch/arm/include/asm/cache.h b/xen/arch/arm/include/asm/cache.h
index 240b6ae0eaa3..029b2896fb3e 100644
--- a/xen/arch/arm/include/asm/cache.h
+++ b/xen/arch/arm/include/asm/cache.h
@@ -6,6 +6,7 @@
 #define L1_CACHE_SHIFT  (CONFIG_ARM_L1_CACHE_SHIFT)
 #define L1_CACHE_BYTES  (1 << L1_CACHE_SHIFT)
 
+/* TODO: Phase out the use of this via cache.h */
 #define __read_mostly __section(".data.read_mostly")
 
 #endif
diff --git a/xen/arch/ppc/include/asm/cache.h b/xen/arch/ppc/include/asm/cache.h
index 0d7323d7892f..13c0bf3242c8 100644
--- a/xen/arch/ppc/include/asm/cache.h
+++ b/xen/arch/ppc/include/asm/cache.h
@@ -3,6 +3,7 @@
 #ifndef _ASM_PPC_CACHE_H
 #define _ASM_PPC_CACHE_H
 
+/* TODO: Phase out the use of this via cache.h */
 #define __read_mostly __section(".data.read_mostly")
 
 #endif /* _ASM_PPC_CACHE_H */
diff --git a/xen/arch/x86/include/asm/cache.h b/xen/arch/x86/include/asm/cache.h
index e4770efb22b9..956c05493e23 100644
--- a/xen/arch/x86/include/asm/cache.h
+++ b/xen/arch/x86/include/asm/cache.h
@@ -9,6 +9,7 @@
 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
 
+/* TODO: Phase out the use of this via cache.h */
 #define __read_mostly __section(".data.read_mostly")
 
 #ifndef __ASSEMBLY__
diff --git a/xen/include/xen/cache.h b/xen/include/xen/cache.h
index f52a0aedf768..55456823c543 100644
--- a/xen/include/xen/cache.h
+++ b/xen/include/xen/cache.h
@@ -15,6 +15,7 @@
 #define __cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
 #endif
 
+/* TODO: Phase out the use of this via cache.h */
 #define __ro_after_init __section(".data.ro_after_init")
 
 #endif /* __LINUX_CACHE_H */
diff --git a/xen/include/xen/sections.h b/xen/include/xen/sections.h
index b6cb5604c285..6d4db2b38f0f 100644
--- a/xen/include/xen/sections.h
+++ b/xen/include/xen/sections.h
@@ -3,9 +3,30 @@
 #ifndef __XEN_SECTIONS_H__
 #define __XEN_SECTIONS_H__
 
+#include <xen/compiler.h>
+
 /* SAF-0-safe */
 extern char __init_begin[], __init_end[];
 
+/*
+ * Some data is expected to be written very rarely (if at all).
+ *
+ * For performance reasons it is helpful to group such data in the build, to
+ * avoid the linker placing it adjacent to often-written data.
+ */
+#define __read_mostly __section(".data.read_mostly")
+
+/*
+ * Some data should be chosen during boot and be immutable thereafter.
+ *
+ * Variables annotated with __ro_after_init will become read-only after boot
+ * and suffer a runtime access fault if modified.
+ *
+ * For architectures/platforms which haven't implemented support, these
+ * variables will be treated as regular mutable data.
+ */
+#define __ro_after_init __section(".data.ro_after_init")
+
 #endif /* !__XEN_SECTIONS_H__ */
 /*
  * Local variables:

base-commit: 8b4243a9b560c89bb259db5a27832c253d4bebc7
-- 
2.39.2