[PATCH 8/8] pdx: Add CONFIG_HAS_PDX_COMPRESSION as a Kconfig option
Adds a new compile-time option to allow disabling PDX compression, and
compiles out compression-related code/data. It also short-circuits the
pdx<->pfn conversion macros and creates stubs for the masking functions.
Signed-off-by: Alejandro Vallejo <alejandro.vallejo@xxxxxxxxx>
---
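As a note for reviewers, here is a minimal, self-contained sketch of what
the pdx<->pfn conversion does when compression is enabled. The mask/shift
names mirror the hypervisor's, and the formula roughly mirrors the existing
macros, but the concrete values below are made up purely for illustration.
With CONFIG_HAS_PDX_COMPRESSION=n both helpers collapse to the identity,
which is what the new stubs in pdx.h provide.

  /* Illustrative sketch only: fold an 8-bit address-space hole
   * (bits 20-27) out of the page index. Values are arbitrary. */
  #include <assert.h>
  #include <stdio.h>

  static const unsigned long pfn_pdx_bottom_mask = (1UL << 20) - 1; /* bits kept in place */
  static const unsigned long pfn_top_mask = ~((1UL << 28) - 1);     /* bits shifted down  */
  static const unsigned int  pfn_pdx_hole_shift = 8;                /* width of the hole  */

  static unsigned long pfn_to_pdx(unsigned long pfn)
  {
      return (pfn & pfn_pdx_bottom_mask) |
             ((pfn & pfn_top_mask) >> pfn_pdx_hole_shift);
  }

  static unsigned long pdx_to_pfn(unsigned long pdx)
  {
      return (pdx & pfn_pdx_bottom_mask) |
             ((pdx << pfn_pdx_hole_shift) & pfn_top_mask);
  }

  int main(void)
  {
      unsigned long pfn = 0x10000042UL; /* frame above the hole */
      unsigned long pdx = pfn_to_pdx(pfn);

      printf("pfn %#lx -> pdx %#lx\n", pfn, pdx);
      assert(pdx_to_pfn(pdx) == pfn);   /* round trip is lossless */
      return 0;
  }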
xen/arch/x86/domain.c | 19 +++++++++++++------
xen/common/Kconfig | 10 ++++++++++
xen/common/pdx.c | 15 +++++++++++----
xen/include/xen/pdx.h | 34 ++++++++++++++++++++++++++++++++++
4 files changed, 68 insertions(+), 10 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 39c2153165..c818ccc4d5 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -458,7 +458,7 @@ void domain_cpu_policy_changed(struct domain *d)
}
}
-#ifndef CONFIG_BIGMEM
+#if !defined(CONFIG_BIGMEM) && defined(CONFIG_HAS_PDX_COMPRESSION)
/*
* The hole may be at or above the 44-bit boundary, so we need to determine
* the total bit count until reaching 32 significant (not squashed out) bits
@@ -485,13 +485,20 @@ static unsigned int __init noinline _domain_struct_bits(void)
struct domain *alloc_domain_struct(void)
{
struct domain *d;
-#ifdef CONFIG_BIGMEM
- const unsigned int bits = 0;
-#else
+
/*
- * We pack the PDX of the domain structure into a 32-bit field within
- * the page_info structure. Hence the MEMF_bits() restriction.
+ * Without CONFIG_BIGMEM, we pack the PDX of the domain structure into
+ * a 32-bit field within the page_info structure. Hence the MEMF_bits()
+ * restriction. With PDX compression in place the number of bits must
+ * be calculated at runtime, but it's fixed otherwise.
+ *
+ * On systems with CONFIG_BIGMEM there's no packing, and so there's no
+ * such restriction.
*/
+#if defined(CONFIG_BIGMEM) || !defined(CONFIG_HAS_PDX_COMPRESSION)
+ const unsigned int bits = IS_ENABLED(CONFIG_BIGMEM) ? 0 :
+ 32 + PAGE_SHIFT;
+#else
static unsigned int __read_mostly bits;
if ( unlikely(!bits) )
diff --git a/xen/common/Kconfig b/xen/common/Kconfig
index 40ec63c4b2..6605a60ff7 100644
--- a/xen/common/Kconfig
+++ b/xen/common/Kconfig
@@ -23,6 +23,16 @@ config GRANT_TABLE
If unsure, say Y.
+config HAS_PDX_COMPRESSION
+ bool "PDX (Page inDeX) compression support"
+ default ARM
+ help
+ PDX compression is a technique that allows the hypervisor to
+ represent physical addresses in a very space-efficient manner.
+ This is very helpful in reducing memory wastage on systems with
+ memory banks whose base addresses are far apart, but it carries
+ a performance cost.
+
config ALTERNATIVE_CALL
bool
diff --git a/xen/common/pdx.c b/xen/common/pdx.c
index cc963a3cb3..d0fac9d7c7 100644
--- a/xen/common/pdx.c
+++ b/xen/common/pdx.c
@@ -31,11 +31,15 @@ unsigned long __read_mostly pdx_group_valid[BITS_TO_LONGS(
bool __mfn_valid(unsigned long mfn)
{
- if ( unlikely(evaluate_nospec(mfn >= max_page)) )
+ bool invalid = mfn >= max_page;
+#ifdef CONFIG_HAS_PDX_COMPRESSION
+ invalid |= mfn & pfn_hole_mask;
+#endif
+
+ if ( unlikely(evaluate_nospec(invalid)) )
return false;
- return likely(!(mfn & pfn_hole_mask)) &&
- likely(test_bit(pfn_to_pdx(mfn) / PDX_GROUP_COUNT,
- pdx_group_valid));
+
+ return test_bit(pfn_to_pdx(mfn) / PDX_GROUP_COUNT, pdx_group_valid);
}
void set_pdx_range(unsigned long smfn, unsigned long emfn)
@@ -49,6 +53,8 @@ void set_pdx_range(unsigned long smfn, unsigned long emfn)
__set_bit(idx, pdx_group_valid);
}
+#ifdef CONFIG_HAS_PDX_COMPRESSION
+
/*
* Diagram to make sense of the following variables. The masks and shifts
* are done on mfn values in order to convert to/from pdx:
@@ -178,6 +184,7 @@ void __init pfn_pdx_hole_setup(unsigned long mask)
pfn_top_mask = ~(pfn_pdx_bottom_mask | pfn_hole_mask);
ma_top_mask = pfn_top_mask << PAGE_SHIFT;
}
+#endif /* CONFIG_HAS_PDX_COMPRESSION */
/*
diff --git a/xen/include/xen/pdx.h b/xen/include/xen/pdx.h
index ce27177b56..5531890d1c 100644
--- a/xen/include/xen/pdx.h
+++ b/xen/include/xen/pdx.h
@@ -98,6 +98,8 @@ bool __mfn_valid(unsigned long mfn);
#define mfn_to_pdx(mfn) pfn_to_pdx(mfn_x(mfn))
#define pdx_to_mfn(pdx) _mfn(pdx_to_pfn(pdx))
+#ifdef CONFIG_HAS_PDX_COMPRESSION
+
extern unsigned long pfn_pdx_bottom_mask, ma_va_bottom_mask;
extern unsigned int pfn_pdx_hole_shift;
extern unsigned long pfn_hole_mask;
@@ -225,7 +227,39 @@ static inline uint64_t directmapoff_to_maddr(unsigned long offset)
* position marks a potentially compressible bit.
*/
void pfn_pdx_hole_setup(unsigned long mask);
+#else /* CONFIG_HAS_PDX_COMPRESSION */
+
+/* Without PDX compression we can skip some computations */
+
+/* pdx<->pfn == identity */
+#define pdx_to_pfn(x) (x)
+#define pfn_to_pdx(x) (x)
+
+/* directmap is indexed by maddr */
+#define maddr_to_directmapoff(x) (x)
+#define directmapoff_to_maddr(x) (x)
+
+static inline bool pdx_is_region_compressible(unsigned long smfn,
+ unsigned long emfn)
+{
+ return true;
+}
+
+static inline uint64_t pdx_init_mask(uint64_t base_addr)
+{
+ return 0;
+}
+
+static inline uint64_t pdx_region_mask(uint64_t base, uint64_t len)
+{
+ return 0;
+}
+
+static inline void pfn_pdx_hole_setup(unsigned long mask)
+{
+}
+#endif /* CONFIG_HAS_PDX_COMPRESSION */
#endif /* __XEN_PDX_H__ */
/*
--
2.34.1