[PATCH 1/8] x86/pdx: simplify calculation of domain struct allocation boundary
When not using CONFIG_BIGMEM there are restrictions on the address width
for allocations of the domain structure, as its PDX, truncated to 32 bits,
is stashed into the page_info structure of domain allocated pages.

The current logic to calculate this limit is based on the internals of the
PDX compression used, which is not strictly required.  Instead simplify the
logic to rely on the existing PDX to PFN conversion helpers used elsewhere.

This has the added benefit of allowing alternative PDX compression
algorithms to be implemented without requiring changes to the calculation
of the domain structure allocation boundary.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/domain.c | 35 ++++++-----------------------------
 1 file changed, 6 insertions(+), 29 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 7536b6c8717e..2f438ce367cf 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -461,30 +461,6 @@ void domain_cpu_policy_changed(struct domain *d)
     }
 }
 
-#if !defined(CONFIG_BIGMEM) && defined(CONFIG_PDX_COMPRESSION)
-/*
- * The hole may be at or above the 44-bit boundary, so we need to determine
- * the total bit count until reaching 32 significant (not squashed out) bits
- * in PFN representations.
- * Note that the way "bits" gets initialized/updated/bounds-checked guarantees
- * that the function will never return zero, and hence will never be called
- * more than once (which is important due to it being deliberately placed in
- * .init.text).
- */
-static unsigned int __init noinline _domain_struct_bits(void)
-{
-    unsigned int bits = 32 + PAGE_SHIFT;
-    unsigned int sig = hweight32(~pfn_hole_mask);
-    unsigned int mask = pfn_hole_mask >> 32;
-
-    for ( ; bits < BITS_PER_LONG && sig < 32; ++bits, mask >>= 1 )
-        if ( !(mask & 1) )
-            ++sig;
-
-    return bits;
-}
-#endif
-
 struct domain *alloc_domain_struct(void)
 {
     struct domain *d;
@@ -498,14 +474,15 @@ struct domain *alloc_domain_struct(void)
      * On systems with CONFIG_BIGMEM there's no packing, and so there's no
      * such restriction.
      */
-#if defined(CONFIG_BIGMEM) || !defined(CONFIG_PDX_COMPRESSION)
-    const unsigned int bits = IS_ENABLED(CONFIG_BIGMEM) ? 0 :
-                                                          32 + PAGE_SHIFT;
+#if defined(CONFIG_BIGMEM)
+    const unsigned int bits = 0;
 #else
-    static unsigned int __read_mostly bits;
+    static unsigned int __ro_after_init bits;
 
     if ( unlikely(!bits) )
-        bits = _domain_struct_bits();
+        bits = flsl(pfn_to_paddr(pdx_to_pfn(
+            1UL << (sizeof(((struct page_info *)NULL)->v.inuse._domain) * 8))))
+              - 1;
 #endif
 
     BUILD_BUG_ON(sizeof(*d) > PAGE_SIZE);
-- 
2.49.0
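
To illustrate the idea (this is not part of the patch, and the helpers
below are simplified stand-ins rather than Xen's real pdx_to_pfn(),
pfn_to_paddr() and flsl()), here is a standalone sketch of the new
boundary calculation, assuming a toy compression that squashes out 4 PFN
bits starting at bit 28:

/* Standalone illustration only; NOT Xen code.  The compression below is a
 * made-up example: PFN bits [28,32) are assumed always zero and removed
 * from the PDX space. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static const unsigned int hole_bottom = 28;  /* example hole position */
static const unsigned int hole_bits   = 4;   /* example hole width */

/* Re-insert the squashed-out bits: inverse of the toy PDX compression. */
static uint64_t pdx_to_pfn(uint64_t pdx)
{
    uint64_t low = pdx & ((1ULL << hole_bottom) - 1);

    return low | ((pdx >> hole_bottom) << (hole_bottom + hole_bits));
}

static uint64_t pfn_to_paddr(uint64_t pfn)
{
    return pfn << PAGE_SHIFT;
}

/* Position of the most significant set bit, 1-based, like Xen's flsl(). */
static unsigned int flsl(uint64_t v)
{
    return v ? 64 - __builtin_clzll(v) : 0;
}

int main(void)
{
    /* A 32-bit _domain field means PDXes below 2^32 must stay reachable. */
    uint64_t max_pdx = 1ULL << 32;
    unsigned int bits = flsl(pfn_to_paddr(pdx_to_pfn(max_pdx))) - 1;

    /* Prints 48 with the example hole: 32 PDX bits + PAGE_SHIFT + 4. */
    printf("domain struct allocation boundary: %u address bits\n", bits);
    return 0;
}

With no hole the result would be 32 + PAGE_SHIFT = 44; the example hole
below PFN bit 32 pushes it to 48, and the calculation never has to look at
the compression layout itself, only at the ordinary conversion helpers.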