From 6639fd9e75cb5c3a0351205b9d89ee12ca0b5cc1 Mon Sep 17 00:00:00 2001
From: Grygorii Strashko
Date: Fri, 28 Nov 2025 15:22:21 +0000
Subject: xen/x86: move d->arch.physaddr_bitsize field handling to pv32

The d->arch.physaddr_bitsize field is used only by PV32 code, so:
- move the domain_set_alloc_bitsize() function into PV32 code and inline
  it into switch_compat()
- move the domain_clamp_alloc_bitsize() function into PV32 code and
  convert it to a macro
- move the d->arch.physaddr_bitsize field under a PV32 ifdef into
  struct pv_domain

Signed-off-by: Grygorii Strashko

diff --git a/xen/arch/x86/include/asm/domain.h b/xen/arch/x86/include/asm/domain.h
index 386ec6174589..3f2d55c2b9cf 100644
--- a/xen/arch/x86/include/asm/domain.h
+++ b/xen/arch/x86/include/asm/domain.h
@@ -279,6 +279,11 @@ struct pv_domain
 
     atomic_t nr_l4_pages;
 
+#ifdef CONFIG_PV32
+    /* Maximum physical-address bitwidth supported by this guest. */
+    unsigned int physaddr_bitsize;
+#endif
+
     /* Is a 32-bit PV guest? */
     bool is_32bit;
     /* XPTI active? */
@@ -319,9 +324,6 @@ struct arch_domain
     unsigned int hv_compat_vstart;
 #endif
 
-    /* Maximum physical-address bitwidth supported by this guest. */
-    unsigned int physaddr_bitsize;
-
     /* I/O-port admin-specified access capabilities. */
     struct rangeset *ioport_caps;
     uint32_t pci_cf8;
diff --git a/xen/arch/x86/include/asm/mm.h b/xen/arch/x86/include/asm/mm.h
index 9438f5ea0119..a308a98df2a4 100644
--- a/xen/arch/x86/include/asm/mm.h
+++ b/xen/arch/x86/include/asm/mm.h
@@ -619,8 +619,12 @@ void __iomem *ioremap_wc(paddr_t pa, size_t len);
 
 extern int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm);
 
-unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits);
-#define domain_clamp_alloc_bitsize(d, bits) domain_clamp_alloc_bitsize(d, bits)
+#ifdef CONFIG_PV32
+#define domain_clamp_alloc_bitsize(d, bits) \
+    (((d) && (d)->arch.pv.physaddr_bitsize) \
+     ? min_t(uint32_t, (d)->arch.pv.physaddr_bitsize, (bits)) \
+     : (bits))
+#endif
 
 unsigned long domain_get_maximum_gpfn(struct domain *d);
 
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index 21158ce1812e..94f7976e819f 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -626,8 +626,10 @@ static int __init dom0_construct(const struct boot_domain *bd)
         initrd_mfn = paddr_to_pfn(initrd->start);
         mfn = initrd_mfn;
         count = PFN_UP(initrd_len);
-        if ( d->arch.physaddr_bitsize &&
-             ((mfn + count - 1) >> (d->arch.physaddr_bitsize - PAGE_SHIFT)) )
+
+#ifdef CONFIG_PV32
+        if ( d->arch.pv.physaddr_bitsize &&
+             ((mfn + count - 1) >> (d->arch.pv.physaddr_bitsize - PAGE_SHIFT)) )
         {
             order = get_order_from_pages(count);
             page = alloc_domheap_pages(d, order, MEMF_no_scrub);
@@ -650,6 +652,7 @@ static int __init dom0_construct(const struct boot_domain *bd)
             initrd->start = pfn_to_paddr(initrd_mfn);
         }
         else
+#endif
         {
             while ( count-- )
                 if ( assign_pages(mfn_to_page(_mfn(mfn++)), 1, d, 0) )
diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
index 11db6a6d8396..d58e4e213e5c 100644
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -255,7 +255,7 @@ int switch_compat(struct domain *d)
     }
 
     if ( MACH2PHYS_COMPAT_NR_ENTRIES(d) < max_page )
-        d->arch.physaddr_bitsize =
+        d->arch.pv.physaddr_bitsize =
             /* 2^n entries can be contained in guest's p2m mapping space */
             fls(MACH2PHYS_COMPAT_NR_ENTRIES(d)) - 1 + PAGE_SHIFT;
 
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 42fd4fe4e9b5..8eadab7933d0 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1119,13 +1119,6 @@ int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs)
     return ret;
 }
 
-unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits)
-{
-    if ( (d == NULL) || (d->arch.physaddr_bitsize == 0) )
-        return bits;
-    return min(d->arch.physaddr_bitsize, bits);
-}
-
 static int transfer_pages_to_heap(struct mem_hotadd_info *info)
 {
     unsigned long i;