[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v10 01/11] xen: add function for obtaining highest possible memory address
Add a function for obtaining the highest possible physical memory address of the system. This value is influenced by: - hypervisor configuration (CONFIG_BIGMEM) - processor capability (max. addressable physical memory) - memory map at boot time - memory hotplug capability Add this value to xen_sysctl_physinfo in order to enable dom0 to do a proper sizing of grant frame limits of guests. Signed-off-by: Juergen Gross <jgross@xxxxxxxx> Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx> --- V10: - avoid holes in xen_sysctl_physinfo, use uint64_aligned_t (Jan Beulich) V9: - merge patch with following one (Jan Beulich) - bump sysctl interface version (Julien Grall) - drop thin common shim of get_upper_mfn_bound() (Jan Beulich) - let get_upper_mfn_bound() return the highest MFN, not the one following it (Jan Beulich) --- xen/arch/arm/mm.c | 6 ++++++ xen/arch/x86/mm.c | 11 +++++++++++ xen/common/sysctl.c | 1 + xen/include/public/sysctl.h | 7 ++++--- xen/include/xen/mm.h | 3 +++ 5 files changed, 25 insertions(+), 3 deletions(-) diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c index f3834b3dab..9a37f29ce6 100644 --- a/xen/arch/arm/mm.c +++ b/xen/arch/arm/mm.c @@ -1472,6 +1472,12 @@ void clear_and_clean_page(struct page_info *page) unmap_domain_page(p); } +unsigned long get_upper_mfn_bound(void) +{ + /* No memory hotplug yet, so current memory limit is the final one. */ + return max_page - 1; +} + /* * Local variables: * mode: C diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c index afd5a101a4..d9df5ca69f 100644 --- a/xen/arch/x86/mm.c +++ b/xen/arch/x86/mm.c @@ -5178,6 +5178,17 @@ void write_32bit_pse_identmap(uint32_t *l2) _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE); } +unsigned long get_upper_mfn_bound(void) +{ + unsigned long max_mfn; + + max_mfn = mem_hotplug ? 
PFN_DOWN(mem_hotplug) : max_page; +#ifndef CONFIG_BIGMEM + max_mfn = min(max_mfn, 1UL << 32); +#endif + return min(max_mfn, 1UL << (paddr_bits - PAGE_SHIFT)) - 1; +} + /* * Local variables: * mode: C diff --git a/xen/common/sysctl.c b/xen/common/sysctl.c index 3480f582fa..08198b7150 100644 --- a/xen/common/sysctl.c +++ b/xen/common/sysctl.c @@ -266,6 +266,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl) get_outstanding_claims(&pi->free_pages, &pi->outstanding_pages); pi->scrub_pages = 0; pi->cpu_khz = cpu_khz; + pi->max_mfn = get_upper_mfn_bound(); arch_do_physinfo(pi); if ( copy_to_guest(u_sysctl, op, 1) ) diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h index 4d32a87cca..1e6097584b 100644 --- a/xen/include/public/sysctl.h +++ b/xen/include/public/sysctl.h @@ -36,7 +36,7 @@ #include "physdev.h" #include "tmem.h" -#define XEN_SYSCTL_INTERFACE_VERSION 0x0000000F +#define XEN_SYSCTL_INTERFACE_VERSION 0x00000010 /* * Read console content from Xen buffer ring. @@ -96,14 +96,15 @@ struct xen_sysctl_physinfo { uint32_t nr_nodes; /* # nodes currently online */ uint32_t max_node_id; /* Largest possible node ID on this host */ uint32_t cpu_khz; + uint32_t capabilities; /* XEN_SYSCTL_PHYSCAP_??? */ + uint64_aligned_t total_pages; uint64_aligned_t free_pages; uint64_aligned_t scrub_pages; uint64_aligned_t outstanding_pages; uint32_t hw_cap[8]; - /* XEN_SYSCTL_PHYSCAP_??? */ - uint32_t capabilities; + uint64_aligned_t max_mfn; /* Largest possible MFN on this host */ }; /* diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h index f8b6177c32..e813c07b22 100644 --- a/xen/include/xen/mm.h +++ b/xen/include/xen/mm.h @@ -599,6 +599,9 @@ int prepare_ring_for_helper(struct domain *d, unsigned long gmfn, struct page_info **_page, void **_va); void destroy_ring_for_helper(void **_va, struct page_info *page); +/* Return the upper bound of MFNs, including hotplug memory. 
*/ +unsigned long get_upper_mfn_bound(void); + #include <asm/flushtlb.h> static inline void accumulate_tlbflush(bool *need_tlbflush, -- 2.12.3 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx https://lists.xen.org/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.