[Xen-devel] [PATCH] xen/page_alloc: statically allocate bootmem_region_list
The existing code assumes that the first mfn passed to the boot
allocator is already mapped, which creates problems when, e.g., there is
no direct map, and may create other bootstrapping problems in the
future. Make bootmem_region_list a statically allocated array instead.
The size is kept the same as before (1 page).
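As an illustrative aside (not part of the patch): a minimal standalone C
sketch of the new sizing scheme, assuming 4 KiB pages and an LP64 build,
where sizeof(struct bootmem_region) is 16 bytes, so the static array holds
4096/16 = 256 regions, the same capacity as the page the old code carved
out of the first free range with mfn_to_virt().

    /*
     * Standalone sketch, not Xen code: size a boot-time region list to
     * exactly one page at build time instead of stealing a mapped page
     * at runtime.  PAGE_SIZE here is an assumption (4 KiB).
     */
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    struct bootmem_region {
        unsigned long s, e; /* MFNs @s through @e-1 inclusive are free */
    };

    /* One page's worth of regions, reserved in the image at build time. */
    static struct bootmem_region
        bootmem_region_list[PAGE_SIZE / sizeof(struct bootmem_region)];

    int main(void)
    {
        /* On LP64, 4096 / 16 = 256 entries fit in one page. */
        printf("regions per page: %zu\n",
               sizeof(bootmem_region_list) / sizeof(bootmem_region_list[0]));
        return 0;
    }

The trade-off is that the page now lives in the Xen image rather than
being taken from, and later returned to, the boot allocator; in the real
patch the array is __initdata, so it is reclaimed once init sections are
freed.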
Signed-off-by: Hongyan Xia <hongyxia@xxxxxxxxxx>
---
xen/common/page_alloc.c | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 7cb1bd368b..7afb651b79 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -244,9 +244,12 @@ PAGE_LIST_HEAD(page_broken_list);
  */
 mfn_t first_valid_mfn = INVALID_MFN_INITIALIZER;
 
-static struct bootmem_region {
+struct bootmem_region {
     unsigned long s, e; /* MFNs @s through @e-1 inclusive are free */
-} *__initdata bootmem_region_list;
+};
+/* Statically allocate a page for bootmem_region_list. */
+static struct bootmem_region __initdata
+    bootmem_region_list[PAGE_SIZE/sizeof(struct bootmem_region)];
 static unsigned int __initdata nr_bootmem_regions;
 
 struct scrub_region {
@@ -263,9 +266,6 @@ static void __init bootmem_region_add(unsigned long s, unsigned long e)
 {
     unsigned int i;
 
-    if ( (bootmem_region_list == NULL) && (s < e) )
-        bootmem_region_list = mfn_to_virt(s++);
-
     if ( s >= e )
         return;
 
@@ -1869,7 +1869,6 @@ void __init end_boot_allocator(void)
             init_heap_pages(mfn_to_page(_mfn(r->s)), r->e - r->s);
     }
     nr_bootmem_regions = 0;
-    init_heap_pages(virt_to_page(bootmem_region_list), 1);
 
     if ( !dma_bitsize && (num_online_nodes() > 1) )
         dma_bitsize = arch_get_dma_bitsize();
--
2.17.1
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel