|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v5 14/44] x86/boot: transition relocation calculations to struct boot_module
Use the struct boot_module fields, start and size, when calculating the
relocation address and size. This also ensures that the underlying early
module (mod) references are kept in sync.
Signed-off-by: Daniel P. Smith <dpsmith@xxxxxxxxxxxxxxxxxxxx>
---
xen/arch/x86/setup.c | 36 +++++++++++++++++-------------------
1 file changed, 17 insertions(+), 19 deletions(-)
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index f968758048ed..4f540c461b26 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1490,7 +1490,7 @@ void asmlinkage __init noreturn __start_xen(unsigned long
mbi_p)
struct boot_module *bm = &bi->mods[j];
unsigned long size;
- size = PAGE_ALIGN(bm->headroom + bm->mod->mod_end);
+ size = PAGE_ALIGN(bm->headroom + bm->size);
if ( bi->mods[j].flags & BOOTMOD_FLAG_X86_RELOCATED )
continue;
@@ -1504,13 +1504,13 @@ void asmlinkage __init noreturn __start_xen(unsigned
long mbi_p)
if ( s < end &&
(bm->headroom ||
- ((end - size) >> PAGE_SHIFT) > bm->mod->mod_start) )
+ paddr_to_pfn(end - size) > paddr_to_pfn(bm->start)) )
{
- move_memory(end - size + bm->headroom,
- (uint64_t)bm->mod->mod_start << PAGE_SHIFT,
- bm->mod->mod_end);
- bm->mod->mod_start = (end - size) >> PAGE_SHIFT;
- bm->mod->mod_end += bm->headroom;
+ move_memory(end - size + bm->headroom, bm->start, bm->size);
+ bm->start = (end - size);
+ bm->mod->mod_start = paddr_to_pfn(bm->start);
+ bm->size += bm->headroom;
+ bm->mod->mod_end = bm->size;
bm->flags |= BOOTMOD_FLAG_X86_RELOCATED;
}
}
@@ -1542,11 +1542,10 @@ void asmlinkage __init noreturn __start_xen(unsigned
long mbi_p)
panic("Not enough memory to relocate the dom0 kernel image\n");
for ( i = 0; i < bi->nr_modules; ++i )
{
- uint64_t s = (uint64_t)bi->mods[i].mod->mod_start
- << PAGE_SHIFT;
+ uint64_t s = (uint64_t)bi->mods[i].start;
reserve_e820_ram(&boot_e820, s,
- s + PAGE_ALIGN(bi->mods[i].mod->mod_end));
+ s + PAGE_ALIGN(bi->mods[i].size));
}
if ( !xen_phys_start )
@@ -1624,9 +1623,8 @@ void asmlinkage __init noreturn __start_xen(unsigned long
mbi_p)
map_e = boot_e820.map[j].addr + boot_e820.map[j].size;
for ( j = 0; j < bi->nr_modules; ++j )
{
- uint64_t end = pfn_to_paddr(
- bi->mods[j].mod->mod_start) +
- bi->mods[j].mod->mod_end;
+ uint64_t end = bi->mods[j].start +
+ bi->mods[j].size;
if ( map_e < end )
map_e = end;
@@ -1700,13 +1698,13 @@ void asmlinkage __init noreturn __start_xen(unsigned
long mbi_p)
for ( i = 0; i < bi->nr_modules; ++i )
{
- set_pdx_range(bi->mods[i].mod->mod_start,
- bi->mods[i].mod->mod_start +
- PFN_UP(bi->mods[i].mod->mod_end));
+ set_pdx_range(paddr_to_pfn(bi->mods[i].mod->mod_start),
+ paddr_to_pfn(bi->mods[i].mod->mod_start) +
+ PFN_UP(bi->mods[i].size));
map_pages_to_xen(
- (unsigned long)mfn_to_virt(bi->mods[i].mod->mod_start),
- _mfn(bi->mods[i].mod->mod_start),
- PFN_UP(bi->mods[i].mod->mod_end), PAGE_HYPERVISOR);
+ (unsigned long)maddr_to_virt(bi->mods[i].start),
+ maddr_to_mfn(bi->mods[i].start),
+ PFN_UP(bi->mods[i].size), PAGE_HYPERVISOR);
}
#ifdef CONFIG_KEXEC
--
2.30.2
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |