[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH 2/3] x86/boot: Drop move_memory() and use memcpy() directly
The way move_memory() sets up the virtual mappings means that there are always two non-overlapping regions. The virtual layout means that memmove()'s forward/backwards check doesn't do what the caller intends, as the check ought to be performed in physical space rather than virtual. Luckily both callers already provide non-overlapping mappings, so this bug doesn't manifest, and we can move to memcpy() to avoid a backwards copy. Backwards `rep movs` copies are typically far slower than forwards copies. Furthermore, both callers already have suitable directmap mappings. There is no need to spend time managing early boot mappings, or chunking the copy through them. For the main Xen relocation, we can read out of the virtual mapping that we're executing on, and write directly into the directmap. In fact, this removes one dependency on Xen being "at 0" (the XEN_IMG_OFFSET passed as src) for relocation to occur. For the module relocation, just transcribe the move_memory() call into an equivalent memcpy(). 
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> --- CC: Jan Beulich <JBeulich@xxxxxxxx> CC: Roger Pau Monné <roger.pau@xxxxxxxxxx> CC: Wei Liu <wl@xxxxxxx> --- xen/arch/x86/setup.c | 58 +++++----------------------------------------------- 1 file changed, 5 insertions(+), 53 deletions(-) diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c index 0492856292cf..a6ff450daab7 100644 --- a/xen/arch/x86/setup.c +++ b/xen/arch/x86/setup.c @@ -413,53 +413,6 @@ void *__init bootstrap_map(const module_t *mod) return ret; } -static void *__init move_memory( - uint64_t dst, uint64_t src, unsigned int size, bool keep) -{ - unsigned int blksz = BOOTSTRAP_MAP_LIMIT - BOOTSTRAP_MAP_BASE; - unsigned int mask = (1L << L2_PAGETABLE_SHIFT) - 1; - - if ( src + size > BOOTSTRAP_MAP_BASE ) - blksz >>= 1; - - while ( size ) - { - module_t mod; - unsigned int soffs = src & mask; - unsigned int doffs = dst & mask; - unsigned int sz; - void *d, *s; - - mod.mod_start = (src - soffs) >> PAGE_SHIFT; - mod.mod_end = soffs + size; - if ( mod.mod_end > blksz ) - mod.mod_end = blksz; - sz = mod.mod_end - soffs; - s = bootstrap_map(&mod); - - mod.mod_start = (dst - doffs) >> PAGE_SHIFT; - mod.mod_end = doffs + size; - if ( mod.mod_end > blksz ) - mod.mod_end = blksz; - if ( sz > mod.mod_end - doffs ) - sz = mod.mod_end - doffs; - d = bootstrap_map(&mod); - - memmove(d + doffs, s + soffs, sz); - - dst += sz; - src += sz; - size -= sz; - - if ( keep ) - return size ? NULL : d + doffs; - - bootstrap_map(NULL); - } - - return NULL; -} - #undef BOOTSTRAP_MAP_LIMIT static uint64_t __init consider_modules( @@ -1243,7 +1196,7 @@ void __init noreturn __start_xen(unsigned long mbi_p) * data until after we have switched to the relocated pagetables! */ barrier(); - move_memory(e, XEN_IMG_OFFSET, _end - _start, 1); + memcpy(__va(__pa(_start)), _start, _end - _start); /* Walk idle_pg_table, relocating non-leaf entries. 
*/ pl4e = __va(__pa(idle_pg_table)); @@ -1300,8 +1253,6 @@ void __init noreturn __start_xen(unsigned long mbi_p) "1" (__va(__pa(cpu0_stack))), "2" (STACK_SIZE / 8) : "memory" ); - bootstrap_map(NULL); - printk("New Xen image base address: %#lx\n", xen_phys_start); } @@ -1325,9 +1276,10 @@ void __init noreturn __start_xen(unsigned long mbi_p) (headroom || ((end - size) >> PAGE_SHIFT) > mod[j].mod_start) ) { - move_memory(end - size + headroom, - (uint64_t)mod[j].mod_start << PAGE_SHIFT, - mod[j].mod_end, 0); + memcpy(__va(end - size + headroom), + __va((uint64_t)mod[j].mod_start << PAGE_SHIFT), + mod[j].mod_end); + mod[j].mod_start = (end - size) >> PAGE_SHIFT; mod[j].mod_end += headroom; mod[j].reserved = 1; -- 2.11.0
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.