[Xen-devel] [PATCH 6/6] x86/boot: Ensure the BSS is aligned on an 8 byte boundary
... and zero it more efficiently.

A diff of the resulting binaries shows that the size of the BSS doesn't
actually change from the extra ALIGN()s, but they do guarantee that it is
safe to clear in a more efficient manner than 1 byte at a time.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Keir Fraser <keir@xxxxxxx>
CC: Jan Beulich <JBeulich@xxxxxxxx>
---
 xen/arch/x86/boot/head.S |    3 ++-
 xen/arch/x86/xen.lds.S   |    2 ++
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/boot/head.S b/xen/arch/x86/boot/head.S
index 2d0e56c..b5a1d45 100644
--- a/xen/arch/x86/boot/head.S
+++ b/xen/arch/x86/boot/head.S
@@ -127,7 +127,8 @@ __start:
         mov     $sym_phys(__bss_end),%ecx
         sub     %edi,%ecx
         xor     %eax,%eax
-        rep     stosb
+        shr     $2,%ecx
+        rep     stosl
 
         /* Interrogate CPU extended features via CPUID. */
         mov     $0x80000000,%eax
diff --git a/xen/arch/x86/xen.lds.S b/xen/arch/x86/xen.lds.S
index 4699a04..b1926e3 100644
--- a/xen/arch/x86/xen.lds.S
+++ b/xen/arch/x86/xen.lds.S
@@ -163,6 +163,7 @@ SECTIONS
   __init_end = .;
 
   .bss : {                     /* BSS */
+       . = ALIGN(8);
        __bss_start = .;
        *(.bss.stack_aligned)
        . = ALIGN(PAGE_SIZE);
@@ -175,6 +176,7 @@ SECTIONS
        *(.bss.percpu.read_mostly)
        . = ALIGN(SMP_CACHE_BYTES);
        __per_cpu_data_end = .;
+       . = ALIGN(8);
        __bss_end = .;
   } :text
   _end = . ;
-- 
1.7.10.4
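For reference, a rough C sketch of what the new clearing sequence does.
This is illustrative only, not code from the patch: the real work is the
rep stosb -> shr $2 / rep stosl hunk in head.S above, and the sketch simply
assumes __bss_start/__bss_end are the linker-provided symbols from
xen.lds.S. Because both symbols are now ALIGN(8)'d, the length is a
multiple of 8 (and hence of 4), so the shift by 2 discards no bytes.

    /* Hypothetical C equivalent of the new BSS clear (sketch only). */
    extern char __bss_start[], __bss_end[];   /* linker symbols */

    static void clear_bss(void)
    {
        unsigned int *p = (unsigned int *)__bss_start;
        unsigned long n = (__bss_end - __bss_start) >> 2; /* shr $2,%ecx */

        while ( n-- )          /* rep stosl: one 4-byte store per iteration */
            *p++ = 0;
    }

Storing a dword at a time quarters the number of stores compared with the
old byte-at-a-time loop, which is the efficiency gain the commit message
refers to.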