
Re: [Xen-devel] [PATCH 2/4] xen/perf: Define .glob for the different hypercalls.



>>> On 03.07.12 at 17:40, Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx> wrote:
> This allows perf to show this:
> 
>  99.67%  [kernel]             [k] xen_hypercall_sched_op
>   0.11%  [kernel]             [k] xen_hypercall_xen_version
> 
> instead of the boring, all-encompassing:
> 
>  99.13%  [kernel]              [k] hypercall_page
> 
> Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
> ---
>  arch/x86/xen/xen-head.S |  102 ++++++++++++++++++++++++++++++++++++++++++++++-
>  1 files changed, 100 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
> index aaa7291..f6ba51d 100644
> --- a/arch/x86/xen/xen-head.S
> +++ b/arch/x86/xen/xen-head.S
> @@ -28,9 +28,107 @@ ENTRY(startup_xen)
>       __FINIT
>  
>  .pushsection .text
> -     .align PAGE_SIZE
> +     .balign PAGE_SIZE
>  ENTRY(hypercall_page)
> -     .skip PAGE_SIZE
> +ENTRY(xen_hypercall_set_trap_table)
> +     .skip 32
> +ENTRY(xen_hypercall_mmu_update)
> +     .skip 32
> +ENTRY(xen_hypercall_set_gdt)
> +     .skip 32
> +ENTRY(xen_hypercall_stack_switch)
> +     .skip 32
> +ENTRY(xen_hypercall_set_callbacks)
> +     .skip 32
> +ENTRY(xen_hypercall_fpu_taskswitch)
> +     .skip 32
> +ENTRY(xen_hypercall_sched_op_compat)
> +     .skip 32
> +ENTRY(xen_hypercall_platform_op)
> +     .skip 32
> +ENTRY(xen_hypercall_set_debugreg)
> +     .skip 32
> +ENTRY(xen_hypercall_get_debugreg)
> +     .skip 32
> +ENTRY(xen_hypercall_update_descriptor)
> +     .skip 32
> +ENTRY(xen_hypercall_ni)
> +     .skip 32
> +ENTRY(xen_hypercall_memory_op)
> +     .skip 32
> +ENTRY(xen_hypercall_multicall)
> +     .skip 32
> +ENTRY(xen_hypercall_update_va_mapping)
> +     .skip 32
> +ENTRY(xen_hypercall_set_timer_op)
> +     .skip 32
> +ENTRY(xen_hypercall_event_channel_op_compat)
> +     .skip 32
> +ENTRY(xen_hypercall_xen_version)
> +     .skip 32
> +ENTRY(xen_hypercall_console_io)
> +     .skip 32
> +ENTRY(xen_hypercall_physdev_op_compat)
> +     .skip 32
> +ENTRY(xen_hypercall_grant_table_op)
> +     .skip 32
> +ENTRY(xen_hypercall_vm_assist)
> +     .skip 32
> +ENTRY(xen_hypercall_update_va_mapping_otherdomain)
> +     .skip 32
> +ENTRY(xen_hypercall_iret)
> +     .skip 32
> +ENTRY(xen_hypercall_vcpu_op)
> +     .skip 32
> +ENTRY(xen_hypercall_set_segment_base)
> +     .skip 32
> +ENTRY(xen_hypercall_mmuext_op)
> +     .skip 32
> +ENTRY(xen_hypercall_xsm_op)
> +     .skip 32
> +ENTRY(xen_hypercall_nmi_op)
> +     .skip 32
> +ENTRY(xen_hypercall_sched_op)
> +     .skip 32
> +ENTRY(xen_hypercall_callback_op)
> +     .skip 32
> +ENTRY(xen_hypercall_xenoprof_op)
> +     .skip 32
> +ENTRY(xen_hypercall_event_channel_op)
> +     .skip 32
> +ENTRY(xen_hypercall_physdev_op)
> +     .skip 32
> +ENTRY(xen_hypercall_hvm_op)
> +     .skip 32
> +ENTRY(xen_hypercall_sysctl)
> +     .skip 32
> +ENTRY(xen_hypercall_domctl)
> +     .skip 32
> +ENTRY(xen_hypercall_kexec_op)
> +     .skip 32
> +ENTRY(xen_hypercall_tmem_op) /* 38 */
> +     .skip 32
> +ENTRY(xen_hypercall_rsvr)
> +     .skip 320
> +ENTRY(xen_hypercall_mca) /* 48 */
> +     .skip 32
> +ENTRY(xen_hypercall_arch_1)
> +     .skip 32
> +ENTRY(xen_hypercall_arch_2)
> +     .skip 32
> +ENTRY(xen_hypercall_arch_3)
> +     .skip 32
> +ENTRY(xen_hypercall_arch_4)
> +     .skip 32
> +ENTRY(xen_hypercall_arch_5)
> +     .skip 32
> +ENTRY(xen_hypercall_arch_6)
> +     .skip 32
> +ENTRY(xen_hypercall_arch_7)
> +     .skip 32
> +ENTRY(xen_hypercall_other)
> +     .skip 2272

May I suggest that you use

        .balign PAGE_SIZE

here again, avoiding the need to adjust the number with every
future addition at the end?
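
That is, something along these lines at the tail of the page (only a
sketch of the suggestion, not a tested replacement):

ENTRY(xen_hypercall_arch_7)
	.skip 32
ENTRY(xen_hypercall_other)
	.balign PAGE_SIZE	/* pads to the next page boundary; no byte count to maintain */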

Jan

> +
>  .popsection
>  
>       ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS,       .asciz "linux")
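
For context (not part of the patch): each slot in the hypercall page is
32 bytes, and a call site reaches hypercall number N at
hypercall_page + N * 32 - hence the .skip 32 after every label above.
A minimal sketch of what this buys perf, assuming the usual call-site
form and using sched_op (hypercall 29) as the example:

	call hypercall_page + 29 * 32	/* __HYPERVISOR_sched_op == 29 */
	/* with the per-slot labels, the very same address is: */
	call xen_hypercall_sched_op

so samples that land in that slot resolve to xen_hypercall_sched_op
instead of the page-wide hypercall_page symbol.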


