[Xen-changelog] [xen-unstable] vpmu: Remove unnecessary spaces at the end of lines
# HG changeset patch
# User Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx>
# Date 1328796521 28800
# Node ID 1817738de02198c4bfe31c6e030c6c3924ee1335
# Parent 33344b15a5dc686c9273a7de0fa46ffaac555f37
vpmu: Remove unnecessary spaces at the end of lines

Signed-off-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx>
Committed-by: Keir Fraser <keir@xxxxxxx>
---

diff -r 33344b15a5dc -r 1817738de021 xen/arch/x86/oprofile/nmi_int.c
--- a/xen/arch/x86/oprofile/nmi_int.c   Thu Feb 09 06:08:02 2012 -0800
+++ b/xen/arch/x86/oprofile/nmi_int.c   Thu Feb 09 06:08:41 2012 -0800
@@ -24,10 +24,10 @@
 #include <asm/apic.h>
 #include <asm/regs.h>
 #include <asm/current.h>
- 
+
 #include "op_counter.h"
 #include "op_x86_model.h"
- 
+
 struct op_counter_config counter_config[OP_MAX_COUNTER];
 struct op_ibs_config ibs_config;
 
@@ -91,12 +91,12 @@
     if ( ovf && is_active(current->domain) && !xen_mode )
         send_guest_vcpu_virq(current, VIRQ_XENOPROF);
 
-    if ( ovf == 2 ) 
+    if ( ovf == 2 )
         current->nmi_pending = 1;
     return 1;
 }
- 
- 
+
+
 static void nmi_cpu_save_registers(struct op_msrs *msrs)
 {
     unsigned int const nr_ctrs = model->num_counters;
@@ -108,7 +108,7 @@
     for (i = 0; i < nr_ctrs; ++i) {
         rdmsrl(counters[i].addr, counters[i].value);
     }
- 
+
     for (i = 0; i < nr_ctrls; ++i) {
         rdmsrl(controls[i].addr, controls[i].value);
     }
@@ -195,7 +195,7 @@
      * of msrs are distinct for save and setup operations
      */
     on_each_cpu(nmi_save_registers, NULL, 1);
-    return 0; 
+    return 0;
 }
 
 int nmi_enable_virq(void)
@@ -208,7 +208,7 @@
 void nmi_disable_virq(void)
 {
     unset_nmi_callback();
-} 
+}
 
 
 static void nmi_restore_registers(struct op_msrs * msrs)
@@ -222,12 +222,12 @@
     for (i = 0; i < nr_ctrls; ++i) {
         wrmsrl(controls[i].addr, controls[i].value);
     }
- 
+
     for (i = 0; i < nr_ctrs; ++i) {
         wrmsrl(counters[i].addr, counters[i].value);
     }
 }
- 
+
 
 static void nmi_cpu_shutdown(void * dummy)
 {
@@ -236,7 +236,7 @@
     nmi_restore_registers(msrs);
 }
- 
+
 void nmi_release_counters(void)
 {
     on_each_cpu(nmi_cpu_shutdown, NULL, 1);
@@ -244,7 +244,7 @@
     free_msrs();
 }
- 
+
 static void nmi_cpu_start(void * dummy)
 {
     int cpu = smp_processor_id();
@@ -253,15 +253,15 @@
     apic_write(APIC_LVTPC, APIC_DM_NMI);
     model->start(msrs);
 }
- 
+
 int nmi_start(void)
 {
     on_each_cpu(nmi_cpu_start, NULL, 1);
     return 0;
 }
- 
- 
+
+
 static void nmi_cpu_stop(void * dummy)
 {
     unsigned int v;
@@ -285,8 +285,8 @@
     apic_write(APIC_LVTPC, saved_lvtpc[cpu]);
     apic_write(APIC_LVTERR, v);
 }
- 
- 
+
+
 void nmi_stop(void)
 {
     on_each_cpu(nmi_cpu_stop, NULL, 1);
@@ -294,7 +294,7 @@
 
 
 static int __init p4_init(char ** cpu_type)
-{ 
+{
     __u8 cpu_model = current_cpu_data.x86_model;
 
     if ((cpu_model > 6) || (cpu_model == 5)) {
@@ -402,7 +402,7 @@
     __u8 vendor = current_cpu_data.x86_vendor;
     __u8 family = current_cpu_data.x86;
     __u8 _model = current_cpu_data.x86_model;
- 
+
     if (!cpu_has_apic) {
         printk("xenoprof: Initialization failed. No APIC\n");
         return -ENODEV;
@@ -451,7 +451,7 @@
             break;
         }
         break;
- 
+
     case X86_VENDOR_INTEL:
         switch (family) {
             /* Pentium IV */

diff -r 33344b15a5dc -r 1817738de021 xen/arch/x86/oprofile/op_model_ppro.c
--- a/xen/arch/x86/oprofile/op_model_ppro.c     Thu Feb 09 06:08:02 2012 -0800
+++ b/xen/arch/x86/oprofile/op_model_ppro.c     Thu Feb 09 06:08:41 2012 -0800
@@ -21,7 +21,7 @@
 #include <asm/current.h>
 #include <asm/hvm/vpmu.h>
 #include <asm/hvm/vmx/vpmu_core2.h>
- 
+
 #include "op_x86_model.h"
 #include "op_counter.h"
 
@@ -42,7 +42,7 @@
 static int num_counters = 2;
 static int counter_width = 32;
 
-#define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1)))) 
+#define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1))))
 
 #define CTRL_READ(msr_content,msrs,c) do {rdmsrl((msrs->controls[(c)].addr), (msr_content));} while (0)
 #define CTRL_WRITE(msr_content,msrs,c) do {wrmsrl((msrs->controls[(c)].addr), (msr_content));} while (0)
@@ -54,11 +54,11 @@
 #define CTRL_SET_KERN(val,k) (val |= ((k & 1ULL) << 17))
 #define CTRL_SET_UM(val, m) (val |= (m << 8))
 #define CTRL_SET_EVENT(val, e) (val |= e)
-#define IS_ACTIVE(val) (val & (1ULL << 22) ) 
+#define IS_ACTIVE(val) (val & (1ULL << 22) )
 #define IS_ENABLE(val) (val & (1ULL << 20) )
 static unsigned long reset_value[OP_MAX_COUNTER];
 int ppro_has_global_ctrl = 0;
- 
+
 static void ppro_fill_in_addresses(struct op_msrs * const msrs)
 {
     int i;
@@ -74,7 +74,7 @@
 {
     uint64_t msr_content;
     int i;
- 
+
     if (cpu_has_arch_perfmon) {
         union cpuid10_eax eax;
         eax.full = cpuid_eax(0xa);
@@ -98,7 +98,7 @@
         CTRL_CLEAR(msr_content);
         CTRL_WRITE(msr_content, msrs, i);
     }
- 
+
     /* avoid a false detection of ctr overflows in NMI handler */
     for (i = 0; i < num_counters; ++i)
         wrmsrl(msrs->counters[i].addr, ~0x0ULL);
@@ -142,8 +142,8 @@
         if (CTR_OVERFLOWED(val)) {
             xenoprof_log_event(current, regs, eip, mode, i);
             wrmsrl(msrs->counters[i].addr, -reset_value[i]);
-            if ( is_passive(current->domain) && (mode != 2) && 
-                 vpmu_is_set(vcpu_vpmu(current), PASSIVE_DOMAIN_ALLOCATED) ) 
+            if ( is_passive(current->domain) && (mode != 2) &&
+                 vpmu_is_set(vcpu_vpmu(current), PASSIVE_DOMAIN_ALLOCATED) )
             {
                 if ( IS_ACTIVE(msrs_content[i].control) )
                 {
@@ -164,7 +164,7 @@
 
     return ovf;
 }
- 
+
 static void ppro_start(struct op_msrs const * const msrs)
 {
     uint64_t msr_content;
@@ -206,7 +206,7 @@
     if ( (msr_index >= MSR_IA32_PERFCTR0) &&
          (msr_index < (MSR_IA32_PERFCTR0 + num_counters)) )
     {
-        *type = MSR_TYPE_ARCH_COUNTER; 
+        *type = MSR_TYPE_ARCH_COUNTER;
         *index = msr_index - MSR_IA32_PERFCTR0;
         return 1;
     }
@@ -237,7 +237,7 @@
     gdprintk(XENLOG_WARNING, "Insufficient memory for oprofile, oprofile is "
              "unavailable on domain %d vcpu %d.\n",
              v->vcpu_id, v->domain->domain_id);
-    return 0; 
+    return 0;
 }
 
 static void ppro_free_msr(struct vcpu *v)
@@ -261,13 +261,13 @@
     case MSR_TYPE_ARCH_CTRL:
         *msr_content = msrs[index].control;
         break;
-    } 
+    }
 }
 
 static void ppro_save_msr(struct vcpu *v, int type, int index, u64 msr_content)
 {
     struct arch_msr_pair *msrs = vcpu_vpmu(v)->context;
- 
+
     switch ( type )
     {
     case MSR_TYPE_ARCH_COUNTER:
@@ -276,7 +276,7 @@
     case MSR_TYPE_ARCH_CTRL:
         msrs[index].control = msr_content;
         break;
-    } 
+    }
 }
 
 /*

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog