[Xen-devel] [PATCH 15/19] xen: credit2: only marshall trace point arguments if tracing enabled
Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxx>
Cc: Anshul Makkar <anshul.makkar@xxxxxxxxxx>
Cc: David Vrabel <david.vrabel@xxxxxxxxxx>
---
 xen/common/sched_credit2.c | 114 +++++++++++++++++++++++---------------------
 1 file changed, 59 insertions(+), 55 deletions(-)

diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index e9f3f13..3fdc91c 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -636,6 +636,7 @@ __update_runq_load(const struct scheduler *ops,
 
     ASSERT(rqd->avgload <= STIME_MAX && rqd->b_avgload <= STIME_MAX);
 
+    if ( unlikely(tb_init_done) )
     {
         struct {
             uint64_t rq_avgload, b_avgload;
@@ -646,9 +647,9 @@
         d.rq_avgload = rqd->avgload;
         d.b_avgload = rqd->b_avgload;
         d.shift = P;
-        trace_var(TRC_CSCHED2_UPDATE_RUNQ_LOAD, 1,
-                  sizeof(d),
-                  (unsigned char *)&d);
+        __trace_var(TRC_CSCHED2_UPDATE_RUNQ_LOAD, 1,
+                    sizeof(d),
+                    (unsigned char *)&d);
     }
 }
 
@@ -691,6 +692,7 @@ __update_svc_load(const struct scheduler *ops,
     }
     svc->load_last_update = now;
 
+    if ( unlikely(tb_init_done) )
     {
         struct {
             uint64_t v_avgload;
@@ -701,9 +703,9 @@
         d.vcpu = svc->vcpu->vcpu_id;
         d.v_avgload = svc->avgload;
         d.shift = P;
-        trace_var(TRC_CSCHED2_UPDATE_VCPU_LOAD, 1,
-                  sizeof(d),
-                  (unsigned char *)&d);
+        __trace_var(TRC_CSCHED2_UPDATE_VCPU_LOAD, 1,
+                    sizeof(d),
+                    (unsigned char *)&d);
     }
 }
 
@@ -759,6 +761,7 @@ runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
 
     pos = __runq_insert(runq, svc);
 
+    if ( unlikely(tb_init_done) )
     {
         struct {
             unsigned vcpu:16, dom:16;
@@ -767,9 +770,9 @@ runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
         d.dom = svc->vcpu->domain->domain_id;
         d.vcpu = svc->vcpu->vcpu_id;
         d.pos = pos;
-        trace_var(TRC_CSCHED2_RUNQ_POS, 1,
-                  sizeof(d),
-                  (unsigned char *)&d);
+        __trace_var(TRC_CSCHED2_RUNQ_POS, 1,
+                    sizeof(d),
+                    (unsigned char *)&d);
     }
 
     return;
@@ -812,7 +815,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
 
     ASSERT(new->rqd == rqd);
 
-    /* TRACE */
+    if ( unlikely(tb_init_done) )
     {
         struct {
             unsigned vcpu:16, dom:16;
@@ -822,9 +825,9 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
         d.vcpu = new->vcpu->vcpu_id;
         d.processor = new->vcpu->processor;
         d.credit = new->credit;
-        trace_var(TRC_CSCHED2_TICKLE_NEW, 1,
-                  sizeof(d),
-                  (unsigned char *)&d);
+        __trace_var(TRC_CSCHED2_TICKLE_NEW, 1,
+                    sizeof(d),
+                    (unsigned char *)&d);
     }
 
     /*
@@ -882,7 +885,8 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
             lowest = cur->credit;
         }
 
-        /* TRACE */ {
+        if ( unlikely(tb_init_done) )
+        {
             struct {
                 unsigned vcpu:16, dom:16;
                 unsigned credit;
             } d;
@@ -890,9 +894,9 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
             d.dom = cur->vcpu->domain->domain_id;
             d.vcpu = cur->vcpu->vcpu_id;
             d.credit = cur->credit;
-            trace_var(TRC_CSCHED2_TICKLE_CHECK, 1,
-                      sizeof(d),
-                      (unsigned char *)&d);
+            __trace_var(TRC_CSCHED2_TICKLE_CHECK, 1,
+                        sizeof(d),
+                        (unsigned char *)&d);
         }
     }
 
@@ -910,14 +914,15 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
 tickle:
     BUG_ON(ipid == -1);
 
-    /* TRACE */ {
+    if ( unlikely(tb_init_done) )
+    {
         struct { unsigned cpu:16, pad:16; } d;
         d.cpu = ipid;
         d.pad = 0;
-        trace_var(TRC_CSCHED2_TICKLE, 1,
-                  sizeof(d),
-                  (unsigned char *)&d);
+        __trace_var(TRC_CSCHED2_TICKLE, 1,
+                    sizeof(d),
+                    (unsigned char *)&d);
     }
 
     __cpumask_set_cpu(ipid, &rqd->tickled);
     cpu_raise_softirq(ipid, SCHEDULE_SOFTIRQ);
@@ -979,7 +984,8 @@ static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
 
         svc->start_time = now;
 
-        /* TRACE */ {
+        if ( unlikely(tb_init_done) )
+        {
             struct {
                 unsigned vcpu:16, dom:16;
                 unsigned credit_start, credit_end;
@@ -990,9 +996,9 @@ static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
             d.credit_start = start_credit;
             d.credit_end = svc->credit;
             d.multiplier = m;
-            trace_var(TRC_CSCHED2_CREDIT_RESET, 1,
-                      sizeof(d),
-                      (unsigned char *)&d);
+            __trace_var(TRC_CSCHED2_CREDIT_RESET, 1,
+                        sizeof(d),
+                        (unsigned char *)&d);
         }
     }
 
@@ -1028,7 +1034,7 @@ void burn_credits(struct csched2_runqueue_data *rqd,
                __func__, now, svc->start_time);
     }
 
-    /* TRACE */
+    if ( unlikely(tb_init_done) )
     {
         struct {
             unsigned vcpu:16, dom:16;
@@ -1039,9 +1045,9 @@ void burn_credits(struct csched2_runqueue_data *rqd,
         d.vcpu = svc->vcpu->vcpu_id;
         d.credit = svc->credit;
         d.delta = delta;
-        trace_var(TRC_CSCHED2_CREDIT_BURN, 1,
-                  sizeof(d),
-                  (unsigned char *)&d);
+        __trace_var(TRC_CSCHED2_CREDIT_BURN, 1,
+                    sizeof(d),
+                    (unsigned char *)&d);
     }
 }
 
@@ -1077,16 +1083,16 @@ static void update_max_weight(struct csched2_runqueue_data *rqd, int new_weight,
         SCHED_STAT_CRANK(upd_max_weight_full);
     }
 
-    /* TRACE */
+    if ( unlikely(tb_init_done) )
     {
         struct {
             unsigned rqi:16, max_weight:16;
         } d;
         d.rqi = rqd->id;
         d.max_weight = rqd->max_weight;
-        trace_var(TRC_CSCHED2_RUNQ_MAX_WEIGHT, 1,
-                  sizeof(d),
-                  (unsigned char *)&d);
+        __trace_var(TRC_CSCHED2_RUNQ_MAX_WEIGHT, 1,
+                    sizeof(d),
+                    (unsigned char *)&d);
     }
 }
 
@@ -1166,7 +1172,7 @@ __runq_assign(struct csched2_vcpu *svc, struct csched2_runqueue_data *rqd)
     /* Expected new load based on adding this vcpu */
     rqd->b_avgload += svc->avgload;
 
-    /* TRACE */
+    if ( unlikely(tb_init_done) )
     {
         struct {
             unsigned vcpu:16, dom:16;
@@ -1175,9 +1181,9 @@ __runq_assign(struct csched2_vcpu *svc, struct csched2_runqueue_data *rqd)
         d.dom = svc->vcpu->domain->domain_id;
         d.vcpu = svc->vcpu->vcpu_id;
         d.rqi=rqd->id;
-        trace_var(TRC_CSCHED2_RUNQ_ASSIGN, 1,
-                  sizeof(d),
-                  (unsigned char *)&d);
+        __trace_var(TRC_CSCHED2_RUNQ_ASSIGN, 1,
+                    sizeof(d),
+                    (unsigned char *)&d);
     }
 }
 
@@ -1489,7 +1495,7 @@ csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
 out_up:
     spin_unlock(&prv->lock);
 
-    /* TRACE */
+    if ( unlikely(tb_init_done) )
     {
         struct {
             uint64_t b_avgload;
@@ -1501,9 +1507,9 @@ out_up:
         d.vcpu = vc->vcpu_id;
         d.rq_id = c2r(ops, new_cpu);
         d.new_cpu = new_cpu;
-        trace_var(TRC_CSCHED2_PICKED_CPU, 1,
-                  sizeof(d),
-                  (unsigned char *)&d);
+        __trace_var(TRC_CSCHED2_PICKED_CPU, 1,
+                    sizeof(d),
+                    (unsigned char *)&d);
     }
 
     return new_cpu;
@@ -1561,7 +1567,7 @@ static void migrate(const struct scheduler *ops,
     bool_t running = svc->flags & CSFLAG_scheduled;
     bool_t on_runq = __vcpu_on_runq(svc);
 
-    /* TRACE */
+    if ( unlikely(tb_init_done) )
     {
         struct {
             unsigned vcpu:16, dom:16;
@@ -1571,9 +1577,9 @@ static void migrate(const struct scheduler *ops,
         d.vcpu = svc->vcpu->vcpu_id;
         d.rqi = svc->rqd->id;
         d.trqi = trqd->id;
-        trace_var(TRC_CSCHED2_MIGRATE, 1,
-                  sizeof(d),
-                  (unsigned char *)&d);
+        __trace_var(TRC_CSCHED2_MIGRATE, 1,
+                    sizeof(d),
+                    (unsigned char *)&d);
     }
 
     if ( running )
@@ -1696,10 +1702,8 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
 
     cpus_max = cpumask_weight(&st.lrqd->active);
     i = cpumask_weight(&st.orqd->active);
-    if ( i > cpus_max )
-        cpus_max = i;
 
-    /* TRACE */
+    if ( unlikely(tb_init_done) )
     {
         struct {
             unsigned lrq_id:16, orq_id:16;
@@ -1708,9 +1712,9 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
         d.lrq_id = st.lrqd->id;
         d.orq_id = st.orqd->id;
         d.load_delta = st.load_delta;
-        trace_var(TRC_CSCHED2_LOAD_CHECK, 1,
-                  sizeof(d),
-                  (unsigned char *)&d);
+        __trace_var(TRC_CSCHED2_LOAD_CHECK, 1,
+                    sizeof(d),
+                    (unsigned char *)&d);
     }
 
     /*
@@ -1741,7 +1745,7 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
     if ( unlikely(st.orqd->id < 0) )
         goto out_up;
 
-    /* TRACE */
+    if ( unlikely(tb_init_done) )
     {
         struct {
             uint64_t lb_avgload, ob_avgload;
@@ -1751,9 +1755,9 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
         d.lb_avgload = st.lrqd->b_avgload;
         d.orq_id = st.orqd->id;
         d.ob_avgload = st.orqd->b_avgload;
-        trace_var(TRC_CSCHED2_LOAD_BALANCE, 1,
-                  sizeof(d),
-                  (unsigned char *)&d);
+        __trace_var(TRC_CSCHED2_LOAD_BALANCE, 1,
+                    sizeof(d),
+                    (unsigned char *)&d);
     }
 
     now = NOW();
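For readers who do not know Xen's tracing layer: trace_var() is a thin wrapper that checks tb_init_done (the flag saying the trace buffers have been initialised) and only then calls __trace_var(). A call site using the wrapper has, however, already paid for marshalling the per-event struct d by the time that check runs, even if the record is then thrown away. Open-coding the tb_init_done check around the marshalling, as every hunk above does, skips all of that work on the common tracing-disabled path. What follows is a minimal standalone sketch of the before/after pattern; tb_init_done, __trace_var(), trace_var() and TRC_EXAMPLE here are simplified stand-ins for illustration, not Xen's actual definitions.

/*
 * Minimal standalone sketch of the pattern this patch applies; build
 * with e.g. "gcc -O2 sketch.c".  The tracing machinery below is a
 * simplified stand-in for Xen's, used only to show the control flow.
 */
#include <stdint.h>
#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

static int tb_init_done;    /* set once the trace buffers are initialised */

/* Stand-in for __trace_var(): unconditionally emits the record. */
static void __trace_var(uint32_t event, int cycles,
                        unsigned int extra, const void *extra_data)
{
    printf("event %#x: %u bytes of payload\n", (unsigned)event, extra);
}

/*
 * Stand-in for the trace_var() wrapper: it does check tb_init_done, but
 * only after the caller has already paid for marshalling the payload.
 */
static void trace_var(uint32_t event, int cycles,
                      unsigned int extra, const void *extra_data)
{
    if ( unlikely(tb_init_done) )
        __trace_var(event, cycles, extra, extra_data);
}

#define TRC_EXAMPLE 0x1234  /* hypothetical event id */

/* Pre-patch shape: struct d is assembled on every call, traced or not. */
static void trace_point_old(int dom, int vcpu, int credit)
{
    struct {
        unsigned vcpu:16, dom:16;
        unsigned credit;
    } d;
    d.dom = dom;
    d.vcpu = vcpu;
    d.credit = credit;
    trace_var(TRC_EXAMPLE, 1, sizeof(d), (unsigned char *)&d);
}

/* Post-patch shape: with tracing off, the whole block is skipped. */
static void trace_point_new(int dom, int vcpu, int credit)
{
    if ( unlikely(tb_init_done) )
    {
        struct {
            unsigned vcpu:16, dom:16;
            unsigned credit;
        } d;
        d.dom = dom;
        d.vcpu = vcpu;
        d.credit = credit;
        __trace_var(TRC_EXAMPLE, 1, sizeof(d), (unsigned char *)&d);
    }
}

int main(void)
{
    trace_point_old(0, 1, 500); /* marshals, then discards: wasted work */
    trace_point_new(0, 1, 500); /* tracing off: no marshalling at all */
    tb_init_done = 1;
    trace_point_new(0, 1, 500); /* tracing on: the record is emitted */
    return 0;
}

In other words, with tracing disabled each trace point now costs a single, predictably-not-taken branch, instead of a struct assembly plus a call that discards the result; the price is an extra if ( unlikely(tb_init_done) ) at every call site.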