[xen staging] xen/sched: Clean up trace handling
commit d64693df1ab98f5278cf32ab3e0c12295237c42b
Author: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Mon Sep 20 14:07:43 2021 +0100
Commit: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Mon May 20 18:29:44 2024 +0100
xen/sched: Clean up trace handling
There is no need for bitfields anywhere - use more sensible types. There is
also no need to cast 'd' to (unsigned char *) before passing it to a
function taking void *. Switch to the new trace_time() API.

No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Reviewed-by: Dario Faggioli <dfaggioli@xxxxxxxx>
Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxx>
---
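For reference, the pattern applied throughout the diff looks roughly like
the sketch below (a minimal sketch, not part of the patch; trace_time() is
assumed to be a thin wrapper around __trace_var() with the tsc flag fixed
to 1, which is what the mechanical replacement of
"__trace_var(event, 1/*tsc*/, ...)" with "trace_time(event, ...)" suggests):

    /* Old form: bitfields, explicit tsc flag, needless cast. */
    struct { uint32_t vcpu:16, domain:16; } d;

    d.vcpu = v->vcpu_id;
    d.domain = v->domain->domain_id;
    __trace_var(event, 1/*tsc*/, sizeof(d), (unsigned char *)&d);

    /* New form: fixed-width fields, designated initializers, and
     * trace_time(), which always records a timestamp. */
    struct { uint16_t vcpu, domain; } d = {
        .vcpu = v->vcpu_id,
        .domain = v->domain->domain_id,
    };

    trace_time(event, sizeof(d), &d);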
xen/common/sched/core.c | 56 ++++++++++++++++++-------------------
xen/common/sched/credit.c | 70 ++++++++++++++++++++++-------------------------
xen/common/sched/null.c | 63 ++++++++++++++++++++++++------------------
3 files changed, 97 insertions(+), 92 deletions(-)
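Note that "no functional change" relies on the replacement structs matching
the size and layout of the bitfield originals: on the little-endian ABIs Xen
targets, "uint32_t vcpu:16, domain:16" and "uint16_t vcpu, domain" both
occupy 4 bytes with vcpu in the low half, so the trace record format is
unchanged. A hypothetical compile-time check (not part of this patch) could
pin that down:

    /* Hypothetical layout check, not part of the patch. */
    struct rec_old { uint32_t vcpu:16, domain:16; };
    struct rec_new { uint16_t vcpu, domain; };
    BUILD_BUG_ON(sizeof(struct rec_old) != sizeof(struct rec_new));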
diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index babac7aad6..d84b65f197 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -205,7 +205,7 @@ static inline struct scheduler *vcpu_scheduler(const struct vcpu *v)
static inline void trace_runstate_change(const struct vcpu *v, int new_state)
{
- struct { uint32_t vcpu:16, domain:16; } d;
+ struct { uint16_t vcpu, domain; } d;
uint32_t event;
if ( likely(!tb_init_done) )
@@ -218,12 +218,12 @@ static inline void trace_runstate_change(const struct vcpu *v, int new_state)
event |= ( v->runstate.state & 0x3 ) << 8;
event |= ( new_state & 0x3 ) << 4;
- __trace_var(event, 1/*tsc*/, sizeof(d), &d);
+ trace_time(event, sizeof(d), &d);
}
static inline void trace_continue_running(const struct vcpu *v)
{
- struct { uint32_t vcpu:16, domain:16; } d;
+ struct { uint16_t vcpu, domain; } d;
if ( likely(!tb_init_done) )
return;
@@ -231,7 +231,7 @@ static inline void trace_continue_running(const struct vcpu *v)
d.vcpu = v->vcpu_id;
d.domain = v->domain->domain_id;
- __trace_var(TRC_SCHED_CONTINUE_RUNNING, 1/*tsc*/, sizeof(d), &d);
+ trace_time(TRC_SCHED_CONTINUE_RUNNING, sizeof(d), &d);
}
static inline void vcpu_urgent_count_update(struct vcpu *v)
@@ -872,7 +872,7 @@ int sched_init_domain(struct domain *d, unsigned int poolid)
return ret;
SCHED_STAT_CRANK(dom_init);
- TRACE_1D(TRC_SCHED_DOM_ADD, d->domain_id);
+ TRACE_TIME(TRC_SCHED_DOM_ADD, d->domain_id);
rcu_read_lock(&sched_res_rculock);
@@ -895,7 +895,7 @@ void sched_destroy_domain(struct domain *d)
if ( d->cpupool )
{
SCHED_STAT_CRANK(dom_destroy);
- TRACE_1D(TRC_SCHED_DOM_REM, d->domain_id);
+ TRACE_TIME(TRC_SCHED_DOM_REM, d->domain_id);
rcu_read_lock(&sched_res_rculock);
@@ -936,7 +936,7 @@ void vcpu_sleep_nosync(struct vcpu *v)
unsigned long flags;
spinlock_t *lock;
- TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
+ TRACE_TIME(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
rcu_read_lock(&sched_res_rculock);
@@ -965,7 +965,7 @@ void vcpu_wake(struct vcpu *v)
spinlock_t *lock;
struct sched_unit *unit = v->sched_unit;
- TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
+ TRACE_TIME(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
rcu_read_lock(&sched_res_rculock);
@@ -1425,7 +1425,7 @@ void vcpu_block(void)
}
else
{
- TRACE_2D(TRC_SCHED_BLOCK, v->domain->domain_id, v->vcpu_id);
+ TRACE_TIME(TRC_SCHED_BLOCK, v->domain->domain_id, v->vcpu_id);
raise_softirq(SCHEDULE_SOFTIRQ);
}
}
@@ -1498,7 +1498,7 @@ static long do_poll(const struct sched_poll *sched_poll)
if ( sched_poll->timeout != 0 )
set_timer(&v->poll_timer, sched_poll->timeout);
- TRACE_2D(TRC_SCHED_BLOCK, d->domain_id, v->vcpu_id);
+ TRACE_TIME(TRC_SCHED_BLOCK, d->domain_id, v->vcpu_id);
raise_softirq(SCHEDULE_SOFTIRQ);
return 0;
@@ -1526,7 +1526,7 @@ long vcpu_yield(void)
SCHED_STAT_CRANK(vcpu_yield);
- TRACE_2D(TRC_SCHED_YIELD, current->domain->domain_id, current->vcpu_id);
+ TRACE_TIME(TRC_SCHED_YIELD, current->domain->domain_id, current->vcpu_id);
raise_softirq(SCHEDULE_SOFTIRQ);
return 0;
}
@@ -1904,9 +1904,8 @@ ret_t do_sched_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
if ( copy_from_guest(&sched_shutdown, arg, 1) )
break;
- TRACE_3D(TRC_SCHED_SHUTDOWN,
- current->domain->domain_id, current->vcpu_id,
- sched_shutdown.reason);
+ TRACE_TIME(TRC_SCHED_SHUTDOWN, current->domain->domain_id,
+ current->vcpu_id, sched_shutdown.reason);
ret = domain_shutdown(current->domain, (u8)sched_shutdown.reason);
break;
@@ -1921,8 +1920,8 @@ ret_t do_sched_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
if ( copy_from_guest(&sched_shutdown, arg, 1) )
break;
- TRACE_3D(TRC_SCHED_SHUTDOWN_CODE,
- d->domain_id, current->vcpu_id, sched_shutdown.reason);
+ TRACE_TIME(TRC_SCHED_SHUTDOWN_CODE, d->domain_id, current->vcpu_id,
+ sched_shutdown.reason);
spin_lock(&d->shutdown_lock);
if ( d->shutdown_code == SHUTDOWN_CODE_INVALID )
@@ -2086,7 +2085,7 @@ long sched_adjust(struct domain *d, struct xen_domctl_scheduler_op *op)
rcu_read_lock(&sched_res_rculock);
if ( (ret = sched_adjust_dom(dom_scheduler(d), d, op)) == 0 )
- TRACE_1D(TRC_SCHED_ADJDOM, d->domain_id);
+ TRACE_TIME(TRC_SCHED_ADJDOM, d->domain_id);
rcu_read_unlock(&sched_res_rculock);
@@ -2181,14 +2180,13 @@ static void sched_switch_units(struct sched_resource *sr,
sr->curr = next;
sr->prev = prev;
- TRACE_3D(TRC_SCHED_SWITCH_INFPREV, prev->domain->domain_id,
- prev->unit_id, now - prev->state_entry_time);
- TRACE_4D(TRC_SCHED_SWITCH_INFNEXT, next->domain->domain_id,
- next->unit_id,
- (next->vcpu_list->runstate.state == RUNSTATE_runnable) ?
- (now - next->state_entry_time) : 0, prev->next_time);
- TRACE_4D(TRC_SCHED_SWITCH, prev->domain->domain_id, prev->unit_id,
- next->domain->domain_id, next->unit_id);
+ TRACE_TIME(TRC_SCHED_SWITCH_INFPREV, prev->domain->domain_id,
+ prev->unit_id, now - prev->state_entry_time);
+ TRACE_TIME(TRC_SCHED_SWITCH_INFNEXT, next->domain->domain_id, next->unit_id,
+ (next->vcpu_list->runstate.state == RUNSTATE_runnable) ?
+ (now - next->state_entry_time) : 0, prev->next_time);
+ TRACE_TIME(TRC_SCHED_SWITCH, prev->domain->domain_id, prev->unit_id,
+ next->domain->domain_id, next->unit_id);
ASSERT(!unit_running(next));
@@ -2379,10 +2377,10 @@ static void sched_context_switch(struct vcpu *vprev, struct vcpu *vnext,
{
if ( unlikely(vprev == vnext) )
{
- TRACE_4D(TRC_SCHED_SWITCH_INFCONT,
- vnext->domain->domain_id, vnext->sched_unit->unit_id,
- now - vprev->runstate.state_entry_time,
- vprev->sched_unit->next_time);
+ TRACE_TIME(TRC_SCHED_SWITCH_INFCONT,
+ vnext->domain->domain_id, vnext->sched_unit->unit_id,
+ now - vprev->runstate.state_entry_time,
+ vprev->sched_unit->next_time);
sched_context_switched(vprev, vnext);
/*
diff --git a/xen/common/sched/credit.c b/xen/common/sched/credit.c
index a72d071527..020f44595e 100644
--- a/xen/common/sched/credit.c
+++ b/xen/common/sched/credit.c
@@ -493,7 +493,7 @@ static inline void __runq_tickle(const struct csched_unit *new)
{
/* Avoid TRACE_*: saves checking !tb_init_done each step */
for_each_cpu(cpu, &mask)
- __trace_var(TRC_CSCHED_TICKLE, 1, sizeof(cpu), &cpu);
+ trace_time(TRC_CSCHED_TICKLE, sizeof(cpu), &cpu);
}
/*
@@ -862,8 +862,7 @@ _csched_cpu_pick(const struct scheduler *ops, const struct sched_unit *unit,
if ( commit && spc )
spc->idle_bias = cpu;
- TRACE_3D(TRC_CSCHED_PICKED_CPU, unit->domain->domain_id, unit->unit_id,
- cpu);
+ TRACE_TIME(TRC_CSCHED_PICKED_CPU, unit->domain->domain_id, unit->unit_id, cpu);
return cpu;
}
@@ -907,8 +906,8 @@ __csched_unit_acct_start(struct csched_private *prv, struct csched_unit *svc)
}
}
- TRACE_3D(TRC_CSCHED_ACCOUNT_START, sdom->dom->domain_id,
- svc->unit->unit_id, sdom->active_unit_count);
+ TRACE_TIME(TRC_CSCHED_ACCOUNT_START, sdom->dom->domain_id,
+ svc->unit->unit_id, sdom->active_unit_count);
spin_unlock_irqrestore(&prv->lock, flags);
}
@@ -933,8 +932,8 @@ __csched_unit_acct_stop_locked(struct csched_private *prv,
list_del_init(&sdom->active_sdom_elem);
}
- TRACE_3D(TRC_CSCHED_ACCOUNT_STOP, sdom->dom->domain_id,
- svc->unit->unit_id, sdom->active_unit_count);
+ TRACE_TIME(TRC_CSCHED_ACCOUNT_STOP, sdom->dom->domain_id,
+ svc->unit->unit_id, sdom->active_unit_count);
}
static void
@@ -957,8 +956,8 @@ csched_unit_acct(struct csched_private *prv, unsigned int cpu)
if ( svc->pri == CSCHED_PRI_TS_BOOST )
{
svc->pri = CSCHED_PRI_TS_UNDER;
- TRACE_2D(TRC_CSCHED_BOOST_END, svc->sdom->dom->domain_id,
- svc->unit->unit_id);
+ TRACE_TIME(TRC_CSCHED_BOOST_END, svc->sdom->dom->domain_id,
+ svc->unit->unit_id);
}
/*
@@ -1164,8 +1163,8 @@ csched_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
if ( !migrating && svc->pri == CSCHED_PRI_TS_UNDER &&
!test_bit(CSCHED_FLAG_UNIT_PARKED, &svc->flags) )
{
- TRACE_2D(TRC_CSCHED_BOOST_START, unit->domain->domain_id,
- unit->unit_id);
+ TRACE_TIME(TRC_CSCHED_BOOST_START, unit->domain->domain_id, unit->unit_id);
SCHED_STAT_CRANK(unit_boost);
svc->pri = CSCHED_PRI_TS_BOOST;
}
@@ -1671,8 +1669,8 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
if ( __csched_unit_is_migrateable(prv, unit, cpu, cpumask_scratch) )
{
/* We got a candidate. Grab it! */
- TRACE_3D(TRC_CSCHED_STOLEN_UNIT, peer_cpu,
- unit->domain->domain_id, unit->unit_id);
+ TRACE_TIME(TRC_CSCHED_STOLEN_UNIT, peer_cpu,
+ unit->domain->domain_id, unit->unit_id);
SCHED_UNIT_STAT_CRANK(speer, migrate_q);
SCHED_STAT_CRANK(migrate_queued);
runq_remove(speer);
@@ -1783,7 +1781,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
*/
if ( CSCHED_PCPU(peer_cpu)->nr_runnable <= 1 )
{
- TRACE_2D(TRC_CSCHED_STEAL_CHECK, peer_cpu, /* skipp'n */ 0);
+ TRACE_TIME(TRC_CSCHED_STEAL_CHECK, peer_cpu, /* skipp'n */ 0);
goto next_cpu;
}
@@ -1799,11 +1797,11 @@ csched_load_balance(struct csched_private *prv, int cpu,
if ( !lock )
{
SCHED_STAT_CRANK(steal_trylock_failed);
- TRACE_2D(TRC_CSCHED_STEAL_CHECK, peer_cpu, /* skip */ 0);
+ TRACE_TIME(TRC_CSCHED_STEAL_CHECK, peer_cpu, /* skip */ 0);
goto next_cpu;
}
- TRACE_2D(TRC_CSCHED_STEAL_CHECK, peer_cpu, /* checked */ 1);
+ TRACE_TIME(TRC_CSCHED_STEAL_CHECK, peer_cpu, /* checked */ 1);
/* Any work over there to steal? */
speer = cpumask_test_cpu(peer_cpu, online) ?
@@ -1860,21 +1858,18 @@ static void cf_check csched_schedule(
SCHED_STAT_CRANK(schedule);
CSCHED_UNIT_CHECK(unit);
- /*
- * Here in Credit1 code, we usually just call TRACE_nD() helpers, and
- * don't care about packing. But scheduling happens very often, so it
- * actually is important that the record is as small as possible.
- */
if ( unlikely(tb_init_done) )
{
struct {
- unsigned cpu:16, tasklet:8, idle:8;
- } d;
- d.cpu = cur_cpu;
- d.tasklet = tasklet_work_scheduled;
- d.idle = is_idle_unit(unit);
- __trace_var(TRC_CSCHED_SCHEDULE, 1, sizeof(d),
- (unsigned char *)&d);
+ uint16_t cpu;
+ uint8_t tasklet, idle;
+ } d = {
+ .cpu = cur_cpu,
+ .tasklet = tasklet_work_scheduled,
+ .idle = is_idle_unit(unit),
+ };
+
+ trace_time(TRC_CSCHED_SCHEDULE, sizeof(d), &d);
}
runtime = now - unit->state_entry_time;
@@ -1936,14 +1931,15 @@ static void cf_check csched_schedule(
if ( unlikely(tb_init_done) )
{
struct {
- unsigned unit:16, dom:16;
- unsigned runtime;
- } d;
- d.dom = unit->domain->domain_id;
- d.unit = unit->unit_id;
- d.runtime = runtime;
- __trace_var(TRC_CSCHED_RATELIMIT, 1, sizeof(d),
- (unsigned char *)&d);
+ uint16_t unit, dom;
+ uint32_t runtime;
+ } d = {
+ .dom = unit->domain->domain_id,
+ .unit = unit->unit_id,
+ .runtime = runtime,
+ };
+
+ trace_time(TRC_CSCHED_RATELIMIT, sizeof(d), &d);
}
goto out;
@@ -1968,7 +1964,7 @@ static void cf_check csched_schedule(
/* Tasklet work (which runs in idle UNIT context) overrides all else. */
if ( tasklet_work_scheduled )
{
- TRACE_0D(TRC_CSCHED_SCHED_TASKLET);
+ TRACE_TIME(TRC_CSCHED_SCHED_TASKLET);
snext = CSCHED_UNIT(sched_idle_unit(sched_cpu));
snext->pri = CSCHED_PRI_TS_BOOST;
}
diff --git a/xen/common/sched/null.c b/xen/common/sched/null.c
index 2091337fcd..7e31440e5b 100644
--- a/xen/common/sched/null.c
+++ b/xen/common/sched/null.c
@@ -331,11 +331,13 @@ pick_res(const struct null_private *prv, const struct sched_unit *unit)
struct {
uint16_t unit, dom;
uint32_t new_cpu;
- } d;
- d.dom = unit->domain->domain_id;
- d.unit = unit->unit_id;
- d.new_cpu = new_cpu;
- __trace_var(TRC_SNULL_PICKED_CPU, 1, sizeof(d), &d);
+ } d = {
+ .unit = unit->unit_id,
+ .dom = unit->domain->domain_id,
+ .new_cpu = new_cpu,
+ };
+
+ trace_time(TRC_SNULL_PICKED_CPU, sizeof(d), &d);
}
return get_sched_res(new_cpu);
@@ -359,11 +361,13 @@ static void unit_assign(struct null_private *prv, struct sched_unit *unit,
struct {
uint16_t unit, dom;
uint32_t cpu;
- } d;
- d.dom = unit->domain->domain_id;
- d.unit = unit->unit_id;
- d.cpu = cpu;
- __trace_var(TRC_SNULL_UNIT_ASSIGN, 1, sizeof(d), &d);
+ } d = {
+ .unit = unit->unit_id,
+ .dom = unit->domain->domain_id,
+ .cpu = cpu,
+ };
+
+ trace_time(TRC_SNULL_UNIT_ASSIGN, sizeof(d), &d);
}
}
@@ -390,11 +394,13 @@ static bool unit_deassign(struct null_private *prv, const struct sched_unit *unit
struct {
uint16_t unit, dom;
uint32_t cpu;
- } d;
- d.dom = unit->domain->domain_id;
- d.unit = unit->unit_id;
- d.cpu = cpu;
- __trace_var(TRC_SNULL_UNIT_DEASSIGN, 1, sizeof(d), &d);
+ } d = {
+ .unit = unit->unit_id,
+ .dom = unit->domain->domain_id,
+ .cpu = cpu,
+ };
+
+ trace_time(TRC_SNULL_UNIT_DEASSIGN, sizeof(d), &d);
}
spin_lock(&prv->waitq_lock);
@@ -695,12 +701,14 @@ static void cf_check null_unit_migrate(
struct {
uint16_t unit, dom;
uint16_t cpu, new_cpu;
- } d;
- d.dom = unit->domain->domain_id;
- d.unit = unit->unit_id;
- d.cpu = sched_unit_master(unit);
- d.new_cpu = new_cpu;
- __trace_var(TRC_SNULL_MIGRATE, 1, sizeof(d), &d);
+ } d = {
+ .unit = unit->unit_id,
+ .dom = unit->domain->domain_id,
+ .cpu = sched_unit_master(unit),
+ .new_cpu = new_cpu,
+ };
+
+ trace_time(TRC_SNULL_MIGRATE, sizeof(d), &d);
}
/*
@@ -824,9 +832,11 @@ static void cf_check null_schedule(
struct {
uint16_t tasklet, cpu;
int16_t unit, dom;
- } d;
- d.cpu = cur_cpu;
- d.tasklet = tasklet_work_scheduled;
+ } d = {
+ .tasklet = tasklet_work_scheduled,
+ .cpu = cur_cpu,
+ };
+
if ( npc->unit == NULL )
{
d.unit = d.dom = -1;
@@ -836,12 +846,13 @@ static void cf_check null_schedule(
d.unit = npc->unit->unit_id;
d.dom = npc->unit->domain->domain_id;
}
- __trace_var(TRC_SNULL_SCHEDULE, 1, sizeof(d), &d);
+
+ trace_time(TRC_SNULL_SCHEDULE, sizeof(d), &d);
}
if ( tasklet_work_scheduled )
{
- trace_var(TRC_SNULL_TASKLET, 1, 0, NULL);
+ TRACE_TIME(TRC_SNULL_TASKLET);
prev->next_task = sched_idle_unit(sched_cpu);
}
else
--
generated by git-patchbot for /home/xen/git/xen.git#staging