[Xen-devel] [PATCH 09/15] tools: tracing: handle RCU events in xentrace and xenalyze
Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
---
tools/xentrace/analyze.h | 1
tools/xentrace/formats | 9 ++++
tools/xentrace/xenalyze.c | 115 +++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 124 insertions(+), 1 deletion(-)
diff --git a/tools/xentrace/analyze.h b/tools/xentrace/analyze.h
index 40ee551..29e74fd 100644
--- a/tools/xentrace/analyze.h
+++ b/tools/xentrace/analyze.h
@@ -13,6 +13,7 @@
 #define TRC_PV_MAIN 5
 #define TRC_SHADOW_MAIN 6
 #define TRC_HW_MAIN 7
+#define TRC_XEN_MAIN 8
 #define TRC_LOST_RECORDS_END (TRC_GEN + 50)
diff --git a/tools/xentrace/formats b/tools/xentrace/formats
index 2e653ca..77dbd93 100644
--- a/tools/xentrace/formats
+++ b/tools/xentrace/formats
@@ -205,6 +205,15 @@
 0x0080200d CPU%(cpu)d %(tsc)d (+%(reltsc)8d) irq_disable [ from = 0x%(2)08x%(1)08x ]
 0x0080200e CPU%(cpu)d %(tsc)d (+%(reltsc)8d) irq_enable [ from = 0x%(2)08x%(1)08x ]
+0x01001001 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) rcu_force_qstate [ ]
+0x01001002 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) rcu_call [ fn = 0x%(2)08x%(1)08x ]
+0x01001003 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) rcu_start_batch [ CPU mask: 0x%(6)08x%(5)08x%(4)08x%(3)08x%(2)08x%(1)08x ]
+0x01001004 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) rcu_do_batch [ fn = 0x%(2)08x%(1)08x, qlen = 0x%(4)08x%(3)08x ]
+0x01001005 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) rcu_cpu_quiet [ ]
+0x01001006 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) rcu_check_qstate [ pending = %(1)d ]
+0x01001007 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) rcu_do_callbacks [ ]
+0x01001008 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) rcu_pending [ %(1)d ]
+
 0x00084001 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) hpet create [ tn = %(1)d, irq = %(2)d, delta = 0x%(4)08x%(3)08x, period = 0x%(6)08x%(5)08x ]
 0x00084002 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) pit create [ delta = 0x%(1)016x, period = 0x%(2)016x ]
 0x00084003 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) rtc create [ delta = 0x%(1)016x , period = 0x%(2)016x ]
diff --git a/tools/xentrace/xenalyze.c b/tools/xentrace/xenalyze.c
index c4a8340..e98596a 100644
--- a/tools/xentrace/xenalyze.c
+++ b/tools/xentrace/xenalyze.c
@@ -1752,7 +1752,8 @@ enum {
     TOPLEVEL_PV,
     TOPLEVEL_SHADOW,
     TOPLEVEL_HW,
-    TOPLEVEL_MAX=TOPLEVEL_HW+1,
+    TOPLEVEL_XEN,
+    TOPLEVEL_MAX=TOPLEVEL_XEN+1,
 };
 char * toplevel_name[TOPLEVEL_MAX] = {
@@ -1764,6 +1765,7 @@ char * toplevel_name[TOPLEVEL_MAX] = {
     [TOPLEVEL_PV]="pv",
     [TOPLEVEL_SHADOW]="shadow",
     [TOPLEVEL_HW]="hw",
+    [TOPLEVEL_XEN]="xen",
 };
 struct trace_volume {
@@ -8588,6 +8590,101 @@ void irq_process(struct pcpu_info *p) {
     }
 }
+void rcu_process(struct pcpu_info *p) {
+    struct record_info *ri = &p->ri;
+
+    switch ( ri->event )
+    {
+    case TRC_XEN_RCU_FORCE_QSTATE:
+    {
+        if ( opt.dump_all )
+            printf(" %s rcu_force_quiescent_state\n",
+                   ri->dump_header);
+        break;
+    }
+    case TRC_XEN_RCU_CALL_RCU:
+    {
+        struct {
+            uint64_t addr;
+        } *r = (typeof(r))ri->d;
+
+        if ( opt.dump_all )
+            printf(" %s rcu_call fn=%p\n",
+                   ri->dump_header, (void*)r->addr);
+        break;
+    }
+    case TRC_XEN_RCU_DO_BATCH:
+    {
+        struct {
+            uint64_t addr;
+            int64_t qlen;
+        } *r = (typeof(r))ri->d;
+
+        if ( opt.dump_all )
+            printf(" %s rcu_do_batch, fn=%p, qlen=%ld\n",
+                   ri->dump_header, (void*)r->addr, r->qlen);
+        break;
+    }
+    case TRC_XEN_RCU_START_BATCH:
+    {
+        struct {
+            uint32_t mask[6];
+        } *r = (typeof(r))ri->d;
+
+        if ( opt.dump_all )
+        {
+            int i = 5;
+
+            while ( i >= 0 && !r->mask[i] ) i--;
+            printf(" %s rcu_start_batch, cpumask 0x", ri->dump_header);
+            for ( ; i >= 0 ; i-- )
+                printf("%08x", r->mask[i]);
+            printf("\n");
+        }
+        break;
+    }
+    case TRC_XEN_RCU_CPU_QUIET:
+    {
+        if ( opt.dump_all )
+            printf(" %s rcu_cpu_quiet\n", ri->dump_header);
+        break;
+    }
+    case TRC_XEN_RCU_CHECK_QSTATE:
+    {
+        struct {
+            uint32_t qs_pending;
+        } *r = (typeof(r))ri->d;
+
+        if ( opt.dump_all )
+            printf(" %s rcu_check_quiesc_state, qs_pending=%u\n",
+                   ri->dump_header, r->qs_pending);
+        break;
+    }
+    case TRC_XEN_RCU_DO_CALLBKS:
+    {
+        if ( opt.dump_all )
+            printf(" %s rcu_process_callbacks\n", ri->dump_header);
+        break;
+    }
+    case TRC_XEN_RCU_PENDING:
+    {
+        struct {
+            uint32_t pending;
+        } *r = (typeof(r))ri->d;
+
+        if ( opt.dump_all )
+            printf(" %s rcu_pending? %s\n",
+                   ri->dump_header,
+                   r->pending ? "yes" : "no");
+        break;
+    }
+    default:
+        if ( opt.dump_all )
+            dump_generic(stdout, ri);
+        break;
+    }
+}
+
 #define TRC_HW_SUB_PM 1
 #define TRC_HW_SUB_IRQ 2
 void hw_process(struct pcpu_info *p)
@@ -8606,6 +8703,19 @@ void hw_process(struct pcpu_info *p)
 }
+#define TRC_XEN_SUB_RCU 1
+void xen_process(struct pcpu_info *p)
+{
+    struct record_info *ri = &p->ri;
+
+    switch(ri->evt.sub)
+    {
+    case TRC_XEN_SUB_RCU:
+        rcu_process(p);
+        break;
+    }
+}
+
 #define TRC_DOM0_SUB_DOMOPS 1
 void dom0_process(struct pcpu_info *p)
 {
@@ -9464,6 +9574,9 @@ void process_record(struct pcpu_info *p) {
     case TRC_HW_MAIN:
         hw_process(p);
         break;
+    case TRC_XEN_MAIN:
+        xen_process(p);
+        break;
     case TRC_DOM0OP_MAIN:
         dom0_process(p);
         break;
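
A note for readers decoding the new records by hand (an illustration only, not part of the patch): the 0x01001xxx IDs in the formats hunk appear to follow the usual Xen trace-ID layout, with the class bit for the new "xen" toplevel in the upper 16 bits (0x0100), the subclass in the next four bits (1, matching TRC_XEN_SUB_RCU), and the per-event number in the low 12 bits. A minimal, self-contained sketch of that decoding follows; the field widths are assumptions read off the IDs above, not values copied from xen's trace.h.

/*
 * Illustration only, not part of the patch.  Field widths are assumed
 * from the 0x01001xxx IDs in the formats hunk (class in the upper 16
 * bits, 4-bit subclass, 12-bit event number).
 */
#include <stdint.h>
#include <stdio.h>

#define XEN_CLASS   0x0100   /* the new "xen" toplevel class          */
#define XEN_SUB_RCU 1        /* matches TRC_XEN_SUB_RCU in the patch  */

static void decode(uint32_t event)
{
    uint32_t cls = event >> 16;           /* trace class bits            */
    uint32_t sub = (event >> 12) & 0xf;   /* subclass, 1 == RCU          */
    uint32_t num = event & 0xfff;         /* per-event number, 1..8 here */

    printf("0x%08x -> class 0x%04x, sub %u, event %u%s\n",
           event, cls, sub, num,
           (cls == XEN_CLASS && sub == XEN_SUB_RCU) ? " (xen/rcu)" : "");
}

int main(void)
{
    decode(0x01001002);   /* rcu_call     */
    decode(0x01001004);   /* rcu_do_batch */
    return 0;
}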
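
Similarly, the %(2)08x%(1)08x patterns in the formats entries, and the struct casts onto ri->d in rcu_process(), rely on 64-bit payloads (the callback address, the queue length) being logged as two consecutive 32-bit trace words, low word first. Below is a hedged sketch of doing that reassembly by hand, assuming that word order; the d[] contents are made-up example values.

/*
 * Sketch only: rebuild the 64-bit fields of an rcu_do_batch record
 * (callback address and qlen) from raw 32-bit trace words, assuming the
 * low-word-first layout implied by the %(2)08x%(1)08x format strings.
 */
#include <inttypes.h>
#include <stdio.h>

static uint64_t u64_from_words(const uint32_t *d, int lo)
{
    return ((uint64_t)d[lo + 1] << 32) | d[lo];
}

int main(void)
{
    /* d[0..1] = fn (low, high), d[2..3] = qlen (low, high) */
    uint32_t d[4] = { 0x80123456, 0xffff82d0, 0x00000003, 0x00000000 };

    printf("rcu_do_batch: fn=0x%016" PRIx64 ", qlen=%" PRId64 "\n",
           u64_from_words(d, 0), (int64_t)u64_from_words(d, 2));
    return 0;
}

Note that every case in rcu_process() is guarded by opt.dump_all, so the new records are only printed when xenalyze is run in its dump-all mode.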