|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH 2/6] xen/memory: Remove tail padding from TRC_MEM_* records
Four TRC_MEM_* records supply custom structures with tail padding, leaking
stack rubble into the trace buffer. Three of the records were fine in 32-bit
builds of Xen, due to the relaxed alignment of 64-bit integers, but
POD_SUPERPAGE_SPLINTER was broken right from the outset.
We could pack the datastructures to remove the padding, but xentrace_format
has no way of rendering the upper half of a 32-bit field. Instead, expand all
16-bit fields to 32-bit.
For POD_SUPERPAGE_SPLINTER, introduce an order field as it is relevant
information, and to match DECREASE_RESERVATION, and so it doesn't require a
__packed attribute to drop tail padding.
Update xenalyze's structures to match, and introduce xentrace_format rendering
which was absent previously.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
CC: Ian Jackson <iwj@xxxxxxxxxxxxxx>
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
CC: Julien Grall <julien@xxxxxxx>
CC: Dario Faggioli <dfaggioli@xxxxxxxx>
The xentrace_format script isn't remotely Py3 compatible, and was another
script missed by our previous efforts.
---
tools/xentrace/formats | 4 ++++
tools/xentrace/xenalyze.c | 12 ++++++------
xen/arch/x86/mm/p2m-pod.c | 17 +++++++++--------
xen/common/memory.c | 4 ++--
4 files changed, 21 insertions(+), 16 deletions(-)
diff --git a/tools/xentrace/formats b/tools/xentrace/formats
index deac4d8598b0..0fcc327a4078 100644
--- a/tools/xentrace/formats
+++ b/tools/xentrace/formats
@@ -136,6 +136,10 @@
0x0010f001 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) page_grant_map [ domid =
%(1)d ]
0x0010f002 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) page_grant_unmap [ domid =
%(1)d ]
0x0010f003 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) page_grant_transfer [ domid =
%(1)d ]
+0x0010f005 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) decrease_reservation [
d%(3)d gfn 0x%(2)08x%(1)08x, order %(4)u ]
+0x0010f010 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) pod_populate [
d%(5)d gfn 0x%(2)08x%(1)08x => mfn 0x%(4)08x%(3)08x, order %(6)u ]
+0x0010f011 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) pod_zero_reclaim [
d%(5)d gfn 0x%(2)08x%(1)08x => mfn 0x%(4)08x%(3)08x, order %(6)u ]
+0x0010f012 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) pod_superpage_splinter [
d%(3)d gfn 0x%(2)08x%(1)08x, order %(4)u ]
0x00201001 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) hypercall [ eip = 0x%(1)08x,
eax = 0x%(2)08x ]
0x00201101 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) hypercall [ rip =
0x%(2)08x%(1)08x, eax = 0x%(3)08x ]
diff --git a/tools/xentrace/xenalyze.c b/tools/xentrace/xenalyze.c
index 5de167031e01..12dcca964645 100644
--- a/tools/xentrace/xenalyze.c
+++ b/tools/xentrace/xenalyze.c
@@ -8121,7 +8121,7 @@ void mem_pod_zero_reclaim_process(struct pcpu_info *p)
struct {
uint64_t gfn, mfn;
- int d:16,order:16;
+ uint32_t d, order;
} *r = (typeof(r))ri->d;
if ( v && v->hvm.vmexit_valid )
@@ -8171,7 +8171,7 @@ void mem_pod_populate_process(struct pcpu_info *p)
struct {
uint64_t gfn, mfn;
- int d:16,order:16;
+ uint32_t d, order;
} *r = (typeof(r))ri->d;
if ( opt.dump_all )
@@ -8204,14 +8204,14 @@ void mem_pod_superpage_splinter_process(struct
pcpu_info *p)
struct {
uint64_t gfn;
- int d:16;
+ uint32_t d, order;
} *r = (typeof(r))ri->d;
if ( opt.dump_all )
{
- printf(" %s pod_spage_splinter d%d g %llx\n",
+ printf(" %s pod_spage_splinter d%d o%d g %"PRIx64"\n",
ri->dump_header,
- r->d, (unsigned long long)r->gfn);
+ r->d, r->order, r->gfn);
}
}
@@ -8255,7 +8255,7 @@ void mem_decrease_reservation_process(struct pcpu_info *p)
struct {
uint64_t gfn;
- int d:16,order:16;
+ uint32_t d, order;
} *r = (typeof(r))ri->d;
if ( opt.dump_all )
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index 8abc57265c10..90f02ae765f6 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -819,8 +819,8 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, gfn_t
gfn)
if ( tb_init_done )
{
struct {
- u64 gfn, mfn;
- int d:16,order:16;
+ uint64_t gfn, mfn;
+ uint32_t d, order;
} t;
t.gfn = gfn_x(gfn);
@@ -987,8 +987,8 @@ p2m_pod_zero_check(struct p2m_domain *p2m, const gfn_t
*gfns, unsigned int count
if ( tb_init_done )
{
struct {
- u64 gfn, mfn;
- int d:16,order:16;
+ uint64_t gfn, mfn;
+ uint32_t d, order;
} t;
t.gfn = gfn_x(gfns[i]);
@@ -1217,8 +1217,8 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn,
if ( tb_init_done )
{
struct {
- u64 gfn, mfn;
- int d:16,order:16;
+ uint64_t gfn, mfn;
+ uint32_t d, order;
} t;
t.gfn = gfn_x(gfn);
@@ -1260,12 +1260,13 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t
gfn,
if ( tb_init_done )
{
struct {
- u64 gfn;
- int d:16;
+ uint64_t gfn;
+ uint32_t d, order;
} t;
t.gfn = gfn_x(gfn);
t.d = d->domain_id;
+ t.order = order;
__trace_var(TRC_MEM_POD_SUPERPAGE_SPLINTER, 0, sizeof(t), &t);
}
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 63642278fda9..8fd88ccb70bf 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -450,8 +450,8 @@ static void decrease_reservation(struct memop_args *a)
if ( tb_init_done )
{
struct {
- u64 gfn;
- int d:16,order:16;
+ uint64_t gfn;
+ uint32_t d, order;
} t;
t.gfn = gmfn;
--
2.11.0
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |