|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v8 17/20] xenctx: Allow output for offline vcpu when specified.
xc_domain_hvm_getcontext_partial() will fail to get data for an
offline cpu. Switch back to the PV mode of calculating ctxt_word_size
and guest_word_size in that case.
Apply this change to xc_translate_foreign_address() as well. In order
to handle 32-bit PAE mode, use the HVM way, not the PV way, to do the
cr3 handling.
Signed-off-by: Don Slutz <dslutz@xxxxxxxxxxx>
---
v8: Also change xc_translate_foreign_address() to work.
tools/libxc/xc_pagetab.c | 46 +++++++++++++++++++++++++++++++++++++++-------
tools/xentrace/xenctx.c | 6 ++++++
2 files changed, 45 insertions(+), 7 deletions(-)
diff --git a/tools/libxc/xc_pagetab.c b/tools/libxc/xc_pagetab.c
index 8525967..9d3349f 100644
--- a/tools/libxc/xc_pagetab.c
+++ b/tools/libxc/xc_pagetab.c
@@ -41,15 +41,47 @@ unsigned long xc_translate_foreign_address(xc_interface
*xch, uint32_t dom,
/* What kind of paging are we dealing with? */
if (dominfo.hvm) {
- struct hvm_hw_cpu ctx;
+ struct hvm_hw_cpu hvm_ctx;
if (xc_domain_hvm_getcontext_partial(xch, dom,
HVM_SAVE_CODE(CPU), vcpu,
- &ctx, sizeof ctx) != 0)
- return 0;
- if (!(ctx.cr0 & CR0_PG))
- return virt >> PAGE_SHIFT;
- pt_levels = (ctx.msr_efer&EFER_LMA) ? 4 : (ctx.cr4&CR4_PAE) ? 3 : 2;
- paddr = ctx.cr3 & ((pt_levels == 3) ? ~0x1full : ~0xfffull);
+ &hvm_ctx, sizeof hvm_ctx) != 0)
+ {
+ unsigned int gwidth;
+ vcpu_guest_context_any_t ctx;
+
+ if ( errno != EBADSLT )
+ return 0;
+ /*
+ * Offline CPU, use xc_vcpu_getcontext() if possible
+ */
+ if ( xc_vcpu_getcontext(xch, dom, vcpu, &ctx) != 0 )
+ return 0;
+ if ( xc_domain_get_guest_width(xch, dom, &gwidth) != 0 )
+ return 0;
+ if ( gwidth == 8 )
+ {
+ if ( !(ctx.x64.ctrlreg[0] & CR0_PG) )
+ return virt >> PAGE_SHIFT;
+ pt_levels = 4;
+ paddr = ctx.x64.ctrlreg[3] & ~0xfffull;
+ }
+ else
+ {
+ if ( !(ctx.x32.ctrlreg[0] & CR0_PG) )
+ return virt >> PAGE_SHIFT;
+ pt_levels = (ctx.x32.ctrlreg[4] & CR4_PAE) ? 3 : 2;
+ paddr = ctx.x32.ctrlreg[3] & ((pt_levels == 3) ?
+ ~0x1full : ~0xfffull);
+ }
+ }
+ else
+ {
+ if ( !(hvm_ctx.cr0 & CR0_PG) )
+ return virt >> PAGE_SHIFT;
+ pt_levels = (hvm_ctx.msr_efer & EFER_LMA) ?
+ 4 : (hvm_ctx.cr4 & CR4_PAE) ? 3 : 2;
+ paddr = hvm_ctx.cr3 & ((pt_levels == 3) ? ~0x1full : ~0xfffull);
+ }
} else {
unsigned int gwidth;
vcpu_guest_context_any_t ctx;
diff --git a/tools/xentrace/xenctx.c b/tools/xentrace/xenctx.c
index 1ab837e..9aed57e 100644
--- a/tools/xentrace/xenctx.c
+++ b/tools/xentrace/xenctx.c
@@ -1010,6 +1010,11 @@ static void dump_ctx(int vcpu)
if (xc_domain_hvm_getcontext_partial(
xenctx.xc_handle, xenctx.domid, HVM_SAVE_CODE(CPU),
vcpu, &cpuctx, sizeof cpuctx) != 0) {
+ if ( errno == EBADSLT )
+ {
+ printf("Note: vcpu%d offline:\n\n", vcpu);
+ goto vcpu_off_line;
+ }
perror("xc_domain_hvm_getcontext_partial");
return;
}
@@ -1023,6 +1028,7 @@ static void dump_ctx(int vcpu)
ctxt_word_size = (strstr(xen_caps, "xen-3.0-x86_64")) ? 8 : 4;
} else {
unsigned int gw;
+ vcpu_off_line:
if ( !xc_domain_get_guest_width(xenctx.xc_handle, xenctx.domid,
&gw) )
ctxt_word_size = guest_word_size = gw;
}
--
1.8.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |