Re: [PATCH v3 5/9] x86/PVH: actually show Dom0's register state from debug key '0'
On Tue, Sep 21, 2021 at 09:19:06AM +0200, Jan Beulich wrote:
> vcpu_show_registers() didn't do anything for HVM so far. Note though
> that some extra hackery is needed for VMX - see the code comment.
>
> Note further that the show_guest_stack() invocation is left alone here:
> While strictly speaking guest_kernel_mode() should be predicated by a
> PV / !HVM check, show_guest_stack() itself will bail immediately for
> HVM.
>
> While there and despite not being PVH-specific, take the opportunity and
> filter offline vCPU-s: There's not really any register state associated
> with them, so avoid spamming the log with useless information while
> still leaving an indication of the fact.
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
> ---
> I was pondering whether to also have the VMCS/VMCB dumped for every
> vCPU, to present full state. The downside is that for larger systems
> this would be a lot of output.
At least for Intel there's already a debug key to dump the VMCS, so
I'm not sure it's worth also dumping it here, as a user can get the
information elsewhere (that's what I've always used to debug PVH,
TBH).
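FWIW, that would be something along the lines of the below from the
control domain (from memory, and assuming the VT-x dump is still bound
to the 'v' debug key):

    xl debug-keys v
    xl dmesg

with the dump then showing up in the hypervisor console ring.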
> ---
> v2: New.
>
> --- a/xen/arch/x86/traps.c
> +++ b/xen/arch/x86/traps.c
> @@ -631,6 +631,12 @@ void vcpu_show_execution_state(struct vc
>  {
>      unsigned long flags;
>
> +    if ( test_bit(_VPF_down, &v->pause_flags) )
> +    {
> +        printk("*** %pv is offline ***\n", v);
> +        return;
> +    }
> +
>      printk("*** Dumping Dom%d vcpu#%d state: ***\n",
>             v->domain->domain_id, v->vcpu_id);
>
> @@ -642,6 +648,21 @@ void vcpu_show_execution_state(struct vc
>
>      vcpu_pause(v); /* acceptably dangerous */
>
> +#ifdef CONFIG_HVM
> +    /*
> +     * For VMX special care is needed: Reading some of the register state will
> +     * require VMCS accesses. Engaging foreign VMCSes involves acquiring of a
> +     * lock, which check_lock() would object to when done from an IRQs-disabled
> +     * region. Despite this being a layering violation, engage the VMCS right
> +     * here. This then also avoids doing so several times in close succession.
> +     */
> +    if ( cpu_has_vmx && is_hvm_vcpu(v) )
> +    {
> +        ASSERT(!in_irq());
> +        vmx_vmcs_enter(v);
> +    }
> +#endif
> +
>      /* Prevent interleaving of output. */
>      flags = console_lock_recursive_irqsave();
>
> @@ -651,6 +672,11 @@ void vcpu_show_execution_state(struct vc
>
>      console_unlock_recursive_irqrestore(flags);
>
> +#ifdef CONFIG_HVM
> +    if ( cpu_has_vmx && is_hvm_vcpu(v) )
> +        vmx_vmcs_exit(v);
> +#endif
> +
>      vcpu_unpause(v);
>  }
>
> --- a/xen/arch/x86/x86_64/traps.c
> +++ b/xen/arch/x86/x86_64/traps.c
> @@ -49,6 +49,39 @@ static void read_registers(struct cpu_us
>      crs[7] = read_gs_shadow();
>  }
>
> +static void get_hvm_registers(struct vcpu *v, struct cpu_user_regs *regs,
> +                              unsigned long crs[8])
Would this be better placed in hvm.c, now that it's an HVM-only
function?
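If so, it would presumably need to become non-static, with a prototype
somewhere like asm-x86/hvm/hvm.h - just a sketch of what I mean, exact
name and header placement up for discussion:

    void get_hvm_registers(struct vcpu *v, struct cpu_user_regs *regs,
                           unsigned long crs[8]);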
> +{
> +    struct segment_register sreg;
> +
> +    crs[0] = v->arch.hvm.guest_cr[0];
> +    crs[2] = v->arch.hvm.guest_cr[2];
> +    crs[3] = v->arch.hvm.guest_cr[3];
> +    crs[4] = v->arch.hvm.guest_cr[4];
> +
> +    hvm_get_segment_register(v, x86_seg_cs, &sreg);
> +    regs->cs = sreg.sel;
> +
> +    hvm_get_segment_register(v, x86_seg_ds, &sreg);
> +    regs->ds = sreg.sel;
> +
> +    hvm_get_segment_register(v, x86_seg_es, &sreg);
> +    regs->es = sreg.sel;
> +
> +    hvm_get_segment_register(v, x86_seg_fs, &sreg);
> +    regs->fs = sreg.sel;
> +    crs[5] = sreg.base;
> +
> +    hvm_get_segment_register(v, x86_seg_gs, &sreg);
> +    regs->gs = sreg.sel;
> +    crs[6] = sreg.base;
> +
> +    hvm_get_segment_register(v, x86_seg_ss, &sreg);
> +    regs->ss = sreg.sel;
> +
> +    crs[7] = hvm_get_shadow_gs_base(v);
> +}
> +
>  static void _show_registers(
>      const struct cpu_user_regs *regs, unsigned long crs[8],
>      enum context context, const struct vcpu *v)
> @@ -99,27 +132,8 @@ void show_registers(const struct cpu_use
>
>      if ( guest_mode(regs) && is_hvm_vcpu(v) )
>      {
> -        struct segment_register sreg;
> +        get_hvm_registers(v, &fault_regs, fault_crs);
>          context = CTXT_hvm_guest;
> -        fault_crs[0] = v->arch.hvm.guest_cr[0];
> -        fault_crs[2] = v->arch.hvm.guest_cr[2];
> -        fault_crs[3] = v->arch.hvm.guest_cr[3];
> -        fault_crs[4] = v->arch.hvm.guest_cr[4];
> -        hvm_get_segment_register(v, x86_seg_cs, &sreg);
> -        fault_regs.cs = sreg.sel;
> -        hvm_get_segment_register(v, x86_seg_ds, &sreg);
> -        fault_regs.ds = sreg.sel;
> -        hvm_get_segment_register(v, x86_seg_es, &sreg);
> -        fault_regs.es = sreg.sel;
> -        hvm_get_segment_register(v, x86_seg_fs, &sreg);
> -        fault_regs.fs = sreg.sel;
> -        fault_crs[5] = sreg.base;
> -        hvm_get_segment_register(v, x86_seg_gs, &sreg);
> -        fault_regs.gs = sreg.sel;
> -        fault_crs[6] = sreg.base;
> -        hvm_get_segment_register(v, x86_seg_ss, &sreg);
> -        fault_regs.ss = sreg.sel;
> -        fault_crs[7] = hvm_get_shadow_gs_base(v);
>      }
>      else
>      {
> @@ -159,24 +173,35 @@ void show_registers(const struct cpu_use
>  void vcpu_show_registers(const struct vcpu *v)
>  {
>      const struct cpu_user_regs *regs = &v->arch.user_regs;
> -    bool kernel = guest_kernel_mode(v, regs);
> +    struct cpu_user_regs aux_regs;
> +    enum context context;
>      unsigned long crs[8];
>
> -    /* Only handle PV guests for now */
> -    if ( !is_pv_vcpu(v) )
> -        return;
> -
> -    crs[0] = v->arch.pv.ctrlreg[0];
> -    crs[2] = arch_get_cr2(v);
> -    crs[3] = pagetable_get_paddr(kernel ?
> -                                 v->arch.guest_table :
> -                                 v->arch.guest_table_user);
> -    crs[4] = v->arch.pv.ctrlreg[4];
> -    crs[5] = v->arch.pv.fs_base;
> -    crs[6 + !kernel] = v->arch.pv.gs_base_kernel;
> -    crs[7 - !kernel] = v->arch.pv.gs_base_user;
> +    if ( is_hvm_vcpu(v) )
> +    {
> +        aux_regs = *regs;
> +        get_hvm_registers(v->domain->vcpu[v->vcpu_id], &aux_regs, crs);
I wonder if you could load the values directly into v->arch.user_regs,
but maybe that would taint some other info already there. I certainly
haven't looked closely.
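To give a rough idea of what I mean (just a sketch, not even compile
tested; 'ncv' is only a name for illustration, and the vcpu array
indirection is there to drop the const, like the patch already does
for the get_hvm_registers() call):

    if ( is_hvm_vcpu(v) )
    {
        /* Non-const alias of v, matching what the patch does above. */
        struct vcpu *ncv = v->domain->vcpu[v->vcpu_id];

        /* Fill selectors and bases straight into the saved state. */
        get_hvm_registers(ncv, &ncv->arch.user_regs, crs);
    }

That would avoid the on-stack copy, but as said, I haven't checked
whether clobbering user_regs like that is actually safe.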
Thanks, Roger.