|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v3 11/24] x86/emul: Implement singlestep as a retire flag
The behaviour of singlestep is to raise #DB after the instruction has been
completed, but implementing it with inject_hw_exception() causes x86_emulate()
to return X86EMUL_EXCEPTION, despite successfully completing execution of the
instruction, including register writeback.
Instead, use a retire flag to indicate singlestep, which causes x86_emulate()
to return X86EMUL_OKAY.
Update all callers of x86_emulate() to use the new retire flag. This fixes
the behaviour of singlestep for shadow pagetable updates and mmcfg/mmio_ro
intercepts, which previously discarded the exception.
With this change, all uses of X86EMUL_EXCEPTION from x86_emulate() are
believed to have strictly fault semantics.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: Paul Durrant <paul.durrant@xxxxxxxxxx>
v3:
* New
---
xen/arch/x86/hvm/emulate.c | 3 +++
xen/arch/x86/mm.c | 11 ++++++++++-
xen/arch/x86/mm/shadow/multi.c | 21 ++++++++++++++++++++-
xen/arch/x86/x86_emulate/x86_emulate.c | 9 ++++-----
xen/arch/x86/x86_emulate/x86_emulate.h | 6 ++++++
5 files changed, 43 insertions(+), 7 deletions(-)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index fe62500..91c79fa 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1788,6 +1788,9 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt
*hvmemul_ctxt,
if ( rc != X86EMUL_OKAY )
return rc;
+ if ( hvmemul_ctxt->ctxt.retire.singlestep )
+ hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
+
new_intr_shadow = hvmemul_ctxt->intr_shadow;
/* MOV-SS instruction toggles MOV-SS shadow, else we just clear it. */
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index b7c7122..231c7bf 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5382,6 +5382,9 @@ int ptwr_do_page_fault(struct vcpu *v, unsigned long addr,
if ( rc == X86EMUL_UNHANDLEABLE )
goto bail;
+ if ( ptwr_ctxt.ctxt.retire.singlestep )
+ pv_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
+
perfc_incr(ptwr_emulations);
return EXCRET_fault_fixed;
@@ -5503,7 +5506,13 @@ int mmio_ro_do_page_fault(struct vcpu *v, unsigned long
addr,
else
rc = x86_emulate(&ctxt, &mmio_ro_emulate_ops);
- return rc != X86EMUL_UNHANDLEABLE ? EXCRET_fault_fixed : 0;
+ if ( rc == X86EMUL_UNHANDLEABLE )
+ return 0;
+
+ if ( ctxt.retire.singlestep )
+ pv_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
+
+ return EXCRET_fault_fixed;
}
void *alloc_xen_pagetable(void)
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 9ee48a8..ddfb815 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3422,6 +3422,16 @@ static int sh_page_fault(struct vcpu *v,
v->arch.paging.last_write_emul_ok = 0;
#endif
+ if ( emul_ctxt.ctxt.retire.singlestep )
+ {
+ if ( is_hvm_vcpu(v) )
+ hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
+ else
+ pv_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
+
+ goto emulate_done;
+ }
+
#if GUEST_PAGING_LEVELS == 3 /* PAE guest */
if ( r == X86EMUL_OKAY ) {
int i, emulation_count=0;
@@ -3433,7 +3443,7 @@ static int sh_page_fault(struct vcpu *v,
shadow_continue_emulation(&emul_ctxt, regs);
v->arch.paging.last_write_was_pt = 0;
r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
- if ( r == X86EMUL_OKAY )
+ if ( r == X86EMUL_OKAY && !emul_ctxt.ctxt.retire.raw )
{
emulation_count++;
if ( v->arch.paging.last_write_was_pt )
@@ -3449,6 +3459,15 @@ static int sh_page_fault(struct vcpu *v,
{
perfc_incr(shadow_em_ex_fail);
TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_EMULATION_LAST_FAILED);
+
+ if ( emul_ctxt.ctxt.retire.singlestep )
+ {
+ if ( is_hvm_vcpu(v) )
+ hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
+ else
+ pv_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
+ }
+
break; /* Don't emulate again if we failed! */
}
}
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c
b/xen/arch/x86/x86_emulate/x86_emulate.c
index 8a1f1f5..0af532e 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -2417,7 +2417,6 @@ x86_emulate(
struct x86_emulate_state state;
int rc;
uint8_t b, d;
- bool tf = ctxt->regs->eflags & EFLG_TF;
struct operand src = { .reg = PTR_POISON };
struct operand dst = { .reg = PTR_POISON };
enum x86_swint_type swint_type;
@@ -5415,11 +5414,11 @@ x86_emulate(
if ( !mode_64bit() )
_regs.eip = (uint32_t)_regs.eip;
- *ctxt->regs = _regs;
+ /* Was singlestepping active at the start of this instruction? */
+ if ( (rc == X86EMUL_OKAY) && (ctxt->regs->eflags & EFLG_TF) )
+ ctxt->retire.singlestep = true;
- /* Inject #DB if single-step tracing was enabled at instruction start. */
- if ( tf && (rc == X86EMUL_OKAY) && ops->inject_hw_exception )
- rc = ops->inject_hw_exception(EXC_DB, -1, ctxt) ? : X86EMUL_EXCEPTION;
+ *ctxt->regs = _regs;
done:
_put_fpu();
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h
b/xen/arch/x86/x86_emulate/x86_emulate.h
index fc28976..da8924b 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -483,6 +483,7 @@ struct x86_emulate_ctxt
bool hlt:1; /* Instruction HLTed. */
bool mov_ss:1; /* Instruction sets MOV-SS irq shadow. */
bool sti:1; /* Instruction sets STI irq shadow. */
+ bool singlestep:1; /* Singlestepping was active. */
};
} retire;
};
@@ -572,12 +573,17 @@ static inline int x86_emulate_wrapper(
struct x86_emulate_ctxt *ctxt,
const struct x86_emulate_ops *ops)
{
+ unsigned long orig_eip = ctxt->regs->eip;
int rc = x86_emulate(ctxt, ops);
/* Retire flags should only be set for successful instruction emulation. */
if ( rc != X86EMUL_OKAY )
ASSERT(ctxt->retire.raw == 0);
+ /* All cases returning X86EMUL_EXCEPTION should have fault semantics. */
+ if ( rc == X86EMUL_EXCEPTION )
+ ASSERT(ctxt->regs->eip == orig_eip);
+
return rc;
}
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |