|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 2/2] x86/HVM: re-order operations in hvm_ud_intercept()
Don't fetch CS explicitly; leverage the fact that hvm_emulate_prepare()
already does so (and that hvm_virtual_to_linear_addr() doesn't alter it).
Also increase the length passed to hvm_virtual_to_linear_addr() by
one: there definitely needs to be at least one more opcode byte, and
this way we avoid missing a wraparound case.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3834,19 +3834,20 @@ void hvm_ud_intercept(struct cpu_user_re
{
struct hvm_emulate_ctxt ctxt;
+ hvm_emulate_prepare(&ctxt, regs);
+
if ( opt_hvm_fep )
{
struct vcpu *cur = current;
- struct segment_register cs;
+ const struct segment_register *cs = &ctxt.seg_reg[x86_seg_cs];
unsigned long addr;
char sig[5]; /* ud2; .ascii "xen" */
- hvm_get_segment_register(cur, x86_seg_cs, &cs);
- if ( hvm_virtual_to_linear_addr(x86_seg_cs, &cs, regs->eip,
- sizeof(sig), hvm_access_insn_fetch,
+ if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->eip,
+ sizeof(sig) + 1, hvm_access_insn_fetch,
(hvm_long_mode_enabled(cur) &&
- cs.attr.fields.l) ? 64 :
- cs.attr.fields.db ? 32 : 16, &addr) &&
+ cs->attr.fields.l) ? 64 :
+ cs->attr.fields.db ? 32 : 16, &addr) &&
(hvm_fetch_from_guest_virt_nofault(sig, addr, sizeof(sig),
0) == HVMCOPY_okay) &&
(memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
@@ -3856,8 +3857,6 @@ void hvm_ud_intercept(struct cpu_user_re
}
}
- hvm_emulate_prepare(&ctxt, regs);
-
switch ( hvm_emulate_one(&ctxt) )
{
case X86EMUL_UNHANDLEABLE:
Attachment:
x86-HVM-fep-reorder.patch _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |