[xen master] x86/treewide: Drop the TRAP_* legacy names
commit 881ba20eb0222305a9d2cd090c9345992794f4f5
Author: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Fri Feb 17 23:45:36 2023 +0000
Commit: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Wed Apr 5 20:33:49 2023 +0100
x86/treewide: Drop the TRAP_* legacy names
We have two naming schemes for exceptions: X86_EXC_??, which uses the
architectural abbreviations, and TRAP_*, which is a mix of terminology and
nonstandard abbreviations.  Switch to X86_EXC_* uniformly.

No functional change, confirmed by diffing the disassembly.  Only 7 binary
changes, and they're all __LINE__ being passed into printk().
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
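A quick orientation aid, not part of the patch: the standalone sketch below
reconstructs the old-name/new-name mapping from the values in the processor.h
hunk later in this mail, and exercises the X86_EXC_HAVE_EC check the same way
hvm_inject_event() does.  The bitmap here is abbreviated to a plausible
subset, so treat it as illustrative; see x86-defns.h for the authoritative
definition.

#include <stdio.h>

#define X86_EXC_DE   0  /* was TRAP_divide_error    */
#define X86_EXC_DB   1  /* was TRAP_debug           */
#define X86_EXC_NMI  2  /* was TRAP_nmi             */
#define X86_EXC_BP   3  /* was TRAP_int3            */
#define X86_EXC_UD   6  /* was TRAP_invalid_op      */
#define X86_EXC_NM   7  /* was TRAP_no_device       */
#define X86_EXC_DF   8  /* was TRAP_double_fault    */
#define X86_EXC_TS  10  /* was TRAP_invalid_tss     */
#define X86_EXC_NP  11  /* was TRAP_no_segment      */
#define X86_EXC_SS  12  /* was TRAP_stack_error     */
#define X86_EXC_GP  13  /* was TRAP_gp_fault        */
#define X86_EXC_PF  14  /* was TRAP_page_fault      */
#define X86_EXC_AC  17  /* was TRAP_alignment_check */
#define X86_EXC_MC  18  /* was TRAP_machine_check   */
#define X86_EXC_VE  20  /* was TRAP_virtualisation  */

/* Vectors that push an error code (abbreviated subset; an assumption here,
 * not the verbatim Xen definition). */
#define X86_EXC_HAVE_EC                                                 \
    ((1u << X86_EXC_DF) | (1u << X86_EXC_TS) | (1u << X86_EXC_NP) |     \
     (1u << X86_EXC_SS) | (1u << X86_EXC_GP) | (1u << X86_EXC_PF) |     \
     (1u << X86_EXC_AC))

int main(void)
{
    unsigned int vector = X86_EXC_GP;

    /* Mirrors the has_ec test in hvm_inject_event() below. */
    int has_ec = (vector < 32) && (X86_EXC_HAVE_EC & (1u << vector));

    printf("vector %u %s an error code\n",
           vector, has_ec ? "pushes" : "does not push");
    return 0;
}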
xen/arch/x86/cpu/mcheck/vmce.c | 2 +-
xen/arch/x86/cpuid.c | 6 +--
xen/arch/x86/crash.c | 6 +--
xen/arch/x86/domain.c | 2 +-
xen/arch/x86/extable.c | 8 ++--
xen/arch/x86/hvm/emulate.c | 17 ++++----
xen/arch/x86/hvm/hvm.c | 61 ++++++++++++++-------------
xen/arch/x86/hvm/io.c | 2 +-
xen/arch/x86/hvm/svm/emulate.c | 2 +-
xen/arch/x86/hvm/svm/intr.c | 2 +-
xen/arch/x86/hvm/svm/nestedsvm.c | 32 +++++++-------
xen/arch/x86/hvm/svm/svm.c | 76 +++++++++++++++++-----------------
xen/arch/x86/hvm/svm/vmcb.c | 4 +-
xen/arch/x86/hvm/vm_event.c | 10 ++---
xen/arch/x86/hvm/vmx/intr.c | 2 +-
xen/arch/x86/hvm/vmx/realmode.c | 16 +++----
xen/arch/x86/hvm/vmx/vmcs.c | 8 ++--
xen/arch/x86/hvm/vmx/vmx.c | 75 +++++++++++++++++----------------
xen/arch/x86/hvm/vmx/vvmx.c | 28 ++++++-------
xen/arch/x86/include/asm/domain.h | 2 +-
xen/arch/x86/include/asm/hvm/hvm.h | 8 ++--
xen/arch/x86/include/asm/processor.h | 44 ++++----------------
xen/arch/x86/include/asm/x86-defns.h | 2 +
xen/arch/x86/machine_kexec.c | 2 +-
xen/arch/x86/mm.c | 2 +-
xen/arch/x86/mm/mem_access.c | 2 +-
xen/arch/x86/mm/shadow/hvm.c | 2 +-
xen/arch/x86/mm/shadow/multi.c | 10 ++---
xen/arch/x86/pv/callback.c | 6 +--
xen/arch/x86/pv/emul-gate-op.c | 34 +++++++--------
xen/arch/x86/pv/emul-inv-op.c | 2 +-
xen/arch/x86/pv/emul-priv-op.c | 14 +++----
xen/arch/x86/pv/emulate.c | 2 +-
xen/arch/x86/pv/iret.c | 2 +-
xen/arch/x86/pv/ro-page-fault.c | 4 +-
xen/arch/x86/pv/traps.c | 10 ++---
xen/arch/x86/traps.c | 70 +++++++++++++++----------------
xen/arch/x86/x86_64/compat/entry.S | 12 +++---
xen/arch/x86/x86_64/entry.S | 54 ++++++++++++------------
xen/arch/x86/x86_emulate/util-xen.c | 10 ++---
xen/arch/x86/x86_emulate/x86_emulate.h | 4 +-
41 files changed, 313 insertions(+), 344 deletions(-)
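One more illustrative sketch before the diff proper: much of the churn below
is the exception-intercept-bitmap idiom (svm_cpuid_policy_changed(),
vmx_fpu_enter(), etc.) re-spelt with the new names.  The helpers here are
hypothetical standalone stand-ins written for this note, not Xen's real API.

#include <assert.h>
#include <stdint.h>

#define X86_EXC_UD   6
#define X86_EXC_NM   7
#define X86_EXC_NUM 32  /* 32 reserved vectors, as added to x86-defns.h */

static uint32_t intercept_set(uint32_t bitmap, unsigned int vec)
{
    assert(vec < X86_EXC_NUM);  /* vectors 0-31 fit in a 32-bit bitmap */
    return bitmap | (1u << vec);
}

static uint32_t intercept_clear(uint32_t bitmap, unsigned int vec)
{
    assert(vec < X86_EXC_NUM);
    return bitmap & ~(1u << vec);
}

int main(void)
{
    uint32_t bitmap = 0;

    /* e.g. svm_cpuid_policy_changed(): intercept #UD when FEP is in use. */
    bitmap = intercept_set(bitmap, X86_EXC_UD);

    /* e.g. vmx_fpu_enter(): stop intercepting #NM once the FPU is loaded. */
    bitmap = intercept_clear(bitmap, X86_EXC_NM);

    return bitmap == 1u << X86_EXC_UD ? 0 : 1;
}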
diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index af30811afd..6f1a7e4de0 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -414,7 +414,7 @@ int inject_vmce(struct domain *d, int vcpu)
continue;
if ( (is_hvm_domain(d) ||
- pv_trap_callback_registered(v, TRAP_machine_check)) &&
+ pv_trap_callback_registered(v, X86_EXC_MC)) &&
!test_and_set_bool(v->arch.mce_pending) )
{
mce_printk(MCE_VERBOSE, "MCE: inject vMCE to %pv\n", v);
diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
index f311372cdf..455a09b2dd 100644
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -219,7 +219,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
/* OSXSAVE clear in policy. Fast-forward CR4 back in. */
if ( (v->arch.pv.ctrlreg[4] & X86_CR4_OSXSAVE) ||
(p->basic.xsave &&
- regs->entry_vector == TRAP_invalid_op &&
+ regs->entry_vector == X86_EXC_UD &&
guest_kernel_mode(v, regs) &&
(read_cr4() & X86_CR4_OSXSAVE)) )
res->c |= cpufeat_mask(X86_FEATURE_OSXSAVE);
@@ -255,7 +255,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
* emulated CPUID from a faulted CPUID by whether a #UD or #GP
* fault is currently being serviced. Yuck...
*/
- if ( cpu_has_monitor && regs->entry_vector == TRAP_gp_fault )
+ if ( cpu_has_monitor && regs->entry_vector == X86_EXC_GP )
res->c |= cpufeat_mask(X86_FEATURE_MONITOR);
/*
@@ -280,7 +280,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
regs = guest_cpu_user_regs();
if ( is_pv_domain(d) && is_hardware_domain(d) &&
guest_kernel_mode(v, regs) && cpu_has_monitor &&
- regs->entry_vector == TRAP_gp_fault )
+ regs->entry_vector == X86_EXC_GP )
*res = raw_cpu_policy.basic.raw[5];
break;
diff --git a/xen/arch/x86/crash.c b/xen/arch/x86/crash.c
index 7850f0af24..a789416ca3 100644
--- a/xen/arch/x86/crash.c
+++ b/xen/arch/x86/crash.c
@@ -60,7 +60,7 @@ static int noreturn cf_check do_nmi_crash(
* This update is safe from a security point of view, as this
* pcpu is never going to try to sysret back to a PV vcpu.
*/
- set_ist(&idt_tables[cpu][TRAP_machine_check], IST_NONE);
+ set_ist(&idt_tables[cpu][X86_EXC_MC], IST_NONE);
kexec_crash_save_cpu();
__stop_this_cpu();
@@ -130,9 +130,9 @@ static void nmi_shootdown_cpus(void)
* Disable IST for MCEs to avoid stack corruption race conditions, and
* change the NMI handler to a nop to avoid deviation from this codepath.
*/
- _set_gate_lower(&idt_tables[cpu][TRAP_nmi],
+ _set_gate_lower(&idt_tables[cpu][X86_EXC_NMI],
SYS_DESC_irq_gate, 0, &trap_nop);
- set_ist(&idt_tables[cpu][TRAP_machine_check], IST_NONE);
+ set_ist(&idt_tables[cpu][X86_EXC_MC], IST_NONE);
set_nmi_callback(do_nmi_crash);
smp_send_nmi_allbutself();
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 91f57e3a3b..596233418a 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -113,7 +113,7 @@ void play_dead(void)
local_irq_disable();
/* Change the NMI handler to a nop (see comment below). */
- _set_gate_lower(&idt_tables[cpu][TRAP_nmi], SYS_DESC_irq_gate, 0,
+ _set_gate_lower(&idt_tables[cpu][X86_EXC_NMI], SYS_DESC_irq_gate, 0,
&trap_nop);
/*
diff --git a/xen/arch/x86/extable.c b/xen/arch/x86/extable.c
index 6758ba1dca..c3771c2e39 100644
--- a/xen/arch/x86/extable.c
+++ b/xen/arch/x86/extable.c
@@ -136,15 +136,15 @@ static int __init cf_check stub_selftest(void)
} tests[] __initconst = {
#define endbr64 0xf3, 0x0f, 0x1e, 0xfa
{ .opc = { endbr64, 0x0f, 0xb9, 0xc3, 0xc3 }, /* ud1 */
- .res.fields.trapnr = TRAP_invalid_op },
+ .res.fields.trapnr = X86_EXC_UD },
{ .opc = { endbr64, 0x90, 0x02, 0x00, 0xc3 }, /* nop; add (%rax),%al */
.rax = 0x0123456789abcdef,
- .res.fields.trapnr = TRAP_gp_fault },
+ .res.fields.trapnr = X86_EXC_GP },
{ .opc = { endbr64, 0x02, 0x04, 0x04, 0xc3 }, /* add (%rsp,%rax),%al */
.rax = 0xfedcba9876543210,
- .res.fields.trapnr = TRAP_stack_error },
+ .res.fields.trapnr = X86_EXC_SS },
{ .opc = { endbr64, 0xcc, 0xc3, 0xc3, 0xc3 }, /* int3 */
- .res.fields.trapnr = TRAP_int3 },
+ .res.fields.trapnr = X86_EXC_BP },
#undef endbr64
};
unsigned long addr = this_cpu(stubs.addr) + STUB_BUF_SIZE / 2;
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 5691725d6c..75ee98a73b 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -910,9 +910,8 @@ static int hvmemul_virtual_to_linear(
* determine the kind of exception (#GP or #TS) in that case.
*/
if ( is_x86_user_segment(seg) )
- x86_emul_hw_exception((seg == x86_seg_ss)
- ? TRAP_stack_error
- : TRAP_gp_fault, 0, &hvmemul_ctxt->ctxt);
+ x86_emul_hw_exception((seg == x86_seg_ss) ? X86_EXC_SS : X86_EXC_GP,
+ 0, &hvmemul_ctxt->ctxt);
return X86EMUL_EXCEPTION;
}
@@ -2227,7 +2226,7 @@ static int cf_check hvmemul_write_cr(
}
if ( rc == X86EMUL_EXCEPTION )
- x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
+ x86_emul_hw_exception(X86_EXC_GP, 0, ctxt);
return rc;
}
@@ -2263,7 +2262,7 @@ static int cf_check hvmemul_read_msr(
int rc = hvm_msr_read_intercept(reg, val);
if ( rc == X86EMUL_EXCEPTION )
- x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
+ x86_emul_hw_exception(X86_EXC_GP, 0, ctxt);
return rc;
}
@@ -2276,7 +2275,7 @@ static int cf_check hvmemul_write_msr(
int rc = hvm_msr_write_intercept(reg, val, true);
if ( rc == X86EMUL_EXCEPTION )
- x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
+ x86_emul_hw_exception(X86_EXC_GP, 0, ctxt);
return rc;
}
@@ -2530,7 +2529,7 @@ static int cf_check hvmemul_tlb_op(
paging_invlpg(current, addr);
else
{
- x86_emul_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC, ctxt);
+ x86_emul_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC, ctxt);
rc = X86EMUL_EXCEPTION;
}
break;
@@ -2548,7 +2547,7 @@ static int cf_check hvmemul_vmfunc(
return X86EMUL_UNHANDLEABLE;
rc = alternative_call(hvm_funcs.altp2m_vcpu_emulate_vmfunc, ctxt->regs);
if ( rc == X86EMUL_EXCEPTION )
- x86_emul_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC, ctxt);
+ x86_emul_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC, ctxt);
return rc;
}
@@ -2676,7 +2675,7 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
}
if ( hvmemul_ctxt->ctxt.retire.singlestep )
- hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_DB, X86_EVENT_NO_EC);
new_intr_shadow = hvmemul_ctxt->intr_shadow;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 7020fdce99..6885ae4a9e 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -255,25 +255,25 @@ int hvm_event_needs_reinjection(uint8_t type, uint8_t vector)
uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2)
{
const unsigned int contributory_exceptions =
- (1 << TRAP_divide_error) |
- (1 << TRAP_invalid_tss) |
- (1 << TRAP_no_segment) |
- (1 << TRAP_stack_error) |
- (1 << TRAP_gp_fault);
+ (1 << X86_EXC_DE) |
+ (1 << X86_EXC_TS) |
+ (1 << X86_EXC_NP) |
+ (1 << X86_EXC_SS) |
+ (1 << X86_EXC_GP);
const unsigned int page_faults =
- (1 << TRAP_page_fault) |
- (1 << TRAP_virtualisation);
+ (1 << X86_EXC_PF) |
+ (1 << X86_EXC_VE);
/* Exception during double-fault delivery always causes a triple fault. */
- if ( vec1 == TRAP_double_fault )
+ if ( vec1 == X86_EXC_DF )
{
hvm_triple_fault();
- return TRAP_double_fault; /* dummy return */
+ return X86_EXC_DF; /* dummy return */
}
/* Exception during page-fault delivery always causes a double fault. */
if ( (1u << vec1) & page_faults )
- return TRAP_double_fault;
+ return X86_EXC_DF;
/* Discard the first exception if it's benign or if we now have a #PF. */
if ( !((1u << vec1) & contributory_exceptions) ||
@@ -281,7 +281,7 @@ uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2)
return vec2;
/* Cannot combine the exceptions: double fault. */
- return TRAP_double_fault;
+ return X86_EXC_DF;
}
void hvm_set_rdtsc_exiting(struct domain *d, bool_t enable)
@@ -1718,7 +1718,7 @@ void hvm_inject_event(const struct x86_event *event)
struct vcpu *curr = current;
const uint8_t vector = event->vector;
const bool has_ec = ((event->type == X86_EVENTTYPE_HW_EXCEPTION) &&
- (vector < 32) && ((TRAP_HAVE_EC & (1u << vector))));
+ (vector < 32) && ((X86_EXC_HAVE_EC & (1u << vector))));
ASSERT(vector == event->vector); /* Confirm no truncation. */
if ( has_ec )
@@ -1800,7 +1800,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
return -1;
case NESTEDHVM_PAGEFAULT_MMIO:
if ( !handle_mmio() )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
return 1;
case NESTEDHVM_PAGEFAULT_L0_ERROR:
/* gpa is now translated to l1 guest address, update gfn. */
@@ -1817,7 +1817,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
if ( !nestedhvm_vcpu_in_guestmode(curr) && hvm_mmio_internal(gpa) )
{
if ( !handle_mmio_with_translation(gla, gpa >> PAGE_SHIFT, npfec) )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
rc = 1;
goto out;
}
@@ -1944,7 +1944,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
(p2m_is_discard_write(p2mt) || (p2mt == p2m_ioreq_server))) )
{
if ( !handle_mmio_with_translation(gla, gpa >> PAGE_SHIFT, npfec) )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
rc = 1;
goto out_put_gfn;
}
@@ -2052,7 +2052,7 @@ int hvm_handle_xsetbv(u32 index, u64 new_bv)
rc = x86emul_write_xcr(index, new_bv, NULL);
if ( rc != X86EMUL_OKAY )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
return rc;
}
@@ -2185,7 +2185,7 @@ int hvm_mov_to_cr(unsigned int cr, unsigned int gpr)
}
if ( rc == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
return rc;
@@ -2835,7 +2835,7 @@ static int task_switch_load_seg(
seg_desc_t *pdesc = NULL, desc;
u8 dpl, rpl;
bool_t writable;
- int fault_type = TRAP_invalid_tss;
+ int fault_type = X86_EXC_TS;
struct vcpu *v = current;
if ( eflags & X86_EFLAGS_VM )
@@ -2928,8 +2928,7 @@ static int task_switch_load_seg(
/* Segment present in memory? */
if ( !(desc.b & _SEGMENT_P) )
{
- fault_type = (seg != x86_seg_ss) ? TRAP_no_segment
- : TRAP_stack_error;
+ fault_type = (seg != x86_seg_ss) ? X86_EXC_NP : X86_EXC_SS;
goto fault;
}
} while ( !(desc.b & 0x100) && /* Ensure Accessed flag is set */
@@ -3026,9 +3025,9 @@ void hvm_task_switch(
if ( ((tss_sel & 0xfff8) + 7) > gdt.limit )
{
- hvm_inject_hw_exception((taskswitch_reason == TSW_iret) ?
- TRAP_invalid_tss : TRAP_gp_fault,
- tss_sel & 0xfff8);
+ hvm_inject_hw_exception(
+ (taskswitch_reason == TSW_iret) ? X86_EXC_TS : X86_EXC_GP,
+ tss_sel & 0xfff8);
goto out;
}
@@ -3055,20 +3054,20 @@ void hvm_task_switch(
if ( tr.type != ((taskswitch_reason == TSW_iret) ? 0xb : 0x9) )
{
hvm_inject_hw_exception(
- (taskswitch_reason == TSW_iret) ? TRAP_invalid_tss : TRAP_gp_fault,
+ (taskswitch_reason == TSW_iret) ? X86_EXC_TS : X86_EXC_GP,
tss_sel & 0xfff8);
goto out;
}
if ( !tr.p )
{
- hvm_inject_hw_exception(TRAP_no_segment, tss_sel & 0xfff8);
+ hvm_inject_hw_exception(X86_EXC_NP, tss_sel & 0xfff8);
goto out;
}
if ( tr.limit < (sizeof(tss)-1) )
{
- hvm_inject_hw_exception(TRAP_invalid_tss, tss_sel & 0xfff8);
+ hvm_inject_hw_exception(X86_EXC_TS, tss_sel & 0xfff8);
goto out;
}
@@ -3137,7 +3136,7 @@ void hvm_task_switch(
rc = hvm_set_cr3(tss.cr3, false, true);
if ( rc == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
if ( rc != X86EMUL_OKAY )
goto out;
@@ -3221,7 +3220,7 @@ void hvm_task_switch(
}
if ( (tss.trace & 1) && !exn_raised )
- hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_DB, X86_EVENT_NO_EC);
out:
hvm_unmap_entry(optss_desc);
@@ -3487,7 +3486,7 @@ int hvm_vmexit_cpuid(struct cpu_user_regs *regs, unsigned int inst_len)
if ( curr->arch.msrs->misc_features_enables.cpuid_faulting &&
hvm_get_cpl(curr) > 0 )
{
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
return 1; /* Don't advance the guest IP! */
}
@@ -3864,7 +3863,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
if ( !should_emulate )
{
- hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
return;
}
@@ -3872,7 +3871,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
{
case X86EMUL_UNHANDLEABLE:
case X86EMUL_UNIMPLEMENTED:
- hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
break;
case X86EMUL_EXCEPTION:
hvm_inject_event(&ctxt.ctxt.event);
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 5ae209d3b6..ae2feebd79 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -78,7 +78,7 @@ bool hvm_emulate_one_insn(hvm_emulate_validate_t *validate, const char *descr)
case X86EMUL_UNRECOGNIZED:
hvm_dump_emulation_state(XENLOG_G_WARNING, descr, &ctxt, rc);
- hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
break;
case X86EMUL_EXCEPTION:
diff --git a/xen/arch/x86/hvm/svm/emulate.c b/xen/arch/x86/hvm/svm/emulate.c
index 391f025516..2c95d1ce06 100644
--- a/xen/arch/x86/hvm/svm/emulate.c
+++ b/xen/arch/x86/hvm/svm/emulate.c
@@ -113,7 +113,7 @@ unsigned int svm_get_insn_len(struct vcpu *v, unsigned int instr_enc)
hvm_dump_emulation_state(XENLOG_G_WARNING, "SVM Insn len",
&ctxt, X86EMUL_UNHANDLEABLE);
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
return 0;
}
diff --git a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c
index dbb0022190..c1a057ce86 100644
--- a/xen/arch/x86/hvm/svm/intr.c
+++ b/xen/arch/x86/hvm/svm/intr.c
@@ -48,7 +48,7 @@ static void svm_inject_nmi(struct vcpu *v)
event.raw = 0;
event.v = true;
event.type = X86_EVENTTYPE_NMI;
- event.vector = TRAP_nmi;
+ event.vector = X86_EXC_NMI;
ASSERT(!vmcb->event_inj.v);
vmcb->event_inj = event;
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 63ed9fc248..2003f28f66 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -193,7 +193,7 @@ static uint64_t nestedsvm_fpu_vmentry(uint64_t n1cr0,
* Sync FPU state with l2 guest.
*/
vcr0 |= X86_CR0_TS;
- n2vmcb->_exception_intercepts |= (1U << TRAP_no_device);
+ n2vmcb->_exception_intercepts |= (1U << X86_EXC_NM);
}
else if ( !(vcr0 & X86_CR0_TS) && (n2vmcb->_cr0 & X86_CR0_TS) )
{
@@ -202,7 +202,7 @@ static uint64_t nestedsvm_fpu_vmentry(uint64_t n1cr0,
* Sync FPU state with l2 guest.
*/
vcr0 &= ~X86_CR0_TS;
- n2vmcb->_exception_intercepts &= ~(1U << TRAP_no_device);
+ n2vmcb->_exception_intercepts &= ~(1U << X86_EXC_NM);
}
return vcr0;
@@ -218,7 +218,7 @@ static void nestedsvm_fpu_vmexit(struct vmcb_struct *n1vmcb,
* Sync FPU state with l1 guest.
*/
n1vmcb->_cr0 |= X86_CR0_TS;
- n1vmcb->_exception_intercepts |= (1U << TRAP_no_device);
+ n1vmcb->_exception_intercepts |= (1U << X86_EXC_NM);
}
else if ( !(n1cr0 & X86_CR0_TS) && (n1vmcb->_cr0 & X86_CR0_TS) )
{
@@ -227,7 +227,7 @@ static void nestedsvm_fpu_vmexit(struct vmcb_struct *n1vmcb,
* Sync FPU state with l1 guest.
*/
n1vmcb->_cr0 &= ~X86_CR0_TS;
- n1vmcb->_exception_intercepts &= ~(1U << TRAP_no_device);
+ n1vmcb->_exception_intercepts &= ~(1U << X86_EXC_NM);
}
}
@@ -282,7 +282,7 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
v->arch.hvm.guest_efer = n1vmcb->_efer;
rc = hvm_set_efer(n1vmcb->_efer);
if ( rc == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
if ( rc != X86EMUL_OKAY )
gdprintk(XENLOG_ERR, "hvm_set_efer failed, rc: %u\n", rc);
@@ -290,7 +290,7 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
v->arch.hvm.guest_cr[4] = n1vmcb->_cr4;
rc = hvm_set_cr4(n1vmcb->_cr4, true);
if ( rc == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
if ( rc != X86EMUL_OKAY )
gdprintk(XENLOG_ERR, "hvm_set_cr4 failed, rc: %u\n", rc);
@@ -301,7 +301,7 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
n1vmcb->rflags &= ~X86_EFLAGS_VM;
rc = hvm_set_cr0(n1vmcb->_cr0 | X86_CR0_PE, true);
if ( rc == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
if ( rc != X86EMUL_OKAY )
gdprintk(XENLOG_ERR, "hvm_set_cr0 failed, rc: %u\n", rc);
svm->ns_cr0 = v->arch.hvm.guest_cr[0];
@@ -334,7 +334,7 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
}
rc = hvm_set_cr3(n1vmcb->_cr3, false, true);
if ( rc == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
if ( rc != X86EMUL_OKAY )
gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
@@ -552,7 +552,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
v->arch.hvm.guest_efer = ns_vmcb->_efer;
rc = hvm_set_efer(ns_vmcb->_efer);
if ( rc == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
if ( rc != X86EMUL_OKAY )
gdprintk(XENLOG_ERR, "hvm_set_efer failed, rc: %u\n", rc);
@@ -560,7 +560,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
v->arch.hvm.guest_cr[4] = ns_vmcb->_cr4;
rc = hvm_set_cr4(ns_vmcb->_cr4, true);
if ( rc == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
if ( rc != X86EMUL_OKAY )
gdprintk(XENLOG_ERR, "hvm_set_cr4 failed, rc: %u\n", rc);
@@ -570,7 +570,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
v->arch.hvm.guest_cr[0] = ns_vmcb->_cr0;
rc = hvm_set_cr0(cr0, true);
if ( rc == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
if ( rc != X86EMUL_OKAY )
gdprintk(XENLOG_ERR, "hvm_set_cr0 failed, rc: %u\n", rc);
@@ -589,7 +589,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
/* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
rc = hvm_set_cr3(ns_vmcb->_cr3, false, true);
if ( rc == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
if ( rc != X86EMUL_OKAY )
gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
}
@@ -605,7 +605,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
/* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
rc = hvm_set_cr3(ns_vmcb->_cr3, false, true);
if ( rc == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
if ( rc != X86EMUL_OKAY )
gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
}
@@ -783,7 +783,7 @@ nsvm_vcpu_vmrun(struct vcpu *v, struct cpu_user_regs *regs)
default:
gdprintk(XENLOG_ERR,
"nsvm_vcpu_vmentry failed, injecting #UD\n");
- hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
/* Must happen after hvm_inject_hw_exception or it doesn't work right. */
nv->nv_vmswitch_in_progress = 0;
return 1;
@@ -1575,7 +1575,7 @@ void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v)
*/
if ( !nestedhvm_enabled(v->domain) )
{
- hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
return;
}
@@ -1596,7 +1596,7 @@ void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v)
if ( !nsvm_efer_svm_enabled(v) )
{
- hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
return;
}
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index b8fe759db4..8d8b250101 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -80,7 +80,7 @@ static DEFINE_SPINLOCK(osvw_lock);
static void svm_crash_or_fault(struct vcpu *v)
{
if ( vmcb_get_cpl(v->arch.hvm.svm.vmcb) )
- hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
else
domain_crash(v->domain);
}
@@ -107,7 +107,7 @@ void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len)
curr->arch.hvm.svm.vmcb->int_stat.intr_shadow = 0;
if ( regs->eflags & X86_EFLAGS_TF )
- hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_DB, X86_EVENT_NO_EC);
}
static void cf_check svm_cpu_down(void)
@@ -122,7 +122,7 @@ static void svm_fpu_enter(struct vcpu *v)
vcpu_restore_fpu_lazy(v);
vmcb_set_exception_intercepts(
n1vmcb,
- vmcb_get_exception_intercepts(n1vmcb) & ~(1U << TRAP_no_device));
+ vmcb_get_exception_intercepts(n1vmcb) & ~(1U << X86_EXC_NM));
}
static void cf_check svm_fpu_leave(struct vcpu *v)
@@ -142,7 +142,7 @@ static void cf_check svm_fpu_leave(struct vcpu *v)
{
vmcb_set_exception_intercepts(
n1vmcb,
- vmcb_get_exception_intercepts(n1vmcb) | (1U << TRAP_no_device));
+ vmcb_get_exception_intercepts(n1vmcb) | (1U << X86_EXC_NM));
vmcb_set_cr0(n1vmcb, vmcb_get_cr0(n1vmcb) | X86_CR0_TS);
}
}
@@ -588,9 +588,9 @@ static void cf_check svm_cpuid_policy_changed(struct vcpu *v)
if ( opt_hvm_fep ||
(v->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor) )
- bitmap |= (1U << TRAP_invalid_op);
+ bitmap |= (1U << X86_EXC_UD);
else
- bitmap &= ~(1U << TRAP_invalid_op);
+ bitmap &= ~(1U << X86_EXC_UD);
vmcb_set_exception_intercepts(vmcb, bitmap);
@@ -1026,8 +1026,8 @@ static void noreturn cf_check svm_do_resume(void)
v->arch.hvm.debug_state_latch = debug_state;
vmcb_set_exception_intercepts(
- vmcb, debug_state ? (intercepts | (1U << TRAP_int3))
- : (intercepts & ~(1U << TRAP_int3)));
+ vmcb, debug_state ? (intercepts | (1U << X86_EXC_BP))
+ : (intercepts & ~(1U << X86_EXC_BP)));
}
if ( v->arch.hvm.svm.launch_core != smp_processor_id() )
@@ -1215,7 +1215,7 @@ static void svm_emul_swint_injection(struct x86_event *event)
const struct vmcb_struct *vmcb = curr->arch.hvm.svm.vmcb;
const struct cpu_user_regs *regs = guest_cpu_user_regs();
unsigned int trap = event->vector, type = event->type;
- unsigned int fault = TRAP_gp_fault, ec = 0;
+ unsigned int fault = X86_EXC_GP, ec = 0;
pagefault_info_t pfinfo;
struct segment_register cs, idtr;
unsigned int idte_size, idte_offset;
@@ -1261,7 +1261,7 @@ static void svm_emul_swint_injection(struct x86_event *event)
{
if ( rc == HVMTRANS_bad_linear_to_gfn )
{
- fault = TRAP_page_fault;
+ fault = X86_EXC_PF;
ec = pfinfo.ec;
event->cr2 = pfinfo.linear;
}
@@ -1297,7 +1297,7 @@ static void svm_emul_swint_injection(struct x86_event *event)
/* Is this entry present? */
if ( !(idte.b & (1u << 15)) )
{
- fault = TRAP_no_segment;
+ fault = X86_EXC_NP;
goto raise_exception;
}
@@ -1338,14 +1338,14 @@ static void cf_check svm_inject_event(const struct x86_event *event)
switch ( _event.vector | -(_event.type == X86_EVENTTYPE_SW_INTERRUPT) )
{
- case TRAP_debug:
+ case X86_EXC_DB:
if ( regs->eflags & X86_EFLAGS_TF )
{
__restore_debug_registers(vmcb, curr);
vmcb_set_dr6(vmcb, vmcb_get_dr6(vmcb) | DR_STEP);
}
/* fall through */
- case TRAP_int3:
+ case X86_EXC_BP:
if ( curr->domain->debugger_attached )
{
/* Debug/Int3: Trap to debugger. */
@@ -1354,7 +1354,7 @@ static void cf_check svm_inject_event(const struct x86_event *event)
}
break;
- case TRAP_page_fault:
+ case X86_EXC_PF:
ASSERT(_event.type == X86_EVENTTYPE_HW_EXCEPTION);
curr->arch.hvm.guest_cr[2] = _event.cr2;
vmcb_set_cr2(vmcb, _event.cr2);
@@ -1365,7 +1365,7 @@ static void cf_check svm_inject_event(const struct x86_event *event)
{
_event.vector = hvm_combine_hw_exceptions(
eventinj.vector, _event.vector);
- if ( _event.vector == TRAP_double_fault )
+ if ( _event.vector == X86_EXC_DF )
_event.error_code = 0;
}
@@ -1438,7 +1438,7 @@ static void cf_check svm_inject_event(const struct x86_event *event)
ASSERT(!eventinj.ev || eventinj.ec == (uint16_t)eventinj.ec);
vmcb->event_inj = eventinj;
- if ( _event.vector == TRAP_page_fault &&
+ if ( _event.vector == X86_EXC_PF &&
_event.type == X86_EVENTTYPE_HW_EXCEPTION )
HVMTRACE_LONG_2D(PF_INJECT, _event.error_code,
TRC_PAR_LONG(_event.cr2));
@@ -1722,7 +1722,7 @@ static void cf_check svm_fpu_dirty_intercept(void)
{
/* Check if l1 guest must make FPU ready for the l2 guest */
if ( v->arch.hvm.guest_cr[0] & X86_CR0_TS )
- hvm_inject_hw_exception(TRAP_no_device, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_NM, X86_EVENT_NO_EC);
else
vmcb_set_cr0(n1vmcb, vmcb_get_cr0(n1vmcb) & ~X86_CR0_TS);
return;
@@ -2166,7 +2166,7 @@ static void svm_do_msr_access(struct cpu_user_regs *regs)
if ( rc == X86EMUL_OKAY )
__update_guest_eip(regs, inst_len);
else if ( rc == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
}
static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb,
@@ -2189,7 +2189,7 @@ static void svm_vmexit_do_rdtsc(struct cpu_user_regs *regs, bool rdtscp)
if ( rdtscp && !currd->arch.cpuid->extd.rdtscp )
{
- hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
return;
}
@@ -2227,14 +2227,14 @@ svm_vmexit_do_vmrun(struct cpu_user_regs *regs,
{
if ( !nsvm_efer_svm_enabled(v) )
{
- hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
return;
}
if ( !nestedsvm_vmcb_map(v, vmcbaddr) )
{
gdprintk(XENLOG_ERR, "VMRUN: mapping vmcb failed, injecting #GP\n");
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
return;
}
@@ -2280,7 +2280,7 @@ svm_vmexit_do_vmload(struct vmcb_struct *vmcb,
if ( !nsvm_efer_svm_enabled(v) )
{
- hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
return;
}
@@ -2289,7 +2289,7 @@ svm_vmexit_do_vmload(struct vmcb_struct *vmcb,
{
gdprintk(XENLOG_ERR,
"VMLOAD: mapping failed, injecting #GP\n");
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
return;
}
@@ -2315,7 +2315,7 @@ svm_vmexit_do_vmsave(struct vmcb_struct *vmcb,
if ( !nsvm_efer_svm_enabled(v) )
{
- hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
return;
}
@@ -2324,7 +2324,7 @@ svm_vmexit_do_vmsave(struct vmcb_struct *vmcb,
{
gdprintk(XENLOG_ERR,
"VMSAVE: mapping vmcb failed, injecting #GP\n");
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
return;
}
@@ -2767,7 +2767,7 @@ void svm_vmexit_handler(void)
if ( rc < 0 )
goto unexpected_exit_type;
if ( !rc )
- hvm_inject_exception(TRAP_debug,
+ hvm_inject_exception(X86_EXC_DB,
trap_type, insn_len, X86_EVENT_NO_EC);
}
else
@@ -2784,7 +2784,7 @@ void svm_vmexit_handler(void)
{
/* AMD Vol2, 15.11: INT3, INTO, BOUND intercepts do not update RIP. */
__update_guest_eip(regs, insn_len);
- current->arch.gdbsx_vcpu_event = TRAP_int3;
+ current->arch.gdbsx_vcpu_event = X86_EXC_BP;
domain_pause_for_debugger();
}
else
@@ -2796,7 +2796,7 @@ void svm_vmexit_handler(void)
if ( rc < 0 )
goto unexpected_exit_type;
if ( !rc )
- hvm_inject_exception(TRAP_int3,
+ hvm_inject_exception(X86_EXC_BP,
X86_EVENTTYPE_SW_EXCEPTION,
insn_len, X86_EVENT_NO_EC);
}
@@ -2837,8 +2837,8 @@ void svm_vmexit_handler(void)
}
case VMEXIT_EXCEPTION_AC:
- HVMTRACE_1D(TRAP, TRAP_alignment_check);
- hvm_inject_hw_exception(TRAP_alignment_check, vmcb->exitinfo1);
+ HVMTRACE_1D(TRAP, X86_EXC_AC);
+ hvm_inject_hw_exception(X86_EXC_AC, vmcb->exitinfo1);
break;
case VMEXIT_EXCEPTION_UD:
@@ -2891,8 +2891,8 @@ void svm_vmexit_handler(void)
* semantics.
*/
case X86_EVENTTYPE_HW_EXCEPTION:
- if ( vmcb->exit_int_info.vector == TRAP_int3 ||
- vmcb->exit_int_info.vector == TRAP_overflow )
+ if ( vmcb->exit_int_info.vector == X86_EXC_BP ||
+ vmcb->exit_int_info.vector == X86_EXC_OF )
break;
/* Fallthrough */
case X86_EVENTTYPE_EXT_INTR:
@@ -2956,7 +2956,7 @@ void svm_vmexit_handler(void)
__update_guest_eip(regs, vmcb->ei.io.nrip - vmcb->rip);
}
else if ( !hvm_emulate_one_insn(x86_insn_is_portio, "port I/O") )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
break;
case VMEXIT_CR0_READ ... VMEXIT_CR15_READ:
@@ -2964,7 +2964,7 @@ void svm_vmexit_handler(void)
if ( cpu_has_svm_decode && (vmcb->exitinfo1 & (1ULL << 63)) )
svm_vmexit_do_cr_access(vmcb, regs);
else if ( !hvm_emulate_one_insn(x86_insn_is_cr_access, "CR access") )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
break;
case VMEXIT_INVLPG:
@@ -2974,13 +2974,13 @@ void svm_vmexit_handler(void)
__update_guest_eip(regs, vmcb->nextrip - vmcb->rip);
}
else if ( !hvm_emulate_one_insn(is_invlpg, "invlpg") )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
break;
case VMEXIT_INVLPGA:
if ( !nsvm_efer_svm_enabled(v) )
{
- hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
break;
}
if ( (insn_len = svm_get_insn_len(v, INSTR_INVLPGA)) == 0 )
@@ -3021,7 +3021,7 @@ void svm_vmexit_handler(void)
case VMEXIT_MWAIT:
case VMEXIT_SKINIT:
case VMEXIT_RDPRU:
- hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
break;
case VMEXIT_VMRUN:
@@ -3042,7 +3042,7 @@ void svm_vmexit_handler(void)
case VMEXIT_XSETBV:
if ( vmcb_get_cpl(vmcb) )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
else if ( (insn_len = svm_get_insn_len(v, INSTR_XSETBV)) &&
hvm_handle_xsetbv(regs->ecx, msr_fold(regs)) == X86EMUL_OKAY )
__update_guest_eip(regs, insn_len);
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index ba93375e87..5497463586 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -140,7 +140,7 @@ static int construct_vmcb(struct vcpu *v)
vmcb->_exception_intercepts =
HVM_TRAP_MASK |
- (v->arch.fully_eager_fpu ? 0 : (1U << TRAP_no_device));
+ (v->arch.fully_eager_fpu ? 0 : (1U << X86_EXC_NM));
if ( paging_mode_hap(v->domain) )
{
@@ -164,7 +164,7 @@ static int construct_vmcb(struct vcpu *v)
}
else
{
- vmcb->_exception_intercepts |= (1U << TRAP_page_fault);
+ vmcb->_exception_intercepts |= (1U << X86_EXC_PF);
}
if ( cpu_has_pause_filter )
diff --git a/xen/arch/x86/hvm/vm_event.c b/xen/arch/x86/hvm/vm_event.c
index 19aac19bc3..3b064bcfad 100644
--- a/xen/arch/x86/hvm/vm_event.c
+++ b/xen/arch/x86/hvm/vm_event.c
@@ -87,7 +87,7 @@ void hvm_vm_event_do_resume(struct vcpu *v)
VM_EVENT_FLAG_SET_EMUL_INSN_DATA )
kind = EMUL_KIND_SET_CONTEXT_INSN;
- hvm_emulate_one_vm_event(kind, TRAP_invalid_op,
+ hvm_emulate_one_vm_event(kind, X86_EXC_UD,
X86_EVENT_NO_EC);
v->arch.vm_event->emulate_flags = 0;
@@ -96,7 +96,7 @@ void hvm_vm_event_do_resume(struct vcpu *v)
if ( unlikely(w->do_write.cr0) )
{
if ( hvm_set_cr0(w->cr0, false) == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
w->do_write.cr0 = 0;
}
@@ -104,7 +104,7 @@ void hvm_vm_event_do_resume(struct vcpu *v)
if ( unlikely(w->do_write.cr4) )
{
if ( hvm_set_cr4(w->cr4, false) == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
w->do_write.cr4 = 0;
}
@@ -112,7 +112,7 @@ void hvm_vm_event_do_resume(struct vcpu *v)
if ( unlikely(w->do_write.cr3) )
{
if ( hvm_set_cr3(w->cr3, w->cr3_noflush, false) == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
w->do_write.cr3 = 0;
}
@@ -121,7 +121,7 @@ void hvm_vm_event_do_resume(struct vcpu *v)
{
if ( hvm_msr_write_intercept(w->msr, w->value, false) ==
X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
w->do_write.msr = 0;
}
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index 6a8316de0e..87fb537b7c 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -328,7 +328,7 @@ void vmx_intr_assist(void)
}
else if ( intack.source == hvm_intsrc_mce )
{
- hvm_inject_hw_exception(TRAP_machine_check, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_MC, X86_EVENT_NO_EC);
}
else if ( cpu_has_vmx_virtual_intr_delivery &&
intack.source != hvm_intsrc_pic &&
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index 4ac93e0810..ff44ddcfa6 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -48,21 +48,21 @@ static void realmode_deliver_exception(
if ( insn_len != 0 )
{
insn_len = 0;
- vector = TRAP_gp_fault;
+ vector = X86_EXC_GP;
goto again;
}
/* Exception or hardware interrupt. */
switch ( vector )
{
- case TRAP_double_fault:
+ case X86_EXC_DF:
hvm_triple_fault();
return;
- case TRAP_gp_fault:
- vector = TRAP_double_fault;
+ case X86_EXC_GP:
+ vector = X86_EXC_DF;
goto again;
default:
- vector = TRAP_gp_fault;
+ vector = X86_EXC_GP;
goto again;
}
}
@@ -116,14 +116,14 @@ void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
if ( curr->arch.hvm.guest_cr[0] & X86_CR0_PE )
goto fail;
- realmode_deliver_exception(TRAP_invalid_op, 0, hvmemul_ctxt);
+ realmode_deliver_exception(X86_EXC_UD, 0, hvmemul_ctxt);
}
if ( rc == X86EMUL_EXCEPTION )
{
if ( unlikely(curr->domain->debugger_attached) &&
- ((hvmemul_ctxt->ctxt.event.vector == TRAP_debug) ||
- (hvmemul_ctxt->ctxt.event.vector == TRAP_int3)) )
+ ((hvmemul_ctxt->ctxt.event.vector == X86_EXC_DB) ||
+ (hvmemul_ctxt->ctxt.event.vector == X86_EXC_BP)) )
{
domain_pause_for_debugger();
}
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index d3c75b3803..07f7a9d76b 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1295,8 +1295,8 @@ static int construct_vmcs(struct vcpu *v)
__vmwrite(VMCS_LINK_POINTER, ~0UL);
v->arch.hvm.vmx.exception_bitmap = HVM_TRAP_MASK
- | (paging_mode_hap(d) ? 0 : (1U << TRAP_page_fault))
- | (v->arch.fully_eager_fpu ? 0 : (1U << TRAP_no_device));
+ | (paging_mode_hap(d) ? 0 : (1U << X86_EXC_PF))
+ | (v->arch.fully_eager_fpu ? 0 : (1U << X86_EXC_NM));
if ( cpu_has_vmx_notify_vm_exiting )
__vmwrite(NOTIFY_WINDOW, vm_notify_window);
@@ -1871,9 +1871,9 @@ void noreturn vmx_asm_do_vmentry(void);
static void vmx_update_debug_state(struct vcpu *v)
{
if ( v->arch.hvm.debug_state_latch )
- v->arch.hvm.vmx.exception_bitmap |= 1U << TRAP_int3;
+ v->arch.hvm.vmx.exception_bitmap |= 1U << X86_EXC_BP;
else
- v->arch.hvm.vmx.exception_bitmap &= ~(1U << TRAP_int3);
+ v->arch.hvm.vmx.exception_bitmap &= ~(1U << X86_EXC_BP);
vmx_vmcs_enter(v);
vmx_update_exception_bitmap(v);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index ee4c41628c..bfc9693f7e 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -789,9 +789,9 @@ static void cf_check vmx_cpuid_policy_changed(struct vcpu *v)
if ( opt_hvm_fep ||
(v->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor) )
- v->arch.hvm.vmx.exception_bitmap |= (1U << TRAP_invalid_op);
+ v->arch.hvm.vmx.exception_bitmap |= (1U << X86_EXC_UD);
else
- v->arch.hvm.vmx.exception_bitmap &= ~(1U << TRAP_invalid_op);
+ v->arch.hvm.vmx.exception_bitmap &= ~(1U << X86_EXC_UD);
vmx_vmcs_enter(v);
vmx_update_exception_bitmap(v);
@@ -1071,7 +1071,7 @@ static int cf_check vmx_load_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
static void vmx_fpu_enter(struct vcpu *v)
{
vcpu_restore_fpu_lazy(v);
- v->arch.hvm.vmx.exception_bitmap &= ~(1u << TRAP_no_device);
+ v->arch.hvm.vmx.exception_bitmap &= ~(1u << X86_EXC_NM);
vmx_update_exception_bitmap(v);
v->arch.hvm.vmx.host_cr0 &= ~X86_CR0_TS;
__vmwrite(HOST_CR0, v->arch.hvm.vmx.host_cr0);
@@ -1098,7 +1098,7 @@ static void cf_check vmx_fpu_leave(struct vcpu *v)
{
v->arch.hvm.hw_cr[0] |= X86_CR0_TS;
__vmwrite(GUEST_CR0, v->arch.hvm.hw_cr[0]);
- v->arch.hvm.vmx.exception_bitmap |= (1u << TRAP_no_device);
+ v->arch.hvm.vmx.exception_bitmap |= (1u << X86_EXC_NM);
vmx_update_exception_bitmap(v);
}
}
@@ -1988,7 +1988,7 @@ void vmx_inject_nmi(void)
nvmx_enqueue_n2_exceptions (v,
INTR_INFO_VALID_MASK |
MASK_INSR(X86_EVENTTYPE_NMI, INTR_INFO_INTR_TYPE_MASK) |
- MASK_INSR(TRAP_nmi, INTR_INFO_VECTOR_MASK),
+ MASK_INSR(X86_EXC_NMI, INTR_INFO_VECTOR_MASK),
X86_EVENT_NO_EC, hvm_intsrc_nmi);
return;
}
@@ -2013,14 +2013,14 @@ static void cf_check vmx_inject_event(const struct x86_event *event)
switch ( _event.vector | -(_event.type == X86_EVENTTYPE_SW_INTERRUPT) )
{
- case TRAP_debug:
+ case X86_EXC_DB:
if ( guest_cpu_user_regs()->eflags & X86_EFLAGS_TF )
{
__restore_debug_registers(curr);
write_debugreg(6, read_debugreg(6) | DR_STEP);
}
if ( !nestedhvm_vcpu_in_guestmode(curr) ||
- !nvmx_intercepts_exception(curr, TRAP_debug, _event.error_code) )
+ !nvmx_intercepts_exception(curr, X86_EXC_DB, _event.error_code) )
{
unsigned long val;
@@ -2032,7 +2032,7 @@ static void cf_check vmx_inject_event(const struct x86_event *event)
if ( cpu_has_monitor_trap_flag )
break;
/* fall through */
- case TRAP_int3:
+ case X86_EXC_BP:
if ( curr->domain->debugger_attached )
{
/* Debug/Int3: Trap to debugger. */
@@ -2041,7 +2041,7 @@ static void cf_check vmx_inject_event(const struct x86_event *event)
}
break;
- case TRAP_page_fault:
+ case X86_EXC_PF:
ASSERT(_event.type == X86_EVENTTYPE_HW_EXCEPTION);
curr->arch.hvm.guest_cr[2] = _event.cr2;
break;
@@ -2058,7 +2058,7 @@ static void cf_check vmx_inject_event(const struct x86_event *event)
{
_event.vector = hvm_combine_hw_exceptions(
(uint8_t)intr_info, _event.vector);
- if ( _event.vector == TRAP_double_fault )
+ if ( _event.vector == X86_EXC_DF )
_event.error_code = 0;
}
@@ -2078,7 +2078,7 @@ static void cf_check vmx_inject_event(const struct x86_event *event)
else
__vmx_inject_exception(_event.vector, _event.type, _event.error_code);
- if ( (_event.vector == TRAP_page_fault) &&
+ if ( (_event.vector == X86_EXC_PF) &&
(_event.type == X86_EVENTTYPE_HW_EXCEPTION) )
HVMTRACE_LONG_2D(PF_INJECT, _event.error_code,
TRC_PAR_LONG(curr->arch.hvm.guest_cr[2]));
@@ -2433,7 +2433,7 @@ static bool cf_check vmx_vcpu_emulate_ve(struct vcpu *v)
__vmread(GUEST_PHYSICAL_ADDRESS, &veinfo->gpa);
vmx_vmcs_exit(v);
- hvm_inject_hw_exception(TRAP_virtualisation,
+ hvm_inject_hw_exception(X86_EXC_VE,
X86_EVENT_NO_EC);
out:
@@ -3065,7 +3065,7 @@ void update_guest_eip(void)
}
if ( regs->eflags & X86_EFLAGS_TF )
- hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_DB, X86_EVENT_NO_EC);
}
static void cf_check vmx_fpu_dirty_intercept(void)
@@ -3163,7 +3163,7 @@ static int vmx_cr_access(cr_access_qual_t qual)
HVMTRACE_LONG_1D(LMSW, value);
if ( (rc = hvm_set_cr0(value, true)) == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
return rc;
}
@@ -4081,9 +4081,9 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
__vmread(VM_EXIT_INTR_INFO, &intr_info);
BUG_ON(!(intr_info & INTR_INFO_VALID_MASK));
vector = intr_info & INTR_INFO_VECTOR_MASK;
- if ( vector == TRAP_machine_check )
+ if ( vector == X86_EXC_MC )
do_machine_check(regs);
- if ( (vector == TRAP_nmi) &&
+ if ( (vector == X86_EXC_NMI) &&
((intr_info & INTR_INFO_INTR_TYPE_MASK) ==
MASK_INSR(X86_EVENTTYPE_NMI, INTR_INFO_INTR_TYPE_MASK)) )
{
@@ -4178,9 +4178,8 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
switch ( exit_reason )
{
case EXIT_REASON_EXCEPTION_NMI:
- if ( vector != TRAP_page_fault
- && vector != TRAP_nmi
- && vector != TRAP_machine_check )
+ if ( vector != X86_EXC_PF && vector != X86_EXC_NMI &&
+ vector != X86_EXC_MC )
{
default:
perfc_incr(realmode_exits);
@@ -4229,14 +4228,14 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
*/
if ( unlikely(intr_info & INTR_INFO_NMI_UNBLOCKED_BY_IRET) &&
!(idtv_info & INTR_INFO_VALID_MASK) &&
- (vector != TRAP_double_fault) )
+ (vector != X86_EXC_DF) )
undo_nmis_unblocked_by_iret();
perfc_incra(cause_vector, vector);
switch ( vector )
{
- case TRAP_debug:
+ case X86_EXC_DB:
/*
* Updates DR6 where debugger can peek (See 3B 23.2.1,
* Table 23-1, "Exit Qualification for Debug Exceptions").
@@ -4303,7 +4302,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
else
domain_pause_for_debugger();
break;
- case TRAP_int3:
+ case X86_EXC_BP:
HVMTRACE_1D(TRAP, vector);
if ( !v->domain->debugger_attached )
{
@@ -4324,15 +4323,15 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
else
{
update_guest_eip(); /* Safe: INT3 */
- v->arch.gdbsx_vcpu_event = TRAP_int3;
+ v->arch.gdbsx_vcpu_event = X86_EXC_BP;
domain_pause_for_debugger();
}
break;
- case TRAP_no_device:
+ case X86_EXC_NM:
HVMTRACE_1D(TRAP, vector);
vmx_fpu_dirty_intercept();
break;
- case TRAP_page_fault:
+ case X86_EXC_PF:
__vmread(EXIT_QUALIFICATION, &exit_qualification);
__vmread(VM_EXIT_INTR_ERROR_CODE, &ecode);
regs->error_code = ecode;
@@ -4357,22 +4356,22 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
hvm_inject_page_fault(regs->error_code, exit_qualification);
break;
- case TRAP_alignment_check:
+ case X86_EXC_AC:
HVMTRACE_1D(TRAP, vector);
vmx_propagate_intr(intr_info);
break;
- case TRAP_nmi:
+ case X86_EXC_NMI:
if ( MASK_EXTR(intr_info, INTR_INFO_INTR_TYPE_MASK) !=
X86_EVENTTYPE_NMI )
goto exit_and_crash;
HVMTRACE_0D(NMI);
/* Already handled above. */
break;
- case TRAP_machine_check:
+ case X86_EXC_MC:
HVMTRACE_0D(MCE);
/* Already handled above. */
break;
- case TRAP_invalid_op:
+ case X86_EXC_UD:
HVMTRACE_1D(TRAP, vector);
hvm_ud_intercept(regs);
break;
@@ -4453,7 +4452,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
case EXIT_REASON_RDTSCP:
if ( !currd->arch.cpuid->extd.rdtscp )
{
- hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
break;
}
@@ -4494,7 +4493,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
break;
case X86EMUL_EXCEPTION:
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
break;
}
break;
@@ -4508,7 +4507,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
break;
case X86EMUL_EXCEPTION:
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
break;
}
break;
@@ -4530,7 +4529,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
case EXIT_REASON_VMFUNC:
if ( vmx_vmfunc_intercept(regs) != X86EMUL_OKAY )
- hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
else
update_guest_eip();
break;
@@ -4544,7 +4543,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
* as far as vmexit.
*/
WARN_ON(exit_reason == EXIT_REASON_GETSEC);
- hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
break;
case EXIT_REASON_TPR_BELOW_THRESHOLD:
@@ -4552,7 +4551,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
case EXIT_REASON_APIC_ACCESS:
if ( !vmx_handle_eoi_write() && !handle_mmio() )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
break;
case EXIT_REASON_EOI_INDUCED:
@@ -4592,7 +4591,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
if ( io_qual.str )
{
if ( !hvm_emulate_one_insn(x86_insn_is_portio, "port I/O") )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
}
else
{
@@ -4721,7 +4720,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
gprintk(XENLOG_ERR, "Unexpected vmexit: reason %lu\n", exit_reason);
if ( vmx_get_cpl() )
- hvm_inject_hw_exception(TRAP_invalid_op,
+ hvm_inject_hw_exception(X86_EXC_UD,
X86_EVENT_NO_EC);
else
domain_crash(v->domain);
@@ -4752,7 +4751,7 @@ out:
{
__vmread(VM_ENTRY_INTR_INFO, &intr_info);
if ( !(intr_info & INTR_INFO_VALID_MASK) )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
/* Need to fix rIP nevertheless. */
if ( mode == 8 )
regs->rip = (long)(regs->rip << (64 - VADDR_BITS)) >>
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 674cdabb07..27221fdb73 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -474,7 +474,7 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
return X86EMUL_OKAY;
gp_fault:
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
return X86EMUL_EXCEPTION;
}
@@ -526,7 +526,7 @@ bool cf_check nvmx_intercepts_exception(
exception_bitmap = get_vvmcs(v, EXCEPTION_BITMAP);
r = exception_bitmap & (1 << vector) ? 1: 0;
- if ( vector == TRAP_page_fault )
+ if ( vector == X86_EXC_PF )
{
pfec_match = get_vvmcs(v, PAGE_FAULT_ERROR_CODE_MATCH);
pfec_mask = get_vvmcs(v, PAGE_FAULT_ERROR_CODE_MASK);
@@ -1100,15 +1100,15 @@ static void load_shadow_guest_state(struct vcpu *v)
rc = hvm_set_cr4(get_vvmcs(v, GUEST_CR4), true);
if ( rc == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
rc = hvm_set_cr0(get_vvmcs(v, GUEST_CR0), true);
if ( rc == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
rc = hvm_set_cr3(get_vvmcs(v, GUEST_CR3), false, true);
if ( rc == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
control = get_vvmcs(v, VM_ENTRY_CONTROLS);
if ( control & VM_ENTRY_LOAD_GUEST_PAT )
@@ -1118,7 +1118,7 @@ static void load_shadow_guest_state(struct vcpu *v)
rc = hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
get_vvmcs(v, GUEST_PERF_GLOBAL_CTRL),
false);
if ( rc == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
}
hvm_set_tsc_offset(v, v->arch.hvm.cache_tsc_offset, 0);
@@ -1314,15 +1314,15 @@ static void load_vvmcs_host_state(struct vcpu *v)
rc = hvm_set_cr4(get_vvmcs(v, HOST_CR4), true);
if ( rc == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
rc = hvm_set_cr0(get_vvmcs(v, HOST_CR0), true);
if ( rc == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
rc = hvm_set_cr3(get_vvmcs(v, HOST_CR3), false, true);
if ( rc == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
control = get_vvmcs(v, VM_EXIT_CONTROLS);
if ( control & VM_EXIT_LOAD_HOST_PAT )
@@ -1332,7 +1332,7 @@ static void load_vvmcs_host_state(struct vcpu *v)
rc = hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
get_vvmcs(v, HOST_PERF_GLOBAL_CTRL),
true);
if ( rc == X86EMUL_EXCEPTION )
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
}
hvm_set_tsc_offset(v, v->arch.hvm.cache_tsc_offset, 0);
@@ -2083,13 +2083,13 @@ int nvmx_handle_vmx_insn(struct cpu_user_regs *regs, unsigned int exit_reason)
(vmx_guest_x86_mode(curr) < (hvm_long_mode_active(curr) ? 8 : 2)) ||
(exit_reason != EXIT_REASON_VMXON && !nvmx_vcpu_in_vmx(curr)) )
{
- hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
return X86EMUL_EXCEPTION;
}
if ( vmx_get_cpl() > 0 )
{
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(X86_EXC_GP, 0);
return X86EMUL_EXCEPTION;
}
@@ -2464,12 +2464,12 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
* decided by L0 and L1 exception bitmap, if the vector is set by
* both, L0 has priority on #PF and #NM, L1 has priority on others
*/
- if ( vector == TRAP_page_fault )
+ if ( vector == X86_EXC_PF )
{
if ( paging_mode_hap(v->domain) )
nvcpu->nv_vmexit_pending = 1;
}
- else if ( vector == TRAP_no_device )
+ else if ( vector == X86_EXC_NM )
{
if ( v->fpu_dirtied )
nvcpu->nv_vmexit_pending = 1;
diff --git a/xen/arch/x86/include/asm/domain.h b/xen/arch/x86/include/asm/domain.h
index 466388a98e..899ca7f70f 100644
--- a/xen/arch/x86/include/asm/domain.h
+++ b/xen/arch/x86/include/asm/domain.h
@@ -734,7 +734,7 @@ static inline void pv_inject_hw_exception(unsigned int vector, int errcode)
static inline void pv_inject_page_fault(int errcode, unsigned long cr2)
{
const struct x86_event event = {
- .vector = TRAP_page_fault,
+ .vector = X86_EXC_PF,
.type = X86_EVENTTYPE_HW_EXCEPTION,
.error_code = errcode,
.cr2 = cr2,
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index 43d3fc2498..258e0a1f29 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -540,7 +540,7 @@ static inline void hvm_inject_hw_exception(unsigned int vector, int errcode)
static inline void hvm_inject_page_fault(int errcode, unsigned long cr2)
{
struct x86_event event = {
- .vector = TRAP_page_fault,
+ .vector = X86_EXC_PF,
.type = X86_EVENTTYPE_HW_EXCEPTION,
.error_code = errcode,
.cr2 = cr2,
@@ -564,9 +564,9 @@ static inline void hvm_invlpg(struct vcpu *v, unsigned long linear)
(X86_CR4_VMXE | X86_CR4_PAE | X86_CR4_MCE))
/* These exceptions must always be intercepted. */
-#define HVM_TRAP_MASK ((1U << TRAP_debug) | \
- (1U << TRAP_alignment_check) | \
- (1U << TRAP_machine_check))
+#define HVM_TRAP_MASK ((1U << X86_EXC_DB) | \
+ (1U << X86_EXC_AC) | \
+ (1U << X86_EXC_MC))
/* Called in boot/resume paths. Must cope with no HVM support. */
static inline int hvm_cpu_up(void)
diff --git a/xen/arch/x86/include/asm/processor.h b/xen/arch/x86/include/asm/processor.h
index 678daeb484..0eaa2c3094 100644
--- a/xen/arch/x86/include/asm/processor.h
+++ b/xen/arch/x86/include/asm/processor.h
@@ -17,34 +17,6 @@
#include <asm/x86-defns.h>
#include <asm/x86-vendors.h>
-/*
- * Trap/fault mnemonics.
- */
-#define TRAP_divide_error 0
-#define TRAP_debug 1
-#define TRAP_nmi 2
-#define TRAP_int3 3
-#define TRAP_overflow 4
-#define TRAP_bounds 5
-#define TRAP_invalid_op 6
-#define TRAP_no_device 7
-#define TRAP_double_fault 8
-#define TRAP_copro_seg 9
-#define TRAP_invalid_tss 10
-#define TRAP_no_segment 11
-#define TRAP_stack_error 12
-#define TRAP_gp_fault 13
-#define TRAP_page_fault 14
-#define TRAP_spurious_int 15
-#define TRAP_copro_error 16
-#define TRAP_alignment_check 17
-#define TRAP_machine_check 18
-#define TRAP_simd_error 19
-#define TRAP_virtualisation 20
-#define TRAP_nr 32
-
-#define TRAP_HAVE_EC X86_EXC_HAVE_EC
-
/* Set for entry via SYSCALL. Informs return code to use SYSRETQ not IRETQ. */
/* NB. Same as VGCF_in_syscall. No bits in common with any other TRAP_ defn. */
#define TRAP_syscall 256
@@ -403,18 +375,18 @@ static inline void set_ist(idt_entry_t *idt, unsigned int ist)
static inline void enable_each_ist(idt_entry_t *idt)
{
- set_ist(&idt[TRAP_double_fault], IST_DF);
- set_ist(&idt[TRAP_nmi], IST_NMI);
- set_ist(&idt[TRAP_machine_check], IST_MCE);
- set_ist(&idt[TRAP_debug], IST_DB);
+ set_ist(&idt[X86_EXC_DF], IST_DF);
+ set_ist(&idt[X86_EXC_NMI], IST_NMI);
+ set_ist(&idt[X86_EXC_MC], IST_MCE);
+ set_ist(&idt[X86_EXC_DB], IST_DB);
}
static inline void disable_each_ist(idt_entry_t *idt)
{
- set_ist(&idt[TRAP_double_fault], IST_NONE);
- set_ist(&idt[TRAP_nmi], IST_NONE);
- set_ist(&idt[TRAP_machine_check], IST_NONE);
- set_ist(&idt[TRAP_debug], IST_NONE);
+ set_ist(&idt[X86_EXC_DF], IST_NONE);
+ set_ist(&idt[X86_EXC_NMI], IST_NONE);
+ set_ist(&idt[X86_EXC_MC], IST_NONE);
+ set_ist(&idt[X86_EXC_DB], IST_NONE);
}
#define IDT_ENTRIES 256
diff --git a/xen/arch/x86/include/asm/x86-defns.h b/xen/arch/x86/include/asm/x86-defns.h
index fe1caba6f8..e350227e57 100644
--- a/xen/arch/x86/include/asm/x86-defns.h
+++ b/xen/arch/x86/include/asm/x86-defns.h
@@ -147,6 +147,8 @@
#define X86_EXC_VC 29 /* VMM Communication */
#define X86_EXC_SX 30 /* Security Exception */
+#define X86_EXC_NUM 32 /* 32 reserved vectors */
+
/* Bitmap of exceptions which have error codes. */
#define X86_EXC_HAVE_EC \
((1u << X86_EXC_DF) | (1u << X86_EXC_TS) | (1u << X86_EXC_NP) | \
diff --git a/xen/arch/x86/machine_kexec.c b/xen/arch/x86/machine_kexec.c
index 1dd0c9aad8..d50772ad6c 100644
--- a/xen/arch/x86/machine_kexec.c
+++ b/xen/arch/x86/machine_kexec.c
@@ -170,7 +170,7 @@ void machine_kexec(struct kexec_image *image)
{
if ( idt_tables[i] == NULL )
continue;
- _update_gate_addr_lower(&idt_tables[i][TRAP_machine_check], &trap_nop);
+ _update_gate_addr_lower(&idt_tables[i][X86_EXC_MC], &trap_nop);
}
/* Reset CPUID masking and faulting to the host's default. */
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 0fe14faa5f..8e6d000950 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1232,7 +1232,7 @@ void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner)
gprintk(XENLOG_WARNING,
"Attempt to implicitly unmap %pd's grant PTE %" PRIpte "\n",
l1e_owner, l1e_get_intpte(l1e));
- pv_inject_hw_exception(TRAP_gp_fault, 0);
+ pv_inject_hw_exception(X86_EXC_GP, 0);
}
#endif
diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index f3aed9fcc9..676d922973 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -219,7 +219,7 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
npfec.kind == npfec_kind_in_gpt )
{
v->arch.vm_event->send_event = true;
- hvm_emulate_one_vm_event(EMUL_KIND_NORMAL, TRAP_invalid_op, X86_EVENT_NO_EC);
+ hvm_emulate_one_vm_event(EMUL_KIND_NORMAL, X86_EXC_UD, X86_EVENT_NO_EC);
v->arch.vm_event->send_event = false;
return true;
diff --git a/xen/arch/x86/mm/shadow/hvm.c b/xen/arch/x86/mm/shadow/hvm.c
index cc84af0192..3a00daf9c4 100644
--- a/xen/arch/x86/mm/shadow/hvm.c
+++ b/xen/arch/x86/mm/shadow/hvm.c
@@ -98,7 +98,7 @@ static int hvm_translate_virtual_addr(
*/
if ( is_x86_user_segment(seg) )
x86_emul_hw_exception(
- (seg == x86_seg_ss) ? TRAP_stack_error : TRAP_gp_fault,
+ (seg == x86_seg_ss) ? X86_EXC_SS : X86_EXC_GP,
0, &sh_ctxt->ctxt);
return X86EMUL_EXCEPTION;
}
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index ddcf34972d..f86cc5b079 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -2736,9 +2736,9 @@ static int cf_check sh_page_fault(
* stream under Xen's feet.
*/
if ( emul_ctxt.ctxt.event.type == X86_EVENTTYPE_HW_EXCEPTION &&
- ((emul_ctxt.ctxt.event.vector == TRAP_page_fault) ||
- (((emul_ctxt.ctxt.event.vector == TRAP_gp_fault) ||
- (emul_ctxt.ctxt.event.vector == TRAP_stack_error)) &&
+ ((emul_ctxt.ctxt.event.vector == X86_EXC_PF) ||
+ (((emul_ctxt.ctxt.event.vector == X86_EXC_GP) ||
+ (emul_ctxt.ctxt.event.vector == X86_EXC_SS)) &&
emul_ctxt.ctxt.event.error_code == 0)) )
hvm_inject_event(&emul_ctxt.ctxt.event);
else
@@ -2800,7 +2800,7 @@ static int cf_check sh_page_fault(
#endif
if ( emul_ctxt.ctxt.retire.singlestep )
- hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_DB, X86_EVENT_NO_EC);
#if GUEST_PAGING_LEVELS == 3 /* PAE guest */
/*
@@ -2841,7 +2841,7 @@ static int cf_check sh_page_fault(
TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_EMULATION_LAST_FAILED);
if ( emul_ctxt.ctxt.retire.singlestep )
- hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
+ hvm_inject_hw_exception(X86_EXC_DB, X86_EVENT_NO_EC);
break; /* Don't emulate again if we failed! */
}
diff --git a/xen/arch/x86/pv/callback.c b/xen/arch/x86/pv/callback.c
index 067ee3b795..ca3bc30e91 100644
--- a/xen/arch/x86/pv/callback.c
+++ b/xen/arch/x86/pv/callback.c
@@ -29,12 +29,12 @@ static int register_guest_nmi_callback(unsigned long address)
{
struct vcpu *curr = current;
struct domain *d = curr->domain;
- struct trap_info *t = &curr->arch.pv.trap_ctxt[TRAP_nmi];
+ struct trap_info *t = &curr->arch.pv.trap_ctxt[X86_EXC_NMI];
if ( !is_canonical_address(address) )
return -EINVAL;
- t->vector = TRAP_nmi;
+ t->vector = X86_EXC_NMI;
t->flags = 0;
t->cs = (is_pv_32bit_domain(d) ?
FLAT_COMPAT_KERNEL_CS : FLAT_KERNEL_CS);
@@ -54,7 +54,7 @@ static int register_guest_nmi_callback(unsigned long address)
static void unregister_guest_nmi_callback(void)
{
struct vcpu *curr = current;
- struct trap_info *t = &curr->arch.pv.trap_ctxt[TRAP_nmi];
+ struct trap_info *t = &curr->arch.pv.trap_ctxt[X86_EXC_NMI];
memset(t, 0, sizeof(*t));
}
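The trap_ctxt[] array indexed above holds one guest-registered PV
callback per exception vector, which is why plain X86_EXC_* constants
can serve as indices. A simplified sketch of the slot being indexed,
with the field names taken from the hunk above (the authoritative
definition lives in Xen's public headers):

    /* Simplified per-vector PV callback slot; trap_ctxt[] is an
     * array of these, indexed by X86_EXC_*. */
    struct trap_info {
        uint8_t       vector;  /* e.g. X86_EXC_NMI */
        uint8_t       flags;   /* TI flags; 0 above */
        uint16_t      cs;      /* guest code selector for the callback */
        unsigned long address; /* guest entry point */
    };

    /* Unregistering is then just clearing the slot, as above:
     *     memset(&trap_ctxt[X86_EXC_NMI], 0, sizeof(struct trap_info));
     */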
diff --git a/xen/arch/x86/pv/emul-gate-op.c b/xen/arch/x86/pv/emul-gate-op.c
index 758a20ad9d..9524982e7d 100644
--- a/xen/arch/x86/pv/emul-gate-op.c
+++ b/xen/arch/x86/pv/emul-gate-op.c
@@ -185,12 +185,12 @@ void pv_emulate_gate_op(struct cpu_user_regs *regs)
(((ar >> 13) & 3) < (regs->cs & 3)) ||
((ar & _SEGMENT_TYPE) != 0xc00) )
{
- pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
+ pv_inject_hw_exception(X86_EXC_GP, regs->error_code);
return;
}
if ( !(ar & _SEGMENT_P) )
{
- pv_inject_hw_exception(TRAP_no_segment, regs->error_code);
+ pv_inject_hw_exception(X86_EXC_NP, regs->error_code);
return;
}
dpl = (ar >> 13) & 3;
@@ -206,7 +206,7 @@ void pv_emulate_gate_op(struct cpu_user_regs *regs)
!(ar & _SEGMENT_P) ||
!(ar & _SEGMENT_CODE) )
{
- pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
+ pv_inject_hw_exception(X86_EXC_GP, regs->error_code);
return;
}
@@ -219,7 +219,7 @@ void pv_emulate_gate_op(struct cpu_user_regs *regs)
if ( PTR_ERR(state) == -X86EMUL_EXCEPTION )
pv_inject_event(&ctxt.ctxt.event);
else
- pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
+ pv_inject_hw_exception(X86_EXC_GP, regs->error_code);
return;
}
@@ -268,7 +268,7 @@ void pv_emulate_gate_op(struct cpu_user_regs *regs)
(opnd_sel & ~3) != regs->error_code ||
dpl < (opnd_sel & 3) )
{
- pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
+ pv_inject_hw_exception(X86_EXC_GP, regs->error_code);
return;
}
@@ -279,17 +279,17 @@ void pv_emulate_gate_op(struct cpu_user_regs *regs)
((ar >> 13) & 3) > (regs->cs & 3) :
((ar >> 13) & 3) != (regs->cs & 3)) )
{
- pv_inject_hw_exception(TRAP_gp_fault, sel);
+ pv_inject_hw_exception(X86_EXC_GP, sel);
return;
}
if ( !(ar & _SEGMENT_P) )
{
- pv_inject_hw_exception(TRAP_no_segment, sel);
+ pv_inject_hw_exception(X86_EXC_NP, sel);
return;
}
if ( off > limit )
{
- pv_inject_hw_exception(TRAP_gp_fault, 0);
+ pv_inject_hw_exception(X86_EXC_GP, 0);
return;
}
@@ -316,7 +316,7 @@ void pv_emulate_gate_op(struct cpu_user_regs *regs)
/* Inner stack known only for kernel ring. */
if ( (sel & 3) != GUEST_KERNEL_RPL(v->domain) )
{
- pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
+ pv_inject_hw_exception(X86_EXC_GP, regs->error_code);
return;
}
esp = v->arch.pv.kernel_sp;
@@ -328,19 +328,19 @@ void pv_emulate_gate_op(struct cpu_user_regs *regs)
(ar & _SEGMENT_CODE) ||
!(ar & _SEGMENT_WR) )
{
- pv_inject_hw_exception(TRAP_invalid_tss, ss & ~3);
+ pv_inject_hw_exception(X86_EXC_TS, ss & ~3);
return;
}
if ( !(ar & _SEGMENT_P) ||
!check_stack_limit(ar, limit, esp, (4 + nparm) * 4) )
{
- pv_inject_hw_exception(TRAP_stack_error, ss & ~3);
+ pv_inject_hw_exception(X86_EXC_SS, ss & ~3);
return;
}
stkp = (unsigned int *)(unsigned long)((unsigned int)base + esp);
if ( !compat_access_ok(stkp - 4 - nparm, 16 + nparm * 4) )
{
- pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
+ pv_inject_hw_exception(X86_EXC_GP, regs->error_code);
return;
}
push(regs->ss);
@@ -356,12 +356,12 @@ void pv_emulate_gate_op(struct cpu_user_regs *regs)
(ar & _SEGMENT_CODE) ||
!(ar & _SEGMENT_WR) ||
!check_stack_limit(ar, limit, esp + nparm * 4, nparm * 4)
)
- return pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
+ return pv_inject_hw_exception(X86_EXC_GP, regs->error_code);
ustkp = (unsigned int *)(unsigned long)
((unsigned int)base + regs->esp + nparm * 4);
if ( !compat_access_ok(ustkp - nparm, 0 + nparm * 4) )
{
- pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
+ pv_inject_hw_exception(X86_EXC_GP, regs->error_code);
return;
}
do
@@ -387,18 +387,18 @@ void pv_emulate_gate_op(struct cpu_user_regs *regs)
if ( !pv_emul_read_descriptor(ss, v, &base, &limit, &ar, 0) ||
((ar >> 13) & 3) != (sel & 3) )
{
- pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
+ pv_inject_hw_exception(X86_EXC_GP, regs->error_code);
return;
}
if ( !check_stack_limit(ar, limit, esp, 2 * 4) )
{
- pv_inject_hw_exception(TRAP_stack_error, 0);
+ pv_inject_hw_exception(X86_EXC_SS, 0);
return;
}
stkp = (unsigned int *)(unsigned long)((unsigned int)base + esp);
if ( !compat_access_ok(stkp - 2, 2 * 4) )
{
- pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
+ pv_inject_hw_exception(X86_EXC_GP, regs->error_code);
return;
}
}
diff --git a/xen/arch/x86/pv/emul-inv-op.c b/xen/arch/x86/pv/emul-inv-op.c
index 2c07eed9a0..32eb34e1a2 100644
--- a/xen/arch/x86/pv/emul-inv-op.c
+++ b/xen/arch/x86/pv/emul-inv-op.c
@@ -57,7 +57,7 @@ static int emulate_forced_invalid_op(struct cpu_user_regs *regs)
!guest_kernel_mode(current, regs) )
{
regs->rip = eip;
- pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
+ pv_inject_hw_exception(X86_EXC_GP, regs->error_code);
return EXCRET_fault_fixed;
}
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index 04416f1979..a30a172066 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -609,8 +609,7 @@ static int pv_emul_virt_to_linear(unsigned long base, unsigned long offset,
rc = X86EMUL_EXCEPTION;
if ( unlikely(rc == X86EMUL_EXCEPTION) )
- x86_emul_hw_exception(seg != x86_seg_ss ? TRAP_gp_fault
- : TRAP_stack_error,
+ x86_emul_hw_exception(seg != x86_seg_ss ? X86_EXC_GP : X86_EXC_SS,
0, ctxt);
return rc;
@@ -645,7 +644,7 @@ static int cf_check rep_ins(
(sreg.type & (_SEGMENT_CODE >> 8)) ||
!(sreg.type & (_SEGMENT_WR >> 8)) )
{
- x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
+ x86_emul_hw_exception(X86_EXC_GP, 0, ctxt);
return X86EMUL_EXCEPTION;
}
@@ -711,8 +710,7 @@ static int cf_check rep_outs(
((sreg.type & (_SEGMENT_CODE >> 8)) &&
!(sreg.type & (_SEGMENT_WR >> 8))) )
{
- x86_emul_hw_exception(seg != x86_seg_ss ? TRAP_gp_fault
- : TRAP_stack_error,
+ x86_emul_hw_exception(seg != x86_seg_ss ? X86_EXC_GP : X86_EXC_SS,
0, ctxt);
return X86EMUL_EXCEPTION;
}
@@ -893,7 +891,7 @@ static int cf_check read_msr(
if ( (ret = guest_rdmsr(curr, reg, val)) != X86EMUL_UNHANDLEABLE )
{
if ( ret == X86EMUL_EXCEPTION )
- x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
+ x86_emul_hw_exception(X86_EXC_GP, 0, ctxt);
goto done;
}
@@ -1041,7 +1039,7 @@ static int cf_check write_msr(
if ( (ret = guest_wrmsr(curr, reg, val)) != X86EMUL_UNHANDLEABLE )
{
if ( ret == X86EMUL_EXCEPTION )
- x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
+ x86_emul_hw_exception(X86_EXC_GP, 0, ctxt);
return ret;
}
@@ -1376,7 +1374,7 @@ int pv_emulate_privileged_op(struct cpu_user_regs *regs)
{
curr->arch.dr6 |= ctxt.bpmatch | DR_STATUS_RESERVED_ONE;
if ( !(curr->arch.pv.trap_bounce.flags & TBF_EXCEPTION) )
- pv_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
+ pv_inject_hw_exception(X86_EXC_DB, X86_EVENT_NO_EC);
}
/* fall through */
case X86EMUL_RETRY:
diff --git a/xen/arch/x86/pv/emulate.c b/xen/arch/x86/pv/emulate.c
index 0a7907ec5e..2eff77c577 100644
--- a/xen/arch/x86/pv/emulate.c
+++ b/xen/arch/x86/pv/emulate.c
@@ -86,7 +86,7 @@ void pv_emul_instruction_done(struct cpu_user_regs *regs, unsigned long rip)
if ( regs->eflags & X86_EFLAGS_TF )
{
current->arch.dr6 |= DR_STEP | DR_STATUS_RESERVED_ONE;
- pv_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
+ pv_inject_hw_exception(X86_EXC_DB, X86_EVENT_NO_EC);
}
}
diff --git a/xen/arch/x86/pv/iret.c b/xen/arch/x86/pv/iret.c
index 316a23e77e..d3a1fb2c68 100644
--- a/xen/arch/x86/pv/iret.c
+++ b/xen/arch/x86/pv/iret.c
@@ -186,7 +186,7 @@ int compat_iret(void)
regs->esp = ksp;
regs->ss = v->arch.pv.kernel_ss;
- ti = &v->arch.pv.trap_ctxt[TRAP_gp_fault];
+ ti = &v->arch.pv.trap_ctxt[X86_EXC_GP];
if ( TI_GET_IF(ti) )
eflags &= ~X86_EFLAGS_IF;
regs->eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF|
diff --git a/xen/arch/x86/pv/ro-page-fault.c b/xen/arch/x86/pv/ro-page-fault.c
index 0d02c7d2ab..e17ed4e437 100644
--- a/xen/arch/x86/pv/ro-page-fault.c
+++ b/xen/arch/x86/pv/ro-page-fault.c
@@ -390,7 +390,7 @@ int pv_ro_page_fault(unsigned long addr, struct cpu_user_regs *regs)
* or a guest playing with the instruction stream under Xen's feet.
*/
if ( ctxt.event.type == X86_EVENTTYPE_HW_EXCEPTION &&
- ctxt.event.vector == TRAP_page_fault )
+ ctxt.event.vector == X86_EXC_PF )
pv_inject_event(&ctxt.event);
else
gdprintk(XENLOG_WARNING,
@@ -400,7 +400,7 @@ int pv_ro_page_fault(unsigned long addr, struct cpu_user_regs *regs)
/* Fallthrough */
case X86EMUL_OKAY:
if ( ctxt.retire.singlestep )
- pv_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
+ pv_inject_hw_exception(X86_EXC_DB, X86_EVENT_NO_EC);
/* Fallthrough */
case X86EMUL_RETRY:
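The retire.singlestep handling above is one instance of a pattern this
patch touches in several files (shadow/multi.c, emul-priv-op.c,
pv/emulate.c): once emulation completes, a pending single-step becomes
a #DB injection with no error code. In outline, with the surrounding
logic elided:

    /* Outline of the recurring post-emulation single-step pattern;
     * names as in the hunks above, not a complete handler. */
    switch ( rc )
    {
    case X86EMUL_OKAY:
        if ( ctxt.retire.singlestep )
            /* #DB never carries an error code (absent from
             * X86_EXC_HAVE_EC), hence X86_EVENT_NO_EC. */
            pv_inject_hw_exception(X86_EXC_DB, X86_EVENT_NO_EC);
        /* fall through */
    case X86EMUL_RETRY:
        break;
    }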
diff --git a/xen/arch/x86/pv/traps.c b/xen/arch/x86/pv/traps.c
index 97fe54b5ee..07040ced4a 100644
--- a/xen/arch/x86/pv/traps.c
+++ b/xen/arch/x86/pv/traps.c
@@ -43,7 +43,7 @@ void pv_inject_event(const struct x86_event *event)
if ( event->type == X86_EVENTTYPE_HW_EXCEPTION )
{
ASSERT(vector < 32);
- use_error_code = TRAP_HAVE_EC & (1u << vector);
+ use_error_code = X86_EXC_HAVE_EC & (1u << vector);
}
else
{
@@ -63,7 +63,7 @@ void pv_inject_event(const struct x86_event *event)
tb->eip = ti->address;
if ( event->type == X86_EVENTTYPE_HW_EXCEPTION &&
- vector == TRAP_page_fault )
+ vector == X86_EXC_PF )
{
curr->arch.pv.ctrlreg[2] = event->cr2;
arch_set_cr2(curr, event->cr2);
@@ -93,7 +93,7 @@ void pv_inject_event(const struct x86_event *event)
"Unhandled: vec %u, %s[%04x]\n",
vector, vector_name(vector), error_code);
- if ( vector == TRAP_page_fault )
+ if ( vector == X86_EXC_PF )
show_page_walk(event->cr2);
}
}
@@ -107,7 +107,7 @@ bool set_guest_machinecheck_trapbounce(void)
struct vcpu *curr = current;
struct trap_bounce *tb = &curr->arch.pv.trap_bounce;
- pv_inject_hw_exception(TRAP_machine_check, X86_EVENT_NO_EC);
+ pv_inject_hw_exception(X86_EXC_MC, X86_EVENT_NO_EC);
tb->flags &= ~TBF_EXCEPTION; /* not needed for MCE delivery path */
return !null_trap_bounce(curr, tb);
@@ -122,7 +122,7 @@ bool set_guest_nmi_trapbounce(void)
struct vcpu *curr = current;
struct trap_bounce *tb = &curr->arch.pv.trap_bounce;
- pv_inject_hw_exception(TRAP_nmi, X86_EVENT_NO_EC);
+ pv_inject_hw_exception(X86_EXC_NMI, X86_EVENT_NO_EC);
tb->flags &= ~TBF_EXCEPTION; /* not needed for NMI delivery path */
return !null_trap_bounce(curr, tb);
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index e4f8b158e1..58e263056a 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -806,7 +806,7 @@ void fatal_trap(const struct cpu_user_regs *regs, bool show_remote)
show_execution_state(regs);
- if ( trapnr == TRAP_page_fault )
+ if ( trapnr == X86_EXC_PF )
show_page_walk(read_cr2());
if ( show_remote )
@@ -945,7 +945,7 @@ void do_trap(struct cpu_user_regs *regs)
if ( guest_mode(regs) )
{
pv_inject_hw_exception(trapnr,
- (TRAP_HAVE_EC & (1u << trapnr))
+ (X86_EXC_HAVE_EC & (1u << trapnr))
? regs->error_code : X86_EVENT_NO_EC);
return;
}
@@ -1174,7 +1174,7 @@ void do_invalid_op(struct cpu_user_regs *regs)
if ( likely(guest_mode(regs)) )
{
if ( pv_emulate_invalid_op(regs) )
- pv_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
+ pv_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
return;
}
@@ -1203,11 +1203,11 @@ void do_invalid_op(struct cpu_user_regs *regs)
if ( likely(extable_fixup(regs, true)) )
return;
- if ( debugger_trap_fatal(TRAP_invalid_op, regs) )
+ if ( debugger_trap_fatal(X86_EXC_UD, regs) )
return;
show_execution_state(regs);
- panic("FATAL TRAP: vector = %d (invalid opcode)\n", TRAP_invalid_op);
+ panic("FATAL TRAP: vector = %d (invalid opcode)\n", X86_EXC_UD);
}
void do_int3(struct cpu_user_regs *regs)
@@ -1219,7 +1219,7 @@ void do_int3(struct cpu_user_regs *regs)
if ( likely(extable_fixup(regs, true)) )
return;
- if ( !debugger_trap_fatal(TRAP_int3, regs) )
+ if ( !debugger_trap_fatal(X86_EXC_BP, regs) )
printk(XENLOG_DEBUG "Hit embedded breakpoint at %p [%ps]\n",
_p(regs->rip), _p(regs->rip));
@@ -1228,12 +1228,12 @@ void do_int3(struct cpu_user_regs *regs)
if ( guest_kernel_mode(curr, regs) && curr->domain->debugger_attached )
{
- curr->arch.gdbsx_vcpu_event = TRAP_int3;
+ curr->arch.gdbsx_vcpu_event = X86_EXC_BP;
domain_pause_for_debugger();
return;
}
- pv_inject_hw_exception(TRAP_int3, X86_EVENT_NO_EC);
+ pv_inject_hw_exception(X86_EXC_BP, X86_EVENT_NO_EC);
}
#ifdef CONFIG_PV
@@ -1267,7 +1267,7 @@ static int handle_ldt_mapping_fault(unsigned int offset,
{
uint16_t ec = (offset & ~(X86_XEC_EXT | X86_XEC_IDT)) | X86_XEC_TI;
- pv_inject_hw_exception(TRAP_gp_fault, ec);
+ pv_inject_hw_exception(X86_EXC_GP, ec);
}
else
/* else pass the #PF back, with adjusted %cr2. */
@@ -1555,7 +1555,7 @@ void do_page_fault(struct cpu_user_regs *regs)
}
fatal:
- if ( debugger_trap_fatal(TRAP_page_fault, regs) )
+ if ( debugger_trap_fatal(X86_EXC_PF, regs) )
return;
show_execution_state(regs);
@@ -1663,7 +1663,7 @@ void do_general_protection(struct cpu_user_regs *regs)
}
/* Pass on GPF as is. */
- pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
+ pv_inject_hw_exception(X86_EXC_GP, regs->error_code);
return;
#endif
@@ -1673,7 +1673,7 @@ void do_general_protection(struct cpu_user_regs *regs)
return;
hardware_gp:
- if ( debugger_trap_fatal(TRAP_gp_fault, regs) )
+ if ( debugger_trap_fatal(X86_EXC_GP, regs) )
return;
show_execution_state(regs);
@@ -1881,7 +1881,7 @@ void do_device_not_available(struct cpu_user_regs *regs)
if ( curr->arch.pv.ctrlreg[0] & X86_CR0_TS )
{
- pv_inject_hw_exception(TRAP_no_device, X86_EVENT_NO_EC);
+ pv_inject_hw_exception(X86_EXC_NM, X86_EVENT_NO_EC);
curr->arch.pv.ctrlreg[0] &= ~X86_CR0_TS;
}
else
@@ -1945,7 +1945,7 @@ void do_debug(struct cpu_user_regs *regs)
return;
}
#endif
- if ( !debugger_trap_fatal(TRAP_debug, regs) )
+ if ( !debugger_trap_fatal(X86_EXC_DB, regs) )
{
WARN();
regs->eflags &= ~X86_EFLAGS_TF;
@@ -2013,7 +2013,7 @@ void do_debug(struct cpu_user_regs *regs)
return;
}
- pv_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
+ pv_inject_hw_exception(X86_EXC_DB, X86_EVENT_NO_EC);
}
void do_entry_CP(struct cpu_user_regs *regs)
@@ -2130,25 +2130,25 @@ void __init init_idt_traps(void)
* saved. The page-fault handler also needs interrupts disabled until %cr2
* has been read and saved on the stack.
*/
- set_intr_gate(TRAP_divide_error,&divide_error);
- set_intr_gate(TRAP_debug,&debug);
- set_intr_gate(TRAP_nmi,&nmi);
- set_swint_gate(TRAP_int3,&int3); /* usable from all privileges */
- set_swint_gate(TRAP_overflow,&overflow); /* usable from all privileges */
- set_intr_gate(TRAP_bounds,&bounds);
- set_intr_gate(TRAP_invalid_op,&invalid_op);
- set_intr_gate(TRAP_no_device,&device_not_available);
- set_intr_gate(TRAP_double_fault,&double_fault);
- set_intr_gate(TRAP_invalid_tss,&invalid_TSS);
- set_intr_gate(TRAP_no_segment,&segment_not_present);
- set_intr_gate(TRAP_stack_error,&stack_segment);
- set_intr_gate(TRAP_gp_fault,&general_protection);
- set_intr_gate(TRAP_page_fault,&early_page_fault);
- set_intr_gate(TRAP_copro_error,&coprocessor_error);
- set_intr_gate(TRAP_alignment_check,&alignment_check);
- set_intr_gate(TRAP_machine_check,&machine_check);
- set_intr_gate(TRAP_simd_error,&simd_coprocessor_error);
- set_intr_gate(X86_EXC_CP, entry_CP);
+ set_intr_gate(X86_EXC_DE, divide_error);
+ set_intr_gate(X86_EXC_DB, debug);
+ set_intr_gate(X86_EXC_NMI, nmi);
+ set_swint_gate(X86_EXC_BP, int3); /* usable from all privileges */
+ set_swint_gate(X86_EXC_OF, overflow); /* usable from all privileges */
+ set_intr_gate(X86_EXC_BR, bounds);
+ set_intr_gate(X86_EXC_UD, invalid_op);
+ set_intr_gate(X86_EXC_NM, device_not_available);
+ set_intr_gate(X86_EXC_DF, double_fault);
+ set_intr_gate(X86_EXC_TS, invalid_TSS);
+ set_intr_gate(X86_EXC_NP, segment_not_present);
+ set_intr_gate(X86_EXC_SS, stack_segment);
+ set_intr_gate(X86_EXC_GP, general_protection);
+ set_intr_gate(X86_EXC_PF, early_page_fault);
+ set_intr_gate(X86_EXC_MF, coprocessor_error);
+ set_intr_gate(X86_EXC_AC, alignment_check);
+ set_intr_gate(X86_EXC_MC, machine_check);
+ set_intr_gate(X86_EXC_XM, simd_coprocessor_error);
+ set_intr_gate(X86_EXC_CP, entry_CP);
/* Specify dedicated interrupt stacks for NMI, #DF, and #MC. */
enable_each_ist(idt_table);
@@ -2167,7 +2167,7 @@ void __init trap_init(void)
unsigned int vector;
/* Replace early pagefault with real pagefault handler. */
- set_intr_gate(TRAP_page_fault, &page_fault);
+ set_intr_gate(X86_EXC_PF, &page_fault);
pv_trap_init();
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index b86d38d1c5..bd5abd8040 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -208,13 +208,13 @@ ENTRY(compat_syscall)
leal (,%rcx,TBF_INTERRUPT),%ecx
UNLIKELY_START(z, compat_syscall_gpf)
movq VCPU_trap_ctxt(%rbx),%rdi
- movl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
+ movl $X86_EXC_GP, UREGS_entry_vector(%rsp)
subl $2,UREGS_rip(%rsp)
/* %r12 is still zero at this point. */
mov %r12d, TRAPBOUNCE_error_code(%rdx)
- movl TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_eip(%rdi),%eax
- movzwl TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_cs(%rdi),%esi
- testb $4,TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_flags(%rdi)
+ movl X86_EXC_GP * TRAPINFO_sizeof + TRAPINFO_eip(%rdi),%eax
+ movzwl X86_EXC_GP * TRAPINFO_sizeof + TRAPINFO_cs(%rdi),%esi
+ testb $4, X86_EXC_GP * TRAPINFO_sizeof + TRAPINFO_flags(%rdi)
setnz %cl
leal TBF_EXCEPTION|TBF_EXCEPTION_ERRCODE(,%rcx,TBF_INTERRUPT),%ecx
UNLIKELY_END(compat_syscall_gpf)
@@ -226,9 +226,9 @@ UNLIKELY_END(compat_syscall_gpf)
ENTRY(compat_sysenter)
CR4_PV32_RESTORE
movq VCPU_trap_ctxt(%rbx),%rcx
- cmpb $TRAP_gp_fault,UREGS_entry_vector(%rsp)
+ cmpb $X86_EXC_GP, UREGS_entry_vector(%rsp)
movzwl VCPU_sysenter_sel(%rbx),%eax
- movzwl TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_cs(%rcx),%ecx
+ movzwl X86_EXC_GP * TRAPINFO_sizeof + TRAPINFO_cs(%rcx),%ecx
cmovel %ecx,%eax
testl $~3,%eax
movl $FLAT_COMPAT_USER_SS,UREGS_ss(%rsp)
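The scaled-index operands above (X86_EXC_GP * TRAPINFO_sizeof +
TRAPINFO_eip/_cs/_flags) are the assembly spelling of a trap_ctxt[]
lookup. A rough C rendering follows, assuming TRAPINFO_sizeof and the
TRAPINFO_* offsets are the generated asm-offsets for struct trap_info
(an assumption; they are defined elsewhere in the tree):

    /* Rough C equivalent of the asm addressing above. */
    const struct trap_info *ti = v->arch.pv.trap_ctxt; /* %rdi / %rcx */
    unsigned long eip = ti[X86_EXC_GP].address; /* ... + TRAPINFO_eip */
    unsigned int  cs  = ti[X86_EXC_GP].cs;      /* ... + TRAPINFO_cs  */
    bool          f   = ti[X86_EXC_GP].flags & 4; /* testb $4, ...    */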
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 7675a59ff0..8b77d7113b 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -370,10 +370,10 @@ UNLIKELY_END(sysenter_nt_set)
leal (,%rcx,TBF_INTERRUPT),%ecx
UNLIKELY_START(z, sysenter_gpf)
movq VCPU_trap_ctxt(%rbx),%rsi
- movl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
+ movl $X86_EXC_GP, UREGS_entry_vector(%rsp)
movl %eax,TRAPBOUNCE_error_code(%rdx)
- movq TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_eip(%rsi),%rax
- testb $4,TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_flags(%rsi)
+ movq X86_EXC_GP * TRAPINFO_sizeof + TRAPINFO_eip(%rsi),%rax
+ testb $4, X86_EXC_GP * TRAPINFO_sizeof + TRAPINFO_flags(%rsi)
setnz %cl
leal TBF_EXCEPTION|TBF_EXCEPTION_ERRCODE(,%rcx,TBF_INTERRUPT),%ecx
UNLIKELY_END(sysenter_gpf)
@@ -464,7 +464,7 @@ int80_slow_path:
* IDT entry with DPL==0.
*/
movl $((0x80 << 3) | X86_XEC_IDT),UREGS_error_code(%rsp)
- movl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
+ movl $X86_EXC_GP, UREGS_entry_vector(%rsp)
/* A GPF wouldn't have incremented the instruction pointer. */
subq $2,UREGS_rip(%rsp)
/*
@@ -650,7 +650,7 @@ ret_from_intr:
.section .init.text, "ax", @progbits
ENTRY(early_page_fault)
ENDBR64
- movl $TRAP_page_fault, 4(%rsp)
+ movl $X86_EXC_PF, 4(%rsp)
SAVE_ALL
movq %rsp, %rdi
call do_early_page_fault
@@ -710,7 +710,7 @@ ENTRY(common_interrupt)
ENTRY(page_fault)
ENDBR64
- movl $TRAP_page_fault,4(%rsp)
+ movl $X86_EXC_PF, 4(%rsp)
/* No special register assumptions. */
GLOBAL(handle_exception)
ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
@@ -768,13 +768,13 @@ handle_exception_saved:
jnz .Lcr4_pv32_done
/*
* The below effectively is
- * if ( regs->entry_vector == TRAP_page_fault &&
+ * if ( regs->entry_vector == X86_EXC_PF &&
* (regs->error_code & PFEC_page_present) &&
* !(regs->error_code & ~(PFEC_write_access|PFEC_insn_fetch)) )
* goto compat_test_all_events;
*/
mov $PFEC_page_present,%al
- cmpb $TRAP_page_fault,UREGS_entry_vector(%rsp)
+ cmpb $X86_EXC_PF, UREGS_entry_vector(%rsp)
jne .Lcr4_pv32_done
xor UREGS_error_code(%rsp),%eax
test $~(PFEC_write_access|PFEC_insn_fetch),%eax
@@ -886,80 +886,80 @@ FATAL_exception_with_ints_disabled:
ENTRY(divide_error)
ENDBR64
pushq $0
- movl $TRAP_divide_error,4(%rsp)
+ movl $X86_EXC_DE, 4(%rsp)
jmp handle_exception
ENTRY(coprocessor_error)
ENDBR64
pushq $0
- movl $TRAP_copro_error,4(%rsp)
+ movl $X86_EXC_MF, 4(%rsp)
jmp handle_exception
ENTRY(simd_coprocessor_error)
ENDBR64
pushq $0
- movl $TRAP_simd_error,4(%rsp)
+ movl $X86_EXC_XM, 4(%rsp)
jmp handle_exception
ENTRY(device_not_available)
ENDBR64
pushq $0
- movl $TRAP_no_device,4(%rsp)
+ movl $X86_EXC_NM, 4(%rsp)
jmp handle_exception
ENTRY(debug)
ENDBR64
pushq $0
- movl $TRAP_debug,4(%rsp)
+ movl $X86_EXC_DB, 4(%rsp)
jmp handle_ist_exception
ENTRY(int3)
ENDBR64
pushq $0
- movl $TRAP_int3,4(%rsp)
+ movl $X86_EXC_BP, 4(%rsp)
jmp handle_exception
ENTRY(overflow)
ENDBR64
pushq $0
- movl $TRAP_overflow,4(%rsp)
+ movl $X86_EXC_OF, 4(%rsp)
jmp handle_exception
ENTRY(bounds)
ENDBR64
pushq $0
- movl $TRAP_bounds,4(%rsp)
+ movl $X86_EXC_BR, 4(%rsp)
jmp handle_exception
ENTRY(invalid_op)
ENDBR64
pushq $0
- movl $TRAP_invalid_op,4(%rsp)
+ movl $X86_EXC_UD, 4(%rsp)
jmp handle_exception
ENTRY(invalid_TSS)
ENDBR64
- movl $TRAP_invalid_tss,4(%rsp)
+ movl $X86_EXC_TS, 4(%rsp)
jmp handle_exception
ENTRY(segment_not_present)
ENDBR64
- movl $TRAP_no_segment,4(%rsp)
+ movl $X86_EXC_NP, 4(%rsp)
jmp handle_exception
ENTRY(stack_segment)
ENDBR64
- movl $TRAP_stack_error,4(%rsp)
+ movl $X86_EXC_SS, 4(%rsp)
jmp handle_exception
ENTRY(general_protection)
ENDBR64
- movl $TRAP_gp_fault,4(%rsp)
+ movl $X86_EXC_GP, 4(%rsp)
jmp handle_exception
ENTRY(alignment_check)
ENDBR64
- movl $TRAP_alignment_check,4(%rsp)
+ movl $X86_EXC_AC, 4(%rsp)
jmp handle_exception
ENTRY(entry_CP)
@@ -969,7 +969,7 @@ ENTRY(entry_CP)
ENTRY(double_fault)
ENDBR64
- movl $TRAP_double_fault,4(%rsp)
+ movl $X86_EXC_DF, 4(%rsp)
/* Set AC to reduce chance of further SMAP faults */
ALTERNATIVE "", stac, X86_FEATURE_XEN_SMAP
SAVE_ALL
@@ -995,7 +995,7 @@ ENTRY(double_fault)
ENTRY(nmi)
ENDBR64
pushq $0
- movl $TRAP_nmi,4(%rsp)
+ movl $X86_EXC_NMI, 4(%rsp)
handle_ist_exception:
ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
SAVE_ALL
@@ -1091,7 +1091,7 @@ handle_ist_exception:
.L_ist_dispatch_done:
mov %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
mov %bl, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
- cmpb $TRAP_nmi,UREGS_entry_vector(%rsp)
+ cmpb $X86_EXC_NMI, UREGS_entry_vector(%rsp)
jne ret_from_intr
/* We want to get straight to the IRET on the NMI exit path. */
@@ -1124,7 +1124,7 @@ handle_ist_exception:
ENTRY(machine_check)
ENDBR64
pushq $0
- movl $TRAP_machine_check,4(%rsp)
+ movl $X86_EXC_MC, 4(%rsp)
jmp handle_ist_exception
/* No op trap handler. Required for kexec crash path. */
@@ -1168,7 +1168,7 @@ autogen_stubs: /* Automatically generated stubs. */
/* Reserved exceptions, heading towards do_unhandled_trap(). */
.elseif vec == X86_EXC_CSO || vec == X86_EXC_SPV || \
- vec == X86_EXC_VE || (vec > X86_EXC_CP && vec < TRAP_nr)
+ vec == X86_EXC_VE || (vec > X86_EXC_CP && vec < X86_EXC_NUM)
1:
ENDBR64
diff --git a/xen/arch/x86/x86_emulate/util-xen.c b/xen/arch/x86/x86_emulate/util-xen.c
index 04b1cb8ca0..95c15492a8 100644
--- a/xen/arch/x86/x86_emulate/util-xen.c
+++ b/xen/arch/x86/x86_emulate/util-xen.c
@@ -138,7 +138,7 @@ int cf_check x86emul_read_xcr(unsigned int reg, uint64_t *val,
break;
/* fall through */
default:
- x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
+ x86_emul_hw_exception(X86_EXC_GP, 0, ctxt);
return X86EMUL_EXCEPTION;
}
@@ -159,7 +159,7 @@ int cf_check x86emul_write_xcr(unsigned int reg, uint64_t val,
default:
gp_fault:
if ( ctxt )
- x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
+ x86_emul_hw_exception(X86_EXC_GP, 0, ctxt);
return X86EMUL_EXCEPTION;
}
@@ -207,7 +207,7 @@ int cf_check x86emul_read_dr(unsigned int reg, unsigned long *val,
ud_fault:
default:
if ( ctxt )
- x86_emul_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC, ctxt);
+ x86_emul_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC, ctxt);
return X86EMUL_EXCEPTION;
}
@@ -229,11 +229,11 @@ int cf_check x86emul_write_dr(unsigned int reg, unsigned long val,
return X86EMUL_OKAY;
case -ENODEV:
- x86_emul_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC, ctxt);
+ x86_emul_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC, ctxt);
return X86EMUL_EXCEPTION;
default:
- x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
+ x86_emul_hw_exception(X86_EXC_GP, 0, ctxt);
return X86EMUL_EXCEPTION;
}
}
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h b/xen/arch/x86/x86_emulate/x86_emulate.h
index 0139d16da7..c713e425c0 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -90,7 +90,7 @@ struct x86_event {
uint8_t type; /* X86_EVENTTYPE_* */
uint8_t insn_len; /* Instruction length */
int32_t error_code; /* X86_EVENT_NO_EC if n/a */
- unsigned long cr2; /* Only for TRAP_page_fault h/w exception */
+ unsigned long cr2; /* Only for X86_EXC_PF h/w exception */
};
/*
@@ -838,7 +838,7 @@ static inline void x86_emul_pagefault(
{
ASSERT(!ctxt->event_pending);
- ctxt->event.vector = 14; /* TRAP_page_fault */
+ ctxt->event.vector = X86_EXC_PF;
ctxt->event.type = X86_EVENTTYPE_HW_EXCEPTION;
ctxt->event.error_code = error_code;
ctxt->event.cr2 = cr2;
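As a closing usage sketch of struct x86_event: the x86_emul_pagefault()
body shown in part above amounts to filling the event as below
(insn_len left zero, the ASSERT elided; error_code and cr2 stand for
the function's parameters):

    /* Sketch: the #PF event as x86_emul_pagefault() fills it in. */
    struct x86_event ev = {
        .vector     = X86_EXC_PF,
        .type       = X86_EVENTTYPE_HW_EXCEPTION,
        .error_code = error_code, /* PFEC_* bits */
        .cr2        = cr2,        /* faulting linear address */
    };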
--
generated by git-patchbot for /home/xen/git/xen.git#master