
[Xen-devel] [PATCH for-4.5 RFC v2] x86/HVM: Unconditionally crash guests on repeated vmentry failures



A failed vmentry is overwhelmingly likely to be caused by corrupt VMC[SB]
state.  As a result, injecting a fault and retrying the vmentry is likely
to fail in the same way.

With this new logic, a guest will unconditionally be crashed if it has
suffered two repeated vmentry failures, even if it is in usermode.  This
prevents an infinite loop in Xen where attempting to inject a #UD is not
sufficient to prevent the vmentry failure.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Keir Fraser <keir@xxxxxxx>
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>

---

This is RFC as there is still a niggle.  I tested this via a partial revert of
the XSA-110 fix, but the result is quite chatty given the double VMCB dump and
domain crash.  However, I am not sure we want to make any vmentry failure
quiet, as any vmentry failure constitutes a Xen bug.

Konrad: A hypervisor infinite loop is quite bad, so I am requesting a release
ack for this in its eventual form.  An alternative would be to revert
28b4baacd5 wholesale, but most of it is good.
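
For reference, the decision both vendor hooks now implement boils down to the
sketch below.  The cpl parameter stands in for the vendor-specific CPL lookup
(vmcb_get_cpl() on SVM, SS.DPL on VMX), so treat this as illustrative rather
than an exact copy of either hunk:

/*
 * Illustrative sketch only, not part of the patch: crash on a repeated
 * vmentry failure or a kernel-mode origin, otherwise inject #UD and retry.
 */
static void crash_or_fault_sketch(struct vcpu *v, unsigned int cpl)
{
    if ( (v->arch.hvm_vcpu.vmentry_failure_count > 1) || (cpl == 0) )
        domain_crash(v->domain);
    else
        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
}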
---
 xen/arch/x86/hvm/svm/svm.c     |   16 ++++++++++++----
 xen/arch/x86/hvm/vmx/vmx.c     |   19 +++++++++++++++----
 xen/include/asm-x86/hvm/vcpu.h |    3 +++
 3 files changed, 30 insertions(+), 8 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 9398690..c42ec6d 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -90,13 +90,17 @@ static bool_t amd_erratum383_found __read_mostly;
 static uint64_t osvw_length, osvw_status;
 static DEFINE_SPINLOCK(osvw_lock);
 
-/* Only crash the guest if the problem originates in kernel mode. */
+/*
+ * Only crash the guest if the problem originates in kernel mode, or we have
+ * had repeated vmentry failures.
+ */
 static void svm_crash_or_fault(struct vcpu *v)
 {
-    if ( vmcb_get_cpl(v->arch.hvm_svm.vmcb) )
-        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
-    else
+    if ( (v->arch.hvm_vcpu.vmentry_failure_count > 1) ||
+         (vmcb_get_cpl(v->arch.hvm_svm.vmcb) == 0) )
         domain_crash(v->domain);
+    else
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
 }
 
 void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len)
@@ -2395,9 +2399,13 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
 
     if ( unlikely(exit_reason == VMEXIT_INVALID) )
     {
+        v->arch.hvm_vcpu.vmentry_failure_count++;
+
         svm_vmcb_dump(__func__, vmcb);
         goto exit_and_crash;
     }
+    else
+        v->arch.hvm_vcpu.vmentry_failure_count = 0;
 
     perfc_incra(svmexits, exit_reason);
 
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 2907afa..e50c8a3 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -134,16 +134,21 @@ static void vmx_vcpu_destroy(struct vcpu *v)
     passive_domain_destroy(v);
 }
 
-/* Only crash the guest if the problem originates in kernel mode. */
+/*
+ * Only crash the guest if the problem originates in kernel mode, or we have
+ * had repeated vmentry failures.
+ */
 static void vmx_crash_or_fault(struct vcpu *v)
 {
     struct segment_register ss;
 
     vmx_get_segment_register(v, x86_seg_ss, &ss);
-    if ( ss.attr.fields.dpl )
-        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
-    else
+
+    if ( (v->arch.hvm_vcpu.vmentry_failure_count > 1) ||
+         (ss.attr.fields.dpl == 0) )
         domain_crash(v->domain);
+    else
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
 }
 
 static DEFINE_PER_CPU(struct vmx_msr_state, host_msr_state);
@@ -2722,7 +2727,13 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
     }
 
     if ( unlikely(exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) )
+    {
+        v->arch.hvm_vcpu.vmentry_failure_count++;
+
         return vmx_failed_vmentry(exit_reason, regs);
+    }
+    else
+        v->arch.hvm_vcpu.vmentry_failure_count = 0;
 
     if ( v->arch.hvm_vmx.vmx_realmode )
     {
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 01e0665..3a9d521 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -159,6 +159,9 @@ struct hvm_vcpu {
         struct arch_svm_struct svm;
     } u;
 
+    /* Number of repeated vmentry failures. */
+    unsigned int        vmentry_failure_count;
+
     struct tasklet      assert_evtchn_irq_tasklet;
 
     struct nestedvcpu   nvcpu;
-- 
1.7.10.4

