[PATCH 3/3] svm/vnmi: Add support for the SVM Virtual NMI
From: Abdelkareem Abdelsaamad <abdelkareem.abdelsaamad@xxxxxxxxxx>
With vNMI, the pending NMI is simply stuffed into the VMCB and handed off
to the hardware. This means that Xen needs to be able to set a vNMI pending
on-demand, and also query if a vNMI is pending, e.g. to honor the "at most
one NMI pending" rule and to preserve all NMIs across save and restore.
Introduce two new hvm_function_table callbacks in support of SVM vNMI: one
that lets Xen query whether a vNMI is pending, and one that sets the VMCB's
_vintr pending flag so that the NMI is serviced by hardware if/when virtual
NMIs become unblocked.
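For reference, the vNMI controls live in the VMCB's VINTR field (offset 0x60
of the control area).  A rough sketch of the bits involved, per my reading of
the AMD APM (bit positions given for illustration only, not taken from this
patch):

  #define V_NMI_PENDING   (1ULL << 11)  /* _vintr.fields.vnmi_pending  */
  #define V_NMI_BLOCKING  (1ULL << 12)  /* _vintr.fields.vnmi_blocking */
  #define V_NMI_ENABLE    (1ULL << 26)  /* _vintr.fields.vnmi_enable   */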
Signed-off-by: Abdelkareem Abdelsaamad <abdelkareem.abdelsaamad@xxxxxxxxxx>
---
xen/arch/x86/hvm/hvm.c | 29 +++++++++++++++++++++--------
xen/arch/x86/hvm/svm/intr.c | 16 ++++++++++++++--
xen/arch/x86/hvm/svm/svm.c | 23 ++++++++++++++++++++++-
xen/arch/x86/hvm/svm/vmcb.c | 3 +++
xen/arch/x86/include/asm/hvm/hvm.h | 8 ++++++++
5 files changed, 68 insertions(+), 11 deletions(-)
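
For reviewers new to vNMI, the hardware behaviour the two callbacks rely on
can be modelled roughly as follows.  This is an illustrative sketch assuming
the AMD APM semantics; the struct and function names are invented for the
example and appear nowhere in this series:

  #include <stdbool.h>

  /* Toy model of the vNMI state kept in the VMCB VINTR field. */
  struct vintr_model {
      bool vnmi_enable, vnmi_pending, vnmi_blocking;
  };

  /* What hardware does at an NMI delivery boundary (e.g. after VMRUN). */
  static bool vnmi_try_deliver(struct vintr_model *v)
  {
      if ( !v->vnmi_enable || !v->vnmi_pending || v->vnmi_blocking )
          return false;

      v->vnmi_pending = false;   /* the one-deep NMI queue drains...       */
      v->vnmi_blocking = true;   /* ...and NMIs stay masked until IRET     */
      return true;               /* delivered via the guest IDT, no VMEXIT */
  }

The guest's IRET clears vnmi_blocking again, at which point a further pending
vNMI (if any) can be taken.  This is why svm_inject_nmi() below can simply
set the pending flag and return.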
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 4d37a93c57..3117ef8f2f 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3904,10 +3904,30 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
}
}
-enum hvm_intblk hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack)
+static enum hvm_intblk hvm_nmi_blocked(struct vcpu *v)
{
unsigned long intr_shadow;
+    if ( hvm_funcs.caps.vNMI )
+    {
+        /* Only an already-pending vNMI blocks queueing another NMI. */
+        if ( !hvm_funcs.is_vnmi_pending(v) )
+            return hvm_intblk_none;
+
+        return hvm_intblk_vnmi;
+    }
+
+    intr_shadow = alternative_call(hvm_funcs.get_interrupt_shadow, v);
+
+    if ( intr_shadow & (HVM_INTR_SHADOW_STI|HVM_INTR_SHADOW_MOV_SS) )
+        return hvm_intblk_shadow;
+
+    return ((intr_shadow & HVM_INTR_SHADOW_NMI) ?
+            hvm_intblk_nmi_iret : hvm_intblk_none);
+}
+
+enum hvm_intblk hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack)
+{
ASSERT(v == current);
if ( nestedhvm_enabled(v->domain) ) {
@@ -3922,14 +3942,8 @@ enum hvm_intblk hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack)
!(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
return hvm_intblk_rflags_ie;
-    intr_shadow = alternative_call(hvm_funcs.get_interrupt_shadow, v);
-
-    if ( intr_shadow & (HVM_INTR_SHADOW_STI|HVM_INTR_SHADOW_MOV_SS) )
-        return hvm_intblk_shadow;
-
     if ( intack.source == hvm_intsrc_nmi )
-        return ((intr_shadow & HVM_INTR_SHADOW_NMI) ?
-                hvm_intblk_nmi_iret : hvm_intblk_none);
+        return hvm_nmi_blocked(v);
if ( intack.source == hvm_intsrc_lapic )
{
diff --git a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c
index 6453a46b85..c54298c402 100644
--- a/xen/arch/x86/hvm/svm/intr.c
+++ b/xen/arch/x86/hvm/svm/intr.c
@@ -29,10 +29,20 @@
static void svm_inject_nmi(struct vcpu *v)
{
-    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
-    u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
+    struct vmcb_struct *vmcb;
+    u32 general1_intercepts;
     intinfo_t event;

+    if ( hvm_funcs.caps.vNMI )
+    {
+        /* Queue the NMI in the VMCB; hardware delivers it when unblocked. */
+        hvm_funcs.set_vnmi_pending(v);
+        return;
+    }
+
+    vmcb = v->arch.hvm.svm.vmcb;
+    general1_intercepts = vmcb_get_general1_intercepts(vmcb);
+
event.raw = 0;
event.v = true;
event.type = X86_ET_NMI;
@@ -176,6 +186,10 @@ void asmlinkage svm_intr_assist(void)
}
}
+    /* A vNMI is already pending in hardware; nothing more to do here. */
+    if ( unlikely(intblk == hvm_intblk_vnmi) )
+        return;
+
/*
* Pending IRQs must be delayed if:
* 1. An event is already pending. This is despite the fact that SVM
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index e451937b04..f67869353b 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -297,6 +297,27 @@ void svm_intercept_msr(struct vcpu *v, uint32_t msr, int flags)
__clear_bit(msr * 2 + 1, msr_bit);
}
+static bool cf_check svm_is_vnmi_pending(struct vcpu *vcpu)
+{
+    if ( !hvm_funcs.caps.vNMI )
+        return false;
+
+    return vcpu->arch.hvm.svm.vmcb->_vintr.fields.vnmi_pending;
+}
+
+static bool cf_check svm_set_vnmi_pending(struct vcpu *vcpu)
+{
+    if ( !hvm_funcs.caps.vNMI )
+        return false;
+
+    /* Honour the "at most one NMI pending" rule. */
+    if ( vcpu->arch.hvm.svm.vmcb->_vintr.fields.vnmi_pending )
+        return false;
+
+    vcpu->arch.hvm.svm.vmcb->_vintr.fields.vnmi_pending = 1;
+    return true;
+}
+
#ifdef CONFIG_VM_EVENT
static void cf_check svm_enable_msr_interception(struct domain *d, uint32_t msr)
{
@@ -2464,7 +2485,9 @@ static struct hvm_function_table __initdata_cf_clobber svm_function_table = {
#endif
.set_rdtsc_exiting = svm_set_rdtsc_exiting,
.get_insn_bytes = svm_get_insn_bytes,
+    .is_vnmi_pending = svm_is_vnmi_pending,
+    .set_vnmi_pending = svm_set_vnmi_pending,

.nhvm_vcpu_initialise = nsvm_vcpu_initialise,
.nhvm_vcpu_destroy = nsvm_vcpu_destroy,
.nhvm_vcpu_reset = nsvm_vcpu_reset,
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index e583ef8548..5069789f6c 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -184,6 +184,9 @@ static int construct_vmcb(struct vcpu *v)
if ( default_xen_spec_ctrl == SPEC_CTRL_STIBP )
v->arch.msrs->spec_ctrl.raw = SPEC_CTRL_STIBP;
+    if ( hvm_funcs.caps.vNMI )
+        vmcb->_vintr.fields.vnmi_enable = 1;
+
return 0;
}
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index ad17ea73e9..d7f439d41b 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -67,6 +67,7 @@ enum hvm_intblk {
hvm_intblk_rflags_ie, /* RFLAGS.IE == 0 */
hvm_intblk_tpr, /* LAPIC TPR too high */
hvm_intblk_nmi_iret, /* NMI blocked until IRET */
+    hvm_intblk_vnmi,      /* vNMI already pending in hardware */
hvm_intblk_arch, /* SVM/VMX specific reason */
};
@@ -224,6 +225,13 @@ struct hvm_function_table {
int (*pi_update_irte)(const struct vcpu *v, const struct pirq *pirq,
uint8_t gvec);
void (*update_vlapic_mode)(struct vcpu *v);
+    /* Whether or not a virtual NMI is pending in hardware. */
+    bool (*is_vnmi_pending)(struct vcpu *vcpu);
+    /*
+     * Attempt to pend a virtual NMI in hardware.
+     * Returns true on success.
+     */
+    bool (*set_vnmi_pending)(struct vcpu *vcpu);
/*Walk nested p2m */
int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa,
--
2.52.0