|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [xen staging] x86/svm: Drop emulation of Intel's SYSENTER MSR behaviour
commit fc30191a6a5ff805bcef767d5b3ae5022b41c4d5
Author: Alejandro Vallejo <alejandro.garciavallejo@xxxxxxx>
AuthorDate: Thu Mar 12 12:21:13 2026 +0100
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Apr 9 08:23:17 2026 +0200
x86/svm: Drop emulation of Intel's SYSENTER MSR behaviour
With cross-vendor support gone, it's no longer needed.
AMD CPUs ignore the top 32 bits of the SYSENTER/SYSEXIT MSRs, which is
not how this emulation worked due to the need for cross-vendor support.
Any AMD VMs storing state in the top 32 bits of the SEP MSRs will lose
it.
It's very unlikely to affect any production VM because having 64-bit width
just isn't how real AMD CPUs behave.
Signed-off-by: Alejandro Vallejo <alejandro.garciavallejo@xxxxxxx>
Reviewed-by: Teddy Astie <teddy.astie@xxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
xen/arch/x86/hvm/svm/svm.c | 42 +++++++++++++++-----------------
xen/arch/x86/hvm/svm/vmcb.c | 3 +++
xen/arch/x86/include/asm/hvm/svm-types.h | 10 --------
3 files changed, 22 insertions(+), 33 deletions(-)
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index d4502d3c0f..ced6166847 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -355,10 +355,6 @@ static int svm_vmcb_save(struct vcpu *v, struct hvm_hw_cpu
*c)
{
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
- c->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs;
- c->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp;
- c->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip;
-
if ( vmcb->event_inj.v &&
hvm_event_needs_reinjection(vmcb->event_inj.type,
vmcb->event_inj.vector) )
@@ -422,11 +418,6 @@ static int svm_vmcb_restore(struct vcpu *v, struct
hvm_hw_cpu *c)
svm_update_guest_cr(v, 0, 0);
svm_update_guest_cr(v, 4, 0);
- /* Load sysenter MSRs into both VMCB save area and VCPU fields. */
- vmcb->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs = c->sysenter_cs;
- vmcb->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp = c->sysenter_esp;
- vmcb->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip = c->sysenter_eip;
-
if ( paging_mode_hap(v->domain) )
{
vmcb_set_np(vmcb, true);
@@ -455,6 +446,9 @@ static void svm_save_cpu_state(struct vcpu *v, struct
hvm_hw_cpu *data)
{
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
+ data->sysenter_cs = vmcb->sysenter_cs;
+ data->sysenter_esp = vmcb->sysenter_esp;
+ data->sysenter_eip = vmcb->sysenter_eip;
data->shadow_gs = vmcb->kerngsbase;
data->msr_lstar = vmcb->lstar;
data->msr_star = vmcb->star;
@@ -466,11 +460,14 @@ static void svm_load_cpu_state(struct vcpu *v, struct
hvm_hw_cpu *data)
{
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
- vmcb->kerngsbase = data->shadow_gs;
- vmcb->lstar = data->msr_lstar;
- vmcb->star = data->msr_star;
- vmcb->cstar = data->msr_cstar;
- vmcb->sfmask = data->msr_syscall_mask;
+ vmcb->lstar = data->msr_lstar;
+ vmcb->star = data->msr_star;
+ vmcb->cstar = data->msr_cstar;
+ vmcb->sfmask = data->msr_syscall_mask;
+ vmcb->kerngsbase = data->shadow_gs;
+ vmcb->sysenter_cs = data->sysenter_cs;
+ vmcb->sysenter_esp = data->sysenter_esp;
+ vmcb->sysenter_eip = data->sysenter_eip;
v->arch.hvm.guest_efer = data->msr_efer;
svm_update_guest_efer(v);
}
@@ -1663,12 +1660,9 @@ static int cf_check svm_msr_read_intercept(
switch ( msr )
{
- /*
- * Sync not needed while the cross-vendor logic is in unilateral
effect.
case MSR_IA32_SYSENTER_CS:
case MSR_IA32_SYSENTER_ESP:
case MSR_IA32_SYSENTER_EIP:
- */
case MSR_STAR:
case MSR_LSTAR:
case MSR_CSTAR:
@@ -1683,13 +1677,15 @@ static int cf_check svm_msr_read_intercept(
switch ( msr )
{
case MSR_IA32_SYSENTER_CS:
- *msr_content = v->arch.hvm.svm.guest_sysenter_cs;
+ *msr_content = vmcb->sysenter_cs;
break;
+
case MSR_IA32_SYSENTER_ESP:
- *msr_content = v->arch.hvm.svm.guest_sysenter_esp;
+ *msr_content = vmcb->sysenter_esp;
break;
+
case MSR_IA32_SYSENTER_EIP:
- *msr_content = v->arch.hvm.svm.guest_sysenter_eip;
+ *msr_content = vmcb->sysenter_eip;
break;
case MSR_STAR:
@@ -1883,11 +1879,11 @@ static int cf_check svm_msr_write_intercept(
switch ( msr )
{
case MSR_IA32_SYSENTER_ESP:
- vmcb->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp =
msr_content;
+ vmcb->sysenter_esp = msr_content;
break;
case MSR_IA32_SYSENTER_EIP:
- vmcb->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip =
msr_content;
+ vmcb->sysenter_eip = msr_content;
break;
case MSR_LSTAR:
@@ -1913,7 +1909,7 @@ static int cf_check svm_msr_write_intercept(
break;
case MSR_IA32_SYSENTER_CS:
- vmcb->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs = msr_content;
+ vmcb->sysenter_cs = msr_content;
break;
case MSR_STAR:
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 5ed7123d9a..514e530cbd 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -97,6 +97,9 @@ static int construct_vmcb(struct vcpu *v)
svm_disable_intercept_for_msr(v, MSR_LSTAR);
svm_disable_intercept_for_msr(v, MSR_STAR);
svm_disable_intercept_for_msr(v, MSR_SYSCALL_MASK);
+ svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_CS);
+ svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP);
+ svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_ESP);
vmcb->_msrpm_base_pa = virt_to_maddr(svm->msrpm);
vmcb->_iopm_base_pa = __pa(v->domain->arch.hvm.io_bitmap);
diff --git a/xen/arch/x86/include/asm/hvm/svm-types.h
b/xen/arch/x86/include/asm/hvm/svm-types.h
index 3ede62cade..8acadb9dcc 100644
--- a/xen/arch/x86/include/asm/hvm/svm-types.h
+++ b/xen/arch/x86/include/asm/hvm/svm-types.h
@@ -27,16 +27,6 @@ struct svm_vcpu {
/* VMCB has a cached instruction from #PF/#NPF Decode Assist? */
uint8_t cached_insn_len; /* Zero if no cached instruction. */
-
- /*
- * Upper four bytes are undefined in the VMCB, therefore we can't use the
- * fields in the VMCB. Write a 64bit value and then read a 64bit value is
- * fine unless there's a VMRUN/VMEXIT in between which clears the upper
- * four bytes.
- */
- uint64_t guest_sysenter_cs;
- uint64_t guest_sysenter_esp;
- uint64_t guest_sysenter_eip;
};
struct nestedsvm {
--
generated by git-patchbot for /home/xen/git/xen.git#staging
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |