|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v2] x86/vtx: Introduce a typed union for CR access exit information
This reduces code volume, and yields a minor improvement in compiled size,
probably due to the removal of several temporary variables.
add/remove: 0/0 grow/shrink: 0/2 up/down: 0/-50 (-50)
function old new delta
vmx_vmexit_handler 6881 6878 -3
nvmx_n2_vmexit_handler 3473 3426 -47
Take the opportunity to make some style corrections, and add some
ASSERT_UNREACHABLE()s in appropriate places.
No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
v2:
* Fix CR3/CR8 copy/paste mistake in nvmx_n2_vmexit_handler()
* Assert that we don't see type CLTS/LMSW for control registers other than cr0.
---
xen/arch/x86/hvm/vmx/vmx.c | 33 ++++++++++++++--------------
xen/arch/x86/hvm/vmx/vvmx.c | 45 ++++++++++++++++++++++-----------------
xen/include/asm-x86/hvm/vmx/vmx.h | 31 ++++++++++++++++-----------
3 files changed, 61 insertions(+), 48 deletions(-)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index c5cc963..b23a7f8 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2561,23 +2561,20 @@ static int vmx_vmfunc_intercept(struct cpu_user_regs *regs)
return X86EMUL_EXCEPTION;
}
-static int vmx_cr_access(unsigned long exit_qualification)
+static int vmx_cr_access(cr_access_qual_t qual)
{
struct vcpu *curr = current;
- switch ( VMX_CONTROL_REG_ACCESS_TYPE(exit_qualification) )
+ switch ( qual.access_type )
+ {
+ case VMX_CR_ACCESS_TYPE_MOV_TO_CR:
+ return hvm_mov_to_cr(qual.cr, qual.gpr);
+
+ case VMX_CR_ACCESS_TYPE_MOV_FROM_CR:
+ return hvm_mov_from_cr(qual.cr, qual.gpr);
+
+ case VMX_CR_ACCESS_TYPE_CLTS:
{
- case VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR: {
- unsigned long gp = VMX_CONTROL_REG_ACCESS_GPR(exit_qualification);
- unsigned long cr = VMX_CONTROL_REG_ACCESS_NUM(exit_qualification);
- return hvm_mov_to_cr(cr, gp);
- }
- case VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR: {
- unsigned long gp = VMX_CONTROL_REG_ACCESS_GPR(exit_qualification);
- unsigned long cr = VMX_CONTROL_REG_ACCESS_NUM(exit_qualification);
- return hvm_mov_from_cr(cr, gp);
- }
- case VMX_CONTROL_REG_ACCESS_TYPE_CLTS: {
unsigned long old = curr->arch.hvm_vcpu.guest_cr[0];
unsigned long value = old & ~X86_CR0_TS;
@@ -2592,13 +2589,15 @@ static int vmx_cr_access(unsigned long exit_qualification)
HVMTRACE_0D(CLTS);
break;
}
- case VMX_CONTROL_REG_ACCESS_TYPE_LMSW: {
+
+ case VMX_CR_ACCESS_TYPE_LMSW:
+ {
unsigned long value = curr->arch.hvm_vcpu.guest_cr[0];
int rc;
/* LMSW can (1) set PE; (2) set or clear MP, EM, and TS. */
value = (value & ~(X86_CR0_MP|X86_CR0_EM|X86_CR0_TS)) |
- (VMX_CONTROL_REG_ACCESS_DATA(exit_qualification) &
+ (qual.lmsw_data &
(X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS));
HVMTRACE_LONG_1D(LMSW, value);
@@ -2607,8 +2606,10 @@ static int vmx_cr_access(unsigned long exit_qualification)
return rc;
}
+
default:
- BUG();
+ ASSERT_UNREACHABLE();
+ return X86EMUL_UNHANDLEABLE;
}
return X86EMUL_OKAY;
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index dcd3b28..030d8ac 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -2448,27 +2448,28 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
break;
case EXIT_REASON_CR_ACCESS:
{
- unsigned long exit_qualification;
- int cr, write;
+ cr_access_qual_t qual;
u32 mask = 0;
- __vmread(EXIT_QUALIFICATION, &exit_qualification);
- cr = VMX_CONTROL_REG_ACCESS_NUM(exit_qualification);
- write = VMX_CONTROL_REG_ACCESS_TYPE(exit_qualification);
+ __vmread(EXIT_QUALIFICATION, &qual.raw);
/* also according to guest exec_control */
ctrl = __n2_exec_control(v);
- if ( cr == 3 )
+ /* CLTS/LMSW strictly act on CR0 */
+ if ( qual.access_type >= VMX_CR_ACCESS_TYPE_CLTS )
+ ASSERT(qual.cr == 0);
+
+ if ( qual.cr == 3 )
{
- mask = write? CPU_BASED_CR3_STORE_EXITING:
- CPU_BASED_CR3_LOAD_EXITING;
+ mask = qual.access_type ? CPU_BASED_CR3_STORE_EXITING
+ : CPU_BASED_CR3_LOAD_EXITING;
if ( ctrl & mask )
nvcpu->nv_vmexit_pending = 1;
}
- else if ( cr == 8 )
+ else if ( qual.cr == 8 )
{
- mask = write? CPU_BASED_CR8_STORE_EXITING:
- CPU_BASED_CR8_LOAD_EXITING;
+ mask = qual.access_type ? CPU_BASED_CR8_STORE_EXITING
+ : CPU_BASED_CR8_LOAD_EXITING;
if ( ctrl & mask )
nvcpu->nv_vmexit_pending = 1;
}
@@ -2481,14 +2482,14 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
* Otherwise, L0 will handle it and sync the value to L1 virtual VMCS.
*/
unsigned long old_val, val, changed_bits;
- switch ( VMX_CONTROL_REG_ACCESS_TYPE(exit_qualification) )
+
+ switch ( qual.access_type )
{
- case VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR:
+ case VMX_CR_ACCESS_TYPE_MOV_TO_CR:
{
- unsigned long gp = VMX_CONTROL_REG_ACCESS_GPR(exit_qualification);
- val = *decode_gpr(guest_cpu_user_regs(), gp);
+ val = *decode_gpr(guest_cpu_user_regs(), qual.gpr);
- if ( cr == 0 )
+ if ( qual.cr == 0 )
{
u64 cr0_gh_mask = get_vvmcs(v, CR0_GUEST_HOST_MASK);
@@ -2504,7 +2505,7 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
(guest_cr0 & cr0_gh_mask) | (val & ~cr0_gh_mask));
}
}
- else if ( cr == 4 )
+ else if ( qual.cr == 4 )
{
u64 cr4_gh_mask = get_vvmcs(v, CR4_GUEST_HOST_MASK);
@@ -2524,7 +2525,8 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
nvcpu->nv_vmexit_pending = 1;
break;
}
- case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
+
+ case VMX_CR_ACCESS_TYPE_CLTS:
{
u64 cr0_gh_mask = get_vvmcs(v, CR0_GUEST_HOST_MASK);
@@ -2538,13 +2540,14 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
}
break;
}
- case VMX_CONTROL_REG_ACCESS_TYPE_LMSW:
+
+ case VMX_CR_ACCESS_TYPE_LMSW:
{
u64 cr0_gh_mask = get_vvmcs(v, CR0_GUEST_HOST_MASK);
__vmread(CR0_READ_SHADOW, &old_val);
old_val &= X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS;
- val = VMX_CONTROL_REG_ACCESS_DATA(exit_qualification) &
+ val = qual.lmsw_data &
(X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS);
changed_bits = old_val ^ val;
if ( changed_bits & cr0_gh_mask )
@@ -2557,7 +2560,9 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
}
break;
}
+
default:
+ ASSERT_UNREACHABLE();
break;
}
}
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index af6fe7c..89619e4 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -232,18 +232,25 @@ static inline void pi_clear_sn(struct pi_desc *pi_desc)
/*
* Exit Qualifications for MOV for Control Register Access
*/
- /* 3:0 - control register number (CRn) */
-#define VMX_CONTROL_REG_ACCESS_NUM(eq) ((eq) & 0xf)
- /* 5:4 - access type (CR write, CR read, CLTS, LMSW) */
-#define VMX_CONTROL_REG_ACCESS_TYPE(eq) (((eq) >> 4) & 0x3)
-# define VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR 0
-# define VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR 1
-# define VMX_CONTROL_REG_ACCESS_TYPE_CLTS 2
-# define VMX_CONTROL_REG_ACCESS_TYPE_LMSW 3
- /* 11:8 - general purpose register operand */
-#define VMX_CONTROL_REG_ACCESS_GPR(eq) (((eq) >> 8) & 0xf)
- /* 31:16 - LMSW source data */
-#define VMX_CONTROL_REG_ACCESS_DATA(eq) ((uint32_t)(eq) >> 16)
+enum {
+ VMX_CR_ACCESS_TYPE_MOV_TO_CR,
+ VMX_CR_ACCESS_TYPE_MOV_FROM_CR,
+ VMX_CR_ACCESS_TYPE_CLTS,
+ VMX_CR_ACCESS_TYPE_LMSW,
+};
+typedef union cr_access_qual {
+ unsigned long raw;
+ struct {
+ uint16_t cr:4,
+ access_type:2, /* VMX_CR_ACCESS_TYPE_* */
+ lmsw_op_type:1, /* 0 => reg, 1 => mem */
+ :1,
+ gpr:4,
+ :4;
+ uint16_t lmsw_data;
+ uint32_t :32;
+ };
+} __transparent__ cr_access_qual_t;
/*
* Access Rights
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.