[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen-unstable] x86/hvm: don't use indirect calls without need
# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1347625522 -7200
# Node ID 191db43e50cfc919681a406d313b570d3b3d2d2e
# Parent 7f507c5e3a5c9a514286266837749e65704a4447
x86/hvm: don't use indirect calls without need
Direct calls perform better, so we should prefer them and use indirect
ones only when there indeed is a need for indirection.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
---
diff -r 7f507c5e3a5c -r 191db43e50cf xen/arch/x86/apic.c
--- a/xen/arch/x86/apic.c Fri Sep 14 14:20:08 2012 +0200
+++ b/xen/arch/x86/apic.c Fri Sep 14 14:25:22 2012 +0200
@@ -1373,7 +1373,7 @@ void error_interrupt(struct cpu_user_reg
void pmu_apic_interrupt(struct cpu_user_regs *regs)
{
ack_APIC_irq();
- hvm_do_pmu_interrupt(regs);
+ vpmu_do_interrupt(regs);
}
/*
diff -r 7f507c5e3a5c -r 191db43e50cf xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Fri Sep 14 14:20:08 2012 +0200
+++ b/xen/arch/x86/hvm/svm/svm.c Fri Sep 14 14:25:22 2012 +0200
@@ -73,6 +73,8 @@ bool_t cpu_has_lmsl;
#define set_segment_register(name, value) \
asm volatile ( "movw %%ax ,%%" STR(name) "" : : "a" (value) )
+static void svm_update_guest_efer(struct vcpu *);
+
static struct hvm_function_table svm_function_table;
/* va of hardware host save area */
@@ -269,9 +271,9 @@ static int svm_vmcb_restore(struct vcpu
v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
- hvm_update_guest_cr(v, 0);
- hvm_update_guest_cr(v, 2);
- hvm_update_guest_cr(v, 4);
+ svm_update_guest_cr(v, 0);
+ svm_update_guest_cr(v, 2);
+ svm_update_guest_cr(v, 4);
/* Load sysenter MSRs into both VMCB save area and VCPU fields. */
vmcb->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs = c->sysenter_cs;
@@ -330,7 +332,7 @@ static void svm_load_cpu_state(struct vc
vmcb->cstar = data->msr_cstar;
vmcb->sfmask = data->msr_syscall_mask;
v->arch.hvm_vcpu.guest_efer = data->msr_efer;
- hvm_update_guest_efer(v);
+ svm_update_guest_efer(v);
hvm_set_guest_tsc(v, data->tsc);
}
@@ -426,12 +428,7 @@ static int svm_guest_x86_mode(struct vcp
return (likely(vmcb->cs.attr.fields.db) ? 4 : 2);
}
-static void svm_update_host_cr3(struct vcpu *v)
-{
- /* SVM doesn't have a HOST_CR3 equivalent to update. */
-}
-
-static void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
+void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
uint64_t value;
@@ -1124,11 +1121,6 @@ static int svm_event_pending(struct vcpu
return vmcb->eventinj.fields.v;
}
-static int svm_do_pmu_interrupt(struct cpu_user_regs *regs)
-{
- return vpmu_do_interrupt(regs);
-}
-
static void svm_cpu_dead(unsigned int cpu)
{
free_xenheap_page(per_cpu(hsa, cpu));
@@ -1990,7 +1982,6 @@ static struct hvm_function_table __read_
.get_segment_register = svm_get_segment_register,
.set_segment_register = svm_set_segment_register,
.get_shadow_gs_base = svm_get_shadow_gs_base,
- .update_host_cr3 = svm_update_host_cr3,
.update_guest_cr = svm_update_guest_cr,
.update_guest_efer = svm_update_guest_efer,
.set_guest_pat = svm_set_guest_pat,
@@ -1999,7 +1990,6 @@ static struct hvm_function_table __read_
.inject_trap = svm_inject_trap,
.init_hypercall_page = svm_init_hypercall_page,
.event_pending = svm_event_pending,
- .do_pmu_interrupt = svm_do_pmu_interrupt,
.cpuid_intercept = svm_cpuid_intercept,
.wbinvd_intercept = svm_wbinvd_intercept,
.fpu_dirty_intercept = svm_fpu_dirty_intercept,
diff -r 7f507c5e3a5c -r 191db43e50cf xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Fri Sep 14 14:20:08 2012 +0200
+++ b/xen/arch/x86/hvm/vmx/vmx.c Fri Sep 14 14:25:22 2012 +0200
@@ -681,8 +681,8 @@ static void vmx_ctxt_switch_to(struct vc
.fields = { .type = 0xb, .s = 0, .dpl = 0, .p = 1, .avl = 0, \
.l = 0, .db = 0, .g = 0, .pad = 0 } }).bytes)
-static void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
- struct segment_register *reg)
+void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
+ struct segment_register *reg)
{
uint32_t attr = 0;
@@ -1412,11 +1412,6 @@ static int vmx_event_pending(struct vcpu
return (__vmread(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK);
}
-static int vmx_do_pmu_interrupt(struct cpu_user_regs *regs)
-{
- return vpmu_do_interrupt(regs);
-}
-
static void vmx_set_uc_mode(struct vcpu *v)
{
if ( paging_mode_hap(v->domain) )
@@ -1478,7 +1473,6 @@ static struct hvm_function_table __read_
.inject_trap = vmx_inject_trap,
.init_hypercall_page = vmx_init_hypercall_page,
.event_pending = vmx_event_pending,
- .do_pmu_interrupt = vmx_do_pmu_interrupt,
.cpu_up = vmx_cpu_up,
.cpu_down = vmx_cpu_down,
.cpuid_intercept = vmx_cpuid_intercept,
@@ -1593,7 +1587,7 @@ static void vmx_cpuid_intercept(
{
case 0x80000001:
/* SYSCALL is visible iff running in long mode. */
- hvm_get_segment_register(v, x86_seg_cs, &cs);
+ vmx_get_segment_register(v, x86_seg_cs, &cs);
if ( cs.attr.fields.l )
*edx |= cpufeat_mask(X86_FEATURE_SYSCALL);
else
diff -r 7f507c5e3a5c -r 191db43e50cf xen/arch/x86/hvm/vmx/vvmx.c
--- a/xen/arch/x86/hvm/vmx/vvmx.c Fri Sep 14 14:20:08 2012 +0200
+++ b/xen/arch/x86/hvm/vmx/vvmx.c Fri Sep 14 14:25:22 2012 +0200
@@ -287,8 +287,6 @@ static int vmx_inst_check_privilege(stru
struct vcpu *v = current;
struct segment_register cs;
- hvm_get_segment_register(v, x86_seg_cs, &cs);
-
if ( vmxop_check )
{
if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) ||
@@ -298,6 +296,8 @@ static int vmx_inst_check_privilege(stru
else if ( !vcpu_2_nvmx(v).vmxon_region_pa )
goto invalid_op;
+ vmx_get_segment_register(v, x86_seg_cs, &cs);
+
if ( (regs->eflags & X86_EFLAGS_VM) ||
(hvm_long_mode_enabled(v) && cs.attr.fields.l == 0) )
goto invalid_op;
@@ -354,13 +354,13 @@ static int decode_vmx_inst(struct cpu_us
if ( hvm_long_mode_enabled(v) )
{
- hvm_get_segment_register(v, x86_seg_cs, &seg);
+ vmx_get_segment_register(v, x86_seg_cs, &seg);
mode_64bit = seg.attr.fields.l;
}
if ( info.fields.segment > VMX_SREG_GS )
goto gp_fault;
- hvm_get_segment_register(v, sreg_to_index[info.fields.segment], &seg);
+ vmx_get_segment_register(v, sreg_to_index[info.fields.segment], &seg);
seg_base = seg.base;
base = info.fields.base_reg_invalid ? 0 :
diff -r 7f507c5e3a5c -r 191db43e50cf xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h Fri Sep 14 14:20:08 2012 +0200
+++ b/xen/include/asm-x86/hvm/hvm.h Fri Sep 14 14:25:22 2012 +0200
@@ -137,7 +137,6 @@ struct hvm_function_table {
void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
int (*event_pending)(struct vcpu *v);
- int (*do_pmu_interrupt)(struct cpu_user_regs *regs);
int (*cpu_up_prepare)(unsigned int cpu);
void (*cpu_dead)(unsigned int cpu);
@@ -266,7 +265,8 @@ hvm_guest_x86_mode(struct vcpu *v)
static inline void
hvm_update_host_cr3(struct vcpu *v)
{
- hvm_funcs.update_host_cr3(v);
+ if ( hvm_funcs.update_host_cr3 )
+ hvm_funcs.update_host_cr3(v);
}
static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
@@ -330,11 +330,6 @@ static inline int hvm_event_pending(stru
return hvm_funcs.event_pending(v);
}
-static inline int hvm_do_pmu_interrupt(struct cpu_user_regs *regs)
-{
- return hvm_funcs.do_pmu_interrupt(regs);
-}
-
/* These reserved bits in lower 32 remain 0 after any load of CR0 */
#define HVM_CR0_GUEST_RESERVED_BITS \
(~((unsigned long) \
diff -r 7f507c5e3a5c -r 191db43e50cf xen/include/asm-x86/hvm/svm/svm.h
--- a/xen/include/asm-x86/hvm/svm/svm.h Fri Sep 14 14:20:08 2012 +0200
+++ b/xen/include/asm-x86/hvm/svm/svm.h Fri Sep 14 14:25:22 2012 +0200
@@ -66,6 +66,7 @@ static inline void svm_invlpga(unsigned
unsigned long *svm_msrbit(unsigned long *msr_bitmap, uint32_t msr);
void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len);
+void svm_update_guest_cr(struct vcpu *, unsigned int cr);
extern u32 svm_feature_flags;
diff -r 7f507c5e3a5c -r 191db43e50cf xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Fri Sep 14 14:20:08 2012 +0200
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Fri Sep 14 14:25:22 2012 +0200
@@ -387,6 +387,8 @@ static inline int __vmxon(u64 addr)
return rc;
}
+void vmx_get_segment_register(struct vcpu *, enum x86_segment,
+ struct segment_register *);
void vmx_inject_extint(int trap);
void vmx_inject_nmi(void);
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
Lists.xenproject.org is hosted with RackSpace, monitoring our infrastructure.