[Xen-devel] [PATCH 1/3] x86/hvm: don't use indirect calls without need
Direct calls perform better than indirect ones, so prefer them and use an
indirect call only where indirection is actually needed. In particular, drop
the wrapper-only .do_pmu_interrupt hook, make .update_host_cr3 optional (its
generic wrapper now checks for a NULL pointer) instead of requiring SVM's
empty stub, and call the vendor handlers directly from SVM/VMX code where the
target is known at build time.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
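[Illustration only, not part of the patch: a minimal, self-contained sketch of
the direct-vs-indirect call distinction the description refers to. The struct
and function names below are hypothetical stand-ins, not the actual Xen
declarations.]

#include <stdio.h>

struct vcpu;                            /* opaque here, used only by pointer */

struct hvm_like_hooks {
    void (*update_guest_efer)(struct vcpu *v);
};

static void svm_like_update_guest_efer(struct vcpu *v)
{
    (void)v;
    printf("EFER updated\n");
}

static const struct hvm_like_hooks hooks = {
    .update_guest_efer = svm_like_update_guest_efer,
};

static void generic_code(struct vcpu *v)
{
    /* Generic code cannot know the implementation, so it has to dispatch
     * through the function table -- an indirect call. */
    hooks.update_guest_efer(v);
}

static void vendor_code(struct vcpu *v)
{
    /* Inside the vendor-specific file the target is known at build time,
     * so calling it directly avoids the indirect branch. */
    svm_like_update_guest_efer(v);
}

int main(void)
{
    generic_code(NULL);
    vendor_code(NULL);
    return 0;
}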
--- a/xen/arch/x86/apic.c
+++ b/xen/arch/x86/apic.c
@@ -1373,7 +1373,7 @@ void error_interrupt(struct cpu_user_reg
void pmu_apic_interrupt(struct cpu_user_regs *regs)
{
ack_APIC_irq();
- hvm_do_pmu_interrupt(regs);
+ vpmu_do_interrupt(regs);
}
/*
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -73,6 +73,8 @@ bool_t cpu_has_lmsl;
#define set_segment_register(name, value) \
asm volatile ( "movw %%ax ,%%" STR(name) "" : : "a" (value) )
+static void svm_update_guest_efer(struct vcpu *);
+
static struct hvm_function_table svm_function_table;
/* va of hardware host save area */
@@ -269,9 +271,9 @@ static int svm_vmcb_restore(struct vcpu
v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
- hvm_update_guest_cr(v, 0);
- hvm_update_guest_cr(v, 2);
- hvm_update_guest_cr(v, 4);
+ svm_update_guest_cr(v, 0);
+ svm_update_guest_cr(v, 2);
+ svm_update_guest_cr(v, 4);
/* Load sysenter MSRs into both VMCB save area and VCPU fields. */
vmcb->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs = c->sysenter_cs;
@@ -330,7 +332,7 @@ static void svm_load_cpu_state(struct vc
vmcb->cstar = data->msr_cstar;
vmcb->sfmask = data->msr_syscall_mask;
v->arch.hvm_vcpu.guest_efer = data->msr_efer;
- hvm_update_guest_efer(v);
+ svm_update_guest_efer(v);
hvm_set_guest_tsc(v, data->tsc);
}
@@ -426,12 +428,7 @@ static int svm_guest_x86_mode(struct vcp
return (likely(vmcb->cs.attr.fields.db) ? 4 : 2);
}
-static void svm_update_host_cr3(struct vcpu *v)
-{
- /* SVM doesn't have a HOST_CR3 equivalent to update. */
-}
-
-static void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
+void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
uint64_t value;
@@ -1128,11 +1125,6 @@ static int svm_event_pending(struct vcpu
return vmcb->eventinj.fields.v;
}
-static int svm_do_pmu_interrupt(struct cpu_user_regs *regs)
-{
- return vpmu_do_interrupt(regs);
-}
-
static void svm_cpu_dead(unsigned int cpu)
{
free_xenheap_page(per_cpu(hsa, cpu));
@@ -1997,7 +1989,6 @@ static struct hvm_function_table __read_
.get_segment_register = svm_get_segment_register,
.set_segment_register = svm_set_segment_register,
.get_shadow_gs_base = svm_get_shadow_gs_base,
- .update_host_cr3 = svm_update_host_cr3,
.update_guest_cr = svm_update_guest_cr,
.update_guest_efer = svm_update_guest_efer,
.set_guest_pat = svm_set_guest_pat,
@@ -2006,7 +1997,6 @@ static struct hvm_function_table __read_
.inject_trap = svm_inject_trap,
.init_hypercall_page = svm_init_hypercall_page,
.event_pending = svm_event_pending,
- .do_pmu_interrupt = svm_do_pmu_interrupt,
.cpuid_intercept = svm_cpuid_intercept,
.wbinvd_intercept = svm_wbinvd_intercept,
.fpu_dirty_intercept = svm_fpu_dirty_intercept,
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -709,8 +709,8 @@ static void vmx_ctxt_switch_to(struct vc
.fields = { .type = 0xb, .s = 0, .dpl = 0, .p = 1, .avl = 0, \
.l = 0, .db = 0, .g = 0, .pad = 0 } }).bytes)
-static void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
- struct segment_register *reg)
+void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
+ struct segment_register *reg)
{
uint32_t attr = 0;
@@ -1461,11 +1461,6 @@ static int vmx_event_pending(struct vcpu
return (__vmread(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK);
}
-static int vmx_do_pmu_interrupt(struct cpu_user_regs *regs)
-{
- return vpmu_do_interrupt(regs);
-}
-
static void vmx_set_uc_mode(struct vcpu *v)
{
if ( paging_mode_hap(v->domain) )
@@ -1527,7 +1522,6 @@ static struct hvm_function_table __read_
.inject_trap = vmx_inject_trap,
.init_hypercall_page = vmx_init_hypercall_page,
.event_pending = vmx_event_pending,
- .do_pmu_interrupt = vmx_do_pmu_interrupt,
.cpu_up = vmx_cpu_up,
.cpu_down = vmx_cpu_down,
.cpuid_intercept = vmx_cpuid_intercept,
@@ -1642,7 +1636,7 @@ static void vmx_cpuid_intercept(
{
case 0x80000001:
/* SYSCALL is visible iff running in long mode. */
- hvm_get_segment_register(v, x86_seg_cs, &cs);
+ vmx_get_segment_register(v, x86_seg_cs, &cs);
if ( cs.attr.fields.l )
*edx |= cpufeat_mask(X86_FEATURE_SYSCALL);
else
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -291,8 +291,6 @@ static int vmx_inst_check_privilege(stru
struct vcpu *v = current;
struct segment_register cs;
- hvm_get_segment_register(v, x86_seg_cs, &cs);
-
if ( vmxop_check )
{
if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) ||
@@ -302,6 +300,8 @@ static int vmx_inst_check_privilege(stru
else if ( !vcpu_2_nvmx(v).vmxon_region_pa )
goto invalid_op;
+ vmx_get_segment_register(v, x86_seg_cs, &cs);
+
if ( (regs->eflags & X86_EFLAGS_VM) ||
(hvm_long_mode_enabled(v) && cs.attr.fields.l == 0) )
goto invalid_op;
@@ -358,13 +358,13 @@ static int decode_vmx_inst(struct cpu_us
if ( hvm_long_mode_enabled(v) )
{
- hvm_get_segment_register(v, x86_seg_cs, &seg);
+ vmx_get_segment_register(v, x86_seg_cs, &seg);
mode_64bit = seg.attr.fields.l;
}
if ( info.fields.segment > VMX_SREG_GS )
goto gp_fault;
- hvm_get_segment_register(v, sreg_to_index[info.fields.segment], &seg);
+ vmx_get_segment_register(v, sreg_to_index[info.fields.segment], &seg);
seg_base = seg.base;
base = info.fields.base_reg_invalid ? 0 :
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -137,7 +137,6 @@ struct hvm_function_table {
void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
int (*event_pending)(struct vcpu *v);
- int (*do_pmu_interrupt)(struct cpu_user_regs *regs);
int (*cpu_up_prepare)(unsigned int cpu);
void (*cpu_dead)(unsigned int cpu);
@@ -270,7 +269,8 @@ hvm_guest_x86_mode(struct vcpu *v)
static inline void
hvm_update_host_cr3(struct vcpu *v)
{
- hvm_funcs.update_host_cr3(v);
+ if ( hvm_funcs.update_host_cr3 )
+ hvm_funcs.update_host_cr3(v);
}
static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
@@ -334,11 +334,6 @@ static inline int hvm_event_pending(stru
return hvm_funcs.event_pending(v);
}
-static inline int hvm_do_pmu_interrupt(struct cpu_user_regs *regs)
-{
- return hvm_funcs.do_pmu_interrupt(regs);
-}
-
/* These reserved bits in lower 32 remain 0 after any load of CR0 */
#define HVM_CR0_GUEST_RESERVED_BITS \
(~((unsigned long) \
--- a/xen/include/asm-x86/hvm/svm/svm.h
+++ b/xen/include/asm-x86/hvm/svm/svm.h
@@ -66,6 +66,7 @@ static inline void svm_invlpga(unsigned
unsigned long *svm_msrbit(unsigned long *msr_bitmap, uint32_t msr);
void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len);
+void svm_update_guest_cr(struct vcpu *, unsigned int cr);
extern u32 svm_feature_flags;
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -387,6 +387,8 @@ static inline int __vmxon(u64 addr)
return rc;
}
+void vmx_get_segment_register(struct vcpu *, enum x86_segment,
+ struct segment_register *);
void vmx_inject_extint(int trap);
void vmx_inject_nmi(void);
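[Again illustration only: a minimal sketch of the optional-hook pattern used
for .update_host_cr3 above, where the generic wrapper checks the function
pointer for NULL instead of every implementation installing a no-op stub.
Names are hypothetical simplifications of the Xen ones.]

#include <stddef.h>
#include <stdio.h>

struct vcpu;

struct hvm_like_hooks {
    void (*update_host_cr3)(struct vcpu *v);    /* may legitimately be NULL */
};

/* SVM has no HOST_CR3 equivalent, so it no longer installs an empty stub. */
static const struct hvm_like_hooks svm_like_hooks = {
    .update_host_cr3 = NULL,
};

static void update_host_cr3_wrapper(const struct hvm_like_hooks *h,
                                    struct vcpu *v)
{
    /* The generic wrapper guards the optional hook. */
    if ( h->update_host_cr3 )
        h->update_host_cr3(v);
}

int main(void)
{
    update_host_cr3_wrapper(&svm_like_hooks, NULL);
    printf("optional hook skipped safely\n");
    return 0;
}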
Attachment: hvm-no-indirect-calls.patch