[PATCH] x86/HVM: convert most remaining hvm_funcs hook invocations to alt-call
The aim is to have as few indirect calls as possible (see [1]), whereas
during the initial conversion performance was the main aspect, and hence
rarely used hooks didn't get converted. Apparently one use of
get_interrupt_shadow() was missed at the time.
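
For illustration, the conversion pattern is as follows (hook names here
are placeholders, not ones touched by this patch):

    /* Before: indirect call through the hvm_funcs function pointer table. */
    rc = hvm_funcs.some_hook(v);

    /* After: the call site is patched to a direct call by the alternatives
     * framework; alternative_call() is used where the return value is
     * needed, alternative_vcall() where it is ignored. */
    rc = alternative_call(hvm_funcs.some_hook, v);
    alternative_vcall(hvm_funcs.other_hook, v);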
While I've intentionally left alone the cpu_{up,down}() etc. hooks, as
they aren't guest reachable, the nhvm_hap_walk_L1_p2m() one can't
currently be converted because the framework supports only up to 6 arguments.
Down the road the three booleans perhaps want folding into a single
parameter/argument.
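
One possible shape of such folding (purely a sketch, not part of this
patch; the names below are invented) would be a flags bitmask replacing
the three access booleans:

    /* Hypothetical flag values standing in for the three bool arguments. */
    #define NHVM_WALK_R (1u << 0)
    #define NHVM_WALK_W (1u << 1)
    #define NHVM_WALK_X (1u << 2)

    /* The hook would then take a single "unsigned int access" argument,
     * bringing the parameter count within the 6-argument limit of the
     * alt-call framework. */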
While doing this, drop NULL checks ahead of .nhvm_*() calls when the
hook is always present. Also convert the .nhvm_vcpu_reset() call to
alternative_vcall(), as the return value is unused and the caller
currently has no way of propagating it.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
[1] https://lists.xen.org/archives/html/xen-devel/2021-11/msg01822.html
---
Another candidate for dropping the conditional would be
.enable_msr_interception(), but this would then want the wrapper to also
return void (hence perhaps better done separately).
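
A hypothetical shape of that follow-up (assuming the hook were made
mandatory, so both the conditional and the boolean return value go away):

    static inline void hvm_enable_msr_interception(struct domain *d, uint32_t msr)
    {
        alternative_vcall(hvm_funcs.enable_msr_interception, d, msr);
    }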
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -2548,7 +2548,7 @@ static int hvmemul_vmfunc(
if ( !hvm_funcs.altp2m_vcpu_emulate_vmfunc )
return X86EMUL_UNHANDLEABLE;
- rc = hvm_funcs.altp2m_vcpu_emulate_vmfunc(ctxt->regs);
+ rc = alternative_call(hvm_funcs.altp2m_vcpu_emulate_vmfunc, ctxt->regs);
if ( rc == X86EMUL_EXCEPTION )
x86_emul_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC, ctxt);
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -703,7 +703,7 @@ int hvm_domain_initialise(struct domain
if ( rc )
goto fail2;
- rc = hvm_funcs.domain_initialise(d);
+ rc = alternative_call(hvm_funcs.domain_initialise, d);
if ( rc != 0 )
goto fail2;
@@ -736,7 +736,7 @@ void hvm_domain_relinquish_resources(str
alternative_vcall(hvm_funcs.domain_relinquish_resources, d);
if ( hvm_funcs.nhvm_domain_relinquish_resources )
- hvm_funcs.nhvm_domain_relinquish_resources(d);
+ alternative_vcall(hvm_funcs.nhvm_domain_relinquish_resources, d);
viridian_domain_deinit(d);
@@ -866,7 +866,7 @@ static int hvm_save_cpu_ctxt(struct vcpu
return 0;
/* Architecture-specific vmcs/vmcb bits */
- hvm_funcs.save_cpu_ctxt(v, &ctxt);
+ alternative_vcall(hvm_funcs.save_cpu_ctxt, v, &ctxt);
hvm_get_segment_register(v, x86_seg_idtr, &seg);
ctxt.idtr_limit = seg.limit;
@@ -1089,14 +1089,14 @@ static int hvm_load_cpu_ctxt(struct doma
#undef UNFOLD_ARBYTES
/* Architecture-specific vmcs/vmcb bits */
- if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
+ if ( alternative_call(hvm_funcs.load_cpu_ctxt, v, &ctxt) < 0 )
return -EINVAL;
v->arch.hvm.guest_cr[2] = ctxt.cr2;
hvm_update_guest_cr(v, 2);
if ( hvm_funcs.tsc_scaling.setup )
- hvm_funcs.tsc_scaling.setup(v);
+ alternative_vcall(hvm_funcs.tsc_scaling.setup, v);
v->arch.msrs->tsc_aux = ctxt.msr_tsc_aux;
@@ -1559,7 +1559,8 @@ int hvm_vcpu_initialise(struct vcpu *v)
if ( rc != 0 ) /* teardown: vlapic_destroy */
goto fail2;
- if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 ) /* teardown: hvm_funcs.vcpu_destroy */
+ rc = alternative_call(hvm_funcs.vcpu_initialise, v);
+ if ( rc != 0 ) /* teardown: hvm_funcs.vcpu_destroy */
goto fail3;
softirq_tasklet_init(&v->arch.hvm.assert_evtchn_irq_tasklet,
@@ -1607,7 +1608,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
free_compat_arg_xlat(v);
fail4:
hvmemul_cache_destroy(v);
- hvm_funcs.vcpu_destroy(v);
+ alternative_vcall(hvm_funcs.vcpu_destroy, v);
fail3:
vlapic_destroy(v);
fail2:
@@ -1631,7 +1632,7 @@ void hvm_vcpu_destroy(struct vcpu *v)
free_compat_arg_xlat(v);
tasklet_kill(&v->arch.hvm.assert_evtchn_irq_tasklet);
- hvm_funcs.vcpu_destroy(v);
+ alternative_vcall(hvm_funcs.vcpu_destroy, v);
vlapic_destroy(v);
@@ -3863,7 +3864,7 @@ enum hvm_intblk hvm_interrupt_blocked(st
!(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
return hvm_intblk_rflags_ie;
- intr_shadow = hvm_funcs.get_interrupt_shadow(v);
+ intr_shadow = alternative_call(hvm_funcs.get_interrupt_shadow, v);
if ( intr_shadow & (HVM_INTR_SHADOW_STI|HVM_INTR_SHADOW_MOV_SS) )
return hvm_intblk_shadow;
@@ -3979,7 +3980,7 @@ void hvm_vcpu_reset_state(struct vcpu *v
hvm_set_segment_register(v, x86_seg_idtr, &reg);
if ( hvm_funcs.tsc_scaling.setup )
- hvm_funcs.tsc_scaling.setup(v);
+ alternative_vcall(hvm_funcs.tsc_scaling.setup, v);
/* Sync AP's TSC with BSP's. */
v->arch.hvm.cache_tsc_offset =
--- a/xen/arch/x86/hvm/nestedhvm.c
+++ b/xen/arch/x86/hvm/nestedhvm.c
@@ -54,8 +54,7 @@ nestedhvm_vcpu_reset(struct vcpu *v)
hvm_asid_flush_vcpu_asid(&nv->nv_n2asid);
- if ( hvm_funcs.nhvm_vcpu_reset )
- hvm_funcs.nhvm_vcpu_reset(v);
+ alternative_vcall(hvm_funcs.nhvm_vcpu_reset, v);
/* vcpu is in host mode */
nestedhvm_vcpu_exit_guestmode(v);
@@ -64,14 +63,14 @@ nestedhvm_vcpu_reset(struct vcpu *v)
int
nestedhvm_vcpu_initialise(struct vcpu *v)
{
- int rc = -EOPNOTSUPP;
+ int rc;
if ( !shadow_io_bitmap[0] )
return -ENOMEM;
- if ( !hvm_funcs.nhvm_vcpu_initialise ||
- ((rc = hvm_funcs.nhvm_vcpu_initialise(v)) != 0) )
- return rc;
+ rc = alternative_call(hvm_funcs.nhvm_vcpu_initialise, v);
+ if ( rc )
+ return rc;
nestedhvm_vcpu_reset(v);
return 0;
@@ -80,8 +79,7 @@ nestedhvm_vcpu_initialise(struct vcpu *v
void
nestedhvm_vcpu_destroy(struct vcpu *v)
{
- if ( hvm_funcs.nhvm_vcpu_destroy )
- hvm_funcs.nhvm_vcpu_destroy(v);
+ alternative_vcall(hvm_funcs.nhvm_vcpu_destroy, v);
}
static void
--- a/xen/arch/x86/monitor.c
+++ b/xen/arch/x86/monitor.c
@@ -270,7 +270,8 @@ int arch_monitor_domctl_event(struct dom
ad->monitor.descriptor_access_enabled = requested_status;
for_each_vcpu ( d, v )
- hvm_funcs.set_descriptor_access_exiting(v, requested_status);
+ alternative_vcall(hvm_funcs.set_descriptor_access_exiting, v,
+ requested_status);
domain_unpause(d);
break;
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -204,7 +204,7 @@ void vm_event_fill_regs(vm_event_request
ASSERT(is_hvm_vcpu(curr));
/* Architecture-specific vmcs/vmcb bits */
- hvm_funcs.save_cpu_ctxt(curr, &ctxt);
+ alternative_vcall(hvm_funcs.save_cpu_ctxt, curr, &ctxt);
req->data.regs.x86.rax = regs->rax;
req->data.regs.x86.rcx = regs->rcx;
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -601,7 +601,7 @@ static inline void hvm_invalidate_regs_f
static inline int nhvm_vcpu_vmexit_event(
struct vcpu *v, const struct x86_event *event)
{
- return hvm_funcs.nhvm_vcpu_vmexit_event(v, event);
+ return alternative_call(hvm_funcs.nhvm_vcpu_vmexit_event, v, event);
}
/* returns l1 guest's cr3 that points to the page table used to
@@ -609,43 +609,44 @@ static inline int nhvm_vcpu_vmexit_event
*/
static inline uint64_t nhvm_vcpu_p2m_base(struct vcpu *v)
{
- return hvm_funcs.nhvm_vcpu_p2m_base(v);
+ return alternative_call(hvm_funcs.nhvm_vcpu_p2m_base, v);
}
/* returns true, when l1 guest intercepts the specified trap */
static inline bool_t nhvm_vmcx_guest_intercepts_event(
struct vcpu *v, unsigned int vector, int errcode)
{
- return hvm_funcs.nhvm_vmcx_guest_intercepts_event(v, vector, errcode);
+ return alternative_call(hvm_funcs.nhvm_vmcx_guest_intercepts_event, v,
+ vector, errcode);
}
/* returns true when l1 guest wants to use hap to run l2 guest */
static inline bool_t nhvm_vmcx_hap_enabled(struct vcpu *v)
{
- return hvm_funcs.nhvm_vmcx_hap_enabled(v);
+ return alternative_call(hvm_funcs.nhvm_vmcx_hap_enabled, v);
}
/* interrupt */
static inline enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v)
{
- return hvm_funcs.nhvm_intr_blocked(v);
+ return alternative_call(hvm_funcs.nhvm_intr_blocked, v);
}
static inline bool_t hvm_enable_msr_interception(struct domain *d, uint32_t msr)
{
if ( hvm_funcs.enable_msr_interception )
{
- hvm_funcs.enable_msr_interception(d, msr);
- return 1;
+ alternative_vcall(hvm_funcs.enable_msr_interception, d, msr);
+ return true;
}
- return 0;
+ return false;
}
static inline bool_t hvm_is_singlestep_supported(void)
{
return (hvm_funcs.is_singlestep_supported &&
- hvm_funcs.is_singlestep_supported());
+ alternative_call(hvm_funcs.is_singlestep_supported));
}
static inline bool hvm_hap_supported(void)
@@ -663,14 +664,14 @@ static inline bool hvm_altp2m_supported(
static inline void altp2m_vcpu_update_p2m(struct vcpu *v)
{
if ( hvm_funcs.altp2m_vcpu_update_p2m )
- hvm_funcs.altp2m_vcpu_update_p2m(v);
+ alternative_vcall(hvm_funcs.altp2m_vcpu_update_p2m, v);
}
/* updates VMCS fields related to VMFUNC and #VE */
static inline void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v)
{
if ( hvm_funcs.altp2m_vcpu_update_vmfunc_ve )
- hvm_funcs.altp2m_vcpu_update_vmfunc_ve(v);
+ alternative_vcall(hvm_funcs.altp2m_vcpu_update_vmfunc_ve, v);
}
/* emulates #VE */
@@ -678,7 +679,7 @@ static inline bool altp2m_vcpu_emulate_v
{
if ( hvm_funcs.altp2m_vcpu_emulate_ve )
{
- hvm_funcs.altp2m_vcpu_emulate_ve(v);
+ alternative_vcall(hvm_funcs.altp2m_vcpu_emulate_ve, v);
return true;
}
return false;
@@ -687,7 +688,7 @@ static inline bool altp2m_vcpu_emulate_v
static inline int hvm_vmtrace_control(struct vcpu *v, bool enable, bool reset)
{
if ( hvm_funcs.vmtrace_control )
- return hvm_funcs.vmtrace_control(v, enable, reset);
+ return alternative_call(hvm_funcs.vmtrace_control, v, enable, reset);
return -EOPNOTSUPP;
}
@@ -696,7 +697,7 @@ static inline int hvm_vmtrace_control(st
static inline int hvm_vmtrace_output_position(struct vcpu *v, uint64_t *pos)
{
if ( hvm_funcs.vmtrace_output_position )
- return hvm_funcs.vmtrace_output_position(v, pos);
+ return alternative_call(hvm_funcs.vmtrace_output_position, v, pos);
return -EOPNOTSUPP;
}
@@ -705,7 +706,7 @@ static inline int hvm_vmtrace_set_option
struct vcpu *v, uint64_t key, uint64_t value)
{
if ( hvm_funcs.vmtrace_set_option )
- return hvm_funcs.vmtrace_set_option(v, key, value);
+ return alternative_call(hvm_funcs.vmtrace_set_option, v, key, value);
return -EOPNOTSUPP;
}
@@ -714,7 +715,7 @@ static inline int hvm_vmtrace_get_option
struct vcpu *v, uint64_t key, uint64_t *value)
{
if ( hvm_funcs.vmtrace_get_option )
- return hvm_funcs.vmtrace_get_option(v, key, value);
+ return alternative_call(hvm_funcs.vmtrace_get_option, v, key, value);
return -EOPNOTSUPP;
}
@@ -722,7 +723,7 @@ static inline int hvm_vmtrace_get_option
static inline int hvm_vmtrace_reset(struct vcpu *v)
{
if ( hvm_funcs.vmtrace_reset )
- return hvm_funcs.vmtrace_reset(v);
+ return alternative_call(hvm_funcs.vmtrace_reset, v);
return -EOPNOTSUPP;
}