[Xen-devel] [PATCH 05/16] x86/HVM: add wrapper for hvm_funcs.set_tsc_offset()
It's used in quite a few places, and hence routing all callers through a
single wrapper eases subsequently adjusting how these (indirect) calls are
carried out.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
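
[Editorial note: for readers less familiar with the pattern, hvm_funcs is
Xen's table of per-vendor (VMX/SVM) hook functions, and the patch merely
routes every open-coded use of the set_tsc_offset hook through a static
inline wrapper. Below is a minimal, self-contained sketch of that pattern.
It is not Xen code: struct hooks, vmx_set_tsc_offset and the printf body
are illustrative stand-ins.]

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct vcpu;  /* opaque here; stands in for Xen's struct vcpu */

/* Stand-in for Xen's struct hvm_function_table, i.e. hvm_funcs. */
struct hooks {
    void (*set_tsc_offset)(struct vcpu *v, uint64_t offset, uint64_t at_tsc);
};

/* One illustrative backend, as a VMX/SVM hook would be on real hardware. */
static void vmx_set_tsc_offset(struct vcpu *v, uint64_t offset,
                               uint64_t at_tsc)
{
    (void)v;
    printf("set_tsc_offset: offset=%" PRIu64 " at_tsc=%" PRIu64 "\n",
           offset, at_tsc);
}

static struct hooks hvm_funcs = { .set_tsc_offset = vmx_set_tsc_offset };

/*
 * The wrapper the patch introduces: every caller goes through here, so a
 * later change to how the indirect call is performed touches one line only.
 */
static inline void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset,
                                      uint64_t at_tsc)
{
    hvm_funcs.set_tsc_offset(v, offset, at_tsc);
}

int main(void)
{
    hvm_set_tsc_offset(NULL, 42, 0);  /* NULL is fine for this stub */
    return 0;
}

Built as ordinary C99 this just prints its arguments; in Xen the hook
instead programs the TSC offset into the VMCS/VMCB.
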
--- a/xen/arch/x86/hvm/domain.c
+++ b/xen/arch/x86/hvm/domain.c
@@ -317,9 +317,9 @@ int arch_set_info_hvm_guest(struct vcpu
     /* Sync AP's TSC with BSP's. */
     v->arch.hvm_vcpu.cache_tsc_offset =
-        v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
-    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
-                             v->domain->arch.hvm_domain.sync_tsc);
+        d->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
+    hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
+                       d->arch.hvm_domain.sync_tsc);

     paging_update_paging_modes(v);
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -417,7 +417,7 @@ static void hvm_set_guest_tsc_fixed(stru
         delta_tsc = guest_tsc - tsc;

     v->arch.hvm_vcpu.cache_tsc_offset = delta_tsc;
-    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, at_tsc);
+    hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, at_tsc);
 }

 #define hvm_set_guest_tsc(v, t) hvm_set_guest_tsc_fixed(v, t, 0)
@@ -435,7 +435,7 @@ static void hvm_set_guest_tsc_adjust(str
 {
     v->arch.hvm_vcpu.cache_tsc_offset += tsc_adjust
                                          - v->arch.hvm_vcpu.msr_tsc_adjust;
-    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
+    hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
     v->arch.hvm_vcpu.msr_tsc_adjust = tsc_adjust;
 }
@@ -3934,8 +3934,8 @@ void hvm_vcpu_reset_state(struct vcpu *v
     /* Sync AP's TSC with BSP's. */
     v->arch.hvm_vcpu.cache_tsc_offset =
         v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
-    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
-                             d->arch.hvm_domain.sync_tsc);
+    hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
+                       d->arch.hvm_domain.sync_tsc);

     v->arch.hvm_vcpu.msr_tsc_adjust = 0;
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1082,7 +1082,7 @@ static void load_shadow_guest_state(stru
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
     }

-    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
+    hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);

     vvmcs_to_shadow_bulk(v, ARRAY_SIZE(vmentry_fields), vmentry_fields);
@@ -1288,7 +1288,7 @@ static void load_vvmcs_host_state(struct
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
     }

-    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
+    hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);

     set_vvmcs(v, VM_ENTRY_INTR_INFO, 0);
 }
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -2198,9 +2198,9 @@ void tsc_set_info(struct domain *d,
              * will sync their TSC to BSP's sync_tsc.
              */
             d->arch.hvm_domain.sync_tsc = rdtsc();
-            hvm_funcs.set_tsc_offset(d->vcpu[0],
-                                     d->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset,
-                                     d->arch.hvm_domain.sync_tsc);
+            hvm_set_tsc_offset(d->vcpu[0],
+                               d->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset,
+                               d->arch.hvm_domain.sync_tsc);
         }
     }
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -347,6 +347,12 @@ static inline void hvm_cpuid_policy_chan
     hvm_funcs.cpuid_policy_changed(v);
 }

+static inline void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset,
+                                      uint64_t at_tsc)
+{
+    hvm_funcs.set_tsc_offset(v, offset, at_tsc);
+}
+
 /*
  * Called to ensure than all guest-specific mappings in a tagged TLB are
  * flushed; does *not* flush Xen's TLB entries, and on processors without a
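
[Editorial note: the payoff, on my reading of the commit message and not a
claim from the patch itself, is that once every call site funnels through
hvm_set_tsc_offset(), changing how the indirect call is made means editing
a single line. A hedged sketch, reusing the declarations from the
stand-alone example above; alternative_vcall here is a local stand-in
macro that just performs the plain indirect call:]

/* Local stand-in for a hypothetical direct-call patching primitive. */
#define alternative_vcall(fn, ...) ((fn)(__VA_ARGS__))

static inline void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset,
                                      uint64_t at_tsc)
{
    /* The only line that changes if the call mechanism changes. */
    alternative_vcall(hvm_funcs.set_tsc_offset, v, offset, at_tsc);
}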