[Xen-devel] [PATCH v3 03/13] x86/hvm: Scale host TSC when setting/getting guest TSC
The existing hvm_[set|get]_guest_tsc_fixed() calculate the guest TSC by
adding the TSC offset to the host TSC. When TSC scaling is enabled, the
host TSC should be scaled first. This patch adds the scaling logic to
those two functions.

Reviewed-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Signed-off-by: Haozhong Zhang <haozhong.zhang@xxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c        | 17 +++++++----------
 xen/arch/x86/hvm/svm/svm.c    | 12 ++++++++++++
 xen/include/asm-x86/hvm/hvm.h |  2 ++
 3 files changed, 21 insertions(+), 10 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 21470ec..3648a44 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -60,6 +60,7 @@
 #include <asm/hvm/nestedhvm.h>
 #include <asm/hvm/event.h>
 #include <asm/hvm/vmx/vmx.h>
+#include <asm/hvm/svm/svm.h> /* for cpu_has_tsc_ratio */
 #include <asm/altp2m.h>
 #include <asm/mtrr.h>
 #include <asm/apic.h>
@@ -310,13 +311,11 @@ void hvm_set_guest_tsc_fixed(struct vcpu *v, u64 guest_tsc, u64 at_tsc)
         tsc = hvm_get_guest_time_fixed(v, at_tsc);
         tsc = gtime_to_gtsc(v->domain, tsc);
     }
-    else if ( at_tsc )
-    {
-        tsc = at_tsc;
-    }
     else
     {
-        tsc = rdtsc();
+        tsc = at_tsc ?: rdtsc();
+        if ( cpu_has_tsc_ratio )
+            tsc = hvm_funcs.scale_tsc(v, tsc);
     }
 
     delta_tsc = guest_tsc - tsc;
@@ -344,13 +343,11 @@ u64 hvm_get_guest_tsc_fixed(struct vcpu *v, uint64_t at_tsc)
         tsc = hvm_get_guest_time_fixed(v, at_tsc);
         tsc = gtime_to_gtsc(v->domain, tsc);
     }
-    else if ( at_tsc )
-    {
-        tsc = at_tsc;
-    }
     else
     {
-        tsc = rdtsc();
+        tsc = at_tsc ?: rdtsc();
+        if ( cpu_has_tsc_ratio )
+            tsc = hvm_funcs.scale_tsc(v, tsc);
     }
 
     return tsc + v->arch.hvm_vcpu.cache_tsc_offset;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index a66d854..c538a29 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -804,6 +804,16 @@ static uint64_t scale_tsc(uint64_t host_tsc, uint64_t ratio)
     return scaled_host_tsc;
 }
 
+static uint64_t svm_scale_tsc(struct vcpu *v, uint64_t tsc)
+{
+    struct domain *d = v->domain;
+
+    if ( !cpu_has_tsc_ratio || d->arch.vtsc )
+        return tsc;
+
+    return scale_tsc(tsc, vcpu_tsc_ratio(v));
+}
+
 static uint64_t svm_get_tsc_offset(uint64_t host_tsc, uint64_t guest_tsc,
     uint64_t ratio)
 {
@@ -2272,6 +2282,8 @@ static struct hvm_function_table __initdata svm_function_table = {
     .nhvm_vmcx_hap_enabled = nsvm_vmcb_hap_enabled,
     .nhvm_intr_blocked = nsvm_intr_blocked,
     .nhvm_hap_walk_L1_p2m = nsvm_hap_walk_L1_p2m,
+
+    .scale_tsc = svm_scale_tsc,
 };
 
 void svm_vmexit_handler(struct cpu_user_regs *regs)
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index b9d893d..ba6259e 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -212,6 +212,8 @@ struct hvm_function_table {
     void (*altp2m_vcpu_update_vmfunc_ve)(struct vcpu *v);
    bool_t (*altp2m_vcpu_emulate_ve)(struct vcpu *v);
     int (*altp2m_vcpu_emulate_vmfunc)(struct cpu_user_regs *regs);
+
+    uint64_t (*scale_tsc)(struct vcpu *v, uint64_t tsc);
 };
 
 extern struct hvm_function_table hvm_funcs;
-- 
2.4.8
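[Editorial illustration, not part of the patch.] The change above makes the set/get paths compute guest TSC = scale(host TSC) + offset rather than guest TSC = host TSC + offset whenever a TSC ratio is in effect. The minimal sketch below shows that arithmetic with a hypothetical helper, assuming AMD's 8.32 fixed-point ratio encoding (8 integer bits, 32 fractional bits) as a stand-in for SVM's scale_tsc(); example_scale_tsc() and the sample values are invented for illustration only.

/* Illustrative sketch only: hypothetical 8.32 fixed-point TSC scaling. */
#include <stdint.h>
#include <stdio.h>

static uint64_t example_scale_tsc(uint64_t host_tsc, uint64_t ratio)
{
    uint64_t frac = ratio & 0xffffffffULL;  /* fractional part of the ratio */
    uint64_t intp = ratio >> 32;            /* integer part of the ratio */

    /*
     * (host_tsc * ratio) >> 32, computed as partial products so the
     * high bits of host_tsc * frac are not lost in 64-bit arithmetic.
     */
    return host_tsc * intp
           + (host_tsc >> 32) * frac
           + (((host_tsc & 0xffffffffULL) * frac) >> 32);
}

int main(void)
{
    uint64_t host_tsc = 1000000000ULL;  /* sample host TSC reading */
    uint64_t ratio    = 1ULL << 31;     /* 0.5 in 8.32 fixed point */
    int64_t  offset   = 5000;           /* sample per-vCPU TSC offset */

    /* guest TSC = scaled host TSC + offset, mirroring the patched getter */
    printf("guest tsc = %llu\n",
           (unsigned long long)(example_scale_tsc(host_tsc, ratio) + offset));
    return 0;
}

With ratio 0.5 and a host TSC of 1000000000, the sketch prints 500005000, i.e. half the host TSC plus the offset, which is the value the patched hvm_get_guest_tsc_fixed() path is meant to return on scaling-capable hardware.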