[PATCH v4 18/30] KVM: x86: Remove implicit rdtsc() from kvm_compute_l1_tsc_offset()



From: David Woodhouse <dwmw@xxxxxxxxxxxx>

Let the callers pass the host TSC value in as an explicit parameter.
This leaves some fairly obviously stupid code, which uses this
function to compare a guest TSC value from some *other* time with a
newly-minted TSC value from rdtsc(). Unless it's being used to measure
*elapsed* time, that isn't very sensible.

In this case, "obviously stupid" is an improvement over being
non-obviously so.
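
As a minimal standalone sketch of the new calling convention (an
illustration, not the KVM code itself: scale_tsc() here ignores the
fractional-bit shift the real kvm_scale_tsc() performs via
mul_u64_u64_shr(), and struct vcpu_state stands in for struct kvm_vcpu):

  #include <stdint.h>

  struct vcpu_state {
          uint64_t l1_tsc_scaling_ratio;
  };

  /* Stand-in for kvm_scale_tsc(); deliberately simplified. */
  static uint64_t scale_tsc(uint64_t tsc, uint64_t ratio)
  {
          return tsc * ratio;
  }

  /*
   * With this patch, the caller samples the host TSC and passes it in,
   * so *when* the TSC was read is explicit at every call site instead
   * of being buried inside the helper.
   */
  static uint64_t compute_l1_tsc_offset(struct vcpu_state *v,
                                        uint64_t host_tsc,
                                        uint64_t target_tsc)
  {
          return target_tsc - scale_tsc(host_tsc, v->l1_tsc_scaling_ratio);
  }

  int main(void)
  {
          struct vcpu_state v = { .l1_tsc_scaling_ratio = 1 };
          uint64_t host_tsc_now = 1000;  /* would be rdtsc() in the kernel */

          /* The sampling point is now visible at the call site: */
          uint64_t offset = compute_l1_tsc_offset(&v, host_tsc_now, 5000);

          return offset == 4000 ? 0 : 1;
  }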

No functional change intended.

Signed-off-by: David Woodhouse <dwmw@xxxxxxxxxxxx>
Reviewed-by: Paul Durrant <paul@xxxxxxx>
---
 arch/x86/kvm/x86.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 980fc22ee05b..c9cbebd6a92a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2693,11 +2693,12 @@ u64 kvm_scale_tsc(u64 tsc, u64 ratio)
        return _tsc;
 }
 
-static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 host_tsc,
+                                    u64 target_tsc)
 {
        u64 tsc;
 
-       tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio);
+       tsc = kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio);
 
        return target_tsc - tsc;
 }
@@ -2859,7 +2860,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
        bool synchronizing = false;
 
        raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
-       offset = kvm_compute_l1_tsc_offset(vcpu, data);
+       offset = kvm_compute_l1_tsc_offset(vcpu, rdtsc(), data);
        ns = get_kvmclock_base_ns();
        elapsed = ns - kvm->arch.last_tsc_nsec;
 
@@ -2908,7 +2909,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
                } else {
                        u64 delta = nsec_to_cycles(vcpu, elapsed);
                        data += delta;
-                       offset = kvm_compute_l1_tsc_offset(vcpu, data);
+                       offset = kvm_compute_l1_tsc_offset(vcpu, rdtsc(), data);
                }
                matched = true;
        }
@@ -4143,7 +4144,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (msr_info->host_initiated) {
                        kvm_synchronize_tsc(vcpu, &data);
                } else if (!vcpu->arch.guest_tsc_protected) {
-                       u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset;
+                       u64 adj = kvm_compute_l1_tsc_offset(vcpu, rdtsc(), data) -
+                                 vcpu->arch.l1_tsc_offset;
                        adjust_tsc_offset_guest(vcpu, adj);
                        vcpu->arch.ia32_tsc_adjust_msr += adj;
                }
@@ -5267,7 +5269,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                        mark_tsc_unstable("KVM discovered backwards TSC");
 
                if (kvm_check_tsc_unstable()) {
-                       u64 offset = kvm_compute_l1_tsc_offset(vcpu,
+                       u64 offset = kvm_compute_l1_tsc_offset(vcpu, rdtsc(),
                                                vcpu->arch.last_guest_tsc);
                        kvm_vcpu_write_tsc_offset(vcpu, offset);
                        if (!vcpu->arch.guest_tsc_protected)
-- 
2.51.0