|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v4 20/30] KVM: x86: Kill last_tsc_{nsec,write,offset} fields
From: David Woodhouse <dwmw@xxxxxxxxxxxx>
These pointlessly duplicate the cur_tsc_{nsec,write,offset} values.
The only place they were used was where the TSC is stable and a new
vCPU is being synchronized to the previous setting, in which case the
cur_tsc_* value is definitely identical.
Rename last_tsc_khz and last_tsc_scaling_ratio to cur_tsc_khz and
cur_tsc_scaling_ratio respectively, since they are properties of the
current TSC generation.
Signed-off-by: David Woodhouse <dwmw@xxxxxxxxxxxx>
Reviewed-by: Paul Durrant <paul@xxxxxxx>
---
arch/x86/include/asm/kvm_host.h | 7 ++-----
arch/x86/kvm/x86.c | 32 ++++++++++++++------------------
2 files changed, 16 insertions(+), 23 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 5348fd5ea3f3..59298a8f78eb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1486,11 +1486,8 @@ struct kvm_arch {
* preemption-disabled region, so it must be a raw spinlock.
*/
raw_spinlock_t tsc_write_lock;
- u64 last_tsc_nsec;
- u64 last_tsc_write;
- u32 last_tsc_khz;
- u64 last_tsc_offset;
- u64 last_tsc_scaling_ratio;
+ u32 cur_tsc_khz;
+ u64 cur_tsc_scaling_ratio;
u64 cur_tsc_nsec;
u64 cur_tsc_write;
u64 cur_tsc_offset;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 097df58749c3..3c68d2a4c8d0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2813,14 +2813,12 @@ static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
vcpu->kvm->arch.user_set_tsc = true;
/*
- * We also track th most recent recorded KHZ, write and time to
- * allow the matching interval to be extended at each write.
+ * Track the TSC frequency, scaling ratio, and offset for the current
+ * generation. These are used to detect matching TSC writes and to
+ * compute the guest TSC from the host clock.
*/
- kvm->arch.last_tsc_nsec = ns;
- kvm->arch.last_tsc_write = tsc;
- kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
- kvm->arch.last_tsc_offset = offset;
- kvm->arch.last_tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
+ kvm->arch.cur_tsc_khz = vcpu->arch.virtual_tsc_khz;
+ kvm->arch.cur_tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
vcpu->arch.last_guest_tsc = tsc;
@@ -2833,8 +2831,6 @@ static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
* nanosecond time, offset, and write, so if TSCs are in
* sync, we can match exact offset, and if not, we can match
* exact software computation in compute_guest_tsc()
- *
- * These values are tracked in kvm->arch.cur_xxx variables.
*/
kvm->arch.cur_tsc_generation++;
kvm->arch.cur_tsc_nsec = ns;
@@ -2873,7 +2869,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
offset = kvm_compute_l1_tsc_offset(vcpu, host_tsc, data);
- elapsed = ns - kvm->arch.last_tsc_nsec;
+ elapsed = ns - kvm->arch.cur_tsc_nsec;
if (vcpu->arch.virtual_tsc_khz) {
if (data == 0) {
@@ -2883,7 +2879,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
*/
synchronizing = true;
} else if (kvm->arch.user_set_tsc) {
- u64 tsc_exp = kvm->arch.last_tsc_write +
+ u64 tsc_exp = kvm->arch.cur_tsc_write +
nsec_to_cycles(vcpu, elapsed);
u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
/*
@@ -2914,14 +2910,14 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
* it's better to try to match offsets from the beginning.
*/
if (synchronizing &&
- vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
+ vcpu->arch.virtual_tsc_khz == kvm->arch.cur_tsc_khz) {
/*
* If synchronizing, the "last written" TSC value/time
* recorded by __kvm_synchronize_tsc() should not change
* (i.e. should be precisely the same as the existing
* generation).
*/
- data = kvm->arch.last_tsc_write;
+ data = kvm->arch.cur_tsc_write;
if (!kvm_check_tsc_unstable()) {
offset = kvm->arch.cur_tsc_offset;
@@ -3206,7 +3202,7 @@ static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
* get_kvmclock() to compute kvmclock from the host TSC
* without needing a vCPU reference.
*/
- ka->master_tsc_scaling_ratio = ka->last_tsc_scaling_ratio;
+ ka->master_tsc_scaling_ratio = ka->cur_tsc_scaling_ratio;
tsc_hz = (u64)get_cpu_tsc_khz() * 1000;
if (tsc_hz && kvm_caps.has_tsc_control)
tsc_hz = kvm_scale_tsc(tsc_hz,
@@ -6075,8 +6071,8 @@ static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
matched = (vcpu->arch.virtual_tsc_khz &&
- kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz &&
- kvm->arch.last_tsc_offset == offset);
+ kvm->arch.cur_tsc_khz == vcpu->arch.virtual_tsc_khz &&
+ kvm->arch.cur_tsc_offset == offset);
tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) +
offset;
ns = get_kvmclock_base_ns();
@@ -13520,8 +13516,8 @@ int kvm_arch_enable_virtualization_cpu(void)
* you may have some problem. Solving this issue is
* left as an exercise to the reader.
*/
- kvm->arch.last_tsc_nsec = 0;
- kvm->arch.last_tsc_write = 0;
+ kvm->arch.cur_tsc_nsec = 0;
+ kvm->arch.cur_tsc_write = 0;
}
}
--
2.51.0
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |