[Xen-devel] [PATCH v2 02/10] hvm/hpet: Only call guest_time_hpet(h) one time per action.
This call is expensive and will cause extra time to pass.  The
software-developers-hpet-spec-1-0a.pdf does not say how long it takes,
after the main counter is enabled, before the first change of the
master clock.  Therefore multiple calls to guest_time_hpet(h) are not
needed.  Since the timers are started in a loop, each one's start time
would differ across the multiple calls.  On real hardware there is no
such per-timer delta.

Without this change it is possible for an HVM guest running Linux to
get the message:

    ..MP-BIOS bug: 8254 timer not connected to IO-APIC

on the guest console(s), and the guest will panic.  The Xen hypervisor
console will also be flooded with:

    vioapic.c:352:d1 Unsupported delivery mode 7
    vioapic.c:352:d1 Unsupported delivery mode 7
    vioapic.c:352:d1 Unsupported delivery mode 7

Signed-off-by: Don Slutz <dslutz@xxxxxxxxxxx>
---
 xen/arch/x86/hvm/hpet.c | 34 ++++++++++++++++++++++------------
 1 file changed, 22 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
index 4324b52..16c0f07 100644
--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -73,17 +73,18 @@
     ((timer_config(h, n) & HPET_TN_INT_ROUTE_CAP_MASK) \
         >> HPET_TN_INT_ROUTE_CAP_SHIFT)
 
-static inline uint64_t hpet_read_maincounter(HPETState *h)
+static inline uint64_t hpet_read_maincounter(HPETState *h, uint64_t guest_time)
 {
     ASSERT(spin_is_locked(&h->lock));
 
     if ( hpet_enabled(h) )
-        return guest_time_hpet(h) + h->mc_offset;
+        return guest_time + h->mc_offset;
     else
         return h->hpet.mc64;
 }
 
-static uint64_t hpet_get_comparator(HPETState *h, unsigned int tn)
+static uint64_t hpet_get_comparator(HPETState *h, unsigned int tn,
+                                    uint64_t guest_time)
 {
     uint64_t comparator;
     uint64_t elapsed;
@@ -95,7 +96,8 @@ static uint64_t hpet_get_comparator(HPETState *h, unsigned int tn)
         uint64_t period = h->hpet.period[tn];
         if (period)
         {
-            elapsed = hpet_read_maincounter(h) + period - 1 - comparator;
+            elapsed = hpet_read_maincounter(h, guest_time) +
+                period - 1 - comparator;
             comparator += (elapsed / period) * period;
             h->hpet.comparator64[tn] = comparator;
         }
@@ -120,7 +122,7 @@ static inline uint64_t hpet_read64(HPETState *h, unsigned long addr)
     case HPET_STATUS:
         return h->hpet.isr;
     case HPET_COUNTER:
-        return hpet_read_maincounter(h);
+        return hpet_read_maincounter(h, guest_time_hpet(h));
     case HPET_Tn_CFG(0):
     case HPET_Tn_CFG(1):
     case HPET_Tn_CFG(2):
@@ -128,7 +130,7 @@ static inline uint64_t hpet_read64(HPETState *h, unsigned long addr)
     case HPET_Tn_CMP(0):
     case HPET_Tn_CMP(1):
     case HPET_Tn_CMP(2):
-        return hpet_get_comparator(h, HPET_TN(CMP, addr));
+        return hpet_get_comparator(h, HPET_TN(CMP, addr), guest_time_hpet(h));
     case HPET_Tn_ROUTE(0):
     case HPET_Tn_ROUTE(1):
     case HPET_Tn_ROUTE(2):
@@ -194,18 +196,19 @@ static void hpet_stop_timer(HPETState *h, unsigned int tn)
     destroy_periodic_time(&h->pt[tn]);
     /* read the comparator to get it updated so a read while stopped will
      * return the expected value. */
-    hpet_get_comparator(h, tn);
+    hpet_get_comparator(h, tn, guest_time_hpet(h));
 }
 
 /* the number of HPET tick that stands for
  * 1/(2^10) second, namely, 0.9765625 milliseconds */
 #define HPET_TINY_TIME_SPAN  ((h->stime_freq >> 10) / STIME_PER_HPET_TICK)
 
-static void hpet_set_timer(HPETState *h, unsigned int tn)
+static void hpet_set_timer(HPETState *h, unsigned int tn, int mc_starting)
 {
     uint64_t tn_cmp, cur_tick, diff;
     unsigned int irq;
     unsigned int oneshot;
+    uint64_t guest_time;
 
     ASSERT(tn < HPET_TIMER_NUM);
     ASSERT(spin_is_locked(&h->lock));
@@ -220,8 +223,13 @@ static void hpet_set_timer(HPETState *h, unsigned int tn)
     if ( !timer_enabled(h, tn) )
         return;
 
-    tn_cmp   = hpet_get_comparator(h, tn);
-    cur_tick = hpet_read_maincounter(h);
+    if ( mc_starting )
+        guest_time = h->hpet.mc64 - h->mc_offset;
+    else
+        guest_time = guest_time_hpet(h);
+
+    tn_cmp   = hpet_get_comparator(h, tn, guest_time);
+    cur_tick = hpet_read_maincounter(h, guest_time);
     if ( timer_is_32bit(h, tn) )
     {
         tn_cmp   = (uint32_t)tn_cmp;
@@ -283,6 +291,7 @@ static int hpet_write(
 #define set_stop_timer(n)    (__set_bit((n), &stop_timers))
 #define set_start_timer(n)   (__set_bit((n), &start_timers))
 #define set_restart_timer(n) (set_stop_timer(n),set_start_timer(n))
+    int mc_starting = 0;
 
     addr &= HPET_MMAP_SIZE-1;
 
@@ -307,6 +316,7 @@ static int hpet_write(
         {
             /* Enable main counter and interrupt generation. */
             h->mc_offset = h->hpet.mc64 - guest_time_hpet(h);
+            mc_starting = 1;
             for ( i = 0; i < HPET_TIMER_NUM; i++ )
             {
                 h->hpet.comparator64[i] =
@@ -441,7 +451,7 @@ static int hpet_write(
     {
         i = find_first_set_bit(start_timers);
         __clear_bit(i, &start_timers);
-        hpet_set_timer(h, i);
+        hpet_set_timer(h, i, mc_starting);
     }
 
 #undef set_stop_timer
@@ -562,7 +572,7 @@ static int hpet_load(struct domain *d, hvm_domain_context_t *h)
     if ( hpet_enabled(hp) )
        for ( i = 0; i < HPET_TIMER_NUM; i++ )
            if ( timer_enabled(hp, i) )
-                hpet_set_timer(hp, i);
+                hpet_set_timer(hp, i, 1);
 
     spin_unlock(&hp->lock);
-- 
1.8.4
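
For readers less familiar with the HPET emulation code, the sketch below is a
minimal, stand-alone illustration (not Xen code; read_expensive_clock(),
setup_timer() and NUM_TIMERS are invented names for this example) of the
pattern the patch applies: sample the expensive clock once and hand the same
value to every timer started in the loop, instead of re-reading it per
iteration.

    /*
     * Illustrative sketch only: re-reading an expensive clock inside a
     * per-timer loop gives each timer a slightly different start time;
     * hoisting the read gives all timers the same time base.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define NUM_TIMERS 3

    /* Stand-in for an expensive time source: each call returns a later value. */
    static uint64_t read_expensive_clock(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    static void setup_timer(unsigned int tn, uint64_t now)
    {
        printf("timer %u starts at %llu\n", tn, (unsigned long long)now);
    }

    int main(void)
    {
        unsigned int i;
        uint64_t now;

        /* Skewed: each timer sees a slightly different "now". */
        for ( i = 0; i < NUM_TIMERS; i++ )
            setup_timer(i, read_expensive_clock());

        /* Consistent: read once, pass the same value to every timer. */
        now = read_expensive_clock();
        for ( i = 0; i < NUM_TIMERS; i++ )
            setup_timer(i, now);

        return 0;
    }

The second loop mirrors what hpet_write() does after this patch: a single
guest_time value is computed per write and reused for every hpet_set_timer()
call, so no per-timer delta is introduced.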