
[Xen-devel] [PATCH] x86/HVM: also separate kernel/user vTSC statistics

It is unclear why this was originally done for PV only.

While at it, limit this statistics collection to debug or
performance-counter-enabled builds.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
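
For reference, this is the kernel/user classification the new helper
implements, unrolled into plain if/else form. It is purely an
illustrative sketch, not part of the patch, and the helper name is made
up; the mode numbering follows hvm_guest_x86_mode(), i.e. 0 = real mode,
1 = virtual-8086, and 2/4/8 = 16-/32-/64-bit protected mode.

#include <stdbool.h>

/* Sketch only: does this guest RDTSC count as user or kernel? */
static bool vtsc_from_user(unsigned int mode, unsigned int ss_dpl)
{
    if ( mode == 1 )       /* vm86 code always runs at CPL 3 */
        return true;
    if ( mode == 0 )       /* real mode has no privilege levels */
        return false;
    return ss_dpl != 0;    /* protected mode: SS.DPL equals CPL */
}

In the patch below this collapses into a single switch(), with the
"case 1" and "case 0" labels placed inside and after the DPL check
respectively, so that the two increments are shared between all five
modes.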

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -319,7 +319,6 @@ u64 hvm_get_guest_tsc_fixed(struct vcpu 
     {
         tsc = hvm_get_guest_time_fixed(v, at_tsc);
         tsc = gtime_to_gtsc(v->domain, tsc);
-        v->domain->arch.vtsc_kerncount++;
     }
     else if ( at_tsc )
     {
@@ -4368,12 +4367,43 @@ void hvm_cpuid(unsigned int input, unsig
     }
 }
 
+static uint64_t _hvm_rdtsc_intercept(void)
+{
+    struct vcpu *curr = current;
+#if !defined(NDEBUG) || defined(PERF_COUNTERS)
+    struct domain *currd = curr->domain;
+
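+    /* Modes per hvm_guest_x86_mode(): 0 = real (counted as kernel),
+     * 1 = vm86 (always CPL 3, hence user); else SS.DPL (== CPL) decides. */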
+    if ( currd->arch.vtsc )
+        switch ( hvm_guest_x86_mode(curr) )
+        {
+            struct segment_register sreg;
+
+        case 8:
+        case 4:
+        case 2:
+            hvm_get_segment_register(curr, x86_seg_ss, &sreg);
+            if ( unlikely(sreg.attr.fields.dpl) )
+            {
+        case 1:
+                currd->arch.vtsc_usercount++;
+                break;
+            }
+            /* fall through */
+        case 0:
+            currd->arch.vtsc_kerncount++;
+            break;
+        }
+#endif
+
+    return hvm_get_guest_tsc(curr);
+}
+
 void hvm_rdtsc_intercept(struct cpu_user_regs *regs)
 {
-    uint64_t tsc;
-    struct vcpu *v = current;
+    uint64_t tsc = _hvm_rdtsc_intercept();
 
-    tsc = hvm_get_guest_tsc(v);
     regs->eax = (uint32_t)tsc;
     regs->edx = (uint32_t)(tsc >> 32);
 
@@ -4401,7 +4429,7 @@ int hvm_msr_read_intercept(unsigned int 
         break;
 
     case MSR_IA32_TSC:
-        *msr_content = hvm_get_guest_tsc(v);
+        *msr_content = _hvm_rdtsc_intercept();
         break;
 
     case MSR_IA32_TSC_ADJUST:
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -1777,10 +1777,12 @@ void pv_soft_rdtsc(struct vcpu *v, struc
 
     spin_lock(&d->arch.vtsc_lock);
 
+#if !defined(NDEBUG) || defined(PERF_COUNTERS)
     if ( guest_kernel_mode(v, regs) )
         d->arch.vtsc_kerncount++;
     else
         d->arch.vtsc_usercount++;
+#endif
 
     if ( (int64_t)(now - d->arch.vtsc_last) > 0 )
         d->arch.vtsc_last = now;
@@ -2033,17 +2035,15 @@ static void dump_softtsc(unsigned char k
             printk(",khz=%"PRIu32, d->arch.tsc_khz);
         if ( d->arch.incarnation )
             printk(",inc=%"PRIu32, d->arch.incarnation);
+#if !defined(NDEBUG) || defined(PERF_COUNTERS)
         if ( !(d->arch.vtsc_kerncount | d->arch.vtsc_usercount) )
-        {
             printk("\n");
-            continue;
-        }
-        if ( is_hvm_domain(d) )
-            printk(",vtsc count: %"PRIu64" total\n",
-                   d->arch.vtsc_kerncount);
         else
             printk(",vtsc count: %"PRIu64" kernel, %"PRIu64" user\n",
                    d->arch.vtsc_kerncount, d->arch.vtsc_usercount);
+#else
+        printk("\n");
+#endif
         domcnt++;
     }
 
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -319,8 +319,10 @@ struct arch_domain
     struct time_scale ns_to_vtsc; /* scaling for certain emulated cases */
     uint32_t incarnation;    /* incremented every restore or live migrate
                                 (possibly other cases in the future */
-    uint64_t vtsc_kerncount; /* for hvm, counts all vtsc */
-    uint64_t vtsc_usercount; /* not used for hvm */
+#if !defined(NDEBUG) || defined(PERF_COUNTERS)
+    uint64_t vtsc_kerncount;
+    uint64_t vtsc_usercount;
+#endif
 
     /* Pseudophysical e820 map (XENMEM_memory_map).  */
     spinlock_t e820_lock;
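
Note that the guard has to appear at every site touching the two
counters, since the fields themselves now vanish from struct arch_domain
in release builds. A minimal stand-alone illustration of the pattern
(sketch only, with made-up names):

#include <stdbool.h>
#include <stdint.h>

struct stats {
#if !defined(NDEBUG) || defined(PERF_COUNTERS)
    uint64_t kern, user;   /* exist only in debug/perf-counter builds */
#endif
};

static void account(struct stats *s, bool user_mode)
{
#if !defined(NDEBUG) || defined(PERF_COUNTERS)
    if ( user_mode )       /* bookkeeping compiles away otherwise */
        s->user++;
    else
        s->kern++;
#endif
    /* the emulation work itself would happen here, unconditionally */
}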



