Re: [Xen-devel] [PATCH] vsprintf: introduce %pv extended format specifier to print domain/vcpu ID pair



On 25/02/14 10:00, Jan Beulich wrote:
... in a simplified and consistent way.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

That is a remarkable reduction in code.

Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
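
For anyone picking this up: usage is just a matter of passing the
'struct vcpu *' itself. A minimal, hypothetical illustration (not taken
from the patch) of the before/after shape of a call site:

    /* Old style: two explicit arguments per vcpu. */
    printk(XENLOG_INFO "d%dv%d: something happened\n",
           v->domain->domain_id, v->vcpu_id);

    /* New style: one pointer argument; output is e.g. "d0v1". */
    printk(XENLOG_INFO "%pv: something happened\n", v);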


--- a/docs/misc/printk-formats.txt
+++ b/docs/misc/printk-formats.txt
@@ -15,3 +15,6 @@ Symbol/Function pointers:
 
        In the case that an appropriate symbol name can't be found, %p[sS] will
        fall back to '%p' and print the address in hex.
+
+       %pv     Domain and vCPU ID from a 'struct vcpu *' (printed as
+               "d<domid>v<vcpuid>")
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -82,10 +82,9 @@ int vmce_restore_vcpu(struct vcpu *v, co
     if ( ctxt->caps & ~guest_mcg_cap & ~MCG_CAP_COUNT & ~MCG_CTL_P )
     {
         dprintk(XENLOG_G_ERR, "%s restore: unsupported MCA capabilities"
-                " %#" PRIx64 " for d%d:v%u (supported: %#Lx)\n",
+                " %#" PRIx64 " for %pv (supported: %#Lx)\n",
                 has_hvm_container_vcpu(v) ? "HVM" : "PV", ctxt->caps,
-                v->domain->domain_id, v->vcpu_id,
-                guest_mcg_cap & ~MCG_CAP_COUNT);
+                v, guest_mcg_cap & ~MCG_CAP_COUNT);
         return -EPERM;
     }
 
@@ -361,15 +360,13 @@ int inject_vmce(struct domain *d, int vc
               guest_has_trap_callback(d, v->vcpu_id, TRAP_machine_check)) &&
              !test_and_set_bool(v->mce_pending) )
         {
-            mce_printk(MCE_VERBOSE, "MCE: inject vMCE to d%d:v%d\n",
-                       d->domain_id, v->vcpu_id);
+            mce_printk(MCE_VERBOSE, "MCE: inject vMCE to %pv\n", v);
             vcpu_kick(v);
             ret = 0;
         }
         else
         {
-            mce_printk(MCE_QUIET, "Failed to inject vMCE to d%d:v%d\n",
-                       d->domain_id, v->vcpu_id);
+            mce_printk(MCE_QUIET, "Failed to inject vMCE to %pv\n", v);
             ret = -EBUSY;
             break;
         }
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -734,10 +734,10 @@ void vmx_get_segment_register(struct vcp
         if ( !warned )
         {
             warned = 1;
-            printk(XENLOG_WARNING "Segment register inaccessible for d%dv%d\n"
+            printk(XENLOG_WARNING "Segment register inaccessible for %pv\n"
                    "(If you see this outside of debugging activity,"
                    " please report to xen-devel@xxxxxxxxxxxxxxxxxxxx)\n",
-                   v->domain->domain_id, v->vcpu_id);
+                   v);
         }
         memset(reg, 0, sizeof(*reg));
         return;
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -769,8 +769,8 @@ static int core2_vpmu_initialise(struct 
         if ( !cpu_has(c, X86_FEATURE_DTES64) )
         {
             printk(XENLOG_G_WARNING "CPU doesn't support 64-bit DS Area"
-                   " - Debug Store disabled for d%d:v%d\n",
-                   v->domain->domain_id, v->vcpu_id);
+                   " - Debug Store disabled for %pv\n",
+                   v);
             goto func_out;
         }
         vpmu_set(vpmu, VPMU_CPU_HAS_DS);
@@ -780,8 +780,8 @@ static int core2_vpmu_initialise(struct 
             /* If BTS_UNAVAIL is set reset the DS feature. */
             vpmu_reset(vpmu, VPMU_CPU_HAS_DS);
             printk(XENLOG_G_WARNING "CPU has set BTS_UNAVAIL"
-                   " - Debug Store disabled for d%d:v%d\n",
-                   v->domain->domain_id, v->vcpu_id);
+                   " - Debug Store disabled for %pv\n",
+                   v);
         }
         else
         {
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -786,8 +786,7 @@ static void oos_hash_remove(struct vcpu 
     mfn_t *oos;
     struct domain *d = v->domain;
 
-    SHADOW_PRINTK("D%dV%d gmfn %lx\n",
-                  v->domain->domain_id, v->vcpu_id, mfn_x(gmfn)); 
+    SHADOW_PRINTK("%pv gmfn %lx\n", v, mfn_x(gmfn));
 
     for_each_vcpu(d, v) 
     {
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1070,8 +1070,8 @@ void do_machine_check(struct cpu_user_re
 static void reserved_bit_page_fault(
     unsigned long addr, struct cpu_user_regs *regs)
 {
-    printk("d%d:v%d: reserved bit in page table (ec=%04X)\n",
-           current->domain->domain_id, current->vcpu_id, regs->error_code);
+    printk("%pv: reserved bit in page table (ec=%04X)\n",
+           current, regs->error_code);
     show_page_walk(addr);
     show_execution_state(regs);
 }
@@ -1113,8 +1113,7 @@ struct trap_bounce *propagate_page_fault
         tb->flags |= TBF_INTERRUPT;
     if ( unlikely(null_trap_bounce(v, tb)) )
     {
-        printk("d%d:v%d: unhandled page fault (ec=%04X)\n",
-               v->domain->domain_id, v->vcpu_id, error_code);
+        printk("%pv: unhandled page fault (ec=%04X)\n", v, error_code);
         show_page_walk(addr);
     }
 
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -65,11 +65,6 @@ struct vcpu *idle_vcpu[NR_CPUS] __read_m
 
 vcpu_info_t dummy_vcpu_info;
 
-int current_domain_id(void)
-{
-    return current->domain->domain_id;
-}
-
 static void __domain_finalise_shutdown(struct domain *d)
 {
     struct vcpu *v;
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -89,9 +89,8 @@ void dump_execstate(struct cpu_user_regs
 
     if ( !is_idle_vcpu(current) )
     {
-        printk("*** Dumping CPU%u guest state (d%d:v%d): ***\n",
-               smp_processor_id(), current->domain->domain_id,
-               current->vcpu_id);
+        printk("*** Dumping CPU%u guest state (%pv): ***\n",
+               smp_processor_id(), current);
         show_execution_state(guest_cpu_user_regs());
         printk("\n");
     }
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -413,9 +413,7 @@ __runq_insert(struct list_head *runq, st
     struct list_head *iter;
     int pos = 0;
 
-    d2printk("rqi d%dv%d\n",
-           svc->vcpu->domain->domain_id,
-           svc->vcpu->vcpu_id);
+    d2printk("rqi %pv\n", svc->vcpu);
 
     BUG_ON(&svc->rqd->runq != runq);
     /* Idle vcpus not allowed on the runqueue anymore */
@@ -429,10 +427,7 @@ __runq_insert(struct list_head *runq, st
 
         if ( svc->credit > iter_svc->credit )
         {
-            d2printk(" p%d d%dv%d\n",
-                   pos,
-                   iter_svc->vcpu->domain->domain_id,
-                   iter_svc->vcpu->vcpu_id);
+            d2printk(" p%d %pv\n", pos, iter_svc->vcpu);
             break;
         }
         pos++;
@@ -492,11 +487,7 @@ runq_tickle(const struct scheduler *ops,
     cpumask_t mask;
     struct csched_vcpu * cur;
 
-    d2printk("rqt d%dv%d cd%dv%d\n",
-             new->vcpu->domain->domain_id,
-             new->vcpu->vcpu_id,
-             current->domain->domain_id,
-             current->vcpu_id);
+    d2printk("rqt %pv curr %pv\n", new->vcpu, current);
 
     BUG_ON(new->vcpu->processor != cpu);
     BUG_ON(new->rqd != rqd);
@@ -681,10 +672,7 @@ void burn_credits(struct csched_runqueue
         t2c_update(rqd, delta, svc);
         svc->start_time = now;
 
-        d2printk("b d%dv%d c%d\n",
-                 svc->vcpu->domain->domain_id,
-                 svc->vcpu->vcpu_id,
-                 svc->credit);
+        d2printk("b %pv c%d\n", svc->vcpu, svc->credit);
     } else {
         d2printk("%s: Time went backwards? now %"PRI_stime" start %"PRI_stime"\n",
                __func__, now, svc->start_time);
@@ -871,11 +859,9 @@ static void
 csched_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
 {
     struct csched_vcpu *svc = vc->sched_priv;
-    struct domain * const dom = vc->domain;
     struct csched_dom * const sdom = svc->sdom;
 
-    printk("%s: Inserting d%dv%d\n",
-           __func__, dom->domain_id, vc->vcpu_id);
+    printk("%s: Inserting %pv\n", __func__, vc);
 
     /* NB: On boot, idle vcpus are inserted before alloc_pdata() has
      * been called for that cpu.
@@ -965,7 +951,7 @@ csched_vcpu_wake(const struct scheduler 
 
     /* Schedule lock should be held at this point. */
 
-    d2printk("w d%dv%d\n", vc->domain->domain_id, vc->vcpu_id);
+    d2printk("w %pv\n", vc);
 
     BUG_ON( is_idle_vcpu(vc) );
 
@@ -1074,7 +1060,7 @@ choose_cpu(const struct scheduler *ops, 
     {
         if ( test_and_clear_bit(__CSFLAG_runq_migrate_request, &svc->flags) )
         {
-            d2printk("d%dv%d -\n", svc->vcpu->domain->domain_id, svc->vcpu->vcpu_id);
+            d2printk("%pv -\n", svc->vcpu);
             clear_bit(__CSFLAG_runq_migrate_request, &svc->flags);
         }
         /* Leave it where it is for now.  When we actually pay attention
@@ -1094,7 +1080,7 @@ choose_cpu(const struct scheduler *ops, 
         }
         else
         {
-            d2printk("d%dv%d +\n", svc->vcpu->domain->domain_id, svc->vcpu->vcpu_id);
+            d2printk("%pv +\n", svc->vcpu);
             new_cpu = cpumask_cycle(vc->processor, &svc->migrate_rqd->active);
             goto out_up;
         }
@@ -1203,8 +1189,7 @@ void migrate(const struct scheduler *ops
 {
     if ( test_bit(__CSFLAG_scheduled, &svc->flags) )
     {
-        d2printk("d%dv%d %d-%d a\n", svc->vcpu->domain->domain_id, svc->vcpu->vcpu_id,
-                 svc->rqd->id, trqd->id);
+        d2printk("%pv %d-%d a\n", svc->vcpu, svc->rqd->id, trqd->id);
         /* It's running; mark it to migrate. */
         svc->migrate_rqd = trqd;
         set_bit(_VPF_migrating, &svc->vcpu->pause_flags);
@@ -1214,8 +1199,7 @@ void migrate(const struct scheduler *ops
     {
         int on_runq=0;
         /* It's not running; just move it */
-        d2printk("d%dv%d %d-%d i\n", svc->vcpu->domain->domain_id, svc->vcpu->vcpu_id,
-                 svc->rqd->id, trqd->id);
+        d2printk("%pv %d-%d i\n", svc->vcpu, svc->rqd->id, trqd->id);
         if ( __vcpu_on_runq(svc) )
         {
             __runq_remove(svc);
@@ -1662,11 +1646,7 @@ csched_schedule(
     SCHED_STAT_CRANK(schedule);
     CSCHED_VCPU_CHECK(current);
 
-    d2printk("sc p%d c d%dv%d now %"PRI_stime"\n",
-             cpu,
-             scurr->vcpu->domain->domain_id,
-             scurr->vcpu->vcpu_id,
-             now);
+    d2printk("sc p%d c %pv now %"PRI_stime"\n", cpu, scurr->vcpu, now);
 
     BUG_ON(!cpumask_test_cpu(cpu, &CSCHED_PRIV(ops)->initialized));
 
@@ -1693,12 +1673,11 @@ csched_schedule(
                 }
             }
         }
-        printk("%s: pcpu %d rq %d, but scurr d%dv%d assigned to "
+        printk("%s: pcpu %d rq %d, but scurr %pv assigned to "
                "pcpu %d rq %d!\n",
                __func__,
                cpu, this_rqi,
-               scurr->vcpu->domain->domain_id, scurr->vcpu->vcpu_id,
-               scurr->vcpu->processor, other_rqi);
+               scurr->vcpu, scurr->vcpu->processor, other_rqi);
     }
     BUG_ON(!is_idle_vcpu(scurr->vcpu) && scurr->rqd != rqd);
 
@@ -1755,12 +1734,8 @@ csched_schedule(
             __runq_remove(snext);
             if ( snext->vcpu->is_running )
             {
-                printk("p%d: snext d%dv%d running on p%d! scurr d%dv%d\n",
-                       cpu,
-                       snext->vcpu->domain->domain_id, snext->vcpu->vcpu_id,
-                       snext->vcpu->processor,
-                       scurr->vcpu->domain->domain_id,
-                       scurr->vcpu->vcpu_id);
+                printk("p%d: snext %pv running on p%d! scurr %pv\n",
+                       cpu, snext->vcpu, snext->vcpu->processor, scurr->vcpu);
                 BUG();
             }
             set_bit(__CSFLAG_scheduled, &snext->flags);
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -559,8 +559,7 @@ void restore_vcpu_affinity(struct domain
 
         if ( v->affinity_broken )
         {
-            printk(XENLOG_DEBUG "Restoring affinity for d%dv%d\n",
-                   d->domain_id, v->vcpu_id);
+            printk(XENLOG_DEBUG "Restoring affinity for %pv\n", v);
             cpumask_copy(v->cpu_affinity, v->cpu_affinity_saved);
             v->affinity_broken = 0;
         }
@@ -608,8 +607,7 @@ int cpu_disable_scheduler(unsigned int c
             if ( cpumask_empty(&online_affinity) &&
                  cpumask_test_cpu(cpu, v->cpu_affinity) )
             {
-                printk(XENLOG_DEBUG "Breaking affinity for d%dv%d\n",
-                        d->domain_id, v->vcpu_id);
+                printk(XENLOG_DEBUG "Breaking affinity for %pv\n", v);
 
                 if (system_state == SYS_STATE_suspend)
                 {
--- a/xen/common/vsprintf.c
+++ b/xen/common/vsprintf.c
@@ -19,6 +19,7 @@
 #include <xen/ctype.h>
 #include <xen/symbols.h>
 #include <xen/lib.h>
+#include <xen/sched.h>
 #include <asm/div64.h>
 #include <asm/page.h>
 
@@ -301,6 +302,19 @@ static char *pointer(char *str, char *en
 
         return str;
     }
+
+    case 'v': /* d<domain-id>v<vcpu-id> from a struct vcpu */
+    {
+        const struct vcpu *v = arg;
+
+        ++*fmt_ptr;
+        if ( str <= end )
+            *str = 'd';
+        str = number(str + 1, end, v->domain->domain_id, 10, -1, -1, 0);
+        if ( str <= end )
+            *str = 'v';
+        return number(str + 1, end, v->vcpu_id, 10, -1, -1, 0);
+    }
     }
 
     if ( field_width == -1 )
--- a/xen/include/xen/config.h
+++ b/xen/include/xen/config.h
@@ -74,12 +74,11 @@
 
 #ifndef __ASSEMBLY__
 
-int current_domain_id(void);
 #define dprintk(_l, _f, _a...)                              \
     printk(_l "%s:%d: " _f, __FILE__ , __LINE__ , ## _a )
 #define gdprintk(_l, _f, _a...)                             \
-    printk(XENLOG_GUEST _l "%s:%d:d%d " _f, __FILE__,       \
-           __LINE__, current_domain_id() , ## _a )
+    printk(XENLOG_GUEST _l "%s:%d:%pv " _f, __FILE__,       \
+           __LINE__, current, ## _a )
 
 #endif /* !__ASSEMBLY__ */
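
For reference, the output the new 'v' case produces ("d<domid>v<vcpuid>",
decimal, no padding) can be mimicked with plain snprintf. A self-contained
sketch, using a made-up stand-in structure rather than Xen's 'struct vcpu'
(illustrative only, not code from the tree):

    #include <stdio.h>

    /* Hypothetical stand-in for the two IDs %pv reads from a vcpu. */
    struct fake_vcpu {
        int domain_id;        /* v->domain->domain_id in Xen */
        unsigned int vcpu_id; /* v->vcpu_id in Xen */
    };

    /* Produce the same "d<domid>v<vcpuid>" string the %pv handler emits. */
    static int format_dv(char *buf, size_t size, const struct fake_vcpu *v)
    {
        return snprintf(buf, size, "d%dv%u", v->domain_id, v->vcpu_id);
    }

    int main(void)
    {
        struct fake_vcpu v = { .domain_id = 0, .vcpu_id = 1 };
        char buf[16];

        format_dv(buf, sizeof(buf), &v);
        printf("%s\n", buf); /* prints "d0v1" */
        return 0;
    }

The gdprintk() change at the end relies on the same mechanism: 'current'
is a 'struct vcpu *', so the macro can hand it straight to %pv instead of
going through the now-removed current_domain_id() helper.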
 




_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
