[Xen-devel] [PATCH v2 2/2] sched_credit2.c: runqueue_per_core code



This patch does the following:
- Add the runqueue_per_core code
- Add a boot parameter to select the runqueue
- Update xen-command-line.markdown
 
Signed-off-by: Uma Sharma <uma.sharma523@xxxxxxxxx>
---
 docs/misc/xen-command-line.markdown |  7 +++++++
 xen/common/sched_credit2.c          | 31 ++++++++++++++++++++++++-------
 2 files changed, 31 insertions(+), 7 deletions(-)
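
For reference, the new option is selected on the hypervisor command line. A
minimal sketch of a boot entry is shown below (the GRUB syntax, kernel path
and the `sched=credit2` selector are illustrative only and not part of this
patch; only `credit2_runqueue=core|socket` is introduced here):

    multiboot /boot/xen.gz sched=credit2 credit2_runqueue=socket

If the parameter is omitted or not recognized, the scheduler falls back to
per-core runqueues.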

diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
index 63871cb..12e5c01 100644
--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown
@@ -460,6 +460,13 @@ combination with the `low_crashinfo` command line option.
 ### credit2\_load\_window\_shift
 > `= <integer>`
 
+### credit2\_runqueue
+> `= core | socket`
+
+> Default: `core`
+
+Choose how the credit2 scheduler arranges its runqueues: one runqueue per physical core (`core`) or one runqueue per socket (`socket`).
+
 ### dbgp
 > `= ehci[ <integer> | @pci<bus>:<slot>.<func> ]`
 
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index ad0a5d4..2067b3b 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -85,8 +85,8 @@
  * to a small value, and a fixed credit is added to everyone.
  *
  * The plan is for all cores that share an L2 will share the same
- * runqueue.  At the moment, there is one global runqueue for all
- * cores.
+ * runqueue.  At the moment, the code allows the user to choose which
+ * runqueue arrangement to use; the default is one runqueue per core.
  */
 
 /*
@@ -161,10 +161,16 @@
  */
 #define __CSFLAG_runq_migrate_request 3
 #define CSFLAG_runq_migrate_request (1<<__CSFLAG_runq_migrate_request)
-
+/* CREDIT2_OPT_RUNQUEUE_*: possible values of the credit2_runqueue boot
+ * parameter, selecting per-core or per-socket runqueues. */
+#define CREDIT2_OPT_RUNQUEUE_CORE 1
+#define CREDIT2_OPT_RUNQUEUE_SOCKET 2
 
 int opt_migrate_resist=500;
 integer_param("sched_credit2_migrate_resist", opt_migrate_resist);
+char __initdata opt_credit2_runqueue_string[10] = "core";
+string_param("credit2_runqueue", opt_credit2_runqueue_string);
+int opt_credit2_runqueue = CREDIT2_OPT_RUNQUEUE_CORE;
 
 /*
  * Useful macros
@@ -1940,10 +1946,10 @@ static void init_pcpu(const struct scheduler *ops, int cpu)
 
     /* Figure out which runqueue to put it in */
     /* NB: cpu 0 doesn't get a STARTING callback, so we hard-code it to runqueue 0. */
-    if ( cpu == 0 )
-        rqi = 0;
+    if ( opt_credit2_runqueue == CREDIT2_OPT_RUNQUEUE_SOCKET )
+        rqi = cpu ? cpu_to_socket(cpu) : boot_cpu_to_socket();
     else
-        rqi = cpu_to_socket(cpu);
+        rqi = cpu ? cpu_to_core(cpu) : boot_cpu_to_core();
 
     if ( rqi < 0 )
     {
@@ -1988,7 +1994,7 @@ csched2_alloc_pdata(const struct scheduler *ops, int cpu)
 {
     /* Check to see if the cpu is online yet */
     /* Note: cpu 0 doesn't get a STARTING callback */
-    if ( cpu == 0 || cpu_to_socket(cpu) >= 0 )
+    if ( cpu == 0 || cpu_to_socket(cpu) >= 0 || cpu_to_core(cpu) >= 0 )
         init_pcpu(ops, cpu);
     else
         printk("%s: cpu %d not online yet, deferring initializatgion\n",
@@ -2109,6 +2115,17 @@ csched2_init(struct scheduler *ops)
         opt_load_window_shift = LOADAVG_WINDOW_SHIFT_MIN;
     }
 
+    /* Parse the credit2_runqueue boot parameter. */
+    if ( !strcmp(opt_credit2_runqueue_string, "socket") )
+        opt_credit2_runqueue = CREDIT2_OPT_RUNQUEUE_SOCKET;
+    else if ( strcmp(opt_credit2_runqueue_string, "core") )
+        printk("WARNING, unrecognized credit2_runqueue option %s, using core\n",
+                opt_credit2_runqueue_string);
+
+    printk("Runqueue: Using %s\n",
+            opt_credit2_runqueue == CREDIT2_OPT_RUNQUEUE_CORE ? "core" :
+            opt_credit2_runqueue_string);
+
     /* Basically no CPU information is available at this point; just
      * set up basic structures, and a callback when the CPU info is
      * available. */
-- 
1.9.1
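
As an aside, the option parsing added to csched2_init() above boils down to
the following self-contained sketch. It is illustrative only: the constants
mirror the ones defined in the patch, printf stands in for Xen's printk, and
parse_credit2_runqueue()/main() are hypothetical helpers, not code from the
patch.

    #include <stdio.h>
    #include <string.h>

    #define CREDIT2_OPT_RUNQUEUE_CORE   1
    #define CREDIT2_OPT_RUNQUEUE_SOCKET 2

    /* Map the credit2_runqueue string to one of the option constants,
     * warning and falling back to "core" for anything unrecognized. */
    static int parse_credit2_runqueue(const char *s)
    {
        if ( !strcmp(s, "socket") )
            return CREDIT2_OPT_RUNQUEUE_SOCKET;
        if ( strcmp(s, "core") )
            printf("WARNING, unrecognized credit2_runqueue option %s, using core\n", s);
        return CREDIT2_OPT_RUNQUEUE_CORE;
    }

    int main(void)
    {
        printf("socket -> %d\n", parse_credit2_runqueue("socket")); /* 2 */
        printf("core   -> %d\n", parse_credit2_runqueue("core"));   /* 1 */
        printf("node   -> %d\n", parse_credit2_runqueue("node"));   /* warns, then 1 */
        return 0;
    }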

