
Re: [Xen-Devel] [PATCH 1/1] Add .adjust_global callback


  • To: Kathy Hadley <Kathy.Hadley@xxxxxxxxxxxxxxx>, keir.fraser@xxxxxxxxxx, xen-devel@xxxxxxxxxxxxxxxxxxx
  • From: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
  • Date: Tue, 4 May 2010 10:46:30 -0500
  • Cc:
  • Delivery-date: Tue, 04 May 2010 08:47:57 -0700
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>

Keir, the new SCHEDOP_adjust_global needs to carry the cpupool_id so that we can set the "global" parameters for the right pool.  Would it make sense to overload the domid in DOMCTL, making it the cpupool_id when the command is XEN_DOMCTL_SCHEDOP_{get,put}_global_info?  Or should we put it somewhere else?  I glanced around the other hypercalls, and nothing seems much better; the cpupools stuff is in the DOMCTL file anyway.

 -George
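To make the question concrete, a minimal sketch of what that overload might look like in do_domctl(), assuming op->domain is reinterpreted as a cpupool id for the two global commands; the pool-aware sched_adjust_global() signature below is hypothetical and only for illustration:

        if ( (op->u.scheduler_op.cmd == XEN_DOMCTL_SCHEDOP_put_global_info) ||
             (op->u.scheduler_op.cmd == XEN_DOMCTL_SCHEDOP_get_global_info) )
        {
            /* Hypothetical: op->domain carries a cpupool id, not a domid,
             * for the two global commands. */
            unsigned int poolid = (unsigned int)op->domain;

            /* Hypothetical pool-aware variant of sched_adjust_global():
             * it would locate the scheduler instance serving "poolid" and
             * forward the op to that scheduler's adjust_global hook. */
            ret = sched_adjust_global(poolid, &op->u.scheduler_op);

            if ( (ret == 0) &&
                 (op->u.scheduler_op.cmd == XEN_DOMCTL_SCHEDOP_get_global_info) &&
                 copy_to_guest(u_domctl, op, 1) )
                ret = -EFAULT;
        }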

On Mon, May 3, 2010 at 10:29 PM, George Dunlap <George.Dunlap@xxxxxxxxxxxxx> wrote:
Hmm, and it also needs to be adjusted for the new cpu pools stuff.  I'll take a look at it tomorrow.
 -George




On Mon, May 3, 2010 at 4:50 PM, George Dunlap <George.Dunlap@xxxxxxxxxxxxx> wrote:
Also, the SCHEDOP needs an XSM callback, but I'm not expert enough in the XSM framework to say what it needs.

Other than that:

Acked-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
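Regarding the XSM callback mentioned above: purely for illustration, a sketch of the kind of check that might be wanted, by analogy with the existing per-domain xsm_scheduler(d) call in do_domctl(). The hook name xsm_scheduler_global() is hypothetical, and the real plumbing (dummy and Flask implementations, hook table entry) would need someone familiar with the framework:

    /* Hypothetical global-scheduler XSM hook, analogous to xsm_scheduler(d).
     * A dummy policy would simply allow the operation. */
    static inline int xsm_scheduler_global(void)
    {
        return 0;
    }

    /* In the global branch of XEN_DOMCTL_scheduler_op, before calling
     * sched_adjust_global(): */
    ret = xsm_scheduler_global();
    if ( ret )
        break;
    ret = sched_adjust_global(&op->u.scheduler_op);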


On Mon, Apr 19, 2010 at 6:54 AM, Dulloor <dulloor@xxxxxxxxx> wrote:
You don't have to explicitly set adjust_global to NULL for credit and sedf.

-dulloor
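Right: with C designated initializers, members that are not named are zero-initialized, so an unset .adjust_global function pointer is already NULL. A small standalone illustration (generic C, not Xen code):

    #include <stdio.h>

    struct ops {
        int (*adjust)(int);
        int (*adjust_global)(int);
    };

    static int my_adjust(int x) { return x; }

    /* Only .adjust is named; .adjust_global is implicitly zero (NULL). */
    static const struct ops example_ops = {
        .adjust = my_adjust,
    };

    int main(void)
    {
        printf("adjust_global is %s\n",
               example_ops.adjust_global == NULL ? "NULL" : "set");
        return 0;
    }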

On Mon, Apr 19, 2010 at 9:39 AM, Kathy Hadley <Kathy.Hadley@xxxxxxxxxxxxxxx> wrote:

Resubmitting now that I have been added to the Xen-devel mailing list.

 

Kathy Hadley

DornerWorks, Ltd.

 

From: Kathy Hadley
Sent: Friday, April 16, 2010 10:14 AM
To: 'xen-devel@xxxxxxxxxxxxxxxxxxx'
Cc: 'Keir.Fraser@xxxxxxxxxx'
Subject: [Xen-Devel] [PATCH 1/1] Add .adjust_global callback

 

This patch adds an .adjust_global scheduler callback function, which allows adjusting the global scheduler parameters (rather than just one domain’s parameters).  This patch supports the addition of an ARINC 653 scheduler (which will be submitted in a subsequent patch), and was suggested by George Dunlap and Keir Fraser in response to an earlier patch (ref: [Xen-devel] [PATCH 1/1] Xen ARINC 653 scheduler).

 

Thanks and regards,

 

 


Kathy Hadley
DornerWorks, Ltd.
Embedded Systems Engineering

3445 Lake Eastbrook Blvd SE
Grand Rapids, MI  49546

Direct: 616.389.6127

Tel:      616.245.8369

Fax:     616.245.8372

 

Kathy.Hadley@xxxxxxxxxxxxxxx

www.DornerWorks.com


Honored as one of the 2010 “Michigan 50 Companies to Watch”

 

diff -rupN a/xen/common/domctl.c b/xen/common/domctl.c
--- a/xen/common/domctl.c        2010-04-07 12:12:06.000000000 -0400
+++ b/xen/common/domctl.c     2010-04-14 10:57:11.262796000 -0400
@@ -592,22 +592,35 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
 
     case XEN_DOMCTL_scheduler_op:
     {
-        struct domain *d;
-
-        ret = -ESRCH;
-        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
-            break;
+        if ( (op->u.scheduler_op.cmd == XEN_DOMCTL_SCHEDOP_put_global_info)
+          || (op->u.scheduler_op.cmd == XEN_DOMCTL_SCHEDOP_get_global_info) )
+        {
+            ret = sched_adjust_global(&op->u.scheduler_op);
+            if (op->u.scheduler_op.cmd == XEN_DOMCTL_SCHEDOP_get_global_info)
+            {
+                if ( copy_to_guest(u_domctl, op, 1) )
+                    ret = -EFAULT;
+            }
+        }
+        else
+        {
+            struct domain *d;
 
-        ret = xsm_scheduler(d);
-        if ( ret )
-            goto scheduler_op_out;
+            ret = -ESRCH;
+            if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
+                break;
 
-        ret = sched_adjust(d, &op->u.scheduler_op);
-        if ( copy_to_guest(u_domctl, op, 1) )
-            ret = -EFAULT;
+            ret = xsm_scheduler(d);
+            if ( ret )
+                goto scheduler_op_out;
+
+            ret = sched_adjust(d, &op->u.scheduler_op);
+            if ( copy_to_guest(u_domctl, op, 1) )
+                ret = -EFAULT;
 
-    scheduler_op_out:
-        rcu_unlock_domain(d);
+        scheduler_op_out:
+            rcu_unlock_domain(d);
+        }
     }
     break;
 
diff -rupN a/xen/common/sched_credit.c b/xen/common/sched_credit.c
--- a/xen/common/sched_credit.c           2010-04-07 12:12:06.000000000 -0400
+++ b/xen/common/sched_credit.c        2010-04-13 17:30:40.710992000 -0400
@@ -1404,6 +1404,7 @@ const struct scheduler sched_credit_def
     .wake           = csched_vcpu_wake,
 
     .adjust         = csched_dom_cntl,
+    .adjust_global  = NULL,
 
     .pick_cpu       = csched_cpu_pick,
     .do_schedule    = csched_schedule,
diff -rupN a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c
--- a/xen/common/sched_sedf.c 2010-04-07 12:12:06.000000000 -0400
+++ b/xen/common/sched_sedf.c          2010-04-13 17:30:40.710992000 -0400
@@ -1473,6 +1473,7 @@ const struct scheduler sched_sedf_def =
     .sleep          = sedf_sleep,
     .wake           = sedf_wake,
     .adjust         = sedf_adjust,
+    .adjust_global  = NULL,
 };
 
 /*
diff -rupN a/xen/common/schedule.c b/xen/common/schedule.c
--- a/xen/common/schedule.c     2010-04-07 12:12:06.000000000 -0400
+++ b/xen/common/schedule.c  2010-04-14 10:57:11.262796000 -0400
@@ -804,6 +804,21 @@ long sched_adjust(struct domain *d, stru
     return ret;
 }
 
+/* Adjust scheduling parameters globally */
+long sched_adjust_global(struct xen_domctl_scheduler_op *op)
+{
+    long ret;
+
+    if ( (op->sched_id != ops.sched_id)
+      || ( (op->cmd != XEN_DOMCTL_SCHEDOP_put_global_info)
+        && (op->cmd != XEN_DOMCTL_SCHEDOP_get_global_info) ) )
+        return -EINVAL;
+
+    ret = SCHED_OP(adjust_global, op);
+
+    return ret;
+}
+
 static void vcpu_periodic_timer_work(struct vcpu *v)
 {
     s_time_t now = NOW();
diff -rupN a/xen/include/public/domctl.h b/xen/include/public/domctl.h
--- a/xen/include/public/domctl.h 2010-04-07 12:12:06.000000000 -0400
+++ b/xen/include/public/domctl.h         2010-04-14 10:57:11.262796000 -0400
@@ -306,6 +306,8 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_v
 /* Set or get info? */
 #define XEN_DOMCTL_SCHEDOP_putinfo 0
 #define XEN_DOMCTL_SCHEDOP_getinfo 1
+#define XEN_DOMCTL_SCHEDOP_put_global_info 2
+#define XEN_DOMCTL_SCHEDOP_get_global_info 3
 struct xen_domctl_scheduler_op {
     uint32_t sched_id;  /* XEN_SCHEDULER_* */
     uint32_t cmd;       /* XEN_DOMCTL_SCHEDOP_* */
diff -rupN a/xen/include/xen/sched.h b/xen/include/xen/sched.h
--- a/xen/include/xen/sched.h      2010-04-07 12:12:06.000000000 -0400
+++ b/xen/include/xen/sched.h  2010-04-13 17:30:40.710992000 -0400
@@ -472,6 +472,7 @@ void sched_destroy_vcpu(struct vcpu *v);
 int  sched_init_domain(struct domain *d);
 void sched_destroy_domain(struct domain *d);
 long sched_adjust(struct domain *, struct xen_domctl_scheduler_op *);
+long sched_adjust_global(struct xen_domctl_scheduler_op *);
 int  sched_id(void);
 void sched_tick_suspend(void);
 void sched_tick_resume(void);
diff -rupN a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
--- a/xen/include/xen/sched-if.h   2010-04-07 12:12:06.000000000 -0400
+++ b/xen/include/xen/sched-if.h           2010-04-13 17:30:40.710992000 -0400
@@ -76,6 +76,7 @@ struct scheduler {
     int          (*pick_cpu)       (struct vcpu *);
     int          (*adjust)         (struct domain *,
                                     struct xen_domctl_scheduler_op *);
+    int          (*adjust_global)  (struct xen_domctl_scheduler_op *);
     void         (*dump_settings)  (void);
     void         (*dump_cpu_state) (int);
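For context, a hedged sketch of how a scheduler could wire up the new callback. The ARINC 653 scheduler itself is not part of this patch, and the names below (a653sched_adjust_global, sched_a653_def) are hypothetical placeholders:

/* Hypothetical scheduler showing how the new hook might be implemented.
 * Only the shape matters; the parameter handling is deliberately elided. */
static int a653sched_adjust_global(struct xen_domctl_scheduler_op *op)
{
    switch ( op->cmd )
    {
    case XEN_DOMCTL_SCHEDOP_put_global_info:
        /* Validate and install the new global parameters carried in op. */
        return 0;
    case XEN_DOMCTL_SCHEDOP_get_global_info:
        /* Fill op with the current global parameters; do_domctl() then
         * copies the whole domctl back to the caller. */
        return 0;
    default:
        return -EINVAL;
    }
}

const struct scheduler sched_a653_def = {
    .name           = "ARINC 653 Scheduler",   /* illustrative */
    .adjust_global  = a653sched_adjust_global,
    /* .init, .adjust, .do_schedule, etc. omitted in this sketch */
};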

 

 

 

 

 


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


