This patch adds an ARINC 653 scheduler to Xen.  It is a revision of an
earlier patch (submitted Friday, April 16): it has been updated to use the
new .adjust_global callback function (added by Keir Fraser in c/s 21282) and
to support CPU pools (per feedback received from George Dunlap on Tuesday,
May 4).
 
Thanks and regards,
  Kathy Hadley
  DornerWorks, Ltd.
 
 
diff -rupN a/tools/libxc/Makefile b/tools/libxc/Makefile
--- a/tools/libxc/Makefile      2010-05-26 17:01:34.000000000 -0400
+++ b/tools/libxc/Makefile      2010-06-01 12:30:45.000000000 -0400
@@ -19,6 +19,7 @@ CTRL_SRCS-y       += xc_private.c
 CTRL_SRCS-y       += xc_sedf.c
 CTRL_SRCS-y       += xc_csched.c
 CTRL_SRCS-y       += xc_csched2.c
+CTRL_SRCS-y       += xc_arinc653.c
 CTRL_SRCS-y       += xc_tbuf.c
 CTRL_SRCS-y       += xc_pm.c
 CTRL_SRCS-y       += xc_cpu_hotplug.c
diff -rupN a/tools/libxc/xc_arinc653.c b/tools/libxc/xc_arinc653.c
--- a/tools/libxc/xc_arinc653.c 1969-12-31 19:00:00.000000000 -0500
+++ b/tools/libxc/xc_arinc653.c 2010-06-14 10:50:57.000000000 -0400
@@ -0,0 +1,27 @@
+/****************************************************************************
+ * (C) 2010 - DornerWorks, Ltd <DornerWorks.com>
+ ****************************************************************************
+ *
+ *        File: xc_arinc653.c
+ *      Author: Josh Holtrop <DornerWorks.com>
+ *
+ * Description: XC Interface to the ARINC 653 scheduler
+ *
+ */
+
+#include "xc_private.h"
+
+int
+xc_sched_arinc653_sched_set(
+    int xc_handle,
+    xen_sysctl_sched_arinc653_schedule_t * sched)
+{
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_scheduler_op;
+    sysctl.u.scheduler_op.sched_id = XEN_SCHEDULER_ARINC653;
+    sysctl.u.scheduler_op.cmd = XEN_SYSCTL_SCHEDOP_putinfo;
+    set_xen_guest_handle(sysctl.u.scheduler_op.u.arinc653.schedule, sched);
+
+    return do_sysctl(xc_handle, &sysctl);
+}
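
For reference, a toolstack could drive the new call roughly as follows.
This is an illustrative sketch only, not part of the patch: the helper name,
the 20 ms frame, and the guest handle argument are invented for the example,
and it assumes the int-valued handle returned by xc_interface_open() in this
tree.

#include <string.h>
#include "xenctrl.h"

/* Illustrative only: give Dom0 and one guest VCPU 10 ms each per 20 ms
 * major frame. */
int set_two_partition_schedule(int xc_handle, xen_domain_handle_t guest)
{
    xen_sysctl_sched_arinc653_schedule_t sched;

    memset(&sched, 0, sizeof(sched));
    sched.major_frame = 20000000LL;              /* 20 ms, in nanoseconds */
    sched.num_sched_entries = 2;

    /* Entry 0: Dom0.  After the memset its dom_handle is already the
     * all-zero handle that identifies domain 0. */
    sched.sched_entries[0].vcpu_id = 0;
    sched.sched_entries[0].runtime = 10000000LL; /* 10 ms */

    /* Entry 1: the guest domain's VCPU 0. */
    memcpy(sched.sched_entries[1].dom_handle, guest,
           sizeof(sched.sched_entries[1].dom_handle));
    sched.sched_entries[1].vcpu_id = 0;
    sched.sched_entries[1].runtime = 10000000LL; /* 10 ms */

    return xc_sched_arinc653_sched_set(xc_handle, &sched);
}
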
diff -rupN a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     2010-05-26 17:01:34.000000000 -0400
+++ b/tools/libxc/xenctrl.h     2010-06-01 12:30:45.000000000 -0400
@@ -485,6 +485,16 @@ int xc_sched_credit2_domain_get(int xc_h
                                 struct xen_domctl_sched_credit2 *sdom);
 
 /**
+ * This function sets the global ARINC 653 schedule.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm sched a pointer to the new ARINC 653 schedule
+ * @return 0 on success
+ */
+int xc_sched_arinc653_sched_set(int xc_handle,
+                                xen_sysctl_sched_arinc653_schedule_t * sched);
+
+/**
  * This function sends a trigger to a domain.
  *
  * @parm xc_handle a handle to an open hypervisor interface
diff -rupN a/xen/common/Makefile b/xen/common/Makefile
--- a/xen/common/Makefile  2010-05-26 17:01:34.000000000 -0400
+++ b/xen/common/Makefile  2010-06-01 12:30:45.000000000 -0400
@@ -17,6 +17,7 @@ obj-y += rangeset.o
 obj-y += sched_credit.o
 obj-y += sched_credit2.o
 obj-y += sched_sedf.o
+obj-y += sched_arinc653.o
 obj-y += schedule.o
 obj-y += shutdown.o
 obj-y += softirq.o
diff -rupN a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c
--- a/xen/common/sched_arinc653.c    1969-12-31 19:00:00.000000000 -0500
+++ b/xen/common/sched_arinc653.c    2010-06-16 09:05:24.000000000 -0400
@@ -0,0 +1,806 @@
+/*
+ * File: sched_arinc653.c
+ * Copyright (c) 2010, DornerWorks, Ltd. <DornerWorks.com>
+ *
+ * Description:
+ *   This file provides an ARINC 653-compatible scheduling algorithm
+ *   for use in Xen.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ */
+
+
+/**************************************************************************
+ * Includes                                                               *
+ **************************************************************************/
+#include <xen/lib.h>
+#include <xen/sched.h>
+#include <xen/sched-if.h>
+#include <xen/timer.h>
+#include <xen/softirq.h>
+#include <xen/time.h>
+#include <xen/errno.h>
+#include <xen/list.h>
+#include <public/sysctl.h>          /* ARINC653_MAX_DOMAINS_PER_SCHEDULE */
+#include <xen/guest_access.h>
+
+
+/**************************************************************************
+ * Private Macros                                                         *
+ **************************************************************************/
+
+/**
+ * Retrieve the idle VCPU for a given physical CPU
+ */
+#define IDLETASK(cpu)  (idle_vcpu[cpu])
+
+/**
+ * Return a pointer to the ARINC 653-specific scheduler data information
+ * associated with the given VCPU (vc)
+ */
+#define AVCPU(vc) ((arinc653_vcpu_t *)(vc)->sched_priv)
+
+
+/**************************************************************************
+ * Private Type Definitions                                               *
+ **************************************************************************/
+
+/**
+ * The sched_entry_t structure holds a single entry of the
+ * ARINC 653 schedule.
+ */
+typedef struct sched_entry_s
+{
+    /* dom_handle holds the handle ("UUID") for the domain that this
+     * schedule entry refers to. */
+    xen_domain_handle_t dom_handle;
+    /* vcpu_id holds the VCPU number for the VCPU that this schedule
+     * entry refers to. */
+    int                 vcpu_id;
+    /* runtime holds the number of nanoseconds that the VCPU for this
+     * schedule entry should be allowed to run per major frame. */
+    s_time_t            runtime;
+    /* vc holds a pointer to the Xen VCPU structure */
+    struct vcpu *       vc;
+} sched_entry_t;
+
+/**
+ * The arinc653_vcpu_t structure holds ARINC 653-scheduler-specific
+ * information for all non-idle VCPUs
+ */
+typedef struct arinc653_vcpu_s
+{
+    /* vc points to Xen's struct vcpu so we can get to it from an
+     * arinc653_vcpu_t pointer. */
+    struct vcpu *       vc;
+    /* awake holds whether the VCPU has been woken with vcpu_wake() */
+    bool_t              awake;
+    /* list holds the linked list information for the list this VCPU
+     * is stored in */
+    struct list_head    list;
+} arinc653_vcpu_t;
+
+/**
+ * Data structure containing domain-specific information.
+ */
+struct arinc653_dom_info {
+    struct domain  *domain;
+};
+
+/**
+ * Data structure containing all the "global" data items used by the scheduler.
+ */
+typedef struct arinc653_sched_private_s
+{
+    /*
+     * This array holds the active ARINC 653 schedule.
+     *
+     * When the system tries to start a new VCPU, this schedule is scanned
+     * to look for a matching (handle, VCPU #) pair. If both the handle
+     * ("UUID") and VCPU number match, then the VCPU is allowed to run. Its
+     * run time (per major frame) is given in the runtime field of the
+     * schedule entry.
+     */
+    sched_entry_t arinc653_schedule[ARINC653_MAX_DOMAINS_PER_SCHEDULE];
+    /*
+     * This variable holds the number of entries that are valid in
+     * the arinc653_schedule table.
+     *
+     * This is not necessarily the same as the number of domains in the
+     * schedule. A domain could be listed multiple times within the schedule,
+     * or a domain with multiple VCPUs could have a different
+     * schedule entry for each VCPU.
+     *
+     * A value of 1 means that only 1 domain (Dom0) will initially be started.
+     */
+    int num_schedule_entries;
+    /*
+     * arinc653_major_frame holds the major frame time for the ARINC 653
+     * schedule.
+     */
+    s_time_t arinc653_major_frame;
+    /*
+     * next_major_frame holds the time that the next major frame starts
+     */
+    s_time_t next_major_frame;
+    /*
+     * vcpu_list holds pointers to all Xen VCPU structures for iterating
+     * through
+     */
+    struct list_head vcpu_list;
+} arinc653_sched_private_t;
+
+
+/**************************************************************************
+ * Global data                                                            *
+ **************************************************************************/
+static arinc653_sched_private_t arinc653_schedule;
+
+
+/**************************************************************************
+ * Scheduler functions                                                    *
+ **************************************************************************/
+
+/**
+ * This function compares two domain handles.
+ *
+ * @param h1        Pointer to handle 1
+ * @param h2        Pointer to handle 2
+ *
+ * @return          <ul>
+ *                  <li> <0:  handle 1 is less than handle 2
+ *                  <li>  0:  handle 1 is equal to handle 2
+ *                  <li> >0:  handle 1 is greater than handle 2
+ *                  </ul>
+ */
+static int dom_handle_cmp(const xen_domain_handle_t h1,
+                          const xen_domain_handle_t h2)
+{
+    return memcmp(h1, h2, sizeof(xen_domain_handle_t));
+} /* end dom_handle_cmp */
+
+/**
+ * This function searches the vcpu list to find a VCPU that matches
+ * the domain handle and VCPU ID specified.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler
+ * @param handle    Pointer to handle
+ * @param vcpu_id   VCPU ID
+ *
+ * @return          <ul>
+ *                  <li> Pointer to the matching VCPU if one is found
+ *                  <li> NULL otherwise
+ *                  </ul>
+ */
+static struct vcpu * find_vcpu(const struct scheduler *ops,
+                               xen_domain_handle_t handle,
+                               int vcpu_id)
+{
+    arinc653_sched_private_t *prv = ops->sched_data;
+    arinc653_vcpu_t * avcpu; /* loop index variable */
+    struct vcpu * vc = NULL;
+
+    /* loop through the vcpu_list looking for the specified VCPU */
+    list_for_each_entry(avcpu, &prv->vcpu_list, list)
+    {
+        /* If the handles & VCPU IDs match, we've found a matching VCPU */
+        if ( (dom_handle_cmp(avcpu->vc->domain->handle, handle) == 0)
+             && (vcpu_id == avcpu->vc->vcpu_id) )
+        {
+            vc = avcpu->vc;
+            /* "break" is used because the list_for_each_entry() macro
+             * provides no other way to terminate the iteration early */
+            break;
+        }
+    }
+
+    return vc;
+} /* end find_vcpu */
+
+/**
+ * This function updates the pointer to the Xen VCPU structure for each entry
+ * in the ARINC 653 schedule.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler
+ *
+ * @return          <None>
+ */
+static void update_schedule_vcpus(const struct scheduler *ops)
+{
+    arinc653_sched_private_t *prv = ops->sched_data;
+
+    /* Loop through the number of entries in the schedule */
+    for (int i = 0; i < prv->num_schedule_entries; i++)
+    {
+        /* Update the pointer to the Xen VCPU structure for this entry */
+        prv->arinc653_schedule[i].vc =
+            find_vcpu(ops,
+                      prv->arinc653_schedule[i].dom_handle,
+                      prv->arinc653_schedule[i].vcpu_id);
+    }
+} /* end update_schedule_vcpus */
+
+/**
+ * This function is called by the arinc653_adjust_global scheduler
+ * callback function in response to a system control (sysctl) hypercall
+ * with a scheduler operation.
+ *
+ * The parameter schedule is set to be the address of a local variable from
+ * within arinc653_adjust_global(), so it is guaranteed to not be NULL.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler
+ * @param schedule  Pointer to the new ARINC 653 schedule.
+ *
+ * @return          <ul>
+ *                  <li> 0 = success
+ *                  <li> !0 = error
+ *                  </ul>
+ */
+static int arinc653_sched_set(const struct scheduler *ops,
+                              xen_sysctl_sched_arinc653_schedule_t * schedule)
+{
+    arinc653_sched_private_t *prv = ops->sched_data;
+
+    int ret = 0;
+    s_time_t total_runtime = 0;
+    bool_t found_dom0 = 0;
+    static const xen_domain_handle_t dom0_handle = {0};
+
+    /* check for a valid major frame and number of schedule entries */
+    if ( (schedule->major_frame <= 0)
+         || (schedule->num_sched_entries < 1)
+         || (schedule->num_sched_entries > ARINC653_MAX_DOMAINS_PER_SCHEDULE) )
+    {
+        ret = -EINVAL;
+    }
+
+    if (ret == 0)
+    {
+        for (int i = 0; i < schedule->num_sched_entries; i++)
+        {
+            /*
+             * look for the domain 0 handle - every schedule must contain
+             * some time for domain 0 to run
+             */
+            if (dom_handle_cmp(schedule->sched_entries[i].dom_handle,
+                               dom0_handle) == 0)
+            {
+                found_dom0 = 1;
+            }
+
+            /* check for a valid VCPU ID and run time */
+            if ( (schedule->sched_entries[i].vcpu_id < 0)
+                 || (schedule->sched_entries[i].runtime <= 0) )
+            {
+                ret = -EINVAL;
+            }
+            else
+            {
+                /* Add this entry's run time to the total run time */
+                total_runtime += schedule->sched_entries[i].runtime;
+            }
+        } /* end loop through schedule entries */
+    }
+
+    if (ret == 0)
+    {
+        /* error if the schedule doesn't contain a slot for domain 0 */
+        if (found_dom0 == 0)
+        {
+            ret = -EINVAL;
+        }
+    }
+
+    if (ret == 0)
+    {
+        /*
+         * error if the major frame is not large enough to run all entries,
+         * as indicated by comparing the total run time to the major frame
+         * length
+         */
+        if (total_runtime > schedule->major_frame)
+        {
+            ret = -EINVAL;
+        }
+    }
+
+    if (ret == 0)
+    {
+        /* copy the new schedule into place */
+        prv->num_schedule_entries = schedule->num_sched_entries;
+        prv->arinc653_major_frame = schedule->major_frame;
+        for (int i = 0; i < prv->num_schedule_entries; i++)
+        {
+            memcpy(prv->arinc653_schedule[i].dom_handle,
+                   schedule->sched_entries[i].dom_handle,
+                   sizeof(prv->arinc653_schedule[i].dom_handle));
+            prv->arinc653_schedule[i].vcpu_id =
+                schedule->sched_entries[i].vcpu_id;
+            prv->arinc653_schedule[i].runtime =
+                schedule->sched_entries[i].runtime;
+        }
+        update_schedule_vcpus(ops);
+
+        /*
+         * The newly-installed schedule takes effect immediately.
+         * We do not even wait for the current major frame to expire.
+         *
+         * Signal a new major frame to begin. The next major frame
+         * is set up by the do_schedule callback function when it
+         * is next invoked.
+         */
+        prv->next_major_frame = NOW();
+    }
+
+    return ret;
+} /* end arinc653_sched_set */
+
+/**
+ * Xen scheduler callback function to adjust global scheduling parameters
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler
+ * @param op        Pointer to the system control scheduler operation structure
+ *
+ * @return          <ul>
+ *                  <li> 0 = success
+ *                  <li> !0 = error
+ *                  </ul>
+ */
+static int arinc653_adjust_global(const struct scheduler *ops,
+                                  struct xen_sysctl_scheduler_op * op)
+{
+    int ret = -1;
+    xen_sysctl_sched_arinc653_schedule_t new_sched;
+
+    if (op->cmd == XEN_SYSCTL_SCHEDOP_putinfo)
+    {
+        if (copy_from_guest(&new_sched, op->u.arinc653.schedule, 1) != 0)
+        {
+            ret = -EFAULT;
+        }
+        else
+        {
+            ret = arinc653_sched_set(ops, &new_sched);
+        }
+    }
+
+    return ret;
+} /* end arinc653_adjust_global */
+
+/**
+ * Xen scheduler callback function to initialize the ARINC 653
+ * scheduler data structure.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler
+ *
+ * @return          <ul>
+ *                  <li> 0 = success
+ *                  <li> !0 = error
+ *                  </ul>
+ */
+static int arinc653_init(struct scheduler *ops)
+{
+    arinc653_sched_private_t *prv;
+    int i;
+
+    /* Initial value for the ARINC 653 scheduler data. */
+    const sched_entry_t init_sched_element = { "", 0, MILLISECS(10), NULL };
+
+    /* Use the statically allocated ARINC 653 scheduler data structure */
+    prv = &arinc653_schedule;
+
+    /* Initialize the ARINC 653 scheduler data structure */
+    memset(prv, 0, sizeof(*prv));
+
+    /* Set the "scheduler" structure to point to the ARINC 653 scheduler data */
+    ops->sched_data = prv;
+
+    /*
+     * Initialize the ARINC 653 scheduler data.  In particular:
+     *   All domains execute for 10 ms.
+     *   Only one domain is enabled (domain 0).
+     *   Major frame = 10 ms (time required for domain 0).
+     */
+    for (i = 0; i < (sizeof(prv->arinc653_schedule) / sizeof(sched_entry_t)); i++)
+    {
+        prv->arinc653_schedule[i] = init_sched_element;
+    }
+    prv->num_schedule_entries = 1;
+    prv->arinc653_major_frame = MILLISECS(10);
+    prv->next_major_frame = 0;
+    INIT_LIST_HEAD(&prv->vcpu_list);
+
+    return 0;
+} /* end arinc653_init */
+
+/**
+ * Xen scheduler callback function to allocate and initialize a data structure
+ * containing information for a VCPU.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler
+ * @param v         Pointer to the VCPU structure
+ * @param dd        Domain data [Not Used]
+ *
+ * @return          <ul>
+ *                  <li> address of the allocated data structure
+ *                  <li> NULL if error
+ *                  </ul>
+ */
+static void *arinc653_alloc_vdata(const struct scheduler *ops,
+                                  struct vcpu *v,
+                                  void *dd)
+{
+    arinc653_sched_private_t *prv = ops->sched_data;
+    arinc653_vcpu_t *inf;
+
+    /*
+     * Allocate memory for the ARINC 653-specific scheduler data information
+     * associated with the given VCPU (v).
+     */
+    inf = xmalloc(arinc653_vcpu_t);
+    if (inf != NULL)
+    {
+        /*
+         * Initialize our ARINC 653 scheduler-specific information
+         * for the VCPU.
+         * The VCPU starts "asleep."
+         * When Xen is ready for the VCPU to run, it will call
+         * the vcpu_wake scheduler callback function and our
+         * scheduler will mark the VCPU awake.
+         */
+        inf->vc = v;
+        inf->awake = 0;
+        list_add(&inf->list, &prv->vcpu_list);
+        update_schedule_vcpus(ops);
+    }
+
+    return inf;
+} /* end arinc653_alloc_vdata */
+
+/**
+ * Xen scheduler callback function to free up VCPU data.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler [Not Used]
+ * @param priv      Pointer to the ARINC 653-specific VCPU data to free
+ *
+ * @return          <None>
+ */
+static void arinc653_free_vdata(const struct scheduler *ops, void *priv)
+{
+    /* Free the arinc653_vcpu structure; priv is the pointer returned
+     * by arinc653_alloc_vdata */
+    xfree(priv);
+} /* end arinc653_free_vdata */
+
+/**
+ * Xen scheduler callback function to allocate and initialize a data structure
+ * containing domain-specific data.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler [Not Used]
+ * @param dom       Pointer to data structure with data for the current domain
+ *                  [Not Used]
+ *
+ * @return          <ul>
+ *                  <li> address of the allocated data structure
+ *                  <li> NULL if error
+ *                  </ul>
+ */
+static void *
+arinc653_alloc_domdata(const struct scheduler *ops, struct domain *dom)
+{
+    void *mem;
+
+    /* Allocate memory for the domain-specific data structure */
+    mem = xmalloc(struct arinc653_dom_info);
+    if ( mem == NULL )
+    {
+        return NULL;
+    }
+
+    /* Initialize the allocated memory */
+    memset(mem, 0, sizeof(struct arinc653_dom_info));
+
+    return mem;
+} /* end arinc653_alloc_domdata */
+
+/**
+ * Xen scheduler callback function to free up domain-specific data.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler [Not Used]
+ * @param data      Pointer to the domain-specific data structure
+ *
+ * @return          <None>
+ */
+static void arinc653_free_domdata(const struct scheduler *ops, void *data)
+{
+    /* free the domain-specific data structure */
+    xfree(data);
+} /* end arinc653_free_domdata */
+
+/**
+ * Xen scheduler callback function to initialize a domain.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler
+ * @param dom       Pointer to the domain structure
+ *
+ * @return          <ul>
+ *                  <li> 0 for success
+ *                  <li> -ENOMEM if out of memory
+ *                  </ul>
+ */
+static int arinc653_init_domain(const struct scheduler *ops,
+                                struct domain *dom)
+{
+    /* Don't allocate a data structure for an idle domain */
+    if ( is_idle_domain(dom) )
+    {
+        return 0;
+    }
+
+    /* Allocate memory for the domain. */
+    dom->sched_priv = arinc653_alloc_domdata(ops, dom);
+    if ( dom->sched_priv == NULL )
+    {
+        return -ENOMEM;
+    }
+
+    return 0;
+} /* end arinc653_init_domain */
+
+/**
+ * Xen scheduler callback function to destroy a domain.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler
+ * @param dom       Pointer to the domain structure
+ */
+static void arinc653_destroy_domain(const struct scheduler *ops,
+                                    struct domain *dom)
+{
+    arinc653_free_domdata(ops, dom->sched_priv);
+} /* end arinc653_destroy_domain */
+
+/**
+ * Xen scheduler callback function to remove a VCPU.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler
+ * @param v         Pointer to the VCPU structure to remove
+ *
+ * @return          <None>
+ */
+static void arinc653_destroy_vcpu(const struct scheduler *ops, struct vcpu * v)
+{
+    if (AVCPU(v) != NULL)
+    {
+        /* remove the VCPU from whichever list it is on */
+        list_del(&AVCPU(v)->list);
+        /* free the arinc653_vcpu structure */
+        arinc653_free_vdata(ops, AVCPU(v));
+        update_schedule_vcpus(ops);
+    }
+} /* end arinc653_destroy_vcpu */
+
+/**
+ * Xen scheduler callback function to select a VCPU to run.
+ * This is the main scheduler routine.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler
+ * @param t         Current time
+ * @param tasklet_work_scheduled
+ *                  [Not Used]
+ *
+ * @return          Time slice and address of the VCPU structure for the
+ *                  chosen domain
+ */
+static struct task_slice arinc653_do_schedule(const struct scheduler *ops,
+                                              s_time_t t,
+                                              bool_t tasklet_work_scheduled)
+{
+    arinc653_sched_private_t *prv = ops->sched_data;
+
+    struct task_slice ret;                      /* hold the chosen domain */
+    struct vcpu * new_task = NULL;
+    static int sched_index = 0;
+    static s_time_t last_major_frame;
+    static s_time_t last_switch_time;
+    static s_time_t next_switch_time;
+
+    if (t >= prv->next_major_frame)
+    {
+        /* time to enter a new major frame;
+         * the first time this function is called, this will be true */
+        sched_index = 0;
+        last_major_frame = last_switch_time = t;
+        prv->next_major_frame = t + prv->arinc653_major_frame;
+    }
+    else if (t >= next_switch_time)
+    {
+        /* time to switch to the next domain in this major frame */
+        sched_index++;
+        last_switch_time = next_switch_time;
+    }
+
+    /*
+     * If there are more domains to run in the current major frame, set
+     * next_switch_time equal to the last switch time + this domain's run time.
+     * Otherwise, set next_switch_time equal to the start of the next major
+     * frame.
+     */
+    next_switch_time = (sched_index < prv->num_schedule_entries)
+        ? last_switch_time + prv->arinc653_schedule[sched_index].runtime
+        : prv->next_major_frame;
+
+    /*
+     * If there are more domains to run in the current major frame, set
+     * new_task equal to the address of the next domain's VCPU structure.
+     * Otherwise, set new_task equal to the address of the idle task's VCPU
+     * structure.
+     */
+    new_task = (sched_index < prv->num_schedule_entries)
+                   ? prv->arinc653_schedule[sched_index].vc
+                   : IDLETASK(0);
+
+    /* Check to see if the new task can be run (awake & runnable). */
+    if ( !((new_task != NULL)
+           && AVCPU(new_task)->awake
+           && vcpu_runnable(new_task)) )
+    {
+        new_task = IDLETASK(0);
+    }
+    BUG_ON(new_task == NULL);
+
+    /*
+     * Check to make sure we did not miss a major frame.
+     * This is a good test for robust partitioning.
+     */
+    BUG_ON(t >= prv->next_major_frame);
+
+    /*
+     * Return the amount of time the next domain has to run and the address
+     * of the selected task's VCPU structure.
+     */
+    ret.time = next_switch_time - t;
+    ret.task = new_task;
+
+    BUG_ON(ret.time <= 0);
+
+    return ret;
+} /* end arinc653_do_schedule */
+
+/**
+ * Xen scheduler callback function to select a CPU for the VCPU to run on.
+ * Currently only one CPU is supported.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler
+ * @param v         Pointer to the VCPU structure for the current domain
+ *
+ * @return          Number of the selected physical CPU
+ */
+static int arinc653_pick_cpu(const struct scheduler *ops, struct vcpu *v)
+{
+    /* this implementation only supports one physical CPU */
+    return 0;
+} /* end arinc653_pick_cpu */
+
+/**
+ * Xen scheduler callback function to wake up a VCPU
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler [Not Used]
+ * @param vc        Pointer to the VCPU structure for the current domain
+ *
+ * @return          <None>
+ */
+static void arinc653_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
+{
+    /* boolean flag to indicate first run */
+    static bool_t dont_raise_softirq = 0;
+
+    if (AVCPU(vc) != NULL)  /* check that this is a VCPU we are tracking */
+    {
+        AVCPU(vc)->awake = 1;
+    }
+
+    /* the first time the vcpu_wake function is called, we should raise
+     * a softirq to invoke the do_schedule callback */
+    if (!dont_raise_softirq)
+    {
+        cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
+        dont_raise_softirq = 1;
+    }
+} /* end arinc653_vcpu_wake */
+
+/**
+ * Xen scheduler callback function to put a VCPU to sleep
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler [Not Used]
+ * @param vc        Pointer to the VCPU structure for the current domain
+ *
+ * @return          <None>
+ */
+static void arinc653_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
+{
+    if (AVCPU(vc) != NULL)  /* check that this is a VCPU we are tracking */
+    {
+        AVCPU(vc)->awake = 0;
+    }
+
+    /* if the VCPU being put to sleep is the same one that is currently
+     * running, raise a softirq to invoke the scheduler to switch domains */
+    if (per_cpu(schedule_data, vc->processor).curr == vc)
+    {
+        cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
+    }
+} /* end arinc653_vcpu_sleep */
+
+/**
+ * This structure defines our scheduler for Xen.
+ * The entries tell Xen where to find our scheduler-specific
+ * callback functions.
+ * The symbol must be visible to the rest of Xen at link time.
+ */
+struct scheduler sched_arinc653_def = {
+    .name           = "ARINC 653 Scheduler",
+    .opt_name       = "arinc653",
+    .sched_id       = XEN_SCHEDULER_ARINC653,
+    .sched_data     = &arinc653_schedule,
+
+    .init           = arinc653_init,
+/*  .deinit         = NULL, */
+
+    .free_vdata     = arinc653_free_vdata,
+    .alloc_vdata    = arinc653_alloc_vdata,
+
+/*  .free_pdata     = NULL, */
+/*  .alloc_pdata    = NULL, */
+    .free_domdata   = arinc653_free_domdata,
+    .alloc_domdata  = arinc653_alloc_domdata,
+
+    .init_domain    = arinc653_init_domain,
+    .destroy_domain = arinc653_destroy_domain,
+
+/*  .insert_vcpu    = NULL, */
+    .destroy_vcpu   = arinc653_destroy_vcpu,
+
+    .sleep          = arinc653_vcpu_sleep,
+    .wake           = arinc653_vcpu_wake,
+/*  .context_saved  = NULL, */
+
+    .do_schedule    = arinc653_do_schedule,
+
+    .pick_cpu       = arinc653_pick_cpu,
+/*  .adjust         = NULL, */
+    .adjust_global  = arinc653_adjust_global,
+/*  .dump_settings  = NULL, */
+/*  .dump_cpu_state = NULL, */
+
+/*  .tick_suspend   = NULL, */
+/*  .tick_resume    = NULL, */
+};
diff -rupN a/xen/common/schedule.c b/xen/common/schedule.c
--- a/xen/common/schedule.c     2010-05-26 17:01:34.000000000 -0400
+++ b/xen/common/schedule.c     2010-06-01 12:30:45.000000000 -0400
@@ -8,6 +8,8 @@
  *      Author: Rolf Neugebauer & Keir Fraser
  *              Updated for generic API by Mark Williamson
  * 
+ *              ARINC 653 scheduler added by DornerWorks <DornerWorks.com>
+ * 
  * Description: Generic CPU scheduling code
  *              implements support functionality for the Xen scheduler API.
  *
@@ -59,10 +61,12 @@ DEFINE_PER_CPU(struct scheduler *, sched
 extern const struct scheduler sched_sedf_def;
 extern const struct scheduler sched_credit_def;
 extern const struct scheduler sched_credit2_def;
+extern const struct scheduler sched_arinc653_def;
 static const struct scheduler *schedulers[] = {
     &sched_sedf_def,
     &sched_credit_def,
     &sched_credit2_def,
+    &sched_arinc653_def,
     NULL
 };
 
diff -rupN a/xen/include/public/domctl.h b/xen/include/public/domctl.h
--- a/xen/include/public/domctl.h    2010-05-26 17:01:34.000000000 -0400
+++ b/xen/include/public/domctl.h    2010-06-01 12:30:45.000000000 -0400
@@ -305,6 +305,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_v
 #define XEN_SCHEDULER_SEDF     4
 #define XEN_SCHEDULER_CREDIT   5
 #define XEN_SCHEDULER_CREDIT2  6
+#define XEN_SCHEDULER_ARINC653 7
 /* Set or get info? */
 #define XEN_DOMCTL_SCHEDOP_putinfo 0
 #define XEN_DOMCTL_SCHEDOP_getinfo 1
diff -rupN a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
--- a/xen/include/public/sysctl.h    2010-05-26 17:01:34.000000000 -0400
+++ b/xen/include/public/sysctl.h    2010-06-01 12:30:45.000000000 -0400
@@ -22,6 +22,8 @@
  * DEALINGS IN THE SOFTWARE.
  *
  * Copyright (c) 2002-2006, K Fraser
+ *
+ * ARINC 653 Scheduler type added by DornerWorks <DornerWorks.com>.
  */
 
 #ifndef __XEN_PUBLIC_SYSCTL_H__
@@ -539,10 +541,43 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpupo
 /* Set or get info? */
 #define XEN_SYSCTL_SCHEDOP_putinfo 0
 #define XEN_SYSCTL_SCHEDOP_getinfo 1
+
+/*
+ * This structure is used to pass a new ARINC 653 schedule from a
+ * privileged domain (i.e. Dom0) to Xen.
+ */
+#define ARINC653_MAX_DOMAINS_PER_SCHEDULE   64
+struct xen_sysctl_sched_arinc653_schedule {
+    /* major_frame holds the time for the new schedule's major frame
+     * in nanoseconds. */
+    int64_t     major_frame;
+    /* num_sched_entries holds how many of the entries in the
+     * sched_entries[] array are valid. */
+    uint8_t     num_sched_entries;
+    /* The sched_entries array holds the actual schedule entries. */
+    struct {
+        /* dom_handle must match a domain's UUID */
+        xen_domain_handle_t dom_handle;
+        /* If a domain has multiple VCPUs, vcpu_id specifies which one
+         * this schedule entry applies to. It should be set to 0 if
+         * there is only one VCPU for the domain. */
+        int                 vcpu_id;
+        /* runtime specifies the amount of time that should be allocated
+         * to this VCPU per major frame. It is specified in nanoseconds. */
+        int64_t             runtime;
+    } sched_entries[ARINC653_MAX_DOMAINS_PER_SCHEDULE];
+};
+typedef struct xen_sysctl_sched_arinc653_schedule
+    xen_sysctl_sched_arinc653_schedule_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_arinc653_schedule_t);
+
 struct xen_sysctl_scheduler_op {
     uint32_t sched_id;  /* XEN_SCHEDULER_* (domctl.h) */
     uint32_t cmd;       /* XEN_SYSCTL_SCHEDOP_* */
     union {
+        struct xen_sysctl_sched_arinc653 {
+            XEN_GUEST_HANDLE(xen_sysctl_sched_arinc653_schedule_t) schedule;
+        } arinc653;
     } u;
 };
 typedef struct xen_sysctl_scheduler_op xen_sysctl_scheduler_op_t;
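
Because arinc653_sched_set rejects malformed schedules with -EINVAL, a caller
may wish to validate a schedule against the same rules before issuing the
hypercall. The sketch below mirrors the hypervisor-side checks; it is
illustrative only and not part of the patch.

#include <string.h>

/* Returns 0 if the schedule would pass arinc653_sched_set's checks:
 * a positive major frame, a valid entry count, a slot for Dom0 (the
 * all-zero handle), positive per-entry runtimes, and a total runtime
 * that fits within the major frame. */
static int arinc653_schedule_valid(
    const struct xen_sysctl_sched_arinc653_schedule *s)
{
    static const xen_domain_handle_t dom0_handle = {0};
    int64_t total_runtime = 0;
    int found_dom0 = 0;
    int i;

    if ( (s->major_frame <= 0)
         || (s->num_sched_entries < 1)
         || (s->num_sched_entries > ARINC653_MAX_DOMAINS_PER_SCHEDULE) )
        return -1;

    for (i = 0; i < s->num_sched_entries; i++)
    {
        if (memcmp(s->sched_entries[i].dom_handle, dom0_handle,
                   sizeof(dom0_handle)) == 0)
            found_dom0 = 1;

        if ( (s->sched_entries[i].vcpu_id < 0)
             || (s->sched_entries[i].runtime <= 0) )
            return -1;

        total_runtime += s->sched_entries[i].runtime;
    }

    return (found_dom0 && total_runtime <= s->major_frame) ? 0 : -1;
}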