This patch adds an ARINC 653 scheduler to Xen. This is a modification
of an earlier patch ([Xen-devel] [PATCH 1/1] Xen ARINC653 scheduler).
In particular, it has been modified to use the new .adjust_global
callback function, which was added in "[Xen-devel] [PATCH 1/1] Add
.adjust_global callback".
Thanks and regards,
Kathy Hadley
DornerWorks, Ltd.
Embedded Systems Engineering
3445 Lake Eastbrook Blvd SE
Grand Rapids, MI 49546
Direct: 616.389.6127
Tel: 616.245.8369
Fax: 616.245.8372
Kathy.Hadley@DornerWorks.com
www.DornerWorks.com
Honored as one of the 2010 "Michigan 50 Companies to Watch"
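For reference, the schedule is pushed from dom0 through the new libxc wrapper
added by this patch. The sketch below is only an illustration of how a caller
might use it; the helper name and the runtimes are invented for the example and
are not part of the patch. Note that the scheduler rejects a schedule unless
dom0 appears in it and the entries' total runtime fits within the major frame.

#include <string.h>
#include "xenctrl.h"

/* Hypothetical example helper -- not part of the patch.  Runtimes are in
 * nanoseconds, matching the xen_domctl_sched_arinc653_schedule fields. */
static int push_two_entry_schedule(int xc_handle,
                                   xen_domain_handle_t dom0_handle,
                                   xen_domain_handle_t domu_handle)
{
    xen_domctl_sched_arinc653_schedule_t sched;

    memset(&sched, 0, sizeof(sched));
    sched.major_frame = 10000000LL;             /* 10 ms major frame */
    sched.num_sched_entries = 2;

    memcpy(sched.sched_entries[0].dom_handle, dom0_handle,
           sizeof(xen_domain_handle_t));
    sched.sched_entries[0].vcpu_id = 0;
    sched.sched_entries[0].runtime = 6000000LL; /* 6 ms for dom0 */

    memcpy(sched.sched_entries[1].dom_handle, domu_handle,
           sizeof(xen_domain_handle_t));
    sched.sched_entries[1].vcpu_id = 0;
    sched.sched_entries[1].runtime = 4000000LL; /* 4 ms for the guest */

    return xc_sched_arinc653_sched_set(xc_handle, &sched);
}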
diff -rupN a/tools/libxc/Makefile b/tools/libxc/Makefile
--- a/tools/libxc/Makefile 2010-04-13 10:49:37.573793000 -0400
+++ b/tools/libxc/Makefile 2010-04-14 17:49:26.952638000 -0400
@@ -17,6 +17,7 @@ CTRL_SRCS-y += xc_physdev.c
CTRL_SRCS-y += xc_private.c
CTRL_SRCS-y += xc_sedf.c
CTRL_SRCS-y += xc_csched.c
+CTRL_SRCS-y += xc_arinc653.c
CTRL_SRCS-y += xc_tbuf.c
CTRL_SRCS-y += xc_pm.c
CTRL_SRCS-y += xc_cpu_hotplug.c
diff -rupN a/tools/libxc/xc_arinc653.c b/tools/libxc/xc_arinc653.c
--- a/tools/libxc/xc_arinc653.c 1969-12-31 19:00:00.000000000 -0500
+++ b/tools/libxc/xc_arinc653.c 2010-04-14 17:49:26.952638000 -0400
@@ -0,0 +1,28 @@
+/****************************************************************************
+ * (C) 2010 - DornerWorks, Ltd <DornerWorks.com>
+ ****************************************************************************
+ *
+ * File: xc_arinc653.c
+ * Author: Josh Holtrop <DornerWorks.com>
+ *
+ * Description: XC Interface to the ARINC 653 scheduler
+ *
+ */
+
+#include "xc_private.h"
+
+int
+xc_sched_arinc653_sched_set(
+ int xc_handle,
+ xen_domctl_sched_arinc653_schedule_t * sched)
+{
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_scheduler_op;
+ domctl.domain = (domid_t) 0;
+ domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_ARINC653;
+ domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_put_global_info;
+ set_xen_guest_handle(domctl.u.scheduler_op.u.arinc653.schedule, sched);
+
+ return do_domctl(xc_handle, &domctl);
+}
diff -rupN a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h 2010-04-13 10:49:37.573793000 -0400
+++ b/tools/libxc/xenctrl.h 2010-04-14 17:49:26.952638000 -0400
@@ -476,6 +476,16 @@ int xc_sched_credit_domain_get(int xc_ha
struct xen_domctl_sched_credit *sdom);
/**
+ * This function sets the global ARINC 653 schedule.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm sched a pointer to the new ARINC 653 schedule
+ * return 0 on success
+ */
+int xc_sched_arinc653_sched_set(int xc_handle,
+ xen_domctl_sched_arinc653_schedule_t * sched);
+
+/**
* This function sends a trigger to a domain.
*
* @parm xc_handle a handle to an open hypervisor interface
diff -rupN a/xen/common/Makefile b/xen/common/Makefile
--- a/xen/common/Makefile 2010-04-13 10:49:37.573793000 -0400
+++ b/xen/common/Makefile 2010-04-13 13:00:31.651749000 -0400
@@ -14,6 +14,7 @@ obj-y += page_alloc.o
obj-y += rangeset.o
obj-y += sched_credit.o
obj-y += sched_sedf.o
+obj-y += sched_arinc653.o
obj-y += schedule.o
obj-y += shutdown.o
obj-y += softirq.o
diff -rupN a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c
--- a/xen/common/sched_arinc653.c 1969-12-31 19:00:00.000000000 -0500
+++ b/xen/common/sched_arinc653.c 2010-04-14 18:13:26.163404000 -0400
@@ -0,0 +1,590 @@
+/*
+ * File: sched_arinc653.c
+ * Copyright (c) 2010, DornerWorks, Ltd. <DornerWorks.com>
+ *
+ * Description:
+ * This file provides an ARINC653-compatible scheduling algorithm
+ * for use in Xen.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ */
+
+
+/**************************************************************************
+ * Includes                                                               *
+ *************************************************************************/
+#include <xen/lib.h>
+#include <xen/sched.h>
+#include <xen/sched-if.h>
+#include <xen/timer.h>
+#include <xen/softirq.h>
+#include <xen/time.h>
+#include <xen/errno.h>
+#include <xen/list.h>
+#include <public/domctl.h>        /* ARINC653_MAX_DOMAINS_PER_SCHEDULE */
+#include <xen/guest_access.h>
+
+
+/**************************************************************************
+ * Private Macros                                                         *
+ **************************************************************************/
+
+/**
+ * Retrieve the idle VCPU for a given physical CPU
+ */
+#define IDLETASK(cpu) ((struct vcpu *) per_cpu(schedule_data, (cpu)).idle)
+
+/**
+ * Return a pointer to the ARINC 653-specific scheduler data information
+ * associated with the given VCPU (vc)
+ */
+#define AVCPU(vc) ((arinc653_vcpu_t *)(vc)->sched_priv)
+
+/**************************************************************************
+ * Private Type Definitions                                               *
+ **************************************************************************/
+
+/**
+ * The sched_entry_t structure holds a single entry of the
+ * ARINC 653 schedule.
+ */
+typedef struct sched_entry_s
+{
+ /* dom_handle holds the handle ("UUID") for the domain that this
+ * schedule entry refers to. */
+ xen_domain_handle_t dom_handle;
+ /* vcpu_id holds the VCPU number for the VCPU that this schedule
+ * entry refers to. */
+ int vcpu_id;
+ /* runtime holds the number of nanoseconds that the VCPU for this
+ * schedule entry should be allowed to run per major frame. */
+ s_time_t runtime;
+ /* vc holds a pointer to the Xen VCPU structure */
+ struct vcpu * vc;
+} sched_entry_t;
+
+/**
+ * The arinc653_vcpu_t structure holds ARINC 653-scheduler-specific
+ * information for all non-idle VCPUs
+ */
+typedef struct arinc653_vcpu_s
+{
+ /* vc points to Xen's struct vcpu so we can get to it from an
+ * arinc653_vcpu_t pointer. */
+ struct vcpu * vc;
+ /* awake holds whether the VCPU has been woken with vcpu_wake() */
+ bool_t awake;
+ /* list holds the linked list information for the list this VCPU
+ * is stored in */
+ struct list_head list;
+} arinc653_vcpu_t;
+
+
+/**************************************************************************
+ * Global Data                                                            *
+ **************************************************************************/
+
+/**
+ * This array holds the active ARINC 653 schedule.
+ *
+ * When the system tries to start a new VCPU, this schedule is scanned
+ * to look for a matching (handle, VCPU #) pair. If both the handle ("UUID")
+ * and VCPU number match, then the VCPU is allowed to run. Its run time
+ * (per major frame) is given in the third entry of the schedule.
+ */
+static sched_entry_t arinc653_schedule[ARINC653_MAX_DOMAINS_PER_SCHEDULE] = {
+ { "", 0, MILLISECS(10), NULL }
+};
+
+/**
+ * This variable holds the number of entries that are valid in
+ * the arinc653_schedule table.
+ *
+ * This is not necessarily the same as the number of domains in the
+ * schedule. A domain could be listed multiple times within the schedule,
+ * or a domain with multiple VCPUs could have a different
+ * schedule entry for each VCPU.
+ *
+ * A value of 1 means that only 1 domain (Dom0) will initially be started.
+ */
+static int num_schedule_entries = 1;
+
+/**
+ * arinc653_major_frame holds the major frame time for the ARINC 653 schedule.
+ */
+static s_time_t arinc653_major_frame = MILLISECS(10);
+
+/**
+ * next_major_frame holds the time that the next major frame starts
+ */
+static s_time_t next_major_frame = 0;
+
+/**
+ * vcpu_list holds pointers to all Xen VCPU structures for iterating through
+ */
+static LIST_HEAD(vcpu_list);
+
+/**************************************************************************
+ * Scheduler functions                                                    *
+ **************************************************************************/
+
+/**
+ * This function compares two domain handles.
+ *
+ * @param h1 Pointer to handle 1
+ * @param h2 Pointer to handle 2
+ *
+ * @return <ul>
+ * <li> <0: handle 1 is less than handle 2
+ * <li> 0: handle 1 is equal to handle 2
+ * <li> >0: handle 1 is greater than handle 2
+ * </ul>
+ */
+static int dom_handle_cmp(const xen_domain_handle_t h1,
+ const xen_domain_handle_t h2)
+{
+ return memcmp(h1, h2, sizeof(xen_domain_handle_t));
+} /* end dom_handle_cmp */
+
+/**
+ * This function searches the vcpu list to find a VCPU that matches
+ * the domain handle and VCPU ID specified.
+ *
+ * @param handle Pointer to handle
+ * @param vcpu_id VCPU ID
+ *
+ * @return <ul>
+ * <li> Pointer to the matching VCPU if one is found
+ * <li> NULL otherwise
+ * </ul>
+ */
+static struct vcpu * find_vcpu(xen_domain_handle_t handle, int vcpu_id)
+{
+ arinc653_vcpu_t * avcpu; /* loop index variable */
+ struct vcpu * vc = NULL;
+
+ /* loop through the vcpu_list looking for the specified VCPU */
+ list_for_each_entry(avcpu, &vcpu_list, list)
+ {
+ /* If the handles & VCPU IDs match, we've found a matching VCPU */
+ if ((dom_handle_cmp(avcpu->vc->domain->handle, handle) == 0)
+ && (vcpu_id == avcpu->vc->vcpu_id))
+ {
+ vc = avcpu->vc;
+ /*
+ * "break" statement used instead of loop control
variable
because
+ * the macro used for this loop does not support using loop
control
+ * variables
+ */
+ break;
+ }
+ }
+
+ return vc;
+} /* end find_vcpu */
+
+/**
+ * This function updates the pointer to the Xen VCPU structure for each entry in
+ * the ARINC 653 schedule.
+ *
+ * @param <None>
+ * @return <None>
+ */
+static void update_schedule_vcpus(void)
+{
+ /* Loop through the number of entries in the schedule */
+ for (int i = 0; i < num_schedule_entries; i++)
+ {
+ /* Update the pointer to the Xen VCPU structure for the current entry */
+ arinc653_schedule[i].vc =
+     find_vcpu(arinc653_schedule[i].dom_handle,
+               arinc653_schedule[i].vcpu_id);
+ }
+} /* end update_schedule_vcpus */
+
+/**
+ * This function is called by the arinc653_adjust_global scheduler
+ * callback function in response to a domain control hypercall with
+ * a scheduler operation.
+ *
+ * The parameter schedule is set to be the address of a local variable from
+ * within arinc653_adjust_global(), so it is guaranteed to not be NULL.
+ *
+ * @param schedule Pointer to the new ARINC 653 schedule.
+ *
+ * @return <ul>
+ * <li> 0 = success
+ * <li> !0 = error
+ * </ul>
+ */
+static int arinc653_sched_set(xen_domctl_sched_arinc653_schedule_t * schedule)
+{
+ int ret = 0;
+ s_time_t total_runtime = 0;
+ bool_t found_dom0 = 0;
+ const static xen_domain_handle_t dom0_handle = {0};
+
+ /* check for valid major frame and number of schedule entries */
+ if ( (schedule->major_frame <= 0)
+ || (schedule->num_sched_entries < 1)
+ || (schedule->num_sched_entries > ARINC653_MAX_DOMAINS_PER_SCHEDULE) )
+ {
+ ret = -EINVAL;
+ }
+
+ if (ret == 0)
+ {
+ for (int i = 0; i < schedule->num_sched_entries; i++)
+ {
+ /*
+ * look for domain 0 handle - every schedule must contain
+ * some time for domain 0 to run
+ */
+ if (dom_handle_cmp(schedule->sched_entries[i].dom_handle,
+ dom0_handle) == 0)
+ {
+ found_dom0 = 1;
+ }
+
+ /* check for a valid VCPU ID and run time */
+ if ( (schedule->sched_entries[i].vcpu_id < 0)
+ || (schedule->sched_entries[i].runtime <= 0) )
+ {
+ ret = -EINVAL;
+ }
+ else
+ {
+ /* Add this entry's run time to total run time */
+ total_runtime += schedule->sched_entries[i].runtime;
+ }
+ } /* end loop through schedule entries */
+ }
+
+ if (ret == 0)
+ {
+ /* error if the schedule doesn't contain a slot for domain 0 */
+ if (found_dom0 == 0)
+ {
+ ret = -EINVAL;
+ }
+ }
+
+ if (ret == 0)
+ {
+ /*
+ * error if the major frame is not large enough to run all entries
+ * as indicated by comparing the total run time to the major frame
+ * length
+ */
+ if (total_runtime > schedule->major_frame)
+ {
+ ret = -EINVAL;
+ }
+ }
+
+ if (ret == 0)
+ {
+ /* copy the new schedule into place */
+ num_schedule_entries = schedule->num_sched_entries;
+ arinc653_major_frame = schedule->major_frame;
+ for (int i = 0; i < num_schedule_entries; i++)
+ {
+ memcpy(arinc653_schedule[i].dom_handle,
+ schedule->sched_entries[i].dom_handle,
+ sizeof(arinc653_schedule[i].dom_handle));
+ arinc653_schedule[i].vcpu_id = schedule->sched_entries[i].vcpu_id;
+ arinc653_schedule[i].runtime = schedule->sched_entries[i].runtime;
+ }
+ update_schedule_vcpus();
+
+ /*
+ * The newly-installed schedule takes effect immediately.
+ * We do not even wait for the current major frame to expire.
+ *
+ * Signal a new major frame to begin. The next major frame
+ * is set up by the do_schedule callback function when it
+ * is next invoked.
+ */
+ next_major_frame = NOW();
+ }
+
+ return ret;
+} /* end arinc653_sched_set */
+
+/**
+ * Xen scheduler callback function to adjust global scheduling parameters
+ *
+ * @param op Pointer to the domain control scheduler operation structure
+ *
+ * @return <ul>
+ * <li> 0 for success
+ * <li> !0 if there is an error
+ * </ul>
+ */
+static int arinc653_adjust_global(struct xen_domctl_scheduler_op * op)
+{
+ int ret = -1;
+ xen_domctl_sched_arinc653_schedule_t new_sched;
+
+ if (op->cmd == XEN_DOMCTL_SCHEDOP_put_global_info)
+ {
+ if (copy_from_guest(&new_sched, op->u.arinc653.schedule, 1) != 0)
+ {
+ ret = -EFAULT;
+ }
+ else
+ {
+ ret = arinc653_sched_set(&new_sched);
+ }
+ }
+
+ return ret;
+} /* end arinc653_adjust_global */
+
+/**
+ * Xen scheduler callback function to initialize a virtual CPU (VCPU).
+ *
+ * @param v Pointer to the VCPU structure
+ *
+ * @return <ul>
+ * <li> 0 if the VCPU is allowed to run
+ * <li> !0 if there is an error
+ * </ul>
+ */
+static int arinc653_init_vcpu(struct vcpu * v)
+{
+ int ret = -1;
+
+ if (is_idle_vcpu(v))
+ {
+ /*
+ * The idle VCPU is created by Xen to run when no domains
+ * are runnable or require CPU time.
+ * It is similar to an "idle task" or "halt loop" process
+ * in an operating system.
+ * We do not track any scheduler information for the idle VCPU.
+ */
+ v->sched_priv = NULL;
+ ret = 0;
+ }
+ else
+ {
+ /*
+ * Allocate memory for the ARINC 653-specific scheduler data information
+ * associated with the given VCPU (vc).
+ */
+ v->sched_priv = xmalloc(arinc653_vcpu_t);
+ if (AVCPU(v) != NULL)
+ {
+ /*
+ * Initialize our ARINC 653 scheduler-specific information
+ * for the VCPU.
+ * The VCPU starts "asleep."
+ * When Xen is ready for the VCPU to run, it will call
+ * the vcpu_wake scheduler callback function and our
+ * scheduler will mark the VCPU awake.
+ */
+ AVCPU(v)->vc = v;
+ AVCPU(v)->awake = 0;
+ list_add(&AVCPU(v)->list, &vcpu_list);
+ ret = 0;
+ update_schedule_vcpus();
+ }
+ }
+
+ return ret;
+} /* end arinc653_init_vcpu */
+
+/**
+ * Xen scheduler callback function to remove a VCPU
+ *
+ * @param v Pointer to the VCPU structure to remove
+ *
+ * @return <None>
+ */
+static void arinc653_destroy_vcpu(struct vcpu * v)
+{
+ if (AVCPU(v) != NULL)
+ {
+ /* remove the VCPU from whichever list it is on */
+ list_del(&AVCPU(v)->list);
+ /* free the arinc653_vcpu structure */
+ xfree(AVCPU(v));
+ update_schedule_vcpus();
+ }
+} /* end arinc653_destroy_vcpu */
+
+/**
+ * Xen scheduler callback function to select a VCPU to run.
+ * This is the main scheduler routine.
+ *
+ * @param t Current time
+ *
+ * @return Time slice and address of the VCPU structure for the chosen
+ * domain
+ */
+static struct task_slice arinc653_do_schedule(s_time_t t)
+{
+ struct task_slice ret; /* hold the chosen domain */
+ struct vcpu * new_task = NULL;
+ static int sched_index = 0;
+ static s_time_t last_major_frame;
+ static s_time_t last_switch_time;
+ static s_time_t next_switch_time;
+
+ if (t >= next_major_frame)
+ {
+ /* time to enter a new major frame
+ * the first time this function is called, this will be true */
+ sched_index = 0;
+ last_major_frame = last_switch_time = t;
+ next_major_frame = t + arinc653_major_frame;
+ }
+ else if (t >= next_switch_time)
+ {
+ /* time to switch to the next domain in this major frame */
+ sched_index++;
+ last_switch_time = next_switch_time;
+ }
+
+ /*
+ * If there are more domains to run in the current major frame, set
+ * next_switch_time equal to the last switch time + this domain's run time.
+ * Otherwise, set next_switch_time equal to the start of the next major
+ * frame.
+ */
+ next_switch_time = (sched_index < num_schedule_entries)
+     ? last_switch_time + arinc653_schedule[sched_index].runtime
+ : next_major_frame;
+
+ /*
+ * If there are more domains to run in the current major frame, set
+ * new_task equal to the address of next domain's VCPU structure.
+ * Otherwise, set new_task equal to the address of the idle task's VCPU
+ * structure.
+ */
+ new_task = (sched_index < num_schedule_entries)
+ ? arinc653_schedule[sched_index].vc
+ : IDLETASK(0);
+
+ /* Check to see if the new task can be run (awake & runnable). */
+ if (!((new_task != NULL)
+ && AVCPU(new_task)->awake
+ && vcpu_runnable(new_task)) )
+ {
+ new_task = IDLETASK(0);
+ }
+ BUG_ON(new_task == NULL);
+
+ /*
+ * Check to make sure we did not miss a major frame.
+ * This is a good test for robust partitioning.
+ */
+ BUG_ON(t >= next_major_frame);
+
+ /*
+ * Return the amount of time the next domain has to run and the address
+ * of the selected task's VCPU structure.
+ */
+ ret.time = next_switch_time - t;
+ ret.task = new_task;
+
+ BUG_ON(ret.time <= 0);
+
+ return ret;
+} /* end arinc653_do_schedule */
+
+/**
+ * Xen scheduler callback function to select a CPU for the VCPU to run on
+ *
+ * @param v Pointer to the VCPU structure for the current domain
+ *
+ * @return Number of selected physical CPU
+ */
+static int arinc653_pick_cpu(struct vcpu * v)
+{
+ /* this implementation only supports one physical CPU */
+ return 0;
+} /* end arinc653_pick_cpu */
+
+/**
+ * Xen scheduler callback function to wake up a VCPU
+ *
+ * @param vc Pointer to the VCPU structure for the current domain
+ *
+ * @return <None>
+ */
+static void arinc653_vcpu_wake(struct vcpu * vc)
+{
+ /* boolean flag to indicate first run */
+ static bool_t dont_raise_softirq = 0;
+
+ if (AVCPU(vc) != NULL) /* check that this is a VCPU we are tracking */
+ {
+ AVCPU(vc)->awake = 1;
+ }
+
+ /* the first time the vcpu_wake function is called, we should raise
+ * a softirq to invoke the do_scheduler callback */
+ if (!dont_raise_softirq)
+ {
+ cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
+ dont_raise_softirq = 1;
+ }
+} /* end arinc653_vcpu_wake */
+
+/**
+ * Xen scheduler callback function to sleep a VCPU
+ *
+ * @param vc Pointer to the VCPU structure for the current domain
+ *
+ * @return <None>
+ */
+static void arinc653_vcpu_sleep(struct vcpu * vc)
+{
+ if (AVCPU(vc) != NULL) /* check that this is a VCPU we are tracking */
+ {
+ AVCPU(vc)->awake = 0;
+ }
+
+ /* if the VCPU being put to sleep is the same one that is currently
+ * running, raise a softirq to invoke the scheduler to switch domains */
+ if (per_cpu(schedule_data, vc->processor).curr == vc)
+ {
+ cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
+ }
+} /* end arinc653_vcpu_sleep */
+
+/**
+ * This structure defines our scheduler for Xen.
+ * The entries tell Xen where to find our scheduler-specific
+ * callback functions.
+ * The symbol must be visible to the rest of Xen at link time.
+ */
+struct scheduler sched_arinc653_def = {
+ .name = "ARINC 653 Scheduler",
+ .opt_name = "arinc653",
+ .sched_id = XEN_SCHEDULER_ARINC653,
+
+ .init_domain = NULL,
+ .destroy_domain = NULL,
+
+ .init_vcpu = arinc653_init_vcpu,
+ .destroy_vcpu = arinc653_destroy_vcpu,
+
+ .do_schedule = arinc653_do_schedule,
+ .pick_cpu = arinc653_pick_cpu,
+ .dump_cpu_state = NULL,
+ .sleep = arinc653_vcpu_sleep,
+ .wake = arinc653_vcpu_wake,
+ .adjust = NULL,
+ .adjust_global = arinc653_adjust_global,
+};
diff -rupN a/xen/common/schedule.c b/xen/common/schedule.c
--- a/xen/common/schedule.c 2010-04-14 10:57:11.262796000 -0400
+++ b/xen/common/schedule.c 2010-04-14 16:40:21.543608000 -0400
@@ -7,7 +7,8 @@
* File: common/schedule.c
* Author: Rolf Neugebauer & Keir Fraser
* Updated for generic API by Mark Williamson
- *
+ * ARINC653 scheduler added by DornerWorks <DornerWorks.com>
+ *
* Description: Generic CPU scheduling code
 * implements support functionality for the Xen scheduler API.
*
@@ -56,9 +57,11 @@ DEFINE_PER_CPU(struct schedule_data, sch
extern const struct scheduler sched_sedf_def;
extern const struct scheduler sched_credit_def;
+extern const struct scheduler sched_arinc653_def;
static const struct scheduler *__initdata schedulers[] = {
&sched_sedf_def,
&sched_credit_def,
+ &sched_arinc653_def,
NULL
};
diff -rupN a/xen/include/public/domctl.h b/xen/include/public/domctl.h
--- a/xen/include/public/domctl.h 2010-04-14 10:57:11.262796000 -0400
+++ b/xen/include/public/domctl.h 2010-04-14 16:40:21.543608000 -0400
@@ -23,6 +23,8 @@
*
* Copyright (c) 2002-2003, B Dragovic
* Copyright (c) 2002-2006, K Fraser
+ *
+ * ARINC653 Scheduler type added by DornerWorks <DornerWorks.com>.
*/
#ifndef __XEN_PUBLIC_DOMCTL_H__
@@ -303,11 +305,43 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_v
/* Scheduler types. */
#define XEN_SCHEDULER_SEDF 4
#define XEN_SCHEDULER_CREDIT 5
+#define XEN_SCHEDULER_ARINC653 6
+
/* Set or get info? */
#define XEN_DOMCTL_SCHEDOP_putinfo 0
#define XEN_DOMCTL_SCHEDOP_getinfo 1
#define XEN_DOMCTL_SCHEDOP_put_global_info 2
#define XEN_DOMCTL_SCHEDOP_get_global_info 3
+
+/*
+ * This structure is used to pass a new ARINC653 schedule from a
+ * privileged domain (ie dom0) to Xen.
+ */
+#define ARINC653_MAX_DOMAINS_PER_SCHEDULE 64
+struct xen_domctl_sched_arinc653_schedule {
+ /* major_frame holds the time for the new schedule's major frame
+ * in nanoseconds. */
+ int64_t major_frame;
+ /* num_sched_entries holds how many of the entries in the
+ * sched_entries[] array are valid. */
+ uint8_t num_sched_entries;
+ /* The sched_entries array holds the actual schedule entries. */
+ struct {
+ /* dom_handle must match a domain's UUID */
+ xen_domain_handle_t dom_handle;
+ /* If a domain has multiple VCPUs, vcpu_id specifies which one
+ * this schedule entry applies to. It should be set to 0 if
+ * there is only one VCPU for the domain. */
+ int vcpu_id;
+ /* runtime specifies the amount of time that should be allocated
+ * to this VCPU per major frame. It is specified in nanoseconds */
+ int64_t runtime;
+ } sched_entries[ARINC653_MAX_DOMAINS_PER_SCHEDULE];
+};
+typedef struct xen_domctl_sched_arinc653_schedule
+ xen_domctl_sched_arinc653_schedule_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_sched_arinc653_schedule_t);
+
struct xen_domctl_scheduler_op {
uint32_t sched_id; /* XEN_SCHEDULER_* */
uint32_t cmd; /* XEN_DOMCTL_SCHEDOP_* */
@@ -323,6 +357,9 @@ struct xen_domctl_scheduler_op {
uint16_t weight;
uint16_t cap;
} credit;
+ struct xen_domctl_sched_arinc653 {
+ XEN_GUEST_HANDLE(xen_domctl_sched_arinc653_schedule_t) schedule;
+ } arinc653;
} u;
};
typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t;
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
Second resubmission, now that I have been added to the Xen-devel mailing
list.
Regards,
Kathy Hadley
DornerWorks, Ltd.
From: Kathy Hadley
Sent: Friday, April 16, 2010 10:19 AM
To: 'xen-devel@lists.xensource.com'
Cc: 'Keir.Fraser@citrix.com'
Subject: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
Kathy, thanks for your work on this. Unfortunately, with the new cpupools feature, your actual scheduler will need a little more modification before it can be merged into -unstable. Cpu pools carves up the cpus into several "pools", each of which has independent schedulers. This means that schedulers need to make pointers to per-pool "global" structures, rather than having global static structures. It should be a fairly straightforward transformation; you can see example transformations done on the credit and sedf schedulers. Let us know if you need any help. -George On Fri, Apr 16, 2010 at 9:18 AM, Kathy Hadley <Kathy.Hadley@dornerworks.com>wrote:> This patch adds an ARINC 653 scheduler to Xen. This is a modification of > an earlier patch ([Xen-devel] [PATCH 1/1] Xen ARINC653 scheduler). In > particular, it has been modified to use the new .adjust_global callback > function, which was added in “[Xen-devel] [PATCH 1/1] Add .adjust_global > callback”. > > > > Thanks and regards, > > > > > > [image: cid:image001.jpg@01CAD7E0.50E45D70] > > *Kathy Hadley > *DornerWorks, Ltd. > *Embedded Systems Engineering > > *3445 Lake Eastbrook Blvd SE > Grand Rapids, MI 49546 > > Direct: 616.389.6127 > > Tel: 616.245.8369 > > Fax: 616.245.8372 > > > > Kathy.Hadley@DornerWorks.com > > www.DornerWorks.com <http://www.dornerworks.com/> > > [image: cid:image002.jpg@01CAD7E0.50E45D70] > > *Honored as one of the 2010 “Michigan 50 Companies to Watch”* > > > > diff -rupN a/tools/libxc/Makefile b/tools/libxc/Makefile > > --- a/tools/libxc/Makefile 2010-04-13 10:49:37.573793000 -0400 > > +++ b/tools/libxc/Makefile 2010-04-14 17:49:26.952638000 -0400 > > @@ -17,6 +17,7 @@ CTRL_SRCS-y += xc_physdev.c > > CTRL_SRCS-y += xc_private.c > > CTRL_SRCS-y += xc_sedf.c > > CTRL_SRCS-y += xc_csched.c > > +CTRL_SRCS-y += xc_arinc653.c > > CTRL_SRCS-y += xc_tbuf.c > > CTRL_SRCS-y += xc_pm.c > > CTRL_SRCS-y += xc_cpu_hotplug.c > > diff -rupN a/tools/libxc/xc_arinc653.c b/tools/libxc/xc_arinc653.c > > --- a/tools/libxc/xc_arinc653.c 1969-12-31 19:00:00.000000000 -0500 > > +++ b/tools/libxc/xc_arinc653.c 2010-04-14 17:49:26.952638000 -0400 > > @@ -0,0 +1,28 @@ > > > +/**************************************************************************** > > + * (C) 2010 - DornerWorks, Ltd <DornerWorks.com> > > + > **************************************************************************** > > + * > > + * File: xc_arinc653.c > > + * Author: Josh Holtrop <DornerWorks.com> > > + * > > + * Description: XC Interface to the ARINC 653 scheduler > > + * > > + */ > > + > > +#include "xc_private.h" > > + > > +int > > +xc_sched_arinc653_sched_set( > > + int xc_handle, > > + xen_domctl_sched_arinc653_schedule_t * sched) > > +{ > > + DECLARE_DOMCTL; > > + > > + domctl.cmd = XEN_DOMCTL_scheduler_op; > > + domctl.domain = (domid_t) 0; > > + domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_ARINC653; > > + domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_put_global_info; > > + set_xen_guest_handle(domctl.u.scheduler_op.u.arinc653.schedule, > sched); > > + > > + return do_domctl(xc_handle, &domctl); > > +} > > diff -rupN a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h > > --- a/tools/libxc/xenctrl.h 2010-04-13 10:49:37.573793000 -0400 > > +++ b/tools/libxc/xenctrl.h 2010-04-14 17:49:26.952638000 -0400 > > @@ -476,6 +476,16 @@ int xc_sched_credit_domain_get(int xc_ha > > struct xen_domctl_sched_credit *sdom); > > > > /** > > + * This function sets the global ARINC 653 schedule. 
> > + * > > + * @parm xc_handle a handle to an open hypervisor interface > > + * @parm sched a pointer to the new ARINC 653 schedule > > + * return 0 on success > > + */ > > +int xc_sched_arinc653_sched_set(int xc_handle, > > + xen_domctl_sched_arinc653_schedule_t * > sched); > > + > > +/** > > * This function sends a trigger to a domain. > > * > > * @parm xc_handle a handle to an open hypervisor interface > > diff -rupN a/xen/common/Makefile b/xen/common/Makefile > > --- a/xen/common/Makefile 2010-04-13 10:49:37.573793000 -0400 > > +++ b/xen/common/Makefile 2010-04-13 13:00:31.651749000 -0400 > > @@ -14,6 +14,7 @@ obj-y += page_alloc.o > > obj-y += rangeset.o > > obj-y += sched_credit.o > > obj-y += sched_sedf.o > > +obj-y += sched_arinc653.o > > obj-y += schedule.o > > obj-y += shutdown.o > > obj-y += softirq.o > > diff -rupN a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c > > --- a/xen/common/sched_arinc653.c 1969-12-31 19:00:00.000000000 -0500 > > +++ b/xen/common/sched_arinc653.c 2010-04-14 18:13:26.163404000 -0400 > > @@ -0,0 +1,590 @@ > > +/* > > + * File: sched_arinc653.c > > + * Copyright (c) 2010, DornerWorks, Ltd. <DornerWorks.com> > > + * > > + * Description: > > + * This file provides an ARINC653-compatible scheduling algorithm > > + * for use in Xen. > > + * > > + * This program is free software; you can redistribute it and/or modify it > > + * under the terms of the GNU General Public License as published by the > Free > > + * software Foundation; either version 2 of the License, or (at your > option) > > + * any later version. > > + * > > + * This program is distributed in the hope that it will be useful, > > + * but WITHOUT ANY WARRANTY; without even the implied warranty of > > + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. > > + * See the GNU General Public License for more details. > > + */ > > + > > + > > > +/************************************************************************** > > + * Includes > * > > + > *************************************************************************/ > > +#include <xen/lib.h> > > +#include <xen/sched.h> > > +#include <xen/sched-if.h> > > +#include <xen/timer.h> > > +#include <xen/softirq.h> > > +#include <xen/time.h> > > +#include <xen/errno.h> > > +#include <xen/list.h> > > +#include <public/domctl.h> /* ARINC653_MAX_DOMAINS_PER_SCHEDULE > */ > > +#include <xen/guest_access.h> > > + > > + > > > +/************************************************************************** > > + * Private Macros > * > > + > **************************************************************************/ > > + > > +/** > > + * Retrieve the idle VCPU for a given physical CPU > > + */ > > +#define IDLETASK(cpu) ((struct vcpu *) per_cpu(schedule_data, > (cpu)).idle) > > + > > +/** > > + * Return a pointer to the ARINC 653-specific scheduler data information > > + * associated with the given VCPU (vc) > > + */ > > +#define AVCPU(vc) ((arinc653_vcpu_t *)(vc)->sched_priv) > > + > > > +/************************************************************************** > > + * Private Type Definitions > * > > + > **************************************************************************/ > > + > > +/** > > + * The sched_entry_t structure holds a single entry of the > > + * ARINC 653 schedule. > > + */ > > +typedef struct sched_entry_s > > +{ > > + /* dom_handle holds the handle ("UUID") for the domain that this > > + * schedule entry refers to. 
*/ > > + xen_domain_handle_t dom_handle; > > + /* vcpu_id holds the VCPU number for the VCPU that this schedule > > + * entry refers to. */ > > + int vcpu_id; > > + /* runtime holds the number of nanoseconds that the VCPU for this > > + * schedule entry should be allowed to run per major frame. */ > > + s_time_t runtime; > > + /* vc holds a pointer to the Xen VCPU structure */ > > + struct vcpu * vc; > > +} sched_entry_t; > > + > > +/** > > + * The arinc653_vcpu_t structure holds ARINC 653-scheduler-specific > > + * information for all non-idle VCPUs > > + */ > > +typedef struct arinc653_vcpu_s > > +{ > > + /* vc points to Xen''s struct vcpu so we can get to it from an > > + * arinc653_vcpu_t pointer. */ > > + struct vcpu * vc; > > + /* awake holds whether the VCPU has been woken with vcpu_wake() */ > > + bool_t awake; > > + /* list holds the linked list information for the list this VCPU > > + * is stored in */ > > + struct list_head list; > > +} arinc653_vcpu_t; > > + > > + > > > +/************************************************************************** > > + * Global Data > * > > + > **************************************************************************/ > > + > > +/** > > + * This array holds the active ARINC 653 schedule. > > + * > > + * When the system tries to start a new VCPU, this schedule is scanned > > + * to look for a matching (handle, VCPU #) pair. If both the handle > ("UUID") > > + * and VCPU number match, then the VCPU is allowed to run. Its run time > > + * (per major frame) is given in the third entry of the schedule. > > + */ > > +static sched_entry_t arinc653_schedule[ARINC653_MAX_DOMAINS_PER_SCHEDULE] > = { > > + { "", 0, MILLISECS(10), NULL } > > +}; > > + > > +/** > > + * This variable holds the number of entries that are valid in > > + * the arinc653_schedule table. > > + * > > + * This is not necessarily the same as the number of domains in the > > + * schedule. A domain could be listed multiple times within the schedule, > > + * or a domain with multiple VCPUs could have a different > > + * schedule entry for each VCPU. > > + * > > + * A value of 1 means that only 1 domain (Dom0) will initially be started. > > + */ > > +static int num_schedule_entries = 1; > > + > > +/** > > + * arinc653_major_frame holds the major frame time for the ARINC 653 > schedule. > > + */ > > +static s_time_t arinc653_major_frame = MILLISECS(10); > > + > > +/** > > + * next_major_frame holds the time that the next major frame starts > > + */ > > +static s_time_t next_major_frame = 0; > > + > > +/** > > + * vcpu_list holds pointers to all Xen VCPU structures for iterating > through > > + */ > > +static LIST_HEAD(vcpu_list); > > + > > > +/************************************************************************** > > + * Scheduler functions > * > > + > **************************************************************************/ > > + > > +/** > > + * This function compares two domain handles. 
> > + * > > + * @param h1 Pointer to handle 1 > > + * @param h2 Pointer to handle 2 > > + * > > + * @return <ul> > > + * <li> <0: handle 1 is less than handle 2 > > + * <li> 0: handle 1 is equal to handle 2 > > + * <li> >0: handle 1 is greater than handle 2 > > + * </ul> > > + */ > > +static int dom_handle_cmp(const xen_domain_handle_t h1, > > + const xen_domain_handle_t h2) > > +{ > > + return memcmp(h1, h2, sizeof(xen_domain_handle_t)); > > +} /* end dom_handle_cmp */ > > + > > +/** > > + * This function searches the vcpu list to find a VCPU that matches > > + * the domain handle and VCPU ID specified. > > + * > > + * @param handle Pointer to handler > > + * @param vcpu_id VCPU ID > > + * > > + * @return <ul> > > + * <li> Pointer to the matching VCPU if one is found > > + * <li> NULL otherwise > > + * </ul> > > + */ > > +static struct vcpu * find_vcpu(xen_domain_handle_t handle, int vcpu_id) > > +{ > > + arinc653_vcpu_t * avcpu; /* loop index variable */ > > + struct vcpu * vc = NULL; > > + > > + /* loop through the vcpu_list looking for the specified VCPU */ > > + list_for_each_entry(avcpu, &vcpu_list, list) > > + { > > + /* If the handles & VCPU IDs match, we''ve found a matching VCPU */ > > + if ((dom_handle_cmp(avcpu->vc->domain->handle, handle) == 0) > > + && (vcpu_id == avcpu->vc->vcpu_id)) > > + { > > + vc = avcpu->vc; > > + /* > > + * "break" statement used instead of loop control variable > because > > + * the macro used for this loop does not support using loop > control > > + * variables > > + */ > > + break; > > + } > > + } > > + > > + return vc; > > +} /* end find_vcpu */ > > + > > +/** > > + * This function updates the pointer to the Xen VCPU structure for each > entry in > > + * the ARINC 653 schedule. > > + * > > + * @param <None> > > + * @return <None> > > + */ > > +static void update_schedule_vcpus(void) > > +{ > > + /* Loop through the number of entries in the schedule */ > > + for (int i = 0; i < num_schedule_entries; i++) > > + { > > + /* Update the pointer to the Xen VCPU structure for the current > entry */ > > + arinc653_schedule[i].vc > > + find_vcpu(arinc653_schedule[i].dom_handle, > > + arinc653_schedule[i].vcpu_id); > > + } > > +} /* end update_schedule_vcpus */ > > + > > +/** > > + * This function is called by the arinc653_adjust_global scheduler > > + * callback function in response to a domain control hypercall with > > + * a scheduler operation. > > + * > > + * The parameter schedule is set to be the address of a local variable > from > > + * within arinc653_adjust_global(), so it is guaranteed to not be NULL. > > + * > > + * @param schedule Pointer to the new ARINC 653 schedule. 
> > +/**
> > + * This function is called by the arinc653_adjust_global scheduler
> > + * callback function in response to a domain control hypercall with
> > + * a scheduler operation.
> > + *
> > + * The parameter schedule is set to be the address of a local variable from
> > + * within arinc653_adjust_global(), so it is guaranteed to not be NULL.
> > + *
> > + * @param schedule  Pointer to the new ARINC 653 schedule.
> > + *
> > + * @return          <ul>
> > + *                  <li> 0 = success
> > + *                  <li> !0 = error
> > + *                  </ul>
> > + */
> > +static int arinc653_sched_set(xen_domctl_sched_arinc653_schedule_t * schedule)
> > +{
> > +    int ret = 0;
> > +    s_time_t total_runtime = 0;
> > +    bool_t found_dom0 = 0;
> > +    const static xen_domain_handle_t dom0_handle = {0};
> > +
> > +    /* check for valid major frame and number of schedule entries */
> > +    if ( (schedule->major_frame <= 0)
> > +         || (schedule->num_sched_entries < 1)
> > +         || (schedule->num_sched_entries > ARINC653_MAX_DOMAINS_PER_SCHEDULE) )
> > +    {
> > +        ret = -EINVAL;
> > +    }
> > +
> > +    if (ret == 0)
> > +    {
> > +        for (int i = 0; i < schedule->num_sched_entries; i++)
> > +        {
> > +            /*
> > +             * look for domain 0 handle - every schedule must contain
> > +             * some time for domain 0 to run
> > +             */
> > +            if (dom_handle_cmp(schedule->sched_entries[i].dom_handle,
> > +                               dom0_handle) == 0)
> > +            {
> > +                found_dom0 = 1;
> > +            }
> > +
> > +            /* check for a valid VCPU ID and run time */
> > +            if ( (schedule->sched_entries[i].vcpu_id < 0)
> > +                 || (schedule->sched_entries[i].runtime <= 0) )
> > +            {
> > +                ret = -EINVAL;
> > +            }
> > +            else
> > +            {
> > +                /* Add this entry's run time to total run time */
> > +                total_runtime += schedule->sched_entries[i].runtime;
> > +            }
> > +        } /* end loop through schedule entries */
> > +    }
> > +
> > +    if (ret == 0)
> > +    {
> > +        /* error if the schedule doesn't contain a slot for domain 0 */
> > +        if (found_dom0 == 0)
> > +        {
> > +            ret = -EINVAL;
> > +        }
> > +    }
> > +
> > +    if (ret == 0)
> > +    {
> > +        /*
> > +         * error if the major frame is not large enough to run all entries
> > +         * as indicated by comparing the total run time to the major frame
> > +         * length
> > +         */
> > +        if (total_runtime > schedule->major_frame)
> > +        {
> > +            ret = -EINVAL;
> > +        }
> > +    }
> > +
> > +    if (ret == 0)
> > +    {
> > +        /* copy the new schedule into place */
> > +        num_schedule_entries = schedule->num_sched_entries;
> > +        arinc653_major_frame = schedule->major_frame;
> > +        for (int i = 0; i < num_schedule_entries; i++)
> > +        {
> > +            memcpy(arinc653_schedule[i].dom_handle,
> > +                   schedule->sched_entries[i].dom_handle,
> > +                   sizeof(arinc653_schedule[i].dom_handle));
> > +            arinc653_schedule[i].vcpu_id =
> > +                schedule->sched_entries[i].vcpu_id;
> > +            arinc653_schedule[i].runtime =
> > +                schedule->sched_entries[i].runtime;
> > +        }
> > +        update_schedule_vcpus();
> > +
> > +        /*
> > +         * The newly-installed schedule takes effect immediately.
> > +         * We do not even wait for the current major frame to expire.
> > +         *
> > +         * Signal a new major frame to begin.  The next major frame
> > +         * is set up by the do_schedule callback function when it
> > +         * is next invoked.
> > +         */
> > +        next_major_frame = NOW();
> > +    }
> > +
> > +    return ret;
> > +} /* end arinc653_sched_set */
> > +
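Worked example of the checks above, with made-up numbers: for major_frame = 10 ms (10000000 ns), entries of 3 ms + 4 ms + 3 ms sum to exactly 10 ms and are accepted, leaving no idle time in the frame.  Adding a fourth 1 ms entry would push total_runtime to 11 ms > major_frame, so the whole schedule would be rejected with -EINVAL, as would any schedule with no entry for Dom0's all-zero handle.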
> > +/**
> > + * Xen scheduler callback function to adjust global scheduling parameters
> > + *
> > + * @param op        Pointer to the domain control scheduler operation structure
> > + *
> > + * @return          <ul>
> > + *                  <li> 0 for success
> > + *                  <li> !0 if there is an error
> > + *                  </ul>
> > + */
> > +static int arinc653_adjust_global(struct xen_domctl_scheduler_op * op)
> > +{
> > +    int ret = -1;
> > +    xen_domctl_sched_arinc653_schedule_t new_sched;
> > +
> > +    if (op->cmd == XEN_DOMCTL_SCHEDOP_put_global_info)
> > +    {
> > +        if (copy_from_guest(&new_sched, op->u.arinc653.schedule, 1) != 0)
> > +        {
> > +            ret = -EFAULT;
> > +        }
> > +        else
> > +        {
> > +            ret = arinc653_sched_set(&new_sched);
> > +        }
> > +    }
> > +
> > +    return ret;
> > +} /* end arinc653_adjust_global */
> > +
> > +/**
> > + * Xen scheduler callback function to initialize a virtual CPU (VCPU).
> > + *
> > + * @param v         Pointer to the VCPU structure
> > + *
> > + * @return          <ul>
> > + *                  <li> 0 if the VCPU is allowed to run
> > + *                  <li> !0 if there is an error
> > + *                  </ul>
> > + */
> > +static int arinc653_init_vcpu(struct vcpu * v)
> > +{
> > +    int ret = -1;
> > +
> > +    if (is_idle_vcpu(v))
> > +    {
> > +        /*
> > +         * The idle VCPU is created by Xen to run when no domains
> > +         * are runnable or require CPU time.
> > +         * It is similar to an "idle task" or "halt loop" process
> > +         * in an operating system.
> > +         * We do not track any scheduler information for the idle VCPU.
> > +         */
> > +        v->sched_priv = NULL;
> > +        ret = 0;
> > +    }
> > +    else
> > +    {
> > +        /*
> > +         * Allocate memory for the ARINC 653-specific scheduler data
> > +         * associated with the given VCPU (v).
> > +         */
> > +        v->sched_priv = xmalloc(arinc653_vcpu_t);
> > +        if (AVCPU(v) != NULL)
> > +        {
> > +            /*
> > +             * Initialize our ARINC 653 scheduler-specific information
> > +             * for the VCPU.
> > +             * The VCPU starts "asleep."
> > +             * When Xen is ready for the VCPU to run, it will call
> > +             * the vcpu_wake scheduler callback function and our
> > +             * scheduler will mark the VCPU awake.
> > +             */
> > +            AVCPU(v)->vc = v;
> > +            AVCPU(v)->awake = 0;
> > +            list_add(&AVCPU(v)->list, &vcpu_list);
> > +            ret = 0;
> > +            update_schedule_vcpus();
> > +        }
> > +    }
> > +
> > +    return ret;
> > +} /* end arinc653_init_vcpu */
> > +
> > +/**
> > + * Xen scheduler callback function to remove a VCPU
> > + *
> > + * @param v         Pointer to the VCPU structure to remove
> > + *
> > + * @return          <None>
> > + */
> > +static void arinc653_destroy_vcpu(struct vcpu * v)
> > +{
> > +    if (AVCPU(v) != NULL)
> > +    {
> > +        /* remove the VCPU from whichever list it is on */
> > +        list_del(&AVCPU(v)->list);
> > +        /* free the arinc653_vcpu structure */
> > +        xfree(AVCPU(v));
> > +        update_schedule_vcpus();
> > +    }
> > +} /* end arinc653_destroy_vcpu */
> > +
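For readers without the top of the file in front of them: AVCPU() is not shown in this excerpt, but from its use in arinc653_init_vcpu() and arinc653_destroy_vcpu() it is presumably just an accessor for the private pointer stored in sched_priv, along the lines of

    #define AVCPU(vc) ((arinc653_vcpu_t *)(vc)->sched_priv)

(my reconstruction, not quoted from the patch), which is why the destroy path can recover and xfree() the arinc653_vcpu_t straight from the struct vcpu.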
> > +/**
> > + * Xen scheduler callback function to select a VCPU to run.
> > + * This is the main scheduler routine.
> > + *
> > + * @param t         Current time
> > + *
> > + * @return          Time slice and address of the VCPU structure for the
> > + *                  chosen domain
> > + */
> > +static struct task_slice arinc653_do_schedule(s_time_t t)
> > +{
> > +    struct task_slice ret;              /* hold the chosen domain */
> > +    struct vcpu * new_task = NULL;
> > +    static int sched_index = 0;
> > +    static s_time_t last_major_frame;
> > +    static s_time_t last_switch_time;
> > +    static s_time_t next_switch_time;
> > +
> > +    if (t >= next_major_frame)
> > +    {
> > +        /* time to enter a new major frame
> > +         * the first time this function is called, this will be true */
> > +        sched_index = 0;
> > +        last_major_frame = last_switch_time = t;
> > +        next_major_frame = t + arinc653_major_frame;
> > +    }
> > +    else if (t >= next_switch_time)
> > +    {
> > +        /* time to switch to the next domain in this major frame */
> > +        sched_index++;
> > +        last_switch_time = next_switch_time;
> > +    }
> > +
> > +    /*
> > +     * If there are more domains to run in the current major frame, set
> > +     * next_switch_time equal to the last switch time + this domain's run time.
> > +     * Otherwise, set next_switch_time equal to the start of the next major
> > +     * frame.
> > +     */
> > +    next_switch_time = (sched_index < num_schedule_entries)
> > +                       ? last_switch_time + arinc653_schedule[sched_index].runtime
> > +                       : next_major_frame;
> > +
> > +    /*
> > +     * If there are more domains to run in the current major frame, set
> > +     * new_task equal to the address of next domain's VCPU structure.
> > +     * Otherwise, set new_task equal to the address of the idle task's VCPU
> > +     * structure.
> > +     */
> > +    new_task = (sched_index < num_schedule_entries)
> > +               ? arinc653_schedule[sched_index].vc
> > +               : IDLETASK(0);
> > +
> > +    /* Check to see if the new task can be run (awake & runnable). */
> > +    if ( !((new_task != NULL)
> > +           && AVCPU(new_task)->awake
> > +           && vcpu_runnable(new_task)) )
> > +    {
> > +        new_task = IDLETASK(0);
> > +    }
> > +    BUG_ON(new_task == NULL);
> > +
> > +    /*
> > +     * Check to make sure we did not miss a major frame.
> > +     * This is a good test for robust partitioning.
> > +     */
> > +    BUG_ON(t >= next_major_frame);
> > +
> > +    /*
> > +     * Return the amount of time the next domain has to run and the address
> > +     * of the selected task's VCPU structure.
> > +     */
> > +    ret.time = next_switch_time - t;
> > +    ret.task = new_task;
> > +
> > +    BUG_ON(ret.time <= 0);
> > +
> > +    return ret;
> > +} /* end arinc653_do_schedule */
> > +
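Two notes on the routine above, outside the patch text.  First, a worked example of the frame arithmetic with made-up runtimes: for a 10 ms major frame with three entries of 3 ms, 3 ms and 2 ms, a frame starting at t0 switches at t0+3, t0+6 and t0+8 ms; from t0+8 ms to the frame boundary at t0+10 ms, sched_index equals num_schedule_entries, so the idle VCPU runs out the remainder:

    t0        t0+3ms     t0+6ms     t0+8ms     t0+10ms
    | entry 0 | entry 1  | entry 2  | idle     | next major frame ...

A VCPU that is asleep or not runnable at its slot simply forfeits that slot to idle; the slot boundaries themselves never move.  Second, unless I am misreading it, when new_task is the idle VCPU the awake test dereferences AVCPU(new_task), which is NULL for the idle VCPU, so that condition probably wants an is_idle_vcpu() guard.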
> > +/**
> > + * Xen scheduler callback function to select a CPU for the VCPU to run on
> > + *
> > + * @param v         Pointer to the VCPU structure for the current domain
> > + *
> > + * @return          Number of selected physical CPU
> > + */
> > +static int arinc653_pick_cpu(struct vcpu * v)
> > +{
> > +    /* this implementation only supports one physical CPU */
> > +    return 0;
> > +} /* end arinc653_pick_cpu */
> > +
> > +/**
> > + * Xen scheduler callback function to wake up a VCPU
> > + *
> > + * @param vc        Pointer to the VCPU structure for the current domain
> > + *
> > + * @return          <None>
> > + */
> > +static void arinc653_vcpu_wake(struct vcpu * vc)
> > +{
> > +    /* boolean flag to indicate first run */
> > +    static bool_t dont_raise_softirq = 0;
> > +
> > +    if (AVCPU(vc) != NULL)  /* check that this is a VCPU we are tracking */
> > +    {
> > +        AVCPU(vc)->awake = 1;
> > +    }
> > +
> > +    /* the first time the vcpu_wake function is called, we should raise
> > +     * a softirq to invoke the do_schedule callback */
> > +    if (!dont_raise_softirq)
> > +    {
> > +        cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
> > +        dont_raise_softirq = 1;
> > +    }
> > +} /* end arinc653_vcpu_wake */
> > +
> > +/**
> > + * Xen scheduler callback function to sleep a VCPU
> > + *
> > + * @param vc        Pointer to the VCPU structure for the current domain
> > + *
> > + * @return          <None>
> > + */
> > +static void arinc653_vcpu_sleep(struct vcpu * vc)
> > +{
> > +    if (AVCPU(vc) != NULL)  /* check that this is a VCPU we are tracking */
> > +    {
> > +        AVCPU(vc)->awake = 0;
> > +    }
> > +
> > +    /* if the VCPU being put to sleep is the same one that is currently
> > +     * running, raise a softirq to invoke the scheduler to switch domains */
> > +    if (per_cpu(schedule_data, vc->processor).curr == vc)
> > +    {
> > +        cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
> > +    }
> > +} /* end arinc653_vcpu_sleep */
> > +
> > +/**
> > + * This structure defines our scheduler for Xen.
> > + * The entries tell Xen where to find our scheduler-specific
> > + * callback functions.
> > + * The symbol must be visible to the rest of Xen at link time.
> > + */
> > +struct scheduler sched_arinc653_def = {
> > +    .name           = "ARINC 653 Scheduler",
> > +    .opt_name       = "arinc653",
> > +    .sched_id       = XEN_SCHEDULER_ARINC653,
> > +
> > +    .init_domain    = NULL,
> > +    .destroy_domain = NULL,
> > +
> > +    .init_vcpu      = arinc653_init_vcpu,
> > +    .destroy_vcpu   = arinc653_destroy_vcpu,
> > +
> > +    .do_schedule    = arinc653_do_schedule,
> > +    .pick_cpu       = arinc653_pick_cpu,
> > +    .dump_cpu_state = NULL,
> > +    .sleep          = arinc653_vcpu_sleep,
> > +    .wake           = arinc653_vcpu_wake,
> > +    .adjust         = NULL,
> > +    .adjust_global  = arinc653_adjust_global,
> > +};
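Again outside the patch, a sketch of how the pieces in the remaining hunks fit together from dom0's side: a tool linking against libxc fills in the xen_domctl_sched_arinc653_schedule structure added to domctl.h below and hands it to Xen through the new libxc call.  The guest UUID byte and the runtimes here are made up and error handling is omitted; and with the scheduler registered in schedule.c below, it should be selectable at boot with the existing sched= parameter, i.e. sched=arinc653.

    /* Illustration only: install a two-slot, 10 ms schedule from dom0. */
    #include <string.h>
    #include <xenctrl.h>

    static int install_example_schedule(int xc_handle)
    {
        xen_domctl_sched_arinc653_schedule_t sched;

        memset(&sched, 0, sizeof(sched));
        sched.major_frame = 10000000LL;               /* 10 ms, in ns */
        sched.num_sched_entries = 2;

        /* slot 0: Dom0 (all-zero handle), VCPU 0, 6 ms per major frame */
        sched.sched_entries[0].vcpu_id = 0;
        sched.sched_entries[0].runtime = 6000000LL;

        /* slot 1: a guest partition, VCPU 0, 4 ms per major frame */
        sched.sched_entries[1].dom_handle[0] = 0xaa;  /* made-up UUID byte */
        sched.sched_entries[1].vcpu_id = 0;
        sched.sched_entries[1].runtime = 4000000LL;

        return xc_sched_arinc653_sched_set(xc_handle, &sched);
    }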
> > diff -rupN a/xen/common/schedule.c b/xen/common/schedule.c
> > --- a/xen/common/schedule.c	2010-04-14 10:57:11.262796000 -0400
> > +++ b/xen/common/schedule.c	2010-04-14 16:40:21.543608000 -0400
> > @@ -7,7 +7,8 @@
> >   * File:        common/schedule.c
> >   * Author:      Rolf Neugebauer & Keir Fraser
> >   *              Updated for generic API by Mark Williamson
> > - *
> > + *              ARINC653 scheduler added by DornerWorks <DornerWorks.com>
> > + *
> >   * Description: Generic CPU scheduling code
> >   *              implements support functionality for the Xen scheduler API.
> >   *
> > @@ -56,9 +57,11 @@ DEFINE_PER_CPU(struct schedule_data, sch
> >  
> >  extern const struct scheduler sched_sedf_def;
> >  extern const struct scheduler sched_credit_def;
> > +extern const struct scheduler sched_arinc653_def;
> >  static const struct scheduler *__initdata schedulers[] = {
> >      &sched_sedf_def,
> >      &sched_credit_def,
> > +    &sched_arinc653_def,
> >      NULL
> >  };
> >  
> > diff -rupN a/xen/include/public/domctl.h b/xen/include/public/domctl.h
> > --- a/xen/include/public/domctl.h	2010-04-14 10:57:11.262796000 -0400
> > +++ b/xen/include/public/domctl.h	2010-04-14 16:40:21.543608000 -0400
> > @@ -23,6 +23,8 @@
> >   *
> >   * Copyright (c) 2002-2003, B Dragovic
> >   * Copyright (c) 2002-2006, K Fraser
> > + *
> > + * ARINC653 Scheduler type added by DornerWorks <DornerWorks.com>.
> >   */
> >  
> >  #ifndef __XEN_PUBLIC_DOMCTL_H__
> > @@ -303,11 +305,43 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_v
> >  /* Scheduler types. */
> >  #define XEN_SCHEDULER_SEDF     4
> >  #define XEN_SCHEDULER_CREDIT   5
> > +#define XEN_SCHEDULER_ARINC653 6
> > +
> >  /* Set or get info? */
> >  #define XEN_DOMCTL_SCHEDOP_putinfo 0
> >  #define XEN_DOMCTL_SCHEDOP_getinfo 1
> >  #define XEN_DOMCTL_SCHEDOP_put_global_info 2
> >  #define XEN_DOMCTL_SCHEDOP_get_global_info 3
> > +
> > +/*
> > + * This structure is used to pass a new ARINC653 schedule from a
> > + * privileged domain (ie dom0) to Xen.
> > + */
> > +#define ARINC653_MAX_DOMAINS_PER_SCHEDULE 64
> > +struct xen_domctl_sched_arinc653_schedule {
> > +    /* major_frame holds the time for the new schedule's major frame
> > +     * in nanoseconds. */
> > +    int64_t major_frame;
> > +    /* num_sched_entries holds how many of the entries in the
> > +     * sched_entries[] array are valid. */
> > +    uint8_t num_sched_entries;
> > +    /* The sched_entries array holds the actual schedule entries. */
> > +    struct {
> > +        /* dom_handle must match a domain's UUID */
> > +        xen_domain_handle_t dom_handle;
> > +        /* If a domain has multiple VCPUs, vcpu_id specifies which one
> > +         * this schedule entry applies to.  It should be set to 0 if
> > +         * there is only one VCPU for the domain. */
> > +        int vcpu_id;
> > +        /* runtime specifies the amount of time that should be allocated
> > +         * to this VCPU per major frame.  It is specified in nanoseconds */
> > +        int64_t runtime;
> > +    } sched_entries[ARINC653_MAX_DOMAINS_PER_SCHEDULE];
> > +};
> > +typedef struct xen_domctl_sched_arinc653_schedule
> > +    xen_domctl_sched_arinc653_schedule_t;
> > +DEFINE_XEN_GUEST_HANDLE(xen_domctl_sched_arinc653_schedule_t);
> > +
> >  struct xen_domctl_scheduler_op {
> >      uint32_t sched_id;  /* XEN_SCHEDULER_* */
> >      uint32_t cmd;       /* XEN_DOMCTL_SCHEDOP_* */
> > @@ -323,6 +357,9 @@ struct xen_domctl_scheduler_op {
> >              uint16_t weight;
> >              uint16_t cap;
> >          } credit;
> > +        struct xen_domctl_sched_arinc653 {
> > +            XEN_GUEST_HANDLE(xen_domctl_sched_arinc653_schedule_t) schedule;
> > +        } arinc653;
> >      } u;
> >  };
> >  typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t;

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel