diff -Naur linux-2.6.12-rc2-mm3/arch/i386/kernel/process.c linux-2.6.12-rc2-mm3-plugsched/arch/i386/kernel/process.c
--- linux-2.6.12-rc2-mm3/arch/i386/kernel/process.c	2005-04-14 02:46:58.460677208 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/arch/i386/kernel/process.c	2005-04-23 13:37:59.795418088 -0700
@@ -626,6 +626,8 @@
 
 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
+	perfctr_suspend_thread(prev);
+
 	__unlazy_fpu(prev_p);
 
 	/*
diff -Naur linux-2.6.12-rc2-mm3/arch/ia64/kernel/domain.c linux-2.6.12-rc2-mm3-plugsched/arch/ia64/kernel/domain.c
--- linux-2.6.12-rc2-mm3/arch/ia64/kernel/domain.c	2005-04-14 02:46:58.756632216 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/arch/ia64/kernel/domain.c	2005-04-23 13:20:17.511909728 -0700
@@ -14,7 +14,7 @@
 #include <linux/topology.h>
 #include <linux/nodemask.h>
 
-#define SD_NODES_PER_DOMAIN 16
+#define SD_NODES_PER_DOMAIN 6
 
 #ifdef CONFIG_NUMA
 /**
diff -Naur linux-2.6.12-rc2-mm3/fs/proc/array.c linux-2.6.12-rc2-mm3-plugsched/fs/proc/array.c
--- linux-2.6.12-rc2-mm3/fs/proc/array.c	2005-04-14 02:47:18.217673688 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/fs/proc/array.c	2005-04-23 13:20:23.562989824 -0700
@@ -163,7 +163,6 @@
 	read_lock(&tasklist_lock);
 	buffer += sprintf(buffer,
 		"State:\t%s\n"
-		"SleepAVG:\t%lu%%\n"
 		"Tgid:\t%d\n"
 		"Pid:\t%d\n"
 		"PPid:\t%d\n"
@@ -171,7 +170,6 @@
 		"Uid:\t%d\t%d\t%d\t%d\n"
 		"Gid:\t%d\t%d\t%d\t%d\n",
 		get_task_state(p),
-		(p->sleep_avg/1024)*100/(1020000000/1024),
 	       	p->tgid,
 		p->pid, pid_alive(p) ? p->group_leader->real_parent->tgid : 0,
 		pid_alive(p) && p->ptrace ? p->parent->pid : 0,
diff -Naur linux-2.6.12-rc2-mm3/fs/proc/base.c linux-2.6.12-rc2-mm3-plugsched/fs/proc/base.c
--- linux-2.6.12-rc2-mm3/fs/proc/base.c	2005-04-14 02:47:18.220673232 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/fs/proc/base.c	2005-04-23 13:20:23.582986784 -0700
@@ -34,6 +34,7 @@
 #include <linux/ptrace.h>
 #include <linux/seccomp.h>
 #include <linux/cpuset.h>
+#include <linux/sched_task.h>
 #include <linux/audit.h>
 #include "internal.h"
 
@@ -110,6 +111,10 @@
 #ifdef CONFIG_CPUSETS
 	PROC_TID_CPUSET,
 #endif
+#ifdef CONFIG_CPUSCHED_SPA
+	PROC_TID_CPU_RATE_CAP,
+	PROC_TID_CPU_RATE_HARD_CAP,
+#endif
 #ifdef CONFIG_SECURITY
 	PROC_TID_ATTR,
 	PROC_TID_ATTR_CURRENT,
@@ -205,6 +210,10 @@
 #ifdef CONFIG_AUDITSYSCALL
 	E(PROC_TID_LOGINUID, "loginuid", S_IFREG|S_IWUSR|S_IRUGO),
 #endif
+#ifdef CONFIG_CPUSCHED_SPA
+	E(PROC_TID_CPU_RATE_CAP,  "cpu_rate_cap",   S_IFREG|S_IRUGO|S_IWUSR),
+	E(PROC_TID_CPU_RATE_HARD_CAP,  "cpu_rate_hard_cap",   S_IFREG|S_IRUGO|S_IWUSR),
+#endif
 	{0,0,NULL,0}
 };
 
@@ -890,6 +899,100 @@
 };
 #endif /* CONFIG_SECCOMP */
 
+#ifdef CONFIG_CPUSCHED_SPA
+static ssize_t cpu_rate_cap_read(struct file * file, char * buf,
+			size_t count, loff_t *ppos)
+{
+	struct task_struct *task = PROC_I(file->f_dentry->d_inode)->task;
+	char buffer[64];
+	size_t len;
+	unsigned int cppt = get_cpu_rate_cap(task);
+
+	if (*ppos)
+		return 0;
+	*ppos = len = sprintf(buffer, "%u\n", cppt);
+	if (copy_to_user(buf, buffer, len))
+		return -EFAULT;
+
+	return len;
+}
+
+static ssize_t cpu_rate_cap_write(struct file * file, const char * buf,
+			 size_t count, loff_t *ppos)
+{
+	struct task_struct *task = PROC_I(file->f_dentry->d_inode)->task;
+	char buffer[128] = "";
+	char *endptr = NULL;
+	unsigned long hcppt;
+	int res;
+
+
+	if ((count > 63) || *ppos)
+		return -EFBIG;
+	if (copy_from_user(buffer, buf, count))
+		return -EFAULT;
+	hcppt = simple_strtoul(buffer, &endptr, 0);
+	if ((endptr == buffer) || (hcppt == ULONG_MAX))
+		return -EINVAL;
+
+	if ((res = set_cpu_rate_cap(task, hcppt)) != 0)
+		return res;
+
+	return count;
+}
+
+struct file_operations proc_cpu_rate_cap_operations = {
+	read:		cpu_rate_cap_read,
+	write:		cpu_rate_cap_write,
+};
+
+ssize_t cpu_rate_hard_cap_read(struct file * file, char * buf,
+			size_t count, loff_t *ppos)
+{
+	struct task_struct *task = PROC_I(file->f_dentry->d_inode)->task;
+	char buffer[64];
+	size_t len;
+	unsigned int hcppt = get_cpu_rate_hard_cap(task);
+
+	if (*ppos)
+		return 0;
+	*ppos = len = sprintf(buffer, "%u\n", hcppt);
+	if (copy_to_user(buf, buffer, len))
+		return -EFAULT;
+
+	return len;
+}
+
+ssize_t cpu_rate_hard_cap_write(struct file * file, const char * buf,
+			 size_t count, loff_t *ppos)
+{
+	struct task_struct *task = PROC_I(file->f_dentry->d_inode)->task;
+	char buffer[128] = "";
+	char *endptr = NULL;
+	unsigned long long hcppt;
+	int res;
+
+
+	if ((count > 63) || *ppos)
+		return -EFBIG;
+	if (copy_from_user(buffer, buf, count))
+		return -EFAULT;
+	hcppt = simple_strtoul(buffer, &endptr, 0);
+	if ((endptr == buffer) || (hcppt == ULONG_MAX))
+		return -EINVAL;
+
+	if ((res = set_cpu_rate_hard_cap(task, hcppt)) != 0)
+		return res;
+
+	return count;
+}
+
+struct file_operations proc_cpu_rate_hard_cap_operations = {
+	read:		cpu_rate_hard_cap_read,
+	write:		cpu_rate_hard_cap_write,
+};
+#endif
+
 static int proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
 	struct inode *inode = dentry->d_inode;
@@ -1594,6 +1697,14 @@
 			inode->i_fop = &proc_loginuid_operations;
 			break;
 #endif
+#ifdef CONFIG_CPUSCHED_SPA
+		case PROC_TID_CPU_RATE_CAP:
+			inode->i_fop = &proc_cpu_rate_cap_operations;
+			break;
+		case PROC_TID_CPU_RATE_HARD_CAP:
+			inode->i_fop = &proc_cpu_rate_hard_cap_operations;
+			break;
+#endif
 		default:
 			printk("procfs: impossible type (%d)",p->type);
 			iput(inode);
diff -Naur linux-2.6.12-rc2-mm3/fs/proc/proc_misc.c linux-2.6.12-rc2-mm3-plugsched/fs/proc/proc_misc.c
--- linux-2.6.12-rc2-mm3/fs/proc/proc_misc.c	2005-04-14 02:47:18.222672928 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/fs/proc/proc_misc.c	2005-04-23 13:21:17.167840648 -0700
@@ -44,6 +44,7 @@
 #include <linux/jiffies.h>
 #include <linux/sysrq.h>
 #include <linux/vmalloc.h>
+#include <linux/sched_drv.h>
 #include <linux/crash_dump.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -230,6 +231,17 @@
 	return proc_calc_metrics(page, start, off, count, eof, len);
 }
 
+static int scheduler_read_proc(char *page, char **start, off_t off,
+				 int count, int *eof, void *data)
+{
+	int len;
+
+	strcpy(page, sched_drvp->name);
+	strcat(page, "\n");
+	len = strlen(page);
+	return proc_calc_metrics(page, start, off, count, eof, len);
+}
+
 extern struct seq_operations cpuinfo_op;
 static int cpuinfo_open(struct inode *inode, struct file *file)
 {
@@ -633,6 +645,7 @@
 		{"cmdline",	cmdline_read_proc},
 		{"locks",	locks_read_proc},
 		{"execdomains",	execdomains_read_proc},
+		{"scheduler",	scheduler_read_proc},
 		{NULL,}
 	};
 	for (p = simple_ones; p->name; p++)
diff -Naur linux-2.6.12-rc2-mm3/include/asm-arm/system.h linux-2.6.12-rc2-mm3-plugsched/include/asm-arm/system.h
--- linux-2.6.12-rc2-mm3/include/asm-arm/system.h	2005-04-14 02:47:19.003554216 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/include/asm-arm/system.h	2005-04-23 13:20:17.590897720 -0700
@@ -141,6 +141,35 @@
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
 
+#ifdef CONFIG_SMP
+/*
+ * Define our own context switch locking.  This allows us to enable
+ * interrupts over the context switch, otherwise we end up with high
+ * interrupt latency.  The real problem area is switch_mm() which may
+ * do a full cache flush.
+ */
+#define prepare_arch_switch(rq,next)					\
+do {									\
+	spin_lock(&(next)->switch_lock);				\
+	spin_unlock_irq(&(rq)->lock);					\
+} while (0)
+
+#define finish_arch_switch(rq,prev)					\
+	spin_unlock(&(prev)->switch_lock)
+
+#define task_running(rq,p)						\
+	((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
+#else
+/*
+ * Our UP-case is more simple, but we assume knowledge of how
+ * spin_unlock_irq() and friends are implemented.  This avoids
+ * us needlessly decrementing and incrementing the preempt count.
+ */
+#define prepare_arch_switch(rq,next)	local_irq_enable()
+#define finish_arch_switch(rq,prev)	spin_unlock(&(rq)->lock)
+#define task_running(rq,p)		((rq)->curr == (p))
+#endif
+
 /*
  * switch_to(prev, next) should switch from task `prev' to `next'
  * `prev' will never be the same as `next'.  schedule() itself
diff -Naur linux-2.6.12-rc2-mm3/include/asm-arm26/system.h linux-2.6.12-rc2-mm3-plugsched/include/asm-arm26/system.h
--- linux-2.6.12-rc2-mm3/include/asm-arm26/system.h	2005-04-14 02:47:18.958561056 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/include/asm-arm26/system.h	2005-04-23 13:20:17.605895440 -0700
@@ -94,6 +94,15 @@
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 
 /*
+ * We assume knowledge of how
+ * spin_unlock_irq() and friends are implemented.  This avoids
+ * us needlessly decrementing and incrementing the preempt count.
+ */
+#define prepare_arch_switch(rq,next)	local_irq_enable()
+#define finish_arch_switch(rq,prev)	spin_unlock(&(rq)->lock)
+#define task_running(rq,p)		((rq)->curr == (p))
+
+/*
  * switch_to(prev, next) should switch from task `prev' to `next'
  * `prev' will never be the same as `next'.  schedule() itself
  * contains the memory barrier to tell GCC not to cache `current'.
diff -Naur linux-2.6.12-rc2-mm3/include/asm-i386/system.h linux-2.6.12-rc2-mm3-plugsched/include/asm-i386/system.h
--- linux-2.6.12-rc2-mm3/include/asm-i386/system.h	2005-04-14 02:47:19.238518496 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/include/asm-i386/system.h	2005-04-23 13:37:59.872406384 -0700
@@ -14,7 +14,6 @@
 
 #define switch_to(prev,next,last) do {					\
 	unsigned long esi,edi;						\
-	perfctr_suspend_thread(&(prev)->thread);			\
 	asm volatile("pushfl\n\t"					\
 		     "pushl %%ebp\n\t"					\
 		     "movl %%esp,%0\n\t"	/* save ESP */		\
diff -Naur linux-2.6.12-rc2-mm3/include/asm-i386/topology.h linux-2.6.12-rc2-mm3-plugsched/include/asm-i386/topology.h
--- linux-2.6.12-rc2-mm3/include/asm-i386/topology.h	2005-04-14 02:47:19.239518344 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/include/asm-i386/topology.h	2005-04-23 13:20:17.623892704 -0700
@@ -78,14 +78,11 @@
 	.imbalance_pct		= 125,			\
 	.cache_hot_time		= (10*1000000),		\
 	.cache_nice_tries	= 1,			\
-	.busy_idx		= 3,			\
-	.idle_idx		= 1,			\
-	.newidle_idx		= 2,			\
-	.wake_idx		= 1,			\
 	.per_cpu_gain		= 100,			\
 	.flags			= SD_LOAD_BALANCE	\
 				| SD_BALANCE_EXEC	\
-				| SD_BALANCE_FORK	\
+				| SD_BALANCE_NEWIDLE	\
+				| SD_WAKE_IDLE		\
 				| SD_WAKE_BALANCE,	\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
diff -Naur linux-2.6.12-rc2-mm3/include/asm-ia64/system.h linux-2.6.12-rc2-mm3-plugsched/include/asm-ia64/system.h
--- linux-2.6.12-rc2-mm3/include/asm-ia64/system.h	2005-04-14 02:47:20.051394920 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/include/asm-ia64/system.h	2005-04-23 13:20:17.658887384 -0700
@@ -183,6 +183,8 @@
 
 #ifdef __KERNEL__
 
+#define prepare_to_switch()    do { } while(0)
+
 #ifdef CONFIG_IA32_SUPPORT
 # define IS_IA32_PROCESS(regs)	(ia64_psr(regs)->is != 0)
 #else
@@ -248,6 +250,38 @@
 # define switch_to(prev,next,last)	__switch_to(prev, next, last)
 #endif
 
+/*
+ * On IA-64, we don't want to hold the runqueue's lock during the low-level context-switch,
+ * because that could cause a deadlock.  Here is an example by Erich Focht:
+ *
+ * Example:
+ * CPU#0:
+ * schedule()
+ *    -> spin_lock_irq(&rq->lock)
+ *    -> context_switch()
+ *       -> wrap_mmu_context()
+ *          -> read_lock(&tasklist_lock)
+ *
+ * CPU#1:
+ * sys_wait4() or release_task() or forget_original_parent()
+ *    -> write_lock(&tasklist_lock)
+ *    -> do_notify_parent()
+ *       -> wake_up_parent()
+ *          -> try_to_wake_up()
+ *             -> spin_lock_irq(&parent_rq->lock)
+ *
+ * If the parent's rq happens to be on CPU#0, we'll wait for the rq->lock
+ * of that CPU which will not be released, because there we wait for the
+ * tasklist_lock to become available.
+ */
+#define prepare_arch_switch(rq, next)		\
+do {						\
+	spin_lock(&(next)->switch_lock);	\
+	spin_unlock(&(rq)->lock);		\
+} while (0)
+#define finish_arch_switch(rq, prev)	spin_unlock_irq(&(prev)->switch_lock)
+#define task_running(rq, p) 		((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
+
 #define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
 
 void cpu_idle_wait(void);
diff -Naur linux-2.6.12-rc2-mm3/include/asm-ia64/topology.h linux-2.6.12-rc2-mm3-plugsched/include/asm-ia64/topology.h
--- linux-2.6.12-rc2-mm3/include/asm-ia64/topology.h	2005-04-14 02:47:20.052394768 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/include/asm-ia64/topology.h	2005-04-23 13:20:17.676884648 -0700
@@ -42,54 +42,25 @@
 
 void build_cpu_to_node_map(void);
 
-#define SD_CPU_INIT (struct sched_domain) {		\
-	.span			= CPU_MASK_NONE,	\
-	.parent			= NULL,			\
-	.groups			= NULL,			\
-	.min_interval		= 1,			\
-	.max_interval		= 4,			\
-	.busy_factor		= 64,			\
-	.imbalance_pct		= 125,			\
-	.cache_hot_time		= (10*1000000),		\
-	.per_cpu_gain		= 100,			\
-	.cache_nice_tries	= 2,			\
-	.busy_idx		= 2,			\
-	.idle_idx		= 1,			\
-	.newidle_idx		= 2,			\
-	.wake_idx		= 1,			\
-	.forkexec_idx		= 1,			\
-	.flags			= SD_LOAD_BALANCE	\
-				| SD_BALANCE_NEWIDLE	\
-				| SD_BALANCE_EXEC	\
-				| SD_WAKE_AFFINE,	\
-	.last_balance		= jiffies,		\
-	.balance_interval	= 1,			\
-	.nr_balance_failed	= 0,			\
-}
-
 /* sched_domains SD_NODE_INIT for IA64 NUMA machines */
 #define SD_NODE_INIT (struct sched_domain) {		\
 	.span			= CPU_MASK_NONE,	\
 	.parent			= NULL,			\
 	.groups			= NULL,			\
-	.min_interval		= 8,			\
-	.max_interval		= 8*(min(num_online_cpus(), 32)), \
-	.busy_factor		= 64,			\
+	.min_interval		= 80,			\
+	.max_interval		= 320,			\
+	.busy_factor		= 320,			\
 	.imbalance_pct		= 125,			\
 	.cache_hot_time		= (10*1000000),		\
-	.cache_nice_tries	= 2,			\
-	.busy_idx		= 3,			\
-	.idle_idx		= 2,			\
-	.newidle_idx		= 0, /* unused */	\
-	.wake_idx		= 1,			\
-	.forkexec_idx		= 1,			\
+	.cache_nice_tries	= 1,			\
 	.per_cpu_gain		= 100,			\
 	.flags			= SD_LOAD_BALANCE	\
 				| SD_BALANCE_EXEC	\
-				| SD_BALANCE_FORK	\
+				| SD_BALANCE_NEWIDLE	\
+				| SD_WAKE_IDLE		\
 				| SD_WAKE_BALANCE,	\
 	.last_balance		= jiffies,		\
-	.balance_interval	= 64,			\
+	.balance_interval	= 1,			\
 	.nr_balance_failed	= 0,			\
 }
 
@@ -98,21 +69,17 @@
 	.span			= CPU_MASK_NONE,	\
 	.parent			= NULL,			\
 	.groups			= NULL,			\
-	.min_interval		= 64,			\
-	.max_interval		= 64*num_online_cpus(),	\
-	.busy_factor		= 128,			\
-	.imbalance_pct		= 133,			\
+	.min_interval		= 80,			\
+	.max_interval		= 320,			\
+	.busy_factor		= 320,			\
+	.imbalance_pct		= 125,			\
 	.cache_hot_time		= (10*1000000),		\
 	.cache_nice_tries	= 1,			\
-	.busy_idx		= 3,			\
-	.idle_idx		= 3,			\
-	.newidle_idx		= 0, /* unused */	\
-	.wake_idx		= 0, /* unused */	\
-	.forkexec_idx		= 0, /* unused */	\
 	.per_cpu_gain		= 100,			\
-	.flags			= SD_LOAD_BALANCE,	\
+	.flags			= SD_LOAD_BALANCE	\
+				| SD_BALANCE_EXEC,	\
 	.last_balance		= jiffies,		\
-	.balance_interval	= 64,			\
+	.balance_interval	= 100*(63+num_online_cpus())/64,   \
 	.nr_balance_failed	= 0,			\
 }
 
diff -Naur linux-2.6.12-rc2-mm3/include/asm-mips/system.h linux-2.6.12-rc2-mm3-plugsched/include/asm-mips/system.h
--- linux-2.6.12-rc2-mm3/include/asm-mips/system.h	2005-04-14 02:47:20.233367256 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/include/asm-mips/system.h	2005-04-23 13:20:17.698881304 -0700
@@ -421,6 +421,18 @@
 
 extern int stop_a_enabled;
 
+/*
+ * Taken from include/asm-ia64/system.h; prevents deadlock on SMP
+ * systems.
+ */
+#define prepare_arch_switch(rq, next)		\
+do {						\
+	spin_lock(&(next)->switch_lock);	\
+	spin_unlock(&(rq)->lock);		\
+} while (0)
+#define finish_arch_switch(rq, prev)	spin_unlock_irq(&(prev)->switch_lock)
+#define task_running(rq, p) 		((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
+
 #define arch_align_stack(x) (x)
 
 #endif /* _ASM_SYSTEM_H */
diff -Naur linux-2.6.12-rc2-mm3/include/asm-s390/system.h linux-2.6.12-rc2-mm3-plugsched/include/asm-s390/system.h
--- linux-2.6.12-rc2-mm3/include/asm-s390/system.h	2005-04-14 02:47:20.666301440 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/include/asm-s390/system.h	2005-04-23 13:20:17.720877960 -0700
@@ -101,15 +101,29 @@
 	save_access_regs(&prev->thread.acrs[0]);			     \
 	restore_access_regs(&next->thread.acrs[0]);			     \
 	prev = __switch_to(prev,next);					     \
-	set_fs(current->thread.mm_segment);				     \
-	account_system_vtime(prev);					     \
 } while (0)
 
+#define prepare_arch_switch(rq, next)	do { } while(0)
+#define task_running(rq, p)		((rq)->curr == (p))
+
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 extern void account_user_vtime(struct task_struct *);
 extern void account_system_vtime(struct task_struct *);
+
+#define finish_arch_switch(rq, prev) do {				     \
+	set_fs(current->thread.mm_segment);				     \
+	spin_unlock(&(rq)->lock);					     \
+	account_system_vtime(prev);					     \
+	local_irq_enable();						     \
+} while (0)
+
 #else
-#define account_system_vtime(prev) do { } while (0)
+
+#define finish_arch_switch(rq, prev) do {				     \
+	set_fs(current->thread.mm_segment);				     \
+	spin_unlock_irq(&(rq)->lock);					     \
+} while (0)
+
 #endif
 
 #define nop() __asm__ __volatile__ ("nop")
diff -Naur linux-2.6.12-rc2-mm3/include/asm-sparc/system.h linux-2.6.12-rc2-mm3-plugsched/include/asm-sparc/system.h
--- linux-2.6.12-rc2-mm3/include/asm-sparc/system.h	2005-04-14 02:47:20.853273016 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/include/asm-sparc/system.h	2005-04-23 13:20:17.745874160 -0700
@@ -94,6 +94,24 @@
 	} while(0)
 #endif
 
+/*
+ * Flush windows so that the VM switch which follows
+ * would not pull the stack from under us.
+ *
+ * SWITCH_ENTER and SWITCH_DO_LAZY_FPU do not work yet (e.g. SMP does not work)
+ * XXX WTF is the above comment? Found in late teen 2.4.x.
+ */
+#define prepare_arch_switch(rq, next) do { \
+	__asm__ __volatile__( \
+	".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
+	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
+	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
+	"save %sp, -0x40, %sp\n\t" \
+	"restore; restore; restore; restore; restore; restore; restore"); \
+} while(0)
+#define finish_arch_switch(rq, next)	spin_unlock_irq(&(rq)->lock)
+#define task_running(rq, p)		((rq)->curr == (p))
+
 	/* Much care has gone into this code, do not touch it.
 	 *
 	 * We need to loadup regs l0/l1 for the newly forked child
@@ -106,12 +124,6 @@
 	 * - Anton & Pete
 	 */
 #define switch_to(prev, next, last) do {						\
-	__asm__ __volatile__( \
-	".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
-	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
-	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
-	"save %sp, -0x40, %sp\n\t" \
-	"restore; restore; restore; restore; restore; restore; restore"); \
 	SWITCH_ENTER(prev);								\
 	SWITCH_DO_LAZY_FPU(next);							\
 	cpu_set(smp_processor_id(), next->active_mm->cpu_vm_mask);			\
diff -Naur linux-2.6.12-rc2-mm3/include/asm-sparc64/system.h linux-2.6.12-rc2-mm3-plugsched/include/asm-sparc64/system.h
--- linux-2.6.12-rc2-mm3/include/asm-sparc64/system.h	2005-04-14 02:47:20.821277880 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/include/asm-sparc64/system.h	2005-04-23 13:20:17.762871576 -0700
@@ -139,6 +139,19 @@
 #define flush_user_windows flushw_user
 #define flush_register_windows flushw_all
 
+#define prepare_arch_switch(rq, next)		\
+do {	spin_lock(&(next)->switch_lock);	\
+	spin_unlock(&(rq)->lock);		\
+	flushw_all();				\
+} while (0)
+
+#define finish_arch_switch(rq, prev)		\
+do {	spin_unlock_irq(&(prev)->switch_lock);	\
+} while (0)
+
+#define task_running(rq, p) \
+	((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
+
 	/* See what happens when you design the chip correctly?
 	 *
 	 * We tell gcc we clobber all non-fixed-usage registers except
@@ -154,8 +167,7 @@
 #define EXTRA_CLOBBER
 #endif
 #define switch_to(prev, next, last)					\
-do {	flushw_all();							\
-	if (test_thread_flag(TIF_PERFCTR)) {				\
+do {	if (test_thread_flag(TIF_PERFCTR)) {				\
 		unsigned long __tmp;					\
 		read_pcr(__tmp);					\
 		current_thread_info()->pcr_reg = __tmp;			\
diff -Naur linux-2.6.12-rc2-mm3/include/asm-x86_64/system.h linux-2.6.12-rc2-mm3-plugsched/include/asm-x86_64/system.h
--- linux-2.6.12-rc2-mm3/include/asm-x86_64/system.h	2005-04-14 02:47:21.043244136 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/include/asm-x86_64/system.h	2005-04-23 13:20:23.616981616 -0700
@@ -32,8 +32,6 @@
 		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
 		     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
 		     "call __switch_to\n\t"					  \
-		     ".globl thread_return\n"					\
-		     "thread_return:\n\t"					    \
 		     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"			  \
 		     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
 		     LOCK "btr  %[tif_fork],%P[ti_flags](%%r8)\n\t"		  \
diff -Naur linux-2.6.12-rc2-mm3/include/asm-x86_64/topology.h linux-2.6.12-rc2-mm3-plugsched/include/asm-x86_64/topology.h
--- linux-2.6.12-rc2-mm3/include/asm-x86_64/topology.h	2005-04-14 02:47:21.044243984 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/include/asm-x86_64/topology.h	2005-04-23 13:20:17.790867320 -0700
@@ -48,16 +48,12 @@
 	.busy_factor		= 32,			\
 	.imbalance_pct		= 125,			\
 	.cache_hot_time		= (10*1000000),		\
-	.cache_nice_tries	= 2,			\
-	.busy_idx		= 3,			\
-	.idle_idx		= 2,			\
-	.newidle_idx		= 0, 			\
-	.wake_idx		= 1,			\
-	.forkexec_idx		= 1,			\
+	.cache_nice_tries	= 1,			\
 	.per_cpu_gain		= 100,			\
 	.flags			= SD_LOAD_BALANCE	\
-				| SD_BALANCE_FORK	\
+				| SD_BALANCE_NEWIDLE	\
 				| SD_BALANCE_EXEC	\
+				| SD_WAKE_IDLE		\
 				| SD_WAKE_BALANCE,	\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
diff -Naur linux-2.6.12-rc2-mm3/include/linux/init_task.h linux-2.6.12-rc2-mm3-plugsched/include/linux/init_task.h
--- linux-2.6.12-rc2-mm3/include/linux/init_task.h	2005-04-14 02:47:23.351893168 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/include/linux/init_task.h	2005-04-23 13:26:04.727124992 -0700
@@ -74,15 +74,14 @@
 	.usage		= ATOMIC_INIT(2),				\
 	.flags		= 0,						\
 	.lock_depth	= -1,						\
-	.prio		= MAX_PRIO-20,					\
-	.static_prio	= MAX_PRIO-20,					\
+	.prio   = NICE_TO_PRIO(0),                              \
+        .static_prio    = NICE_TO_PRIO(0),                              \
 	.policy		= SCHED_NORMAL,					\
 	.cpus_allowed	= CPU_MASK_ALL,					\
 	.mm		= NULL,						\
 	.active_mm	= &init_mm,					\
 	.run_list	= LIST_HEAD_INIT(tsk.run_list),			\
 	.ioprio		= 0,						\
-	.time_slice	= HZ,						\
 	.tasks		= LIST_HEAD_INIT(tsk.tasks),			\
 	.ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children),		\
 	.ptrace_list	= LIST_HEAD_INIT(tsk.ptrace_list),		\
@@ -109,6 +108,7 @@
 	.blocked	= {{0}},					\
 	.alloc_lock	= SPIN_LOCK_UNLOCKED,				\
 	.proc_lock	= SPIN_LOCK_UNLOCKED,				\
+	.switch_lock	= SPIN_LOCK_UNLOCKED,				\
 	.journal_info	= NULL,						\
 	.cpu_timers	= INIT_CPU_TIMERS(tsk.cpu_timers),		\
 	.fs_excl	= ATOMIC_INIT(0),				\
diff -Naur linux-2.6.12-rc2-mm3/include/linux/sched.h linux-2.6.12-rc2-mm3-plugsched/include/linux/sched.h
--- linux-2.6.12-rc2-mm3/include/linux/sched.h	2005-04-14 02:47:23.965799840 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/include/linux/sched.h	2005-04-23 13:23:07.917004216 -0700
@@ -409,8 +409,6 @@
 #define MAX_USER_RT_PRIO	100
 #define MAX_RT_PRIO		MAX_USER_RT_PRIO
 
-#define MAX_PRIO		(MAX_RT_PRIO + 40)
-
 #define rt_task(p)		(unlikely((p)->prio < MAX_RT_PRIO))
 
 /*
@@ -480,11 +478,10 @@
 #define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
 #define SD_BALANCE_NEWIDLE	2	/* Balance when about to become idle */
 #define SD_BALANCE_EXEC		4	/* Balance on exec */
-#define SD_BALANCE_FORK		8	/* Balance on fork, clone */
-#define SD_WAKE_IDLE		16	/* Wake to idle CPU on task wakeup */
-#define SD_WAKE_AFFINE		32	/* Wake task to waking CPU */
-#define SD_WAKE_BALANCE		64	/* Perform balancing at task wakeup */
-#define SD_SHARE_CPUPOWER	128	/* Domain members share cpu power */
+#define SD_WAKE_IDLE		8	/* Wake to idle CPU on task wakeup */
+#define SD_WAKE_AFFINE		16	/* Wake task to waking CPU */
+#define SD_WAKE_BALANCE		32	/* Perform balancing at task wakeup */
+#define SD_SHARE_CPUPOWER	64	/* Domain members share cpu power */
 
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
@@ -509,11 +506,6 @@
 	unsigned long long cache_hot_time; /* Task considered cache hot (ns) */
 	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
 	unsigned int per_cpu_gain;	/* CPU % gained by adding domain cpus */
-	unsigned int busy_idx;
-	unsigned int idle_idx;
-	unsigned int newidle_idx;
-	unsigned int wake_idx;
-	unsigned int forkexec_idx;
 	int flags;			/* See SD_* */
 
 	/* Runtime fields. */
@@ -537,16 +529,10 @@
 	unsigned long alb_failed;
 	unsigned long alb_pushed;
 
-	/* SD_BALANCE_EXEC stats */
-	unsigned long sbe_cnt;
-	unsigned long sbe_balanced;
+	/* sched_balance_exec() stats */
+	unsigned long sbe_attempts;
 	unsigned long sbe_pushed;
 
-	/* SD_BALANCE_FORK stats */
-	unsigned long sbf_cnt;
-	unsigned long sbf_balanced;
-	unsigned long sbf_pushed;
-
 	/* try_to_wake_up() stats */
 	unsigned long ttwu_wake_remote;
 	unsigned long ttwu_move_affine;
@@ -605,6 +591,8 @@
 struct audit_context;		/* See audit.c */
 struct mempolicy;
 
+#include <linux/sched_task.h>
+
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
 	struct thread_info *thread_info;
@@ -614,23 +602,19 @@
 
 	int lock_depth;		/* BKL lock depth */
 
-#if defined(CONFIG_SMP)
-	int on_cpu;
-#endif
 	int prio, static_prio;
 	struct list_head run_list;
-	prio_array_t *array;
+        union sched_drv_task sdu;
 
 	unsigned short ioprio;
 
-	unsigned long sleep_avg;
 	unsigned long long timestamp, last_ran;
 	unsigned long long sched_time; /* sched_clock time spent running */
-	int activated;
+	
 
 	unsigned long policy;
 	cpumask_t cpus_allowed;
-	unsigned int time_slice, first_time_slice;
+
 
 #ifdef CONFIG_SCHEDSTATS
 	struct sched_info sched_info;
@@ -741,6 +725,8 @@
 	spinlock_t alloc_lock;
 /* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */
 	spinlock_t proc_lock;
+/* context-switch lock */
+	spinlock_t switch_lock;
 
 /* journalling filesystem info */
 	void *journal_info;
@@ -948,7 +934,7 @@
 #else
  static inline void kick_process(struct task_struct *tsk) { }
 #endif
-extern void FASTCALL(sched_fork(task_t * p, int clone_flags));
+extern void FASTCALL(sched_fork(task_t * p));
 extern void FASTCALL(sched_exit(task_t * p));
 
 extern int in_group_p(gid_t);
@@ -1248,9 +1234,11 @@
 	return p->thread_info->cpu;
 }
 
+#include <linux/sched_drv.h>
+
 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 {
-	p->thread_info->cpu = cpu;
+	sched_drvp->set_task_cpu(p, cpu);
 }
 
 #else
diff -Naur linux-2.6.12-rc2-mm3/include/linux/sched_cpustats.h linux-2.6.12-rc2-mm3-plugsched/include/linux/sched_cpustats.h
--- linux-2.6.12-rc2-mm3/include/linux/sched_cpustats.h	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.12-rc2-mm3-plugsched/include/linux/sched_cpustats.h	2005-04-23 13:20:23.634978880 -0700
@@ -0,0 +1,149 @@
+#ifndef _LINUX_SCHED_CPUSTATS_H
+#define _LINUX_SCHED_CPUSTATS_H
+
+#include <linux/sysctl.h>
+
+/*
+ * Fixed denominator rational numbers for use by the CPU scheduler
+ */
+#define SCHED_AVG_OFFSET 4
+/*
+ * Get the rounded integer value of a scheduling statistic average field
+ * i.e. those fields whose names begin with avg_
+ */
+#define SCHED_AVG_RND(x) \
+	(((x) + (1 << (SCHED_AVG_OFFSET - 1))) >> (SCHED_AVG_OFFSET))
+#define SCHED_AVG_REAL(a) ((a) << SCHED_AVG_OFFSET)
+
+#define INITIAL_CPUSTATS_TIMESTAMP \
+	((unsigned long long)INITIAL_JIFFIES * (1000000000ULL / HZ))
+
+struct runq_cpustats {
+	unsigned long long total_delay;
+	unsigned long long total_rt_delay;
+	unsigned long long total_intr_delay;
+	unsigned long long total_rt_intr_delay;
+	unsigned long long total_fork_delay;
+	unsigned long long total_sinbin;
+};
+
+extern DEFINE_PER_CPU(struct runq_cpustats, cpustats_runqs);
+
+/*
+ * Scheduling statistics for a task/thread
+ */
+struct task_cpustats {
+	unsigned long long timestamp;
+	unsigned long long total_wake_ups;
+	unsigned long long intr_wake_ups;
+	unsigned long long total_sleep;
+	unsigned long long avg_sleep_per_cycle;
+	unsigned long long total_cpu;
+	unsigned long long avg_cpu_per_cycle;
+	unsigned long long total_delay;
+	unsigned long long avg_delay_per_cycle;
+	unsigned long long total_sinbin;
+	unsigned long long avg_cycle_length;
+	unsigned long cpu_usage_rate;
+	unsigned int flags;
+};
+
+#define CPUSTATS_WOKEN_FOR_INTR_FL (1 << 0)
+#define CPUSTATS_JUST_FORKED_FL (1 << 1)
+
+#define INIT_CPUSTATS \
+	.cpustats = { .timestamp = INITIAL_CPUSTATS_TIMESTAMP, 0, }, \
+	.csrq = NULL
+
+
+struct task_struct;
+
+extern void init_runq_cpustats(unsigned int cpu);
+static inline struct runq_cpustats *cpu_runq_cpustats(unsigned int cpu)
+{
+	return &per_cpu(cpustats_runqs, cpu);
+}
+#ifdef CONFIG_SMP
+extern unsigned long long adjusted_sched_clock(const struct task_struct *p);
+extern void set_task_runq_cpustats(struct task_struct *p, unsigned int cpu);
+#else
+#define adjusted_sched_clock(p) sched_clock()
+static inline void set_task_runq_cpustats(struct task_struct *p, unsigned int cpu) {}
+#endif
+
+extern void initialize_cpustats(struct task_struct *p, unsigned long long now);
+extern void delta_sleep_cpustats(struct task_struct *p, unsigned long long now);
+extern void delta_cpu_cpustats(struct task_struct *p, unsigned long long now);
+extern void delta_delay_cpustats(struct task_struct *p, unsigned long long now);
+extern void delta_rq_delay_cpustats(struct task_struct *p, unsigned long long delta);
+extern void update_cpustats_at_wake_up(struct task_struct *p, unsigned long long now);
+extern void update_cpustats_at_end_of_ts(struct task_struct *p, unsigned long long now);
+
+extern unsigned long long cpustats_avg_in_jiffies(unsigned long long avg);
+
+/*
+ * Get "up to date" scheduling statistics for the given task
+ * This function should be used if reliable scheduling statistics are required
+ * outside the scheduler itself as the relevant fields in the task structure
+ * are not "up to date" NB the possible difference between those in the task
+ * structure and the correct values could be quite large for sleeping tasks.
+ */
+extern int get_task_cpustats(struct task_struct*, struct task_cpustats*);
+
+/*
+ * Scheduling statistics for a CPU
+ */
+struct cpu_cpustats {
+	unsigned long long timestamp;
+	unsigned long long total_idle;
+	unsigned long long total_busy;
+	unsigned long long total_delay;
+	unsigned long long total_rt_delay;
+	unsigned long long total_intr_delay;
+	unsigned long long total_rt_intr_delay;
+	unsigned long long total_fork_delay;
+	unsigned long long total_sinbin;
+	unsigned long long nr_switches;
+};
+
+/*
+ * Get scheduling statistics for the nominated CPU
+ */
+extern int get_cpu_cpustats(unsigned int, struct cpu_cpustats*);
+
+/*
+ * Make scheduling statistics available via /proc
+ */
+extern int task_sched_cpustats(struct task_struct *p, char *buffer);
+extern int cpustats_read_proc(char *page, char **start, off_t off, int count,
+    int *eof, void *data);
+
+
+/*
+ * CPU rate statistics are estimated as proportions (i.e. real numbers in the
+ * range 0 to 1 inclusive) using fixed denominator rational numbers.
+ * The denominator (PROPORTION_ONE) must be less than 2^24
+ */
+#define PROPORTION_OFFSET 23
+#define PROPORTION_ONE (1ULL << PROPORTION_OFFSET)
+#define PROP_FM_PPT(a) (((unsigned long long)(a) * PROPORTION_ONE) / 1000)
+
+/* Require: a <= b */
+extern unsigned long calc_proportion(unsigned long long a, unsigned long long b);
+extern unsigned long map_proportion(unsigned long prop, unsigned long range);
+#define  map_proportion_rnd(p, r) map_proportion((p) >> 1, ((r) << 1) + 1)
+extern unsigned long proportion_to_ppt(unsigned long proportion);
+extern unsigned long ppt_to_proportion(unsigned long ppt);
+
+extern unsigned long avg_cpu_usage_rate(const struct task_struct*);
+extern unsigned long avg_sleep_rate(const struct task_struct*);
+extern unsigned long avg_cpu_delay_rate(const struct task_struct*);
+extern unsigned long delay_in_jiffies_for_usage(const struct task_struct*, unsigned long);
+
+extern int do_proc_proportion(ctl_table *ctp, int write, struct file *fp,
+    void __user *buffer, size_t *lenp, loff_t *ppos);
+
+#define TASK_CPUSTATS(p) (p)->sdu.spa.cpustats
+#define RUNQ_CPUSTATS(p) (p)->sdu.spa.csrq
+
+#endif
diff -Naur linux-2.6.12-rc2-mm3/include/linux/sched_drv.h linux-2.6.12-rc2-mm3-plugsched/include/linux/sched_drv.h
--- linux-2.6.12-rc2-mm3/include/linux/sched_drv.h	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.12-rc2-mm3-plugsched/include/linux/sched_drv.h	2005-04-23 13:20:23.634978880 -0700
@@ -0,0 +1,62 @@
+#ifndef _LINUX_SCHED_DRV_H
+#define _LINUX_SCHED_DRV_H
+/*
+ * include/linux/sched_drv.h
+ * This contains the definition of the driver struct for all the exported per
+ * runqueue scheduler functions, and the private per scheduler data in
+ * struct task_struct.
+ */
+#include <linux/kobject.h>
+
+#include <linux/sched.h>
+#include <linux/sched_runq.h>
+
+/*
+ * This is the main scheduler driver struct.
+ */
+struct sched_drv {
+	const char *name;
+	void (*init_runqueue_queue)(union runqueue_queue *);
+	void (*set_oom_time_slice)(struct task_struct *, unsigned long);
+	unsigned int (*task_timeslice)(const task_t *);
+	void (*wake_up_task)(struct task_struct *, struct runqueue *, unsigned int, int);
+	void (*fork)(task_t *);
+	void (*wake_up_new_task)(task_t *, unsigned long);
+	void (*exit)(task_t *);
+#ifdef CONFIG_SMP
+	void (*set_task_cpu)(struct task_struct *, unsigned int);
+	int (*move_tasks)(runqueue_t *, int, runqueue_t *, unsigned long,
+		 struct sched_domain *, enum idle_type);
+#endif
+	void (*tick)(struct task_struct*, struct runqueue *, unsigned long long);
+#ifdef CONFIG_SCHED_SMT
+	struct task_struct *(*head_of_queue)(union runqueue_queue *);
+	int (*dependent_sleeper_trumps)(const struct task_struct *,
+		const struct task_struct *, struct sched_domain *);
+#endif
+	void (*schedule)(void);
+	void (*set_normal_task_nice)(task_t *, long);
+	void (*setscheduler)(task_t *, int, int);
+	long (*sys_yield)(void);
+	void (*yield)(void);
+	void (*init_idle)(task_t *, int);
+	void (*sched_init)(void);
+#ifdef CONFIG_SMP
+	void (*migrate_queued_task)(struct task_struct *, int);
+#ifdef CONFIG_HOTPLUG_CPU
+	void (*set_select_idle_first)(struct runqueue *);
+	void (*set_select_idle_last)(struct runqueue *);
+	void (*migrate_dead_tasks)(unsigned int);
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	void (*normalize_rt_task)(struct task_struct *);
+#endif
+	struct attribute **attrs;
+};
+
+extern const struct sched_drv *sched_drvp;
+
+extern void sched_drv_sysfs_init(void);
+
+#endif
diff -Naur linux-2.6.12-rc2-mm3/include/linux/sched_pvt.h linux-2.6.12-rc2-mm3-plugsched/include/linux/sched_pvt.h
--- linux-2.6.12-rc2-mm3/include/linux/sched_pvt.h	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.12-rc2-mm3-plugsched/include/linux/sched_pvt.h	2005-04-23 13:20:23.635978728 -0700
@@ -0,0 +1,407 @@
+#ifndef _LINUX_SCHED_PVT_H
+#define _LINUX_SCHED_PVT_H
+/*
+ * include/linux/sched_pvt.h
+ * This contains the definition of the CPU scheduler macros and function
+ * prototypes that are only of interest to scheduler implementations.
+ */
+
+#include <linux/sched_drv.h>
+
+#include <asm/mmu_context.h>
+
+extern DEFINE_PER_CPU(struct runqueue, runqueues);
+
+#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
+#define this_rq()		(&__get_cpu_var(runqueues))
+#define task_rq(p)		cpu_rq(task_cpu(p))
+#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
+
+/*
+ * Default context-switch locking:
+ */
+#ifndef prepare_arch_switch
+# define prepare_arch_switch(rq, next)	do { } while (0)
+# define finish_arch_switch(rq, next)	spin_unlock_irq(&(rq)->lock)
+# define task_running(rq, p)		((rq)->curr == (p))
+#endif
+
+/*
+ * task_rq_lock - lock the runqueue a given task resides on and disable
+ * interrupts.  Note the ordering: we can safely lookup the task_rq without
+ * explicitly disabling preemption.
+ */
+static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
+	__acquires(rq->lock)
+{
+	struct runqueue *rq;
+
+repeat_lock_task:
+	local_irq_save(*flags);
+	rq = task_rq(p);
+	spin_lock(&rq->lock);
+	if (unlikely(rq != task_rq(p))) {
+		spin_unlock_irqrestore(&rq->lock, *flags);
+		goto repeat_lock_task;
+	}
+	return rq;
+}
+
+static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
+	__releases(rq->lock)
+{
+	spin_unlock_irqrestore(&rq->lock, *flags);
+}
+
+/*
+ * rq_lock - lock a given runqueue and disable interrupts.
+ */
+static inline runqueue_t *this_rq_lock(void)
+	__acquires(rq->lock)
+{
+	runqueue_t *rq;
+
+	local_irq_disable();
+	rq = this_rq();
+	spin_lock(&rq->lock);
+
+	return rq;
+}
+
+/*
+ * Place scheduler attributes in sysfs
+ */
+struct sched_drv_sysfs_entry {
+	struct attribute attr;
+	ssize_t (*show)(char *);
+	ssize_t (*store)(const char *, size_t);
+};
+
+#define to_sched_drv_sysfs_entry(a) container_of((a), struct sched_drv_sysfs_entry, attr)
+
+/*
+ * Macros to help define more common scheduler sysfs attribute types
+ */
+#define SCHED_DRV_SYSFS_UINT_RW_EV(sdse_vis, aname, conv_in, conv_out, MINV, MAXV) \
+static ssize_t show_ ## aname(char *page) \
+{ \
+	unsigned long long val = conv_out(aname); \
+ \
+	return sprintf(page, "%llu\n", val); \
+} \
+ \
+static ssize_t store_ ## aname(const char *page, size_t count) \
+{ \
+	unsigned long long val; \
+	char *end = NULL; \
+ \
+	val = simple_strtoull(page, &end, 10); \
+	if ((end == page) || ((*end != '\0') && (*end != '\n'))) \
+		return -EINVAL; \
+	val = conv_in(val); \
+	if (val < (MINV)) \
+		val = (MINV); \
+	else if (val > (MAXV)) \
+		val = (MAXV); \
+ \
+	aname = val; \
+ \
+	return count; \
+} \
+ \
+sdse_vis struct sched_drv_sysfs_entry aname ## _sdse = { \
+	.attr = { .name = # aname, .mode = S_IRUGO | S_IWUSR }, \
+	.show = show_ ## aname, \
+	.store = store_ ## aname, \
+}
+#define SCHED_DRV_SYSFS_UINT_RW(aname, conv_in, conv_out, MINV, MAXV) \
+	SCHED_DRV_SYSFS_UINT_RW_EV(, aname, conv_in, conv_out, MINV, MAXV)
+#define SCHED_DRV_SYSFS_UINT_RW_STATIC(aname, conv_in, conv_out, MINV, MAXV) \
+	SCHED_DRV_SYSFS_UINT_RW_EV(static, aname, conv_in, conv_out, MINV, MAXV)
+
+#define SCHED_DRV_SYSFS_UINT_RO_EV(sdse_vis, ev, aname, conv_out) \
+static ssize_t show_ ## aname(char *page) \
+{ \
+	unsigned long long val = conv_out(aname); \
+ \
+	return sprintf(page, "%llu\n", val); \
+} \
+ \
+sdse_vis struct sched_drv_sysfs_entry aname ## _sdse = { \
+	.attr = { .name = # aname, .mode = S_IRUGO }, \
+	.show = show_ ## aname, \
+	.store = NULL, \
+}
+
+#define SCHED_DRV_SYSFS_UINT_RO(sdse_vis, ev, aname, conv_out) \
+	SCHED_DRV_SYSFS_UINT_RO_EV(, ev, aname, conv_out)
+#define SCHED_DRV_SYSFS_UINT_RO_STATIC(sdse_vis, ev, aname, conv_out) \
+	SCHED_DRV_SYSFS_UINT_RO_EV(static, ev, aname, conv_out)
+
+#define SCHED_DRV_SYSFS_ATTR(aname) (aname ## _sdse.attr)
+#define SCHED_DRV_DECLARE_SYSFS_ENTRY(aname) \
+extern struct sched_drv_sysfs_entry aname ## _sdse
+
+/**
+ * finish_task_switch - clean up after a task-switch
+ * @prev: the thread we just switched away from.
+ *
+ * We enter this with the runqueue still locked, and finish_arch_switch()
+ * will unlock it along with doing any other architecture-specific cleanup
+ * actions.
+ *
+ * Note that we may have delayed dropping an mm in context_switch(). If
+ * so, we finish that here outside of the runqueue lock.  (Doing it
+ * with the lock held can cause deadlocks; see schedule() for
+ * details.)
+ */
+static inline void finish_task_switch(task_t *prev)
+	__releases(rq->lock)
+{
+	runqueue_t *rq = this_rq();
+	struct mm_struct *mm = rq->prev_mm;
+	unsigned long prev_task_flags;
+
+	rq->prev_mm = NULL;
+
+	/*
+	 * A task struct has one reference for the use as "current".
+	 * If a task dies, then it sets EXIT_ZOMBIE in tsk->exit_state and
+	 * calls schedule one last time. The schedule call will never return,
+	 * and the scheduled task must drop that reference.
+	 * The test for EXIT_ZOMBIE must occur while the runqueue locks are
+	 * still held, otherwise prev could be scheduled on another cpu, die
+	 * there before we look at prev->state, and then the reference would
+	 * be dropped twice.
+	 *		Manfred Spraul <manfred@colorfullife.com>
+	 */
+	prev_task_flags = prev->flags;
+	finish_arch_switch(rq, prev);
+	if (mm)
+		mmdrop(mm);
+	if (unlikely(prev_task_flags & PF_DEAD))
+		put_task_struct(prev);
+}
+
+/*
+ * context_switch - switch to the new MM and the new
+ * thread's register state.
+ */
+static inline
+task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
+{
+	struct mm_struct *mm = next->mm;
+	struct mm_struct *oldmm = prev->active_mm;
+
+	if (unlikely(!mm)) {
+		next->active_mm = oldmm;
+		atomic_inc(&oldmm->mm_count);
+		enter_lazy_tlb(oldmm, next);
+	} else
+		switch_mm(oldmm, mm, next);
+
+	if (unlikely(!prev->mm)) {
+		prev->active_mm = NULL;
+		WARN_ON(rq->prev_mm);
+		rq->prev_mm = oldmm;
+	}
+
+	/* Here we just switch the register state and the stack. */
+	switch_to(prev, next, prev);
+
+	return prev;
+}
+
+/*
+ * This is called on clock ticks and on context switches.
+ * Bank in p->sched_time the ns elapsed since the last tick or switch.
+ */
+static inline void update_cpu_clock(task_t *p, runqueue_t *rq,
+				    unsigned long long now)
+{
+	unsigned long long last = max(p->timestamp, rq->timestamp_last_tick);
+	p->sched_time += now - last;
+}
+
+/* Actually do priority change: must hold rq lock. */
+void __setscheduler(struct task_struct *, int, int);
+
+#ifdef CONFIG_SMP
+#define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran)	\
+				< (long long) (sd)->cache_hot_time)
+extern void common_set_task_cpu(struct task_struct *, unsigned int);
+extern void resched_task(task_t *p);
+extern void idle_balance(int, runqueue_t *);
+extern void rebalance_tick(int, runqueue_t *, enum idle_type);
+
+#ifdef CONFIG_SCHED_SMT
+extern int cpu_and_siblings_are_idle(int cpu);
+#else
+#define cpu_and_siblings_are_idle(A) idle_cpu(A)
+#endif
+
+/*
+ * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
+ */
+static inline
+int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
+		     struct sched_domain *sd, enum idle_type idle)
+{
+	/*
+	 * We do not migrate tasks that are:
+	 * 1) running (obviously), or
+	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
+	 * 3) are cache-hot on their current CPU.
+	 */
+	if (task_running(rq, p))
+		return 0;
+	if (!cpu_isset(this_cpu, p->cpus_allowed))
+		return 0;
+
+	/*
+	 * Aggressive migration if:
+	 * 1) the [whole] cpu is idle, or
+	 * 2) too many balance attempts have failed.
+	 */
+
+	if (cpu_and_siblings_are_idle(this_cpu) || \
+			sd->nr_balance_failed > sd->cache_nice_tries)
+		return 1;
+
+	if (task_hot(p, rq->timestamp_last_tick, sd))
+			return 0;
+	return 1;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern void migrate_dead(unsigned int, task_t *);
+#endif
+#else
+#define resched_task(p) set_tsk_need_resched(p)
+/*
+ * on UP we do not need to balance between CPUs:
+ */
+static inline void idle_balance(int cpu, runqueue_t *rq) { }
+static inline void rebalance_tick(int cpu, runqueue_t *rq, enum idle_type idle) { }
+#endif
+
+#ifdef CONFIG_SCHED_SMT
+extern int wake_priority_sleeper(runqueue_t *);
+extern void wake_sleeping_dependent(int, runqueue_t *);
+extern int dependent_sleeper(int, runqueue_t *);
+#else
+static inline int wake_priority_sleeper(runqueue_t *rq) { return 0; }
+static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq) { }
+static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq) { return 0; }
+#endif
+
+#ifdef CONFIG_SCHEDSTATS
+# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
+
+/*
+ * Called when a process is dequeued from the active array and given
+ * the cpu.  We should note that with the exception of interactive
+ * tasks, the expired queue will become the active queue after the active
+ * queue is empty, without explicitly dequeuing and requeuing tasks in the
+ * expired queue.  (Interactive tasks may be requeued directly to the
+ * active queue, thus delaying tasks in the expired queue from running;
+ * see scheduler_tick()).
+ *
+ * This function is only called from sched_info_arrive(), rather than
+ * dequeue_task(). Even though a task may be queued and dequeued multiple
+ * times as it is shuffled about, we're really interested in knowing how
+ * long it was from the *first* time it was queued to the time that it
+ * finally hit a cpu.
+ */
+static inline void sched_info_dequeued(task_t *t)
+{
+	t->sched_info.last_queued = 0;
+}
+
+/*
+ * Called when a task finally hits the cpu.  We can now calculate how
+ * long it was waiting to run.  We also note when it began so that we
+ * can keep stats on how long its timeslice is.
+ */
+static inline void sched_info_arrive(task_t *t)
+{
+	unsigned long now = jiffies, diff = 0;
+	struct runqueue *rq = task_rq(t);
+
+	if (t->sched_info.last_queued)
+		diff = now - t->sched_info.last_queued;
+	sched_info_dequeued(t);
+	t->sched_info.run_delay += diff;
+	t->sched_info.last_arrival = now;
+	t->sched_info.pcnt++;
+
+	if (!rq)
+		return;
+
+	rq->rq_sched_info.run_delay += diff;
+	rq->rq_sched_info.pcnt++;
+}
+
+/*
+ * Called when a process is queued into either the active or expired
+ * array.  The time is noted and later used to determine how long we
+ * had to wait for us to reach the cpu.  Since the expired queue will
+ * become the active queue after active queue is empty, without dequeuing
+ * and requeuing any tasks, we are interested in queuing to either. It
+ * is unusual but not impossible for tasks to be dequeued and immediately
+ * requeued in the same or another array: this can happen in sched_yield(),
+ * set_user_nice(), and even load_balance() as it moves tasks from runqueue
+ * to runqueue.
+ *
+ * This function is only called from enqueue_task(), but also only updates
+ * the timestamp if it is already not set.  It's assumed that
+ * sched_info_dequeued() will clear that stamp when appropriate.
+ */
+static inline void sched_info_queued(task_t *t)
+{
+	if (!t->sched_info.last_queued)
+		t->sched_info.last_queued = jiffies;
+}
+
+/*
+ * Called when a process ceases being the active-running process, either
+ * voluntarily or involuntarily.  Now we can calculate how long we ran.
+ */
+static inline void sched_info_depart(task_t *t)
+{
+	struct runqueue *rq = task_rq(t);
+	unsigned long diff = jiffies - t->sched_info.last_arrival;
+
+	t->sched_info.cpu_time += diff;
+
+	if (rq)
+		rq->rq_sched_info.cpu_time += diff;
+}
+
+/*
+ * Called when tasks are switched involuntarily due, typically, to expiring
+ * their time slice.  (This may also be called when switching to or from
+ * the idle task.)  We are only called when prev != next.
+ */
+static inline void sched_info_switch(task_t *prev, task_t *next)
+{
+	struct runqueue *rq = task_rq(prev);
+
+	/*
+	 * prev now departs the cpu.  It's not interesting to record
+	 * stats about how efficient we were at scheduling the idle
+	 * process, however.
+	 */
+	if (prev != rq->idle)
+		sched_info_depart(prev);
+
+	if (next != rq->idle)
+		sched_info_arrive(next);
+}
+#else
+# define schedstat_inc(rq, field)	do { } while (0)
+# define sched_info_queued(t)		do { } while (0)
+# define sched_info_switch(t, next)	do { } while (0)
+#endif /* CONFIG_SCHEDSTATS */
+
+#endif
diff -Naur linux-2.6.12-rc2-mm3/include/linux/sched_runq.h linux-2.6.12-rc2-mm3-plugsched/include/linux/sched_runq.h
--- linux-2.6.12-rc2-mm3/include/linux/sched_runq.h	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.12-rc2-mm3-plugsched/include/linux/sched_runq.h	2005-04-23 13:20:23.636978576 -0700
@@ -0,0 +1,173 @@
+#ifndef _LINUX_SCHED_RUNQ_H
+#define _LINUX_SCHED_RUNQ_H
+/*
+ * include/linux/sched_runq.h
+ * This contains the definition of the CPU scheduler run queue type.
+ * Modified to allow each scheduler to have its own private run queue data.
+ */
+
+/*
+ * These are the runqueue data structures:
+ */
+#ifdef CONFIG_CPUSCHED_INGO
+#define INGO_MAX_PRIO (MAX_RT_PRIO + 40)
+
+#define INGO_BITMAP_SIZE ((((INGO_MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
+
+struct prio_array {
+	unsigned int nr_active;
+	unsigned long bitmap[INGO_BITMAP_SIZE];
+	struct list_head queue[INGO_MAX_PRIO];
+};
+
+struct ingo_runqueue_queue {
+	prio_array_t *active, *expired, arrays[2];
+	/*
+	   expired_timestamp: set to 0 on init and on each array switch;
+	   set to jiffies when the first non-interactive task expires
+	   (used to detect starvation of the expired array)
+	 */
+	unsigned long expired_timestamp;
+	int best_expired_prio;
+};
+#endif
+
+#ifdef CONFIG_CPUSCHED_STAIRCASE
+#define STAIRCASE_MAX_PRIO (MAX_RT_PRIO + 40)
+#define STAIRCASE_NUM_PRIO_SLOTS (STAIRCASE_MAX_PRIO + 1)
+
+struct staircase_runqueue_queue {
+	DECLARE_BITMAP(bitmap, STAIRCASE_NUM_PRIO_SLOTS);
+	struct list_head queue[STAIRCASE_NUM_PRIO_SLOTS - 1];
+	unsigned int cache_ticks;
+	unsigned int preempted;
+};
+#endif
+
+#ifdef CONFIG_CPUSCHED_SPA
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+#define SPA_IDLE_PRIO 159
+#else
+#define SPA_IDLE_PRIO (MAX_RT_PRIO + 40 + 2)
+#endif
+#define SPA_NUM_PRIO_SLOTS (SPA_IDLE_PRIO + 1)
+
+struct spa_prio_slot {
+	unsigned int prio;
+	struct list_head list;
+};
+
+struct spa_runqueue_queue {
+	DECLARE_BITMAP(bitmap, SPA_NUM_PRIO_SLOTS);
+	struct spa_prio_slot queue[SPA_NUM_PRIO_SLOTS - 1];
+	unsigned long next_prom_due;
+	unsigned long pcount;
+};
+#endif
+
+#ifdef CONFIG_CPUSCHED_NICK
+#define NICK_MAX_PRIO (MAX_RT_PRIO + 59)
+
+#define NICK_BITMAP_SIZE ((((NICK_MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
+
+struct nick_prio_array {
+	int min_prio;
+	unsigned int nr_active;
+	unsigned long bitmap[NICK_BITMAP_SIZE];
+	struct list_head queue[NICK_MAX_PRIO];
+};
+
+struct nick_runqueue_queue {
+	struct nick_prio_array *active, *expired, arrays[2];
+	/*
+	   array_sequence: incremented whenever the active and expired
+	   arrays are switched; tasks record it so the scheduler can
+	   detect sleeps that span an array switch (TODO: confirm)
+	 */
+	unsigned long array_sequence;
+};
+#endif
+
+typedef struct runqueue runqueue_t;
+
+union runqueue_queue {
+#ifdef CONFIG_CPUSCHED_INGO
+	struct ingo_runqueue_queue ingosched;
+#endif
+#ifdef CONFIG_CPUSCHED_STAIRCASE
+	struct staircase_runqueue_queue staircase;
+#endif
+#ifdef CONFIG_CPUSCHED_SPA
+	struct spa_runqueue_queue spa;
+#endif
+#ifdef CONFIG_CPUSCHED_NICK
+	struct nick_runqueue_queue nicksched;
+#endif
+};
+
+/*
+ * This is the main, per-CPU runqueue data structure.
+ *
+ * Locking rule: those places that want to lock multiple runqueues
+ * (such as the load balancing or the thread migration code), lock
+ * acquire operations must be ordered by ascending &runqueue.
+ */
+struct runqueue {
+	spinlock_t lock;
+
+	/*
+	 * nr_running and cpu_load should be in the same cacheline because
+	 * remote CPUs use both these fields when doing load calculation.
+	 */
+	unsigned long nr_running;
+#ifdef CONFIG_SMP
+	unsigned long cpu_load;
+#endif
+  	unsigned long long nr_switches;
+
+	/*
+	 * This is part of a global counter where only the total sum
+	 * over all CPUs matters. A task can increase this counter on
+	 * one CPU and if it got migrated afterwards it may decrease
+	 * it on another CPU. Always updated under the runqueue lock:
+	 */
+	unsigned long nr_uninterruptible;
+	union runqueue_queue qu;
+	unsigned long long timestamp_last_tick;
+	task_t *curr, *idle;
+	struct mm_struct *prev_mm;
+  	atomic_t nr_iowait;
+
+#ifdef CONFIG_SMP
+	struct sched_domain *sd;
+
+	/* For active balancing */
+	int active_balance;
+	int push_cpu;
+
+	task_t *migration_thread;
+	struct list_head migration_queue;
+#endif
+
+#ifdef CONFIG_SCHEDSTATS
+	/* latency stats */
+	struct sched_info rq_sched_info;
+
+	/* sys_sched_yield() stats */
+	unsigned long yld_exp_empty;
+	unsigned long yld_act_empty;
+	unsigned long yld_both_empty;
+	unsigned long yld_cnt;
+
+	/* schedule() stats */
+	unsigned long sched_switch;
+	unsigned long sched_cnt;
+	unsigned long sched_goidle;
+
+	/* try_to_wake_up() stats */
+	unsigned long ttwu_cnt;
+	unsigned long ttwu_local;
+#endif
+};
+
+#endif
diff -Naur linux-2.6.12-rc2-mm3/include/linux/sched_task.h linux-2.6.12-rc2-mm3-plugsched/include/linux/sched_task.h
--- linux-2.6.12-rc2-mm3/include/linux/sched_task.h	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.12-rc2-mm3-plugsched/include/linux/sched_task.h	2005-04-23 13:20:23.637978424 -0700
@@ -0,0 +1,94 @@
+#ifndef _LINUX_SCHED_TASK_H
+#define _LINUX_SCHED_TASK_H
+/*
+ * include/linux/sched_task.h
+ */
+
+/*
+ * Require that the relationship between 'nice' and 'static_prio' be the same
+ * for all schedulers.
+ * Convert user-nice values [ -20 ... 0 ... 19 ]
+ * to static priority [ MAX_RT_PRIO..(MAX_RT_PRIO + 39) ],
+ * and back.
+ */
+#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
+#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
+#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)
+
+#ifdef CONFIG_CPUSCHED_INGO
+struct ingo_sched_drv_task {
+	struct prio_array *array;
+	unsigned int time_slice;
+	unsigned int first_time_slice;
+	unsigned long sleep_avg;
+	int activated;
+};
+#endif
+
+#ifdef CONFIG_CPUSCHED_STAIRCASE
+struct staircase_sched_drv_task {
+	unsigned long sflags;
+	unsigned long runtime, totalrun, ns_debit;
+	unsigned int burst;
+	unsigned int slice, time_slice;
+};
+#endif
+
+#ifdef CONFIG_CPUSCHED_SPA
+#include <linux/sched_cpustats.h>
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+#include <linux/sched_zaphod.h>
+#endif
+
+struct spa_sched_drv_task {
+	unsigned int time_slice;
+	struct task_cpustats cpustats;
+	struct runq_cpustats *csrq;
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+	struct sched_zaphod zaphod;
+	struct sched_zaphod_runq_data *zrq;
+#endif
+	unsigned long cpu_rate_cap, min_cpu_rate_cap;
+	unsigned long cpu_rate_hard_cap;
+	struct timer_list sinbin_timer;
+	unsigned int flags;
+};
+
+#define SPAF_SINBINNED	(1 << 0)	/* I am sinbinned */
+#define SPAF_UISLEEP	(1 << 1)	/* Uninterruptible sleep */
+
+#define task_is_sinbinned(p) (unlikely(((p)->sdu.spa.flags & SPAF_SINBINNED) != 0))
+
+/* set/get cpu rate caps in parts per thousand */
+extern int set_cpu_rate_cap(struct task_struct *p, unsigned long new_cap);
+extern int set_cpu_rate_hard_cap(struct task_struct *p, unsigned long new_cap);
+extern unsigned long get_cpu_rate_cap(struct task_struct *p);
+extern unsigned long get_cpu_rate_hard_cap(struct task_struct *p);
+#endif
+
+#ifdef CONFIG_CPUSCHED_NICK
+struct nick_sched_drv_task {
+	struct nick_prio_array *array;
+	unsigned long array_sequence;
+	unsigned long total_time, sleep_time;
+	int used_slice;
+};
+#endif
+
+union sched_drv_task {
+#ifdef CONFIG_CPUSCHED_INGO
+	struct ingo_sched_drv_task ingosched;
+#endif
+#ifdef CONFIG_CPUSCHED_STAIRCASE
+	struct staircase_sched_drv_task staircase;
+#endif
+#ifdef CONFIG_CPUSCHED_SPA
+	struct spa_sched_drv_task spa;
+#endif
+#ifdef CONFIG_CPUSCHED_NICK
+	struct nick_sched_drv_task nicksched;
+#endif
+};
+
+void set_oom_time_slice(struct task_struct *p, unsigned long t);
+#endif
diff -Naur linux-2.6.12-rc2-mm3/include/linux/sched_zaphod.h linux-2.6.12-rc2-mm3-plugsched/include/linux/sched_zaphod.h
--- linux-2.6.12-rc2-mm3/include/linux/sched_zaphod.h	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.12-rc2-mm3-plugsched/include/linux/sched_zaphod.h	2005-04-23 13:20:23.637978424 -0700
@@ -0,0 +1,67 @@
+#ifndef _LINUX_SCHED_ZAPHOD_H
+#define _LINUX_SCHED_ZAPHOD_H
+
+#include <linux/sysctl.h>
+#include <linux/timer.h>
+
+/*
+ * Making IDLE_PRIO bigger than 159 would require modification of bitmaps
+ */
+#define ZAPHOD_IDLE_PRIO 	159
+#define ZAPHOD_BGND_PRIO 	(ZAPHOD_IDLE_PRIO - 1)
+#define ZAPHOD_MIN_NORMAL_PRIO	MAX_RT_PRIO
+#define ZAPHOD_MAX_PRIO		(ZAPHOD_MIN_NORMAL_PRIO + 40)
+
+/*
+ * For entitlement based scheduling a task's shares will be determined from
+ * their "nice"ness
+ */
+#define EB_SHARES_PER_NICE 5
+#define DEFAULT_EB_SHARES (20 * EB_SHARES_PER_NICE)
+#define MAX_EB_SHARES (DEFAULT_EB_SHARES * DEFAULT_EB_SHARES)
+
+struct sched_zaphod_runq_data {
+	unsigned long avg_nr_running;
+	atomic_t eb_yardstick;
+	atomic_t eb_ticks_to_decay;
+};
+
+extern void zaphod_init_cpu_runq_data(unsigned int cpu);
+extern struct sched_zaphod_runq_data *zaphod_cpu_runq_data(unsigned int cpu);
+extern void zaphod_runq_data_tick(struct sched_zaphod_runq_data *zrq, unsigned long numr);
+
+struct sched_zaphod {
+	unsigned int pre_bonus_priority;
+	unsigned int interactive_bonus;
+	unsigned int throughput_bonus;
+	unsigned int eb_shares;
+};
+
+#define ZAPHOD_TASK_DATA_INIT() \
+	{ .pre_bonus_priority = (ZAPHOD_BGND_PRIO - 20), \
+	  .eb_shares = DEFAULT_EB_SHARES, \
+	  .interactive_bonus = 0, \
+	  .throughput_bonus = 0, \
+	}
+
+#define SCHED_ZAPHOD_INIT \
+	.zrq = NULL, \
+	.zaphod = ZAPHOD_TASK_DATA_INIT()
+
+static inline struct sched_zaphod zaphod_task_data_init(void) {
+	struct sched_zaphod ret = ZAPHOD_TASK_DATA_INIT();
+
+	return ret;
+}
+
+struct task_struct;
+
+extern void zaphod_fork(struct task_struct *p);
+extern unsigned int zaphod_effective_prio(struct task_struct *p);
+extern void zaphod_reassess_at_activation(struct task_struct *p);
+extern void zaphod_reassess_at_end_of_ts(struct task_struct *p);
+extern void zaphod_reassess_at_sinbin_release(struct task_struct *p);
+extern void zaphod_reassess_at_renice(struct task_struct *p);
+extern void zaphod_reassess_at_new_cap(struct task_struct *p);
+
+#endif
diff -Naur linux-2.6.12-rc2-mm3/include/linux/topology.h linux-2.6.12-rc2-mm3-plugsched/include/linux/topology.h
--- linux-2.6.12-rc2-mm3/include/linux/topology.h	2005-04-14 02:47:24.074783272 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/include/linux/topology.h	2005-04-23 13:20:17.834860632 -0700
@@ -89,11 +89,6 @@
 	.cache_hot_time		= 0,			\
 	.cache_nice_tries	= 0,			\
 	.per_cpu_gain		= 25,			\
-	.busy_idx		= 0,			\
-	.idle_idx		= 0,			\
-	.newidle_idx		= 1,			\
-	.wake_idx		= 0,			\
-	.forkexec_idx		= 0,			\
 	.flags			= SD_LOAD_BALANCE	\
 				| SD_BALANCE_NEWIDLE	\
 				| SD_BALANCE_EXEC	\
@@ -120,15 +115,12 @@
 	.cache_hot_time		= (5*1000000/2),	\
 	.cache_nice_tries	= 1,			\
 	.per_cpu_gain		= 100,			\
-	.busy_idx		= 2,			\
-	.idle_idx		= 1,			\
-	.newidle_idx		= 2,			\
-	.wake_idx		= 1,			\
-	.forkexec_idx		= 1,			\
 	.flags			= SD_LOAD_BALANCE	\
 				| SD_BALANCE_NEWIDLE	\
 				| SD_BALANCE_EXEC	\
-				| SD_WAKE_AFFINE,	\
+				| SD_WAKE_AFFINE	\
+				| SD_WAKE_IDLE		\
+				| SD_WAKE_BALANCE,	\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
 	.nr_balance_failed	= 0,			\
diff -Naur linux-2.6.12-rc2-mm3/init/Kconfig linux-2.6.12-rc2-mm3-plugsched/init/Kconfig
--- linux-2.6.12-rc2-mm3/init/Kconfig	2005-04-14 02:47:24.344742232 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/init/Kconfig	2005-04-23 13:20:23.645977208 -0700
@@ -255,6 +255,64 @@
 
 	  Say N if unsure.
 
+config PLUGSCHED
+	bool "Support for multiple cpu schedulers"
+	default y
+	help
+	  Say Y here if you want to compile in support for multiple
+	  cpu schedulers. The cpu scheduler may be selected at boot time
+	  with the boot parameter "cpusched=". The choice of which cpu
+	  schedulers to compile into the kernel can be made by enabling
+	  "Configure standard kernel features" otherwise all cpu schedulers
+	  supported will be compiled in.
+
+choice
+	prompt "Default cpu scheduler"
+	help
+	  This option allows you to choose which cpu scheduler shall be
+	  booted by default at startup if you have plugsched support, or
+	  it will choose which is the only scheduler compiled in.
+
+config CPUSCHED_DEFAULT_INGO
+	bool "Ingosched cpu scheduler"
+	select CPUSCHED_INGO
+	---help---
+	  This is the default cpu scheduler which is an O(1) dual priority
+	  array scheduler with a hybrid interactive design.
+
+config CPUSCHED_DEFAULT_STAIRCASE
+	bool "Staircase cpu scheduler"
+	select CPUSCHED_STAIRCASE
+	---help---
+	  This scheduler is an O(1) single priority array with a foreground-
+	  background interactive design.
+
+config CPUSCHED_DEFAULT_SPA_NF
+	bool "Single priority array (SPA) cpu scheduler (no frills)"
+	select CPUSCHED_SPA_NF
+	---help---
+	  This is a simple round robin scheduler with a O(1) single priority
+	  array.
+
+config CPUSCHED_DEFAULT_ZAPHOD
+	bool "Zaphod cpu scheduler"
+	select CPUSCHED_ZAPHOD
+	---help---
+	  This scheduler is an O(1) single priority array with interactive
+	  bonus, throughput bonus, soft and hard CPU rate caps and a runtime
+	  choice between priority based and entitlement based interpretation
+	  of nice.
+
+config CPUSCHED_DEFAULT_NICK
+	bool "Nicksched cpu scheduler"
+	select CPUSCHED_NICK
+	---help---
+	  This is an O(1) dual priority array scheduler with a hybrid
+	  interactive design, as modified by Nick Piggin (the stock
+	  default scheduler is ingosched).
+
+endchoice
+
 menuconfig EMBEDDED
 	bool "Configure standard kernel features (for small systems)"
 	help
@@ -263,6 +321,70 @@
           environments which can tolerate a "non-standard" kernel.
           Only use this if you really know what you are doing.
 
+config CPUSCHED_INGO
+	bool "Ingosched cpu scheduler" if EMBEDDED
+	depends on PLUGSCHED
+	default y
+	---help---
+	  This is the default cpu scheduler which is an O(1) dual priority
+	  array scheduler with a hybrid interactive design.
+	  To boot this cpu scheduler, if it is not the default, use the
+	  bootparam "cpusched=ingosched".
+
+config CPUSCHED_STAIRCASE
+	bool "Staircase cpu scheduler" if EMBEDDED
+	depends on PLUGSCHED
+	default y
+	---help---
+	  This scheduler is an O(1) single priority array with a foreground-
+	  background interactive design.
+	  To boot this cpu scheduler, if it is not the default, use the
+	  bootparam "cpusched=staircase".
+
+config CPUSCHED_SPA
+	bool "SPA cpu schedulers" if EMBEDDED
+	depends on PLUGSCHED
+	default y
+	---help---
+	  Support for O(1) single priority array schedulers.
+
+config CPUSCHED_SPA_NF
+	bool "SPA cpu scheduler (no frills)" if EMBEDDED
+	depends on PLUGSCHED
+	select CPUSCHED_SPA
+	default y
+	---help---
+	  This scheduler is a simple round robin O(1) single priority array
+	  with NO extra scheduling "frills".  This scheduler contains no extra
+	  mechanisms for enhancing interactive response and is best suited for
+	  server systems.
+	  To boot this cpu scheduler, if it is not the default, use the
+	  bootparam "cpusched=spa_no_frills".
+
+config CPUSCHED_ZAPHOD
+	bool "Zaphod cpu scheduler" if EMBEDDED
+	depends on PLUGSCHED
+	select CPUSCHED_SPA
+	default y
+	---help---
+	  This scheduler is an O(1) single priority array with interactive
+	  bonus, throughput bonus, soft and hard CPU rate caps and a runtime
+	  choice between priority based and entitlement based interpretation
+	  of nice.
+	  To boot this cpu scheduler, if it is not the default, use the
+	  bootparam "cpusched=zaphod".
+
+config CPUSCHED_NICK
+	bool "Nicksched cpu scheduler" if EMBEDDED
+	depends on PLUGSCHED
+	default y
+	---help---
+	  This is an O(1) dual priority array scheduler with a hybrid
+	  interactive design, as modified by Nick Piggin (ingosched is
+	  the stock default scheduler).
+	  To boot this cpu scheduler, if it is not the default, use the
+	  bootparam "cpusched=nicksched".
+
 config KALLSYMS
 	 bool "Load all symbols for debugging/kksymoops" if EMBEDDED
 	 default y
diff -Naur linux-2.6.12-rc2-mm3/init/main.c linux-2.6.12-rc2-mm3-plugsched/init/main.c
--- linux-2.6.12-rc2-mm3/init/main.c	2005-04-14 02:47:24.346741928 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/init/main.c	2005-04-23 13:24:10.330515920 -0700
@@ -47,6 +47,7 @@
 #include <linux/rmap.h>
 #include <linux/mempolicy.h>
 #include <linux/key.h>
+#include <linux/sched_drv.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -442,10 +443,19 @@
 	 */
 	smp_prepare_boot_cpu();
 
+	build_all_zonelists();
+	page_alloc_init();
+	printk(KERN_NOTICE "Kernel command line: %s\n", saved_command_line);
+	parse_early_param();
+	parse_args("Booting kernel", command_line, __start___param,
+		   __stop___param - __start___param,
+		   &unknown_bootoption);
 	/*
 	 * Set up the scheduler prior starting any interrupts (such as the
 	 * timer interrupt). Full topology setup happens at smp_init()
 	 * time - but meanwhile we still have a functioning scheduler.
+	 * But defer until after boot command line is parsed to avoid doing
+	 * this twice in the event that a different scheduler is selected.
 	 */
 	sched_init();
 	/*
@@ -453,14 +463,7 @@
 	 * fragile until we cpu_idle() for the first time.
 	 */
 	preempt_disable();
-	build_all_zonelists();
-	page_alloc_init();
 	trap_init();
-	printk(KERN_NOTICE "Kernel command line: %s\n", saved_command_line);
-	parse_early_param();
-	parse_args("Booting kernel", command_line, __start___param,
-		   __stop___param - __start___param,
-		   &unknown_bootoption);
 	sort_main_extable();
 	rcu_init();
 	init_IRQ();
@@ -522,6 +525,7 @@
 
 	acpi_early_init(); /* before LAPIC and SMP init */
 
+	printk("Running with \"%s\" cpu scheduler.\n", sched_drvp->name);
 	/* Do the rest non-__init'ed, we're now alive */
 	rest_init();
 }
@@ -591,6 +595,7 @@
 #ifdef CONFIG_SYSCTL
 	sysctl_init();
 #endif
+	sched_drv_sysfs_init();
 
 	/* Networking initialization needs a process context */ 
 	sock_init();
diff -Naur linux-2.6.12-rc2-mm3/kernel/Makefile linux-2.6.12-rc2-mm3-plugsched/kernel/Makefile
--- linux-2.6.12-rc2-mm3/kernel/Makefile	2005-04-14 02:47:24.469723232 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/kernel/Makefile	2005-04-23 13:20:23.650976448 -0700
@@ -7,8 +7,13 @@
 	    sysctl.o capability.o ptrace.o timer.o user.o \
 	    signal.o sys.o kmod.o workqueue.o pid.o \
 	    rcupdate.o intermodule.o extable.o params.o posix-timers.o \
-	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o
+	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o sched_drv.o
 
+obj-$(CONFIG_CPUSCHED_INGO) += ingosched.o
+obj-$(CONFIG_CPUSCHED_STAIRCASE) += staircase.o
+obj-$(CONFIG_CPUSCHED_SPA) += sched_spa.o sched_cpustats.o
+obj-$(CONFIG_CPUSCHED_ZAPHOD) += sched_zaphod.o
+obj-$(CONFIG_CPUSCHED_NICK) += nicksched.o
 obj-$(CONFIG_FUTEX) += futex.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
 obj-$(CONFIG_SMP) += cpu.o spinlock.o
diff -Naur linux-2.6.12-rc2-mm3/kernel/fork.c linux-2.6.12-rc2-mm3-plugsched/kernel/fork.c
--- linux-2.6.12-rc2-mm3/kernel/fork.c	2005-04-14 02:47:24.412731896 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/kernel/fork.c	2005-04-23 13:20:17.854857592 -0700
@@ -1000,6 +1000,9 @@
 	p->pdeath_signal = 0;
 	p->exit_state = 0;
 
+	/* Perform scheduler related setup */
+	sched_fork(p);
+
 	/*
 	 * Ok, make it visible to the rest of the system.
 	 * We dont wake it up yet.
@@ -1008,24 +1011,18 @@
 	INIT_LIST_HEAD(&p->ptrace_children);
 	INIT_LIST_HEAD(&p->ptrace_list);
 
-	/* Perform scheduler related setup. Assign this task to a CPU. */
-	sched_fork(p, clone_flags);
-
 	/* Need tasklist lock for parent etc handling! */
 	write_lock_irq(&tasklist_lock);
 
 	/*
-	 * The task hasn't been attached yet, so its cpus_allowed mask will
-	 * not be changed, nor will its assigned CPU.
-	 *
-	 * The cpus_allowed mask of the parent may have changed after it was
-	 * copied first time - so re-copy it here, then check the child's CPU
-	 * to ensure it is on a valid CPU (and if not, just force it back to
-	 * parent's CPU). This avoids alot of nasty races.
+	 * The task hasn't been attached yet, so cpus_allowed mask cannot
+	 * have changed. The cpus_allowed mask of the parent may have
+	 * changed after it was copied first time, and it may then move to
+	 * another CPU - so we re-copy it here and set the child's CPU to
+	 * the parent's CPU. This avoids a lot of nasty races.
 	 */
 	p->cpus_allowed = current->cpus_allowed;
-	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed)))
-		set_task_cpu(p, smp_processor_id());
+	set_task_cpu(p, smp_processor_id());
 
 	/*
 	 * Check for pending SIGKILL! The new thread should not be allowed
diff -Naur linux-2.6.12-rc2-mm3/kernel/ingosched.c linux-2.6.12-rc2-mm3-plugsched/kernel/ingosched.c
--- linux-2.6.12-rc2-mm3/kernel/ingosched.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.12-rc2-mm3-plugsched/kernel/ingosched.c	2005-04-23 13:20:23.652976144 -0700
@@ -0,0 +1,1173 @@
+/*
+ *  kernel/ingosched.c
+ *  Copyright (C) 1991-2005  Linus Torvalds
+ *
+ *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
+ *		hybrid priority-list and round-robin design with
+ *		an array-switch method of distributing timeslices
+ *		and per-CPU runqueues.  Cleanups and useful suggestions
+ *		by Davide Libenzi, preemptible kernel bits by Robert Love.
+ *  2003-09-03	Interactivity tuning by Con Kolivas.
+ */
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/rcupdate.h>
+#include <linux/security.h>
+#include <linux/cpu.h>
+#include <linux/hardirq.h>
+#include <linux/sched_pvt.h>
+#include <linux/sched_runq.h>
+
+static void ingo_init_runqueue_queue(union runqueue_queue *rqq)
+{
+	int j;
+
+	rqq->ingosched.active = rqq->ingosched.arrays;
+	rqq->ingosched.expired = rqq->ingosched.arrays + 1;
+	rqq->ingosched.best_expired_prio = INGO_MAX_PRIO;
+
+	for (j = 0; j < 2; j++) {
+		int k;
+		prio_array_t *array = rqq->ingosched.arrays + j;
+
+		for (k = 0; k < INGO_MAX_PRIO; k++) {
+			INIT_LIST_HEAD(array->queue + k);
+			__clear_bit(k, array->bitmap);
+		}
+		// delimiter for bitsearch
+		__set_bit(INGO_MAX_PRIO, array->bitmap);
+		array->nr_active = 0;
+	}
+
+	rqq->ingosched.expired_timestamp = 0;
+}
+
+static void ingo_set_oom_time_slice(struct task_struct *p, unsigned long t)
+{
+	p->sdu.ingosched.time_slice = t;
+}
+
+/*
+ * 'User priority' is the nice value converted to something we
+ * can work with better when scaling various scheduler parameters,
+ * it's a [ 0 ... 39 ] range.
+ */
+#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
+#define MAX_USER_PRIO		(USER_PRIO(INGO_MAX_PRIO))
+
+/*
+ * Some helpers for converting nanosecond timing to jiffy resolution
+ */
+#define NS_TO_JIFFIES(TIME)	((TIME) / (1000000000 / HZ))
+#define JIFFIES_TO_NS(TIME)	((TIME) * (1000000000 / HZ))
+
+/*
+ * These are the 'tuning knobs' of the scheduler:
+ *
+ * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
+ * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
+ * Timeslices get refilled after they expire.
+ */
+#define MIN_TIMESLICE		max(5 * HZ / 1000, 1)
+#define DEF_TIMESLICE		(100 * HZ / 1000)
+#define ON_RUNQUEUE_WEIGHT	 30
+#define CHILD_PENALTY		 95
+#define PARENT_PENALTY		100
+#define EXIT_WEIGHT		  3
+#define PRIO_BONUS_RATIO	 25
+#define MAX_BONUS		(MAX_USER_PRIO * PRIO_BONUS_RATIO / 100)
+#define INTERACTIVE_DELTA	  2
+#define MAX_SLEEP_AVG		(DEF_TIMESLICE * MAX_BONUS)
+#define STARVATION_LIMIT	(MAX_SLEEP_AVG)
+#define NS_MAX_SLEEP_AVG	(JIFFIES_TO_NS(MAX_SLEEP_AVG))
+
+/*
+ * If a task is 'interactive' then we reinsert it in the active
+ * array after it has expired its current timeslice. (it will not
+ * continue to run immediately, it will still roundrobin with
+ * other interactive tasks.)
+ *
+ * This part scales the interactivity limit depending on niceness.
+ *
+ * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
+ * Here are a few examples of different nice levels:
+ *
+ *  TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
+ *  TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
+ *  TASK_INTERACTIVE(  0): [1,1,1,1,0,0,0,0,0,0,0]
+ *  TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
+ *  TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
+ *
+ * (the X axis represents the possible -5 ... 0 ... +5 dynamic
+ *  priority range a task can explore, a value of '1' means the
+ *  task is rated interactive.)
+ *
+ * Ie. nice +19 tasks can never get 'interactive' enough to be
+ * reinserted into the active array. And only heavily CPU-hog nice -20
+ * tasks will be expired. Default nice 0 tasks are somewhere between,
+ * it takes some effort for them to get interactive, but it's not
+ * too hard.
+ */
+
+#define CURRENT_BONUS(p) \
+	(NS_TO_JIFFIES((p)->sdu.ingosched.sleep_avg) * MAX_BONUS / \
+		MAX_SLEEP_AVG)
+
+#define GRANULARITY	(10 * HZ / 1000 ? : 1)
+
+#ifdef CONFIG_SMP
+#define TIMESLICE_GRANULARITY(p)	(GRANULARITY * \
+		(1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)) * \
+			num_online_cpus())
+#else
+#define TIMESLICE_GRANULARITY(p)	(GRANULARITY * \
+		(1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)))
+#endif
+
+#define SCALE(v1,v1_max,v2_max) \
+	(v1) * (v2_max) / (v1_max)
+
+#define DELTA(p) \
+	(SCALE(TASK_NICE(p), 40, MAX_BONUS) + INTERACTIVE_DELTA)
+
+#define TASK_INTERACTIVE(p) \
+	((p)->prio <= (p)->static_prio - DELTA(p))
+
+#define INTERACTIVE_SLEEP(p) \
+	(JIFFIES_TO_NS(MAX_SLEEP_AVG * \
+		(MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))
+
+#define TASK_PREEMPTS_CURR(p, rq) \
+	((p)->prio < (rq)->curr->prio)
+
+/*
+ * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
+ * to time slice values: [800ms ... 100ms ... 5ms]
+ *
+ * The higher a thread's priority, the bigger timeslices
+ * it gets during one round of execution. But even the lowest
+ * priority thread gets MIN_TIMESLICE worth of execution time.
+ */
+
+#define SCALE_PRIO(x, prio) \
+	max(x * (INGO_MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE)
+
+static inline unsigned int task_timeslice(const task_t *p)
+{
+	if (p->static_prio < NICE_TO_PRIO(0))
+		return SCALE_PRIO(DEF_TIMESLICE*4, p->static_prio);
+	else
+		return SCALE_PRIO(DEF_TIMESLICE, p->static_prio);
+}
+
+/*
+ * Adding/removing a task to/from a priority array:
+ */
+static void dequeue_task(struct task_struct *p, prio_array_t *array)
+{
+	array->nr_active--;
+	list_del_init(&p->run_list);
+	if (list_empty(array->queue + p->prio))
+		__clear_bit(p->prio, array->bitmap);
+}
+
+static void enqueue_task(struct task_struct *p, prio_array_t *array)
+{
+	sched_info_queued(p);
+	list_add_tail(&p->run_list, array->queue + p->prio);
+	__set_bit(p->prio, array->bitmap);
+	array->nr_active++;
+	p->sdu.ingosched.array = array;
+}
+
+/*
+ * Put task to the end of the run list without the overhead of dequeue
+ * followed by enqueue.
+ */
+static void requeue_task(struct task_struct *p, prio_array_t *array)
+{
+	list_move_tail(&p->run_list, array->queue + p->prio);
+}
+
+static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
+{
+	list_add(&p->run_list, array->queue + p->prio);
+	__set_bit(p->prio, array->bitmap);
+	array->nr_active++;
+	p->sdu.ingosched.array = array;
+}
+
+/*
+ * effective_prio - return the priority that is based on the static
+ * priority but is modified by bonuses/penalties.
+ *
+ * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
+ * into the -5 ... 0 ... +5 bonus/penalty range.
+ *
+ * We use 25% of the full 0...39 priority range so that:
+ *
+ * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
+ * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
+ *
+ * Both properties are important to certain workloads.
+ */
+static int effective_prio(task_t *p)
+{
+	int bonus, prio;
+
+	if (rt_task(p))
+		return p->prio;
+
+	bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
+
+	prio = p->static_prio - bonus;
+	if (prio < MAX_RT_PRIO)
+		prio = MAX_RT_PRIO;
+	if (prio > INGO_MAX_PRIO-1)
+		prio = INGO_MAX_PRIO-1;
+	return prio;
+}
+
+/*
+ * __activate_task - move a task to the runqueue.
+ */
+static inline void __activate_task(task_t *p, runqueue_t *rq)
+{
+	enqueue_task(p, rq->qu.ingosched.active);
+	rq->nr_running++;
+}
+
+static void recalc_task_prio(task_t *p, unsigned long long now)
+{
+	/* Caller must always ensure 'now >= p->timestamp' */
+	unsigned long long __sleep_time = now - p->timestamp;
+	unsigned long sleep_time;
+
+	if (__sleep_time > NS_MAX_SLEEP_AVG)
+		sleep_time = NS_MAX_SLEEP_AVG;
+	else
+		sleep_time = (unsigned long)__sleep_time;
+
+	if (likely(sleep_time > 0)) {
+		/*
+		 * User tasks that sleep a long time are categorised as
+		 * idle and will get just interactive status to stay active &
+		 * prevent them suddenly becoming cpu hogs and starving
+		 * other processes.
+		 */
+		if (p->mm && p->sdu.ingosched.activated != -1 &&
+			sleep_time > INTERACTIVE_SLEEP(p)) {
+				p->sdu.ingosched.sleep_avg = JIFFIES_TO_NS(MAX_SLEEP_AVG -
+						DEF_TIMESLICE);
+		} else {
+			/*
+			 * The lower the sleep avg a task has the more
+			 * rapidly it will rise with sleep time.
+			 */
+			sleep_time *= (MAX_BONUS - CURRENT_BONUS(p)) ? : 1;
+
+			/*
+			 * Tasks waking from uninterruptible sleep are
+			 * limited in their sleep_avg rise as they
+			 * are likely to be waiting on I/O
+			 */
+			if (p->sdu.ingosched.activated == -1 && p->mm) {
+				if (p->sdu.ingosched.sleep_avg >= INTERACTIVE_SLEEP(p))
+					sleep_time = 0;
+				else if (p->sdu.ingosched.sleep_avg + sleep_time >=
+						INTERACTIVE_SLEEP(p)) {
+					p->sdu.ingosched.sleep_avg = INTERACTIVE_SLEEP(p);
+					sleep_time = 0;
+				}
+			}
+
+			/*
+			 * This code gives a bonus to interactive tasks.
+			 *
+			 * The boost works by updating the 'average sleep time'
+			 * value here, based on ->timestamp. The more time a
+			 * task spends sleeping, the higher the average gets -
+			 * and the higher the priority boost gets as well.
+			 */
+			p->sdu.ingosched.sleep_avg += sleep_time;
+
+			if (p->sdu.ingosched.sleep_avg > NS_MAX_SLEEP_AVG)
+				p->sdu.ingosched.sleep_avg = NS_MAX_SLEEP_AVG;
+		}
+	}
+
+	p->prio = effective_prio(p);
+}
+
+/*
+ * activate_task - move a task to the runqueue and do priority recalculation
+ *
+ * Update all the scheduling statistics stuff. (sleep average
+ * calculation, priority modifiers, etc.)
+ */
+static void activate_task(task_t *p, runqueue_t *rq, int local)
+{
+	unsigned long long now;
+
+	now = sched_clock();
+#ifdef CONFIG_SMP
+	if (!local) {
+		/* Compensate for drifting sched_clock */
+		runqueue_t *this_rq = this_rq();
+		now = (now - this_rq->timestamp_last_tick)
+			+ rq->timestamp_last_tick;
+	}
+#endif
+
+	recalc_task_prio(p, now);
+
+	/*
+	 * This checks to make sure it's not an uninterruptible task
+	 * that is now waking up.
+	 */
+	if (!p->sdu.ingosched.activated) {
+		/*
+		 * Tasks which were woken up by interrupts (ie. hw events)
+		 * are most likely of interactive nature. So we give them
+		 * the credit of extending their sleep time to the period
+		 * of time they spend on the runqueue, waiting for execution
+		 * on a CPU, first time around:
+		 */
+		if (in_interrupt())
+			p->sdu.ingosched.activated = 2;
+		else {
+			/*
+			 * Normal first-time wakeups get a credit too for
+			 * on-runqueue time, but it will be weighted down:
+			 */
+			p->sdu.ingosched.activated = 1;
+		}
+	}
+	p->timestamp = now;
+
+	__activate_task(p, rq);
+}
+
+/*
+ * __activate_idle_task - move idle task to the _front_ of runqueue.
+ */
+static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
+{
+	enqueue_task_head(p, rq->qu.ingosched.active);
+	rq->nr_running++;
+}
+
+/*
+ * deactivate_task - remove a task from the runqueue.
+ */
+static void deactivate_task(struct task_struct *p, runqueue_t *rq)
+{
+	rq->nr_running--;
+	dequeue_task(p, p->sdu.ingosched.array);
+	p->sdu.ingosched.array = NULL;
+}
+
+/***
+ * try_to_wake_up - wake up a thread
+ * @p: the to-be-woken-up thread
+ * @old_state: the task's state before being woken
+ * @sync: do a synchronous wakeup?
+ * @rq: The run queue on which the task is to be placed (already locked)
+ */
+static void ingo_wake_up_task(struct task_struct *p, struct runqueue *rq, unsigned int old_state, int sync)
+{
+	int same_cpu = (rq == this_rq());
+
+	if (old_state == TASK_UNINTERRUPTIBLE) {
+		rq->nr_uninterruptible--;
+		/*
+		 * Tasks on involuntary sleep don't earn
+		 * sleep_avg beyond just interactive state.
+		 */
+		p->sdu.ingosched.activated = -1;
+	}
+
+	/*
+	 * Sync wakeups (i.e. those types of wakeups where the waker
+	 * has indicated that it will leave the CPU in short order)
+	 * don't trigger a preemption, if the woken up task will run on
+	 * this cpu. (in this case the 'I will reschedule' promise of
+	 * the waker guarantees that the freshly woken up task is going
+	 * to be considered on this CPU.)
+	 */
+	activate_task(p, rq, same_cpu);
+	if (!sync || !same_cpu) {
+		if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+	}
+}
+
+/*
+ * Perform scheduler related setup for a newly forked process p.
+ * p is forked by current.
+ */
+static void ingo_fork(task_t *p)
+{
+	p->sdu.ingosched.array = NULL;
+	/*
+	 * Share the timeslice between parent and child, thus the
+	 * total amount of pending timeslices in the system doesn't change,
+	 * resulting in more scheduling fairness.
+	 */
+	local_irq_disable();
+	p->sdu.ingosched.time_slice = (current->sdu.ingosched.time_slice + 1) >> 1;
+	/*
+	 * The remainder of the first timeslice might be recovered by
+	 * the parent if the child exits early enough.
+	 */
+	p->sdu.ingosched.first_time_slice = 1;
+	current->sdu.ingosched.time_slice >>= 1;
+	p->timestamp = sched_clock();
+	if (unlikely(!current->sdu.ingosched.time_slice)) {
+		/*
+		 * This case is rare, it happens when the parent has only
+		 * a single jiffy left from its timeslice. Taking the
+		 * runqueue lock is not a problem.
+		 */
+		current->sdu.ingosched.time_slice = 1;
+		preempt_disable();
+		scheduler_tick();
+		local_irq_enable();
+		preempt_enable();
+	} else
+		local_irq_enable();
+}
+
+/*
+ * wake_up_new_task - wake up a newly created task for the first time.
+ *
+ * This function will do some initial scheduler statistics housekeeping
+ * that must be done for every newly created context, then puts the task
+ * on the runqueue and wakes it.
+ */
+static void ingo_wake_up_new_task(task_t * p, unsigned long clone_flags)
+{
+	unsigned long flags;
+	int this_cpu, cpu;
+	runqueue_t *rq, *this_rq;
+
+	rq = task_rq_lock(p, &flags);
+	cpu = task_cpu(p);
+	this_cpu = smp_processor_id();
+
+	BUG_ON(p->state != TASK_RUNNING);
+
+	/*
+	 * We decrease the sleep average of forking parents
+	 * and children as well, to keep max-interactive tasks
+	 * from forking tasks that are max-interactive. The parent
+	 * (current) is done further down, under its lock.
+	 */
+	p->sdu.ingosched.sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
+		CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
+
+	p->prio = effective_prio(p);
+
+	if (likely(cpu == this_cpu)) {
+		if (!(clone_flags & CLONE_VM)) {
+			/*
+			 * The VM isn't cloned, so we're in a good position to
+			 * do child-runs-first in anticipation of an exec. This
+			 * usually avoids a lot of COW overhead.
+			 */
+			if (unlikely(!current->sdu.ingosched.array))
+				__activate_task(p, rq);
+			else {
+				p->prio = current->prio;
+				list_add_tail(&p->run_list, &current->run_list);
+				p->sdu.ingosched.array = current->sdu.ingosched.array;
+				p->sdu.ingosched.array->nr_active++;
+				rq->nr_running++;
+			}
+			set_need_resched();
+		} else
+			/* Run child last */
+			__activate_task(p, rq);
+		/*
+		 * We skip the following code due to cpu == this_cpu
+	 	 *
+		 *   task_rq_unlock(rq, &flags);
+		 *   this_rq = task_rq_lock(current, &flags);
+		 */
+		this_rq = rq;
+	} else {
+		this_rq = cpu_rq(this_cpu);
+
+		/*
+		 * Not the local CPU - must adjust timestamp. This should
+		 * get optimised away in the !CONFIG_SMP case.
+		 */
+		p->timestamp = (p->timestamp - this_rq->timestamp_last_tick)
+					+ rq->timestamp_last_tick;
+		__activate_task(p, rq);
+		if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+
+		/*
+		 * Parent and child are on different CPUs, now get the
+		 * parent runqueue to update the parent's ->sdu.ingosched.sleep_avg:
+		 */
+		task_rq_unlock(rq, &flags);
+		this_rq = task_rq_lock(current, &flags);
+	}
+	current->sdu.ingosched.sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
+		PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
+	task_rq_unlock(this_rq, &flags);
+}
+
+/*
+ * Potentially available exiting-child timeslices are
+ * retrieved here - this way the parent does not get
+ * penalized for creating too many threads.
+ *
+ * (this cannot be used to 'generate' timeslices
+ * artificially, because any timeslice recovered here
+ * was given away by the parent in the first place.)
+ */
+static void ingo_exit(task_t * p)
+{
+	unsigned long flags;
+	runqueue_t *rq;
+
+	/*
+	 * If the child was a (relative-) CPU hog then decrease
+	 * the sleep_avg of the parent as well.
+	 */
+	rq = task_rq_lock(p->parent, &flags);
+	if (p->sdu.ingosched.first_time_slice) {
+		p->parent->sdu.ingosched.time_slice += p->sdu.ingosched.time_slice;
+		if (unlikely(p->parent->sdu.ingosched.time_slice > task_timeslice(p)))
+			p->parent->sdu.ingosched.time_slice = task_timeslice(p);
+	}
+	if (p->sdu.ingosched.sleep_avg < p->parent->sdu.ingosched.sleep_avg)
+		p->parent->sdu.ingosched.sleep_avg = p->parent->sdu.ingosched.sleep_avg /
+		(EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sdu.ingosched.sleep_avg /
+		(EXIT_WEIGHT + 1);
+	task_rq_unlock(rq, &flags);
+}
+
+#ifdef CONFIG_SMP
+/*
+ * pull_task - move a task from a remote runqueue to the local runqueue.
+ * Both runqueues must be locked.
+ */
+static inline
+void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
+	       runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
+{
+	dequeue_task(p, src_array);
+	src_rq->nr_running--;
+	set_task_cpu(p, this_cpu);
+	this_rq->nr_running++;
+	enqueue_task(p, this_array);
+	p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
+				+ this_rq->timestamp_last_tick;
+	/*
+	 * Note that idle threads have a prio of INGO_MAX_PRIO, for this test
+	 * to be always true for them.
+	 */
+	if (TASK_PREEMPTS_CURR(p, this_rq))
+		resched_task(this_rq->curr);
+}
+
+/*
+ * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq,
+ * as part of a balancing operation within "domain". Returns the number of
+ * tasks moved.
+ *
+ * Called with both runqueues locked.
+ */
+static int ingo_move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
+		      unsigned long max_nr_move, struct sched_domain *sd,
+		      enum idle_type idle)
+{
+	prio_array_t *array, *dst_array;
+	struct list_head *head, *curr;
+	int idx, pulled = 0;
+	task_t *tmp;
+
+	if (max_nr_move <= 0 || busiest->nr_running <= 1)
+		goto out;
+
+	/*
+	 * We first consider expired tasks. Those will likely not be
+	 * executed in the near future, and they are most likely to
+	 * be cache-cold, thus switching CPUs has the least effect
+	 * on them.
+	 */
+	if (busiest->qu.ingosched.expired->nr_active) {
+		array = busiest->qu.ingosched.expired;
+		dst_array = this_rq->qu.ingosched.expired;
+	} else {
+		array = busiest->qu.ingosched.active;
+		dst_array = this_rq->qu.ingosched.active;
+	}
+
+new_array:
+	/* Start searching at priority 0: */
+	idx = 0;
+skip_bitmap:
+	if (!idx)
+		idx = sched_find_first_bit(array->bitmap);
+	else
+		idx = find_next_bit(array->bitmap, INGO_MAX_PRIO, idx);
+	if (idx >= INGO_MAX_PRIO) {
+		if (array == busiest->qu.ingosched.expired && busiest->qu.ingosched.active->nr_active) {
+			array = busiest->qu.ingosched.active;
+			dst_array = this_rq->qu.ingosched.active;
+			goto new_array;
+		}
+		goto out;
+	}
+
+	head = array->queue + idx;
+	curr = head->prev;
+skip_queue:
+	tmp = list_entry(curr, task_t, run_list);
+
+	curr = curr->prev;
+
+	if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle)) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+
+#ifdef CONFIG_SCHEDSTATS
+	if (task_hot(tmp, busiest->timestamp_last_tick, sd))
+		schedstat_inc(sd, lb_hot_gained[idle]);
+#endif
+
+	pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
+	pulled++;
+
+	/* We only want to steal up to the prescribed number of tasks. */
+	if (pulled < max_nr_move) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+out:
+	return pulled;
+}
+#endif
+
+/*
+ * We place interactive tasks back into the active array, if possible.
+ *
+ * To guarantee that this does not starve expired tasks we ignore the
+ * interactivity of a task if the first expired task had to wait more
+ * than a 'reasonable' amount of time. This deadline timeout is
+ * load-dependent, as the frequency of array switches decreases with
+ * increasing number of running tasks. We also ignore the interactivity
+ * if a better static_prio task has expired:
+ */
+#define EXPIRED_STARVING(rq) \
+	((STARVATION_LIMIT && ((rq)->qu.ingosched.expired_timestamp && \
+		(jiffies - (rq)->qu.ingosched.expired_timestamp >= \
+			STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
+			((rq)->curr->static_prio > (rq)->qu.ingosched.best_expired_prio))
+
+/*
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled.
+ *
+ * It also gets called by the fork code, when changing the parent's
+ * timeslices.
+ */
+static void ingo_tick(struct task_struct *p, struct runqueue *rq, unsigned long long now)
+{
+	int cpu = smp_processor_id();
+
+	if (p == rq->idle) {
+		if (wake_priority_sleeper(rq))
+			goto out;
+		rebalance_tick(cpu, rq, SCHED_IDLE);
+		return;
+	}
+
+	/* Task might have expired already, but not scheduled off yet */
+	if (p->sdu.ingosched.array != rq->qu.ingosched.active) {
+		set_tsk_need_resched(p);
+		goto out;
+	}
+	spin_lock(&rq->lock);
+	/*
+	 * The task was running during this tick - update the
+	 * time slice counter. Note: we do not update a thread's
+	 * priority until it either goes to sleep or uses up its
+	 * timeslice. This makes it possible for interactive tasks
+	 * to use up their timeslices at their highest priority levels.
+	 */
+	if (rt_task(p)) {
+		/*
+		 * RR tasks need a special form of timeslice management.
+		 * FIFO tasks have no timeslices.
+		 */
+		if ((p->policy == SCHED_RR) && !--p->sdu.ingosched.time_slice) {
+			p->sdu.ingosched.time_slice = task_timeslice(p);
+			p->sdu.ingosched.first_time_slice = 0;
+			set_tsk_need_resched(p);
+
+			/* put it at the end of the queue: */
+			requeue_task(p, rq->qu.ingosched.active);
+		}
+		goto out_unlock;
+	}
+	if (!--p->sdu.ingosched.time_slice) {
+		dequeue_task(p, rq->qu.ingosched.active);
+		set_tsk_need_resched(p);
+		p->prio = effective_prio(p);
+		p->sdu.ingosched.time_slice = task_timeslice(p);
+		p->sdu.ingosched.first_time_slice = 0;
+
+		if (!rq->qu.ingosched.expired_timestamp)
+			rq->qu.ingosched.expired_timestamp = jiffies;
+		if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
+			enqueue_task(p, rq->qu.ingosched.expired);
+			if (p->static_prio < rq->qu.ingosched.best_expired_prio)
+				rq->qu.ingosched.best_expired_prio = p->static_prio;
+		} else
+			enqueue_task(p, rq->qu.ingosched.active);
+	} else {
+		/*
+		 * Prevent a too long timeslice allowing a task to monopolize
+		 * the CPU. We do this by splitting up the timeslice into
+		 * smaller pieces.
+		 *
+		 * Note: this does not mean the task's timeslices expire or
+		 * get lost in any way, they just might be preempted by
+		 * another task of equal priority. (one with higher
+		 * priority would have preempted this task already.) We
+		 * requeue this task to the end of the list on this priority
+		 * level, which is in essence a round-robin of tasks with
+		 * equal priority.
+		 *
+		 * This only applies to tasks in the interactive
+		 * delta range with at least TIMESLICE_GRANULARITY to requeue.
+		 */
+		if (TASK_INTERACTIVE(p) && !((task_timeslice(p) -
+			p->sdu.ingosched.time_slice) % TIMESLICE_GRANULARITY(p)) &&
+			(p->sdu.ingosched.time_slice >= TIMESLICE_GRANULARITY(p)) &&
+			(p->sdu.ingosched.array == rq->qu.ingosched.active)) {
+
+			requeue_task(p, rq->qu.ingosched.active);
+			set_tsk_need_resched(p);
+		}
+	}
+out_unlock:
+	spin_unlock(&rq->lock);
+out:
+	rebalance_tick(cpu, rq, NOT_IDLE);
+}
+
+#ifdef CONFIG_SCHED_SMT
+static struct task_struct *ingo_head_of_queue(union runqueue_queue *rqq)
+{
+	prio_array_t *array = rqq->ingosched.active;
+
+	if (!array->nr_active)
+		array = rqq->ingosched.expired;
+	BUG_ON(!array->nr_active);
+
+	return list_entry(array->queue[sched_find_first_bit(array->bitmap)].next,
+		task_t, run_list);
+}
+
+static int ingo_dependent_sleeper_trumps(const struct task_struct *p1,
+	const struct task_struct * p2, struct sched_domain *sd)
+{
+	return ((p1->sdu.ingosched.time_slice * (100 - sd->per_cpu_gain) / 100) >
+			task_timeslice(p2) || rt_task(p1)) &&
+			p2->mm && p1->mm && !rt_task(p2);
+}
+#endif
+
+/*
+ * schedule() is the main scheduler function.
+ */
+static void ingo_schedule(void)
+{
+	long *switch_count;
+	prio_array_t *array;
+	unsigned long run_time;
+	int cpu, idx;
+	struct task_struct *prev = current, *next;
+	struct list_head *queue;
+	struct runqueue *rq = this_rq();
+	unsigned long long now = sched_clock();
+
+	if (likely((long long)now - prev->timestamp < NS_MAX_SLEEP_AVG)) {
+		run_time = now - prev->timestamp;
+		if (unlikely((long long)now - prev->timestamp < 0))
+			run_time = 0;
+	} else
+		run_time = NS_MAX_SLEEP_AVG;
+
+	/*
+	 * Tasks charged proportionately less run_time at high sleep_avg to
+	 * delay them losing their interactive status
+	 */
+	run_time /= (CURRENT_BONUS(prev) ? : 1);
+
+	spin_lock_irq(&rq->lock);
+
+	if (unlikely(prev->flags & PF_DEAD))
+		prev->state = EXIT_DEAD;
+
+	switch_count = &prev->nivcsw;
+	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+		switch_count = &prev->nvcsw;
+		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+				unlikely(signal_pending(prev))))
+			prev->state = TASK_RUNNING;
+		else {
+			if (prev->state == TASK_UNINTERRUPTIBLE)
+				rq->nr_uninterruptible++;
+			deactivate_task(prev, rq);
+		}
+	}
+
+	cpu = smp_processor_id();
+	if (unlikely(!rq->nr_running)) {
+go_idle:
+		idle_balance(cpu, rq);
+		if (!rq->nr_running) {
+			next = rq->idle;
+			rq->qu.ingosched.expired_timestamp = 0;
+			wake_sleeping_dependent(cpu, rq);
+			/*
+			 * wake_sleeping_dependent() might have released
+			 * the runqueue, so break out if we got new
+			 * tasks meanwhile:
+			 */
+			if (!rq->nr_running)
+				goto switch_tasks;
+		}
+	} else {
+		if (dependent_sleeper(cpu, rq)) {
+			next = rq->idle;
+			goto switch_tasks;
+		}
+		/*
+		 * dependent_sleeper() releases and reacquires the runqueue
+		 * lock, hence go into the idle loop if the rq went
+		 * empty meanwhile:
+		 */
+		if (unlikely(!rq->nr_running))
+			goto go_idle;
+	}
+
+	array = rq->qu.ingosched.active;
+	if (unlikely(!array->nr_active)) {
+		/*
+		 * Switch the active and expired arrays.
+		 */
+		schedstat_inc(rq, sched_switch);
+		rq->qu.ingosched.active = rq->qu.ingosched.expired;
+		rq->qu.ingosched.expired = array;
+		array = rq->qu.ingosched.active;
+		rq->qu.ingosched.expired_timestamp = 0;
+		rq->qu.ingosched.best_expired_prio = INGO_MAX_PRIO;
+	}
+
+	idx = sched_find_first_bit(array->bitmap);
+	queue = array->queue + idx;
+	next = list_entry(queue->next, task_t, run_list);
+
+	if (!rt_task(next) && next->sdu.ingosched.activated > 0) {
+		unsigned long long delta = now - next->timestamp;
+		if (unlikely((long long)now - next->timestamp < 0))
+			delta = 0;
+
+		if (next->sdu.ingosched.activated == 1)
+			delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
+
+		array = next->sdu.ingosched.array;
+		dequeue_task(next, array);
+		recalc_task_prio(next, next->timestamp + delta);
+		enqueue_task(next, array);
+	}
+	next->sdu.ingosched.activated = 0;
+switch_tasks:
+	if (next == rq->idle)
+		schedstat_inc(rq, sched_goidle);
+	prefetch(next);
+	clear_tsk_need_resched(prev);
+	rcu_qsctr_inc(task_cpu(prev));
+
+	update_cpu_clock(prev, rq, now);
+
+	prev->sdu.ingosched.sleep_avg -= run_time;
+	if ((long)prev->sdu.ingosched.sleep_avg <= 0)
+		prev->sdu.ingosched.sleep_avg = 0;
+	prev->timestamp = prev->last_ran = now;
+
+	sched_info_switch(prev, next);
+	if (likely(prev != next)) {
+		next->timestamp = now;
+		rq->nr_switches++;
+		rq->curr = next;
+		++*switch_count;
+
+		prepare_arch_switch(rq, next);
+		prev = context_switch(rq, prev, next);
+		barrier();
+
+		finish_task_switch(prev);
+	} else
+		spin_unlock_irq(&rq->lock);
+}
+
+static void ingo_set_normal_task_nice(task_t *p, long nice)
+{
+	prio_array_t *array;
+	int old_prio, new_prio, delta;
+
+	array = p->sdu.ingosched.array;
+	if (array)
+		dequeue_task(p, array);
+
+	old_prio = p->prio;
+	new_prio = NICE_TO_PRIO(nice);
+	delta = new_prio - old_prio;
+	p->static_prio = NICE_TO_PRIO(nice);
+	p->prio += delta;
+
+	if (array) {
+		struct runqueue *rq = task_rq(p);
+
+		enqueue_task(p, array);
+		/*
+		 * If the task increased its priority or is running and
+		 * lowered its priority, then reschedule its CPU:
+		 */
+		if (delta < 0 || (delta > 0 && task_running(rq, p)))
+			resched_task(rq->curr);
+	}
+}
+
+/*
+ * setscheduler - change the scheduling policy and/or RT priority of a thread.
+ */
+static void ingo_setscheduler(task_t *p, int policy, int prio)
+{
+	int oldprio;
+	prio_array_t *array;
+	runqueue_t *rq = task_rq(p);
+
+	array = p->sdu.ingosched.array;
+	if (array)
+		deactivate_task(p, rq);
+	oldprio = p->prio;
+	__setscheduler(p, policy, prio);
+	if (array) {
+		__activate_task(p, rq);
+		/*
+		 * Reschedule if we are currently running on this runqueue and
+		 * our priority decreased, or if we are not currently running on
+		 * this runqueue and our priority is higher than the current's
+		 */
+		if (task_running(rq, p)) {
+			if (p->prio > oldprio)
+				resched_task(rq->curr);
+		} else if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+	}
+}
+
+/**
+ * sys_sched_yield - yield the current processor to other threads.
+ *
+ * this function yields the current CPU by moving the calling thread
+ * to the expired array. If there are no other threads running on this
+ * CPU then this function will return.
+ */
+
+static long ingo_sys_yield(void)
+{
+	runqueue_t *rq = this_rq_lock();
+	prio_array_t *array = current->sdu.ingosched.array;
+	prio_array_t *target = rq->qu.ingosched.expired;
+
+	schedstat_inc(rq, yld_cnt);
+	/*
+	 * We implement yielding by moving the task into the expired
+	 * queue.
+	 *
+	 * (special rule: RT tasks will just roundrobin in the active
+	 *  array.)
+	 */
+	if (rt_task(current))
+		target = rq->qu.ingosched.active;
+
+	if (current->sdu.ingosched.array->nr_active == 1) {
+		schedstat_inc(rq, yld_act_empty);
+		if (!rq->qu.ingosched.expired->nr_active)
+			schedstat_inc(rq, yld_both_empty);
+	} else if (!rq->qu.ingosched.expired->nr_active)
+		schedstat_inc(rq, yld_exp_empty);
+
+	if (array != target) {
+		dequeue_task(current, array);
+		enqueue_task(current, target);
+	} else
+		/*
+		 * requeue_task is cheaper so perform that if possible.
+		 */
+		requeue_task(current, array);
+
+	/*
+	 * Since we are going to call schedule() anyway, there's
+	 * no need to preempt or enable interrupts:
+	 */
+	__release(rq->lock);
+	_raw_spin_unlock(&rq->lock);
+	preempt_enable_no_resched();
+
+	schedule();
+
+	return 0;
+}
+
+static void ingo_yield(void)
+{
+	set_current_state(TASK_RUNNING);
+	ingo_sys_yield();
+}
+
+static void ingo_init_idle(task_t *idle, int cpu)
+{
+	idle->sdu.ingosched.sleep_avg = 0;
+	idle->sdu.ingosched.array = NULL;
+	idle->prio = INGO_MAX_PRIO;
+}
+
+#ifdef CONFIG_SMP
+/* source and destination queues will be already locked */
+static void ingo_migrate_queued_task(struct task_struct *p, int dest_cpu)
+{
+	struct runqueue *rq_src = task_rq(p);
+	struct runqueue *rq_dest = cpu_rq(dest_cpu);
+
+	/*
+	 * Sync timestamp with rq_dest's before activating.
+	 * The same thing could be achieved by doing this step
+	 * afterwards, and pretending it was a local activate.
+	 * This way is cleaner and logically correct.
+	 */
+	p->timestamp = p->timestamp - rq_src->timestamp_last_tick
+			+ rq_dest->timestamp_last_tick;
+	deactivate_task(p, rq_src);
+	set_task_cpu(p, dest_cpu);
+	activate_task(p, rq_dest, 0);
+	if (TASK_PREEMPTS_CURR(p, rq_dest))
+		resched_task(rq_dest->curr);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static	void ingo_set_select_idle_first(struct runqueue *rq)
+{
+	__setscheduler(rq->idle, SCHED_FIFO, MAX_RT_PRIO-1);
+	/* Add idle task to _front_ of its priority queue */
+	__activate_idle_task(rq->idle, rq);
+}
+
+static	void ingo_set_select_idle_last(struct runqueue *rq)
+{
+	deactivate_task(rq->idle, rq);
+	rq->idle->static_prio = INGO_MAX_PRIO;
+	__setscheduler(rq->idle, SCHED_NORMAL, 0);
+}
+
+static void ingo_migrate_dead_tasks(unsigned int dead_cpu)
+{
+	unsigned arr, i;
+	struct runqueue *rq = cpu_rq(dead_cpu);
+
+	for (arr = 0; arr < 2; arr++) {
+		for (i = 0; i < INGO_MAX_PRIO; i++) {
+			struct list_head *list = &rq->qu.ingosched.arrays[arr].queue[i];
+			while (!list_empty(list))
+				migrate_dead(dead_cpu,
+					     list_entry(list->next, task_t,
+							run_list));
+		}
+	}
+}
+#endif
+#endif
+
+static void ingo_sched_init(void)
+{
+	init_task.sdu.ingosched.time_slice = HZ;
+	init_task.sdu.ingosched.array = NULL;
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static void ingo_normalize_rt_task(struct task_struct *p)
+{
+	prio_array_t *array;
+	unsigned long flags;
+	runqueue_t *rq;
+
+	rq = task_rq_lock(p, &flags);
+
+	array = p->sdu.ingosched.array;
+	if (array)
+		deactivate_task(p, rq);
+	__setscheduler(p, SCHED_NORMAL, 0);
+	if (array) {
+		__activate_task(p, rq);
+		resched_task(rq->curr);
+	}
+
+	task_rq_unlock(rq, &flags);
+}
+#endif
+
+const struct sched_drv ingo_sched_drv = {
+	.name = "ingosched",
+	.init_runqueue_queue = ingo_init_runqueue_queue,
+	.set_oom_time_slice = ingo_set_oom_time_slice,
+	.task_timeslice = task_timeslice,
+	.wake_up_task = ingo_wake_up_task,
+	.fork = ingo_fork,
+	.wake_up_new_task = ingo_wake_up_new_task,
+	.exit = ingo_exit,
+#ifdef CONFIG_SMP
+	.set_task_cpu = common_set_task_cpu,
+	.move_tasks = ingo_move_tasks,
+#endif
+	.tick = ingo_tick,
+#ifdef CONFIG_SCHED_SMT
+	.head_of_queue = ingo_head_of_queue,
+	.dependent_sleeper_trumps = ingo_dependent_sleeper_trumps,
+#endif
+	.schedule = ingo_schedule,
+	.set_normal_task_nice = ingo_set_normal_task_nice,
+	.setscheduler = ingo_setscheduler,
+	.sys_yield = ingo_sys_yield,
+	.yield = ingo_yield,
+	.init_idle = ingo_init_idle,
+	.sched_init = ingo_sched_init,
+#ifdef CONFIG_SMP
+	.migrate_queued_task = ingo_migrate_queued_task,
+#ifdef CONFIG_HOTPLUG_CPU
+	.set_select_idle_first = ingo_set_select_idle_first,
+	.set_select_idle_last = ingo_set_select_idle_last,
+	.migrate_dead_tasks = ingo_migrate_dead_tasks,
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	.normalize_rt_task = ingo_normalize_rt_task,
+#endif
+	.attrs = NULL,
+};
diff -Naur linux-2.6.12-rc2-mm3/kernel/nicksched.c linux-2.6.12-rc2-mm3-plugsched/kernel/nicksched.c
--- linux-2.6.12-rc2-mm3/kernel/nicksched.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.12-rc2-mm3-plugsched/kernel/nicksched.c	2005-04-23 13:20:23.655975688 -0700
@@ -0,0 +1,990 @@
+/*
+ *  kernel/nicksched.c
+ *  Copyright (C) 1991-2005  Linus Torvalds
+ *
+ *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
+ *		hybrid priority-list and round-robin design with
+ *		an array-switch method of distributing timeslices
+ *		and per-CPU runqueues.  Cleanups and useful suggestions
+ *		by Davide Libenzi, preemptible kernel bits by Robert Love.
+ */
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/rcupdate.h>
+#include <linux/security.h>
+#include <linux/cpu.h>
+#include <linux/hardirq.h>
+#include <linux/sched_pvt.h>
+#include <linux/sched_runq.h>
+
+static void nick_init_runqueue_queue(union runqueue_queue *rqq)
+{
+	int j;
+
+	rqq->nicksched.active = rqq->nicksched.arrays;
+	rqq->nicksched.expired = rqq->nicksched.arrays + 1;
+
+	for (j = 0; j < 2; j++) {
+		int k;
+		struct nick_prio_array *array = rqq->nicksched.arrays + j;
+
+		array->min_prio = NICK_MAX_PRIO;
+		for (k = 0; k < NICK_MAX_PRIO; k++) {
+			INIT_LIST_HEAD(array->queue + k);
+			__clear_bit(k, array->bitmap);
+		}
+		// delimiter for bitsearch
+		__set_bit(NICK_MAX_PRIO, array->bitmap);
+		array->nr_active = 0;
+	}
+
+	rqq->nicksched.array_sequence = 0;
+}
+
+static void nick_set_oom_time_slice(struct task_struct *p, unsigned long t)
+{
+}
+
+/*
+ * 'User priority' is the nice value converted to something we
+ * can work with better when scaling various scheduler parameters,
+ * it's a [ 0 ... 39 ] range.
+ */
+#define USER_PRIO(p)		((p) - MAX_RT_PRIO)
+#define MAX_USER_PRIO		(USER_PRIO(NICK_MAX_PRIO))
+/*
+ * Correct for fact that p->static_prio has normal mapping
+ */
+#define STATIC_USER_PRIO(p)	((p)->static_prio - MAX_RT_PRIO + 10)
+
+/*
+ * Some helpers for converting microsecond timing to jiffy resolution
+ */
+#define US_TO_JIFFIES(x)	((x) * HZ / 1000000)
+#define JIFFIES_TO_US(x)	((x) * 1000000 / HZ)
+
+static int base_timeslice = 256;
+#define min_base_timeslice 1
+#define max_base_timeslice 10000
+
+#define RT_TIMESLICE		(50 * 1000 / HZ)		/* 50ms */
+#define BASE_TIMESLICE		(base_timeslice)
+#define MIN_TIMESLICE		(base_timeslice / 16 ?: 1)
+
+/* Maximum amount of history that will be used to calculate priority */
+#define MAX_SLEEP_SHIFT		19
+#define MAX_SLEEP		(1UL << MAX_SLEEP_SHIFT)	/* ~0.52s */
+
+/*
+ * Maximum effect that 1 block of activity (run/sleep/etc) can have. This
+ * will moderate or discard freak events (eg. SIGSTOP)
+ */
+#define MAX_SLEEP_AFFECT	(MAX_SLEEP/4)
+
+/*
+ * The amount of history can be decreased (on fork for example). This puts a
+ * lower bound on it.
+ */
+#define MIN_HISTORY		(MAX_SLEEP/8)
+#define FORKED_TS_MAX		(US_TO_JIFFIES(MIN_HISTORY) ?: 1)
+
+/*
+ * SLEEP_FACTOR is a fixed point factor used to scale history tracking things.
+ * In particular: total_time, sleep_time, sleep_avg.
+ */
+#define SLEEP_FACTOR		1024
+
+/*
+ *  The scheduler classifies a process as performing one of the following
+ *  activities
+ */
+#define STIME_SLEEP		1	/* Sleeping */
+#define STIME_RUN		2	/* Using CPU */
+
+#define TASK_PREEMPTS_CURR(p, rq) \
+	((p)->prio < (rq)->curr->prio)
+
+/*
+ * Adding/removing a task to/from a priority array:
+ */
+static void dequeue_task(struct task_struct *p, struct nick_prio_array *array)
+{
+	array->nr_active--;
+	list_del_init(&p->run_list);
+	if (list_empty(array->queue + p->prio))
+		__clear_bit(p->prio, array->bitmap);
+}
+
+static void enqueue_task(struct task_struct *p, struct nick_prio_array *array)
+{
+	struct list_head *entry = array->queue + p->prio;
+
+	sched_info_queued(p);
+	if (!rt_task(p)) {
+		/*
+		 * Cycle tasks on the same priority level. This reduces their
+		 * timeslice fluctuations due to higher priority tasks expiring.
+		 */
+		if (!list_empty(entry))
+			entry = entry->next;
+	}
+	list_add_tail(&p->run_list, entry);
+	__set_bit(p->prio, array->bitmap);
+	array->nr_active++;
+	p->sdu.nicksched.array = array;
+}
+
+static inline void enqueue_task_head(struct task_struct *p, struct nick_prio_array *array)
+{
+	list_add(&p->run_list, array->queue + p->prio);
+	__set_bit(p->prio, array->bitmap);
+	array->nr_active++;
+	p->sdu.nicksched.array = array;
+}
+
+#define NS_TO_APPROX_US(t) ((t) >> 10)
+
+/*
+ * add_task_time updates a task @p after @time of doing the specified @type
+ * of activity. See STIME_*. This is used for priority calculation.
+ */
+static inline void add_task_time(task_t *p, unsigned long long time, unsigned long type)
+{
+	unsigned long ratio;
+	unsigned long long tmp;
+	unsigned long t;
+	if (type == STIME_SLEEP) {
+		if (time > MAX_SLEEP_AFFECT*4)
+			time = MAX_SLEEP_AFFECT*4;
+		t = ((unsigned long)time + 3) / 4;
+	} else {
+		unsigned long div = 60 - STATIC_USER_PRIO(p);
+		t = (unsigned long)time * 30;
+		t = t / div;
+		t = t * 30;
+		t = t / div;
+	}
+
+	ratio = MAX_SLEEP - t;
+	tmp = (unsigned long long)ratio * p->sdu.nicksched.total_time + MAX_SLEEP/2;
+	tmp >>= MAX_SLEEP_SHIFT;
+	p->sdu.nicksched.total_time = (unsigned long)tmp;
+
+	tmp = (unsigned long long)ratio * p->sdu.nicksched.sleep_time + MAX_SLEEP/2;
+	tmp >>= MAX_SLEEP_SHIFT;
+	p->sdu.nicksched.sleep_time = (unsigned long)tmp;
+
+	p->sdu.nicksched.total_time += t;
+	if (type == STIME_SLEEP)
+		p->sdu.nicksched.sleep_time += t;
+}
+
+static unsigned long task_sleep_avg(task_t *p)
+{
+	return (SLEEP_FACTOR * p->sdu.nicksched.sleep_time) / (p->sdu.nicksched.total_time + 1);
+}
+
+/*
+ * The higher a thread's priority, the bigger timeslices
+ * it gets during one round of execution. But even the lowest
+ * priority thread gets MIN_TIMESLICE worth of execution time.
+ *
+ * Timeslices are scaled, so if only low priority processes are running,
+ * they will all get long timeslices.
+ */
+
+static int task_timeslice(const task_t *p, runqueue_t *rq)
+{
+	int idx, base, delta;
+	int timeslice;
+
+	if (rt_task(p))
+		return RT_TIMESLICE;
+
+	idx = min(p->prio, rq->qu.nicksched.expired->min_prio);
+	delta = p->prio - idx;
+	base = BASE_TIMESLICE * (MAX_USER_PRIO + 1) / (delta + 2);
+	base = base * (MAX_USER_PRIO + 1) / (delta + 2);
+
+	base = base * 40 / (70 - USER_PRIO(idx));
+	base = base * 40 / (70 - USER_PRIO(idx));
+
+	timeslice = base >> 10;
+	timeslice = timeslice * HZ / 1000;
+	if (timeslice < MIN_TIMESLICE)
+		timeslice = MIN_TIMESLICE;
+
+	return timeslice;
+}
+
+/*
+ * task_priority: calculates a task's priority based on previous running
+ * history (see add_task_time). The priority is just a simple linear function
+ * based on sleep_avg and static_prio.
+ */
+static int task_priority(task_t *p)
+{
+	unsigned long sleep_avg;
+ 	int bonus, prio;
+
+ 	if (rt_task(p))
+ 		return p->prio;
+
+	sleep_avg = task_sleep_avg(p);
+
+	prio = STATIC_USER_PRIO(p) + 10;
+	bonus = (((MAX_USER_PRIO + 1) / 3) * sleep_avg + (SLEEP_FACTOR / 2))
+					/ SLEEP_FACTOR;
+	prio = MAX_RT_PRIO + prio - bonus;
+
+ 	if (prio < MAX_RT_PRIO)
+		return MAX_RT_PRIO;
+ 	if (prio > NICK_MAX_PRIO-1)
+		return NICK_MAX_PRIO-1;
+
+ 	return prio;
+}
+
+/*
+ * __activate_task - move a task to the runqueue.
+ */
+static inline void __activate_task(task_t *p, runqueue_t *rq, struct nick_prio_array *array)
+{
+	enqueue_task(p, array);
+	rq->nr_running++;
+	if (!rt_task(p)) {
+		if (p->prio < array->min_prio)
+			array->min_prio = p->prio;
+	}
+}
+
+/*
+ * activate_task - move a task to the runqueue and do priority recalculation
+ *
+ * Update all the scheduling statistics stuff. (sleep average
+ * calculation, priority modifiers, etc.)
+ */
+static void activate_task(task_t *p, runqueue_t *rq, int local)
+{
+	unsigned long long now, sleep;
+	struct nick_prio_array *array;
+
+	now = sched_clock();
+#ifdef CONFIG_SMP
+	if (!local) {
+		/* Compensate for drifting sched_clock */
+		runqueue_t *this_rq = this_rq();
+		now = (now - this_rq->timestamp_last_tick)
+			+ rq->timestamp_last_tick;
+	}
+#endif
+
+	/*
+	 * If we have slept through an active/expired array switch, restart
+	 * our timeslice too.
+	 */
+	sleep = NS_TO_APPROX_US(now - p->timestamp);
+	p->timestamp = now;
+	add_task_time(p, sleep, STIME_SLEEP);
+	p->prio = task_priority(p);
+
+	array = rq->qu.nicksched.active;
+	if (rq->qu.nicksched.array_sequence != p->sdu.nicksched.array_sequence) {
+		p->sdu.nicksched.used_slice = 0;
+	} else if (unlikely(p->sdu.nicksched.used_slice == -1)) {
+		p->sdu.nicksched.used_slice = 0;
+		array = rq->qu.nicksched.expired;
+	}
+
+	__activate_task(p, rq, array);
+}
+
+/*
+ * __activate_idle_task - move idle task to the _front_ of runqueue.
+ */
+static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
+{
+	enqueue_task_head(p, rq->qu.nicksched.active);
+	rq->nr_running++;
+}
+
+/*
+ * deactivate_task - remove a task from the runqueue.
+ */
+static inline void deactivate_task(struct task_struct *p, runqueue_t *rq)
+{
+	p->sdu.nicksched.array_sequence = rq->qu.nicksched.array_sequence;
+	rq->nr_running--;
+	dequeue_task(p, p->sdu.nicksched.array);
+	p->sdu.nicksched.array = NULL;
+}
+
+/***
+ * try_to_wake_up - wake up a thread
+ * @p: the to-be-woken-up thread
+ * @old_state: the task's state before being woken
+ * @sync: do a synchronous wakeup?
+ * @rq: The run queue on which the task is to be placed (already locked)
+ */
+static void nick_wake_up_task(struct task_struct *p, struct runqueue *rq, unsigned int old_state, int sync)
+{
+	int same_cpu = (rq == this_rq());
+
+	if (old_state == TASK_UNINTERRUPTIBLE)
+		rq->nr_uninterruptible--;
+
+	/*
+	 * Sync wakeups (i.e. those types of wakeups where the waker
+	 * has indicated that it will leave the CPU in short order)
+	 * don't trigger a preemption, if the woken up task will run on
+	 * this cpu. (in this case the 'I will reschedule' promise of
+	 * the waker guarantees that the freshly woken up task is going
+	 * to be considered on this CPU.)
+	 */
+	activate_task(p, rq, same_cpu);
+	if (!sync || !same_cpu) {
+		if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+	}
+}
+
+/*
+ * Perform scheduler related setup for a newly forked process p.
+ * p is forked by current.
+ */
+static void nick_fork(task_t *p)
+{
+	unsigned long sleep_avg;
+	runqueue_t *rq;
+
+	p->sdu.nicksched.array = NULL;
+
+	p->timestamp = sched_clock();
+	p->sdu.nicksched.used_slice = 0;
+	if (rt_task(p)) {
+		BUG_ON(!rt_task(current));
+		return;
+	}
+
+	preempt_disable();
+	rq = this_rq();
+	/* Get MIN_HISTORY of history with the same sleep_avg as parent. */
+	sleep_avg = task_sleep_avg(current);
+	p->sdu.nicksched.total_time = MIN_HISTORY;
+	p->sdu.nicksched.sleep_time = p->sdu.nicksched.total_time * sleep_avg / SLEEP_FACTOR;
+
+	/* Parent loses 1/4 of sleep time for forking */
+	current->sdu.nicksched.sleep_time = 3 * current->sdu.nicksched.sleep_time / 4;
+
+	local_irq_disable();
+	if (unlikely(current->sdu.nicksched.used_slice == -1 || current == rq->idle))
+		p->sdu.nicksched.used_slice = -1;
+	else {
+		int ts = task_timeslice(current, rq);
+		current->sdu.nicksched.used_slice += (ts + 3) / 4;
+		if (current->sdu.nicksched.used_slice >= ts) {
+			current->sdu.nicksched.used_slice = -1;
+			set_need_resched();
+		}
+	}
+	local_irq_enable();
+	preempt_enable();
+}
+
+/*
+ * wake_up_new_task - wake up a newly created task for the first time.
+ *
+ * This function will do some initial scheduler statistics housekeeping
+ * that must be done for every newly created context, then puts the task
+ * on the runqueue and wakes it.
+ */
+static void nick_wake_up_new_task(task_t * p, unsigned long clone_flags)
+{
+	unsigned long flags;
+	int this_cpu, cpu;
+	runqueue_t *rq;
+	struct nick_prio_array *array;
+
+	rq = task_rq_lock(p, &flags);
+
+	BUG_ON(p->state != TASK_RUNNING);
+
+	cpu = task_cpu(p);
+	this_cpu = smp_processor_id();
+
+	array = rq->qu.nicksched.active;
+	if (!rt_task(p)) {
+		if (unlikely(p->sdu.nicksched.used_slice == -1)) {
+			p->sdu.nicksched.used_slice = 0;
+			array = rq->qu.nicksched.expired;
+		} else {
+			int total = task_timeslice(p, rq);
+			int ts = max((total + 3) / 4, MIN_TIMESLICE);
+			ts = min(ts, (int)FORKED_TS_MAX);
+			p->sdu.nicksched.used_slice = total - ts;
+		}
+	}
+
+	if (likely(cpu == this_cpu)) {
+		if (!(clone_flags & CLONE_VM) && likely(array == rq->qu.nicksched.active)) {
+			/*
+			 * The VM isn't cloned, so we're in a good position to
+			 * do child-runs-first in anticipation of an exec. This
+			 * usually avoids a lot of COW overhead.
+			 */
+			if (p->prio >= current->prio) {
+				p->prio = current->prio;
+				list_add_tail(&p->run_list, &current->run_list);
+				p->sdu.nicksched.array = current->sdu.nicksched.array;
+				p->sdu.nicksched.array->nr_active++;
+				rq->nr_running++;
+			} else {
+				p->prio = task_priority(p);
+				__activate_task(p, rq, array);
+			}
+			set_need_resched();
+		} else {
+			/* Run child last */
+			p->prio = task_priority(p);
+			__activate_task(p, rq, array);
+		}
+#ifdef CONFIG_SMP
+	} else {
+		runqueue_t *this_rq = cpu_rq(this_cpu);
+
+		/*
+		 * Not the local CPU - must adjust timestamp. This should
+		 * get optimised away in the !CONFIG_SMP case.
+		 */
+		p->timestamp = (p->timestamp - this_rq->timestamp_last_tick)
+					+ rq->timestamp_last_tick;
+		p->prio = task_priority(p);
+		__activate_task(p, rq, array);
+		if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+#endif
+	}
+
+ 	task_rq_unlock(rq, &flags);
+}
+
+/*
+ * Scheduler hook invoked when a task exits.
+ *
+ * The original O(1) scheduler reclaims the exiting child's unused
+ * timeslice here so the parent is not penalized for creating many
+ * threads.  nicksched derives timeslices from each task's recent
+ * run/sleep history instead, so there is nothing to hand back and
+ * this hook is intentionally a no-op.
+ */
+static void nick_exit(task_t * p)
+{
+}
+
+#ifdef CONFIG_SMP
+/*
+ * pull_task - move a task from a remote runqueue to the local runqueue.
+ * Both runqueues must be locked.
+ */
+static inline
+void pull_task(runqueue_t *src_rq, struct nick_prio_array *src_array, task_t *p,
+	       runqueue_t *this_rq, struct nick_prio_array *this_array, int this_cpu)
+{
+	dequeue_task(p, src_array);
+	src_rq->nr_running--;
+	set_task_cpu(p, this_cpu);
+	this_rq->nr_running++;
+	enqueue_task(p, this_array);
+	p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
+				+ this_rq->timestamp_last_tick;
+	/*
+	 * Note that idle threads have a prio of NICK_MAX_PRIO, for this test
+	 * to be always true for them.
+	 */
+	if (TASK_PREEMPTS_CURR(p, this_rq))
+		resched_task(this_rq->curr);
+}
+
+/*
+ * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq,
+ * as part of a balancing operation within "domain". Returns the number of
+ * tasks moved.
+ *
+ * Called with both runqueues locked.
+ */
+static int nick_move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
+		      unsigned long max_nr_move, struct sched_domain *sd,
+		      enum idle_type idle)
+{
+	struct nick_prio_array *array, *dst_array;
+	struct list_head *head, *curr;
+	int idx, pulled = 0;
+	task_t *tmp;
+
+	if (max_nr_move <= 0 || busiest->nr_running <= 1)
+		goto out;
+
+	/*
+	 * We first consider expired tasks. Those will likely not be
+	 * executed in the near future, and they are most likely to
+	 * be cache-cold, thus switching CPUs has the least effect
+	 * on them.
+	 */
+	if (busiest->qu.nicksched.expired->nr_active) {
+		array = busiest->qu.nicksched.expired;
+		dst_array = this_rq->qu.nicksched.expired;
+	} else {
+		array = busiest->qu.nicksched.active;
+		dst_array = this_rq->qu.nicksched.active;
+	}
+
+new_array:
+	/* Start searching at priority 0: */
+	idx = 0;
+skip_bitmap:
+	if (!idx)
+		idx = sched_find_first_bit(array->bitmap);
+	else
+		idx = find_next_bit(array->bitmap, NICK_MAX_PRIO, idx);
+	if (idx >= NICK_MAX_PRIO) {
+		if (array == busiest->qu.nicksched.expired && busiest->qu.nicksched.active->nr_active) {
+			array = busiest->qu.nicksched.active;
+			dst_array = this_rq->qu.nicksched.active;
+			goto new_array;
+		}
+		goto out;
+	}
+
+	head = array->queue + idx;
+	curr = head->prev;
+skip_queue:
+	tmp = list_entry(curr, task_t, run_list);
+
+	curr = curr->prev;
+
+	if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle)) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+
+#ifdef CONFIG_SCHEDSTATS
+	if (task_hot(tmp, busiest->timestamp_last_tick, sd))
+		schedstat_inc(sd, lb_hot_gained[idle]);
+#endif
+
+	pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
+	pulled++;
+
+	/* We only want to steal up to the prescribed number of tasks. */
+	if (pulled < max_nr_move) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+out:
+	return pulled;
+}
+#endif
+
+/*
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled.
+ *
+ * It also gets called by the fork code, when changing the parent's
+ * timeslices.
+ */
+static void nick_tick(struct task_struct *p, struct runqueue *rq, unsigned long long now)
+{
+	enum idle_type cpu_status;
+	int ts;
+
+	if (p == rq->idle) {
+		cpu_status = SCHED_IDLE;
+		goto out;
+	}
+
+	cpu_status = NOT_IDLE;
+	/* Task might have expired already, but not scheduled off yet */
+	if  (unlikely(p->sdu.nicksched.used_slice == -1))
+		goto out;
+
+	if (unlikely(p->policy == SCHED_FIFO))
+		goto out;
+
+	/* p was running during this tick. Update its time slice counter. */
+	p->sdu.nicksched.used_slice++;
+	ts = task_timeslice(p, rq);
+	if (unlikely(p->sdu.nicksched.used_slice >= ts)) {
+		p->sdu.nicksched.used_slice = -1;
+		set_tsk_need_resched(p);
+	}
+out:
+	rebalance_tick(smp_processor_id(), rq, cpu_status);
+}
+
+#ifdef CONFIG_SCHED_SMT
+/* these should never get called */
+static struct task_struct *nick_head_of_queue(union runqueue_queue *rqq)
+{
+	struct nick_prio_array *array = rqq->nicksched.active;
+
+	if (!array->nr_active)
+		array = rqq->nicksched.expired;
+	BUG_ON(!array->nr_active);
+
+	return list_entry(array->queue[sched_find_first_bit(array->bitmap)].next,
+		task_t, run_list);
+}
+
+static int nick_dependent_sleeper_trumps(const struct task_struct *p1,
+	const struct task_struct * p2, struct sched_domain *sd)
+{
+	return 0;
+}
+#endif
+
+/*
+ * schedule() is the main scheduler function.
+ */
+static void nick_schedule(void)
+{
+	long *switch_count;
+	struct nick_prio_array *array;
+	unsigned long run_time;
+	int cpu, idx;
+	struct task_struct *prev = current, *next;
+	struct list_head *queue;
+	struct runqueue *rq = this_rq();
+	unsigned long long now = sched_clock();
+
+	run_time = NS_TO_APPROX_US(now - prev->timestamp);
+	update_cpu_clock(prev, rq, now);
+	prev->timestamp = prev->last_ran = now;
+	add_task_time(prev, run_time, STIME_RUN);
+
+	spin_lock_irq(&rq->lock);
+
+	if (unlikely(prev->flags & PF_DEAD))
+		prev->state = EXIT_DEAD;
+
+	switch_count = &prev->nivcsw;
+	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+		switch_count = &prev->nvcsw;
+		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+				unlikely(signal_pending(prev))))
+			prev->state = TASK_RUNNING;
+		else {
+			if (prev->state == TASK_UNINTERRUPTIBLE)
+				rq->nr_uninterruptible++;
+			deactivate_task(prev, rq);
+			goto no_check_expired;
+		}
+	}
+
+	if (unlikely(prev->sdu.nicksched.used_slice == -1)) {
+		dequeue_task(prev, prev->sdu.nicksched.array);
+		if (rt_task(prev)) {
+			/* SCHED_FIFO can come in here too, from sched_yield */
+			array = rq->qu.nicksched.active;
+		} else {
+			array = rq->qu.nicksched.expired;
+			prev->prio = task_priority(prev);
+			if (prev->prio < rq->qu.nicksched.expired->min_prio)
+				rq->qu.nicksched.expired->min_prio = prev->prio;
+ 		}
+		enqueue_task(prev, array);
+		prev->sdu.nicksched.used_slice = 0;
+ 	}
+no_check_expired:
+
+	cpu = smp_processor_id();
+	if (unlikely(!rq->nr_running)) {
+		rq->qu.nicksched.array_sequence++;
+		idle_balance(cpu, rq);
+		if (!rq->nr_running) {
+			next = rq->idle;
+			rq->qu.nicksched.arrays[0].min_prio = NICK_MAX_PRIO;
+			rq->qu.nicksched.arrays[1].min_prio = NICK_MAX_PRIO;
+ 			goto switch_tasks;
+		}
+	}
+
+	array = rq->qu.nicksched.active;
+	if (unlikely(!array->nr_active)) {
+		/*
+		 * Switch the active and expired arrays.
+		 */
+		schedstat_inc(rq, sched_switch);
+		rq->qu.nicksched.array_sequence++;
+		rq->qu.nicksched.active = rq->qu.nicksched.expired;
+		rq->qu.nicksched.expired = array;
+		rq->qu.nicksched.expired->min_prio = NICK_MAX_PRIO;
+		array = rq->qu.nicksched.active;
+	}
+
+	idx = sched_find_first_bit(array->bitmap);
+	queue = array->queue + idx;
+	next = list_entry(queue->next, task_t, run_list);
+
+switch_tasks:
+	if (next == rq->idle)
+		schedstat_inc(rq, sched_goidle);
+	clear_tsk_need_resched(prev);
+	rcu_qsctr_inc(cpu);
+
+	sched_info_switch(prev, next);
+	if (likely(prev != next)) {
+		next->timestamp = now;
+		rq->nr_switches++;
+		rq->curr = next;
+		++*switch_count;
+
+		prepare_arch_switch(rq, next);
+		prev = context_switch(rq, prev, next);
+		barrier();
+
+		finish_task_switch(prev);
+	} else
+		spin_unlock_irq(&rq->lock);
+}
+
+static void nick_set_normal_task_nice(task_t *p, long nice)
+{
+	struct nick_prio_array *array;
+	int old_prio, new_prio, delta;
+
+	array = p->sdu.nicksched.array;
+	if (array)
+		dequeue_task(p, array);
+
+	old_prio = p->prio;
+	new_prio = NICE_TO_PRIO(nice);
+	delta = new_prio - old_prio;
+	p->static_prio = NICE_TO_PRIO(nice);
+	p->prio = task_priority(p);
+
+	if (array) {
+		struct runqueue *rq = task_rq(p);
+
+		enqueue_task(p, array);
+		/*
+		 * If the task increased its priority or is running and
+		 * lowered its priority, then reschedule its CPU:
+		 */
+		if (delta < 0 || (delta > 0 && task_running(rq, p)))
+			resched_task(rq->curr);
+	}
+}
+
+/*
+ * setscheduler - change the scheduling policy and/or RT priority of a thread.
+ */
+static void nick_setscheduler(task_t *p, int policy, int prio)
+{
+	int oldprio;
+	struct nick_prio_array *array;
+	runqueue_t *rq = task_rq(p);
+
+	array = p->sdu.nicksched.array;
+	if (array)
+		deactivate_task(p, rq);
+	oldprio = p->prio;
+	__setscheduler(p, policy, prio);
+	if (policy == SCHED_FIFO || policy == SCHED_RR)
+		p->sdu.nicksched.used_slice = 0;
+
+	if (array) {
+		__activate_task(p, rq, rq->qu.nicksched.active);
+		/*
+		 * Reschedule if we are currently running on this runqueue and
+		 * our priority decreased, or if we are not currently running on
+		 * this runqueue and our priority is higher than the current's
+		 */
+		if (task_running(rq, p)) {
+			if (p->prio > oldprio)
+				resched_task(rq->curr);
+		} else if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+	}
+}
+
+/**
+ * sys_sched_yield - yield the current processor to other threads.
+ *
+ * this function yields the current CPU by marking the calling thread's
+ * timeslice fully used (used_slice = -1); the subsequent schedule()
+ * then requeues it on the expired array (active array for RT tasks).
+ */
+
+static long nick_sys_yield(void)
+{
+	local_irq_disable();
+#ifdef CONFIG_SCHEDSTATS
+	schedstat_inc(this_rq(), yld_cnt);
+#endif
+	current->sdu.nicksched.used_slice = -1;
+	set_need_resched();
+	local_irq_enable();
+
+	return 0;
+}
+
+static void nick_yield(void)
+{
+	set_current_state(TASK_RUNNING);
+	nick_sys_yield();
+#ifndef CONFIG_PREEMPT
+	/*
+	 * Kernel-space yield won't follow the schedule upon
+	 * return from syscall path. Must call schedule() here.
+	 */
+	schedule();
+#endif
+}
+
+static void nick_init_idle(task_t *idle, int cpu)
+{
+	idle->sdu.nicksched.used_slice = 0;
+	idle->sdu.nicksched.array = NULL;
+	idle->prio = NICK_MAX_PRIO;
+}
+
+#ifdef CONFIG_SMP
+/* source and destination queues will be already locked */
+static void nick_migrate_queued_task(struct task_struct *p, int dest_cpu)
+{
+	struct runqueue *rq_src = task_rq(p);
+	struct runqueue *rq_dest = cpu_rq(dest_cpu);
+
+	/*
+	 * Sync timestamp with rq_dest's before activating.
+	 * The same thing could be achieved by doing this step
+	 * afterwards, and pretending it was a local activate.
+	 * This way is cleaner and logically correct.
+	 */
+	p->timestamp = p->timestamp - rq_src->timestamp_last_tick
+			+ rq_dest->timestamp_last_tick;
+	deactivate_task(p, rq_src);
+	set_task_cpu(p, dest_cpu);
+	activate_task(p, rq_dest, 0);
+	if (TASK_PREEMPTS_CURR(p, rq_dest))
+		resched_task(rq_dest->curr);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static	void nick_set_select_idle_first(struct runqueue *rq)
+{
+	__setscheduler(rq->idle, SCHED_FIFO, MAX_RT_PRIO-1);
+	/* Add idle task to _front_ of its priority queue */
+	__activate_idle_task(rq->idle, rq);
+}
+
+static	void nick_set_select_idle_last(struct runqueue *rq)
+{
+	deactivate_task(rq->idle, rq);
+	rq->idle->static_prio = NICK_MAX_PRIO;
+	__setscheduler(rq->idle, SCHED_NORMAL, 0);
+}
+
+static void nick_migrate_dead_tasks(unsigned int dead_cpu)
+{
+	unsigned arr, i;
+	struct runqueue *rq = cpu_rq(dead_cpu);
+
+	for (arr = 0; arr < 2; arr++) {
+		for (i = 0; i < NICK_MAX_PRIO; i++) {
+			struct list_head *list = &rq->qu.nicksched.arrays[arr].queue[i];
+			while (!list_empty(list))
+				migrate_dead(dead_cpu,
+					     list_entry(list->next, task_t,
+							run_list));
+		}
+	}
+}
+#endif
+#endif
+
+static void nick_sched_init(void)
+{
+	init_task.sdu.nicksched.used_slice = 0;
+	init_task.sdu.nicksched.array = NULL;
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static void nick_normalize_rt_task(struct task_struct *p)
+{
+	struct nick_prio_array *array;
+	unsigned long flags;
+	runqueue_t *rq;
+
+	rq = task_rq_lock(p, &flags);
+
+	array = p->sdu.nicksched.array;
+	if (array)
+		deactivate_task(p, rq);
+	__setscheduler(p, SCHED_NORMAL, 0);
+	if (array) {
+		__activate_task(p, rq, array);
+		resched_task(rq->curr);
+	}
+
+	task_rq_unlock(rq, &flags);
+}
+#endif
+
+static unsigned int nick_task_timeslice(const struct task_struct *p)
+{
+	return task_timeslice(p, task_rq(p));
+}
+
+#ifdef CONFIG_SYSFS
+#define no_change(a) (a)
+SCHED_DRV_SYSFS_UINT_RW(base_timeslice, no_change, no_change, min_base_timeslice, max_base_timeslice);
+
+static struct attribute *nick_attrs[] = {
+	&SCHED_DRV_SYSFS_ATTR(base_timeslice),
+	NULL,
+};
+#endif
+
+const struct sched_drv nick_sched_drv = {
+	.name = "nicksched",
+	.init_runqueue_queue = nick_init_runqueue_queue,
+	.set_oom_time_slice = nick_set_oom_time_slice,
+	.task_timeslice = nick_task_timeslice,
+	.wake_up_task = nick_wake_up_task,
+	.fork = nick_fork,
+	.wake_up_new_task = nick_wake_up_new_task,
+	.exit = nick_exit,
+#ifdef CONFIG_SMP
+	.set_task_cpu = common_set_task_cpu,
+	.move_tasks = nick_move_tasks,
+#endif
+	.tick = nick_tick,
+#ifdef CONFIG_SCHED_SMT
+	.head_of_queue = nick_head_of_queue,
+	.dependent_sleeper_trumps = nick_dependent_sleeper_trumps,
+#endif
+	.schedule = nick_schedule,
+	.set_normal_task_nice = nick_set_normal_task_nice,
+	.setscheduler = nick_setscheduler,
+	.sys_yield = nick_sys_yield,
+	.yield = nick_yield,
+	.init_idle = nick_init_idle,
+	.sched_init = nick_sched_init,
+#ifdef CONFIG_SMP
+	.migrate_queued_task = nick_migrate_queued_task,
+#ifdef CONFIG_HOTPLUG_CPU
+	.set_select_idle_first = nick_set_select_idle_first,
+	.set_select_idle_last = nick_set_select_idle_last,
+	.migrate_dead_tasks = nick_migrate_dead_tasks,
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	.normalize_rt_task = nick_normalize_rt_task,
+#endif
+	.attrs = nick_attrs,
+};
diff -Naur linux-2.6.12-rc2-mm3/kernel/sched.c linux-2.6.12-rc2-mm3-plugsched/kernel/sched.c
--- linux-2.6.12-rc2-mm3/kernel/sched.c	2005-04-14 02:47:24.565708640 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/kernel/sched.c	2005-04-23 13:20:23.665974168 -0700
@@ -45,7 +45,6 @@
 #include <linux/perfctr.h>
 #include <linux/kthread.h>
 #include <linux/seq_file.h>
-#include <linux/sysctl.h>
 #include <linux/syscalls.h>
 #include <linux/times.h>
 #include <linux/acct.h>
@@ -53,282 +52,35 @@
 
 #include <asm/unistd.h>
 
-/*
- * Convert user-nice values [ -20 ... 0 ... 19 ]
- * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
- * and back.
- */
-#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
-#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
-#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)
-
-/*
- * 'User priority' is the nice value converted to something we
- * can work with better when scaling various scheduler parameters,
- * it's a [ 0 ... 39 ] range.
- */
-#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
-#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
-#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))
-
-/*
- * Some helpers for converting nanosecond timing to jiffy resolution
- */
-#define NS_TO_JIFFIES(TIME)	((TIME) / (1000000000 / HZ))
-#define JIFFIES_TO_NS(TIME)	((TIME) * (1000000000 / HZ))
-
-/*
- * These are the 'tuning knobs' of the scheduler:
- *
- * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
- * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
- * Timeslices get refilled after they expire.
- */
-#define MIN_TIMESLICE		max(5 * HZ / 1000, 1)
-#define DEF_TIMESLICE		(100 * HZ / 1000)
-#define ON_RUNQUEUE_WEIGHT	 30
-#define CHILD_PENALTY		 95
-#define PARENT_PENALTY		100
-#define EXIT_WEIGHT		  3
-#define PRIO_BONUS_RATIO	 25
-#define MAX_BONUS		(MAX_USER_PRIO * PRIO_BONUS_RATIO / 100)
-#define INTERACTIVE_DELTA	  2
-#define MAX_SLEEP_AVG		(DEF_TIMESLICE * MAX_BONUS)
-#define STARVATION_LIMIT	(MAX_SLEEP_AVG)
-#define NS_MAX_SLEEP_AVG	(JIFFIES_TO_NS(MAX_SLEEP_AVG))
-
-/*
- * If a task is 'interactive' then we reinsert it in the active
- * array after it has expired its current timeslice. (it will not
- * continue to run immediately, it will still roundrobin with
- * other interactive tasks.)
- *
- * This part scales the interactivity limit depending on niceness.
- *
- * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
- * Here are a few examples of different nice levels:
- *
- *  TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
- *  TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
- *  TASK_INTERACTIVE(  0): [1,1,1,1,0,0,0,0,0,0,0]
- *  TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
- *  TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
- *
- * (the X axis represents the possible -5 ... 0 ... +5 dynamic
- *  priority range a task can explore, a value of '1' means the
- *  task is rated interactive.)
- *
- * Ie. nice +19 tasks can never get 'interactive' enough to be
- * reinserted into the active array. And only heavily CPU-hog nice -20
- * tasks will be expired. Default nice 0 tasks are somewhere between,
- * it takes some effort for them to get interactive, but it's not
- * too hard.
- */
-
-#define CURRENT_BONUS(p) \
-	(NS_TO_JIFFIES((p)->sleep_avg) * MAX_BONUS / \
-		MAX_SLEEP_AVG)
+#include <linux/sched_runq.h>
+#include <linux/sched_pvt.h>
+#include <linux/sched_drv.h>
 
-#define GRANULARITY	(10 * HZ / 1000 ? : 1)
-
-#ifdef CONFIG_SMP
-#define TIMESLICE_GRANULARITY(p)	(GRANULARITY * \
-		(1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)) * \
-			num_online_cpus())
-#else
-#define TIMESLICE_GRANULARITY(p)	(GRANULARITY * \
-		(1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)))
-#endif
-
-#define SCALE(v1,v1_max,v2_max) \
-	(v1) * (v2_max) / (v1_max)
-
-#define DELTA(p) \
-	(SCALE(TASK_NICE(p), 40, MAX_BONUS) + INTERACTIVE_DELTA)
-
-#define TASK_INTERACTIVE(p) \
-	((p)->prio <= (p)->static_prio - DELTA(p))
-
-#define INTERACTIVE_SLEEP(p) \
-	(JIFFIES_TO_NS(MAX_SLEEP_AVG * \
-		(MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))
-
-#define TASK_PREEMPTS_CURR(p, rq) \
-	((p)->prio < (rq)->curr->prio)
-
-/*
- * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
- * to time slice values: [800ms ... 100ms ... 5ms]
- *
- * The higher a thread's priority, the bigger timeslices
- * it gets during one round of execution. But even the lowest
- * priority thread gets MIN_TIMESLICE worth of execution time.
- */
-
-#define SCALE_PRIO(x, prio) \
-	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE)
+void common_set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+	p->thread_info->cpu = cpu;
+}
 
-static unsigned int task_timeslice(task_t *p)
+static inline unsigned int task_timeslice(const task_t *p)
 {
-	if (p->static_prio < NICE_TO_PRIO(0))
-		return SCALE_PRIO(DEF_TIMESLICE*4, p->static_prio);
-	else
-		return SCALE_PRIO(DEF_TIMESLICE, p->static_prio);
+	return sched_drvp->task_timeslice(p);
 }
-#define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran)	\
-				< (long long) (sd)->cache_hot_time)
 
 /*
  * These are the runqueue data structures:
  */
+DEFINE_PER_CPU(struct runqueue, runqueues);
 
-#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
-
-typedef struct runqueue runqueue_t;
-
-struct prio_array {
-	unsigned int nr_active;
-	unsigned long bitmap[BITMAP_SIZE];
-	struct list_head queue[MAX_PRIO];
-};
-
-/*
- * This is the main, per-CPU runqueue data structure.
- *
- * Locking rule: those places that want to lock multiple runqueues
- * (such as the load balancing or the thread migration code), lock
- * acquire operations must be ordered by ascending &runqueue.
- */
-struct runqueue {
-	spinlock_t lock;
-
-	/*
-	 * nr_running and cpu_load should be in the same cacheline because
-	 * remote CPUs use both these fields when doing load calculation.
-	 */
-	unsigned long nr_running;
-#ifdef CONFIG_SMP
-	unsigned long cpu_load[3];
-#endif
-	unsigned long long nr_switches;
-
-	/*
-	 * This is part of a global counter where only the total sum
-	 * over all CPUs matters. A task can increase this counter on
-	 * one CPU and if it got migrated afterwards it may decrease
-	 * it on another CPU. Always updated under the runqueue lock:
-	 */
-	unsigned long nr_uninterruptible;
-
-	unsigned long expired_timestamp;
-	unsigned long long timestamp_last_tick;
-	task_t *curr, *idle;
-	prio_array_t *active, *expired, arrays[2];
-	int best_expired_prio;
-	atomic_t nr_iowait;
-
-#ifdef CONFIG_SMP
-	struct sched_domain *sd;
-
-	/* For active balancing */
-	int active_balance;
-	int push_cpu;
-
-	task_t *migration_thread;
-	struct list_head migration_queue;
-#endif
-
-#ifdef CONFIG_SCHEDSTATS
-	/* latency stats */
-	struct sched_info rq_sched_info;
-
-	/* sys_sched_yield() stats */
-	unsigned long yld_exp_empty;
-	unsigned long yld_act_empty;
-	unsigned long yld_both_empty;
-	unsigned long yld_cnt;
-
-	/* schedule() stats */
-	unsigned long sched_switch;
-	unsigned long sched_cnt;
-	unsigned long sched_goidle;
-
-	/* try_to_wake_up() stats */
-	unsigned long ttwu_cnt;
-	unsigned long ttwu_local;
-#endif
-};
-
-static DEFINE_PER_CPU(struct runqueue, runqueues);
-
-/*
- * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
- * See update_sched_domains: synchronize_kernel for details.
- *
- * The domain tree of any CPU may only be accessed from within
- * preempt-disabled sections.
- */
 #define for_each_domain(cpu, domain) \
 	for (domain = cpu_rq(cpu)->sd; domain; domain = domain->parent)
-
-#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
-#define this_rq()		(&__get_cpu_var(runqueues))
-#define task_rq(p)		cpu_rq(task_cpu(p))
-#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
-
-/*
- * We can optimise this out completely for !SMP, because the
- * SMP rebalancing from interrupt is the only thing that cares:
- */
-static inline void set_task_on_cpu(struct task_struct *p, int val)
-{
-#ifdef CONFIG_SMP
-	p->on_cpu = val;
-#endif
-}
-
-static inline int task_on_cpu(runqueue_t *rq, task_t *p)
-{
-#ifdef CONFIG_SMP
-	return p->on_cpu;
-#else
-	return rq->curr == p;
-#endif
-}
-
-/*
- * task_rq_lock - lock the runqueue a given task resides on and disable
- * interrupts.  Note the ordering: we can safely lookup the task_rq without
- * explicitly disabling preemption.
- */
-static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
-	__acquires(rq->lock)
-{
-	struct runqueue *rq;
-
-repeat_lock_task:
-	local_irq_save(*flags);
-	rq = task_rq(p);
-	spin_lock(&rq->lock);
-	if (unlikely(rq != task_rq(p))) {
-		spin_unlock_irqrestore(&rq->lock, *flags);
-		goto repeat_lock_task;
-	}
-	return rq;
-}
-
-static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
-	__releases(rq->lock)
-{
-	spin_unlock_irqrestore(&rq->lock, *flags);
-}
+#define task_is_queued(p)	(!list_empty(&(p)->run_list))
 
 #ifdef CONFIG_SCHEDSTATS
 /*
  * bump this up when changing the output format or the meaning of an existing
  * format, so that tools can adapt (or abort)
  */
-#define SCHEDSTAT_VERSION 12
+#define SCHEDSTAT_VERSION 11
 
 static int show_schedstat(struct seq_file *seq, void *v)
 {
@@ -357,7 +109,6 @@
 
 #ifdef CONFIG_SMP
 		/* domain-specific stats */
-		preempt_disable();
 		for_each_domain(cpu, sd) {
 			enum idle_type itype;
 			char mask_str[NR_CPUS];
@@ -376,13 +127,11 @@
 				    sd->lb_nobusyq[itype],
 				    sd->lb_nobusyg[itype]);
 			}
-			seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
+			seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu\n",
 			    sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
-			    sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
-			    sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
+			    sd->sbe_pushed, sd->sbe_attempts,
 			    sd->ttwu_wake_remote, sd->ttwu_move_affine, sd->ttwu_move_balance);
 		}
-		preempt_enable();
 #endif
 	}
 	return 0;
@@ -414,341 +163,25 @@
 	.release = single_release,
 };
 
-# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
 # define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
 #else /* !CONFIG_SCHEDSTATS */
-# define schedstat_inc(rq, field)	do { } while (0)
 # define schedstat_add(rq, field, amt)	do { } while (0)
 #endif
 
-/*
- * rq_lock - lock a given runqueue and disable interrupts.
- */
-static inline runqueue_t *this_rq_lock(void)
-	__acquires(rq->lock)
-{
-	runqueue_t *rq;
-
-	local_irq_disable();
-	rq = this_rq();
-	spin_lock(&rq->lock);
-
-	return rq;
-}
-
-#ifdef CONFIG_SCHEDSTATS
-/*
- * Called when a process is dequeued from the active array and given
- * the cpu.  We should note that with the exception of interactive
- * tasks, the expired queue will become the active queue after the active
- * queue is empty, without explicitly dequeuing and requeuing tasks in the
- * expired queue.  (Interactive tasks may be requeued directly to the
- * active queue, thus delaying tasks in the expired queue from running;
- * see scheduler_tick()).
- *
- * This function is only called from sched_info_arrive(), rather than
- * dequeue_task(). Even though a task may be queued and dequeued multiple
- * times as it is shuffled about, we're really interested in knowing how
- * long it was from the *first* time it was queued to the time that it
- * finally hit a cpu.
- */
-static inline void sched_info_dequeued(task_t *t)
-{
-	t->sched_info.last_queued = 0;
-}
-
-/*
- * Called when a task finally hits the cpu.  We can now calculate how
- * long it was waiting to run.  We also note when it began so that we
- * can keep stats on how long its timeslice is.
- */
-static inline void sched_info_arrive(task_t *t)
-{
-	unsigned long now = jiffies, diff = 0;
-	struct runqueue *rq = task_rq(t);
-
-	if (t->sched_info.last_queued)
-		diff = now - t->sched_info.last_queued;
-	sched_info_dequeued(t);
-	t->sched_info.run_delay += diff;
-	t->sched_info.last_arrival = now;
-	t->sched_info.pcnt++;
-
-	if (!rq)
-		return;
-
-	rq->rq_sched_info.run_delay += diff;
-	rq->rq_sched_info.pcnt++;
-}
-
-/*
- * Called when a process is queued into either the active or expired
- * array.  The time is noted and later used to determine how long we
- * had to wait for us to reach the cpu.  Since the expired queue will
- * become the active queue after active queue is empty, without dequeuing
- * and requeuing any tasks, we are interested in queuing to either. It
- * is unusual but not impossible for tasks to be dequeued and immediately
- * requeued in the same or another array: this can happen in sched_yield(),
- * set_user_nice(), and even load_balance() as it moves tasks from runqueue
- * to runqueue.
- *
- * This function is only called from enqueue_task(), but also only updates
- * the timestamp if it is already not set.  It's assumed that
- * sched_info_dequeued() will clear that stamp when appropriate.
- */
-static inline void sched_info_queued(task_t *t)
-{
-	if (!t->sched_info.last_queued)
-		t->sched_info.last_queued = jiffies;
-}
-
-/*
- * Called when a process ceases being the active-running process, either
- * voluntarily or involuntarily.  Now we can calculate how long we ran.
- */
-static inline void sched_info_depart(task_t *t)
-{
-	struct runqueue *rq = task_rq(t);
-	unsigned long diff = jiffies - t->sched_info.last_arrival;
-
-	t->sched_info.cpu_time += diff;
-
-	if (rq)
-		rq->rq_sched_info.cpu_time += diff;
-}
-
-/*
- * Called when tasks are switched involuntarily due, typically, to expiring
- * their time slice.  (This may also be called when switching to or from
- * the idle task.)  We are only called when prev != next.
- */
-static inline void sched_info_switch(task_t *prev, task_t *next)
-{
-	struct runqueue *rq = task_rq(prev);
-
-	/*
-	 * prev now departs the cpu.  It's not interesting to record
-	 * stats about how efficient we were at scheduling the idle
-	 * process, however.
-	 */
-	if (prev != rq->idle)
-		sched_info_depart(prev);
-
-	if (next != rq->idle)
-		sched_info_arrive(next);
-}
-#else
-#define sched_info_queued(t)		do { } while (0)
-#define sched_info_switch(t, next)	do { } while (0)
-#endif /* CONFIG_SCHEDSTATS */
-
-/*
- * Adding/removing a task to/from a priority array:
- */
-static void dequeue_task(struct task_struct *p, prio_array_t *array)
-{
-	array->nr_active--;
-	list_del(&p->run_list);
-	if (list_empty(array->queue + p->prio))
-		__clear_bit(p->prio, array->bitmap);
-}
-
-static void enqueue_task(struct task_struct *p, prio_array_t *array)
-{
-	sched_info_queued(p);
-	list_add_tail(&p->run_list, array->queue + p->prio);
-	__set_bit(p->prio, array->bitmap);
-	array->nr_active++;
-	p->array = array;
-}
-
-/*
- * Put task to the end of the run list without the overhead of dequeue
- * followed by enqueue.
- */
-static void requeue_task(struct task_struct *p, prio_array_t *array)
-{
-	list_move_tail(&p->run_list, array->queue + p->prio);
-}
-
-static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
-{
-	list_add(&p->run_list, array->queue + p->prio);
-	__set_bit(p->prio, array->bitmap);
-	array->nr_active++;
-	p->array = array;
-}
-
-/*
- * effective_prio - return the priority that is based on the static
- * priority but is modified by bonuses/penalties.
- *
- * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
- * into the -5 ... 0 ... +5 bonus/penalty range.
- *
- * We use 25% of the full 0...39 priority range so that:
- *
- * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
- * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
- *
- * Both properties are important to certain workloads.
- */
-static int effective_prio(task_t *p)
-{
-	int bonus, prio;
-
-	if (rt_task(p))
-		return p->prio;
-
-	bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
-
-	prio = p->static_prio - bonus;
-	if (prio < MAX_RT_PRIO)
-		prio = MAX_RT_PRIO;
-	if (prio > MAX_PRIO-1)
-		prio = MAX_PRIO-1;
-	return prio;
-}
-
-/*
- * __activate_task - move a task to the runqueue.
- */
-static inline void __activate_task(task_t *p, runqueue_t *rq)
-{
-	enqueue_task(p, rq->active);
-	rq->nr_running++;
-}
-
-/*
- * __activate_idle_task - move idle task to the _front_ of runqueue.
- */
-static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
-{
-	enqueue_task_head(p, rq->active);
-	rq->nr_running++;
-}
-
-static void recalc_task_prio(task_t *p, unsigned long long now)
+#ifdef CONFIG_SCHED_SMT
+int cpu_and_siblings_are_idle(int cpu)
 {
-	/* Caller must always ensure 'now >= p->timestamp' */
-	unsigned long long __sleep_time = now - p->timestamp;
-	unsigned long sleep_time;
-
-	if (__sleep_time > NS_MAX_SLEEP_AVG)
-		sleep_time = NS_MAX_SLEEP_AVG;
-	else
-		sleep_time = (unsigned long)__sleep_time;
-
-	if (likely(sleep_time > 0)) {
-		/*
-		 * User tasks that sleep a long time are categorised as
-		 * idle and will get just interactive status to stay active &
-		 * prevent them suddenly becoming cpu hogs and starving
-		 * other processes.
-		 */
-		if (p->mm && p->activated != -1 &&
-			sleep_time > INTERACTIVE_SLEEP(p)) {
-				p->sleep_avg = JIFFIES_TO_NS(MAX_SLEEP_AVG -
-						DEF_TIMESLICE);
-		} else {
-			/*
-			 * The lower the sleep avg a task has the more
-			 * rapidly it will rise with sleep time.
-			 */
-			sleep_time *= (MAX_BONUS - CURRENT_BONUS(p)) ? : 1;
-
-			/*
-			 * Tasks waking from uninterruptible sleep are
-			 * limited in their sleep_avg rise as they
-			 * are likely to be waiting on I/O
-			 */
-			if (p->activated == -1 && p->mm) {
-				if (p->sleep_avg >= INTERACTIVE_SLEEP(p))
-					sleep_time = 0;
-				else if (p->sleep_avg + sleep_time >=
-						INTERACTIVE_SLEEP(p)) {
-					p->sleep_avg = INTERACTIVE_SLEEP(p);
-					sleep_time = 0;
-				}
-			}
-
-			/*
-			 * This code gives a bonus to interactive tasks.
-			 *
-			 * The boost works by updating the 'average sleep time'
-			 * value here, based on ->timestamp. The more time a
-			 * task spends sleeping, the higher the average gets -
-			 * and the higher the priority boost gets as well.
-			 */
-			p->sleep_avg += sleep_time;
-
-			if (p->sleep_avg > NS_MAX_SLEEP_AVG)
-				p->sleep_avg = NS_MAX_SLEEP_AVG;
-		}
+	int sib;
+	for_each_cpu_mask(sib, cpu_sibling_map[cpu]) {
+		if (idle_cpu(sib))
+			continue;
+		return 0;
 	}
 
-	p->prio = effective_prio(p);
+	return 1;
 }
-
-/*
- * activate_task - move a task to the runqueue and do priority recalculation
- *
- * Update all the scheduling statistics stuff. (sleep average
- * calculation, priority modifiers, etc.)
- */
-static void activate_task(task_t *p, runqueue_t *rq, int local)
-{
-	unsigned long long now;
-
-	now = sched_clock();
-#ifdef CONFIG_SMP
-	if (!local) {
-		/* Compensate for drifting sched_clock */
-		runqueue_t *this_rq = this_rq();
-		now = (now - this_rq->timestamp_last_tick)
-			+ rq->timestamp_last_tick;
-	}
 #endif
 
-	recalc_task_prio(p, now);
-
-	/*
-	 * This checks to make sure it's not an uninterruptible task
-	 * that is now waking up.
-	 */
-	if (!p->activated) {
-		/*
-		 * Tasks which were woken up by interrupts (ie. hw events)
-		 * are most likely of interactive nature. So we give them
-		 * the credit of extending their sleep time to the period
-		 * of time they spend on the runqueue, waiting for execution
-		 * on a CPU, first time around:
-		 */
-		if (in_interrupt())
-			p->activated = 2;
-		else {
-			/*
-			 * Normal first-time wakeups get a credit too for
-			 * on-runqueue time, but it will be weighted down:
-			 */
-			p->activated = 1;
-		}
-	}
-	p->timestamp = now;
-
-	__activate_task(p, rq);
-}
-
-/*
- * deactivate_task - remove a task from the runqueue.
- */
-static void deactivate_task(struct task_struct *p, runqueue_t *rq)
-{
-	rq->nr_running--;
-	dequeue_task(p, p->array);
-	p->array = NULL;
-}
-
 /*
  * resched_task - mark a task 'to be rescheduled now'.
  *
@@ -757,7 +190,7 @@
  * the target CPU.
  */
 #ifdef CONFIG_SMP
-static void resched_task(task_t *p)
+void resched_task(task_t *p)
 {
 	int need_resched, nrpolling;
 
@@ -771,11 +204,6 @@
 	if (!need_resched && !nrpolling && (task_cpu(p) != smp_processor_id()))
 		smp_send_reschedule(task_cpu(p));
 }
-#else
-static inline void resched_task(task_t *p)
-{
-	set_tsk_need_resched(p);
-}
 #endif
 
 /**
@@ -788,12 +216,22 @@
 }
 
 #ifdef CONFIG_SMP
+enum request_type {
+	REQ_MOVE_TASK,
+	REQ_SET_DOMAIN,
+};
+
 typedef struct {
 	struct list_head list;
+	enum request_type type;
 
+	/* For REQ_MOVE_TASK */
 	task_t *task;
 	int dest_cpu;
 
+	/* For REQ_SET_DOMAIN */
+	struct sched_domain *sd;
+
 	struct completion done;
 } migration_req_t;
 
@@ -809,12 +247,13 @@
 	 * If the task is not on a runqueue (and not running), then
 	 * it is sufficient to simply update the task's cpu field.
 	 */
-	if (!p->array && !task_on_cpu(rq, p)) {
+	if (!task_is_queued(p) && !task_running(rq, p)) {
 		set_task_cpu(p, dest_cpu);
 		return 0;
 	}
 
 	init_completion(&req->done);
+	req->type = REQ_MOVE_TASK;
 	req->task = p;
 	req->dest_cpu = dest_cpu;
 	list_add(&req->list, &rq->migration_queue);
@@ -839,9 +278,9 @@
 repeat:
 	rq = task_rq_lock(p, &flags);
 	/* Must be off runqueue entirely, not preempted. */
-	if (unlikely(p->array || task_on_cpu(rq, p))) {
+	if (unlikely(task_is_queued(p) || task_running(rq, p))) {
 		/* If it's preempted, we yield.  It could be a while. */
-		preempted = !task_on_cpu(rq, p);
+		preempted = !task_running(rq, p);
 		task_rq_unlock(rq, &flags);
 		cpu_relax();
 		if (preempted)
@@ -856,177 +295,51 @@
  * @p: the to-be-kicked thread
  *
  * Cause a process which is running on another CPU to enter
- * kernel-mode, without any delay. (to get signals handled.)
- *
- * NOTE: this function doesnt have to take the runqueue lock,
- * because all it wants to ensure is that the remote task enters
- * the kernel. If the IPI races and the task has been migrated
- * to another CPU then no harm is done and the purpose has been
- * achieved as well.
- */
-void kick_process(task_t *p)
-{
-	int cpu;
-
-	preempt_disable();
-	cpu = task_cpu(p);
-	if ((cpu != smp_processor_id()) && task_curr(p))
-		smp_send_reschedule(cpu);
-	preempt_enable();
-}
-
-/*
- * Return a low guess at the load of a migration-source cpu.
- *
- * We want to under-estimate the load of migration sources, to
- * balance conservatively.
- */
-static inline unsigned long source_load(int cpu, int type)
-{
-	runqueue_t *rq = cpu_rq(cpu);
-	unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
-	if (type == 0)
-		return load_now;
-
-	return min(rq->cpu_load[type-1], load_now);
-}
-
-/*
- * Return a high guess at the load of a migration-target cpu
- */
-static inline unsigned long target_load(int cpu, int type)
-{
-	runqueue_t *rq = cpu_rq(cpu);
-	unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
-	if (type == 0)
-		return load_now;
-
-	return max(rq->cpu_load[type-1], load_now);
-}
-
-/*
- * find_idlest_group finds and returns the least busy CPU group within the
- * domain.
- */
-static struct sched_group *
-find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
-{
-	struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
-	unsigned long min_load = ULONG_MAX, this_load = 0;
-	int load_idx = sd->forkexec_idx;
-	int imbalance = 100 + (sd->imbalance_pct-100)/2;
-
-	do {
-		unsigned long load, avg_load;
-		int local_group;
-		int i;
-
-		local_group = cpu_isset(this_cpu, group->cpumask);
-		/* XXX: put a cpus allowed check */
-
-		/* Tally up the load of all CPUs in the group */
-		avg_load = 0;
-
-		for_each_cpu_mask(i, group->cpumask) {
-			/* Bias balancing toward cpus of our domain */
-			if (local_group)
-				load = source_load(i, load_idx);
-			else
-				load = target_load(i, load_idx);
-
-			avg_load += load;
-		}
-
-		/* Adjust by relative CPU power of the group */
-		avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
-
-		if (local_group) {
-			this_load = avg_load;
-			this = group;
-		} else if (avg_load < min_load) {
-			min_load = avg_load;
-			idlest = group;
-		}
-		group = group->next;
-	} while (group != sd->groups);
+ * kernel-mode, without any delay. (to get signals handled.)
+ *
+ * NOTE: this function doesnt have to take the runqueue lock,
+ * because all it wants to ensure is that the remote task enters
+ * the kernel. If the IPI races and the task has been migrated
+ * to another CPU then no harm is done and the purpose has been
+ * achieved as well.
+ */
+void kick_process(task_t *p)
+{
+	int cpu;
 
-	if (!idlest || 100*this_load < imbalance*min_load)
-		return NULL;
-	return idlest;
+	preempt_disable();
+	cpu = task_cpu(p);
+	if ((cpu != smp_processor_id()) && task_curr(p))
+		smp_send_reschedule(cpu);
+	preempt_enable();
 }
 
 /*
- * find_idlest_queue - find the idlest runqueue among the cpus in group.
+ * Return a low guess at the load of a migration-source cpu.
+ *
+ * We want to under-estimate the load of migration sources, to
+ * balance conservatively.
  */
-static int find_idlest_cpu(struct sched_group *group, int this_cpu)
+static inline unsigned long source_load(int cpu)
 {
-	unsigned long load, min_load = ULONG_MAX;
-	int idlest = -1;
-	int i;
-
-	for_each_cpu_mask(i, group->cpumask) {
-		load = source_load(i, 0);
-
-		if (load < min_load || (load == min_load && i == this_cpu)) {
-			min_load = load;
-			idlest = i;
-		}
-	}
+	runqueue_t *rq = cpu_rq(cpu);
+	unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
 
-	return idlest;
+	return min(rq->cpu_load, load_now);
 }
 
 /*
- * sched_balance_self: balance the current task (running on cpu) in domains
- * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
- * SD_BALANCE_EXEC.
- *
- * Balance, ie. select the least loaded group.
- *
- * Returns the target CPU number, or the same CPU if no balancing is needed.
- *
- * preempt must be disabled.
+ * Return a high guess at the load of a migration-target cpu
  */
-static int sched_balance_self(int cpu, int flag)
+static inline unsigned long target_load(int cpu)
 {
-	struct task_struct *t = current;
-	struct sched_domain *tmp, *sd = NULL;
-
-	for_each_domain(cpu, tmp)
-		if (tmp->flags & flag)
-			sd = tmp;
-
-	while (sd) {
-		cpumask_t span;
-		struct sched_group *group;
-		int new_cpu;
-
-		span = sd->span;
-		group = find_idlest_group(sd, t, cpu);
-		if (!group)
-			goto nextlevel;
-
-		new_cpu = find_idlest_cpu(group, cpu);
-		if (new_cpu == -1 || new_cpu == cpu)
-			goto nextlevel;
-
-		/* Now try balancing at a lower domain level */
-		cpu = new_cpu;
-nextlevel:
-		sd = NULL;
-		for_each_domain(cpu, tmp) {
-			if (cpus_subset(span, tmp->span))
-				break;
-			if (tmp->flags & flag)
-				sd = tmp;
-		}
-		/* while loop will break here if sd == NULL */
-	}
+	runqueue_t *rq = cpu_rq(cpu);
+	unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
 
-	return cpu;
+	return max(rq->cpu_load, load_now);
 }
 
-#endif /* CONFIG_SMP */
+#endif
 
 /*
  * wake_idle() will wake a task on an idle cpu if task->cpu is
@@ -1048,14 +361,14 @@
 
 	for_each_domain(cpu, sd) {
 		if (sd->flags & SD_WAKE_IDLE) {
-			cpus_and(tmp, sd->span, p->cpus_allowed);
+			cpus_and(tmp, sd->span, cpu_online_map);
+			cpus_and(tmp, tmp, p->cpus_allowed);
 			for_each_cpu_mask(i, tmp) {
 				if (idle_cpu(i))
 					return i;
 			}
 		}
-		else
-			break;
+		else break;
 	}
 	return cpu;
 }
@@ -1088,7 +401,7 @@
 	runqueue_t *rq;
 #ifdef CONFIG_SMP
 	unsigned long load, this_load;
-	struct sched_domain *sd, *this_sd = NULL;
+	struct sched_domain *sd;
 	int new_cpu;
 #endif
 
@@ -1097,79 +410,80 @@
 	if (!(old_state & state))
 		goto out;
 
-	if (p->array)
+	if (task_is_queued(p))
 		goto out_running;
 
 	cpu = task_cpu(p);
 	this_cpu = smp_processor_id();
 
 #ifdef CONFIG_SMP
-	if (unlikely(task_on_cpu(rq, p)))
+	if (unlikely(task_running(rq, p)))
 		goto out_activate;
 
-	new_cpu = cpu;
-
+#ifdef CONFIG_SCHEDSTATS
 	schedstat_inc(rq, ttwu_cnt);
 	if (cpu == this_cpu) {
 		schedstat_inc(rq, ttwu_local);
-		goto out_set_cpu;
-	}
-
-	for_each_domain(this_cpu, sd) {
-		if (cpu_isset(cpu, sd->span)) {
-			schedstat_inc(sd, ttwu_wake_remote);
-			this_sd = sd;
-			break;
+	} else {
+		for_each_domain(this_cpu, sd) {
+			if (cpu_isset(cpu, sd->span)) {
+				schedstat_inc(sd, ttwu_wake_remote);
+				break;
+			}
 		}
 	}
+#endif
 
-	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
+	new_cpu = cpu;
+	if (cpu == this_cpu || unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
 		goto out_set_cpu;
 
+	load = source_load(cpu);
+	this_load = target_load(this_cpu);
+
 	/*
-	 * Check for affine wakeup and passive balancing possibilities.
+	 * If sync wakeup then subtract the (maximum possible) effect of
+	 * the currently running task from the load of the current CPU:
 	 */
-	if (this_sd) {
-		int idx = this_sd->wake_idx;
-		unsigned int imbalance;
+	if (sync)
+		this_load -= SCHED_LOAD_SCALE;
 
-		imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
+	/* Don't pull the task off an idle CPU to a busy one */
+	if (load < SCHED_LOAD_SCALE/2 && this_load > SCHED_LOAD_SCALE/2)
+		goto out_set_cpu;
 
-		load = source_load(cpu, idx);
-		this_load = target_load(this_cpu, idx);
+	new_cpu = this_cpu; /* Wake to this CPU if we can */
 
-		new_cpu = this_cpu; /* Wake to this CPU if we can */
+	/*
+	 * Scan domains for affine wakeup and passive balancing
+	 * possibilities.
+	 */
+	for_each_domain(this_cpu, sd) {
+		unsigned int imbalance;
+		/*
+		 * Start passive balancing when half the imbalance_pct
+		 * limit is reached.
+		 */
+		imbalance = sd->imbalance_pct + (sd->imbalance_pct - 100) / 2;
 
-		if (this_sd->flags & SD_WAKE_AFFINE) {
-			unsigned long tl = this_load;
+		if ((sd->flags & SD_WAKE_AFFINE) &&
+				!task_hot(p, rq->timestamp_last_tick, sd)) {
 			/*
-			 * If sync wakeup then subtract the (maximum possible)
-			 * effect of the currently running task from the load
-			 * of the current CPU:
+			 * This domain has SD_WAKE_AFFINE and p is cache cold
+			 * in this domain.
 			 */
-			if (sync)
-				tl -= SCHED_LOAD_SCALE;
-
-			if ((tl <= load &&
-				tl + target_load(cpu, idx) <= SCHED_LOAD_SCALE) ||
-				100*(tl + SCHED_LOAD_SCALE) <= imbalance*load) {
-				/*
-				 * This domain has SD_WAKE_AFFINE and
-				 * p is cache cold in this domain, and
-				 * there is no bad imbalance.
-				 */
-				schedstat_inc(this_sd, ttwu_move_affine);
+			if (cpu_isset(cpu, sd->span)) {
+				schedstat_inc(sd, ttwu_move_affine);
 				goto out_set_cpu;
 			}
-		}
-
-		/*
-		 * Start passive balancing when half the imbalance_pct
-		 * limit is reached.
-		 */
-		if (this_sd->flags & SD_WAKE_BALANCE) {
-			if (imbalance*this_load <= 100*load) {
-				schedstat_inc(this_sd, ttwu_move_balance);
+		} else if ((sd->flags & SD_WAKE_BALANCE) &&
+				imbalance*this_load <= 100*load) {
+			/*
+			 * This domain has SD_WAKE_BALANCE and there is
+			 * an imbalance.
+			 */
+			if (cpu_isset(cpu, sd->span)) {
+				schedstat_inc(sd, ttwu_move_balance);
 				goto out_set_cpu;
 			}
 		}
@@ -1186,7 +500,7 @@
 		old_state = p->state;
 		if (!(old_state & state))
 			goto out;
-		if (p->array)
+		if (task_is_queued(p))
 			goto out_running;
 
 		this_cpu = smp_processor_id();
@@ -1195,28 +509,7 @@
 
 out_activate:
 #endif /* CONFIG_SMP */
-	if (old_state == TASK_UNINTERRUPTIBLE) {
-		rq->nr_uninterruptible--;
-		/*
-		 * Tasks on involuntary sleep don't earn
-		 * sleep_avg beyond just interactive state.
-		 */
-		p->activated = -1;
-	}
-
-	/*
-	 * Sync wakeups (i.e. those types of wakeups where the waker
-	 * has indicated that it will leave the CPU in short order)
-	 * don't trigger a preemption, if the woken up task will run on
-	 * this cpu. (in this case the 'I will reschedule' promise of
-	 * the waker guarantees that the freshly woken up task is going
-	 * to be considered on this CPU.)
-	 */
-	activate_task(p, rq, cpu == this_cpu);
-	if (!sync || cpu != this_cpu) {
-		if (TASK_PREEMPTS_CURR(p, rq))
-			resched_task(rq->curr);
-	}
+	sched_drvp->wake_up_task(p, rq, old_state, sync);
 	success = 1;
 
 out_running:
@@ -1240,19 +533,17 @@
 	return try_to_wake_up(p, state, 0);
 }
 
+#ifdef CONFIG_SMP
+static int find_idlest_cpu(struct task_struct *p, int this_cpu,
+			   struct sched_domain *sd);
+#endif
+
 /*
  * Perform scheduler related setup for a newly forked process p.
  * p is forked by current.
  */
-void fastcall sched_fork(task_t *p, int clone_flags)
+void fastcall sched_fork(task_t *p)
 {
-	int cpu = get_cpu();
-
-#ifdef CONFIG_SMP
-	cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
-#endif
-	set_task_cpu(p, cpu);
-
 	/*
 	 * We mark the process as running here, but have not actually
 	 * inserted it onto the runqueue yet. This guarantees that
@@ -1261,40 +552,20 @@
 	 */
 	p->state = TASK_RUNNING;
 	INIT_LIST_HEAD(&p->run_list);
-	p->array = NULL;
+	spin_lock_init(&p->switch_lock);
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
-	set_task_on_cpu(p, 0);
 #ifdef CONFIG_PREEMPT
-	/* Want to start with kernel preemption disabled. */
-	p->thread_info->preempt_count = 1;
-#endif
-	/*
-	 * Share the timeslice between parent and child, thus the
-	 * total amount of pending timeslices in the system doesn't change,
-	 * resulting in more scheduling fairness.
-	 */
-	local_irq_disable();
-	p->time_slice = (current->time_slice + 1) >> 1;
 	/*
-	 * The remainder of the first timeslice might be recovered by
-	 * the parent if the child exits early enough.
+	 * During context-switch we hold precisely one spinlock, which
+	 * schedule_tail drops. (in the common case it's this_rq()->lock,
+	 * but it also can be p->switch_lock.) So we compensate with a count
+	 * of 1. Also, we want to start with kernel preemption disabled.
 	 */
-	p->first_time_slice = 1;
-	current->time_slice >>= 1;
-	p->timestamp = sched_clock();
-	if (unlikely(!current->time_slice)) {
-		/*
-		 * This case is rare, it happens when the parent has only
-		 * a single jiffy left from its timeslice. Taking the
-		 * runqueue lock is not a problem.
-		 */
-		current->time_slice = 1;
-		scheduler_tick();
-	}
-	local_irq_enable();
-	put_cpu();
+	p->thread_info->preempt_count = 1;
+#endif
+	sched_drvp->fork(p);
 }
 
 /*
@@ -1306,156 +577,12 @@
  */
 void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags)
 {
-	unsigned long flags;
-	int this_cpu, cpu;
-	runqueue_t *rq, *this_rq;
-
-	rq = task_rq_lock(p, &flags);
-	BUG_ON(p->state != TASK_RUNNING);
-	this_cpu = smp_processor_id();
-	cpu = task_cpu(p);
-
-	/*
-	 * We decrease the sleep average of forking parents
-	 * and children as well, to keep max-interactive tasks
-	 * from forking tasks that are max-interactive. The parent
-	 * (current) is done further down, under its lock.
-	 */
-	p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
-		CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
-
-	p->prio = effective_prio(p);
-
-	if (likely(cpu == this_cpu)) {
-		if (!(clone_flags & CLONE_VM)) {
-			/*
-			 * The VM isn't cloned, so we're in a good position to
-			 * do child-runs-first in anticipation of an exec. This
-			 * usually avoids a lot of COW overhead.
-			 */
-			if (unlikely(!current->array))
-				__activate_task(p, rq);
-			else {
-				p->prio = current->prio;
-				list_add_tail(&p->run_list, &current->run_list);
-				p->array = current->array;
-				p->array->nr_active++;
-				rq->nr_running++;
-			}
-			set_need_resched();
-		} else
-			/* Run child last */
-			__activate_task(p, rq);
-		/*
-		 * We skip the following code due to cpu == this_cpu
-	 	 *
-		 *   task_rq_unlock(rq, &flags);
-		 *   this_rq = task_rq_lock(current, &flags);
-		 */
-		this_rq = rq;
-	} else {
-		this_rq = cpu_rq(this_cpu);
-
-		/*
-		 * Not the local CPU - must adjust timestamp. This should
-		 * get optimised away in the !CONFIG_SMP case.
-		 */
-		p->timestamp = (p->timestamp - this_rq->timestamp_last_tick)
-					+ rq->timestamp_last_tick;
-		__activate_task(p, rq);
-		if (TASK_PREEMPTS_CURR(p, rq))
-			resched_task(rq->curr);
-
-		/*
-		 * Parent and child are on different CPUs, now get the
-		 * parent runqueue to update the parent's ->sleep_avg:
-		 */
-		task_rq_unlock(rq, &flags);
-		this_rq = task_rq_lock(current, &flags);
-	}
-	current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
-		PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
-	task_rq_unlock(this_rq, &flags);
+	sched_drvp->wake_up_new_task(p, clone_flags);
 }
 
-/*
- * Potentially available exiting-child timeslices are
- * retrieved here - this way the parent does not get
- * penalized for creating too many threads.
- *
- * (this cannot be used to 'generate' timeslices
- * artificially, because any timeslice recovered here
- * was given away by the parent in the first place.)
- */
 void fastcall sched_exit(task_t * p)
 {
-	unsigned long flags;
-	runqueue_t *rq;
-
-	/*
-	 * If the child was a (relative-) CPU hog then decrease
-	 * the sleep_avg of the parent as well.
-	 */
-	rq = task_rq_lock(p->parent, &flags);
-	if (p->first_time_slice) {
-		p->parent->time_slice += p->time_slice;
-		if (unlikely(p->parent->time_slice > task_timeslice(p)))
-			p->parent->time_slice = task_timeslice(p);
-	}
-	if (p->sleep_avg < p->parent->sleep_avg)
-		p->parent->sleep_avg = p->parent->sleep_avg /
-		(EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg /
-		(EXIT_WEIGHT + 1);
-	task_rq_unlock(rq, &flags);
-}
-
-/**
- * __schedule_tail - switch to the new MM and clean up after a task-switch
- * @prev: the thread we just switched away from.
- */
-static void __schedule_tail(task_t *prev)
-{
-	/*
-	 * A task struct has one reference for the use as "current".
-	 * If a task dies, then it sets EXIT_ZOMBIE in tsk->exit_state and
-	 * calls schedule one last time. The schedule call will never return,
-	 * and the scheduled task must drop that reference.
-	 * The test for EXIT_ZOMBIE must occur while the runqueue locks are
-	 * still held, otherwise prev could be scheduled on another cpu, die
-	 * there before we look at prev->state, and then the reference would
-	 * be dropped twice.
-	 *		Manfred Spraul <manfred@colorfullife.com>
-	 */
-	struct task_struct *next = current;
-	unsigned long prev_task_flags = prev->flags;
-	struct mm_struct *prev_mm = prev->active_mm, *next_mm = next->mm;
-
-	/*
-	 * Switch the MM first:
-	 */
-	if (unlikely(!next_mm)) {
-		next->active_mm = prev_mm;
-		atomic_inc(&prev_mm->mm_count);
-		enter_lazy_tlb(prev_mm, next);
-	} else
-		switch_mm(prev_mm, next_mm, next);
-
-	if (unlikely(!prev->mm))
-		prev->active_mm = NULL;
-	else
-		prev_mm = NULL;
-	/*
-	 * After ->on_cpu is cleared, the previous task is free to be
-	 * moved to a different CPU. We must ensure this doesn't happen
-	 * until the switch is completely finished.
-	 */
-	smp_wmb();
-	set_task_on_cpu(prev, 0);
-
-	if (prev_mm)
-		mmdrop(prev_mm);
-	if (unlikely(prev_task_flags & PF_DEAD))
-		put_task_struct(prev);
+	sched_drvp->exit(p);
 }
 
 /**
@@ -1463,10 +590,10 @@
  * @prev: the thread we just switched away from.
  */
 asmlinkage void schedule_tail(task_t *prev)
+	__releases(rq->lock)
 {
-	__schedule_tail(prev);
-	/* __schedule_tail does not reenable preemption: */
-	preempt_enable();
+	finish_task_switch(prev);
+
 	if (current->set_child_tid)
 		put_user(current->pid, current->set_child_tid);
 }
@@ -1587,6 +714,51 @@
 }
 
 /*
+ * find_idlest_cpu - find the least busy runqueue.
+ */
+static int find_idlest_cpu(struct task_struct *p, int this_cpu,
+			   struct sched_domain *sd)
+{
+	unsigned long load, min_load, this_load;
+	int i, min_cpu;
+	cpumask_t mask;
+
+	min_cpu = UINT_MAX;
+	min_load = ULONG_MAX;
+
+	cpus_and(mask, sd->span, p->cpus_allowed);
+
+	for_each_cpu_mask(i, mask) {
+		load = target_load(i);
+
+		if (load < min_load) {
+			min_cpu = i;
+			min_load = load;
+
+			/* break out early on an idle CPU: */
+			if (!min_load)
+				break;
+		}
+	}
+
+	/* add +1 to account for the new task */
+	this_load = source_load(this_cpu) + SCHED_LOAD_SCALE;
+
+	/*
+	 * Would the addition of the new task to the
+	 * current CPU create an imbalance between this
+	 * CPU and the idlest CPU?
+	 *
+	 * Use half of the balancing threshold - new-context is
+	 * a good opportunity to balance.
+	 */
+	if (min_load*(100 + (sd->imbalance_pct-100)/2) < this_load*100)
+		return min_cpu;
+
+	return this_cpu;
+}
+
+/*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
  * allow dest_cpu, which will force the cpu onto dest_cpu.  Then
@@ -1619,73 +791,37 @@
 }
 
 /*
- * sched_exec - execve() is a valuable balancing opportunity, because at
- * this point the task has the smallest effective memory and cache footprint.
+ * sched_exec(): find the highest-level, exec-balance-capable
+ * domain and try to migrate the task to the least loaded CPU.
+ *
+ * execve() is a valuable balancing opportunity, because at this point
+ * the task has the smallest effective memory and cache footprint.
  */
 void sched_exec(void)
 {
+	struct sched_domain *tmp, *sd = NULL;
 	int new_cpu, this_cpu = get_cpu();
-	new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC);
-	put_cpu();
-	if (new_cpu != this_cpu)
-		sched_migrate_task(current, new_cpu);
-}
-
-/*
- * pull_task - move a task from a remote runqueue to the local runqueue.
- * Both runqueues must be locked.
- */
-static inline
-void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
-	       runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
-{
-	dequeue_task(p, src_array);
-	src_rq->nr_running--;
-	set_task_cpu(p, this_cpu);
-	this_rq->nr_running++;
-	enqueue_task(p, this_array);
-	p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
-				+ this_rq->timestamp_last_tick;
-	/*
-	 * Note that idle threads have a prio of MAX_PRIO, for this test
-	 * to be always true for them.
-	 */
-	if (TASK_PREEMPTS_CURR(p, this_rq))
-		resched_task(this_rq->curr);
-}
-
-/*
- * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
- */
-static inline
-int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
-	     struct sched_domain *sd, enum idle_type idle, int *all_pinned)
-{
-	/*
-	 * We do not migrate tasks that are:
-	 * 1) running (obviously), or
-	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
-	 * 3) are cache-hot on their current CPU.
-	 */
-	if (!cpu_isset(this_cpu, p->cpus_allowed))
-		return 0;
-	*all_pinned = 0;
-
-	if (task_on_cpu(rq, p))
-		return 0;
 
-	/*
-	 * Aggressive migration if:
-	 * 1) task is cache cold, or
-	 * 2) too many balance attempts have failed.
-	 */
+	/* Prefer the current CPU if there's only this task running */
+	if (this_rq()->nr_running <= 1)
+		goto out;
 
-	if (sd->nr_balance_failed > sd->cache_nice_tries)
-		return 1;
+	for_each_domain(this_cpu, tmp)
+		if (tmp->flags & SD_BALANCE_EXEC)
+			sd = tmp;
 
-	if (task_hot(p, rq->timestamp_last_tick, sd))
-		return 0;
-	return 1;
+	if (sd) {
+		schedstat_inc(sd, sbe_attempts);
+		new_cpu = find_idlest_cpu(current, this_cpu, sd);
+		if (new_cpu != this_cpu) {
+			schedstat_inc(sd, sbe_pushed);
+			put_cpu();
+			sched_migrate_task(current, new_cpu);
+			return;
+		}
+	}
+out:
+	put_cpu();
 }
 
 /*
@@ -1695,81 +831,12 @@
  *
  * Called with both runqueues locked.
  */
-static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
+static inline int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
 		      unsigned long max_nr_move, struct sched_domain *sd,
-		      enum idle_type idle, int *all_pinned)
+		      enum idle_type idle)
 {
-	prio_array_t *array, *dst_array;
-	struct list_head *head, *curr;
-	int idx, pulled = 0, pinned = 0;
-	task_t *tmp;
-
-	if (max_nr_move == 0)
-		goto out;
-
-	pinned = 1;
-
-	/*
-	 * We first consider expired tasks. Those will likely not be
-	 * executed in the near future, and they are most likely to
-	 * be cache-cold, thus switching CPUs has the least effect
-	 * on them.
-	 */
-	if (busiest->expired->nr_active) {
-		array = busiest->expired;
-		dst_array = this_rq->expired;
-	} else {
-		array = busiest->active;
-		dst_array = this_rq->active;
-	}
-
-new_array:
-	/* Start searching at priority 0: */
-	idx = 0;
-skip_bitmap:
-	if (!idx)
-		idx = sched_find_first_bit(array->bitmap);
-	else
-		idx = find_next_bit(array->bitmap, MAX_PRIO, idx);
-	if (idx >= MAX_PRIO) {
-		if (array == busiest->expired && busiest->active->nr_active) {
-			array = busiest->active;
-			dst_array = this_rq->active;
-			goto new_array;
-		}
-		goto out;
-	}
-
-	head = array->queue + idx;
-	curr = head->prev;
-skip_queue:
-	tmp = list_entry(curr, task_t, run_list);
-
-	curr = curr->prev;
-
-	if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
-		if (curr != head)
-			goto skip_queue;
-		idx++;
-		goto skip_bitmap;
-	}
+	int pulled = sched_drvp->move_tasks(this_rq, this_cpu, busiest, max_nr_move, sd, idle);
 
-#ifdef CONFIG_SCHEDSTATS
-	if (task_hot(tmp, busiest->timestamp_last_tick, sd))
-		schedstat_inc(sd, lb_hot_gained[idle]);
-#endif
-
-	pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
-	pulled++;
-
-	/* We only want to steal up to the prescribed number of tasks. */
-	if (pulled < max_nr_move) {
-		if (curr != head)
-			goto skip_queue;
-		idx++;
-		goto skip_bitmap;
-	}
-out:
 	/*
 	 * Right now, this is the only place pull_task() is called,
 	 * so we can safely collect pull_task() stats here rather than
@@ -1777,8 +844,6 @@
 	 */
 	schedstat_add(sd, lb_gained[idle], pulled);
 
-	if (all_pinned)
-		*all_pinned = pinned;
 	return pulled;
 }
 
@@ -1793,15 +858,8 @@
 {
 	struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
 	unsigned long max_load, avg_load, total_load, this_load, total_pwr;
-	int load_idx;
 
 	max_load = this_load = total_load = total_pwr = 0;
-	if (idle == NOT_IDLE)
-		load_idx = sd->busy_idx;
-	else if (idle == NEWLY_IDLE)
-		load_idx = sd->newidle_idx;
-	else
-		load_idx = sd->idle_idx;
 
 	do {
 		unsigned long load;
@@ -1816,9 +874,9 @@
 		for_each_cpu_mask(i, group->cpumask) {
 			/* Bias balancing toward cpus of our domain */
 			if (local_group)
-				load = target_load(i, load_idx);
+				load = target_load(i);
 			else
-				load = source_load(i, load_idx);
+				load = source_load(i);
 
 			avg_load += load;
 		}
@@ -1832,10 +890,12 @@
 		if (local_group) {
 			this_load = avg_load;
 			this = group;
+			goto nextgroup;
 		} else if (avg_load > max_load) {
 			max_load = avg_load;
 			busiest = group;
 		}
+nextgroup:
 		group = group->next;
 	} while (group != sd->groups);
 
@@ -1908,9 +968,15 @@
 
 	/* Get rid of the scaling factor, rounding down as we divide */
 	*imbalance = *imbalance / SCHED_LOAD_SCALE;
+
 	return busiest;
 
 out_balanced:
+	if (busiest && (idle == NEWLY_IDLE ||
+			(idle == SCHED_IDLE && max_load > SCHED_LOAD_SCALE)) ) {
+		*imbalance = 1;
+		return busiest;
+	}
 
 	*imbalance = 0;
 	return NULL;
@@ -1926,7 +992,7 @@
 	int i;
 
 	for_each_cpu_mask(i, group->cpumask) {
-		load = source_load(i, 0);
+		load = source_load(i);
 
 		if (load > max_load) {
 			max_load = load;
@@ -1938,12 +1004,6 @@
 }
 
 /*
- * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
- * so long as it is large enough.
- */
-#define MAX_PINNED_INTERVAL	512
-
-/*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
  *
@@ -1955,8 +1015,7 @@
 	struct sched_group *group;
 	runqueue_t *busiest;
 	unsigned long imbalance;
-	int nr_moved, all_pinned = 0;
-	int active_balance = 0;
+	int nr_moved;
 
 	spin_lock(&this_rq->lock);
 	schedstat_inc(sd, lb_cnt[idle]);
@@ -1973,7 +1032,15 @@
 		goto out_balanced;
 	}
 
-	BUG_ON(busiest == this_rq);
+	/*
+	 * This should be "impossible", but since load
+	 * balancing is inherently racy and statistical,
+	 * it could happen in theory.
+	 */
+	if (unlikely(busiest == this_rq)) {
+		WARN_ON(1);
+		goto out_balanced;
+	}
 
 	schedstat_add(sd, lb_imbalance[idle], imbalance);
 
@@ -1987,15 +1054,9 @@
 		 */
 		double_lock_balance(this_rq, busiest);
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
-						imbalance, sd, idle,
-						&all_pinned);
+						imbalance, sd, idle);
 		spin_unlock(&busiest->lock);
-
-		/* All tasks on this runqueue were pinned by CPU affinity */
-		if (unlikely(all_pinned))
-			goto out_balanced;
 	}
-
 	spin_unlock(&this_rq->lock);
 
 	if (!nr_moved) {
@@ -2003,38 +1064,36 @@
 		sd->nr_balance_failed++;
 
 		if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
+			int wake = 0;
 
 			spin_lock(&busiest->lock);
 			if (!busiest->active_balance) {
 				busiest->active_balance = 1;
 				busiest->push_cpu = this_cpu;
-				active_balance = 1;
+				wake = 1;
 			}
 			spin_unlock(&busiest->lock);
-			if (active_balance)
+			if (wake)
 				wake_up_process(busiest->migration_thread);
 
 			/*
 			 * We've kicked active balancing, reset the failure
 			 * counter.
 			 */
-			sd->nr_balance_failed = sd->cache_nice_tries+1;
+			sd->nr_balance_failed = sd->cache_nice_tries;
 		}
-	} else
+
+		/*
+		 * We were unbalanced, but unsuccessful in move_tasks(),
+		 * so bump the balance_interval to lessen the lock contention.
+		 */
+		if (sd->balance_interval < sd->max_interval)
+			sd->balance_interval++;
+	} else {
 		sd->nr_balance_failed = 0;
 
-	if (likely(!active_balance)) {
 		/* We were unbalanced, so reset the balancing interval */
 		sd->balance_interval = sd->min_interval;
-	} else {
-		/*
-		 * If we've begun active balancing, start to back off. This
-		 * case may not be covered by the all_pinned logic if there
-		 * is only 1 task on the busy runqueue (because we don't call
-		 * move_tasks).
-		 */
-		if (sd->balance_interval < sd->max_interval)
-			sd->balance_interval *= 2;
 	}
 
 	return nr_moved;
@@ -2044,10 +1103,8 @@
 
 	schedstat_inc(sd, lb_balanced[idle]);
 
-	sd->nr_balance_failed = 0;
 	/* tune up the balancing interval */
-	if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
-			(sd->balance_interval < sd->max_interval))
+	if (sd->balance_interval < sd->max_interval)
 		sd->balance_interval *= 2;
 
 	return 0;
@@ -2071,43 +1128,38 @@
 	schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
 	group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE);
 	if (!group) {
+		schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
 		schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
-		goto out_balanced;
+		goto out;
 	}
 
 	busiest = find_busiest_queue(group);
-	if (!busiest) {
+	if (!busiest || busiest == this_rq) {
+		schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
 		schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
-		goto out_balanced;
+		goto out;
 	}
 
-	BUG_ON(busiest == this_rq);
-
 	/* Attempt to move tasks */
 	double_lock_balance(this_rq, busiest);
 
 	schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
 	nr_moved = move_tasks(this_rq, this_cpu, busiest,
-					imbalance, sd, NEWLY_IDLE, NULL);
+					imbalance, sd, NEWLY_IDLE);
 	if (!nr_moved)
 		schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
-	else
-		sd->nr_balance_failed = 0;
 
 	spin_unlock(&busiest->lock);
-	return nr_moved;
 
-out_balanced:
-	schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
-	sd->nr_balance_failed = 0;
-	return 0;
+out:
+	return nr_moved;
 }
 
 /*
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  */
-static inline void idle_balance(int this_cpu, runqueue_t *this_rq)
+void idle_balance(int this_cpu, runqueue_t *this_rq)
 {
 	struct sched_domain *sd;
 
@@ -2131,46 +1183,57 @@
  */
 static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu)
 {
-	struct sched_domain *tmp = NULL, *sd;
+	struct sched_domain *sd;
+	struct sched_group *cpu_group;
 	runqueue_t *target_rq;
-	int target_cpu = busiest_rq->push_cpu;
-
-	if (busiest_rq->nr_running <= 1)
-		/* no task to move */
-		return;
-
-	target_rq = cpu_rq(target_cpu);
+	cpumask_t visited_cpus;
+	int cpu;
 
 	/*
-	 * This condition is "impossible", if it occurs
-	 * we need to fix it.  Originally reported by
-	 * Bjorn Helgaas on a 128-cpu setup.
+	 * Search for suitable CPUs to push tasks to in successively higher
+	 * domains with SD_LOAD_BALANCE set.
 	 */
-	BUG_ON(busiest_rq == target_rq);
-
-	/* move a task from busiest_rq to target_rq */
-	double_lock_balance(busiest_rq, target_rq);
+	visited_cpus = CPU_MASK_NONE;
+	for_each_domain(busiest_cpu, sd) {
+		if (!(sd->flags & SD_LOAD_BALANCE))
+			/* no more domains to search */
+			break;
 
-	/* Search for an sd spanning us and the target CPU. */
-	for_each_domain(target_cpu, sd) {
-		if ((sd->flags & SD_LOAD_BALANCE) &&
-			cpu_isset(busiest_cpu, sd->span)) {
-				sd = tmp;
-				break;
-		}
-	}
+		schedstat_inc(sd, alb_cnt);
 
-	if (unlikely(sd == NULL))
-		goto out;
+		cpu_group = sd->groups;
+		do {
+			for_each_cpu_mask(cpu, cpu_group->cpumask) {
+				if (busiest_rq->nr_running <= 1)
+					/* no more tasks left to move */
+					return;
+				if (cpu_isset(cpu, visited_cpus))
+					continue;
+				cpu_set(cpu, visited_cpus);
+				if (!cpu_and_siblings_are_idle(cpu) || cpu == busiest_cpu)
+					continue;
 
-	schedstat_inc(sd, alb_cnt);
+				target_rq = cpu_rq(cpu);
+				/*
+				 * This condition is "impossible", if it occurs
+				 * we need to fix it.  Originally reported by
+				 * Bjorn Helgaas on a 128-cpu setup.
+				 */
+				BUG_ON(busiest_rq == target_rq);
 
-	if (move_tasks(target_rq, target_cpu, busiest_rq, 1, sd, SCHED_IDLE, NULL))
-		schedstat_inc(sd, alb_pushed);
-	else
-		schedstat_inc(sd, alb_failed);
-out:
-	spin_unlock(&target_rq->lock);
+				/* move a task from busiest_rq to target_rq */
+				double_lock_balance(busiest_rq, target_rq);
+				if (move_tasks(target_rq, cpu, busiest_rq,
+						1, sd, SCHED_IDLE)) {
+					schedstat_inc(sd, alb_pushed);
+				} else {
+					schedstat_inc(sd, alb_failed);
+				}
+				spin_unlock(&target_rq->lock);
+			}
+			cpu_group = cpu_group->next;
+		} while (cpu_group != sd->groups);
+	}
 }
 
 /*
@@ -2185,29 +1248,23 @@
 /* Don't have all balancing operations going off at once */
 #define CPU_OFFSET(cpu) (HZ * cpu / NR_CPUS)
 
-static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
-			   enum idle_type idle)
+void rebalance_tick(int this_cpu, runqueue_t *this_rq, enum idle_type idle)
 {
 	unsigned long old_load, this_load;
 	unsigned long j = jiffies + CPU_OFFSET(this_cpu);
 	struct sched_domain *sd;
-	int i;
 
-	this_load = this_rq->nr_running * SCHED_LOAD_SCALE;
 	/* Update our load */
-	for (i = 0; i < 3; i++) {
-		unsigned long new_load = this_load;
-		int scale = 1 << i;
-		old_load = this_rq->cpu_load[i];
-		/*
-		 * Round up the averaging division if load is increasing. This
-		 * prevents us from getting stuck on 9 if the load is 10, for
-		 * example.
-		 */
-		if (new_load > old_load)
-			new_load += scale-1;
-		this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) / scale;
-	}
+	old_load = this_rq->cpu_load;
+	this_load = this_rq->nr_running * SCHED_LOAD_SCALE;
+	/*
+	 * Round up the averaging division if load is increasing. This
+	 * prevents us from getting stuck on 9 if the load is 10, for
+	 * example.
+	 */
+	if (this_load > old_load)
+		old_load++;
+	this_rq->cpu_load = (old_load + this_load) / 2;
 
 	for_each_domain(this_cpu, sd) {
 		unsigned long interval;
@@ -2233,22 +1290,13 @@
 		}
 	}
 }
-#else
-/*
- * on UP we do not need to balance between CPUs:
- */
-static inline void rebalance_tick(int cpu, runqueue_t *rq, enum idle_type idle)
-{
-}
-static inline void idle_balance(int cpu, runqueue_t *rq)
-{
-}
 #endif
 
-static inline int wake_priority_sleeper(runqueue_t *rq)
+#ifdef CONFIG_SCHED_SMT
+int wake_priority_sleeper(runqueue_t *rq)
 {
 	int ret = 0;
-#ifdef CONFIG_SCHED_SMT
+
 	spin_lock(&rq->lock);
 	/*
 	 * If an SMT sibling task has been put to sleep for priority
@@ -2259,26 +1307,16 @@
 		ret = 1;
 	}
 	spin_unlock(&rq->lock);
-#endif
+
 	return ret;
 }
+#endif
 
 DEFINE_PER_CPU(struct kernel_stat, kstat);
 
 EXPORT_PER_CPU_SYMBOL(kstat);
 
 /*
- * This is called on clock ticks and on context switches.
- * Bank in p->sched_time the ns elapsed since the last tick or switch.
- */
-static inline void update_cpu_clock(task_t *p, runqueue_t *rq,
-				    unsigned long long now)
-{
-	unsigned long long last = max(p->timestamp, rq->timestamp_last_tick);
-	p->sched_time += now - last;
-}
-
-/*
  * Return current->sched_time plus any more ns on the sched_clock
  * that have not yet been banked.
  */
@@ -2294,22 +1332,6 @@
 }
 
 /*
- * We place interactive tasks back into the active array, if possible.
- *
- * To guarantee that this does not starve expired tasks we ignore the
- * interactivity of a task if the first expired task had to wait more
- * than a 'reasonable' amount of time. This deadline timeout is
- * load-dependent, as the frequency of array switched decreases with
- * increasing number of running tasks. We also ignore the interactivity
- * if a better static_prio task has expired:
- */
-#define EXPIRED_STARVING(rq) \
-	((STARVATION_LIMIT && ((rq)->expired_timestamp && \
-		(jiffies - (rq)->expired_timestamp >= \
-			STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
-			((rq)->curr->static_prio > (rq)->best_expired_prio))
-
-/*
  * Account user cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @hardirq_offset: the offset to subtract from hardirq_count()
@@ -2393,7 +1415,6 @@
  */
 void scheduler_tick(void)
 {
-	int cpu = smp_processor_id();
 	runqueue_t *rq = this_rq();
 	task_t *p = current;
 	unsigned long long now = sched_clock();
@@ -2402,100 +1423,17 @@
 
 	rq->timestamp_last_tick = now;
 
-	if (p == rq->idle) {
-		if (wake_priority_sleeper(rq))
-			goto out;
-		rebalance_tick(cpu, rq, SCHED_IDLE);
-		return;
-	}
-
-	/* Task might have expired already, but not scheduled off yet */
-	if (p->array != rq->active) {
-		set_tsk_need_resched(p);
-		goto out;
-	}
-	spin_lock(&rq->lock);
-	/*
-	 * The task was running during this tick - update the
-	 * time slice counter. Note: we do not update a thread's
-	 * priority until it either goes to sleep or uses up its
-	 * timeslice. This makes it possible for interactive tasks
-	 * to use up their timeslices at their highest priority levels.
-	 */
-	if (rt_task(p)) {
-		/*
-		 * RR tasks need a special form of timeslice management.
-		 * FIFO tasks have no timeslices.
-		 */
-		if ((p->policy == SCHED_RR) && !--p->time_slice) {
-			p->time_slice = task_timeslice(p);
-			p->first_time_slice = 0;
-			set_tsk_need_resched(p);
-
-			/* put it at the end of the queue: */
-			requeue_task(p, rq->active);
-		}
-		goto out_unlock;
-	}
-	if (!--p->time_slice) {
-		dequeue_task(p, rq->active);
-		set_tsk_need_resched(p);
-		p->prio = effective_prio(p);
-		p->time_slice = task_timeslice(p);
-		p->first_time_slice = 0;
-
-		if (!rq->expired_timestamp)
-			rq->expired_timestamp = jiffies;
-		if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
-			enqueue_task(p, rq->expired);
-			if (p->static_prio < rq->best_expired_prio)
-				rq->best_expired_prio = p->static_prio;
-		} else
-			enqueue_task(p, rq->active);
-	} else {
-		/*
-		 * Prevent a too long timeslice allowing a task to monopolize
-		 * the CPU. We do this by splitting up the timeslice into
-		 * smaller pieces.
-		 *
-		 * Note: this does not mean the task's timeslices expire or
-		 * get lost in any way, they just might be preempted by
-		 * another task of equal priority. (one with higher
-		 * priority would have preempted this task already.) We
-		 * requeue this task to the end of the list on this priority
-		 * level, which is in essence a round-robin of tasks with
-		 * equal priority.
-		 *
-		 * This only applies to tasks in the interactive
-		 * delta range with at least TIMESLICE_GRANULARITY to requeue.
-		 */
-		if (TASK_INTERACTIVE(p) && !((task_timeslice(p) -
-			p->time_slice) % TIMESLICE_GRANULARITY(p)) &&
-			(p->time_slice >= TIMESLICE_GRANULARITY(p)) &&
-			(p->array == rq->active)) {
-
-			requeue_task(p, rq->active);
-			set_tsk_need_resched(p);
-		}
-	}
-out_unlock:
-	spin_unlock(&rq->lock);
-out:
-	rebalance_tick(cpu, rq, NOT_IDLE);
+	sched_drvp->tick(p, rq, now);
 }
 
 #ifdef CONFIG_SCHED_SMT
-static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
+void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
 {
-	struct sched_domain *tmp, *sd = NULL;
+	struct sched_domain *sd = this_rq->sd;
 	cpumask_t sibling_map;
 	int i;
 
-	for_each_domain(this_cpu, tmp)
-		if (tmp->flags & SD_SHARE_CPUPOWER)
-			sd = tmp;
-
-	if (!sd)
+	if (!(sd->flags & SD_SHARE_CPUPOWER))
 		return;
 
 	/*
@@ -2534,19 +1472,14 @@
 	 */
 }
 
-static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
+int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
 {
-	struct sched_domain *tmp, *sd = NULL;
+	struct sched_domain *sd = this_rq->sd;
 	cpumask_t sibling_map;
-	prio_array_t *array;
 	int ret = 0, i;
 	task_t *p;
 
-	for_each_domain(this_cpu, tmp)
-		if (tmp->flags & SD_SHARE_CPUPOWER)
-			sd = tmp;
-
-	if (!sd)
+	if (!(sd->flags & SD_SHARE_CPUPOWER))
 		return 0;
 
 	/*
@@ -2565,13 +1498,8 @@
 	 */
 	if (!this_rq->nr_running)
 		goto out_unlock;
-	array = this_rq->active;
-	if (!array->nr_active)
-		array = this_rq->expired;
-	BUG_ON(!array->nr_active);
 
-	p = list_entry(array->queue[sched_find_first_bit(array->bitmap)].next,
-		task_t, run_list);
+	p = sched_drvp->head_of_queue(&this_rq->qu);
 
 	for_each_cpu_mask(i, sibling_map) {
 		runqueue_t *smt_rq = cpu_rq(i);
@@ -2585,9 +1513,7 @@
 		 * task from using an unfair proportion of the
 		 * physical cpu's resources. -ck
 		 */
-		if (((smt_curr->time_slice * (100 - sd->per_cpu_gain) / 100) >
-			task_timeslice(p) || rt_task(smt_curr)) &&
-			p->mm && smt_curr->mm && !rt_task(p))
+		if (sched_drvp->dependent_sleeper_trumps(smt_curr, p, sd))
 				ret = 1;
 
 		/*
@@ -2595,9 +1521,7 @@
 		 * or wake it up if it has been put to sleep for priority
 		 * reasons.
 		 */
-		if ((((p->time_slice * (100 - sd->per_cpu_gain) / 100) >
-			task_timeslice(smt_curr) || rt_task(p)) &&
-			smt_curr->mm && p->mm && !rt_task(smt_curr)) ||
+		if (sched_drvp->dependent_sleeper_trumps(p, smt_curr, sd) ||
 			(smt_curr == smt_rq->idle && smt_rq->nr_running))
 				resched_task(smt_curr);
 	}
@@ -2606,15 +1530,6 @@
 		spin_unlock(&cpu_rq(i)->lock);
 	return ret;
 }
-#else
-static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
-{
-}
-
-static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
-{
-	return 0;
-}
 #endif
 
 #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
@@ -2654,14 +1569,8 @@
  */
 asmlinkage void __sched schedule(void)
 {
-	long *switch_count;
-	task_t *prev, *next;
+	task_t *prev;
 	runqueue_t *rq;
-	prio_array_t *array;
-	struct list_head *queue;
-	unsigned long long now;
-	unsigned long run_time;
-	int cpu, idx;
 
 	/*
 	 * Test if we are atomic.  Since do_exit() needs to call into
@@ -2695,145 +1604,8 @@
 	}
 
 	schedstat_inc(rq, sched_cnt);
-	now = sched_clock();
-	if (likely((long long)now - prev->timestamp < NS_MAX_SLEEP_AVG)) {
-		run_time = now - prev->timestamp;
-		if (unlikely((long long)now - prev->timestamp < 0))
-			run_time = 0;
-	} else
-		run_time = NS_MAX_SLEEP_AVG;
-
-	/*
-	 * Tasks charged proportionately less run_time at high sleep_avg to
-	 * delay them losing their interactive status
-	 */
-	run_time /= (CURRENT_BONUS(prev) ? : 1);
-
-	spin_lock_irq(&rq->lock);
-
-	if (unlikely(prev->flags & PF_DEAD))
-		prev->state = EXIT_DEAD;
-
-	switch_count = &prev->nivcsw;
-	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
-		switch_count = &prev->nvcsw;
-		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
-				unlikely(signal_pending(prev))))
-			prev->state = TASK_RUNNING;
-		else {
-			if (prev->state == TASK_UNINTERRUPTIBLE)
-				rq->nr_uninterruptible++;
-			deactivate_task(prev, rq);
-		}
-	}
-
-	cpu = smp_processor_id();
-	if (unlikely(!rq->nr_running)) {
-go_idle:
-		idle_balance(cpu, rq);
-		if (!rq->nr_running) {
-			next = rq->idle;
-			rq->expired_timestamp = 0;
-			wake_sleeping_dependent(cpu, rq);
-			/*
-			 * wake_sleeping_dependent() might have released
-			 * the runqueue, so break out if we got new
-			 * tasks meanwhile:
-			 */
-			if (!rq->nr_running)
-				goto switch_tasks;
-		}
-	} else {
-		if (dependent_sleeper(cpu, rq)) {
-			next = rq->idle;
-			goto switch_tasks;
-		}
-		/*
-		 * dependent_sleeper() releases and reacquires the runqueue
-		 * lock, hence go into the idle loop if the rq went
-		 * empty meanwhile:
-		 */
-		if (unlikely(!rq->nr_running))
-			goto go_idle;
-	}
 
-	array = rq->active;
-	if (unlikely(!array->nr_active)) {
-		/*
-		 * Switch the active and expired arrays.
-		 */
-		schedstat_inc(rq, sched_switch);
-		rq->active = rq->expired;
-		rq->expired = array;
-		array = rq->active;
-		rq->expired_timestamp = 0;
-		rq->best_expired_prio = MAX_PRIO;
-	}
-
-	idx = sched_find_first_bit(array->bitmap);
-	queue = array->queue + idx;
-	next = list_entry(queue->next, task_t, run_list);
-
-	if (!rt_task(next) && next->activated > 0) {
-		unsigned long long delta = now - next->timestamp;
-		if (unlikely((long long)now - next->timestamp < 0))
-			delta = 0;
-
-		if (next->activated == 1)
-			delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
-
-		array = next->array;
-		dequeue_task(next, array);
-		recalc_task_prio(next, next->timestamp + delta);
-		enqueue_task(next, array);
-	}
-	next->activated = 0;
-switch_tasks:
-	if (next == rq->idle)
-		schedstat_inc(rq, sched_goidle);
-	prefetch(next);
-	clear_tsk_need_resched(prev);
-	rcu_qsctr_inc(task_cpu(prev));
-
-	update_cpu_clock(prev, rq, now);
-
-	prev->sleep_avg -= run_time;
-	if ((long)prev->sleep_avg <= 0)
-		prev->sleep_avg = 0;
-	prev->timestamp = prev->last_ran = now;
-
-	sched_info_switch(prev, next);
-	if (likely(prev != next)) {
-		next->timestamp = now;
-		rq->nr_switches++;
-		rq->curr = next;
-		++*switch_count;
-		set_task_on_cpu(next, 1);
-		/*
-		 * We release the runqueue lock and enable interrupts,
-		 * but preemption is disabled until the end of the
-		 * context-switch:
-		 */
-		spin_unlock_irq(&rq->lock);
-		/*
-		 * Switch kernel stack and register state. Updates
-		 * 'prev' to point to the real previous task.
-		 *
-		 * Here we are still in the old task, 'prev' is current,
-		 * 'next' is the task we are going to switch to:
-		 */
-		switch_to(prev, next, prev);
-		barrier();
-		/*
-		 * Here we are in the new task's stack already. 'prev'
-		 * has been updated by switch_to() to point to the task
-		 * we just switched from, 'next' is invalid.
-		 *
-		 * do the MM switch and clean up:
-		 */
-		__schedule_tail(prev);
-	} else
-		spin_unlock_irq(&rq->lock);
+	sched_drvp->schedule();
 
 	prev = current;
 	if (unlikely(reacquire_kernel_lock(prev) < 0))
@@ -3244,9 +2016,7 @@
 void set_user_nice(task_t *p, long nice)
 {
 	unsigned long flags;
-	prio_array_t *array;
 	runqueue_t *rq;
-	int old_prio, new_prio, delta;
 
 	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
 		return;
@@ -3265,25 +2035,8 @@
 		p->static_prio = NICE_TO_PRIO(nice);
 		goto out_unlock;
 	}
-	array = p->array;
-	if (array)
-		dequeue_task(p, array);
-
-	old_prio = p->prio;
-	new_prio = NICE_TO_PRIO(nice);
-	delta = new_prio - old_prio;
-	p->static_prio = NICE_TO_PRIO(nice);
-	p->prio += delta;
 
-	if (array) {
-		enqueue_task(p, array);
-		/*
-		 * If the task increased its priority or is running and
-		 * lowered its priority, then reschedule its CPU:
-		 */
-		if (delta < 0 || (delta > 0 && task_on_cpu(rq, p)))
-			resched_task(rq->curr);
-	}
+	sched_drvp->set_normal_task_nice(p, nice);
 out_unlock:
 	task_rq_unlock(rq, &flags);
 }
@@ -3404,9 +2157,9 @@
 }
 
 /* Actually do priority change: must hold rq lock. */
-static void __setscheduler(struct task_struct *p, int policy, int prio)
+void __setscheduler(struct task_struct *p, int policy, int prio)
 {
-	BUG_ON(p->array);
+	BUG_ON(task_is_queued(p));
 	p->policy = policy;
 	p->rt_priority = prio;
 	if (policy != SCHED_NORMAL)
@@ -3425,8 +2178,7 @@
 int sched_setscheduler(struct task_struct *p, int policy, struct sched_param *param)
 {
 	int retval;
-	int oldprio, oldpolicy = -1;
-	prio_array_t *array;
+	int oldpolicy = -1;
 	unsigned long flags;
 	runqueue_t *rq;
 
@@ -3469,24 +2221,9 @@
 		task_rq_unlock(rq, &flags);
 		goto recheck;
 	}
-	array = p->array;
-	if (array)
-		deactivate_task(p, rq);
-	oldprio = p->prio;
-	__setscheduler(p, policy, param->sched_priority);
-	if (array) {
-		__activate_task(p, rq);
-		/*
-		 * Reschedule if we are currently running on this runqueue and
-		 * our priority decreased, or if we are not currently running on
-		 * this runqueue and our priority is higher than the current's
-		 */
-		if (task_on_cpu(rq, p)) {
-			if (p->prio > oldprio)
-				resched_task(rq->curr);
-		} else if (TASK_PREEMPTS_CURR(p, rq))
-			resched_task(rq->curr);
-	}
+
+	sched_drvp->setscheduler(p, policy, param->sched_priority);
+
 	task_rq_unlock(rq, &flags);
 	return 0;
 }
@@ -3744,48 +2481,7 @@
  */
 asmlinkage long sys_sched_yield(void)
 {
-	runqueue_t *rq = this_rq_lock();
-	prio_array_t *array = current->array;
-	prio_array_t *target = rq->expired;
-
-	schedstat_inc(rq, yld_cnt);
-	/*
-	 * We implement yielding by moving the task into the expired
-	 * queue.
-	 *
-	 * (special rule: RT tasks will just roundrobin in the active
-	 *  array.)
-	 */
-	if (rt_task(current))
-		target = rq->active;
-
-	if (current->array->nr_active == 1) {
-		schedstat_inc(rq, yld_act_empty);
-		if (!rq->expired->nr_active)
-			schedstat_inc(rq, yld_both_empty);
-	} else if (!rq->expired->nr_active)
-		schedstat_inc(rq, yld_exp_empty);
-
-	if (array != target) {
-		dequeue_task(current, array);
-		enqueue_task(current, target);
-	} else
-		/*
-		 * requeue_task is cheaper so perform that if possible.
-		 */
-		requeue_task(current, array);
-
-	/*
-	 * Since we are going to call schedule() anyway, there's
-	 * no need to preempt or enable interrupts:
-	 */
-	__release(rq->lock);
-	_raw_spin_unlock(&rq->lock);
-	preempt_enable_no_resched();
-
-	schedule();
-
-	return 0;
+	return sched_drvp->sys_yield();
 }
 
 static inline void __cond_resched(void)
@@ -3859,8 +2555,7 @@
  */
 void __sched yield(void)
 {
-	set_current_state(TASK_RUNNING);
-	sys_sched_yield();
+	sched_drvp->yield();
 }
 
 EXPORT_SYMBOL(yield);
@@ -4081,16 +2776,13 @@
 	runqueue_t *rq = cpu_rq(cpu);
 	unsigned long flags;
 
-	idle->sleep_avg = 0;
-	idle->array = NULL;
-	idle->prio = MAX_PRIO;
+	sched_drvp->init_idle(idle, cpu);
 	idle->state = TASK_RUNNING;
 	idle->cpus_allowed = cpumask_of_cpu(cpu);
 	set_task_cpu(idle, cpu);
 
 	spin_lock_irqsave(&rq->lock, flags);
 	rq->curr = rq->idle = idle;
-	set_task_on_cpu(idle, 1);
 	set_tsk_need_resched(idle);
 	spin_unlock_irqrestore(&rq->lock, flags);
 
@@ -4199,21 +2891,10 @@
 	if (!cpu_isset(dest_cpu, p->cpus_allowed))
 		goto out;
 
-	set_task_cpu(p, dest_cpu);
-	if (p->array) {
-		/*
-		 * Sync timestamp with rq_dest's before activating.
-		 * The same thing could be achieved by doing this step
-		 * afterwards, and pretending it was a local activate.
-		 * This way is cleaner and logically correct.
-		 */
-		p->timestamp = p->timestamp - rq_src->timestamp_last_tick
-				+ rq_dest->timestamp_last_tick;
-		deactivate_task(p, rq_src);
-		activate_task(p, rq_dest, 0);
-		if (TASK_PREEMPTS_CURR(p, rq_dest))
-			resched_task(rq_dest->curr);
-	}
+	if (task_is_queued(p))
+		sched_drvp->migrate_queued_task(p, dest_cpu);
+	else
+		set_task_cpu(p, dest_cpu);
 
 out:
 	double_rq_unlock(rq_src, rq_dest);
@@ -4263,9 +2944,17 @@
 		req = list_entry(head->next, migration_req_t, list);
 		list_del_init(head->next);
 
-		spin_unlock(&rq->lock);
-		__migrate_task(req->task, cpu, req->dest_cpu);
-		local_irq_enable();
+		if (req->type == REQ_MOVE_TASK) {
+			spin_unlock(&rq->lock);
+			__migrate_task(req->task, cpu, req->dest_cpu);
+			local_irq_enable();
+		} else if (req->type == REQ_SET_DOMAIN) {
+			rq->sd = req->sd;
+			spin_unlock_irq(&rq->lock);
+		} else {
+			spin_unlock_irq(&rq->lock);
+			WARN_ON(1);
+		}
 
 		complete(&req->done);
 	}
@@ -4363,7 +3052,6 @@
 {
 	int cpu = smp_processor_id();
 	runqueue_t *rq = this_rq();
-	struct task_struct *p = rq->idle;
 	unsigned long flags;
 
 	/* cpu has to be offline */
@@ -4374,9 +3062,7 @@
 	 */
 	spin_lock_irqsave(&rq->lock, flags);
 
-	__setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
-	/* Add idle task to _front_ of it's priority queue */
-	__activate_idle_task(p, rq);
+	sched_drvp->set_select_idle_first(rq);
 
 	spin_unlock_irqrestore(&rq->lock, flags);
 }
@@ -4395,7 +3081,7 @@
 	mmdrop(mm);
 }
 
-static void migrate_dead(unsigned int dead_cpu, task_t *tsk)
+void migrate_dead(unsigned int dead_cpu, task_t *tsk)
 {
 	struct runqueue *rq = cpu_rq(dead_cpu);
 
@@ -4420,20 +3106,9 @@
 }
 
 /* release_task() removes task from tasklist, so we won't find dead tasks. */
-static void migrate_dead_tasks(unsigned int dead_cpu)
+static inline void migrate_dead_tasks(unsigned int dead_cpu)
 {
-	unsigned arr, i;
-	struct runqueue *rq = cpu_rq(dead_cpu);
-
-	for (arr = 0; arr < 2; arr++) {
-		for (i = 0; i < MAX_PRIO; i++) {
-			struct list_head *list = &rq->arrays[arr].queue[i];
-			while (!list_empty(list))
-				migrate_dead(dead_cpu,
-					     list_entry(list->next, task_t,
-							run_list));
-		}
-	}
+	sched_drvp->migrate_dead_tasks(dead_cpu);
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
@@ -4480,9 +3155,7 @@
 		rq->migration_thread = NULL;
 		/* Idle task back to normal (off runqueue, low prio) */
 		rq = task_rq_lock(rq->idle, &flags);
-		deactivate_task(rq->idle, rq);
-		rq->idle->static_prio = MAX_PRIO;
-		__setscheduler(rq->idle, SCHED_NORMAL, 0);
+		sched_drvp->set_select_idle_last(rq);
 		migrate_dead_tasks(cpu);
 		task_rq_unlock(rq, &flags);
 		migrate_nr_uninterruptible(rq);
@@ -4496,6 +3169,7 @@
 			migration_req_t *req;
 			req = list_entry(rq->migration_queue.next,
 					 migration_req_t, list);
+			BUG_ON(req->type != REQ_MOVE_TASK);
 			list_del_init(&req->list);
 			complete(&req->done);
 		}
@@ -4532,11 +3206,6 @@
 {
 	int level = 0;
 
-	if (!sd) {
-		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
-		return;
-	}
-
 	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
 
 	do {
@@ -4619,197 +3288,37 @@
 #define sched_domain_debug(sd, cpu) {}
 #endif
 
-#if defined(CONFIG_DEBUG_KERNEL) && defined(CONFIG_SYSCTL)
-static struct ctl_table sd_ctl_dir[] = {
-	{1, "sched_domain", NULL, 0, 0755, NULL, },
-	{0,},
-};
-
-static struct ctl_table sd_ctl_root[] = {
-	{1, "kernel", NULL, 0, 0755, sd_ctl_dir, },
-	{0,},
-};
-
-static char *sched_strdup(char *str)
-{
-	int n = strlen(str)+1;
-	char *s = kmalloc(n, GFP_KERNEL);
-	if (!s)
-		return NULL;
-	return strcpy(s, str);
-}
-
-static struct ctl_table *sd_alloc_ctl_entry(int n)
-{
-	struct ctl_table *entry =
-		kmalloc(n * sizeof(struct ctl_table), GFP_KERNEL);
-	BUG_ON(!entry);
-	memset(entry, 0, n * sizeof(struct ctl_table));
-	return entry;
-}
-
-static void set_table_entry(struct ctl_table *entry, int ctl_name,
-			const char *procname, void *data, int maxlen,
-			mode_t mode, proc_handler *proc_handler)
-{
-	entry->ctl_name = ctl_name;
-	entry->procname = procname;
-	entry->data = data;
-	entry->maxlen = maxlen;
-	entry->mode = mode;
-	entry->proc_handler = proc_handler;
-}
-
-static struct ctl_table *
-sd_alloc_ctl_domain_table(struct sched_domain *sd)
-{
-	struct ctl_table *table;
-	table = sd_alloc_ctl_entry(14);
-
-	set_table_entry(&table[0], 1, "min_interval", &sd->min_interval,
-		sizeof(long), 0644, proc_doulongvec_minmax);
-	set_table_entry(&table[1], 2, "max_interval", &sd->max_interval,
-		sizeof(long), 0644, proc_doulongvec_minmax);
-	set_table_entry(&table[2], 3, "busy_idx", &sd->busy_idx,
-		sizeof(int), 0644, proc_dointvec_minmax);
-	set_table_entry(&table[3], 4, "idle_idx", &sd->idle_idx,
-		sizeof(int), 0644, proc_dointvec_minmax);
-	set_table_entry(&table[4], 5, "newidle_idx", &sd->newidle_idx,
-		sizeof(int), 0644, proc_dointvec_minmax);
-	set_table_entry(&table[5], 6, "wake_idx", &sd->wake_idx,
-		sizeof(int), 0644, proc_dointvec_minmax);
-	set_table_entry(&table[6], 7, "forkexec_idx", &sd->forkexec_idx,
-		sizeof(int), 0644, proc_dointvec_minmax);
-	set_table_entry(&table[7], 8, "busy_factor", &sd->busy_factor,
-		sizeof(int), 0644, proc_dointvec_minmax);
-	set_table_entry(&table[8], 9, "imbalance_pct", &sd->imbalance_pct,
-		sizeof(int), 0644, proc_dointvec_minmax);
-	set_table_entry(&table[9], 10, "cache_hot_time", &sd->cache_hot_time,
-		sizeof(long long), 0644, proc_doulongvec_minmax);
-	set_table_entry(&table[10], 11, "cache_nice_tries", &sd->cache_nice_tries,
-		sizeof(int), 0644, proc_dointvec_minmax);
-	set_table_entry(&table[11], 12, "per_cpu_gain", &sd->per_cpu_gain,
-		sizeof(int), 0644, proc_dointvec_minmax);
-	set_table_entry(&table[12], 13, "flags", &sd->flags,
-		sizeof(int), 0644, proc_dointvec_minmax);
-	return table;
-}
-
-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
-{
-	struct sched_domain *sd;
-	int domain_num = 0, i;
-	struct ctl_table *entry, *table;
-	char buf[32];
-	for_each_domain(cpu, sd)
-		domain_num++;
-	entry = table = sd_alloc_ctl_entry(domain_num + 1);
-
-	i = 0;
-	for_each_domain(cpu, sd) {
-		snprintf(buf, 32, "domain%d", i);
-		entry->ctl_name = i + 1;
-		entry->procname = sched_strdup(buf);
-		entry->mode = 0755;
-		entry->child = sd_alloc_ctl_domain_table(sd);
-		entry++;
-		i++;
-	}
-	return table;
-}
-
-static struct ctl_table_header *sd_sysctl_header;
-static void init_sched_domain_sysctl(void)
-{
-	int i, cpu_num = num_online_cpus();
-	char buf[32];
-	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
-
-	sd_ctl_dir[0].child = entry;
-
-	for (i = 0; i < cpu_num; i++, entry++) {
-		snprintf(buf, 32, "cpu%d", i);
-		entry->ctl_name = i + 1;
-		entry->procname = sched_strdup(buf);
-		entry->mode = 0755;
-		entry->child = sd_alloc_ctl_cpu_table(i);
-	}
-	sd_sysctl_header = register_sysctl_table(sd_ctl_root, 0);
-}
-#else
-static void init_sched_domain_sysctl(void)
-{
-}
-#endif
-
-static int __devinit sd_degenerate(struct sched_domain *sd)
-{
-	if (cpus_weight(sd->span) == 1)
-		return 1;
-
-	/* Following flags need at least 2 groups */
-	if (sd->flags & (SD_LOAD_BALANCE |
-			 SD_BALANCE_NEWIDLE |
-			 SD_BALANCE_FORK |
-			 SD_BALANCE_EXEC)) {
-		if (sd->groups != sd->groups->next)
-			return 0;
-	}
-
-	/* Following flags don't use groups */
-	if (sd->flags & (SD_WAKE_IDLE |
-			 SD_WAKE_AFFINE |
-			 SD_WAKE_BALANCE))
-		return 0;
-
-	return 1;
-}
-
-static int __devinit sd_parent_degenerate(struct sched_domain *sd,
-						struct sched_domain *parent)
-{
-	unsigned long cflags = sd->flags, pflags = parent->flags;
-
-	if (sd_degenerate(parent))
-		return 1;
-
-	if (!cpus_equal(sd->span, parent->span))
-		return 0;
-
-	/* Does parent contain flags not in child? */
-	/* WAKE_BALANCE is a subset of WAKE_AFFINE */
-	if (cflags & SD_WAKE_AFFINE)
-		pflags &= ~SD_WAKE_BALANCE;
-	if ((~sd->flags) & parent->flags)
-		return 0;
-
-	return 1;
-}
-
 /*
  * Attach the domain 'sd' to 'cpu' as its base domain.  Callers must
  * hold the hotplug lock.
  */
 void __devinit cpu_attach_domain(struct sched_domain *sd, int cpu)
 {
+	migration_req_t req;
+	unsigned long flags;
 	runqueue_t *rq = cpu_rq(cpu);
-	struct sched_domain *tmp;
+	int local = 1;
 
-	/* Remove the sched domains which do not contribute to scheduling. */
-	for (tmp = sd; tmp; tmp = tmp->parent) {
-		struct sched_domain *parent = tmp->parent;
-		if (!parent)
-			break;
-		if (sd_parent_degenerate(tmp, parent))
-			tmp->parent = parent->parent;
-	}
+	sched_domain_debug(sd, cpu);
 
-	if (sd && sd_degenerate(sd))
-		sd = sd->parent;
+	spin_lock_irqsave(&rq->lock, flags);
 
-	sched_domain_debug(sd, cpu);
+	if (cpu == smp_processor_id() || !cpu_online(cpu)) {
+		rq->sd = sd;
+	} else {
+		init_completion(&req.done);
+		req.type = REQ_SET_DOMAIN;
+		req.sd = sd;
+		list_add(&req.list, &rq->migration_queue);
+		local = 0;
+	}
+
+	spin_unlock_irqrestore(&rq->lock, flags);
 
-	rq->sd = sd;
+	if (!local) {
+		wake_up_process(rq->migration_thread);
+		wait_for_completion(&req.done);
+	}
 }
 
 /* cpus with isolated domains */
@@ -4954,7 +3463,7 @@
 	cpus_and(cpu_default_map, cpu_default_map, cpu_online_map);
 
 	/*
-	 * Set up domains. Isolated domains just stay on the NULL domain.
+	 * Set up domains. Isolated domains just stay on the dummy domain.
 	 */
 	for_each_cpu_mask(i, cpu_default_map) {
 		int group;
@@ -5067,11 +3576,18 @@
 
 #endif /* ARCH_HAS_SCHED_DOMAIN */
 
+/*
+ * Initial dummy domain for early boot and for hotplug cpu. Being static,
+ * it is initialized to zero, so all balancing flags are cleared which is
+ * what we want.
+ */
+static struct sched_domain sched_domain_dummy;
+
 #ifdef CONFIG_HOTPLUG_CPU
 /*
  * Force a reinitialization of the sched domains hierarchy.  The domains
  * and groups cannot be updated in place without racing with the balancing
- * code, so we temporarily attach all running cpus to the NULL domain
+ * code, so we temporarily attach all running cpus to a "dummy" domain
  * which will prevent rebalancing while the sched domains are recalculated.
  */
 static int update_sched_domains(struct notifier_block *nfb,
@@ -5083,8 +3599,7 @@
 	case CPU_UP_PREPARE:
 	case CPU_DOWN_PREPARE:
 		for_each_online_cpu(i)
-			cpu_attach_domain(NULL, i);
-		synchronize_kernel();
+			cpu_attach_domain(&sched_domain_dummy, i);
 		arch_destroy_sched_domains();
 		return NOTIFY_OK;
 
@@ -5114,7 +3629,6 @@
 	unlock_cpu_hotplug();
 	/* XXX: Theoretical race here - CPU may be hotplugged now */
 	hotcpu_notifier(update_sched_domains, 0);
-	init_sched_domain_sysctl();
 }
 #else
 void __init sched_init_smp(void)
@@ -5131,25 +3645,25 @@
 		&& addr < (unsigned long)__sched_text_end);
 }
 
+void set_oom_time_slice(struct task_struct *p, unsigned long t)
+{
+	sched_drvp->set_oom_time_slice(p, t);
+}
+
 void __init sched_init(void)
 {
 	runqueue_t *rq;
-	int i, j, k;
+	int i;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		prio_array_t *array;
+	sched_drvp->sched_init();
 
+	for (i = 0; i < NR_CPUS; i++) {
 		rq = cpu_rq(i);
 		spin_lock_init(&rq->lock);
-		rq->nr_running = 0;
-		rq->active = rq->arrays;
-		rq->expired = rq->arrays + 1;
-		rq->best_expired_prio = MAX_PRIO;
 
 #ifdef CONFIG_SMP
-		rq->sd = NULL;
-		for (j = 1; j < 3; j++)
-			rq->cpu_load[j] = 0;
+		rq->sd = &sched_domain_dummy;
+		rq->cpu_load = 0;
 		rq->active_balance = 0;
 		rq->push_cpu = 0;
 		rq->migration_thread = NULL;
@@ -5157,15 +3671,7 @@
 #endif
 		atomic_set(&rq->nr_iowait, 0);
 
-		for (j = 0; j < 2; j++) {
-			array = rq->arrays + j;
-			for (k = 0; k < MAX_PRIO; k++) {
-				INIT_LIST_HEAD(array->queue + k);
-				__clear_bit(k, array->bitmap);
-			}
-			// delimiter for bitsearch
-			__set_bit(MAX_PRIO, array->bitmap);
-		}
+		sched_drvp->init_runqueue_queue(&rq->qu);
 	}
 
 	/*
@@ -5209,27 +3715,11 @@
 void normalize_rt_tasks(void)
 {
 	struct task_struct *p;
-	prio_array_t *array;
-	unsigned long flags;
-	runqueue_t *rq;
 
 	read_lock_irq(&tasklist_lock);
 	for_each_process (p) {
-		if (!rt_task(p))
-			continue;
-
-		rq = task_rq_lock(p, &flags);
-
-		array = p->array;
-		if (array)
-			deactivate_task(p, task_rq(p));
-		__setscheduler(p, SCHED_NORMAL, 0);
-		if (array) {
-			__activate_task(p, task_rq(p));
-			resched_task(rq->curr);
-		}
-
-		task_rq_unlock(rq, &flags);
+		if (rt_task(p))
+			sched_drvp->normalize_rt_task(p);
 	}
 	read_unlock_irq(&tasklist_lock);
 }
diff -Naur linux-2.6.12-rc2-mm3/kernel/sched_cpustats.c linux-2.6.12-rc2-mm3-plugsched/kernel/sched_cpustats.c
--- linux-2.6.12-rc2-mm3/kernel/sched_cpustats.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.12-rc2-mm3-plugsched/kernel/sched_cpustats.c	2005-04-23 13:20:23.666974016 -0700
@@ -0,0 +1,404 @@
+/*
+ *  kernel/sched_cpustats.c
+ *
+ *  Kernel high-resolution cpu statistics for use by schedulers
+ *
+ *  Copyright (C) 2004  Aurema Pty Ltd
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/sched_pvt.h>
+
+#ifndef task_is_sinbinned
+#define task_is_sinbinned(p) (0)
+#endif
+
+DEFINE_PER_CPU(struct runq_cpustats, cpustats_runqs);
+
+void init_runq_cpustats(unsigned int cpu)
+{
+	struct runq_cpustats *csrq = &per_cpu(cpustats_runqs, cpu);
+
+	csrq->total_delay = 0;
+	csrq->total_sinbin = 0;
+	csrq->total_rt_delay = 0;
+	csrq->total_intr_delay = 0;
+	csrq->total_rt_intr_delay = 0;
+	csrq->total_fork_delay = 0;
+	cpu_rq(cpu)->timestamp_last_tick = INITIAL_CPUSTATS_TIMESTAMP;
+}
+
+#ifdef CONFIG_SMP
+unsigned long long adjusted_sched_clock(const task_t *p)
+{
+	return sched_clock() + (task_rq(p)->timestamp_last_tick - this_rq()->timestamp_last_tick);
+}
+
+void set_task_runq_cpustats(struct task_struct *p, unsigned int cpu)
+{
+	unsigned long long oldts = task_rq(p)->timestamp_last_tick;
+
+	RUNQ_CPUSTATS(p) = cpu_runq_cpustats(cpu);
+	TASK_CPUSTATS(p).timestamp += (cpu_rq(cpu)->timestamp_last_tick - oldts);
+}
+#endif
+
+void initialize_cpustats(struct task_struct *p, unsigned long long now)
+{
+	TASK_CPUSTATS(p).avg_sleep_per_cycle = 0;
+	TASK_CPUSTATS(p).avg_delay_per_cycle = 0;
+	TASK_CPUSTATS(p).avg_cpu_per_cycle = 0;
+	TASK_CPUSTATS(p).total_sleep = 0;
+	TASK_CPUSTATS(p).total_delay = 0;
+	TASK_CPUSTATS(p).total_sinbin = 0;
+	TASK_CPUSTATS(p).total_cpu = 0;
+	TASK_CPUSTATS(p).total_wake_ups = 0;
+	TASK_CPUSTATS(p).intr_wake_ups = 0;
+	TASK_CPUSTATS(p).avg_cycle_length = 0;
+	TASK_CPUSTATS(p).timestamp = now;
+	TASK_CPUSTATS(p).flags = CPUSTATS_JUST_FORKED_FL;
+}
+
+void delta_sleep_cpustats(struct task_struct *p, unsigned long long now)
+{
+	unsigned long long delta;
+
+	/* sched_clock() is not guaranteed monotonic */
+	if (now <= TASK_CPUSTATS(p).timestamp) {
+		TASK_CPUSTATS(p).timestamp = now;
+		return;
+	}
+
+	delta = now - TASK_CPUSTATS(p).timestamp;
+	TASK_CPUSTATS(p).timestamp = now;
+	TASK_CPUSTATS(p).avg_sleep_per_cycle += delta;
+	TASK_CPUSTATS(p).total_sleep += delta;
+}
+
+void delta_cpu_cpustats(struct task_struct *p, unsigned long long now)
+{
+	unsigned long long delta;
+
+	/* sched_clock() is not guaranteed monotonic */
+	if (now <= TASK_CPUSTATS(p).timestamp) {
+		TASK_CPUSTATS(p).timestamp = now;
+		return;
+	}
+
+	delta = now - TASK_CPUSTATS(p).timestamp;
+	TASK_CPUSTATS(p).timestamp = now;
+	TASK_CPUSTATS(p).avg_cpu_per_cycle += delta;
+	TASK_CPUSTATS(p).total_cpu += delta;
+}
+
+void delta_delay_cpustats(struct task_struct *p, unsigned long long now)
+{
+	unsigned long long delta;
+
+	/* sched_clock() is not guaranteed monotonic */
+	if (now <= TASK_CPUSTATS(p).timestamp) {
+		TASK_CPUSTATS(p).timestamp = now;
+		return;
+	}
+
+	delta = now - TASK_CPUSTATS(p).timestamp;
+	TASK_CPUSTATS(p).timestamp = now;
+	TASK_CPUSTATS(p).avg_delay_per_cycle += delta;
+	TASK_CPUSTATS(p).total_delay += delta;
+	RUNQ_CPUSTATS(p)->total_delay += delta;
+	if (task_is_sinbinned(p)) {
+		TASK_CPUSTATS(p).total_sinbin += delta;
+		RUNQ_CPUSTATS(p)->total_sinbin += delta;
+	} else if (rt_task(p)) { /* rt tasks are never sinbinned */
+		RUNQ_CPUSTATS(p)->total_rt_delay += delta;
+		if (TASK_CPUSTATS(p).flags & CPUSTATS_WOKEN_FOR_INTR_FL)
+			RUNQ_CPUSTATS(p)->total_rt_intr_delay += delta;
+	}
+	if (unlikely(TASK_CPUSTATS(p).flags & CPUSTATS_JUST_FORKED_FL)) {
+		RUNQ_CPUSTATS(p)->total_fork_delay += delta;
+		TASK_CPUSTATS(p).flags &= ~CPUSTATS_JUST_FORKED_FL;
+	}
+	if (TASK_CPUSTATS(p).flags & CPUSTATS_WOKEN_FOR_INTR_FL) {
+		RUNQ_CPUSTATS(p)->total_intr_delay += delta;
+		TASK_CPUSTATS(p).flags &= ~CPUSTATS_WOKEN_FOR_INTR_FL;
+	}
+}
+
+#define SCHED_AVG_ALPHA ((1 << SCHED_AVG_OFFSET) - 1)
+static inline void apply_sched_avg_decay(unsigned long long *valp)
+{
+	*valp *= SCHED_AVG_ALPHA;
+	*valp >>= SCHED_AVG_OFFSET;
+}
+
+static inline void decay_cpustats_for_cycle(struct task_struct *p)
+{
+	apply_sched_avg_decay(&TASK_CPUSTATS(p).avg_sleep_per_cycle);
+	apply_sched_avg_decay(&TASK_CPUSTATS(p).avg_delay_per_cycle);
+	apply_sched_avg_decay(&TASK_CPUSTATS(p).avg_cpu_per_cycle);
+	TASK_CPUSTATS(p).avg_cycle_length = TASK_CPUSTATS(p).avg_sleep_per_cycle +
+		TASK_CPUSTATS(p).avg_delay_per_cycle +
+		TASK_CPUSTATS(p).avg_cpu_per_cycle;
+	/* take short cut and avoid possible divide by zero below */
+	if (TASK_CPUSTATS(p).avg_cpu_per_cycle == 0)
+		TASK_CPUSTATS(p).cpu_usage_rate = 0;
+	else
+		TASK_CPUSTATS(p).cpu_usage_rate =  calc_proportion(TASK_CPUSTATS(p).avg_cpu_per_cycle, TASK_CPUSTATS(p).avg_cycle_length);
+}
+
+void update_cpustats_at_wake_up(struct task_struct *p, unsigned long long now)
+{
+	delta_sleep_cpustats(p, now);
+	if (in_interrupt()) {
+		TASK_CPUSTATS(p).intr_wake_ups++;
+		TASK_CPUSTATS(p).flags |= CPUSTATS_WOKEN_FOR_INTR_FL;
+	}
+	TASK_CPUSTATS(p).total_wake_ups++;
+	decay_cpustats_for_cycle(p);
+}
+
+void update_cpustats_at_end_of_ts(struct task_struct *p, unsigned long long now)
+{
+	delta_cpu_cpustats(p, now);
+	decay_cpustats_for_cycle(p);
+}
+
+#ifndef CONFIG_CPUSCHED_SPA
+int task_sched_cpustats(struct task_struct *p, char *buffer)
+{
+	struct task_cpustats stats;
+	unsigned long nvcsw, nivcsw; /* context switch counts */
+	int result;
+
+	read_lock(&tasklist_lock);
+	result = get_task_cpustats(p, &stats);
+	nvcsw = p->nvcsw;
+	nivcsw = p-> nivcsw;
+	read_unlock(&tasklist_lock);
+	if (result)
+		return sprintf(buffer, "Data unavailable\n");
+	return sprintf(buffer,
+		"%llu %llu %llu %llu %llu %llu %lu %lu @ %llu\n",
+		stats.total_sleep,
+		stats.total_cpu,
+		stats.total_delay,
+		stats.total_sinbin,
+		stats.total_wake_ups,
+		stats.intr_wake_ups,
+		nvcsw, nivcsw,
+		stats.timestamp);
+}
+
+int cpustats_read_proc(char *page, char **start, off_t off,
+				 int count, int *eof, void *data)
+{
+	int i;
+	int len = 0;
+	int avail = 1;
+	struct cpu_cpustats total = {0, };
+
+	for_each_online_cpu(i) {
+		struct cpu_cpustats stats;
+
+		if (get_cpu_cpustats(i, &stats) != 0) {
+			avail = 0;
+			break;
+		}
+		len += sprintf(page + len,
+		    "cpu%02d %llu %llu %llu %llu %llu %llu %llu %llu @ %llu\n", i,
+		stats.total_idle,
+		stats.total_busy,
+		stats.total_delay,
+		stats.total_rt_delay,
+		stats.total_intr_delay,
+		stats.total_rt_intr_delay,
+		stats.total_sinbin,
+		stats.nr_switches,
+		stats.timestamp);
+		total.total_idle += stats.total_idle;
+		total.total_busy += stats.total_busy;
+		total.total_delay += stats.total_delay;
+		total.total_rt_delay += stats.total_rt_delay;
+		total.total_intr_delay += stats.total_intr_delay;
+		total.total_rt_intr_delay += stats.total_rt_intr_delay;
+		total.total_sinbin += stats.total_sinbin;
+		total.nr_switches += stats.nr_switches;
+	}
+	if (avail)
+		len += sprintf(page + len, "total %llu %llu %llu %llu %llu %llu %llu %llu\n",
+			total.total_idle,
+			total.total_busy,
+			total.total_delay,
+			total.total_intr_delay,
+			total.total_rt_delay,
+			total.total_rt_intr_delay,
+			total.total_sinbin,
+			total.nr_switches);
+	else
+		len = sprintf(page, "Data unavailable\n");
+
+	if (len <= off+count) *eof = 1;
+	*start = page + off;
+	len -= off;
+	if (len > count) len = count;
+	if (len < 0) len = 0;
+
+	return len;
+}
+#endif
+
+static inline unsigned long long sched_div_64(unsigned long long a, unsigned long long b)
+{
+#if BITS_PER_LONG < 64
+	/*
+	 * Assume that there's no 64 bit divide available
+	 */
+	if (a < b)
+		return 0;
+	/*
+	 * Scale down until b less than 32 bits so that we can do
+	 * a divide using do_div()
+	 */
+	while (b > ULONG_MAX) { a >>= 1; b >>= 1; }
+
+	(void)do_div(a, (unsigned long)b);
+
+	return a;
+#else
+	return a / b;
+#endif
+}
+
+unsigned long long cpustats_avg_in_jiffies(unsigned long long avg)
+{
+	return sched_div_64(SCHED_AVG_RND(avg) * HZ, 1000000000);
+}
+
+/*
+ * CPU usage rate is estimated as a proportion of a CPU using fixed denominator
+ * rational numbers. The denominator must be less than 2^24 so that
+ * we can store the eb_yardstick in an atomic_t on sparc
+ */
+#if PROPORTION_OFFSET >= 24
+#error "PROPORTION_OFFSET must be less than 24"
+#endif
+#define PROPORTION_OVERFLOW ((1ULL << (64 - PROPORTION_OFFSET)) - 1)
+
+/*
+ * Convert a / b to a proportion in the range 0 to PROPORTION_ONE
+ * Requires a <= b or may get a divide by zero exception
+ */
+unsigned long calc_proportion(unsigned long long a, unsigned long long b)
+{
+	if (unlikely(a == b))
+		return PROPORTION_ONE;
+
+	while (a > PROPORTION_OVERFLOW) { a >>= 1; b >>= 1; }
+
+	return sched_div_64(a << PROPORTION_OFFSET, b);
+}
+
+/*
+ * Map the given proportion to an unsigned long in the specified range
+ * Requires range < PROPORTION_ONE to avoid overflow
+ */
+unsigned long map_proportion(unsigned long prop, unsigned long range)
+{
+	/* use 64 bits to help avoid overflow on 32 bit systems */
+	return ((unsigned long long)prop * (unsigned long long)range) >> PROPORTION_OFFSET;
+}
+
+/* WANT: proportion_to_ppt(ppt_to_proportion(x)) == x
+ */
+unsigned long proportion_to_ppt(unsigned long proportion)
+{
+	return ((unsigned long long)proportion * 2001ULL) >> (PROPORTION_OFFSET + 1);
+}
+
+unsigned long ppt_to_proportion(unsigned long ppt)
+{
+	return sched_div_64((unsigned long long)ppt * PROPORTION_ONE, 1000);
+}
+
+unsigned long avg_cpu_usage_rate(const struct task_struct *p)
+{
+	return TASK_CPUSTATS(p).cpu_usage_rate;
+}
+
+unsigned long avg_sleep_rate(const struct task_struct *p)
+{
+	/* take short cut and avoid possible divide by zero below */
+	if (TASK_CPUSTATS(p).avg_sleep_per_cycle == 0)
+		return 0;
+
+	return calc_proportion(TASK_CPUSTATS(p).avg_sleep_per_cycle, TASK_CPUSTATS(p).avg_cycle_length);
+}
+
+unsigned long avg_cpu_delay_rate(const struct task_struct *p)
+{
+	/* take short cut and avoid possible divide by zero below */
+	if (TASK_CPUSTATS(p).avg_delay_per_cycle == 0)
+		return 0;
+
+	return calc_proportion(TASK_CPUSTATS(p).avg_delay_per_cycle, TASK_CPUSTATS(p).avg_cycle_length);
+}
+
+unsigned long delay_in_jiffies_for_usage(const struct task_struct *p, unsigned long rur)
+{
+	unsigned long long acpc_jiffies, aspc_jiffies, res;
+
+	if (rur == 0)
+		return ULONG_MAX;
+
+	acpc_jiffies = cpustats_avg_in_jiffies(TASK_CPUSTATS(p).avg_cpu_per_cycle);
+	aspc_jiffies = cpustats_avg_in_jiffies(TASK_CPUSTATS(p).avg_sleep_per_cycle);
+
+	/*
+	 * we have to be careful about overflow and/or underflow
+	 */
+	while (unlikely(acpc_jiffies > PROPORTION_OVERFLOW)) {
+		acpc_jiffies >>= 1;
+		if (unlikely((rur >>= 1) == 0))
+			return ULONG_MAX;
+	}
+
+	res = sched_div_64(acpc_jiffies << PROPORTION_OFFSET, rur);
+	if (res > aspc_jiffies)
+		return res - aspc_jiffies;
+	else
+		return 0;
+}
+
+#ifndef CONFIG_CPUSCHED_SPA
+static int convert_proportion(unsigned long *val, void *data, int write)
+{
+	if (write) {
+		if (*val > 1000)
+			return -1;
+		*val = ppt_to_proportion(*val);
+	} else
+		*val = proportion_to_ppt(*val);
+
+	return 0;
+}
+
+int do_proc_proportion(ctl_table *ctp, int write, struct file *fp,
+				void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	return do_proc_doulongvec_convf_minmax(ctp, write, fp, buffer, lenp,
+		 ppos, convert_proportion, NULL);
+}
+#endif
diff -Naur linux-2.6.12-rc2-mm3/kernel/sched_drv.c linux-2.6.12-rc2-mm3-plugsched/kernel/sched_drv.c
--- linux-2.6.12-rc2-mm3/kernel/sched_drv.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.12-rc2-mm3-plugsched/kernel/sched_drv.c	2005-04-23 13:20:23.667973864 -0700
@@ -0,0 +1,137 @@
+/*
+ *  kernel/sched_drv.c
+ *
+ *  Kernel scheduler device implementation
+ */
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/syscalls.h>
+#include <linux/sched_drv.h>
+#include <linux/sched_pvt.h>
+
+/*
+ * All private per scheduler entries in task_struct are defined as
+ * separate structs and placed into the cpusched union in task_struct.
+ */
+
+/* Ingosched */
+#ifdef CONFIG_CPUSCHED_INGO
+extern const struct sched_drv ingo_sched_drv;
+#endif
+
+/* Staircase */
+#ifdef CONFIG_CPUSCHED_STAIRCASE
+extern const struct sched_drv staircase_sched_drv;
+#endif
+
+/* Single priority array (SPA) schedulers */
+#ifdef CONFIG_CPUSCHED_SPA_NF
+extern const struct sched_drv spa_nf_sched_drv;
+#endif
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+extern const struct sched_drv zaphod_sched_drv;
+#endif
+
+/* Nicksched */
+#ifdef CONFIG_CPUSCHED_NICK
+extern const struct sched_drv nick_sched_drv;
+#endif
+
+const struct sched_drv *sched_drvp =
+#if defined(CONFIG_CPUSCHED_DEFAULT_INGO)
+	&ingo_sched_drv;
+#elif defined(CONFIG_CPUSCHED_DEFAULT_STAIRCASE)
+	&staircase_sched_drv;
+#elif defined(CONFIG_CPUSCHED_DEFAULT_SPA_NF)
+	&spa_nf_sched_drv;
+#elif defined(CONFIG_CPUSCHED_DEFAULT_ZAPHOD)
+	&zaphod_sched_drv;
+#elif defined(CONFIG_CPUSCHED_DEFAULT_NICK)
+	&nick_sched_drv;
+#else
+	NULL;
+#error "You must have at least 1 cpu scheduler selected"
+#endif
+
+extern struct task_struct base_init_task;
+
+#define CPUSCHED_CHECK_SELECT(drv) \
+do { \
+	if (!strcmp(str, (drv).name)) { \
+		sched_drvp = &(drv); \
+		return 1; \
+	} \
+} while (0)
+
+static int __init sched_drv_setup(char *str)
+{
+#if defined(CONFIG_CPUSCHED_INGO)
+	CPUSCHED_CHECK_SELECT(ingo_sched_drv);
+#endif
+#if defined(CONFIG_CPUSCHED_STAIRCASE)
+	CPUSCHED_CHECK_SELECT(staircase_sched_drv);
+#endif
+#if defined(CONFIG_CPUSCHED_SPA_NF)
+	CPUSCHED_CHECK_SELECT(spa_nf_sched_drv);
+#endif
+#if defined(CONFIG_CPUSCHED_ZAPHOD)
+	CPUSCHED_CHECK_SELECT(zaphod_sched_drv);
+#endif
+#if defined(CONFIG_CPUSCHED_NICK)
+	CPUSCHED_CHECK_SELECT(nick_sched_drv);
+#endif
+	return 1;
+}
+
+__setup ("cpusched=", sched_drv_setup);
+
+static ssize_t show_attribute(struct kobject *kobj, struct attribute *attr, char *page)
+{
+	struct sched_drv_sysfs_entry *e = to_sched_drv_sysfs_entry(attr);
+
+	if (!e->show)
+		return 0;
+
+	return e->show(page);
+}
+
+static ssize_t store_attribute(struct kobject *kobj, struct attribute *attr, const char *page, size_t length)
+{
+	struct sched_drv_sysfs_entry *e = to_sched_drv_sysfs_entry(attr);
+
+	if (!e->show)
+		return -EBADF;
+
+	return e->store(page, length);
+}
+
+struct sysfs_ops sched_drv_sysfs_ops = {
+	.show = show_attribute,
+	.store = store_attribute,
+};
+
+static struct kobj_type sched_drv_ktype = {
+	.sysfs_ops = &sched_drv_sysfs_ops,
+	.default_attrs = NULL,
+};
+
+static struct kobject sched_drv_kobj = {
+	.ktype = &sched_drv_ktype
+};
+
+decl_subsys(cpusched, NULL, NULL);
+
+void __init sched_drv_sysfs_init(void)
+{
+	if (subsystem_register(&cpusched_subsys) == 0) {
+		if (sched_drvp->attrs == NULL)
+			return;
+
+		sched_drv_ktype.default_attrs = sched_drvp->attrs;
+		strncpy(sched_drv_kobj.name, sched_drvp->name, KOBJ_NAME_LEN);
+		sched_drv_kobj.kset = &cpusched_subsys.kset;
+		(void)kobject_register(&sched_drv_kobj);
+ 	}
+}
diff -Naur linux-2.6.12-rc2-mm3/kernel/sched_spa.c linux-2.6.12-rc2-mm3-plugsched/kernel/sched_spa.c
--- linux-2.6.12-rc2-mm3/kernel/sched_spa.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.12-rc2-mm3-plugsched/kernel/sched_spa.c	2005-04-23 13:20:23.669973560 -0700
@@ -0,0 +1,1542 @@
+/*
+ *  kernel/sched_spa.c
+ *  Copyright (C) 1991-2005  Linus Torvalds
+ *
+ *  2005-01-11 Single priority array scheduler (no frills and Zaphod)
+ */
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/rcupdate.h>
+#include <linux/security.h>
+#include <linux/cpu.h>
+#include <linux/hardirq.h>
+#include <linux/sched_pvt.h>
+#include <linux/sched_runq.h>
+#include <linux/module.h>
+
+extern const struct sched_drv spa_nf_sched_drv;
+extern const struct sched_drv zaphod_sched_drv;
+
+/*
+ * Some of our exported functions could be called when other schedulers are
+ * in charge with catastrophic results if not handled properly.
+ * So we define some macros to enable detection of whether either of our
+ * schedulers is in charge
+ */
+#ifdef CONFIG_CPUSCHED_SPA_NF
+#define spa_nf_in_charge() (&spa_nf_sched_drv == sched_drvp)
+#else
+#define spa_nf_in_charge() (0)
+#endif
+
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+#define zaphod_in_charge() (&zaphod_sched_drv == sched_drvp)
+#else
+#define zaphod_in_charge() (0)
+#endif
+
+#define spa_in_charge() (zaphod_in_charge() || spa_nf_in_charge())
+
+#define SPA_BGND_PRIO (SPA_IDLE_PRIO - 1)
+#define SPA_SOFT_CAP_PRIO (SPA_BGND_PRIO - 1)
+
+#define task_is_queued(p) (!list_empty(&(p)->run_list))
+
+static void spa_init_runqueue_queue(union runqueue_queue *qup)
+{
+	int k;
+
+	for (k = 0; k < SPA_IDLE_PRIO; k++) {
+		qup->spa.queue[k].prio = k;
+		INIT_LIST_HEAD(&qup->spa.queue[k].list);
+	}
+	bitmap_zero(qup->spa.bitmap, SPA_NUM_PRIO_SLOTS);
+	// delimiter for bitsearch
+	__set_bit(SPA_IDLE_PRIO, qup->spa.bitmap);
+	qup->spa.next_prom_due = ULONG_MAX;
+	qup->spa.pcount = 0;
+}
+
+static void spa_set_oom_time_slice(struct task_struct *p, unsigned long t)
+{
+	p->sdu.spa.time_slice = t;
+}
+
+/*
+ * These are the 'tuning knobs' of the scheduler:
+ *
+ * Default configurable timeslice is 100 msecs, maximum configurable
+ * timeslice is 1000 msecs and minumum configurable timeslice is 1 jiffy.
+ * Timeslices get renewed on task creation, on wake up and after they expire.
+ */
+#define MIN_TIMESLICE		1
+#define DEF_TIMESLICE		(100 * HZ / 1000)
+#define MAX_TIMESLICE		(1000 * HZ / 1000)
+#define DEF_DESKTOP_TIMESLICE	((DEF_TIMESLICE > 10) ? (DEF_TIMESLICE / 10) : 1)
+
+static unsigned long time_slice = DEF_TIMESLICE;
+static unsigned long sched_rr_time_slice = DEF_TIMESLICE;
+
+/*
+ * Background tasks may have longer time slices as compensation
+ */
+#define task_is_bgnd(p) (unlikely((p)->sdu.spa.cpu_rate_cap == 0))
+static unsigned int bgnd_time_slice_multiplier = 1;
+
+#define TASK_PREEMPTS_CURR(p, rq) \
+	((p)->prio < (rq)->curr->prio)
+
+static inline unsigned int normal_task_timeslice(const task_t *p)
+{
+	if (unlikely(p->prio == SPA_BGND_PRIO))
+		return time_slice * bgnd_time_slice_multiplier;
+
+	return time_slice;
+}
+
+static inline unsigned int hard_cap_timeslice(const task_t *p)
+{
+	unsigned int cpu_avg = cpustats_avg_in_jiffies(p->sdu.spa.cpustats.avg_cpu_per_cycle);
+
+	return (cpu_avg / 2) ? (cpu_avg / 2) : 1;
+}
+
+/*
+ * spa_task_timeslice() is the interface that is used by the scheduler.
+ */
+static unsigned int spa_task_timeslice(const task_t *p)
+{
+	if (rt_task(p))
+		return sched_rr_time_slice;
+
+	return normal_task_timeslice(p);
+}
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_CPUSCHED_SPA_NF
+static void spa_nf_set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+	BUG_ON(!list_empty(&p->run_list));
+
+	set_task_runq_cpustats(p, cpu);
+	p->thread_info->cpu = cpu;
+}
+#endif
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+static void zaphod_set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+	BUG_ON(!list_empty(&p->run_list));
+
+	set_task_runq_cpustats(p, cpu);
+	p->thread_info->cpu = cpu;
+	p->sdu.spa.zrq = zaphod_cpu_runq_data(cpu);
+}
+#endif
+#endif
+
+/*
+ * Adding/removing a task to/from a priority array:
+ */
+static void dequeue_task(struct task_struct *p, struct spa_runqueue_queue *rqq)
+{
+	/*
+	 * Initialize after removal from the list so that list_empty() works
+	 * as a means for testing whether the task is runnable
+	 * If p is the last task in this priority slot then slotp will be
+	 * a pointer to the head of the list in the sunqueue structure
+	 * NB we can't use p->prio as is for bitmap as task may have
+	 * been promoted so we update it.
+	 */
+	struct list_head *slotp = p->run_list.next;
+
+	list_del_init(&p->run_list);
+	if (list_empty(slotp)) {
+		p->prio = list_entry(slotp, struct spa_prio_slot, list)->prio;
+		__clear_bit(p->prio, rqq->bitmap);
+	}
+}
+
+static void enqueue_task(struct task_struct *p, struct spa_runqueue_queue *rqq)
+{
+	sched_info_queued(p);
+	list_add_tail(&p->run_list, &rqq->queue[p->prio].list);
+	__set_bit(p->prio, rqq->bitmap);
+}
+
+/*
+ * Used by the migration code - we pull tasks from the head of the
+ * remote queue so we want these tasks to show up at the head of the
+ * local queue:
+ */
+static inline void enqueue_task_head(struct task_struct *p, struct spa_runqueue_queue *rqq)
+{
+	list_add(&p->run_list, &rqq->queue[p->prio].list);
+	__set_bit(p->prio, rqq->bitmap);
+}
+
+/*
+ * Control value for promotion mechanism NB this controls severity of "nice"
+ */
+unsigned long base_prom_interval = ((DEF_TIMESLICE * 15) / 10);
+
+#define PROMOTION_FLOOR MAX_RT_PRIO
+#define PROMOTION_CEILING SPA_BGND_PRIO
+#define in_promotable_range(prio) \
+	((prio) > PROMOTION_FLOOR && (prio) < PROMOTION_CEILING)
+
+static inline void restart_promotions(struct runqueue *rq)
+{
+	rq->qu.spa.next_prom_due = jiffies + base_prom_interval;
+	rq->qu.spa.pcount = 2;
+}
+
+#define check_restart_promotions(rq) \
+do { \
+	if (rq->nr_running == 2) \
+		restart_promotions(rq); \
+} while (0)
+
+/* make it (relatively) easy to switch to using a timer */
+static inline void stop_promotions(struct runqueue *rq)
+{
+}
+
+#define check_stop_promotions(rq) \
+do { \
+	if (rq->nr_running == 1) \
+		stop_promotions(rq); \
+} while (0)
+
+/*
+ * Are promotions due?
+ */
+static inline int promotions_due(const struct runqueue *rq)
+{
+	return unlikely(time_after_eq(jiffies, rq->qu.spa.next_prom_due));
+}
+
+static inline void update_curr_prio_for_promotion(struct runqueue *rq)
+{
+	if (likely(in_promotable_range(rq->curr->prio)))
+		rq->curr->prio--;
+}
+
+/*
+ * Assume spa_runq lock is NOT already held.
+ */
+static void do_promotions(struct runqueue *rq)
+{
+	int idx = PROMOTION_FLOOR;
+
+	spin_lock(&rq->lock);
+	if (unlikely(rq->nr_running < 2))
+		goto out_unlock;
+	if (rq->nr_running > rq->qu.spa.pcount) {
+		rq->qu.spa.pcount++;
+		goto out_unlock;
+	}
+	for (;;) {
+		int new_prio;
+		idx = find_next_bit(rq->qu.spa.bitmap, PROMOTION_CEILING, idx + 1);
+		if (idx > (PROMOTION_CEILING - 1))
+			break;
+
+		new_prio = idx - 1;
+		__list_splice(&rq->qu.spa.queue[idx].list, rq->qu.spa.queue[new_prio].list.prev);
+		INIT_LIST_HEAD(&rq->qu.spa.queue[idx].list);
+		__clear_bit(idx, rq->qu.spa.bitmap);
+		__set_bit(new_prio, rq->qu.spa.bitmap);
+	}
+	/* The only prio field that needs update is the current task's */
+	update_curr_prio_for_promotion(rq);
+	rq->qu.spa.pcount = 2;
+out_unlock:
+	rq->qu.spa.next_prom_due = jiffies + base_prom_interval;
+	spin_unlock(&rq->lock);
+}
+
+/*
+ * effective_prio - return the priority that is based on the static
+ * priority
+ */
+#define should_run_in_background(p) \
+	(task_is_bgnd(p) && !((p)->sdu.spa.flags & SPAF_UISLEEP))
+#define exceeding_cap(p) \
+	(avg_cpu_usage_rate(p) > (p)->sdu.spa.min_cpu_rate_cap)
+#ifdef CONFIG_CPUSCHED_SPA_NF
+static int spa_nf_effective_prio(task_t *p)
+{
+	if (rt_task(p))
+		return p->prio;
+
+	if (unlikely(should_run_in_background(p)))
+		return SPA_BGND_PRIO;
+
+	/* using the minimum of the hard and soft caps makes things smoother */
+	if (unlikely(exceeding_cap(p)))
+		return SPA_SOFT_CAP_PRIO;
+
+	return p->static_prio;
+}
+#endif
+
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+static int spa_zaphod_effective_prio(task_t *p)
+{
+	if (rt_task(p))
+		return p->prio;
+
+	if (unlikely(should_run_in_background(p)))
+		return SPA_BGND_PRIO;
+
+	/* using the minimum of the hard and soft caps makes things smoother */
+	if (unlikely(exceeding_cap(p)))
+		return SPA_SOFT_CAP_PRIO;
+
+	return zaphod_effective_prio(p);
+}
+#endif
+
+static int (*effective_prio)(struct task_struct *p) =
+#ifdef CONFIG_CPUSCHED_SPA_NF
+spa_nf_effective_prio;
+#else
+spa_zaphod_effective_prio;
+#endif
+
+/*
+ * __activate_task - move a task to the runqueue.
+ */
+static inline void __activate_task(task_t *p, runqueue_t *rq)
+{
+	struct spa_runqueue_queue *rqq = &rq->qu.spa;
+
+	enqueue_task(p, rqq);
+	rq->nr_running++;
+	check_restart_promotions(rq);
+}
+
+#ifdef CONFIG_CPUSCHED_SPA_NF
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+static void do_nothing_to_task(task_t *p) {}
+static void (*reassess_at_activation)(task_t *p) = do_nothing_to_task;
+#else
+static inline void reassess_at_activation(task_t *p) {}
+#endif
+#else
+#define reassess_at_activation(p) zaphod_reassess_at_activation(p)
+#endif
+
+/*
+ * activate_task - move a task to the runqueue and do priority recalculation
+ */
+static void activate_task(task_t *p, runqueue_t *rq)
+{
+	if (rt_task(p))
+		p->sdu.spa.time_slice = sched_rr_time_slice;
+	else {
+		reassess_at_activation(p);
+		p->prio = effective_prio(p);
+		/* hard capped tasks that never use their full time slice evade
+		 * the sinbin so we need to reduce the size of their time slice
+		 * to reduce the size of the hole that they slip through.
+		 * It would be unwise to close it completely.
+		 */
+		if (unlikely(p->sdu.spa.cpustats.cpu_usage_rate > p->sdu.spa.cpu_rate_hard_cap))
+			p->sdu.spa.time_slice = hard_cap_timeslice(p);
+		else
+			p->sdu.spa.time_slice = normal_task_timeslice(p);
+	}
+	p->sdu.spa.flags &= ~SPAF_UISLEEP;
+	__activate_task(p, rq);
+}
+
+/*
+ * deactivate_task - remove a task from the runqueue.
+ */
+static void deactivate_task(struct task_struct *p, runqueue_t *rq)
+{
+	rq->nr_running--;
+	dequeue_task(p, &rq->qu.spa);
+	check_stop_promotions(rq);
+}
+
+/*
+ * Check to see if p preempts rq->curr and resched if it does. In compute
+ * mode we do not preempt for at least cache_delay and set rq->preempted.
+ */
+static void preempt_if_warranted(task_t *p, struct runqueue *rq)
+{
+	if (TASK_PREEMPTS_CURR(p, rq))
+		resched_task(rq->curr);
+}
+
+/***
+ * try_to_wake_up - wake up a thread
+ * @p: the to-be-woken-up thread
+ * @old_state: the task's state before being woken
+ * @sync: do a synchronous wakeup?
+ * @rq: The run queue on which the task is to be placed (already locked)
+ */
+static void spa_wake_up_task(struct task_struct *p, struct runqueue *rq, unsigned int old_state, int sync)
+{
+	/*
+	 * This is the end of one scheduling cycle and the start of the next
+	 */
+	update_cpustats_at_wake_up(p, adjusted_sched_clock(p));
+
+	if (old_state == TASK_UNINTERRUPTIBLE)
+		rq->nr_uninterruptible--;
+
+	/*
+	 * Sync wakeups (i.e. those types of wakeups where the waker
+	 * has indicated that it will leave the CPU in short order)
+	 * don't trigger a preemption, if the woken up task will run on
+	 * this cpu. (in this case the 'I will reschedule' promise of
+	 * the waker guarantees that the freshly woken up task is going
+	 * to be considered on this CPU.)
+	 */
+	activate_task(p, rq);
+	if (!sync || (rq != this_rq()))
+		preempt_if_warranted(p, rq);
+}
+
+#ifdef CONFIG_CPUSCHED_SPA_NF
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+static void (*spa_fork_extras)(task_t *p) = do_nothing_to_task;
+#else
+static inline void spa_fork_extras(task_t *p) {}
+#endif
+#else
+#define spa_fork_extras(p) zaphod_fork(p)
+#endif
+
+/*
+ * Perform scheduler related setup for a newly forked process p.
+ * p is forked by current.
+ */
+static void spa_fork(task_t *p)
+{
+	unsigned long now;
+
+	init_timer(&p->sdu.spa.sinbin_timer);
+	p->sdu.spa.sinbin_timer.data = (unsigned long) p;
+	/*
+	 * Give the task a new timeslice.
+	 */
+	p->sdu.spa.time_slice = spa_task_timeslice(p);
+	local_irq_disable();
+	now = sched_clock();
+	local_irq_enable();
+	/*
+	 * Initialize the scheduling statistics
+	 */
+	initialize_cpustats(p, now);
+	spa_fork_extras(p);
+}
+
+/*
+ * wake_up_new_task - wake up a newly created task for the first time.
+ *
+ * This function will do some initial scheduler statistics housekeeping
+ * that must be done for every newly created context, then puts the task
+ * on the runqueue and wakes it.
+ */
+#ifdef CONFIG_SMP
+#define rq_is_this_rq(rq) (likely((rq) == this_rq()))
+#else
+#define rq_is_this_rq(rq) 1
+#endif
+static void spa_wake_up_new_task(task_t * p, unsigned long clone_flags)
+{
+	unsigned long flags;
+	runqueue_t *rq;
+
+	rq = task_rq_lock(p, &flags);
+
+	BUG_ON(p->state != TASK_RUNNING);
+
+	if (rq_is_this_rq(rq)) {
+		if (!(clone_flags & CLONE_VM)) {
+			/*
+			 * The VM isn't cloned, so we're in a good position to
+			 * do child-runs-first in anticipation of an exec. This
+			 * usually avoids a lot of COW overhead.
+			 */
+			if (unlikely(!task_is_queued(current))) {
+				p->prio = effective_prio(p);
+				__activate_task(p, rq);
+			} else {
+				p->prio = current->prio;
+				list_add_tail(&p->run_list, &current->run_list);
+				rq->nr_running++;
+				check_restart_promotions(rq);
+			}
+			set_need_resched();
+		} else {
+			p->prio = effective_prio(p);
+			/* Run child last */
+			__activate_task(p, rq);
+		}
+	} else {
+		p->prio = effective_prio(p);
+		__activate_task(p, rq);
+		preempt_if_warranted(p, rq);
+	}
+
+	task_rq_unlock(rq, &flags);
+}
+
+/*
+ * (Optionally) log scheduler statistics at exit.
+ */
+static int log_at_exit = 0;
+static void spa_exit(task_t * p)
+{
+	struct task_cpustats stats;
+
+	if (!log_at_exit)
+		return;
+
+	get_task_cpustats(p, &stats);
+	printk("SCHED_EXIT[%d] (%s) %llu %llu %llu %llu %llu %llu %lu %lu\n",
+		p->pid, p->comm,
+		stats.total_sleep, stats.total_cpu, stats.total_delay,
+		stats.total_sinbin, stats.total_wake_ups, stats.intr_wake_ups,
+		p->nvcsw, p->nivcsw);
+}
+
+/*
+ * pull_task - move a task from a remote runqueue to the local runqueue.
+ * Both runqueues must be locked.
+ */
+static inline
+void pull_task(runqueue_t *src_rq, task_t *p, runqueue_t *this_rq, int this_cpu)
+{
+	dequeue_task(p, &src_rq->qu.spa);
+	src_rq->nr_running--;
+	check_stop_promotions(src_rq);
+	/* not the current task on its cpu so increment delay stats */
+	delta_delay_cpustats(p, adjusted_sched_clock(p));
+	set_task_cpu(p, this_cpu);
+	this_rq->nr_running++;
+	enqueue_task(p, &this_rq->qu.spa);
+	check_restart_promotions(this_rq);
+	preempt_if_warranted(p, this_rq);
+}
+
+#ifdef CONFIG_SMP
+/*
+ * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq,
+ * as part of a balancing operation within "domain". Returns the number of
+ * tasks moved.
+ *
+ * Called with both runqueues locked.
+ */
+static int spa_move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
+		      unsigned long max_nr_move, struct sched_domain *sd,
+		      enum idle_type idle)
+{
+	struct list_head *head, *curr;
+	int idx, pulled = 0;
+	struct task_struct *tmp;
+
+	if (max_nr_move <= 0 || busiest->nr_running <= 1)
+		goto out;
+
+	/* Start searching at priority 0: */
+	idx = 0;
+skip_bitmap:
+	if (!idx)
+		idx = sched_find_first_bit(busiest->qu.spa.bitmap);
+	else
+		idx = find_next_bit(busiest->qu.spa.bitmap, SPA_IDLE_PRIO, idx);
+	if (idx >= SPA_IDLE_PRIO)
+		goto out;
+
+	head = &busiest->qu.spa.queue[idx].list;
+	curr = head->prev;
+skip_queue:
+	tmp = list_entry(curr, task_t, run_list);
+	/* Take the opportunity to update task's prio field just
+	 * in case it's been promoted.  This makes sure that the task doesn't
+	 * lose any promotions it has received during the move.
+	 */
+	tmp->prio = idx;
+
+	curr = curr->prev;
+
+	if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle)) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+
+#ifdef CONFIG_SCHEDSTATS
+	if (task_hot(tmp, busiest->timestamp_last_tick, sd))
+		schedstat_inc(sd, lb_hot_gained[idle]);
+#endif
+
+	pull_task(busiest, tmp, this_rq, this_cpu);
+	pulled++;
+
+	/* We only want to steal up to the prescribed number of tasks. */
+	if (pulled < max_nr_move) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+out:
+	return pulled;
+}
+#endif
+
+#ifdef CONFIG_CPUSCHED_SPA_NF
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+static void spa_nf_runq_data_tick(struct sched_zaphod_runq_data *zrq, unsigned long numr) {}
+static void (*spa_reassess_at_end_of_ts)(task_t *p) = do_nothing_to_task;
+static void (*spa_runq_data_tick)(struct sched_zaphod_runq_data *zrq, unsigned long numr) = spa_nf_runq_data_tick;
+#else
+static inline void spa_reassess_at_end_of_ts(task_t *p) {}
+#define spa_runq_data_tick(zrq, numr)
+#endif
+#else
+#define spa_reassess_at_end_of_ts(p) zaphod_reassess_at_end_of_ts(p)
+#define spa_runq_data_tick(zrq, numr) zaphod_runq_data_tick(zrq, numr)
+#endif
+
+/*
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled.
+ */
+static void spa_tick(struct task_struct *p, struct runqueue *rq, unsigned long long now)
+{
+	int cpu = smp_processor_id();
+	struct spa_runqueue_queue *rqq = &rq->qu.spa;
+
+	spa_runq_data_tick(p->sdu.spa.zrq, rq->nr_running);
+
+	if (p == rq->idle) {
+		if (wake_priority_sleeper(rq))
+			goto out;
+		rebalance_tick(cpu, rq, SCHED_IDLE);
+		return;
+	}
+
+	/*
+	 * SCHED_FIFO tasks never run out of timeslice.
+	 */
+	if (unlikely(p->policy == SCHED_FIFO))
+		goto out;
+
+	spin_lock(&rq->lock);
+	/*
+	 * The task was running during this tick - update the
+	 * time slice counter. Note: we do not update a thread's
+	 * priority until it either goes to sleep or uses up its
+	 * timeslice. This makes it possible for interactive tasks
+	 * to use up their timeslices at their highest priority levels.
+	 */
+	if (!--p->sdu.spa.time_slice) {
+		dequeue_task(p, rqq);
+		set_tsk_need_resched(p);
+		update_cpustats_at_end_of_ts(p, now);
+		if (unlikely(p->policy == SCHED_RR))
+			p->sdu.spa.time_slice = sched_rr_time_slice;
+		else {
+			spa_reassess_at_end_of_ts(p);
+			p->prio = effective_prio(p);
+			p->sdu.spa.time_slice = normal_task_timeslice(p);
+		}
+		enqueue_task(p, rqq);
+	}
+	spin_unlock(&rq->lock);
+out:
+	if (unlikely(promotions_due(rq)))
+		do_promotions(rq);
+	rebalance_tick(cpu, rq, NOT_IDLE);
+}
+
+/*
+ * Take an active task off the runqueue for a short while.
+ * Assumes that the task's runqueue is already locked.
+ */
+void put_task_in_sinbin(struct task_struct *p, unsigned long durn)
+{
+	if (durn == 0)
+		return;
+	deactivate_task(p, task_rq(p));
+	p->sdu.spa.flags |= SPAF_SINBINNED;
+	p->sdu.spa.sinbin_timer.expires = jiffies + durn;
+	add_timer(&p->sdu.spa.sinbin_timer);
+}
+
+#ifdef CONFIG_CPUSCHED_SPA_NF
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+static void (*reassess_at_sinbin_release)(task_t *p) = do_nothing_to_task;
+#else
+static inline void reassess_at_sinbin_release(task_t *p) {}
+#endif
+#else
+#define reassess_at_sinbin_release(p) zaphod_reassess_at_sinbin_release(p)
+#endif
+
+/*
+ * Release a task from the sinbin
+ */
+void sinbin_release_fn(unsigned long arg)
+{
+	unsigned long flags;
+	struct task_struct *p = (struct task_struct*)arg;
+	struct runqueue *rq = task_rq_lock(p, &flags);
+
+	/*
+	 * Sinbin time is included in delay time
+	 */
+	delta_delay_cpustats(p, adjusted_sched_clock(p));
+	p->sdu.spa.flags &= ~SPAF_SINBINNED;
+	if (!rt_task(p)) {
+		reassess_at_sinbin_release(p);
+		p->prio = effective_prio(p);
+	}
+	__activate_task(p, rq);
+
+	task_rq_unlock(rq, &flags);
+}
+
+static inline int task_needs_sinbinning(const struct task_struct *p)
+{
+	return unlikely(avg_cpu_usage_rate(p) > p->sdu.spa.cpu_rate_hard_cap) &&
+		(p->state == TASK_RUNNING) && !rt_task(p) &&
+		((p->flags & PF_EXITING) == 0);
+}
+
+static inline unsigned long required_sinbin_durn(const struct task_struct *p)
+{
+	return delay_in_jiffies_for_usage(p, p->sdu.spa.cpu_rate_hard_cap);
+}
+
+#ifdef CONFIG_SCHED_SMT
+static struct task_struct *spa_head_of_queue(union runqueue_queue *rqq)
+{
+	struct task_struct *tmp;
+	int idx = sched_find_first_bit(rqq->spa.bitmap);
+
+	tmp = list_entry(rqq->spa.queue[idx].list.next, task_t, run_list);
+	/* Take the opportunity to update task's prio field just
+	 * in case it's been promoted.
+	 */
+	tmp->prio = idx;
+
+	return tmp;
+}
+
+/* maximum expected priority difference for SCHED_NORMAL tasks */
+#define MAX_SN_PD (SPA_IDLE_PRIO - MAX_RT_PRIO)
+static int spa_dependent_sleeper_trumps(const struct task_struct *p1,
+	const struct task_struct * p2, struct sched_domain *sd)
+{
+	int dp = p2->prio - p1->prio;
+
+	if ((dp > 0) && (sd->per_cpu_gain < 100) && p2->mm && !rt_task(p2)) {
+		unsigned long rq_ts_rm;
+
+		if (rt_task(p1))
+			return 1;
+
+		rq_ts_rm = ((MAX_SN_PD - dp) * time_slice * sd->per_cpu_gain) /
+			(100 * MAX_SN_PD);
+
+		return p1->sdu.spa.time_slice > rq_ts_rm;
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * schedule() is the main scheduler function.
+ */
+static void spa_schedule(void)
+{
+	long *switch_count;
+	int cpu, idx;
+	struct task_struct *prev = current, *next;
+	struct runqueue *rq = this_rq();
+	unsigned long long now = sched_clock();
+	struct list_head *queue;
+
+	spin_lock_irq(&rq->lock);
+
+	if (unlikely(current->flags & PF_DEAD))
+		current->state = EXIT_DEAD;
+	/*
+	 * if entering off of a kernel preemption go straight
+	 * to picking the next task.
+	 */
+	switch_count = &prev->nivcsw;
+	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+		switch_count = &prev->nvcsw;
+		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+				unlikely(signal_pending(prev))))
+			prev->state = TASK_RUNNING;
+		else {
+			if (prev->state == TASK_UNINTERRUPTIBLE) {
+				rq->nr_uninterruptible++;
+				prev->sdu.spa.flags |= SPAF_UISLEEP;
+			}
+			deactivate_task(prev, rq);
+		}
+	}
+
+	delta_cpu_cpustats(prev, now);
+	prev->sched_time = prev->sdu.spa.cpustats.total_cpu;
+	if (task_needs_sinbinning(prev) && likely(!signal_pending(prev)))
+		put_task_in_sinbin(prev, required_sinbin_durn(prev));
+
+	cpu = smp_processor_id();
+	if (unlikely(!rq->nr_running)) {
+go_idle:
+		idle_balance(cpu, rq);
+		if (!rq->nr_running) {
+			next = rq->idle;
+			wake_sleeping_dependent(cpu, rq);
+			/*
+			 * wake_sleeping_dependent() might have released
+			 * the runqueue, so break out if we got new
+			 * tasks meanwhile:
+			 */
+			if (!rq->nr_running)
+				goto switch_tasks;
+		}
+	} else {
+		if (dependent_sleeper(cpu, rq)) {
+			next = rq->idle;
+			goto switch_tasks;
+		}
+		/*
+		 * dependent_sleeper() releases and reacquires the runqueue
+		 * lock, hence go into the idle loop if the rq went
+		 * empty meanwhile:
+		 */
+		if (unlikely(!rq->nr_running))
+			goto go_idle;
+	}
+
+	idx = sched_find_first_bit(rq->qu.spa.bitmap);
+	queue = &rq->qu.spa.queue[idx].list;
+	next = list_entry(queue->next, task_t, run_list);
+	/* Take the opportunity to update task's prio field just
+	 * in case it's been promoted.
+	 */
+	next->prio = idx;
+switch_tasks:
+	if (next == rq->idle)
+		schedstat_inc(rq, sched_goidle);
+	prefetch(next);
+	clear_tsk_need_resched(prev);
+	rcu_qsctr_inc(task_cpu(prev));
+
+	next->timestamp = prev->last_ran = now;
+
+	sched_info_switch(prev, next);
+	if (likely(prev != next)) {
+		delta_delay_cpustats(next, now);
+		rq->nr_switches++;
+		rq->curr = next;
+		++*switch_count;
+
+		prepare_arch_switch(rq, next);
+		prev = context_switch(rq, prev, next);
+		barrier();
+
+		finish_task_switch(prev);
+	} else
+		spin_unlock_irq(&rq->lock);
+}
+
+static void spa_set_normal_task_nice(task_t *p, long nice)
+{
+	int old_static_prio, delta;
+	struct runqueue *rq = task_rq(p);
+	struct spa_runqueue_queue *rqq = &rq->qu.spa;
+
+	old_static_prio = p->static_prio;
+	p->static_prio = NICE_TO_PRIO(nice);
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+	if (zaphod_in_charge())
+		zaphod_reassess_at_renice(p);
+#endif
+
+	if (p->prio == SPA_BGND_PRIO)
+		return;
+
+	delta = p->static_prio - old_static_prio;
+	if (unlikely(delta > (SPA_SOFT_CAP_PRIO - p->prio)))
+		delta = (SPA_SOFT_CAP_PRIO - p->prio);
+	else if (unlikely(delta < (MAX_RT_PRIO - p->prio)))
+		delta = (MAX_RT_PRIO - p->prio);
+
+	if (delta == 0)
+		return;
+
+	if (task_is_queued(p)) {
+		dequeue_task(p, rqq);
+		p->prio += delta;
+		enqueue_task(p, rqq);
+		/*
+		 * If the task increased its priority or is running and
+		 * lowered its priority, then reschedule its CPU:
+		 */
+		if (delta < 0 || (delta > 0 && task_running(rq, p)))
+			resched_task(rq->curr);
+	} else
+		p->prio += delta;
+}
+
+/*
+ * setscheduler - change the scheduling policy and/or RT priority of a thread.
+ */
+static void spa_setscheduler(task_t *p, int policy, int prio)
+{
+	int oldprio;
+	int queued;
+	runqueue_t *rq = task_rq(p);
+
+	queued = task_is_queued(p);
+	if (queued)
+		deactivate_task(p, rq);
+	oldprio = p->prio;
+	__setscheduler(p, policy, prio);
+	if (queued) {
+		__activate_task(p, rq);
+		/*
+		 * Reschedule if we are currently running on this runqueue and
+		 * our priority decreased, or if we are not currently running on
+		 * this runqueue and our priority is higher than the current's
+		 */
+		if (task_running(rq, p)) {
+			if (p->prio > oldprio)
+				resched_task(rq->curr);
+		} else
+			preempt_if_warranted(p, rq);
+	}
+}
+
+/*
+ * Require: 0 <= new_cap <= 1000
+ */
+int set_cpu_rate_cap(struct task_struct *p, unsigned long new_cap)
+{
+	int is_allowed;
+	unsigned long flags;
+	struct runqueue *rq;
+	long delta;
+
+	/* this function could be called when other schedulers are in
+	 * charge (with catastrophic results) so let's check
+	 */
+	if (!spa_in_charge())
+		return -ENOSYS;
+
+	if (new_cap > 1000)
+		return -EINVAL;
+	is_allowed = capable(CAP_SYS_NICE);
+	/*
+	 * We have to be careful, if called from /proc code,
+	 * the task might be in the middle of scheduling on another CPU.
+	 */
+	new_cap = ppt_to_proportion(new_cap);
+	rq = task_rq_lock(p, &flags);
+	delta = new_cap - p->sdu.spa.cpu_rate_cap;
+	if (!is_allowed) {
+		/*
+		 * Ordinary users can set/change caps on their own tasks
+		 * provided that the new setting is MORE constraining
+		 */
+		if (((current->euid != p->uid) && (current->uid != p->uid)) || (delta > 0)) {
+			task_rq_unlock(rq, &flags);
+			return -EPERM;
+		}
+	}
+	/*
+	 * The RT tasks don't have caps, but we still allow the caps to be
+	 * set - but as expected it wont have any effect on scheduling until
+	 * the task becomes SCHED_NORMAL:
+	 */
+	p->sdu.spa.cpu_rate_cap = new_cap;
+	if (p->sdu.spa.cpu_rate_cap < p->sdu.spa.cpu_rate_hard_cap)
+		p->sdu.spa.min_cpu_rate_cap = p->sdu.spa.cpu_rate_cap;
+	else
+		p->sdu.spa.min_cpu_rate_cap = p->sdu.spa.cpu_rate_hard_cap;
+
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+	if (zaphod_in_charge())
+		zaphod_reassess_at_renice(p);
+#endif
+
+	if (!rt_task(p) && task_is_queued(p)) {
+		int delta = -p->prio;
+		struct spa_runqueue_queue *rqq = &rq->qu.spa;
+
+		dequeue_task(p, rqq);
+		delta += p->prio = effective_prio(p);
+		enqueue_task(p, rqq);
+		/*
+		 * If the task increased its priority or is running and
+		 * lowered its priority, then reschedule its CPU:
+		 */
+		if (delta < 0 || (delta > 0 && task_running(rq, p)))
+			resched_task(rq->curr);
+	}
+	task_rq_unlock(rq, &flags);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(set_cpu_rate_cap);
+
+unsigned long get_cpu_rate_cap(struct task_struct *p)
+{
+	if (!spa_in_charge())
+		return 1000;
+
+	return proportion_to_ppt(p->sdu.spa.cpu_rate_cap);
+}
+
+EXPORT_SYMBOL(get_cpu_rate_cap);
+
+/*
+ * Require: 1 <= new_cap <= 1000
+ */
+int set_cpu_rate_hard_cap(struct task_struct *p, unsigned long new_cap)
+{
+	int is_allowed;
+	unsigned long flags;
+	struct runqueue *rq;
+	long delta;
+
+	/* this function could be called when other schedulers are in
+	 * charge (with catastrophic results) so let's check
+	 */
+	if (!spa_in_charge())
+		return -ENOSYS;
+
+	if ((new_cap > 1000) || (new_cap == 0)) /* zero hard caps are not allowed */
+		return -EINVAL;
+	is_allowed = capable(CAP_SYS_NICE);
+	new_cap = ppt_to_proportion(new_cap);
+	/*
+	 * We have to be careful, if called from /proc code,
+	 * the task might be in the middle of scheduling on another CPU.
+	 */
+	rq = task_rq_lock(p, &flags);
+	delta = new_cap - p->sdu.spa.cpu_rate_hard_cap;
+	if (!is_allowed) {
+		/*
+		 * Ordinary users can set/change caps on their own tasks
+		 * provided that the new setting is MORE constraining
+		 */
+		if (((current->euid != p->uid) && (current->uid != p->uid)) || (delta > 0)) {
+			task_rq_unlock(rq, &flags);
+			return -EPERM;
+		}
+	}
+	/*
+	 * The RT tasks don't have caps, but we still allow the caps to be
+	 * set - but as expected it wont have any effect on scheduling until
+	 * the task becomes SCHED_NORMAL:
+	 */
+	p->sdu.spa.cpu_rate_hard_cap = new_cap;
+	if (p->sdu.spa.cpu_rate_cap < p->sdu.spa.cpu_rate_hard_cap)
+		p->sdu.spa.min_cpu_rate_cap = p->sdu.spa.cpu_rate_cap;
+	else
+		p->sdu.spa.min_cpu_rate_cap = p->sdu.spa.cpu_rate_hard_cap;
+
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+	if (zaphod_in_charge())
+		zaphod_reassess_at_renice(p);
+#endif
+
+	/* (POSSIBLY) TODO: if it's sinbinned and the cap is relaxed then
+	 * release it from the sinbin
+	 */
+	task_rq_unlock(rq, &flags);
+	return 0;
+}
+
+EXPORT_SYMBOL(set_cpu_rate_hard_cap);
+
+unsigned long get_cpu_rate_hard_cap(struct task_struct *p)
+{
+	if (!spa_in_charge())
+		return 1000;
+
+	return proportion_to_ppt(p->sdu.spa.cpu_rate_hard_cap);
+}
+
+EXPORT_SYMBOL(get_cpu_rate_hard_cap);
+
+int get_task_cpustats(struct task_struct *tsk, struct task_cpustats *stats)
+{
+	int on_runq = 0;
+	int on_cpu = 0;
+	int is_sinbinned = 0;
+	unsigned long long timestamp = 0;
+	unsigned long flags;
+	struct runqueue *rq;
+
+	if (!spa_in_charge())
+		return -ENOSYS;
+
+	rq = task_rq_lock(tsk, &flags);
+
+	*stats = tsk->sdu.spa.cpustats;
+	timestamp = rq->timestamp_last_tick;
+	is_sinbinned = task_is_sinbinned(tsk);
+	if ((on_runq = task_is_queued(tsk)))
+		on_cpu = task_running(rq, tsk);
+
+	task_rq_unlock(rq, &flags);
+
+	/*
+	 * Update values to the previous tick (only)
+	 */
+	if (timestamp > stats->timestamp) {
+		unsigned long long delta = timestamp - stats->timestamp;
+
+		stats->timestamp = timestamp;
+		if (on_cpu) {
+			stats->total_cpu += delta;
+		} else if (on_runq || is_sinbinned) {
+			stats->total_delay += delta;
+			if (is_sinbinned)
+				stats->total_sinbin += delta;
+		} else {
+			stats->total_sleep += delta;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(get_task_cpustats);
+
+/*
+ * Get scheduling statistics for the nominated CPU
+ */
+int get_cpu_cpustats(unsigned int cpu, struct cpu_cpustats *stats)
+{
+	int idle;
+	unsigned long long idle_timestamp;
+	struct runqueue *rq = cpu_rq(cpu);
+	struct runq_cpustats *csrq;
+
+	if (!spa_in_charge())
+		return -ENOSYS;
+
+	/*
+	 * No need to crash the whole machine if they've asked for stats for
+	 * a non existent CPU.
+	 */
+	if ((csrq = cpu_runq_cpustats(cpu)) == NULL)
+		return -EFAULT;
+
+	local_irq_disable();
+	spin_lock(&rq->lock);
+	idle = rq->curr == rq->idle;
+#ifdef CONFIG_SMP
+	if (rq->timestamp_last_tick > rq->curr->sdu.spa.cpustats.timestamp)
+		stats->timestamp = rq->timestamp_last_tick;
+	else
+#endif
+		stats->timestamp = rq->curr->sdu.spa.cpustats.timestamp;
+	idle_timestamp = rq->idle->sdu.spa.cpustats.timestamp;
+	if (idle_timestamp > stats->timestamp)
+		stats->timestamp = idle_timestamp;
+	stats->total_idle = rq->idle->sdu.spa.cpustats.total_cpu;
+	stats->total_busy = rq->idle->sdu.spa.cpustats.total_delay;
+	stats->total_delay = csrq->total_delay;
+	stats->total_rt_delay = csrq->total_rt_delay;
+	stats->total_intr_delay = csrq->total_intr_delay;
+	stats->total_rt_intr_delay = csrq->total_rt_intr_delay;
+	stats->total_fork_delay = csrq->total_fork_delay;
+	stats->total_sinbin = csrq->total_sinbin;
+	stats->nr_switches = rq->nr_switches;
+	spin_unlock_irq(&rq->lock);
+
+	/*
+	 * Update idle/busy time to the current tick
+	 */
+	if (idle)
+		stats->total_idle += (stats->timestamp - idle_timestamp);
+	else
+		stats->total_busy += (stats->timestamp - idle_timestamp);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(get_cpu_cpustats);
+
+/**
+ * sys_sched_yield - yield the current processor to other threads.
+ *
+ * this function yields the current CPU by moving the calling thread
+ * to the expired array. If there are no other threads running on this
+ * CPU then this function will return.
+ */
+
+static long spa_sys_yield(void)
+{
+	runqueue_t *rq = this_rq_lock();
+	struct spa_runqueue_queue *rqq = &rq->qu.spa;
+
+	schedstat_inc(rq, yld_cnt);
+	/* If there's other tasks on this CPU make sure that at least
+	 * one of them get some CPU before this task's next bite of the
+	 * cherry.  Dequeue before looking for the appropriate run
+	 * queue so that we don't find our queue if we were the sole
+	 * occupant of that queue.
+	 */
+	dequeue_task(current, rqq);
+	/*
+	 * special rule: RT tasks will just roundrobin.
+	 */
+	if (likely(!rt_task(current))) {
+		int idx = find_next_bit(rqq->bitmap, SPA_IDLE_PRIO, current->prio);
+
+		if (idx < SPA_IDLE_PRIO) {
+			if ((idx < SPA_BGND_PRIO) || task_is_bgnd(current))
+				current->prio = idx;
+			else
+				current->prio = SPA_BGND_PRIO - 1;
+		}
+	}
+	enqueue_task(current, rqq);
+
+	if (rq->nr_running == 1)
+		schedstat_inc(rq, yld_both_empty);
+
+	/*
+	 * Since we are going to call schedule() anyway, there's
+	 * no need to preempt or enable interrupts:
+	 */
+	__release(rq->lock);
+	_raw_spin_unlock(&rq->lock);
+	preempt_enable_no_resched();
+
+	schedule();
+
+	return 0;
+}
+
+static void spa_yield(void)
+{
+	set_current_state(TASK_RUNNING);
+	spa_sys_yield();
+}
+
+static void spa_init_idle(task_t *idle, int cpu)
+{
+	idle->prio = SPA_IDLE_PRIO;
+	/*
+	 * Initialize scheduling statistics counters as they may provide
+	 * valuable information about the CPU e.g. avg_cpu_time_per_cycle for the idle
+	 * task will be an estimate of the average time the CPU is idle.
+	 * sched_init() may not be ready so use INITIAL_JIFFIES instead.
+	 */
+	initialize_cpustats(idle, INITIAL_CPUSTATS_TIMESTAMP);
+}
+
+#ifdef CONFIG_SMP
+/* source and destination queues will be already locked */
+static void spa_migrate_queued_task(struct task_struct *p, int dest_cpu)
+{
+	struct runqueue *rq_src = task_rq(p);
+	struct runqueue *rq_dest = cpu_rq(dest_cpu);
+
+	deactivate_task(p, rq_src);
+	/* not the current task on its cpu so increment delay stats */
+	delta_delay_cpustats(p, adjusted_sched_clock(p));
+	set_task_cpu(p, dest_cpu);
+	activate_task(p, rq_dest);
+	preempt_if_warranted(p, rq_dest);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static	void spa_set_select_idle_first(struct runqueue *rq)
+{
+	__setscheduler(rq->idle, SCHED_FIFO, MAX_RT_PRIO - 1);
+	/* Add idle task to _front_ of its priority queue */
+	enqueue_task_head(rq->idle, &rq->qu.spa);
+	rq->nr_running++;
+}
+
+static	void spa_set_select_idle_last(struct runqueue *rq)
+{
+	deactivate_task(rq->idle, rq);
+	rq->idle->static_prio = SPA_IDLE_PRIO;
+	__setscheduler(rq->idle, SCHED_NORMAL, 0);
+}
+
+static void spa_migrate_dead_tasks(unsigned int dead_cpu)
+{
+	unsigned i;
+	struct runqueue *rq = cpu_rq(dead_cpu);
+
+	for (i = 0; i < SPA_IDLE_PRIO; i++) {
+		struct list_head *list = &rq->qu.spa.queue[i].list;
+		while (!list_empty(list))
+			migrate_dead(dead_cpu, list_entry(list->next, task_t, run_list));
+	}
+}
+#endif
+#endif
+
+static void spa_sched_init(void)
+{
+	int i, cpu;
+
+	for (i = 0; i < NR_CPUS; i++) {
+		init_runq_cpustats(i);
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+		if (zaphod_in_charge())
+			zaphod_init_cpu_runq_data(i);
+#endif
+	}
+
+	cpu = smp_processor_id();
+	init_task.sdu.spa.time_slice = HZ;
+	init_task.sdu.spa.cpu_rate_cap = PROPORTION_ONE;
+	init_task.sdu.spa.cpu_rate_hard_cap = PROPORTION_ONE;
+	init_task.sdu.spa.min_cpu_rate_cap = PROPORTION_ONE;
+	init_task.sdu.spa.sinbin_timer.function = sinbin_release_fn;
+	/* make sure that this gets set on single CPU systems */
+	init_task.sdu.spa.csrq = cpu_runq_cpustats(cpu);
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+	if (zaphod_in_charge()) {
+#ifdef CONFIG_CPUSCHED_SPA_NF
+		effective_prio = spa_zaphod_effective_prio;
+		reassess_at_activation = zaphod_reassess_at_activation;
+		spa_fork_extras = zaphod_fork;
+		spa_runq_data_tick = zaphod_runq_data_tick;
+		spa_reassess_at_end_of_ts = zaphod_reassess_at_end_of_ts;
+#endif
+		init_task.sdu.spa.zrq = zaphod_cpu_runq_data(cpu);
+		init_task.sdu.spa.zaphod = zaphod_task_data_init();
+	}
+#endif
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static void spa_normalize_rt_task(struct task_struct *p)
+{
+	int queued;
+	unsigned long flags;
+	runqueue_t *rq;
+
+	rq = task_rq_lock(p, &flags);
+
+	queued = task_is_queued(p);
+	if (queued)
+		deactivate_task(p, rq);
+	__setscheduler(p, SCHED_NORMAL, 0);
+	if (queued) {
+		__activate_task(p, rq);
+		resched_task(rq->curr);
+	}
+
+	task_rq_unlock(rq, &flags);
+}
+#endif
+
+static inline unsigned long rnd_msecs_to_jiffies(unsigned long msecs)
+{
+	return (msecs * HZ + HZ / 2) / 1000;
+}
+
+static inline unsigned long rnd_jiffies_to_msecs(unsigned long jifs)
+{
+	return (jifs * 1000 + 500) / HZ;
+}
+
+#define no_change(a) (a)
+
+SCHED_DRV_SYSFS_UINT_RW_STATIC(time_slice, rnd_msecs_to_jiffies, rnd_jiffies_to_msecs, MIN_TIMESLICE, MAX_TIMESLICE);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(sched_rr_time_slice, rnd_msecs_to_jiffies, rnd_jiffies_to_msecs, MIN_TIMESLICE, MAX_TIMESLICE);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(base_prom_interval, rnd_msecs_to_jiffies, rnd_jiffies_to_msecs, MIN_TIMESLICE, ULONG_MAX);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(log_at_exit, no_change, no_change, 0, 1);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(bgnd_time_slice_multiplier, no_change, no_change, 1, 100);
+
+static int show_cpustats(char *page)
+{
+	int i;
+	int len = 0;
+	int avail = 1;
+	struct cpu_cpustats total = {0, };
+	unsigned long long timestamp = (unsigned long long)-1LL;
+
+	for_each_online_cpu(i) {
+		struct cpu_cpustats stats;
+
+		if (get_cpu_cpustats(i, &stats) != 0) {
+			avail = 0;
+			break;
+		}
+		if (stats.timestamp < timestamp)
+			timestamp = stats.timestamp;
+		total.total_idle += stats.total_idle;
+		total.total_busy += stats.total_busy;
+		total.total_delay += stats.total_delay;
+		total.total_rt_delay += stats.total_rt_delay;
+		total.total_intr_delay += stats.total_intr_delay;
+		total.total_rt_intr_delay += stats.total_rt_intr_delay;
+		total.total_fork_delay += stats.total_fork_delay;
+		total.total_sinbin += stats.total_sinbin;
+		total.nr_switches += stats.nr_switches;
+	}
+	if (avail)
+		len = sprintf(page, "%llu %llu %llu %llu %llu %llu %llu %llu %llu @ %llu\n",
+			total.total_idle,
+			total.total_busy,
+			total.total_delay,
+			total.total_intr_delay,
+			total.total_rt_delay,
+			total.total_rt_intr_delay,
+			total.total_fork_delay,
+			total.total_sinbin,
+			total.nr_switches,
+			timestamp);
+	else
+		len = sprintf(page, "Data unavailable\n");
+
+	return len;
+}
+
+static struct sched_drv_sysfs_entry cpustats_sdse = {
+	.attr = { .name = "cpustats", .mode = S_IRUGO },
+	.show = show_cpustats,
+	.store = NULL,
+};
+
+#ifdef CONFIG_CPUSCHED_SPA_NF
+static struct attribute *spa_nf_attrs[] = {
+	&SCHED_DRV_SYSFS_ATTR(time_slice),
+	&SCHED_DRV_SYSFS_ATTR(sched_rr_time_slice),
+	&SCHED_DRV_SYSFS_ATTR(bgnd_time_slice_multiplier),
+	&SCHED_DRV_SYSFS_ATTR(base_prom_interval),
+	&SCHED_DRV_SYSFS_ATTR(log_at_exit),
+	&SCHED_DRV_SYSFS_ATTR(cpustats),
+	NULL,
+};
+#endif
+
+SCHED_DRV_DECLARE_SYSFS_ENTRY(max_ia_bonus);
+SCHED_DRV_DECLARE_SYSFS_ENTRY(initial_ia_bonus);
+SCHED_DRV_DECLARE_SYSFS_ENTRY(max_tpt_bonus);
+SCHED_DRV_DECLARE_SYSFS_ENTRY(ia_threshold);
+SCHED_DRV_DECLARE_SYSFS_ENTRY(cpu_hog_threshold);
+SCHED_DRV_DECLARE_SYSFS_ENTRY(zaphod_mode);
+
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+static struct attribute *zaphod_attrs[] = {
+	&SCHED_DRV_SYSFS_ATTR(time_slice),
+	&SCHED_DRV_SYSFS_ATTR(sched_rr_time_slice),
+	&SCHED_DRV_SYSFS_ATTR(bgnd_time_slice_multiplier),
+	&SCHED_DRV_SYSFS_ATTR(base_prom_interval),
+	&SCHED_DRV_SYSFS_ATTR(log_at_exit),
+	&SCHED_DRV_SYSFS_ATTR(cpustats),
+	&SCHED_DRV_SYSFS_ATTR(max_ia_bonus),
+	&SCHED_DRV_SYSFS_ATTR(initial_ia_bonus),
+	&SCHED_DRV_SYSFS_ATTR(max_tpt_bonus),
+	&SCHED_DRV_SYSFS_ATTR(ia_threshold),
+	&SCHED_DRV_SYSFS_ATTR(cpu_hog_threshold),
+	&SCHED_DRV_SYSFS_ATTR(zaphod_mode),
+	NULL,
+};
+#endif
+
+#ifdef CONFIG_CPUSCHED_SPA_NF
+const struct sched_drv spa_nf_sched_drv = {
+	.name = "spa_no_frills",
+	.init_runqueue_queue = spa_init_runqueue_queue,
+	.set_oom_time_slice = spa_set_oom_time_slice,
+	.task_timeslice = spa_task_timeslice,
+	.wake_up_task = spa_wake_up_task,
+	.fork = spa_fork,
+	.wake_up_new_task = spa_wake_up_new_task,
+	.exit = spa_exit,
+#ifdef CONFIG_SMP
+	.set_task_cpu = spa_nf_set_task_cpu,
+	.move_tasks = spa_move_tasks,
+#endif
+	.tick = spa_tick,
+#ifdef CONFIG_SCHED_SMT
+	.head_of_queue = spa_head_of_queue,
+	.dependent_sleeper_trumps = spa_dependent_sleeper_trumps,
+#endif
+	.schedule = spa_schedule,
+	.set_normal_task_nice = spa_set_normal_task_nice,
+	.setscheduler = spa_setscheduler,
+	.sys_yield = spa_sys_yield,
+	.yield = spa_yield,
+	.init_idle = spa_init_idle,
+	.sched_init = spa_sched_init,
+#ifdef CONFIG_SMP
+	.migrate_queued_task = spa_migrate_queued_task,
+#ifdef CONFIG_HOTPLUG_CPU
+	.set_select_idle_first = spa_set_select_idle_first,
+	.set_select_idle_last = spa_set_select_idle_last,
+	.migrate_dead_tasks = spa_migrate_dead_tasks,
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	.normalize_rt_task = spa_normalize_rt_task,
+#endif
+	.attrs = spa_nf_attrs,
+};
+#endif
+
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+const struct sched_drv zaphod_sched_drv = {
+	.name = "zaphod",
+	.init_runqueue_queue = spa_init_runqueue_queue,
+	.set_oom_time_slice = spa_set_oom_time_slice,
+	.task_timeslice = spa_task_timeslice,
+	.wake_up_task = spa_wake_up_task,
+	.fork = spa_fork,
+	.wake_up_new_task = spa_wake_up_new_task,
+	.exit = spa_exit,
+	/* .tick is set once below, after the CONFIG_SMP block (matches spa_nf) */
+#ifdef CONFIG_SMP
+	.set_task_cpu = zaphod_set_task_cpu,
+	.move_tasks = spa_move_tasks,
+#endif
+	.tick = spa_tick,
+#ifdef CONFIG_SCHED_SMT
+	.head_of_queue = spa_head_of_queue,
+	.dependent_sleeper_trumps = spa_dependent_sleeper_trumps,
+#endif
+	.schedule = spa_schedule,
+	.set_normal_task_nice = spa_set_normal_task_nice,
+	.setscheduler = spa_setscheduler,
+	.sys_yield = spa_sys_yield,
+	.yield = spa_yield,
+	.init_idle = spa_init_idle,
+	.sched_init = spa_sched_init,
+#ifdef CONFIG_SMP
+	.migrate_queued_task = spa_migrate_queued_task,
+#ifdef CONFIG_HOTPLUG_CPU
+	.set_select_idle_first = spa_set_select_idle_first,
+	.set_select_idle_last = spa_set_select_idle_last,
+	.migrate_dead_tasks = spa_migrate_dead_tasks,
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	.normalize_rt_task = spa_normalize_rt_task,
+#endif
+	.attrs = zaphod_attrs,
+};
+#endif
diff -Naur linux-2.6.12-rc2-mm3/kernel/sched_zaphod.c linux-2.6.12-rc2-mm3-plugsched/kernel/sched_zaphod.c
--- linux-2.6.12-rc2-mm3/kernel/sched_zaphod.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.12-rc2-mm3-plugsched/kernel/sched_zaphod.c	2005-04-23 13:20:23.672973104 -0700
@@ -0,0 +1,480 @@
+/*
+ *  kernel/sched_zaphod.c
+ *
+ *  CPU scheduler mode
+ *
+ *  Copyright (C) 2004  Aurema Pty Ltd
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+
+#include <asm/uaccess.h>
+
+#define MIN_NORMAL_PRIO ZAPHOD_MIN_NORMAL_PRIO
+#define IDLE_PRIO ZAPHOD_IDLE_PRIO
+#define BGND_PRIO ZAPHOD_BGND_PRIO
+#define TASK_ZD(p) (p)->sdu.spa.zaphod
+#define RUNQ_ZD(p) (p)->sdu.spa.zrq
+#define MIN_RATE_CAP(p) (p)->sdu.spa.min_cpu_rate_cap
+#define task_is_bgnd(p) (unlikely((p)->sdu.spa.cpu_rate_cap == 0))
+
+#define EB_YARDSTICK_DECAY_INTERVAL 100
+
+enum zaphod_mode_enum {
+	ZAPHOD_MODE_PRIORITY_BASED,
+	ZAPHOD_MODE_ENTITLEMENT_BASED
+};
+
+static enum zaphod_mode_enum zaphod_mode = ZAPHOD_MODE_PRIORITY_BASED;
+
+static const char *zaphod_mode_names[] = {
+	"pb",		/* ZAPHOD_MODE_PRIORITY_BASED */
+	"eb",		/* ZAPHOD_MODE_ENTITLEMENT_BASED */
+	NULL		/* end of list marker */
+};
+
+/*
+ * Convert nice to shares
+ * Proportional symmetry is aimed for: i.e.
+ * (nice_to_shares(0) / nice_to_shares(19)) == (nice_to_shares(-20) / nice_to_shares(0))
+ * Make sure that this function is robust for variations of EB_SHARES_PER_NICE
+ */
+static inline unsigned int nice_to_shares(int nice)
+{
+	unsigned int result = DEFAULT_EB_SHARES;
+
+	if (nice > 0)
+		result -= (nice * (20 * EB_SHARES_PER_NICE - 1)) / 19;
+	else if (nice < 0)
+		result += (nice * nice * ((20 * EB_SHARES_PER_NICE - 1) * EB_SHARES_PER_NICE)) / 20;
+
+	return result;
+}
+
+static inline int shares_to_nice(unsigned int shares)
+{
+	int result = 0;
+
+	if (shares > DEFAULT_EB_SHARES)
+		result = -int_sqrt((20 * (shares - DEFAULT_EB_SHARES)) /
+			(EB_SHARES_PER_NICE * (20 * EB_SHARES_PER_NICE - 1)));
+	else if (shares < DEFAULT_EB_SHARES)
+		result = (19 * (DEFAULT_EB_SHARES - shares)) /
+			 (20 * EB_SHARES_PER_NICE - 1);
+
+	return result;
+}
+
+#define MAX_TOTAL_BONUS (BGND_PRIO - ZAPHOD_MAX_PRIO - 1)
+#define MAX_MAX_IA_BONUS ((MAX_TOTAL_BONUS + 1) / 2)
+#define MAX_MAX_TPT_BONUS (MAX_TOTAL_BONUS - MAX_MAX_IA_BONUS)
+#define DEFAULT_MAX_IA_BONUS ((MAX_MAX_IA_BONUS < 7) ? MAX_MAX_IA_BONUS : 7)
+#define DEFAULT_MAX_TPT_BONUS ((DEFAULT_MAX_IA_BONUS - 2) ? : 1)
+
+
+#define SCHED_IA_BONUS_OFFSET 8
+#define SCHED_IA_BONUS_ALPHA ((1 << SCHED_IA_BONUS_OFFSET) - 1)
+#define SCHED_IA_BONUS_MUL(a, b) (((a) * (b)) >> SCHED_IA_BONUS_OFFSET)
+/*
+ * Get the rounded integer value of the interactive bonus
+ */
+#define SCHED_IA_BONUS_RND(x) \
+	(((x) + (1 << (SCHED_IA_BONUS_OFFSET - 1))) >> (SCHED_IA_BONUS_OFFSET))
+
+unsigned int max_ia_bonus = DEFAULT_MAX_IA_BONUS;
+unsigned int max_max_ia_bonus = MAX_MAX_IA_BONUS;
+unsigned int initial_ia_bonus = 1;
+unsigned int max_tpt_bonus = DEFAULT_MAX_TPT_BONUS;
+unsigned int max_max_tpt_bonus = MAX_MAX_TPT_BONUS;
+
+/*
+ * Find the square root of a proportion
+ * Require: x <= PROPORTION_ONE
+ */
+static unsigned long proportion_sqrt(unsigned long x)
+{
+	/* use 64 bits internally to avoid overflow */
+	unsigned long long res, b, ulx;
+	int bshift;
+
+	/*
+	 * Take shortcut AND prevent overflow
+	 */
+	if (x == PROPORTION_ONE)
+		return PROPORTION_ONE;
+
+	res = 0;
+	b = (1UL << (PROPORTION_OFFSET - 1));
+	bshift = PROPORTION_OFFSET - 1;
+	ulx = (unsigned long long)x << PROPORTION_OFFSET;
+
+	for (; ulx && b; b >>= 1, bshift--) {
+		unsigned long long temp = (((res << 1) + b) << bshift);
+
+		if (ulx >= temp) {
+			res += b;
+			ulx -= temp;
+		}
+	}
+
+	return res;
+}
+
+/*
+ * Tasks that have a CPU usage rate greater than this threshold (in parts per
+ * thousand) are considered to be CPU bound and start to lose interactive bonus
+ * points
+ */
+#define DEFAULT_CPU_HOG_THRESHOLD 900
+unsigned long cpu_hog_threshold = PROP_FM_PPT(DEFAULT_CPU_HOG_THRESHOLD);
+
+/*
+ * Tasks that would sleep for more than 900 parts per thousand of the time if
+ * they had the CPU to themselves are considered to be interactive provided
+ * that their average sleep duration per scheduling cycle isn't too long
+ */
+#define DEFAULT_IA_THRESHOLD 900
+unsigned long ia_threshold = PROP_FM_PPT(DEFAULT_IA_THRESHOLD);
+#define LOWER_MAX_IA_SLEEP SCHED_AVG_REAL(15 * 60LL * NSEC_PER_SEC)
+#define UPPER_MAX_IA_SLEEP SCHED_AVG_REAL(2 * 60 * 60LL * NSEC_PER_SEC)
+
+/*
+ * Calculate CPU usage rate and sleepiness.
+ * This never gets called on real time tasks
+ */
+static unsigned long calc_sleepiness(task_t *p)
+{
+	unsigned long long bl;
+
+	bl  = TASK_CPUSTATS(p).avg_sleep_per_cycle + TASK_CPUSTATS(p).avg_cpu_per_cycle;
+	/*
+	 * Take a shortcut and avoid possible divide by zero
+	 */
+	if (unlikely(bl == 0))
+		return PROPORTION_ONE;
+	else
+		return calc_proportion(TASK_CPUSTATS(p).avg_sleep_per_cycle, bl);
+}
+
+static inline void decay_sched_ia_bonus(struct task_struct *p)
+{
+	TASK_ZD(p).interactive_bonus *= SCHED_IA_BONUS_ALPHA;
+	TASK_ZD(p).interactive_bonus >>= SCHED_IA_BONUS_OFFSET;
+}
+
+/*
+ * Check whether a task with an interactive bonus still qualifies and if not
+ * decrease its bonus
+ * This never gets called on real time tasks
+ */
+static void reassess_cpu_boundness(task_t *p)
+{
+	if (max_ia_bonus == 0) {
+		TASK_ZD(p).interactive_bonus = 0;
+		return;
+	}
+	/*
+	 * No point going any further if there's no bonus to lose
+	 */
+	if (TASK_ZD(p).interactive_bonus == 0)
+		return;
+
+	if (TASK_CPUSTATS(p).cpu_usage_rate > cpu_hog_threshold)
+		decay_sched_ia_bonus(p);
+}
+
+/*
+ * Check whether a task qualifies for an interactive bonus and if it does
+ * increase its bonus
+ * This never gets called on real time tasks
+ */
+static void reassess_interactiveness(task_t *p)
+{
+	unsigned long sleepiness;
+
+	if (max_ia_bonus == 0) {
+		TASK_ZD(p).interactive_bonus = 0;
+		return;
+	}
+	/*
+	 * No sleep means not interactive (in most cases), but
+	 */
+	if (unlikely(TASK_CPUSTATS(p).avg_sleep_per_cycle > LOWER_MAX_IA_SLEEP)) {
+		/*
+		 * Really long sleeps mean it's probably not interactive
+		 */
+		if (unlikely(TASK_CPUSTATS(p).avg_sleep_per_cycle > UPPER_MAX_IA_SLEEP))
+			decay_sched_ia_bonus(p);
+		return;
+	}
+
+	sleepiness = calc_sleepiness(p);
+	if (sleepiness > ia_threshold) {
+		decay_sched_ia_bonus(p);
+		TASK_ZD(p).interactive_bonus += map_proportion_rnd(sleepiness, max_ia_bonus);
+	}
+}
+
+/*
+ * Check whether a task qualifies for a throughput bonus and if it does
+ * give it one
+ * This never gets called on real time tasks
+ */
+#define NRUN_AVG_OFFSET 6
+#define NRUN_AVG_ALPHA ((1 << NRUN_AVG_OFFSET) - 1)
+#define NRUN_AVG_ONE (1UL << NRUN_AVG_OFFSET)
+#define NRUN_AVG_MUL(a, b) (((a) * (b)) >> NRUN_AVG_OFFSET)
+static void recalc_throughput_bonus(task_t *p)
+{
+	unsigned long long ratio;
+	unsigned long long expected_delay;
+	unsigned long long adjusted_delay;
+	unsigned long long load = RUNQ_ZD(p)->avg_nr_running;
+
+	TASK_ZD(p).throughput_bonus = 0;
+	if (max_tpt_bonus == 0)
+		return;
+
+	if (load <= NRUN_AVG_ONE)
+		expected_delay = 0;
+	else
+		expected_delay = NRUN_AVG_MUL(TASK_CPUSTATS(p).avg_cpu_per_cycle, (load - NRUN_AVG_ONE));
+
+	/*
+	 * No unexpected delay means no bonus, but
+	 * NB this test also avoids a possible divide by zero error if
+	 * cpu is also zero and negative bonuses
+	 */
+	if (TASK_CPUSTATS(p).avg_delay_per_cycle <= expected_delay)
+		return;
+
+	adjusted_delay  = TASK_CPUSTATS(p).avg_delay_per_cycle - expected_delay;
+	ratio = calc_proportion(adjusted_delay, adjusted_delay + TASK_CPUSTATS(p).avg_cpu_per_cycle);
+	ratio = proportion_sqrt(ratio);
+	TASK_ZD(p).throughput_bonus = map_proportion_rnd(ratio, max_tpt_bonus);
+}
+
+/*
+ * Calculate priority based priority (without bonuses).
+ * This never gets called on real time tasks
+ */
+static void calculate_pb_pre_bonus_priority(task_t *p)
+{
+	TASK_ZD(p).pre_bonus_priority = p->static_prio + MAX_TOTAL_BONUS;
+}
+
+/*
+ * We're just trying to protect a reading and writing of the yardstick.
+ * We're not too fussed about protecting the calculation so the following is
+ * adequate
+ */
+static inline void decay_eb_yardstick(struct sched_zaphod_runq_data *zrq)
+{
+	static const unsigned long decay_per_interval = PROP_FM_PPT(990);
+	unsigned long curry = atomic_read(&zrq->eb_yardstick);
+	unsigned long pny; /* potential new yardstick */
+	struct task_struct *p = current;
+
+	curry = map_proportion(decay_per_interval, curry);
+	atomic_set(&zrq->eb_ticks_to_decay, EB_YARDSTICK_DECAY_INTERVAL);
+	if (unlikely(rt_task(p) || task_is_bgnd(p)))
+		goto out;
+	if (TASK_CPUSTATS(p).cpu_usage_rate < MIN_RATE_CAP(p))
+		pny = TASK_CPUSTATS(p).cpu_usage_rate / TASK_ZD(p).eb_shares;
+	else
+		pny = MIN_RATE_CAP(p) / TASK_ZD(p).eb_shares;
+	if (pny > curry)
+		curry = pny;
+out:
+	if (unlikely(curry >= PROPORTION_ONE))
+		curry = PROPORTION_ONE - 1;
+	atomic_set(&zrq->eb_yardstick, curry);
+}
+
+/*
+ * Calculate entitlement based priority (without bonuses).
+ * This never gets called on real time tasks
+ */
+#define EB_PAR 19
+static void calculate_eb_pre_bonus_priority(task_t *p)
+{
+	/*
+	 * Prevent possible divide by zero and take shortcut
+	 */
+	if (unlikely(MIN_RATE_CAP(p) == 0)) {
+		TASK_ZD(p).pre_bonus_priority = BGND_PRIO - 1;
+	} else if (TASK_CPUSTATS(p).cpu_usage_rate > MIN_RATE_CAP(p)) {
+		unsigned long cap_per_share = MIN_RATE_CAP(p) / TASK_ZD(p).eb_shares;
+		unsigned long prop = calc_proportion(MIN_RATE_CAP(p), TASK_CPUSTATS(p).cpu_usage_rate);
+
+		TASK_ZD(p).pre_bonus_priority = (BGND_PRIO - 1);
+		TASK_ZD(p).pre_bonus_priority -= map_proportion_rnd(prop, EB_PAR + 1);
+		if (cap_per_share > atomic_read(&RUNQ_ZD(p)->eb_yardstick)) {
+			if (likely(cap_per_share < PROPORTION_ONE))
+				atomic_set(&RUNQ_ZD(p)->eb_yardstick, cap_per_share);
+			else
+				atomic_set(&RUNQ_ZD(p)->eb_yardstick, PROPORTION_ONE - 1);
+		}
+
+	} else {
+		unsigned long usage_per_share = TASK_CPUSTATS(p).cpu_usage_rate / TASK_ZD(p).eb_shares;
+
+		if (usage_per_share > atomic_read(&RUNQ_ZD(p)->eb_yardstick)) {
+			if (likely(usage_per_share < PROPORTION_ONE))
+				atomic_set(&RUNQ_ZD(p)->eb_yardstick, usage_per_share);
+			else
+				atomic_set(&RUNQ_ZD(p)->eb_yardstick, PROPORTION_ONE - 1);
+			TASK_ZD(p).pre_bonus_priority = MAX_RT_PRIO + MAX_TOTAL_BONUS + EB_PAR;
+		} else {
+			unsigned long prop;
+
+			prop = calc_proportion(usage_per_share, atomic_read(&RUNQ_ZD(p)->eb_yardstick));
+			TASK_ZD(p).pre_bonus_priority = MAX_RT_PRIO + MAX_TOTAL_BONUS;
+			TASK_ZD(p).pre_bonus_priority += map_proportion_rnd(prop, EB_PAR);
+		}
+	}
+}
+
+static inline void calculate_pre_bonus_priority(task_t *p)
+{
+	if (zaphod_mode == ZAPHOD_MODE_ENTITLEMENT_BASED)
+		calculate_eb_pre_bonus_priority(p);
+	else
+		calculate_pb_pre_bonus_priority(p);
+}
+
+static DEFINE_PER_CPU(struct sched_zaphod_runq_data, zaphod_runqs);
+
+void zaphod_init_cpu_runq_data(unsigned int cpu)
+{
+	struct sched_zaphod_runq_data *zrq = &per_cpu(zaphod_runqs, cpu);
+
+	zrq->avg_nr_running = 0;
+	atomic_set(&zrq->eb_yardstick, 0);
+	atomic_set(&zrq->eb_ticks_to_decay, EB_YARDSTICK_DECAY_INTERVAL + cpu);
+}
+
+struct sched_zaphod_runq_data *zaphod_cpu_runq_data(unsigned int cpu)
+{
+	return &per_cpu(zaphod_runqs, cpu);
+}
+
+void zaphod_runq_data_tick(struct sched_zaphod_runq_data *zrq, unsigned long numr)
+{
+	unsigned long nval = NRUN_AVG_MUL(zrq->avg_nr_running, NRUN_AVG_ALPHA);
+	nval += numr;
+
+	zrq->avg_nr_running = nval;
+
+	if (atomic_dec_and_test(&zrq->eb_ticks_to_decay))
+		decay_eb_yardstick(zrq);
+}
+
+void zaphod_fork(struct task_struct *p)
+{
+	TASK_ZD(p).interactive_bonus = (max_ia_bonus >= initial_ia_bonus) ?
+				initial_ia_bonus : max_ia_bonus;
+	TASK_ZD(p).throughput_bonus =  0;
+}
+
+unsigned int zaphod_effective_prio(struct task_struct *p)
+{
+	unsigned int bonus = 0;
+
+	/* no bonuses for tasks that have exceeded their cap */
+	if (likely(TASK_CPUSTATS(p).cpu_usage_rate < MIN_RATE_CAP(p))) {
+		bonus = SCHED_IA_BONUS_RND(TASK_ZD(p).interactive_bonus);
+		bonus += TASK_ZD(p).throughput_bonus;
+	}
+
+	return TASK_ZD(p).pre_bonus_priority - bonus;
+}
+
+void zaphod_reassess_at_activation(struct task_struct *p)
+{
+	recalc_throughput_bonus(p);
+	reassess_interactiveness(p);
+	calculate_pre_bonus_priority(p);
+}
+
+void zaphod_reassess_at_end_of_ts(struct task_struct *p)
+{
+	recalc_throughput_bonus(p);
+	reassess_cpu_boundness(p);
+	/*
+	 * Arguably the interactive bonus should be updated here
+	 * as well.  But depends on whether we wish to encourage
+	 * interactive tasks to maintain a high bonus or CPU bound
+	 * tasks to lose some of their bonus?
+	 */
+	calculate_pre_bonus_priority(p);
+}
+
+void zaphod_reassess_at_sinbin_release(struct task_struct *p)
+{
+	calculate_pre_bonus_priority(p);
+}
+
+void zaphod_reassess_at_renice(struct task_struct *p)
+{
+	TASK_ZD(p).eb_shares = nice_to_shares(task_nice(p));
+	if (!rt_task(p))
+		calculate_pre_bonus_priority(p);
+}
+
+#include <linux/sched_pvt.h>
+
+#define no_change(a) (a)
+SCHED_DRV_SYSFS_UINT_RW(max_ia_bonus, no_change, no_change, 0, max_max_ia_bonus);
+SCHED_DRV_SYSFS_UINT_RW(initial_ia_bonus, no_change, no_change, 0, max_max_ia_bonus);
+SCHED_DRV_SYSFS_UINT_RW(max_tpt_bonus, no_change, no_change, 0, max_max_tpt_bonus);
+SCHED_DRV_SYSFS_UINT_RW(ia_threshold, ppt_to_proportion, proportion_to_ppt, 0, 1000);
+SCHED_DRV_SYSFS_UINT_RW(cpu_hog_threshold, ppt_to_proportion, proportion_to_ppt, 0, 1000);
+
+static ssize_t show_zaphod_mode(char *page)
+{
+	return sprintf(page, "%s\n", zaphod_mode_names[zaphod_mode]);
+}
+
+static ssize_t store_zaphod_mode(const char *page, size_t count)
+{
+	int i;
+	int clen = strlen(page);
+
+	{
+		char *nlp = strrchr(page, '\n');
+
+		if (nlp != NULL)
+			clen = nlp - page;
+	}
+
+	for (i = 0; zaphod_mode_names[i] != NULL; i++)
+		if (clen == strlen(zaphod_mode_names[i]) && strncmp(page, zaphod_mode_names[i], clen) == 0)
+			break;
+	if (zaphod_mode_names[i] == NULL)
+		return -EINVAL;
+	else /* set the zaphod mode */
+		zaphod_mode = i;
+
+	return count;
+}
+
+struct sched_drv_sysfs_entry zaphod_mode_sdse = {
+	.attr = { .name = "mode", .mode = S_IRUGO | S_IWUSR },
+	.show = show_zaphod_mode,
+	.store = store_zaphod_mode,
+};
diff -Naur linux-2.6.12-rc2-mm3/kernel/staircase.c linux-2.6.12-rc2-mm3-plugsched/kernel/staircase.c
--- linux-2.6.12-rc2-mm3/kernel/staircase.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.12-rc2-mm3-plugsched/kernel/staircase.c	2005-04-23 13:20:23.673972952 -0700
@@ -0,0 +1,1017 @@
+/*
+ *  kernel/staircase.c
+ *  Copyright (C) 1991-2005  Linus Torvalds
+ *
+ * 2005-02-13 Staircase scheduler by Con Kolivas
+ */
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/rcupdate.h>
+#include <linux/security.h>
+#include <linux/cpu.h>
+#include <linux/hardirq.h>
+#include <linux/sched_pvt.h>
+#include <linux/sched_runq.h>
+
+/*
+ * Unique staircase process flags used by scheduler.
+ */
+#define SF_FORKED	0x00000001	/* I have just forked */
+#define SF_YIELDED	0x00000002	/* I have just yielded */
+#define SF_UISLEEP	0x00000004	/* Uninterruptible sleep */
+
+#define task_is_queued(p) (!list_empty(&(p)->run_list))
+
+static void staircase_init_runqueue_queue(union runqueue_queue *qup)
+{
+	int k;
+
+	qup->staircase.cache_ticks = 0;
+	qup->staircase.preempted = 0;
+
+	for (k = 0; k < STAIRCASE_MAX_PRIO; k++) {
+		INIT_LIST_HEAD(qup->staircase.queue + k);
+		__clear_bit(k, qup->staircase.bitmap);
+	}
+	// delimiter for bitsearch
+	__set_bit(STAIRCASE_MAX_PRIO, qup->staircase.bitmap);
+}
+
+static void staircase_set_oom_time_slice(struct task_struct *p, unsigned long t)
+{
+	p->sdu.staircase.slice = p->sdu.staircase.time_slice = t;
+}
+
+/*
+ * 'User priority' is the nice value converted to something we
+ * can work with better when scaling various scheduler parameters,
+ * it's a [ 0 ... 39 ] range.
+ */
+#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
+#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
+#define MAX_USER_PRIO		(USER_PRIO(STAIRCASE_MAX_PRIO))
+
+/*
+ * Some helpers for converting nanosecond timing to jiffy resolution
+ */
+#define NS_TO_JIFFIES(TIME)	((TIME) / (1000000000 / HZ))
+#define NSJIFFY			(1000000000 / HZ)	/* One jiffy in ns */
+
+int sched_compute = 0;
+/*
+ * This is the time all tasks within the same priority round robin.
+ * The compute setting is reserved for dedicated computational scheduling
+ * and has ten times larger intervals.
+ */
+#define _RR_INTERVAL		((10 * HZ / 1000) ? : 1)
+#define RR_INTERVAL()		(_RR_INTERVAL * (1 + 9 * sched_compute))
+
+#define TASK_PREEMPTS_CURR(p, rq) \
+	((p)->prio < (rq)->curr->prio)
+
+/*
+ * Get nanosecond clock difference without overflowing unsigned long.
+ */
+static unsigned long ns_diff(unsigned long long v1, unsigned long long v2)
+{
+	unsigned long long vdiff;
+	if (unlikely(v1 < v2))
+		/*
+		 * Rarely the clock goes backwards. There should always be
+		 * a positive difference so return 1.
+		 */
+		vdiff = 1;
+	else
+		vdiff = v1 - v2;
+	if (vdiff > (1 << 31))
+		vdiff = 1 << 31;
+	return (unsigned long)vdiff;
+}
+
+/*
+ * Adding/removing a task to/from a priority array:
+ */
+static void dequeue_task(struct task_struct *p, struct staircase_runqueue_queue *rqq)
+{
+	list_del_init(&p->run_list);
+	if (list_empty(rqq->queue + p->prio))
+		__clear_bit(p->prio, rqq->bitmap);
+	p->sdu.staircase.ns_debit = 0;
+}
+
+static void enqueue_task(struct task_struct *p, struct staircase_runqueue_queue *rqq)
+{
+	sched_info_queued(p);
+	list_add_tail(&p->run_list, rqq->queue + p->prio);
+	__set_bit(p->prio, rqq->bitmap);
+}
+
+static void requeue_task(struct task_struct *p, struct staircase_runqueue_queue *rq)
+{
+	list_move_tail(&p->run_list, rq->queue + p->prio);
+}
+
+/*
+ * Used by the migration code - we pull tasks from the head of the
+ * remote queue so we want these tasks to show up at the head of the
+ * local queue:
+ */
+static inline void enqueue_task_head(struct task_struct *p, struct staircase_runqueue_queue *rqq)
+{
+	list_add(&p->run_list, rqq->queue + p->prio);
+	__set_bit(p->prio, rqq->bitmap);
+}
+
+/*
+ * __activate_task - move a task to the runqueue.
+ */
+static inline void __activate_task(task_t *p, runqueue_t *rq)
+{
+	enqueue_task(p, &rq->qu.staircase);
+	rq->nr_running++;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * __activate_idle_task - move idle task to the _front_ of runqueue.
+ */
+static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
+{
+	enqueue_task_head(p, &rq->qu.staircase);
+	rq->nr_running++;
+}
+#endif
+
+/*
+ * burst - extra intervals an interactive task can run for at best priority
+ * instead of descending priorities.
+ */
+static unsigned int burst(const task_t *p)
+{
+	if (likely(!rt_task(p))) {
+		unsigned int task_user_prio = TASK_USER_PRIO(p);
+		return 39 - task_user_prio;
+	} else
+		return p->sdu.staircase.burst;
+}
+
+static void inc_burst(task_t *p)
+{
+	unsigned int best_burst;
+	best_burst = burst(p);
+	if (p->sdu.staircase.burst < best_burst)
+		p->sdu.staircase.burst++;
+}
+
+static void dec_burst(task_t *p)
+{
+	if (p->sdu.staircase.burst)
+		p->sdu.staircase.burst--;
+}
+
+static unsigned int rr_interval(const task_t * p)
+{
+	unsigned int rr_interval = RR_INTERVAL();
+	int nice = TASK_NICE(p);
+
+	if (nice < 0 && !rt_task(p))
+		rr_interval += -(nice);
+
+	return rr_interval;
+}
+
+/*
+ * slice - the duration a task runs before getting requeued at its best
+ * priority and has its burst decremented.
+ */
+static unsigned int slice(const task_t *p)
+{
+	unsigned int slice, rr;
+
+	slice = rr = rr_interval(p);
+	if (likely(!rt_task(p)))
+		slice += burst(p) * rr;
+
+	return slice;
+}
+
+/*
+ * sched_interactive - sysctl which allows interactive tasks to have bursts
+ */
+int sched_interactive = 1;
+
+/*
+ * effective_prio - dynamic priority dependent on burst.
+ * The priority normally decreases by one each RR_INTERVAL.
+ * As the burst increases the priority stays at the top "stair" or
+ * priority for longer.
+ */
+static int effective_prio(task_t *p)
+{
+	int prio;
+	unsigned int full_slice, used_slice, first_slice;
+	unsigned int best_burst, rr;
+	if (rt_task(p))
+		return p->prio;
+
+	best_burst = burst(p);
+	full_slice = slice(p);
+	rr = rr_interval(p);
+	used_slice = full_slice - p->sdu.staircase.slice;
+	if (p->sdu.staircase.burst > best_burst)
+		p->sdu.staircase.burst = best_burst;
+	first_slice = rr;
+	if (sched_interactive && !sched_compute && p->mm)
+		first_slice *= (p->sdu.staircase.burst + 1);
+	prio = STAIRCASE_MAX_PRIO - 1 - best_burst;
+
+	if (used_slice < first_slice)
+		return prio;
+	prio += 1 + (used_slice - first_slice) / rr;
+	if (prio > STAIRCASE_MAX_PRIO - 1)
+		prio = STAIRCASE_MAX_PRIO - 1;
+
+	return prio;
+}
+
+static void continue_slice(task_t *p)
+{
+	unsigned long total_run = NS_TO_JIFFIES(p->sdu.staircase.totalrun);
+
+	if (total_run >= p->sdu.staircase.slice) {
+ 		p->sdu.staircase.totalrun = 0;
+		dec_burst(p);
+	} else {
+		unsigned int remainder;
+		p->sdu.staircase.slice -= total_run;
+		remainder = p->sdu.staircase.slice % rr_interval(p);
+		if (remainder)
+			p->sdu.staircase.time_slice = remainder;
+ 	}
+}
+
+/*
+ * recalc_task_prio - this checks for tasks that run ultra short timeslices
+ * or have just forked a thread/process and makes them continue their old
+ * slice instead of starting a new one at high priority.
+ */
+static void recalc_task_prio(task_t *p, unsigned long long now, unsigned long rq_load)
+{
+	unsigned long sleep_time;
+
+	if (rq_load > 31)
+		rq_load = 31;
+	sleep_time = ns_diff(now, p->timestamp) / (1 << rq_load);
+
+	p->sdu.staircase.totalrun += p->sdu.staircase.runtime;
+	if (NS_TO_JIFFIES(p->sdu.staircase.totalrun) >= p->sdu.staircase.slice &&
+		NS_TO_JIFFIES(sleep_time) < p->sdu.staircase.slice) {
+			p->sdu.staircase.sflags &= ~SF_FORKED;
+			dec_burst(p);
+			goto new_slice;
+	}
+
+	if (p->sdu.staircase.sflags & SF_FORKED) {
+		continue_slice(p);
+		p->sdu.staircase.sflags &= ~SF_FORKED;
+		return;
+	}
+
+	if (sched_compute) {
+		continue_slice(p);
+		return;
+	}
+
+	if (sleep_time >= p->sdu.staircase.totalrun) {
+		if (!(p->sdu.staircase.sflags & SF_UISLEEP) && (NS_TO_JIFFIES(sleep_time -
+			p->sdu.staircase.totalrun) > p->sdu.staircase.burst * rr_interval(p)))
+			inc_burst(p);
+		goto new_slice;
+	}
+
+	p->sdu.staircase.totalrun -= sleep_time;
+	continue_slice(p);
+	return;
+new_slice:
+	p->sdu.staircase.totalrun = 0;
+}
+
+/*
+ * activate_task - move a task to the runqueue and do priority recalculation
+ *
+ * Update all the scheduling statistics stuff. (sleep average
+ * calculation, priority modifiers, etc.)
+ */
+static void activate_task(task_t *p, runqueue_t *rq, int local)
+{
+	unsigned long long now;
+
+	now = sched_clock();
+#ifdef CONFIG_SMP
+	if (!local) {
+		/* Compensate for drifting sched_clock */
+		runqueue_t *this_rq = this_rq();
+		now = (now - this_rq->timestamp_last_tick)
+			+ rq->timestamp_last_tick;
+	}
+#endif
+	p->sdu.staircase.slice = slice(p);
+	p->sdu.staircase.time_slice = rr_interval(p);
+	recalc_task_prio(p, now, rq->nr_running);
+	p->sdu.staircase.sflags &= ~SF_UISLEEP;
+	p->prio = effective_prio(p);
+	p->timestamp = now;
+	__activate_task(p, rq);
+}
+
+/*
+ * deactivate_task - remove a task from the runqueue.
+ */
+static void deactivate_task(struct task_struct *p, runqueue_t *rq)
+{
+	rq->nr_running--;
+	dequeue_task(p, &rq->qu.staircase);
+}
+
+/*
+ * cache_delay is the time preemption is delayed in sched_compute mode
+ * and is set to 5*cache_decay_ticks on SMP or a nominal 10ms on UP.
+ */
+static int cache_delay = 10 * HZ / 1000;
+
+/*
+ * Check to see if p preempts rq->curr and resched if it does. In compute
+ * mode we do not preempt for at least cache_delay and set rq->preempted.
+ */
+static void preempt(task_t *p, struct runqueue *rq)
+{
+	if (!TASK_PREEMPTS_CURR(p, rq))
+		return;
+
+	if (p->prio == rq->curr->prio &&
+		((p->sdu.staircase.totalrun || p->sdu.staircase.slice != slice(p)) ||
+		rt_task(rq->curr)))
+			return;
+
+	if (!sched_compute || rq->qu.staircase.cache_ticks >= cache_delay ||
+		!p->mm || rt_task(p))
+			resched_task(rq->curr);
+	rq->qu.staircase.preempted = 1;
+}
+
+/***
+ * try_to_wake_up - wake up a thread
+ * @p: the to-be-woken-up thread
+ * @old_state: the task's state before being woken
+ * @sync: do a synchronous wakeup?
+ * @rq: The run queue on which the task is to be placed (already locked)
+ */
+static void staircase_wake_up_task(struct task_struct *p, struct runqueue *rq, unsigned int old_state, int sync)
+{
+	int same_cpu = (rq == this_rq());
+
+	if (old_state == TASK_UNINTERRUPTIBLE)
+		rq->nr_uninterruptible--;
+
+	/*
+	 * Sync wakeups (i.e. those types of wakeups where the waker
+	 * has indicated that it will leave the CPU in short order)
+	 * don't trigger a preemption, if the woken up task will run on
+	 * this cpu. (in this case the 'I will reschedule' promise of
+	 * the waker guarantees that the freshly woken up task is going
+	 * to be considered on this CPU.)
+	 */
+	activate_task(p, rq, same_cpu);
+	if (!sync || !same_cpu)
+		preempt(p, rq);
+}
+
+/*
+ * Perform scheduler related setup for a newly forked process p.
+ * p is forked by current.
+ */
+static void staircase_fork(task_t *p)
+{
+}
+
+/*
+ * wake_up_new_task - wake up a newly created task for the first time.
+ *
+ * This function will do some initial scheduler statistics housekeeping
+ * that must be done for every newly created context, then puts the task
+ * on the runqueue and wakes it.
+ */
+static void staircase_wake_up_new_task(task_t * p, unsigned long clone_flags)
+{
+	unsigned long flags;
+	int this_cpu, cpu;
+	runqueue_t *rq, *this_rq;
+
+	rq = task_rq_lock(p, &flags);
+	cpu = task_cpu(p);
+	this_cpu = smp_processor_id();
+
+	BUG_ON(p->state != TASK_RUNNING);
+
+	/*
+	 * Forked process gets no burst to prevent fork bombs.
+	 */
+	p->sdu.staircase.burst = 0;
+
+	if (likely(cpu == this_cpu)) {
+		current->sdu.staircase.sflags |= SF_FORKED;
+
+		if (!(clone_flags & CLONE_VM)) {
+			/*
+			 * The VM isn't cloned, so we're in a good position to
+			 * do child-runs-first in anticipation of an exec. This
+			 * usually avoids a lot of COW overhead.
+			 */
+			if (unlikely(!task_is_queued(current))) {
+				p->prio = effective_prio(p);
+				__activate_task(p, rq);
+			} else {
+				p->prio = current->prio;
+				list_add_tail(&p->run_list, &current->run_list);
+				rq->nr_running++;
+			}
+			set_need_resched();
+		} else {
+			p->prio = effective_prio(p);
+			/* Run child last */
+			__activate_task(p, rq);
+		}
+		/*
+		 * We skip the following code due to cpu == this_cpu
+		 */
+		this_rq = rq;
+	} else {
+		this_rq = cpu_rq(this_cpu);
+
+		/*
+		 * Not the local CPU - must adjust timestamp. This should
+		 * get optimised away in the !CONFIG_SMP case.
+		 */
+		p->timestamp = (p->timestamp - this_rq->timestamp_last_tick)
+					+ rq->timestamp_last_tick;
+		p->prio = effective_prio(p);
+		__activate_task(p, rq);
+		preempt(p, rq);
+
+		/*
+		 * Parent and child are on different CPUs: lock the parent's
+		 * runqueue so we can flag the parent with SF_FORKED below:
+		 */
+		task_rq_unlock(rq, &flags);
+		this_rq = task_rq_lock(current, &flags);
+		current->sdu.staircase.sflags |= SF_FORKED;
+	}
+
+	task_rq_unlock(this_rq, &flags);
+}
+
+/*
+ * Scheduler hook called when a task exits.
+ *
+ * In the original scheduler this reclaimed any timeslice the
+ * exiting child still held, so the parent was not penalized
+ * for creating too many threads.  The staircase scheduler does
+ * not hand timeslice to children in the first place, so there
+ * is nothing to retrieve and this hook is a no-op.
+ */
+static void staircase_exit(task_t * p)
+{
+}
+
+/*
+ * pull_task - move a task from a remote runqueue to the local runqueue.
+ * Both runqueues must be locked.
+ */
+static inline
+void pull_task(runqueue_t *src_rq, task_t *p, runqueue_t *this_rq, int this_cpu)
+{
+	dequeue_task(p, &src_rq->qu.staircase);
+	src_rq->nr_running--;
+	set_task_cpu(p, this_cpu);
+	this_rq->nr_running++;
+	enqueue_task(p, &this_rq->qu.staircase);
+	p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
+				+ this_rq->timestamp_last_tick;
+	/*
+	 * Note that idle threads have a prio of STAIRCASE_MAX_PRIO, so this
+	 * test is always true for them.
+	 */
+	preempt(p, this_rq);
+}
+
+#ifdef CONFIG_SMP
+/*
+ * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq,
+ * as part of a balancing operation within "domain". Returns the number of
+ * tasks moved.
+ *
+ * Called with both runqueues locked.
+ */
+static int staircase_move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
+		      unsigned long max_nr_move, struct sched_domain *sd,
+		      enum idle_type idle)
+{
+	struct list_head *head, *curr;
+	int idx, pulled = 0;
+	task_t *tmp;
+
+	if (max_nr_move <= 0 || busiest->nr_running <= 1)
+		goto out;
+
+	/* Start searching at priority 0: */
+	idx = 0;
+skip_bitmap:
+	if (!idx)
+		idx = sched_find_first_bit(busiest->qu.staircase.bitmap);
+	else
+		idx = find_next_bit(busiest->qu.staircase.bitmap, STAIRCASE_MAX_PRIO, idx);
+	if (idx >= STAIRCASE_MAX_PRIO)
+		goto out;
+
+	head = busiest->qu.staircase.queue + idx;
+	curr = head->prev;
+skip_queue:
+	tmp = list_entry(curr, task_t, run_list);
+
+	curr = curr->prev;
+
+	if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle)) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+
+#ifdef CONFIG_SCHEDSTATS
+	if (task_hot(tmp, busiest->timestamp_last_tick, sd))
+		schedstat_inc(sd, lb_hot_gained[idle]);
+#endif
+
+	pull_task(busiest, tmp, this_rq, this_cpu);
+	pulled++;
+
+	/* We only want to steal up to the prescribed number of tasks. */
+	if (pulled < max_nr_move) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+out:
+	return pulled;
+}
+#endif
+
+static void time_slice_expired(task_t *p, runqueue_t *rq)
+{
+	struct staircase_runqueue_queue *rqq = &rq->qu.staircase;
+
+	set_tsk_need_resched(p);
+	dequeue_task(p, rqq);
+	p->prio = effective_prio(p);
+	p->sdu.staircase.time_slice = rr_interval(p);
+	enqueue_task(p, rqq);
+}
+
+/*
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled.
+ */
+static void staircase_tick(struct task_struct *p, struct runqueue *rq, unsigned long long now)
+{
+	int cpu = smp_processor_id();
+	unsigned long debit;
+
+	if (p == rq->idle) {
+		if (wake_priority_sleeper(rq))
+			goto out;
+		rebalance_tick(cpu, rq, SCHED_IDLE);
+		return;
+	}
+
+	/* Task might have expired already, but not scheduled off yet */
+	if (unlikely(!task_is_queued(p))) {
+		set_tsk_need_resched(p);
+		goto out;
+	}
+
+	/*
+	 * SCHED_FIFO tasks never run out of timeslice.
+	 */
+	if (unlikely(p->policy == SCHED_FIFO))
+		goto out;
+
+	spin_lock(&rq->lock);
+	debit = ns_diff(rq->timestamp_last_tick, p->timestamp);
+	p->sdu.staircase.ns_debit += debit;
+	if (p->sdu.staircase.ns_debit < NSJIFFY)
+		goto out_unlock;
+	p->sdu.staircase.ns_debit %= NSJIFFY;
+	/*
+	 * Tasks lose burst each time they use up a full slice().
+	 */
+	if (!--p->sdu.staircase.slice) {
+		dec_burst(p);
+		p->sdu.staircase.slice = slice(p);
+		time_slice_expired(p, rq);
+		p->sdu.staircase.totalrun = 0;
+		goto out_unlock;
+	}
+	/*
+	 * Tasks that run out of time_slice but still have slice left get
+	 * requeued with a lower priority && RR_INTERVAL time_slice.
+	 */
+	if (!--p->sdu.staircase.time_slice) {
+		time_slice_expired(p, rq);
+		goto out_unlock;
+	}
+	rq->qu.staircase.cache_ticks++;
+	if (rq->qu.staircase.preempted && rq->qu.staircase.cache_ticks >= cache_delay)
+		set_tsk_need_resched(p);
+out_unlock:
+	spin_unlock(&rq->lock);
+out:
+	rebalance_tick(cpu, rq, NOT_IDLE);
+}
+
+#ifdef CONFIG_SCHED_SMT
+static struct task_struct *staircase_head_of_queue(union runqueue_queue *rqq)
+{
+	return list_entry(rqq->staircase.queue[sched_find_first_bit(rqq->staircase.bitmap)].next,
+		task_t, run_list);
+}
+
+static int staircase_dependent_sleeper_trumps(const struct task_struct *p1,
+	const struct task_struct * p2, struct sched_domain *sd)
+{
+	return ((p1->sdu.staircase.time_slice * (100 - sd->per_cpu_gain) / 100) >
+			slice(p2) || rt_task(p1)) &&
+			p2->mm && p1->mm && !rt_task(p2);
+}
+#endif
+
+/*
+ * schedule() is the main scheduler function.
+ */
+static void staircase_schedule(void)
+{
+	long *switch_count;
+	int cpu, idx;
+	struct task_struct *prev = current, *next;
+	struct runqueue *rq = this_rq();
+	unsigned long long now = sched_clock();
+	unsigned long debit;
+	struct list_head *queue;
+
+	spin_lock_irq(&rq->lock);
+
+	prev->sdu.staircase.runtime = ns_diff(now, prev->timestamp);
+	debit = ns_diff(now, rq->timestamp_last_tick) % NSJIFFY;
+	prev->sdu.staircase.ns_debit += debit;
+
+	if (unlikely(current->flags & PF_DEAD))
+		current->state = EXIT_DEAD;
+	/*
+	 * if entering off of a kernel preemption go straight
+	 * to picking the next task.
+	 */
+	switch_count = &prev->nivcsw;
+	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+		switch_count = &prev->nvcsw;
+		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+				unlikely(signal_pending(prev))))
+			prev->state = TASK_RUNNING;
+		else {
+			if (prev->state == TASK_UNINTERRUPTIBLE) {
+				rq->nr_uninterruptible++;
+				prev->sdu.staircase.sflags |= SF_UISLEEP;
+			}
+			deactivate_task(prev, rq);
+		}
+	}
+
+	cpu = smp_processor_id();
+	if (unlikely(!rq->nr_running)) {
+go_idle:
+		idle_balance(cpu, rq);
+		if (!rq->nr_running) {
+			next = rq->idle;
+			wake_sleeping_dependent(cpu, rq);
+			/*
+			 * wake_sleeping_dependent() might have released
+			 * the runqueue, so break out if we got new
+			 * tasks meanwhile:
+			 */
+			if (!rq->nr_running)
+				goto switch_tasks;
+		}
+	} else {
+		if (dependent_sleeper(cpu, rq)) {
+			next = rq->idle;
+			goto switch_tasks;
+		}
+		/*
+		 * dependent_sleeper() releases and reacquires the runqueue
+		 * lock, hence go into the idle loop if the rq went
+		 * empty meanwhile:
+		 */
+		if (unlikely(!rq->nr_running))
+			goto go_idle;
+	}
+
+	idx = sched_find_first_bit(rq->qu.staircase.bitmap);
+	queue = rq->qu.staircase.queue + idx;
+	next = list_entry(queue->next, task_t, run_list);
+switch_tasks:
+	if (next == rq->idle)
+		schedstat_inc(rq, sched_goidle);
+	prefetch(next);
+	clear_tsk_need_resched(prev);
+	rcu_qsctr_inc(task_cpu(prev));
+
+	update_cpu_clock(prev, rq, now);
+	prev->timestamp = prev->last_ran = now;
+	if (next->sdu.staircase.sflags & SF_YIELDED) {
+		/*
+		 * Tasks that have yield()ed get requeued at normal priority
+		 */
+		int newprio = effective_prio(next);
+		next->sdu.staircase.sflags &= ~SF_YIELDED;
+		if (newprio != next->prio) {
+			struct staircase_runqueue_queue *rqq = &rq->qu.staircase;
+
+			dequeue_task(next, rqq);
+			next->prio = newprio;
+			enqueue_task_head(next, rqq);
+		}
+	}
+
+	sched_info_switch(prev, next);
+	if (likely(prev != next)) {
+		rq->qu.staircase.preempted = 0;
+		rq->qu.staircase.cache_ticks = 0;
+		next->timestamp = now;
+		rq->nr_switches++;
+		rq->curr = next;
+		++*switch_count;
+
+		prepare_arch_switch(rq, next);
+		prev = context_switch(rq, prev, next);
+		barrier();
+
+		finish_task_switch(prev);
+	} else
+		spin_unlock_irq(&rq->lock);
+}
+
+static void staircase_set_normal_task_nice(task_t *p, long nice)
+{
+	int queued;
+	int old_prio, new_prio, delta;
+	struct runqueue *rq = task_rq(p);
+	struct staircase_runqueue_queue *rqq = &rq->qu.staircase;
+
+	queued = task_is_queued(p);
+	if (queued)
+		dequeue_task(p, rqq);
+
+	old_prio = p->prio;
+	new_prio = NICE_TO_PRIO(nice);
+	delta = new_prio - old_prio;
+	p->static_prio = NICE_TO_PRIO(nice);
+	p->prio += delta;
+
+	if (queued) {
+		enqueue_task(p, rqq);
+		/*
+		 * If the task increased its priority or is running and
+		 * lowered its priority, then reschedule its CPU:
+		 */
+		if (delta < 0 || (delta > 0 && task_running(rq, p)))
+			resched_task(rq->curr);
+	}
+}
+
+/*
+ * setscheduler - change the scheduling policy and/or RT priority of a thread.
+ */
+static void staircase_setscheduler(task_t *p, int policy, int prio)
+{
+	int oldprio;
+	int queued;
+	runqueue_t *rq = task_rq(p);
+
+	queued = task_is_queued(p);
+	if (queued)
+		deactivate_task(p, rq);
+	oldprio = p->prio;
+	__setscheduler(p, policy, prio);
+	if (queued) {
+		__activate_task(p, rq);
+		/*
+		 * Reschedule if we are currently running on this runqueue and
+		 * our priority decreased, or if we are not currently running on
+		 * this runqueue and our priority is higher than the current's
+		 */
+		if (task_running(rq, p)) {
+			if (p->prio > oldprio)
+				resched_task(rq->curr);
+		} else
+			preempt(p, rq);
+	}
+}
+
+/**
+ * sys_sched_yield - yield the current processor to other threads.
+ *
+ * This function yields the current CPU by requeueing the calling
+ * thread at the lowest non-RT priority.  If there are no other
+ * threads running on this CPU then this function will return.
+ */
+
+static long staircase_sys_yield(void)
+{
+	int newprio;
+	runqueue_t *rq = this_rq_lock();
+	struct staircase_runqueue_queue *rqq = &rq->qu.staircase;
+
+	schedstat_inc(rq, yld_cnt);
+	newprio = current->prio;
+	current->sdu.staircase.slice = slice(current);
+	current->sdu.staircase.time_slice = rr_interval(current);
+	if (likely(!rt_task(current))) {
+		current->sdu.staircase.sflags |= SF_YIELDED;
+		newprio = STAIRCASE_MAX_PRIO - 1;
+	}
+
+	if (newprio != current->prio) {
+		dequeue_task(current, rqq);
+		current->prio = newprio;
+		enqueue_task(current, rqq);
+	} else
+		requeue_task(current, rqq);
+
+	if (rq->nr_running == 1)
+		schedstat_inc(rq, yld_both_empty);
+
+	/*
+	 * Since we are going to call schedule() anyway, there's
+	 * no need to preempt or enable interrupts:
+	 */
+	__release(rq->lock);
+	_raw_spin_unlock(&rq->lock);
+	preempt_enable_no_resched();
+
+	schedule();
+
+	return 0;
+}
+
+static void staircase_yield(void)
+{
+	set_current_state(TASK_RUNNING);
+	staircase_sys_yield();
+}
+
+static void staircase_init_idle(task_t *idle, int cpu)
+{
+	idle->prio = STAIRCASE_MAX_PRIO;
+}
+
+#ifdef CONFIG_SMP
+/* source and destination queues will be already locked */
+static void staircase_migrate_queued_task(struct task_struct *p, int dest_cpu)
+{
+	struct runqueue *rq_src = task_rq(p);
+	struct runqueue *rq_dest = cpu_rq(dest_cpu);
+
+	/*
+	 * Sync timestamp with rq_dest's before activating.
+	 * The same thing could be achieved by doing this step
+	 * afterwards, and pretending it was a local activate.
+	 * This way is cleaner and logically correct.
+	 */
+	p->timestamp = p->timestamp - rq_src->timestamp_last_tick
+			+ rq_dest->timestamp_last_tick;
+	deactivate_task(p, rq_src);
+	set_task_cpu(p, dest_cpu);
+	activate_task(p, rq_dest, 0);
+	preempt(p, rq_dest);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static	void staircase_set_select_idle_first(struct runqueue *rq)
+{
+	__setscheduler(rq->idle, SCHED_FIFO, MAX_RT_PRIO-1);
+	/* Add idle task to _front_ of it's priority queue */
+	__activate_idle_task(rq->idle, rq);
+}
+
+static	void staircase_set_select_idle_last(struct runqueue *rq)
+{
+	deactivate_task(rq->idle, rq);
+	rq->idle->static_prio = STAIRCASE_MAX_PRIO;
+	__setscheduler(rq->idle, SCHED_NORMAL, 0);
+}
+
+static void staircase_migrate_dead_tasks(unsigned int dead_cpu)
+{
+	unsigned i;
+	struct runqueue *rq = cpu_rq(dead_cpu);
+
+	for (i = 0; i < STAIRCASE_MAX_PRIO; i++) {
+		struct list_head *list = &rq->qu.staircase.queue[i];
+		while (!list_empty(list))
+			migrate_dead(dead_cpu, list_entry(list->next, task_t, run_list));
+	}
+}
+#endif
+#endif
+
+static void staircase_sched_init(void)
+{
+	init_task.sdu.staircase.time_slice = HZ;
+	init_task.sdu.staircase.slice = HZ;
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static void staircase_normalize_rt_task(struct task_struct *p)
+{
+	int queued;
+	unsigned long flags;
+	runqueue_t *rq;
+
+	rq = task_rq_lock(p, &flags);
+
+	queued = task_is_queued(p);
+	if (queued)
+		deactivate_task(p, rq);
+	__setscheduler(p, SCHED_NORMAL, 0);
+	if (queued) {
+		__activate_task(p, rq);
+		resched_task(rq->curr);
+	}
+
+	task_rq_unlock(rq, &flags);
+}
+#endif
+
+#ifdef CONFIG_SYSFS
+#define no_change(a) (a)
+SCHED_DRV_SYSFS_UINT_RW(cache_delay, msecs_to_jiffies, jiffies_to_msecs, 1, 1000);
+SCHED_DRV_SYSFS_UINT_RW(sched_compute, no_change, no_change, 0, 1);
+SCHED_DRV_SYSFS_UINT_RW(sched_interactive, no_change, no_change, 0, 1);
+
+static struct attribute *staircase_attrs[] = {
+	&SCHED_DRV_SYSFS_ATTR(cache_delay),
+	&SCHED_DRV_SYSFS_ATTR(sched_compute),
+	&SCHED_DRV_SYSFS_ATTR(sched_interactive),
+	NULL,
+};
+#endif
+
+const struct sched_drv staircase_sched_drv = {
+	.name = "staircase",
+	.init_runqueue_queue = staircase_init_runqueue_queue,
+	.set_oom_time_slice = staircase_set_oom_time_slice,
+	.task_timeslice = slice,
+	.wake_up_task = staircase_wake_up_task,
+	.fork = staircase_fork,
+	.wake_up_new_task = staircase_wake_up_new_task,
+	.exit = staircase_exit,
+#ifdef CONFIG_SMP
+	.set_task_cpu = common_set_task_cpu,
+	.move_tasks = staircase_move_tasks,
+#endif
+	.tick = staircase_tick,
+#ifdef CONFIG_SCHED_SMT
+	.head_of_queue = staircase_head_of_queue,
+	.dependent_sleeper_trumps = staircase_dependent_sleeper_trumps,
+#endif
+	.schedule = staircase_schedule,
+	.set_normal_task_nice = staircase_set_normal_task_nice,
+	.setscheduler = staircase_setscheduler,
+	.sys_yield = staircase_sys_yield,
+	.yield = staircase_yield,
+	.init_idle = staircase_init_idle,
+	.sched_init = staircase_sched_init,
+#ifdef CONFIG_SMP
+	.migrate_queued_task = staircase_migrate_queued_task,
+#ifdef CONFIG_HOTPLUG_CPU
+	.set_select_idle_first = staircase_set_select_idle_first,
+	.set_select_idle_last = staircase_set_select_idle_last,
+	.migrate_dead_tasks = staircase_migrate_dead_tasks,
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	.normalize_rt_task = staircase_normalize_rt_task,
+#endif
+#ifdef CONFIG_SYSFS
+	.attrs = staircase_attrs,
+#endif
+};
diff -Naur linux-2.6.12-rc2-mm3/mm/oom_kill.c linux-2.6.12-rc2-mm3-plugsched/mm/oom_kill.c
--- linux-2.6.12-rc2-mm3/mm/oom_kill.c	2005-04-14 02:47:24.895658480 -0700
+++ linux-2.6.12-rc2-mm3-plugsched/mm/oom_kill.c	2005-04-23 13:20:23.707967784 -0700
@@ -196,7 +196,7 @@
 	 * all the memory it needs. That way it should be able to
 	 * exit() and clear out its resources quickly...
 	 */
-	p->time_slice = HZ;
+	set_oom_time_slice(p, HZ);
 	set_tsk_thread_flag(p, TIF_MEMDIE);
 
 	force_sig(SIGKILL, p);
