diff -urN oldtree/fs/proc/array.c newtree/fs/proc/array.c
--- oldtree/fs/proc/array.c	2006-01-03 03:21:10.000000000 +0000
+++ newtree/fs/proc/array.c	2006-02-04 18:03:13.591288384 +0000
@@ -165,7 +165,6 @@
 	read_lock(&tasklist_lock);
 	buffer += sprintf(buffer,
 		"State:\t%s\n"
-		"SleepAVG:\t%lu%%\n"
 		"Tgid:\t%d\n"
 		"Pid:\t%d\n"
 		"PPid:\t%d\n"
@@ -173,7 +172,6 @@
 		"Uid:\t%d\t%d\t%d\t%d\n"
 		"Gid:\t%d\t%d\t%d\t%d\n",
 		get_task_state(p),
-		(p->sleep_avg/1024)*100/(1020000000/1024),
 	       	p->tgid,
 		p->pid, pid_alive(p) ? p->group_leader->real_parent->tgid : 0,
 		pid_alive(p) && p->ptrace ? p->parent->pid : 0,
diff -urN oldtree/fs/proc/base.c newtree/fs/proc/base.c
--- oldtree/fs/proc/base.c	2006-01-03 03:21:10.000000000 +0000
+++ newtree/fs/proc/base.c	2006-02-04 18:03:13.593288080 +0000
@@ -69,6 +69,7 @@
 #include <linux/ptrace.h>
 #include <linux/seccomp.h>
 #include <linux/cpuset.h>
+#include <linux/sched_task.h>
 #include <linux/audit.h>
 #include <linux/poll.h>
 #include "internal.h"
@@ -153,6 +154,13 @@
 #ifdef CONFIG_CPUSETS
 	PROC_TID_CPUSET,
 #endif
+#ifdef CONFIG_CPUSCHED_SPA
+	PROC_TID_CPU_RATE_CAP,
+	PROC_TID_CPU_RATE_HARD_CAP,
+#endif
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+	PROC_TID_CPUSTATS,
+#endif
 #ifdef CONFIG_SECURITY
 	PROC_TID_ATTR,
 	PROC_TID_ATTR_CURRENT,
@@ -262,6 +270,13 @@
 #ifdef CONFIG_AUDITSYSCALL
 	E(PROC_TID_LOGINUID, "loginuid", S_IFREG|S_IWUSR|S_IRUGO),
 #endif
+#ifdef CONFIG_CPUSCHED_SPA
+	E(PROC_TID_CPU_RATE_CAP,  "cpu_rate_cap",   S_IFREG|S_IRUGO|S_IWUSR),
+	E(PROC_TID_CPU_RATE_HARD_CAP,  "cpu_rate_hard_cap",   S_IFREG|S_IRUGO|S_IWUSR),
+#endif
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+	E(PROC_TID_CPUSTATS,  "cpustats",   S_IFREG|S_IRUGO),
+#endif
 	{0,0,NULL,0}
 };
 
@@ -1066,6 +1081,100 @@
 };
 #endif /* CONFIG_SECCOMP */
 
+#ifdef CONFIG_CPUSCHED_SPA
+static ssize_t cpu_rate_cap_read(struct file * file, char * buf,
+			size_t count, loff_t *ppos)
+{
+	struct task_struct *task = PROC_I(file->f_dentry->d_inode)->task;
+	char buffer[64];
+	size_t len;
+	unsigned int cppt = get_cpu_rate_cap(task);
+
+	if (*ppos)
+		return 0;
+	*ppos = len = sprintf(buffer, "%u\n", cppt);
+	if (copy_to_user(buf, buffer, len))
+		return -EFAULT;
+
+	return len;
+}
+
+static ssize_t cpu_rate_cap_write(struct file * file, const char * buf,
+			 size_t count, loff_t *ppos)
+{
+	struct task_struct *task = PROC_I(file->f_dentry->d_inode)->task;
+	char buffer[128] = "";
+	char *endptr = NULL;
+	unsigned long hcppt;
+	int res;
+
+
+	if ((count > 63) || *ppos)
+		return -EFBIG;
+	if (copy_from_user(buffer, buf, count))
+		return -EFAULT;
+	hcppt = simple_strtoul(buffer, &endptr, 0);
+	if ((endptr == buffer) || (hcppt == ULONG_MAX))
+		return -EINVAL;
+
+	if ((res = set_cpu_rate_cap(task, hcppt)) != 0)
+		return res;
+
+	return count;
+}
+
+struct file_operations proc_cpu_rate_cap_operations = {
+	read:		cpu_rate_cap_read,
+	write:		cpu_rate_cap_write,
+};
+
+static ssize_t cpu_rate_hard_cap_read(struct file * file, char * buf,
+			size_t count, loff_t *ppos)
+{
+	struct task_struct *task = PROC_I(file->f_dentry->d_inode)->task;
+	char buffer[64];
+	size_t len;
+	unsigned int hcppt = get_cpu_rate_hard_cap(task);
+
+	if (*ppos)
+		return 0;
+	*ppos = len = sprintf(buffer, "%u\n", hcppt);
+	if (copy_to_user(buf, buffer, len))
+		return -EFAULT;
+
+	return len;
+}
+
+static ssize_t cpu_rate_hard_cap_write(struct file * file, const char * buf,
+			 size_t count, loff_t *ppos)
+{
+	struct task_struct *task = PROC_I(file->f_dentry->d_inode)->task;
+	char buffer[128] = "";
+	char *endptr = NULL;
+	unsigned long hcppt;
+	int res;
+
+
+	if ((count > 63) || *ppos)
+		return -EFBIG;
+	if (copy_from_user(buffer, buf, count))
+		return -EFAULT;
+	hcppt = simple_strtoul(buffer, &endptr, 0);
+	if ((endptr == buffer) || (hcppt == ULONG_MAX))
+		return -EINVAL;
+
+	if ((res = set_cpu_rate_hard_cap(task, hcppt)) != 0)
+		return res;
+
+	return count;
+}
+
+struct file_operations proc_cpu_rate_hard_cap_operations = {
+	read:		cpu_rate_hard_cap_read,
+	write:		cpu_rate_hard_cap_write,
+};
+#endif
+
 static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
 	struct inode *inode = dentry->d_inode;
@@ -1786,6 +1895,20 @@
 			inode->i_fop = &proc_loginuid_operations;
 			break;
 #endif
+#ifdef CONFIG_CPUSCHED_SPA
+		case PROC_TID_CPU_RATE_CAP:
+			inode->i_fop = &proc_cpu_rate_cap_operations;
+			break;
+		case PROC_TID_CPU_RATE_HARD_CAP:
+			inode->i_fop = &proc_cpu_rate_hard_cap_operations;
+			break;
+#endif
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+		case PROC_TID_CPUSTATS:
+			inode->i_fop = &proc_info_file_operations;
+			ei->op.proc_read = task_sched_cpustats;
+			break;
+#endif
 		default:
 			printk("procfs: impossible type (%d)",p->type);
 			iput(inode);
diff -urN oldtree/fs/proc/proc_misc.c newtree/fs/proc/proc_misc.c
--- oldtree/fs/proc/proc_misc.c	2006-01-03 03:21:10.000000000 +0000
+++ newtree/fs/proc/proc_misc.c	2006-02-04 18:03:13.593288080 +0000
@@ -45,6 +45,7 @@
 #include <linux/sysrq.h>
 #include <linux/vmalloc.h>
 #include <linux/crash_dump.h>
+#include <linux/sched_drv.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/io.h>
@@ -243,6 +244,17 @@
 	return proc_calc_metrics(page, start, off, count, eof, len);
 }
 
+static int scheduler_read_proc(char *page, char **start, off_t off,
+				 int count, int *eof, void *data)
+{
+	int len;
+
+	strcpy(page, sched_drvp->name);
+	strcat(page, "\n");
+	len = strlen(page);
+	return proc_calc_metrics(page, start, off, count, eof, len);
+}
+
 extern struct seq_operations cpuinfo_op;
 static int cpuinfo_open(struct inode *inode, struct file *file)
 {
@@ -585,6 +597,7 @@
 		{"cmdline",	cmdline_read_proc},
 		{"locks",	locks_read_proc},
 		{"execdomains",	execdomains_read_proc},
+		{"scheduler",	scheduler_read_proc},
 		{NULL,}
 	};
 	for (p = simple_ones; p->name; p++)
diff -urN oldtree/include/asm-x86_64/system.h newtree/include/asm-x86_64/system.h
--- oldtree/include/asm-x86_64/system.h	2006-01-03 03:21:10.000000000 +0000
+++ newtree/include/asm-x86_64/system.h	2006-02-04 18:03:13.594287928 +0000
@@ -31,8 +31,6 @@
 		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
 		     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
 		     "call __switch_to\n\t"					  \
-		     ".globl thread_return\n"					\
-		     "thread_return:\n\t"					    \
 		     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"			  \
 		     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
 		     LOCK "btr  %[tif_fork],%P[ti_flags](%%r8)\n\t"		  \
diff -urN oldtree/include/linux/init_task.h newtree/include/linux/init_task.h
--- oldtree/include/linux/init_task.h	2006-01-03 03:21:10.000000000 +0000
+++ newtree/include/linux/init_task.h	2006-02-04 18:03:15.583985448 +0000
@@ -83,15 +83,14 @@
 	.usage		= ATOMIC_INIT(2),				\
 	.flags		= 0,						\
 	.lock_depth	= -1,						\
-	.prio		= MAX_PRIO-20,					\
-	.static_prio	= MAX_PRIO-20,					\
+	.prio		= NICE_TO_PRIO(0),				\
+	.static_prio	= NICE_TO_PRIO(0),				\
 	.policy		= SCHED_NORMAL,					\
 	.cpus_allowed	= CPU_MASK_ALL,					\
 	.mm		= NULL,						\
 	.active_mm	= &init_mm,					\
 	.run_list	= LIST_HEAD_INIT(tsk.run_list),			\
 	.ioprio		= 0,						\
-	.time_slice	= HZ,						\
 	.tasks		= LIST_HEAD_INIT(tsk.tasks),			\
 	.ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children),		\
 	.ptrace_list	= LIST_HEAD_INIT(tsk.ptrace_list),		\
diff -urN oldtree/include/linux/sched.h newtree/include/linux/sched.h
--- oldtree/include/linux/sched.h	2006-01-03 03:21:10.000000000 +0000
+++ newtree/include/linux/sched.h	2006-02-04 18:03:15.584985296 +0000
@@ -487,8 +487,6 @@
 #define MAX_USER_RT_PRIO	100
 #define MAX_RT_PRIO		MAX_USER_RT_PRIO
 
-#define MAX_PRIO		(MAX_RT_PRIO + 40)
-
 #define rt_task(p)		(unlikely((p)->prio < MAX_RT_PRIO))
 
 /*
@@ -683,6 +681,8 @@
 struct audit_context;		/* See audit.c */
 struct mempolicy;
 
+#include <linux/sched_task.h>
+
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
 	struct thread_info *thread_info;
@@ -696,19 +696,19 @@
 	int oncpu;
 #endif
 	int prio, static_prio;
+#ifdef CONFIG_SMP
+	int bias_prio;
+#endif
 	struct list_head run_list;
-	prio_array_t *array;
+	union sched_drv_task sdu;
 
 	unsigned short ioprio;
 
-	unsigned long sleep_avg;
 	unsigned long long timestamp, last_ran;
 	unsigned long long sched_time; /* sched_clock time spent running */
-	int activated;
 
 	unsigned long policy;
 	cpumask_t cpus_allowed;
-	unsigned int time_slice, first_time_slice;
 
 #ifdef CONFIG_SCHEDSTATS
 	struct sched_info sched_info;
diff -urN oldtree/include/linux/sched_cpustats.h newtree/include/linux/sched_cpustats.h
--- oldtree/include/linux/sched_cpustats.h	1970-01-01 00:00:00.000000000 +0000
+++ newtree/include/linux/sched_cpustats.h	2006-02-04 18:03:15.585985144 +0000
@@ -0,0 +1,200 @@
+#ifndef _LINUX_SCHED_CPUSTATS_H
+#define _LINUX_SCHED_CPUSTATS_H
+
+#include <linux/sysctl.h>
+
+/*
+ * Fixed denominator rational numbers for use by the CPU scheduler
+ */
+#define SCHED_AVG_OFFSET 4
+/*
+ * Get the rounded integer value of a scheduling statistic average field
+ * i.e. those fields whose names begin with avg_
+ */
+#define SCHED_AVG_RND(x) \
+	(((x) + (1 << (SCHED_AVG_OFFSET - 1))) >> (SCHED_AVG_OFFSET))
+#define SCHED_AVG_REAL(a) ((a) << SCHED_AVG_OFFSET)
+
+#define INITIAL_CPUSTATS_TIMESTAMP \
+	((unsigned long long)INITIAL_JIFFIES * (1000000000ULL / HZ))
+
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+struct runq_cpustats {
+	unsigned long long total_delay;
+	unsigned long long total_rt_delay;
+	unsigned long long total_intr_delay;
+	unsigned long long total_rt_intr_delay;
+	unsigned long long total_fork_delay;
+	unsigned long long total_sinbin;
+	unsigned long long total_latency;
+};
+
+extern DEFINE_PER_CPU(struct runq_cpustats, cpustats_runqs);
+#endif
+
+/*
+ * Scheduling statistics for a task/thread
+ */
+struct task_cpustats {
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+	unsigned long long intr_wake_ups;
+	unsigned long long total_sleep;
+	unsigned long long total_ia_sleep;
+	unsigned long long total_cpu;
+	unsigned long long total_delay;
+	unsigned long long total_latency;
+	unsigned long long total_sinbin;
+	unsigned long long avg_latency;
+#endif
+	unsigned long long avg_sleep_per_cycle;
+	unsigned long long avg_ia_sleep_per_cycle;
+	unsigned long long avg_cpu_per_cycle;
+	unsigned long long avg_delay_per_cycle;
+	unsigned long long avg_cycle_length;
+	unsigned long long total_wake_ups;
+#ifdef CPUSCHED_AUTODETECT_MEDIA
+	unsigned long long last_wake;
+	unsigned long long avg_wake_interval;
+	unsigned long long var_wake_interval;
+#endif
+	unsigned long cpu_usage_rate;
+	unsigned long ia_sleepiness;
+	unsigned int flags;
+};
+
+#define CPUSTATS_WOKEN_FOR_INTR_FL (1 << 0)
+#define CPUSTATS_JUST_WOKEN_FL (1 << 1)
+#define CPUSTATS_JUST_FORKED_FL (1 << 2)
+
+#define INIT_CPUSTATS \
+	.cpustats = { 0, }, \
+	.csrq = NULL
+
+
+struct task_struct;
+
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+extern void init_runq_cpustats(unsigned int cpu);
+static inline struct runq_cpustats *cpu_runq_cpustats(unsigned int cpu)
+{
+	return &per_cpu(cpustats_runqs, cpu);
+}
+#else
+static inline void init_runq_cpustats(unsigned int cpu) {}
+#endif
+#ifdef CONFIG_SMP
+extern unsigned long long adjusted_sched_clock(const struct task_struct *p);
+#else
+#define adjusted_sched_clock(p) sched_clock()
+#endif
+
+extern void initialize_cpustats(struct task_struct *p, unsigned long long now);
+extern void delta_sleep_cpustats(struct task_struct *p, unsigned long long now);
+extern void delta_cpu_cpustats(struct task_struct *p, unsigned long long now);
+extern void delta_delay_cpustats(struct task_struct *p, unsigned long long now);
+extern void delta_rq_delay_cpustats(struct task_struct *p, unsigned long long delta);
+extern void update_cpustats_at_wake_up(struct task_struct *p, unsigned long long now);
+extern void update_cpustats_at_end_of_ts(struct task_struct *p, unsigned long long now);
+
+extern unsigned long long cpustats_avg_in_jiffies(unsigned long long avg);
+
+extern unsigned long long msecs_to_nsecs_avg(unsigned long long);
+extern unsigned long long nsecs_avg_to_msecs(unsigned long long);
+
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+/*
+ * Get "up to date" scheduling statistics for the given task
+ * This function should be used if reliable scheduling statistics are required
+ * outside the scheduler itself as the relevant fields in the task structure
+ * are not "up to date" NB the possible difference between those in the task
+ * structure and the correct values could be quite large for sleeping tasks.
+ */
+extern int get_task_cpustats(struct task_struct*, struct task_cpustats*, unsigned long long*);
+
+/*
+ * Scheduling statistics for a CPU
+ */
+struct cpu_cpustats {
+	unsigned long long timestamp;
+	unsigned long long total_idle;
+	unsigned long long total_busy;
+	unsigned long long total_delay;
+	unsigned long long total_rt_delay;
+	unsigned long long total_intr_delay;
+	unsigned long long total_rt_intr_delay;
+	unsigned long long total_fork_delay;
+	unsigned long long total_latency;
+	unsigned long long total_sinbin;
+	unsigned long long nr_switches;
+};
+
+/*
+ * Get scheduling statistics for the nominated CPU
+ */
+extern int get_cpu_cpustats(unsigned int, struct cpu_cpustats*);
+
+/*
+ * Make scheduling statistics available via /proc
+ */
+extern int task_sched_cpustats(struct task_struct *p, char *buffer);
+extern int show_cpustats(char *page);
+#endif
+
+/*
+ * CPU rate statistics are estimated as proportions (i.e. real numbers in the
+ * range 0 to 1 inclusive) using fixed denominator rational numbers.
+ * Needs to be small enough so that we can map bonuses (up to 20) within
+ * a 32 bit integer
+ */
+#define PROPORTION_OFFSET	26
+/* for static initializations */
+#define PROPORTION_ONE		(1UL << PROPORTION_OFFSET)
+#define PROP_FM_PPT(a) \
+	(((unsigned long long)(a) * PROPORTION_ONE) / 1000)
+
+/* Require: a <= b */
+extern unsigned long calc_proportion(unsigned long long a,
+				     unsigned long long b);
+/* Multiply two proportions to give a proportion or multiplies a proportion
+ * by an integer to give an integer
+ */
+static inline  unsigned long proportion_mul(unsigned long a,
+					    unsigned long b)
+{
+	return ((unsigned long long)a * (unsigned long long)b) >> PROPORTION_OFFSET;
+}
+/*
+ * Find the square root of a proportion
+ * Require: x <= PROPORTION_ONE
+ */
+unsigned long proportion_sqrt(unsigned long x);
+/*
+ * Map a proportion onto a small integer range
+ * Require: range < 63 (to avoid overflow)
+ */
+static inline unsigned long map_proportion(unsigned long p, unsigned long r)
+{
+	return (p * r) >> PROPORTION_OFFSET;
+}
+/*
+ * Map a proportion onto a small integer range (rounded)
+ * Require: range < 31 (to avoid overflow)
+ */
+static inline unsigned long map_proportion_rnd(unsigned long p,
+					       unsigned long r)
+{
+	return (p * ((r << 1) + 1)) >> (PROPORTION_OFFSET + 1);
+}
+
+unsigned long proportion_to_ppt(unsigned long p);
+unsigned long ppt_to_proportion(unsigned long ppt);
+
+extern unsigned long avg_cpu_usage_rate(const struct task_struct*);
+extern unsigned long avg_sleep_rate(const struct task_struct*);
+extern unsigned long avg_cpu_delay_rate(const struct task_struct*);
+extern unsigned long delay_in_jiffies_for_usage(const struct task_struct*,
+						unsigned long);
+
+#define TASK_CPUSTATS(p) (p)->sdu.spa.cpustats
+
+#endif
diff -urN oldtree/include/linux/sched_drv.h newtree/include/linux/sched_drv.h
--- oldtree/include/linux/sched_drv.h	1970-01-01 00:00:00.000000000 +0000
+++ newtree/include/linux/sched_drv.h	2006-02-04 18:03:15.586984992 +0000
@@ -0,0 +1,61 @@
+#ifndef _LINUX_SCHED_DRV_H
+#define _LINUX_SCHED_DRV_H
+/*
+ * include/linux/sched_drv.h
+ * This contains the definition of the driver struct for all the exported per
+ * runqueue scheduler functions, and the private per scheduler data in
+ * struct task_struct.
+ */
+#include <linux/kobject.h>
+
+#include <linux/sched.h>
+#include <linux/sched_runq.h>
+
+/*
+ * This is the main scheduler driver struct.
+ */
+struct sched_drv {
+	const char *name;
+	void (*init_runqueue_queue)(union runqueue_queue *);
+	void (*set_oom_time_slice)(struct task_struct *, unsigned long);
+	unsigned int (*task_timeslice)(const task_t *);
+	void (*wake_up_task)(struct task_struct *, struct runqueue *, unsigned int, int);
+	void (*fork)(task_t *);
+	void (*wake_up_new_task)(task_t *, unsigned long);
+	void (*exit)(task_t *);
+#ifdef CONFIG_SMP
+	int (*move_tasks)(runqueue_t *, int, runqueue_t *, unsigned long, long,
+		 struct sched_domain *, enum idle_type, int *all_pinned);
+#endif
+	void (*tick)(struct task_struct*, struct runqueue *, unsigned long long);
+#ifdef CONFIG_SCHED_SMT
+	struct task_struct *(*head_of_queue)(union runqueue_queue *);
+	int (*dependent_sleeper_trumps)(const struct task_struct *,
+		const struct task_struct *, struct sched_domain *);
+#endif
+	void (*schedule)(void);
+	void (*set_normal_task_nice)(task_t *, long);
+	void (*setscheduler)(task_t *, int, int);
+	long (*sys_yield)(void);
+	void (*yield)(void);
+	void (*init_idle)(task_t *, int);
+	void (*sched_init)(void);
+#ifdef CONFIG_SMP
+	void (*migrate_queued_task)(struct task_struct *, int);
+#ifdef CONFIG_HOTPLUG_CPU
+	void (*set_select_idle_first)(struct runqueue *);
+	void (*set_select_idle_last)(struct runqueue *);
+	void (*migrate_dead_tasks)(unsigned int);
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	void (*normalize_rt_task)(struct task_struct *);
+#endif
+	struct attribute **attrs;
+};
+
+extern const struct sched_drv *sched_drvp;
+
+extern void sched_drv_sysfs_init(void);
+
+#endif
diff -urN oldtree/include/linux/sched_pvt.h newtree/include/linux/sched_pvt.h
--- oldtree/include/linux/sched_pvt.h	1970-01-01 00:00:00.000000000 +0000
+++ newtree/include/linux/sched_pvt.h	2006-02-04 18:03:15.587984840 +0000
@@ -0,0 +1,568 @@
+#ifndef _LINUX_SCHED_PVT_H
+#define _LINUX_SCHED_PVT_H
+/*
+ * include/linux/sched_pvt.h
+ * This contains the definition of the CPU scheduler macros and function
+ * prototypes that are only of interest to scheduler implementations.
+ */
+
+#include <linux/sched_drv.h>
+#include <linux/stat.h> /* S_IRUGO etc on IA64 */
+
+#include <asm/mmu_context.h>
+
+extern DEFINE_PER_CPU(struct runqueue, runqueues);
+
+#define TASK_PREEMPTS_CURR(p, rq) ((p)->prio < (rq)->curr->prio)
+
+#define task_is_queued(p)	(!list_empty(&(p)->run_list))
+
+#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
+#define this_rq()		(&__get_cpu_var(runqueues))
+#define task_rq(p)		cpu_rq(task_cpu(p))
+#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
+
+/*
+ * Context-switch locking:
+ */
+#ifndef prepare_arch_switch
+# define prepare_arch_switch(next)	do { } while (0)
+#endif
+#ifndef finish_arch_switch
+# define finish_arch_switch(prev)	do { } while (0)
+#endif
+
+#ifndef __ARCH_WANT_UNLOCKED_CTXSW
+static inline int task_running(runqueue_t *rq, task_t *p)
+{
+	return rq->curr == p;
+}
+
+static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
+{
+}
+
+static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+	/* this is a valid case when another task releases the spinlock */
+	rq->lock.owner = current;
+#endif
+	spin_unlock_irq(&rq->lock);
+}
+#else /* __ARCH_WANT_UNLOCKED_CTXSW */
+static inline int task_running(runqueue_t *rq, task_t *p)
+{
+#ifdef CONFIG_SMP
+	return p->oncpu;
+#else
+	return rq->curr == p;
+#endif
+}
+
+static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
+{
+#ifdef CONFIG_SMP
+	/*
+	 * We can optimise this out completely for !SMP, because the
+	 * SMP rebalancing from interrupt is the only thing that cares
+	 * here.
+	 */
+	next->oncpu = 1;
+#endif
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	spin_unlock_irq(&rq->lock);
+#else
+	spin_unlock(&rq->lock);
+#endif
+}
+
+static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
+{
+#ifdef CONFIG_SMP
+	/*
+	 * After ->oncpu is cleared, the task can be moved to a different CPU.
+	 * We must ensure this doesn't happen until the switch is completely
+	 * finished.
+	 */
+	smp_wmb();
+	prev->oncpu = 0;
+#endif
+#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	local_irq_enable();
+#endif
+}
+#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
+
+/*
+ * task_rq_lock - lock the runqueue a given task resides on and disable
+ * interrupts.  Note the ordering: we can safely lookup the task_rq without
+ * explicitly disabling preemption.
+ */
+static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
+	__acquires(rq->lock)
+{
+	struct runqueue *rq;
+
+repeat_lock_task:
+	local_irq_save(*flags);
+	rq = task_rq(p);
+	spin_lock(&rq->lock);
+	if (unlikely(rq != task_rq(p))) {
+		spin_unlock_irqrestore(&rq->lock, *flags);
+		goto repeat_lock_task;
+	}
+	return rq;
+}
+
+static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
+	__releases(rq->lock)
+{
+	spin_unlock_irqrestore(&rq->lock, *flags);
+}
+
+/*
+ * rq_lock - lock a given runqueue and disable interrupts.
+ */
+static inline runqueue_t *this_rq_lock(void)
+	__acquires(rq->lock)
+{
+	runqueue_t *rq;
+
+	local_irq_disable();
+	rq = this_rq();
+	spin_lock(&rq->lock);
+
+	return rq;
+}
+
+/*
+ * Place scheduler attributes in sysfs
+ */
+struct sched_drv_sysfs_entry {
+	struct attribute attr;
+	ssize_t (*show)(char *);
+	ssize_t (*store)(const char *, size_t);
+};
+
+#define to_sched_drv_sysfs_entry(a) container_of((a), struct sched_drv_sysfs_entry, attr)
+
+/*
+ * Macros to help define more common scheduler sysfs attribute types
+ */
+#define SCHED_DRV_SYSFS_UINT_RW_EV(sdse_vis, aname, conv_in, conv_out, MINV, MAXV) \
+static ssize_t show_ ## aname(char *page) \
+{ \
+	unsigned long long val = conv_out(aname); \
+ \
+	return sprintf(page, "%llu\n", val); \
+} \
+ \
+static ssize_t store_ ## aname(const char *page, size_t count) \
+{ \
+	unsigned long long val; \
+	char *end = NULL; \
+ \
+	val = simple_strtoull(page, &end, 10); \
+	if ((end == page) || ((*end != '\0') && (*end != '\n'))) \
+		return -EINVAL; \
+	val = conv_in(val); \
+	if (val < (MINV)) \
+		val = (MINV); \
+	else if (val > (MAXV)) \
+		val = (MAXV); \
+ \
+	aname = val; \
+ \
+	return count; \
+} \
+ \
+sdse_vis struct sched_drv_sysfs_entry aname ## _sdse = { \
+	.attr = { .name = # aname, .mode = S_IRUGO | S_IWUSR }, \
+	.show = show_ ## aname, \
+	.store = store_ ## aname, \
+}
+#define SCHED_DRV_SYSFS_UINT_RW(aname, conv_in, conv_out, MINV, MAXV) \
+	SCHED_DRV_SYSFS_UINT_RW_EV(, aname, conv_in, conv_out, MINV, MAXV)
+#define SCHED_DRV_SYSFS_UINT_RW_STATIC(aname, conv_in, conv_out, MINV, MAXV) \
+	SCHED_DRV_SYSFS_UINT_RW_EV(static, aname, conv_in, conv_out, MINV, MAXV)
+
+#define SCHED_DRV_SYSFS_UINT_RO_EV(sdse_vis, ev, aname, conv_out) \
+static ssize_t show_ ## aname(char *page) \
+{ \
+	unsigned long long val = conv_out(aname); \
+ \
+	return sprintf(page, "%llu\n", val); \
+} \
+ \
+sdse_vis struct sched_drv_sysfs_entry aname ## _sdse = { \
+	.attr = { .name = # aname, .mode = S_IRUGO }, \
+	.show = show_ ## aname, \
+	.store = NULL, \
+}
+
+#define SCHED_DRV_SYSFS_UINT_RO(sdse_vis, ev, aname, conv_out) \
+	SCHED_DRV_SYSFS_UINT_RO_EV(, ev, aname, conv_out)
+#define SCHED_DRV_SYSFS_UINT_RO_STATIC(sdse_vis, ev, aname, conv_out) \
+	SCHED_DRV_SYSFS_UINT_RO_EV(static, ev, aname, conv_out)
+
+#define SCHED_DRV_SYSFS_ATTR(aname) (aname ## _sdse.attr)
+#define SCHED_DRV_DECLARE_SYSFS_ENTRY(aname) \
+extern struct sched_drv_sysfs_entry aname ## _sdse
+
+/**
+ * prepare_task_switch - prepare to switch tasks
+ * @rq: the runqueue preparing to switch
+ * @next: the task we are going to switch to.
+ *
+ * This is called with the rq lock held and interrupts off. It must
+ * be paired with a subsequent finish_task_switch after the context
+ * switch.
+ *
+ * prepare_task_switch sets up locking and calls architecture specific
+ * hooks.
+ */
+static inline void prepare_task_switch(runqueue_t *rq, task_t *next)
+{
+	prepare_lock_switch(rq, next);
+	prepare_arch_switch(next);
+}
+
+/**
+ * finish_task_switch - clean up after a task-switch
+ * @rq: runqueue associated with task-switch
+ * @prev: the thread we just switched away from.
+ *
+ * finish_task_switch must be called after the context switch, paired
+ * with a prepare_task_switch call before the context switch.
+ * finish_task_switch will reconcile locking set up by prepare_task_switch,
+ * and do any other architecture-specific cleanup actions.
+ *
+ * Note that we may have delayed dropping an mm in context_switch(). If
+ * so, we finish that here outside of the runqueue lock.  (Doing it
+ * with the lock held can cause deadlocks; see schedule() for
+ * details.)
+ */
+static inline void finish_task_switch(runqueue_t *rq, task_t *prev)
+	__releases(rq->lock)
+{
+	struct mm_struct *mm = rq->prev_mm;
+	unsigned long prev_task_flags;
+
+	rq->prev_mm = NULL;
+
+	/*
+	 * A task struct has one reference for the use as "current".
+	 * If a task dies, then it sets EXIT_ZOMBIE in tsk->exit_state and
+	 * calls schedule one last time. The schedule call will never return,
+	 * and the scheduled task must drop that reference.
+	 * The test for EXIT_ZOMBIE must occur while the runqueue locks are
+	 * still held, otherwise prev could be scheduled on another cpu, die
+	 * there before we look at prev->state, and then the reference would
+	 * be dropped twice.
+	 *		Manfred Spraul <manfred@colorfullife.com>
+	 */
+	prev_task_flags = prev->flags;
+	finish_arch_switch(prev);
+	finish_lock_switch(rq, prev);
+	if (mm)
+		mmdrop(mm);
+	if (unlikely(prev_task_flags & PF_DEAD))
+		put_task_struct(prev);
+}
+
+/*
+ * context_switch - switch to the new MM and the new
+ * thread's register state.
+ */
+static inline
+task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
+{
+	struct mm_struct *mm = next->mm;
+	struct mm_struct *oldmm = prev->active_mm;
+
+	if (unlikely(!mm)) {
+		next->active_mm = oldmm;
+		atomic_inc(&oldmm->mm_count);
+		enter_lazy_tlb(oldmm, next);
+	} else
+		switch_mm(oldmm, mm, next);
+
+	if (unlikely(!prev->mm)) {
+		prev->active_mm = NULL;
+		WARN_ON(rq->prev_mm);
+		rq->prev_mm = oldmm;
+	}
+
+	/* Here we just switch the register state and the stack. */
+	switch_to(prev, next, prev);
+
+	return prev;
+}
+
+/*
+ * This is called on clock ticks and on context switches.
+ * Bank in p->sched_time the ns elapsed since the last tick or switch.
+ */
+static inline void update_cpu_clock(task_t *p, runqueue_t *rq,
+				    unsigned long long now)
+{
+	unsigned long long last = max(p->timestamp, rq->timestamp_last_tick);
+	p->sched_time += now - last;
+}
+
+/* Actually do priority change: must hold rq lock. */
+void __setscheduler(struct task_struct *, int, int);
+
+#ifdef CONFIG_SMP
+#define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran)	\
+				< (long long) (sd)->cache_hot_time)
+extern void resched_task(task_t *p);
+extern void idle_balance(int, runqueue_t *);
+extern void rebalance_tick(int, runqueue_t *, enum idle_type);
+
+/*
+ * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
+ */
+static inline
+int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
+		     struct sched_domain *sd, enum idle_type idle,
+		     int *all_pinned)
+{
+	/*
+	 * We do not migrate tasks that are:
+	 * 1) running (obviously), or
+	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
+	 * 3) are cache-hot on their current CPU.
+	 */
+	if (!cpu_isset(this_cpu, p->cpus_allowed))
+		return 0;
+	*all_pinned = 0;
+
+	if (task_running(rq, p))
+		return 0;
+
+	/*
+	 * Aggressive migration if:
+	 * 1) task is cache cold, or
+	 * 2) too many balance attempts have failed.
+	 */
+
+	if (sd->nr_balance_failed > sd->cache_nice_tries)
+		return 1;
+
+	if (task_hot(p, rq->timestamp_last_tick, sd))
+		return 0;
+	return 1;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern void migrate_dead(unsigned int, task_t *);
+#endif
+#else
+static inline void resched_task(task_t *p)
+{
+	assert_spin_locked(&task_rq(p)->lock);
+	set_tsk_need_resched(p);
+}
+
+/*
+ * on UP we do not need to balance between CPUs:
+ */
+static inline void idle_balance(int cpu, runqueue_t *rq) { }
+static inline void rebalance_tick(int cpu, runqueue_t *rq, enum idle_type idle) { }
+#endif
+
+#ifdef CONFIG_SCHED_SMT
+extern int wake_priority_sleeper(runqueue_t *);
+extern void wake_sleeping_dependent(int, runqueue_t *);
+extern int dependent_sleeper(int, runqueue_t *);
+#else
+static inline int wake_priority_sleeper(runqueue_t *rq) { return 0; }
+static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq) { }
+static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq) { return 0; }
+#endif
+
+/*
+ * "Nice" biased load balancing
+ */
+#ifdef CONFIG_SMP
+/*
+ * Priority bias for load balancing ranges from 1 (nice==19) to 139 (RT
+ * priority of 100).
+ */
+#define NICE_TO_BIAS_PRIO(nice)	(20 - (nice))
+#define PRIO_TO_BIAS_PRIO(prio)	NICE_TO_BIAS_PRIO(PRIO_TO_NICE(prio))
+#define RTPRIO_TO_BIAS_PRIO(rp)	(40 + (rp))
+
+static inline void set_bias_prio(task_t *p)
+{
+	if (rt_task(p)) {
+		if (p == task_rq(p)->migration_thread)
+			/*
+			 * The migration thread does the actual balancing. Do
+			 * not bias by its priority as the ultra high priority
+			 * will skew balancing adversely.
+			 */
+			p->bias_prio = 0;
+		else
+			p->bias_prio = RTPRIO_TO_BIAS_PRIO(p->rt_priority);
+	} else
+		p->bias_prio = PRIO_TO_BIAS_PRIO(p->static_prio);
+}
+
+static inline void inc_prio_bias(runqueue_t *rq, const task_t *p)
+{
+	rq->prio_bias += p->bias_prio;
+}
+
+static inline void dec_prio_bias(runqueue_t *rq, const task_t *p)
+{
+	rq->prio_bias -= p->bias_prio;
+}
+
+static inline void inc_nr_running(task_t *p, runqueue_t *rq)
+{
+	rq->nr_running++;
+	inc_prio_bias(rq, p);
+}
+
+static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+{
+	rq->nr_running--;
+	dec_prio_bias(rq, p);
+}
+#else
+static inline void set_bias_prio(task_t *p)
+{
+}
+
+static inline void inc_prio_bias(runqueue_t *rq, const task_t *p)
+{
+}
+
+static inline void dec_prio_bias(runqueue_t *rq, const task_t *p)
+{
+}
+
+static inline void inc_nr_running(task_t *p, runqueue_t *rq)
+{
+	rq->nr_running++;
+}
+
+static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+{
+	rq->nr_running--;
+}
+#endif
+
+#ifdef CONFIG_SCHEDSTATS
+# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
+
+/*
+ * Called when a process is dequeued from the active array and given
+ * the cpu.  We should note that with the exception of interactive
+ * tasks, the expired queue will become the active queue after the active
+ * queue is empty, without explicitly dequeuing and requeuing tasks in the
+ * expired queue.  (Interactive tasks may be requeued directly to the
+ * active queue, thus delaying tasks in the expired queue from running;
+ * see scheduler_tick()).
+ *
+ * This function is only called from sched_info_arrive(), rather than
+ * dequeue_task(). Even though a task may be queued and dequeued multiple
+ * times as it is shuffled about, we're really interested in knowing how
+ * long it was from the *first* time it was queued to the time that it
+ * finally hit a cpu.
+ */
+static inline void sched_info_dequeued(task_t *t)
+{
+	t->sched_info.last_queued = 0;
+}
+
+/*
+ * Called when a task finally hits the cpu.  We can now calculate how
+ * long it was waiting to run.  We also note when it began so that we
+ * can keep stats on how long its timeslice is.
+ */
+static inline void sched_info_arrive(task_t *t)
+{
+	unsigned long now = jiffies, diff = 0;
+	struct runqueue *rq = task_rq(t);
+
+	if (t->sched_info.last_queued)
+		diff = now - t->sched_info.last_queued;
+	sched_info_dequeued(t);
+	t->sched_info.run_delay += diff;
+	t->sched_info.last_arrival = now;
+	t->sched_info.pcnt++;
+
+	if (!rq)
+		return;
+
+	rq->rq_sched_info.run_delay += diff;
+	rq->rq_sched_info.pcnt++;
+}
+
+/*
+ * Called when a process is queued into either the active or expired
+ * array.  The time is noted and later used to determine how long we
+ * had to wait for us to reach the cpu.  Since the expired queue will
+ * become the active queue after active queue is empty, without dequeuing
+ * and requeuing any tasks, we are interested in queuing to either. It
+ * is unusual but not impossible for tasks to be dequeued and immediately
+ * requeued in the same or another array: this can happen in sched_yield(),
+ * set_user_nice(), and even load_balance() as it moves tasks from runqueue
+ * to runqueue.
+ *
+ * This function is only called from enqueue_task(), but also only updates
+ * the timestamp if it is already not set.  It's assumed that
+ * sched_info_dequeued() will clear that stamp when appropriate.
+ */
+static inline void sched_info_queued(task_t *t)
+{
+	if (!t->sched_info.last_queued)
+		t->sched_info.last_queued = jiffies;
+}
+
+/*
+ * Called when a process ceases being the active-running process, either
+ * voluntarily or involuntarily.  Now we can calculate how long we ran.
+ */
+static inline void sched_info_depart(task_t *t)
+{
+	struct runqueue *rq = task_rq(t);
+	unsigned long diff = jiffies - t->sched_info.last_arrival;
+
+	t->sched_info.cpu_time += diff;
+
+	if (rq)
+		rq->rq_sched_info.cpu_time += diff;
+}
+
+/*
+ * Called when tasks are switched involuntarily due, typically, to expiring
+ * their time slice.  (This may also be called when switching to or from
+ * the idle task.)  We are only called when prev != next.
+ */
+static inline void sched_info_switch(task_t *prev, task_t *next)
+{
+	struct runqueue *rq = task_rq(prev);
+
+	/*
+	 * prev now departs the cpu.  It's not interesting to record
+	 * stats about how efficient we were at scheduling the idle
+	 * process, however.
+	 */
+	if (prev != rq->idle)
+		sched_info_depart(prev);
+
+	if (next != rq->idle)
+		sched_info_arrive(next);
+}
+#else
+# define schedstat_inc(rq, field)	do { } while (0)
+# define sched_info_queued(t)		do { } while (0)
+# define sched_info_switch(t, next)	do { } while (0)
+#endif /* CONFIG_SCHEDSTATS */
+
+#endif
diff -urN oldtree/include/linux/sched_runq.h newtree/include/linux/sched_runq.h
--- oldtree/include/linux/sched_runq.h	1970-01-01 00:00:00.000000000 +0000
+++ newtree/include/linux/sched_runq.h	2006-02-04 18:03:15.588984688 +0000
@@ -0,0 +1,174 @@
+#ifndef _LINUX_SCHED_RUNQ_H
+#define _LINUX_SCHED_RUNQ_H
+/*
+ * include/linux/sched_runq.h
+ * This contains the definition of the CPU scheduler run queue type.
+ * Modified to allow each scheduler to have its own private run queue data.
+ */
+
+/*
+ * These are the runqueue data structures:
+ */
+#ifdef CONFIG_CPUSCHED_INGO
+#define INGO_MAX_PRIO (MAX_RT_PRIO + 40)
+
+#define INGO_BITMAP_SIZE ((((INGO_MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
+
+struct prio_array {
+	unsigned int nr_active;
+	unsigned long bitmap[INGO_BITMAP_SIZE];
+	struct list_head queue[INGO_MAX_PRIO];
+};
+
+struct ingo_runqueue_queue {
+	prio_array_t *active, *expired, arrays[2];
+	/*
+	   Set to 0 on init and again on array switch;
+	   set to jiffies whenever a non-interactive job expires;
+	   reset to jiffies if it expires.
+	 */
+	unsigned long expired_timestamp;
+	int best_expired_prio;
+};
+#endif
+
+#ifdef CONFIG_CPUSCHED_STAIRCASE
+#define STAIRCASE_MAX_PRIO (MAX_RT_PRIO + 40)
+#define STAIRCASE_NUM_PRIO_SLOTS (STAIRCASE_MAX_PRIO + 1)
+
+struct staircase_runqueue_queue {
+	DECLARE_BITMAP(bitmap, STAIRCASE_NUM_PRIO_SLOTS);
+	struct list_head queue[STAIRCASE_NUM_PRIO_SLOTS - 1];
+	unsigned int cache_ticks;
+	unsigned int preempted;
+};
+#endif
+
+#ifdef CONFIG_CPUSCHED_SPA
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+#define SPA_IDLE_PRIO 159
+#else
+#define SPA_IDLE_PRIO (MAX_RT_PRIO + 40 + 2)
+#endif
+#define SPA_NUM_PRIO_SLOTS (SPA_IDLE_PRIO + 1)
+
+struct spa_prio_slot {
+	unsigned int prio;
+	struct list_head list;
+};
+
+struct spa_runqueue_queue {
+	DECLARE_BITMAP(bitmap, SPA_NUM_PRIO_SLOTS);
+	struct spa_prio_slot queue[SPA_NUM_PRIO_SLOTS - 1];
+	unsigned long next_prom_due;
+	unsigned long pcount;
+};
+#endif
+
+#ifdef CONFIG_CPUSCHED_NICK
+#define NICK_MAX_PRIO (MAX_RT_PRIO + 59)
+
+#define NICK_BITMAP_SIZE ((((NICK_MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
+
+struct nick_prio_array {
+	int min_prio;
+	unsigned int nr_active;
+	unsigned long bitmap[NICK_BITMAP_SIZE];
+	struct list_head queue[NICK_MAX_PRIO];
+};
+
+struct nick_runqueue_queue {
+	struct nick_prio_array *active, *expired, arrays[2];
+	/*
+	   NOTE(review): comment copied from expired_timestamp and looks
+	   stale here -- this struct has array_sequence, not an expiry
+	   timestamp; confirm intended semantics against nicksched.
+	 */
+	unsigned long array_sequence;
+};
+#endif
+
+typedef struct runqueue runqueue_t;
+
+union runqueue_queue {
+#ifdef CONFIG_CPUSCHED_INGO
+	struct ingo_runqueue_queue ingosched;
+#endif
+#ifdef CONFIG_CPUSCHED_STAIRCASE
+	struct staircase_runqueue_queue staircase;
+#endif
+#ifdef CONFIG_CPUSCHED_SPA
+	struct spa_runqueue_queue spa;
+#endif
+#ifdef CONFIG_CPUSCHED_NICK
+	struct nick_runqueue_queue nicksched;
+#endif
+};
+
+/*
+ * This is the main, per-CPU runqueue data structure.
+ *
+ * Locking rule: those places that want to lock multiple runqueues
+ * (such as the load balancing or the thread migration code), lock
+ * acquire operations must be ordered by ascending &runqueue.
+ */
+struct runqueue {
+	spinlock_t lock;
+
+	/*
+	 * nr_running and cpu_load should be in the same cacheline because
+	 * remote CPUs use both these fields when doing load calculation.
+	 */
+	unsigned long nr_running;
+#ifdef CONFIG_SMP
+	unsigned long prio_bias;
+	unsigned long cpu_load[3];
+#endif
+  	unsigned long long nr_switches;
+
+	/*
+	 * This is part of a global counter where only the total sum
+	 * over all CPUs matters. A task can increase this counter on
+	 * one CPU and if it got migrated afterwards it may decrease
+	 * it on another CPU. Always updated under the runqueue lock:
+	 */
+	unsigned long nr_uninterruptible;
+	union runqueue_queue qu;
+	unsigned long long timestamp_last_tick;
+	task_t *curr, *idle;
+	struct mm_struct *prev_mm;
+  	atomic_t nr_iowait;
+
+#ifdef CONFIG_SMP
+	struct sched_domain *sd;
+
+	/* For active balancing */
+	int active_balance;
+	int push_cpu;
+
+	task_t *migration_thread;
+	struct list_head migration_queue;
+#endif
+
+#ifdef CONFIG_SCHEDSTATS
+	/* latency stats */
+	struct sched_info rq_sched_info;
+
+	/* sys_sched_yield() stats */
+	unsigned long yld_exp_empty;
+	unsigned long yld_act_empty;
+	unsigned long yld_both_empty;
+	unsigned long yld_cnt;
+
+	/* schedule() stats */
+	unsigned long sched_switch;
+	unsigned long sched_cnt;
+	unsigned long sched_goidle;
+
+	/* try_to_wake_up() stats */
+	unsigned long ttwu_cnt;
+	unsigned long ttwu_local;
+#endif
+};
+
+#endif
diff -urN oldtree/include/linux/sched_spa.h newtree/include/linux/sched_spa.h
--- oldtree/include/linux/sched_spa.h	1970-01-01 00:00:00.000000000 +0000
+++ newtree/include/linux/sched_spa.h	2006-02-04 18:03:15.588984688 +0000
@@ -0,0 +1,91 @@
+#ifndef _LINUX_SCHED_SPA_H
+#define _LINUX_SCHED_SPA_H
+
+#include <linux/sched_runq.h>
+#include <linux/sched_pvt.h>
+
+#define SPA_BGND_PRIO		(SPA_IDLE_PRIO - 1)
+#define SPA_SOFT_CAP_PRIO	(SPA_BGND_PRIO - 1)
+
+#define SPAF_SINBINNED	(1 << 0)	/* I am sinbinned */
+#define SPAF_UISLEEP	(1 << 1)	/* Uninterruptible sleep */
+#define SPAF_NONIASLEEP	(1 << 2)	/* Non interactive sleep */
+#ifdef CPUSCHED_AUTODETECT_MEDIA
+#define SPAF_MEDIA	(1 << 3)	/* I'm a media streamer */
+#define SPAF_REGULAR	(1 << 4)	/* I wake at regular intervals */
+#endif
+
+#define task_is_sinbinned(p) \
+	(unlikely(((p)->sdu.spa.flags & SPAF_SINBINNED) != 0))
+#define task_is_bgnd(p) (unlikely((p)->sdu.spa.cpu_rate_cap == 0))
+#define task_is_in_ia_sleep(p) \
+	(((p)->sdu.spa.flags & (SPAF_NONIASLEEP | SPAF_UISLEEP)) == 0)
+
+/*
+ * Define a common interface for SPA based schedulers to allow maximum
+ * sharing of code.
+ */
+struct sched_spa_child {
+	int (*soft_cap_effective_prio)(const struct task_struct *);
+	int (*normal_effective_prio)(const struct task_struct *);
+	void (*reassess_at_activation)(struct task_struct *);
+	void (*fork_extras)(struct task_struct *);
+	void (*runq_data_tick)(unsigned int, unsigned long);
+	void (*reassess_at_end_of_ts)(struct task_struct *);
+	void (*reassess_at_sinbin_release)(struct task_struct *);
+	void (*reassess_at_renice)(struct task_struct *);
+};
+
+extern struct sched_spa_child *spa_sched_child;
+
+/*
+ * Common functions for use by child schedulers
+ */
+extern int spa_pb_soft_cap_priority(const task_t *, int);
+extern void spa_sched_init(void);
+extern void spa_init_runqueue_queue(union runqueue_queue *);
+extern void spa_set_oom_time_slice(struct task_struct *, unsigned long);
+extern unsigned int spa_task_timeslice(const task_t *);
+extern void spa_wake_up_task(struct task_struct *, struct runqueue *,
+			     unsigned int, int);
+extern void spa_fork(task_t *);
+extern void spa_wake_up_new_task(task_t *, unsigned long);
+extern void spa_exit(task_t *);
+extern void spa_tick(struct task_struct *, struct runqueue *,
+		     unsigned long long);
+extern void spa_schedule(void);
+extern void spa_set_normal_task_nice(task_t *, long);
+extern void spa_setscheduler(task_t *, int, int);
+extern long spa_sys_yield(void);
+extern void spa_yield(void);
+extern void spa_init_idle(task_t *, int);
+#ifdef CONFIG_SMP
+extern int spa_move_tasks(runqueue_t *, int, runqueue_t *, unsigned long, long,
+			  struct sched_domain *, enum idle_type, int *);
+extern void spa_migrate_queued_task(struct task_struct *, int);
+#ifdef CONFIG_HOTPLUG_CPU
+extern void spa_set_select_idle_first(struct runqueue *);
+extern void spa_set_select_idle_last(struct runqueue *);
+extern void spa_migrate_dead_tasks(unsigned int);
+#endif
+#endif
+#ifdef CONFIG_SCHED_SMT
+extern struct task_struct *spa_head_of_queue(union runqueue_queue *);
+extern int spa_dependent_sleeper_trumps(const struct task_struct *,
+					const struct task_struct *,
+					struct sched_domain *);
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+extern void spa_normalize_rt_task(struct task_struct *);
+#endif
+
+SCHED_DRV_DECLARE_SYSFS_ENTRY(time_slice);
+SCHED_DRV_DECLARE_SYSFS_ENTRY(sched_rr_time_slice);
+SCHED_DRV_DECLARE_SYSFS_ENTRY(bgnd_time_slice_multiplier);
+SCHED_DRV_DECLARE_SYSFS_ENTRY(base_prom_interval);
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+SCHED_DRV_DECLARE_SYSFS_ENTRY(log_at_exit);
+SCHED_DRV_DECLARE_SYSFS_ENTRY(cpustats);
+#endif
+
+#endif
diff -urN oldtree/include/linux/sched_task.h newtree/include/linux/sched_task.h
--- oldtree/include/linux/sched_task.h	1970-01-01 00:00:00.000000000 +0000
+++ newtree/include/linux/sched_task.h	2006-02-04 18:03:15.589984536 +0000
@@ -0,0 +1,86 @@
+#ifndef _LINUX_SCHED_TASK_H
+#define _LINUX_SCHED_TASK_H
+/*
+ * include/linux/sched_task.h
+ */
+
+/*
+ * Require that the relationship between 'nice' and 'static_prio' be the same
+ * for all schedulers.
+ * Convert user-nice values [ -20 ... 0 ... 19 ]
+ * to static priority [ MAX_RT_PRIO..(MAX_RT_PRIO + 39) ],
+ * and back.
+ */
+#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
+#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
+#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)
+
+#ifdef CONFIG_CPUSCHED_INGO
+struct ingo_sched_drv_task {
+	struct prio_array *array;
+	unsigned int time_slice;
+	unsigned int first_time_slice;
+	unsigned long sleep_avg;
+	int activated;
+};
+#endif
+
+#ifdef CONFIG_CPUSCHED_STAIRCASE
+struct staircase_sched_drv_task {
+	unsigned long sflags;
+	unsigned long runtime, totalrun, ns_debit;
+	unsigned int bonus;
+	unsigned int slice, time_slice;
+};
+#endif
+
+#ifdef CONFIG_CPUSCHED_SPA
+#include <linux/sched_cpustats.h>
+
+struct spa_sched_drv_task {
+	unsigned int time_slice;
+	struct task_cpustats cpustats;
+	unsigned long cpu_rate_cap, min_cpu_rate_cap;
+	unsigned long cpu_rate_hard_cap;
+	struct timer_list sinbin_timer;
+	unsigned int flags;
+	/* fields needed by children such as zaphod */
+	unsigned int pre_bonus_priority;
+	unsigned int interactive_bonus;
+	unsigned int throughput_bonus;
+	unsigned int eb_shares;
+};
+
+/* set/get cpu rate caps in parts per thousand */
+extern int set_cpu_rate_cap(struct task_struct *p, unsigned long new_cap);
+extern int set_cpu_rate_hard_cap(struct task_struct *p, unsigned long new_cap);
+extern unsigned long get_cpu_rate_cap(struct task_struct *p);
+extern unsigned long get_cpu_rate_hard_cap(struct task_struct *p);
+#endif
+
+#ifdef CONFIG_CPUSCHED_NICK
+struct nick_sched_drv_task {
+	struct nick_prio_array *array;
+	unsigned long array_sequence;
+	unsigned long total_time, sleep_time;
+	int used_slice;
+};
+#endif
+
+union sched_drv_task {
+#ifdef CONFIG_CPUSCHED_INGO
+	struct ingo_sched_drv_task ingosched;
+#endif
+#ifdef CONFIG_CPUSCHED_STAIRCASE
+	struct staircase_sched_drv_task staircase;
+#endif
+#ifdef CONFIG_CPUSCHED_SPA
+	struct spa_sched_drv_task spa;
+#endif
+#ifdef CONFIG_CPUSCHED_NICK
+	struct nick_sched_drv_task nicksched;
+#endif
+};
+
+void set_oom_time_slice(struct task_struct *p, unsigned long t);
+#endif
diff -urN oldtree/init/Kconfig newtree/init/Kconfig
--- oldtree/init/Kconfig	2006-01-03 03:21:10.000000000 +0000
+++ newtree/init/Kconfig	2006-02-04 18:03:15.589984536 +0000
@@ -269,6 +269,8 @@
 
 	  If unsure, say N.
 
+source "kernel/Kconfig.cpusched"
+
 menuconfig EMBEDDED
 	bool "Configure standard kernel features (for small systems)"
 	help
diff -urN oldtree/init/Kconfig.orig newtree/init/Kconfig.orig
--- oldtree/init/Kconfig.orig	1970-01-01 00:00:00.000000000 +0000
+++ newtree/init/Kconfig.orig	2006-01-03 03:21:10.000000000 +0000
@@ -0,0 +1,508 @@
+menu "Code maturity level options"
+
+config EXPERIMENTAL
+	bool "Prompt for development and/or incomplete code/drivers"
+	---help---
+	  Some of the various things that Linux supports (such as network
+	  drivers, file systems, network protocols, etc.) can be in a state
+	  of development where the functionality, stability, or the level of
+	  testing is not yet high enough for general use. This is usually
+	  known as the "alpha-test" phase among developers. If a feature is
+	  currently in alpha-test, then the developers usually discourage
+	  uninformed widespread use of this feature by the general public to
+	  avoid "Why doesn't this work?" type mail messages. However, active
+	  testing and use of these systems is welcomed. Just be aware that it
+	  may not meet the normal level of reliability or it may fail to work
+	  in some special cases. Detailed bug reports from people familiar
+	  with the kernel internals are usually welcomed by the developers
+	  (before submitting bug reports, please read the documents
+	  <file:README>, <file:MAINTAINERS>, <file:REPORTING-BUGS>,
+	  <file:Documentation/BUG-HUNTING>, and
+	  <file:Documentation/oops-tracing.txt> in the kernel source).
+
+	  This option will also make obsoleted drivers available. These are
+	  drivers that have been replaced by something else, and/or are
+	  scheduled to be removed in a future kernel release.
+
+	  Unless you intend to help test and develop a feature or driver that
+	  falls into this category, or you have a situation that requires
+	  using these features, you should probably say N here, which will
+	  cause the configurator to present you with fewer choices. If
+	  you say Y here, you will be offered the choice of using features or
+	  drivers that are currently considered to be in the alpha-test phase.
+
+config CLEAN_COMPILE
+	bool "Select only drivers expected to compile cleanly" if EXPERIMENTAL
+	default y
+	help
+	  Select this option if you don't even want to see the option
+	  to configure known-broken drivers.
+
+	  If unsure, say Y
+
+config BROKEN
+	bool
+	depends on !CLEAN_COMPILE
+	default y
+
+config BROKEN_ON_SMP
+	bool
+	depends on BROKEN || !SMP
+	default y
+
+config LOCK_KERNEL
+	bool
+	depends on SMP || PREEMPT
+	default y
+
+config INIT_ENV_ARG_LIMIT
+	int
+	default 32 if !USERMODE
+	default 128 if USERMODE
+	help
+	  Maximum of each of the number of arguments and environment
+	  variables passed to init from the kernel command line.
+
+endmenu
+
+menu "General setup"
+
+config LOCALVERSION
+	string "Local version - append to kernel release"
+	help
+	  Append an extra string to the end of your kernel version.
+	  This will show up when you type uname, for example.
+	  The string you set here will be appended after the contents of
+	  any files with a filename matching localversion* in your
+	  object and source tree, in that order.  Your total string can
+	  be a maximum of 64 characters.
+
+config LOCALVERSION_AUTO
+	bool "Automatically append version information to the version string"
+	default y
+	help
+	  This will try to automatically determine if the current tree is a
+	  release tree by looking for git tags that
+	  belong to the current top of tree revision.
+
+	  A string of the format -gxxxxxxxx will be added to the localversion
+	  if a git based tree is found.  The string generated by this will be
+	  appended after any matching localversion* files, and after the value
+	  set in CONFIG_LOCALVERSION
+
+	  Note: This requires Perl, and a git repository, but not necessarily
+	  the git or cogito tools to be installed.
+
+config SWAP
+	bool "Support for paging of anonymous memory (swap)"
+	depends on MMU
+	default y
+	help
+	  This option allows you to choose whether you want to have support
+	  for socalled swap devices or swap files in your kernel that are
+	  used to provide more virtual memory than the actual RAM present
+	  in your computer.  If unsure say Y.
+
+config SYSVIPC
+	bool "System V IPC"
+	depends on MMU
+	---help---
+	  Inter Process Communication is a suite of library functions and
+	  system calls which let processes (running programs) synchronize and
+	  exchange information. It is generally considered to be a good thing,
+	  and some programs won't run unless you say Y here. In particular, if
+	  you want to run the DOS emulator dosemu under Linux (read the
+	  DOSEMU-HOWTO, available from <http://www.tldp.org/docs.html#howto>),
+	  you'll need to say Y here.
+
+	  You can find documentation about IPC with "info ipc" and also in
+	  section 6.4 of the Linux Programmer's Guide, available from
+	  <http://www.tldp.org/guides.html>.
+
+config POSIX_MQUEUE
+	bool "POSIX Message Queues"
+	depends on NET && EXPERIMENTAL
+	---help---
+	  POSIX variant of message queues is a part of IPC. In POSIX message
+	  queues every message has a priority which decides about succession
+	  of receiving it by a process. If you want to compile and run
+	  programs written e.g. for Solaris with use of its POSIX message
+	  queues (functions mq_*) say Y here. To use this feature you will
+	  also need mqueue library, available from
+	  <http://www.mat.uni.torun.pl/~wrona/posix_ipc/>
+
+	  POSIX message queues are visible as a filesystem called 'mqueue'
+	  and can be mounted somewhere if you want to do filesystem
+	  operations on message queues.
+
+	  If unsure, say Y.
+
+config BSD_PROCESS_ACCT
+	bool "BSD Process Accounting"
+	help
+	  If you say Y here, a user level program will be able to instruct the
+	  kernel (via a special system call) to write process accounting
+	  information to a file: whenever a process exits, information about
+	  that process will be appended to the file by the kernel.  The
+	  information includes things such as creation time, owning user,
+	  command name, memory usage, controlling terminal etc. (the complete
+	  list is in the struct acct in <file:include/linux/acct.h>).  It is
+	  up to the user level program to do useful things with this
+	  information.  This is generally a good idea, so say Y.
+
+config BSD_PROCESS_ACCT_V3
+	bool "BSD Process Accounting version 3 file format"
+	depends on BSD_PROCESS_ACCT
+	default n
+	help
+	  If you say Y here, the process accounting information is written
+	  in a new file format that also logs the process IDs of each
+	  process and it's parent. Note that this file format is incompatible
+	  with previous v0/v1/v2 file formats, so you will need updated tools
+	  for processing it. A preliminary version of these tools is available
+	  at <http://www.physik3.uni-rostock.de/tim/kernel/utils/acct/>.
+
+config SYSCTL
+	bool "Sysctl support"
+	---help---
+	  The sysctl interface provides a means of dynamically changing
+	  certain kernel parameters and variables on the fly without requiring
+	  a recompile of the kernel or reboot of the system.  The primary
+	  interface consists of a system call, but if you say Y to "/proc
+	  file system support", a tree of modifiable sysctl entries will be
+	  generated beneath the /proc/sys directory. They are explained in the
+	  files in <file:Documentation/sysctl/>.  Note that enabling this
+	  option will enlarge the kernel by at least 8 KB.
+
+	  As it is generally a good thing, you should say Y here unless
+	  building a kernel for install/rescue disks or your system is very
+	  limited in memory.
+
+config AUDIT
+	bool "Auditing support"
+	depends on NET
+	default y if SECURITY_SELINUX
+	help
+	  Enable auditing infrastructure that can be used with another
+	  kernel subsystem, such as SELinux (which requires this for
+	  logging of avc messages output).  Does not do system-call
+	  auditing without CONFIG_AUDITSYSCALL.
+
+config AUDITSYSCALL
+	bool "Enable system-call auditing support"
+	depends on AUDIT && (X86 || PPC || PPC64 || ARCH_S390 || IA64 || UML || SPARC64)
+	default y if SECURITY_SELINUX
+	help
+	  Enable low-overhead system-call auditing infrastructure that
+	  can be used independently or with another kernel subsystem,
+	  such as SELinux.
+
+config HOTPLUG
+	bool "Support for hot-pluggable devices" if !ARCH_S390
+	default ARCH_S390
+	help
+	  This option is provided for the case where no in-kernel-tree
+	  modules require HOTPLUG functionality, but a module built
+	  outside the kernel tree does. Such modules require Y here.
+
+config KOBJECT_UEVENT
+	bool "Kernel Userspace Events" if EMBEDDED
+	depends on NET
+	default y
+	help
+	  This option enables the kernel userspace event layer, which is a
+	  simple mechanism for kernel-to-user communication over a netlink
+	  socket.
+	  The goal of the kernel userspace events layer is to provide a simple
+	  and efficient events system, that notifies userspace about kobject
+	  state changes. This will enable applications to just listen for
+	  events instead of polling system devices and files.
+	  Hotplug events (kobject addition and removal) are also available on
+	  the netlink socket in addition to the execution of /sbin/hotplug if
+	  CONFIG_HOTPLUG is enabled.
+
+	  Say Y, unless you are building a system requiring minimal memory
+	  consumption.
+
+config IKCONFIG
+	bool "Kernel .config support"
+	---help---
+	  This option enables the complete Linux kernel ".config" file
+	  contents to be saved in the kernel. It provides documentation
+	  of which kernel options are used in a running kernel or in an
+	  on-disk kernel.  This information can be extracted from the kernel
+	  image file with the script scripts/extract-ikconfig and used as
+	  input to rebuild the current kernel or to build another kernel.
+	  It can also be extracted from a running kernel by reading
+	  /proc/config.gz if enabled (below).
+
+config IKCONFIG_PROC
+	bool "Enable access to .config through /proc/config.gz"
+	depends on IKCONFIG && PROC_FS
+	---help---
+	  This option enables access to the kernel configuration file
+	  through /proc/config.gz.
+
+config CPUSETS
+	bool "Cpuset support"
+	depends on SMP
+	help
+	  This option will let you create and manage CPUSETs which
+	  allow dynamically partitioning a system into sets of CPUs and
+	  Memory Nodes and assigning tasks to run only within those sets.
+	  This is primarily useful on large SMP or NUMA systems.
+
+	  Say N if unsure.
+
+source "usr/Kconfig"
+
+config CC_OPTIMIZE_FOR_SIZE
+	bool "Optimize for size (Look out for broken compilers!)"
+	default y
+	depends on ARM || H8300 || EXPERIMENTAL
+	help
+	  Enabling this option will pass "-Os" instead of "-O2" to gcc
+	  resulting in a smaller kernel.
+
+	  WARNING: some versions of gcc may generate incorrect code with this
+	  option.  If problems are observed, a gcc upgrade may be needed.
+
+	  If unsure, say N.
+
+menuconfig EMBEDDED
+	bool "Configure standard kernel features (for small systems)"
+	help
+	  This option allows certain base kernel options and settings
+          to be disabled or tweaked. This is for specialized
+          environments which can tolerate a "non-standard" kernel.
+          Only use this if you really know what you are doing.
+
+config KALLSYMS
+	 bool "Load all symbols for debugging/kksymoops" if EMBEDDED
+	 default y
+	 help
+	   Say Y here to let the kernel print out symbolic crash information and
+	   symbolic stack backtraces. This increases the size of the kernel
+	   somewhat, as all symbols have to be loaded into the kernel image.
+
+config KALLSYMS_ALL
+	bool "Include all symbols in kallsyms"
+	depends on DEBUG_KERNEL && KALLSYMS
+	help
+	   Normally kallsyms only contains the symbols of functions, for nicer
+	   OOPS messages.  Some debuggers can use kallsyms for other
+	   symbols too: say Y here to include all symbols, if you need them 
+	   and you don't care about adding 300k to the size of your kernel.
+
+	   Say N.
+
+config KALLSYMS_EXTRA_PASS
+	bool "Do an extra kallsyms pass"
+	depends on KALLSYMS
+	help
+	   If kallsyms is not working correctly, the build will fail with
+	   inconsistent kallsyms data.  If that occurs, log a bug report and
+	   turn on KALLSYMS_EXTRA_PASS which should result in a stable build.
+	   Always say N here unless you find a bug in kallsyms, which must be
+	   reported.  KALLSYMS_EXTRA_PASS is only a temporary workaround while
+	   you wait for kallsyms to be fixed.
+
+
+config PRINTK
+	default y
+	bool "Enable support for printk" if EMBEDDED
+	help
+	  This option enables normal printk support. Removing it
+	  eliminates most of the message strings from the kernel image
+	  and makes the kernel more or less silent. As this makes it
+	  very difficult to diagnose system problems, saying N here is
+	  strongly discouraged.
+
+config BUG
+	bool "BUG() support" if EMBEDDED
+	default y
+	help
+          Disabling this option eliminates support for BUG and WARN, reducing
+          the size of your kernel image and potentially quietly ignoring
+          numerous fatal conditions. You should only consider disabling this
+          option for embedded systems with no facilities for reporting errors.
+          Just say Y.
+
+config BASE_FULL
+	default y
+	bool "Enable full-sized data structures for core" if EMBEDDED
+	help
+	  Disabling this option reduces the size of miscellaneous core
+	  kernel data structures. This saves memory on small machines,
+	  but may reduce performance.
+
+config FUTEX
+	bool "Enable futex support" if EMBEDDED
+	default y
+	help
+	  Disabling this option will cause the kernel to be built without
+	  support for "fast userspace mutexes".  The resulting kernel may not
+	  run glibc-based applications correctly.
+
+config EPOLL
+	bool "Enable eventpoll support" if EMBEDDED
+	default y
+	help
+	  Disabling this option will cause the kernel to be built without
+	  support for epoll family of system calls.
+
+config SHMEM
+	bool "Use full shmem filesystem" if EMBEDDED
+	default y
+	depends on MMU
+	help
+	  The shmem is an internal filesystem used to manage shared memory.
+	  It is backed by swap and manages resource limits. It is also exported
+	  to userspace as tmpfs if TMPFS is enabled. Disabling this
+	  option replaces shmem and tmpfs with the much simpler ramfs code,
+	  which may be appropriate on small systems without swap.
+
+config CC_ALIGN_FUNCTIONS
+	int "Function alignment" if EMBEDDED
+	default 0
+	help
+	  Align the start of functions to the next power-of-two greater than n,
+	  skipping up to n bytes.  For instance, 32 aligns functions
+	  to the next 32-byte boundary, but 24 would align to the next
+	  32-byte boundary only if this can be done by skipping 23 bytes or less.
+	  Zero means use compiler's default.
+
+config CC_ALIGN_LABELS
+	int "Label alignment" if EMBEDDED
+	default 0
+	help
+	  Align all branch targets to a power-of-two boundary, skipping
+	  up to n bytes like ALIGN_FUNCTIONS.  This option can easily
+	  make code slower, because it must insert dummy operations for
+	  when the branch target is reached in the usual flow of the code.
+	  Zero means use compiler's default.
+
+config CC_ALIGN_LOOPS
+	int "Loop alignment" if EMBEDDED
+	default 0
+	help
+	  Align loops to a power-of-two boundary, skipping up to n bytes.
+	  Zero means use compiler's default.
+
+config CC_ALIGN_JUMPS
+	int "Jump alignment" if EMBEDDED
+	default 0
+	help
+	  Align branch targets to a power-of-two boundary, for branch
+	  targets where the targets can only be reached by jumping,
+	  skipping up to n bytes like ALIGN_FUNCTIONS.  In this case,
+	  no dummy operations need be executed.
+	  Zero means use compiler's default.
+
+endmenu		# General setup
+
+config TINY_SHMEM
+	default !SHMEM
+	bool
+
+config BASE_SMALL
+	int
+	default 0 if BASE_FULL
+	default 1 if !BASE_FULL
+
+menu "Loadable module support"
+
+config MODULES
+	bool "Enable loadable module support"
+	help
+	  Kernel modules are small pieces of compiled code which can
+	  be inserted in the running kernel, rather than being
+	  permanently built into the kernel.  You use the "modprobe"
+	  tool to add (and sometimes remove) them.  If you say Y here,
+	  many parts of the kernel can be built as modules (by
+	  answering M instead of Y where indicated): this is most
+	  useful for infrequently used options which are not required
+	  for booting.  For more information, see the man pages for
+	  modprobe, lsmod, modinfo, insmod and rmmod.
+
+	  If you say Y here, you will need to run "make
+	  modules_install" to put the modules under /lib/modules/
+	  where modprobe can find them (you may need to be root to do
+	  this).
+
+	  If unsure, say Y.
+
+config MODULE_UNLOAD
+	bool "Module unloading"
+	depends on MODULES
+	help
+	  Without this option you will not be able to unload any
+	  modules (note that some modules may not be unloadable
+	  anyway), which makes your kernel slightly smaller and
+	  simpler.  If unsure, say Y.
+
+config MODULE_FORCE_UNLOAD
+	bool "Forced module unloading"
+	depends on MODULE_UNLOAD && EXPERIMENTAL
+	help
+	  This option allows you to force a module to unload, even if the
+	  kernel believes it is unsafe: the kernel will remove the module
+	  without waiting for anyone to stop using it (using the -f option to
+	  rmmod).  This is mainly for kernel developers and desperate users.
+	  If unsure, say N.
+
+config OBSOLETE_MODPARM
+	bool
+	default y
+	depends on MODULES
+	help
+	  You need this option to use module parameters on modules which
+	  have not been converted to the new module parameter system yet.
+	  If unsure, say Y.
+
+config MODVERSIONS
+	bool "Module versioning support (EXPERIMENTAL)"
+	depends on MODULES && EXPERIMENTAL
+	help
+	  Usually, you have to use modules compiled with your kernel.
+	  Saying Y here makes it sometimes possible to use modules
+	  compiled for different kernels, by adding enough information
+	  to the modules to (hopefully) spot any changes which would
+	  make them incompatible with the kernel you are running.  If
+	  unsure, say N.
+
+config MODULE_SRCVERSION_ALL
+	bool "Source checksum for all modules"
+	depends on MODULES
+	help
+	  Modules which contain a MODULE_VERSION get an extra "srcversion"
+	  field inserted into their modinfo section, which contains a
+    	  sum of the source files which made it.  This helps maintainers
+	  see exactly which source was used to build a module (since
+	  others sometimes change the module source without updating
+	  the version).  With this option, such a "srcversion" field
+	  will be created for all modules.  If unsure, say N.
+
+config KMOD
+	bool "Automatic kernel module loading"
+	depends on MODULES
+	help
+	  Normally when you have selected some parts of the kernel to
+	  be created as kernel modules, you must load them (using the
+	  "modprobe" command) before you can use them. If you say Y
+	  here, some parts of the kernel will be able to load modules
+	  automatically: when a part of the kernel needs a module, it
+	  runs modprobe with the appropriate arguments, thereby
+	  loading the module if it is available.  If unsure, say Y.
+
+config STOP_MACHINE
+	bool
+	default y
+	depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
+	help
+	  Need stop_machine() primitive.
+endmenu
+
+menu "Block layer"
+source "block/Kconfig"
+endmenu
diff -urN oldtree/init/main.c newtree/init/main.c
--- oldtree/init/main.c	2006-01-03 03:21:10.000000000 +0000
+++ newtree/init/main.c	2006-02-04 18:03:15.590984384 +0000
@@ -48,6 +48,7 @@
 #include <linux/mempolicy.h>
 #include <linux/key.h>
 #include <net/sock.h>
+#include <linux/sched_drv.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -464,12 +465,6 @@
 	smp_prepare_boot_cpu();
 
 	/*
-	 * Set up the scheduler prior starting any interrupts (such as the
-	 * timer interrupt). Full topology setup happens at smp_init()
-	 * time - but meanwhile we still have a functioning scheduler.
-	 */
-	sched_init();
-	/*
 	 * Disable preemption - early bootup scheduling is extremely
 	 * fragile until we cpu_idle() for the first time.
 	 */
@@ -481,6 +476,16 @@
 	parse_args("Booting kernel", command_line, __start___param,
 		   __stop___param - __start___param,
 		   &unknown_bootoption);
+	/*
+	 * Set up the scheduler prior starting any interrupts (such as the
+	 * timer interrupt). Full topology setup happens at smp_init()
+	 * time - but meanwhile we still have a functioning scheduler.
+	 * But defer until after boot command line is parsed to avoid doing
+	 * this twice in the event that a different scheduler is selected.
+	 */
+	preempt_enable();
+	sched_init();
+	preempt_disable();
 	sort_main_extable();
 	trap_init();
 	rcu_init();
@@ -544,6 +549,7 @@
 
 	acpi_early_init(); /* before LAPIC and SMP init */
 
+	printk("Running with \"%s\" cpu scheduler.\n", sched_drvp->name);
 	/* Do the rest non-__init'ed, we're now alive */
 	rest_init();
 }
@@ -613,6 +619,7 @@
 #ifdef CONFIG_SYSCTL
 	sysctl_init();
 #endif
+	sched_drv_sysfs_init();
 
 	/* Networking initialization needs a process context */ 
 	sock_init();
diff -urN oldtree/kernel/Kconfig.cpusched newtree/kernel/Kconfig.cpusched
--- oldtree/kernel/Kconfig.cpusched	1970-01-01 00:00:00.000000000 +0000
+++ newtree/kernel/Kconfig.cpusched	2006-02-04 18:03:15.591984232 +0000
@@ -0,0 +1,205 @@
+
+menu "CPU schedulers"
+
+config CPUSCHED_SPA
+	bool
+	default n
+
+config CPUSCHED_CHOICE
+	bool "Support multiple CPU schedulers"
+	default y
+	---help---
+	  Say y here if you wish to be able to make a boot time selection
+	  of which CPU scheduler to use.  The CPU scheduler to be used may
+	  then be selected with the boot parameter "cpusched=".  In the
+	  absence of such a command line parameter, the scheduler selected
+	  at "Default CPU scheduler" will be used.
+
+	  The choice of which schedulers should be compiled into the
+	  kernel (and be available for boot time selection) can be made
+	  by enabling "Select which CPU schedulers to build in".
+
+	  If you say n here the single scheduler to be built into the
+	  kernel may be selected at "Default CPU scheduler".
+
+config CPUSCHED_CHOOSE_BUILTINS
+	bool "Select which CPU schedulers to build in" if CPUSCHED_CHOICE
+	default n
+	---help---
+	  Say y here if you want to be able to select which CPU schedulers
+	  are built into the kernel (for selection at boot time).
+
+config CPUSCHED_INGO
+	bool "Ingosched CPU scheduler" if CPUSCHED_CHOOSE_BUILTINS
+	depends on CPUSCHED_CHOICE
+	default y
+	---help---
+	  This is the standard CPU scheduler which is an O(1) dual priority
+	  array scheduler with a hybrid interactive design.
+	  To boot this CPU scheduler, if it is not the default, use the
+	  boot parameter "cpusched=ingosched".
+
+config CPUSCHED_STAIRCASE
+	bool "Staircase CPU scheduler" if CPUSCHED_CHOOSE_BUILTINS
+	depends on CPUSCHED_CHOICE
+	default y
+	---help---
+	  This scheduler is an O(1) single priority array with a foreground-
+	  background interactive design.
+	  To boot this CPU scheduler, if it is not the default, use the
+	  boot parameter "cpusched=staircase".
+
+config CPUSCHED_SPA_NF
+	bool "SPA CPU scheduler (no frills)" if CPUSCHED_CHOOSE_BUILTINS
+	depends on CPUSCHED_CHOICE
+	select CPUSCHED_SPA
+	default y
+	---help---
+	  This scheduler is a simple round robin O(1) single priority array
+	  scheduler with NO extra scheduling "frills" except for soft and hard
+	  CPU usage rate caps.  This scheduler contains no extra mechanisms
+	  for enhancing interactive response and is best suited for server
+	  systems.
+	  To boot this CPU scheduler, if it is not the default, use the
+	  boot parameter "cpusched=spa_no_frills".
+
+config CPUSCHED_SPA_WS
+	bool "SPA CPU scheduler (work station)" if CPUSCHED_CHOOSE_BUILTINS
+	depends on CPUSCHED_CHOICE
+	select CPUSCHED_SPA
+	default y
+	---help---
+	  This is a scheduler with a O(1) single priority array intended for
+	  use on work stations.  In addition to soft and hard CPU usage rate
+	  caps, it has modifications to improve interactive responsiveness
+	  and media streamer latency.
+	  To boot this CPU scheduler, if it is not the default, use the
+	  boot parameter "cpusched=spa_ws".
+
+config CPUSCHED_SPA_SVR
+	bool "SPA CPU scheduler (server)" if CPUSCHED_CHOOSE_BUILTINS
+	depends on CPUSCHED_CHOICE
+	select CPUSCHED_SPA
+	default y
+	---help---
+	  This is a scheduler with a O(1) single priority array intended for
+	  use on servers.   In addition to soft and hard CPU usage rate
+	  caps, it has modifications to reduce CPU delay at moderate load
+	  levels.
+	  To boot this CPU scheduler, if it is not the default, use the
+	  boot parameter "cpusched=spa_svr".
+
+config CPUSCHED_ZAPHOD
+	bool "Zaphod CPU scheduler" if CPUSCHED_CHOOSE_BUILTINS
+	depends on CPUSCHED_CHOICE
+	select CPUSCHED_SPA
+	default y
+	---help---
+	  This scheduler is an O(1) single priority array with interactive
+	  bonus, throughput bonus, soft and hard CPU rate caps and a runtime
+	  choice between priority based and entitlement based interpretation
+	  of nice.
+	  To boot this CPU scheduler, if it is not the default, use the
+	  boot parameter "cpusched=zaphod".
+
+config CPUSCHED_NICK
+	bool "Nicksched CPU scheduler" if CPUSCHED_CHOOSE_BUILTINS
+	depends on CPUSCHED_CHOICE
+	default y
+	---help---
+	  This CPU scheduler is an O(1) dual priority array scheduler
+	  with a hybrid interactive design as modified by
+	  Nick Piggin.
+	  To boot this CPU scheduler, if it is not the default, use the
+	  boot parameter "cpusched=nicksched".
+
+menu "SPA scheduler extras"
+	depends on CPUSCHED_SPA
+
+config CPUSCHED_ACCRUED_STATS
+	bool "Keep accrued statistics for CPU usage"
+	depends on CPUSCHED_SPA
+	default n
+	---help---
+	  Make accrued CPU statistics available.  Adds approx. 0.5% to system
+	  overhead.
+
+config CPUSCHED_AUTODETECT_MEDIA
+	bool "Automatic detection of and special treatment of media streamers"
+	depends on CPUSCHED_SPA_WS
+	default n
+	---help---
+	  Automatically detect media streamer threads based on the length and
+	  regularity of the interval between CPU bursts.  Give such threads the
+	  maximum interactive bonus.
+
+endmenu
+
+choice
+	prompt "Default CPU scheduler"
+	---help---
+	  This option allows you to choose which CPU scheduler shall be
+	  booted by default at startup if you have enabled CPUSCHED_CHOICE,
+	  or it will select the only scheduler to be built in otherwise.
+
+config CPUSCHED_DEFAULT_INGO
+	bool "Ingosched CPU scheduler"
+	select CPUSCHED_INGO
+	---help---
+	  This is the default CPU scheduler which is an O(1) dual priority
+	  array scheduler with a hybrid interactive design.
+
+config CPUSCHED_DEFAULT_STAIRCASE
+	bool "Staircase CPU scheduler"
+	select CPUSCHED_STAIRCASE
+	---help---
+	  This scheduler is an O(1) single priority array with a foreground-
+	  background interactive design.
+
+config CPUSCHED_DEFAULT_SPA_NF
+	bool "Single priority array (SPA) CPU scheduler (no frills)"
+	select CPUSCHED_SPA_NF
+	select CPUSCHED_SPA
+	---help---
+	  This is a simple round robin scheduler with a O(1) single priority
+	  array.
+
+config CPUSCHED_DEFAULT_SPA_WS
+	bool "Single priority array (SPA) CPU scheduler (work station)"
+	select CPUSCHED_SPA_WS
+	select CPUSCHED_SPA
+	---help---
+	  This is a scheduler with a O(1) single priority array intended for
+	  use on work stations.  It has modifications to improve interactive
+	  responsiveness and media streamer latency.
+
+config CPUSCHED_DEFAULT_SPA_SVR
+	bool "Single priority array (SPA) CPU scheduler (server)"
+	select CPUSCHED_SPA_SVR
+	select CPUSCHED_SPA
+	---help---
+	  This is a scheduler with a O(1) single priority array intended for
+	  use on servers.  It has modifications to reduce CPU delay at moderate
+	  levels of load.
+
+config CPUSCHED_DEFAULT_ZAPHOD
+	bool "Zaphod CPU scheduler"
+	select CPUSCHED_ZAPHOD
+	select CPUSCHED_SPA
+	---help---
+	  This scheduler is an O(1) single priority array with interactive
+	  bonus, throughput bonus, soft and hard CPU rate caps and a runtime
+	  choice between priority based and entitlement based interpretation
+	  of nice.
+
+config CPUSCHED_DEFAULT_NICK
+	bool "Nicksched CPU scheduler"
+	select CPUSCHED_NICK
+	---help---
+	  This is the default CPU scheduler which is an O(1) dual priority
+	  array scheduler with a hybrid interactive design as modified by
+	  Nick Piggin.
+
+endchoice
+
+endmenu
diff -urN oldtree/kernel/Makefile newtree/kernel/Makefile
--- oldtree/kernel/Makefile	2006-01-03 03:21:10.000000000 +0000
+++ newtree/kernel/Makefile	2006-02-04 18:03:15.592984080 +0000
@@ -7,8 +7,15 @@
 	    sysctl.o capability.o ptrace.o timer.o user.o \
 	    signal.o sys.o kmod.o workqueue.o pid.o \
 	    rcupdate.o intermodule.o extable.o params.o posix-timers.o \
-	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o
+	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o sched_drv.o
 
+obj-$(CONFIG_CPUSCHED_INGO) += ingosched.o
+obj-$(CONFIG_CPUSCHED_STAIRCASE) += staircase.o
+obj-$(CONFIG_CPUSCHED_SPA) += sched_spa.o sched_cpustats.o
+obj-$(CONFIG_CPUSCHED_SPA_WS) += sched_spa_ws.o
+obj-$(CONFIG_CPUSCHED_SPA_SVR) += sched_spa_svr.o
+obj-$(CONFIG_CPUSCHED_ZAPHOD) += sched_zaphod.o
+obj-$(CONFIG_CPUSCHED_NICK) += nicksched.o
 obj-$(CONFIG_FUTEX) += futex.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
 obj-$(CONFIG_SMP) += cpu.o spinlock.o
diff -urN oldtree/kernel/ingosched.c newtree/kernel/ingosched.c
--- oldtree/kernel/ingosched.c	1970-01-01 00:00:00.000000000 +0000
+++ newtree/kernel/ingosched.c	2006-02-04 18:03:15.593983928 +0000
@@ -0,0 +1,1207 @@
+/*
+ *  kernel/ingosched.c
+ *  Copyright (C) 1991-2005  Linus Torvalds
+ *
+ *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
+ *		hybrid priority-list and round-robin design with
+ *		an array-switch method of distributing timeslices
+ *		and per-CPU runqueues.  Cleanups and useful suggestions
+ *		by Davide Libenzi, preemptible kernel bits by Robert Love.
+ *  2003-09-03	Interactivity tuning by Con Kolivas.
+ */
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/rcupdate.h>
+#include <linux/security.h>
+#include <linux/cpu.h>
+#include <linux/hardirq.h>
+#include <linux/sched_pvt.h>
+#include <linux/sched_runq.h>
+
+static void ingo_init_runqueue_queue(union runqueue_queue *rqq)
+{
+	int j;
+
+	rqq->ingosched.active = rqq->ingosched.arrays;
+	rqq->ingosched.expired = rqq->ingosched.arrays + 1;
+	rqq->ingosched.best_expired_prio = INGO_MAX_PRIO;
+
+	for (j = 0; j < 2; j++) {
+		int k;
+		prio_array_t *array = rqq->ingosched.arrays + j;
+
+		for (k = 0; k < INGO_MAX_PRIO; k++) {
+			INIT_LIST_HEAD(array->queue + k);
+			__clear_bit(k, array->bitmap);
+		}
+		/* delimiter for bitsearch */
+		__set_bit(INGO_MAX_PRIO, array->bitmap);
+		array->nr_active = 0;
+	}
+
+	rqq->ingosched.expired_timestamp = 0;
+}
+
+static void ingo_set_oom_time_slice(struct task_struct *p, unsigned long t)
+{
+	p->sdu.ingosched.time_slice = t;
+}
+
+/*
+ * 'User priority' is the nice value converted to something we
+ * can work with better when scaling various scheduler parameters,
+ * it's a [ 0 ... 39 ] range.
+ */
+#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
+#define MAX_USER_PRIO		(USER_PRIO(INGO_MAX_PRIO))
+
+/*
+ * Some helpers for converting nanosecond timing to jiffy resolution
+ */
+#define NS_TO_JIFFIES(TIME)	((TIME) / (1000000000 / HZ))
+#define JIFFIES_TO_NS(TIME)	((TIME) * (1000000000 / HZ))
+
+/*
+ * These are the 'tuning knobs' of the scheduler:
+ *
+ * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
+ * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
+ * Timeslices get refilled after they expire.
+ */
+#define MIN_TIMESLICE		max(5 * HZ / 1000, 1)
+#define DEF_TIMESLICE		(100 * HZ / 1000)
+#define ON_RUNQUEUE_WEIGHT	 30
+#define CHILD_PENALTY		 95
+#define PARENT_PENALTY		100
+#define EXIT_WEIGHT		  3
+#define PRIO_BONUS_RATIO	 25
+#define MAX_BONUS		(MAX_USER_PRIO * PRIO_BONUS_RATIO / 100)
+#define INTERACTIVE_DELTA	  2
+#define MAX_SLEEP_AVG		(DEF_TIMESLICE * MAX_BONUS)
+#define STARVATION_LIMIT	(MAX_SLEEP_AVG)
+#define NS_MAX_SLEEP_AVG	(JIFFIES_TO_NS(MAX_SLEEP_AVG))
+
+/*
+ * If a task is 'interactive' then we reinsert it in the active
+ * array after it has expired its current timeslice. (it will not
+ * continue to run immediately, it will still roundrobin with
+ * other interactive tasks.)
+ *
+ * This part scales the interactivity limit depending on niceness.
+ *
+ * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
+ * Here are a few examples of different nice levels:
+ *
+ *  TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
+ *  TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
+ *  TASK_INTERACTIVE(  0): [1,1,1,1,0,0,0,0,0,0,0]
+ *  TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
+ *  TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
+ *
+ * (the X axis represents the possible -5 ... 0 ... +5 dynamic
+ *  priority range a task can explore, a value of '1' means the
+ *  task is rated interactive.)
+ *
+ * Ie. nice +19 tasks can never get 'interactive' enough to be
+ * reinserted into the active array. And only heavily CPU-hog nice -20
+ * tasks will be expired. Default nice 0 tasks are somewhere between,
+ * it takes some effort for them to get interactive, but it's not
+ * too hard.
+ */
+
+#define CURRENT_BONUS(p) \
+	(NS_TO_JIFFIES((p)->sdu.ingosched.sleep_avg) * MAX_BONUS / \
+		MAX_SLEEP_AVG)
+
+#define GRANULARITY	(10 * HZ / 1000 ? : 1)
+
+#ifdef CONFIG_SMP
+#define TIMESLICE_GRANULARITY(p)	(GRANULARITY * \
+		(1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)) * \
+			num_online_cpus())
+#else
+#define TIMESLICE_GRANULARITY(p)	(GRANULARITY * \
+		(1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)))
+#endif
+
+#define SCALE(v1,v1_max,v2_max) \
+	(v1) * (v2_max) / (v1_max)
+
+#define DELTA(p) \
+	(SCALE(TASK_NICE(p), 40, MAX_BONUS) + INTERACTIVE_DELTA)
+
+#define TASK_INTERACTIVE(p) \
+	((p)->prio <= (p)->static_prio - DELTA(p))
+
+#define INTERACTIVE_SLEEP(p) \
+	(JIFFIES_TO_NS(MAX_SLEEP_AVG * \
+		(MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))
+
+/*
+ * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
+ * to time slice values: [800ms ... 100ms ... 5ms]
+ *
+ * The higher a thread's priority, the bigger timeslices
+ * it gets during one round of execution. But even the lowest
+ * priority thread gets MIN_TIMESLICE worth of execution time.
+ */
+
+#define SCALE_PRIO(x, prio) \
+	max(x * (INGO_MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE)
+
+static unsigned int task_timeslice(const task_t *p)
+{
+	if (p->static_prio < NICE_TO_PRIO(0))
+		return SCALE_PRIO(DEF_TIMESLICE*4, p->static_prio);
+	else
+		return SCALE_PRIO(DEF_TIMESLICE, p->static_prio);
+}
+
+/*
+ * Adding/removing a task to/from a priority array:
+ */
+static void dequeue_task(struct task_struct *p, prio_array_t *array)
+{
+	array->nr_active--;
+	list_del_init(&p->run_list);
+	if (list_empty(array->queue + p->prio))
+		__clear_bit(p->prio, array->bitmap);
+}
+
+static void enqueue_task(struct task_struct *p, prio_array_t *array)
+{
+	sched_info_queued(p);
+	list_add_tail(&p->run_list, array->queue + p->prio);
+	__set_bit(p->prio, array->bitmap);
+	array->nr_active++;
+	p->sdu.ingosched.array = array;
+}
+
+/*
+ * Put task to the end of the run list without the overhead of dequeue
+ * followed by enqueue.
+ */
+static void requeue_task(struct task_struct *p, prio_array_t *array)
+{
+	list_move_tail(&p->run_list, array->queue + p->prio);
+}
+
+static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
+{
+	list_add(&p->run_list, array->queue + p->prio);
+	__set_bit(p->prio, array->bitmap);
+	array->nr_active++;
+	p->sdu.ingosched.array = array;
+}
+
+/*
+ * effective_prio - return the priority that is based on the static
+ * priority but is modified by bonuses/penalties.
+ *
+ * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
+ * into the -5 ... 0 ... +5 bonus/penalty range.
+ *
+ * We use 25% of the full 0...39 priority range so that:
+ *
+ * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
+ * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
+ *
+ * Both properties are important to certain workloads.
+ */
+static int effective_prio(task_t *p)
+{
+	int bonus, prio;
+
+	if (rt_task(p))
+		return p->prio;
+
+	bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
+
+	prio = p->static_prio - bonus;
+	if (prio < MAX_RT_PRIO)
+		prio = MAX_RT_PRIO;
+	if (prio > INGO_MAX_PRIO-1)
+		prio = INGO_MAX_PRIO-1;
+	return prio;
+}
+
+/*
+ * __activate_task - move a task to the runqueue.
+ */
+static inline void __activate_task(task_t *p, runqueue_t *rq)
+{
+	enqueue_task(p, rq->qu.ingosched.active);
+	inc_nr_running(p, rq);
+}
+
+static int recalc_task_prio(task_t *p, unsigned long long now)
+{
+	/* Caller must always ensure 'now >= p->timestamp' */
+	unsigned long long __sleep_time = now - p->timestamp;
+	unsigned long sleep_time;
+
+	if (__sleep_time > NS_MAX_SLEEP_AVG)
+		sleep_time = NS_MAX_SLEEP_AVG;
+	else
+		sleep_time = (unsigned long)__sleep_time;
+
+	if (likely(sleep_time > 0)) {
+		/*
+		 * User tasks that sleep a long time are categorised as
+		 * idle and will get just interactive status to stay active &
+		 * prevent them suddenly becoming cpu hogs and starving
+		 * other processes.
+		 */
+		if (p->mm && p->sdu.ingosched.activated != -1 &&
+			sleep_time > INTERACTIVE_SLEEP(p)) {
+				p->sdu.ingosched.sleep_avg = JIFFIES_TO_NS(MAX_SLEEP_AVG -
+						DEF_TIMESLICE);
+		} else {
+			/*
+			 * The lower the sleep avg a task has the more
+			 * rapidly it will rise with sleep time.
+			 */
+			sleep_time *= (MAX_BONUS - CURRENT_BONUS(p)) ? : 1;
+
+			/*
+			 * Tasks waking from uninterruptible sleep are
+			 * limited in their sleep_avg rise as they
+			 * are likely to be waiting on I/O
+			 */
+			if (p->sdu.ingosched.activated == -1 && p->mm) {
+				if (p->sdu.ingosched.sleep_avg >= INTERACTIVE_SLEEP(p))
+					sleep_time = 0;
+				else if (p->sdu.ingosched.sleep_avg + sleep_time >=
+						INTERACTIVE_SLEEP(p)) {
+					p->sdu.ingosched.sleep_avg = INTERACTIVE_SLEEP(p);
+					sleep_time = 0;
+				}
+			}
+
+			/*
+			 * This code gives a bonus to interactive tasks.
+			 *
+			 * The boost works by updating the 'average sleep time'
+			 * value here, based on ->timestamp. The more time a
+			 * task spends sleeping, the higher the average gets -
+			 * and the higher the priority boost gets as well.
+			 */
+			p->sdu.ingosched.sleep_avg += sleep_time;
+
+			if (p->sdu.ingosched.sleep_avg > NS_MAX_SLEEP_AVG)
+				p->sdu.ingosched.sleep_avg = NS_MAX_SLEEP_AVG;
+		}
+	}
+
+	return effective_prio(p);
+}
+
+/*
+ * activate_task - move a task to the runqueue and do priority recalculation
+ *
+ * Update all the scheduling statistics stuff. (sleep average
+ * calculation, priority modifiers, etc.)
+ */
+static void activate_task(task_t *p, runqueue_t *rq, int local)
+{
+	unsigned long long now;
+
+	now = sched_clock();
+#ifdef CONFIG_SMP
+	if (!local) {
+		/* Compensate for drifting sched_clock */
+		runqueue_t *this_rq = this_rq();
+		now = (now - this_rq->timestamp_last_tick)
+			+ rq->timestamp_last_tick;
+	}
+#endif
+
+	if (!rt_task(p))
+		p->prio = recalc_task_prio(p, now);
+
+	/*
+	 * This checks to make sure it's not an uninterruptible task
+	 * that is now waking up.
+	 */
+	if (!p->sdu.ingosched.activated) {
+		/*
+		 * Tasks which were woken up by interrupts (ie. hw events)
+		 * are most likely of interactive nature. So we give them
+		 * the credit of extending their sleep time to the period
+		 * of time they spend on the runqueue, waiting for execution
+		 * on a CPU, first time around:
+		 */
+		if (in_interrupt())
+			p->sdu.ingosched.activated = 2;
+		else {
+			/*
+			 * Normal first-time wakeups get a credit too for
+			 * on-runqueue time, but it will be weighted down:
+			 */
+			p->sdu.ingosched.activated = 1;
+		}
+	}
+	p->timestamp = now;
+
+	__activate_task(p, rq);
+}
+
+/*
+ * __activate_idle_task - move idle task to the _front_ of runqueue.
+ */
+static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
+{
+	enqueue_task_head(p, rq->qu.ingosched.active);
+	inc_nr_running(p, rq);
+}
+
+/*
+ * deactivate_task - remove a task from the runqueue.
+ */
+static void deactivate_task(struct task_struct *p, runqueue_t *rq)
+{
+	dec_nr_running(p, rq);
+	dequeue_task(p, p->sdu.ingosched.array);
+	p->sdu.ingosched.array = NULL;
+}
+
+/***
+ * try_to_wake_up - wake up a thread
+ * @p: the to-be-woken-up thread
+ * @old_state: the task's state before being woken
+ * @sync: do a synchronous wakeup?
+ * @rq: The run queue on which the task is to be placed (already locked)
+ */
+static void ingo_wake_up_task(struct task_struct *p, struct runqueue *rq, unsigned int old_state, int sync)
+{
+	int same_cpu = (rq == this_rq());
+
+	if (old_state == TASK_UNINTERRUPTIBLE) {
+		rq->nr_uninterruptible--;
+		/*
+		 * Tasks on involuntary sleep don't earn
+		 * sleep_avg beyond just interactive state.
+		 */
+		p->sdu.ingosched.activated = -1;
+	}
+
+	/*
+	 * Tasks that have marked their sleep as noninteractive get
+	 * woken up without updating their sleep average. (i.e. their
+	 * sleep is handled in a priority-neutral manner, no priority
+	 * boost and no penalty.)
+	 */
+	if (old_state & TASK_NONINTERACTIVE)
+		__activate_task(p, rq);
+	else
+		activate_task(p, rq, same_cpu);
+	/*
+	 * Sync wakeups (i.e. those types of wakeups where the waker
+	 * has indicated that it will leave the CPU in short order)
+	 * don't trigger a preemption, if the woken up task will run on
+	 * this cpu. (in this case the 'I will reschedule' promise of
+	 * the waker guarantees that the freshly woken up task is going
+	 * to be considered on this CPU.)
+	 */
+	if (!sync || !same_cpu) {
+		if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+	}
+}
+
+/*
+ * Perform scheduler related setup for a newly forked process p.
+ * p is forked by current.
+ */
+static void ingo_fork(task_t *p)
+{
+	p->sdu.ingosched.array = NULL;
+	/*
+	 * Share the timeslice between parent and child, thus the
+	 * total amount of pending timeslices in the system doesn't change,
+	 * resulting in more scheduling fairness.
+	 */
+	local_irq_disable();
+	p->sdu.ingosched.time_slice = (current->sdu.ingosched.time_slice + 1) >> 1;
+	/*
+	 * The remainder of the first timeslice might be recovered by
+	 * the parent if the child exits early enough.
+	 */
+	p->sdu.ingosched.first_time_slice = 1;
+	current->sdu.ingosched.time_slice >>= 1;
+	p->timestamp = sched_clock();
+	if (unlikely(!current->sdu.ingosched.time_slice)) {
+		/*
+		 * This case is rare, it happens when the parent has only
+		 * a single jiffy left from its timeslice. Taking the
+		 * runqueue lock is not a problem.
+		 */
+		current->sdu.ingosched.time_slice = 1;
+		scheduler_tick();
+	}
+	local_irq_enable();
+}
+
+/*
+ * wake_up_new_task - wake up a newly created task for the first time.
+ *
+ * This function will do some initial scheduler statistics housekeeping
+ * that must be done for every newly created context, then puts the task
+ * on the runqueue and wakes it.
+ */
+static void ingo_wake_up_new_task(task_t * p, unsigned long clone_flags)
+{
+	unsigned long flags;
+	int this_cpu, cpu;
+	runqueue_t *rq, *this_rq;
+
+	rq = task_rq_lock(p, &flags);
+	BUG_ON(p->state != TASK_RUNNING);
+	this_cpu = smp_processor_id();
+	cpu = task_cpu(p);
+
+	/*
+	 * We decrease the sleep average of forking parents
+	 * and children as well, to keep max-interactive tasks
+	 * from forking tasks that are max-interactive. The parent
+	 * (current) is done further down, under its lock.
+	 */
+	p->sdu.ingosched.sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
+		CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
+
+	p->prio = effective_prio(p);
+
+	if (likely(cpu == this_cpu)) {
+		if (!(clone_flags & CLONE_VM)) {
+			/*
+			 * The VM isn't cloned, so we're in a good position to
+			 * do child-runs-first in anticipation of an exec. This
+			 * usually avoids a lot of COW overhead.
+			 */
+			if (unlikely(!current->sdu.ingosched.array))
+				__activate_task(p, rq);
+			else {
+				p->prio = current->prio;
+				list_add_tail(&p->run_list, &current->run_list);
+				p->sdu.ingosched.array = current->sdu.ingosched.array;
+				p->sdu.ingosched.array->nr_active++;
+				inc_nr_running(p, rq);
+			}
+			set_need_resched();
+		} else
+			/* Run child last */
+			__activate_task(p, rq);
+		/*
+		 * We skip the following code due to cpu == this_cpu
+	 	 *
+		 *   task_rq_unlock(rq, &flags);
+		 *   this_rq = task_rq_lock(current, &flags);
+		 */
+		this_rq = rq;
+	} else {
+		this_rq = cpu_rq(this_cpu);
+
+		/*
+		 * Not the local CPU - must adjust timestamp. This should
+		 * get optimised away in the !CONFIG_SMP case.
+		 */
+		p->timestamp = (p->timestamp - this_rq->timestamp_last_tick)
+					+ rq->timestamp_last_tick;
+		__activate_task(p, rq);
+		if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+
+		/*
+		 * Parent and child are on different CPUs, now get the
+		 * parent runqueue to update the parent's ->sdu.ingosched.sleep_avg:
+		 */
+		task_rq_unlock(rq, &flags);
+		this_rq = task_rq_lock(current, &flags);
+	}
+	current->sdu.ingosched.sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
+		PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
+	task_rq_unlock(this_rq, &flags);
+}
+
+/*
+ * Potentially available exiting-child timeslices are
+ * retrieved here - this way the parent does not get
+ * penalized for creating too many threads.
+ *
+ * (this cannot be used to 'generate' timeslices
+ * artificially, because any timeslice recovered here
+ * was given away by the parent in the first place.)
+ */
+static void ingo_exit(task_t *p)
+{
+	unsigned long flags;
+	runqueue_t *rq;
+
+	/*
+	 * If the child was a (relative-) CPU hog then decrease
+	 * the sleep_avg of the parent as well.
+	 */
+	rq = task_rq_lock(p->parent, &flags);
+	if (p->sdu.ingosched.first_time_slice && task_cpu(p) == task_cpu(p->parent)) {
+		p->parent->sdu.ingosched.time_slice += p->sdu.ingosched.time_slice;
+		if (unlikely(p->parent->sdu.ingosched.time_slice > task_timeslice(p)))
+			p->parent->sdu.ingosched.time_slice = task_timeslice(p);
+	}
+	if (p->sdu.ingosched.sleep_avg < p->parent->sdu.ingosched.sleep_avg)
+		p->parent->sdu.ingosched.sleep_avg = p->parent->sdu.ingosched.sleep_avg /
+		(EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sdu.ingosched.sleep_avg /
+		(EXIT_WEIGHT + 1);
+	task_rq_unlock(rq, &flags);
+}
+
+#ifdef CONFIG_SMP
+/*
+ * pull_task - move a task from a remote runqueue to the local runqueue.
+ * Both runqueues must be locked.
+ */
+static inline
+void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
+	       runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
+{
+	dequeue_task(p, src_array);
+	dec_nr_running(p, src_rq);
+	set_task_cpu(p, this_cpu);
+	inc_nr_running(p, this_rq);
+	enqueue_task(p, this_array);
+	p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
+				+ this_rq->timestamp_last_tick;
+	/*
+	 * Note that idle threads have a prio of INGO_MAX_PRIO, for this test
+	 * to be always true for them.
+	 */
+	if (TASK_PREEMPTS_CURR(p, this_rq))
+		resched_task(this_rq->curr);
+}
+
+/*
+ * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq,
+ * as part of a balancing operation within "domain". Returns the number of
+ * tasks moved.
+ *
+ * Called with both runqueues locked.
+ */
+static int ingo_move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
+		      unsigned long max_nr_move, long max_bias_move,
+		      struct sched_domain *sd, enum idle_type idle,
+		      int *all_pinned)
+{
+	prio_array_t *array, *dst_array;
+	struct list_head *head, *curr;
+	int idx, pulled = 0, pinned = 0;
+	task_t *tmp;
+
+	if (max_nr_move == 0 || max_bias_move == 0)
+		goto out;
+
+	pinned = 1;
+
+	/*
+	 * We first consider expired tasks. Those will likely not be
+	 * executed in the near future, and they are most likely to
+	 * be cache-cold, thus switching CPUs has the least effect
+	 * on them.
+	 */
+	if (busiest->qu.ingosched.expired->nr_active) {
+		array = busiest->qu.ingosched.expired;
+		dst_array = this_rq->qu.ingosched.expired;
+	} else {
+		array = busiest->qu.ingosched.active;
+		dst_array = this_rq->qu.ingosched.active;
+	}
+
+new_array:
+	/* Start searching at priority 0: */
+	idx = 0;
+skip_bitmap:
+	if (!idx)
+		idx = sched_find_first_bit(array->bitmap);
+	else
+		idx = find_next_bit(array->bitmap, INGO_MAX_PRIO, idx);
+	if (idx >= INGO_MAX_PRIO) {
+		if (array == busiest->qu.ingosched.expired && busiest->qu.ingosched.active->nr_active) {
+			array = busiest->qu.ingosched.active;
+			dst_array = this_rq->qu.ingosched.active;
+			goto new_array;
+		}
+		goto out;
+	}
+
+	head = array->queue + idx;
+	curr = head->prev;
+skip_queue:
+	tmp = list_entry(curr, task_t, run_list);
+
+	curr = curr->prev;
+
+	if (tmp->bias_prio > max_bias_move ||
+	    !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+
+#ifdef CONFIG_SCHEDSTATS
+	if (task_hot(tmp, busiest->timestamp_last_tick, sd))
+		schedstat_inc(sd, lb_hot_gained[idle]);
+#endif
+
+	pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
+	pulled++;
+	max_bias_move -= tmp->bias_prio;
+
+	/*
+	 * We only want to steal up to the prescribed number of tasks
+	 * and the prescribed amount of biased load.
+	 */
+	if (pulled < max_nr_move && max_bias_move > 0) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+out:
+	if (all_pinned)
+		*all_pinned = pinned;
+
+	return pulled;
+}
+#endif
+
+/*
+ * We place interactive tasks back into the active array, if possible.
+ *
+ * To guarantee that this does not starve expired tasks we ignore the
+ * interactivity of a task if the first expired task had to wait more
+ * than a 'reasonable' amount of time. This deadline timeout is
+ * load-dependent, as the frequency of array switched decreases with
+ * increasing number of running tasks. We also ignore the interactivity
+ * if a better static_prio task has expired:
+ */
+#define EXPIRED_STARVING(rq) \
+	((STARVATION_LIMIT && ((rq)->qu.ingosched.expired_timestamp && \
+		(jiffies - (rq)->qu.ingosched.expired_timestamp >= \
+			STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
+			((rq)->curr->static_prio > (rq)->qu.ingosched.best_expired_prio))
+
+/*
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled.
+ *
+ * It also gets called by the fork code, when changing the parent's
+ * timeslices.
+ */
+static void ingo_tick(struct task_struct *p, struct runqueue *rq, unsigned long long now)
+{
+	int cpu = smp_processor_id();
+
+	if (p == rq->idle) {
+		if (wake_priority_sleeper(rq))
+			goto out;
+		rebalance_tick(cpu, rq, SCHED_IDLE);
+		return;
+	}
+
+	/* Task might have expired already, but not scheduled off yet */
+	if (p->sdu.ingosched.array != rq->qu.ingosched.active) {
+		set_tsk_need_resched(p);
+		goto out;
+	}
+	spin_lock(&rq->lock);
+	/*
+	 * The task was running during this tick - update the
+	 * time slice counter. Note: we do not update a thread's
+	 * priority until it either goes to sleep or uses up its
+	 * timeslice. This makes it possible for interactive tasks
+	 * to use up their timeslices at their highest priority levels.
+	 */
+	if (rt_task(p)) {
+		/*
+		 * RR tasks need a special form of timeslice management.
+		 * FIFO tasks have no timeslices.
+		 */
+		if ((p->policy == SCHED_RR) && !--p->sdu.ingosched.time_slice) {
+			p->sdu.ingosched.time_slice = task_timeslice(p);
+			p->sdu.ingosched.first_time_slice = 0;
+			set_tsk_need_resched(p);
+
+			/* put it at the end of the queue: */
+			requeue_task(p, rq->qu.ingosched.active);
+		}
+		goto out_unlock;
+	}
+	if (!--p->sdu.ingosched.time_slice) {
+		dequeue_task(p, rq->qu.ingosched.active);
+		set_tsk_need_resched(p);
+		p->prio = effective_prio(p);
+		p->sdu.ingosched.time_slice = task_timeslice(p);
+		p->sdu.ingosched.first_time_slice = 0;
+
+		if (!rq->qu.ingosched.expired_timestamp)
+			rq->qu.ingosched.expired_timestamp = jiffies;
+		if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
+			enqueue_task(p, rq->qu.ingosched.expired);
+			if (p->static_prio < rq->qu.ingosched.best_expired_prio)
+				rq->qu.ingosched.best_expired_prio = p->static_prio;
+		} else
+			enqueue_task(p, rq->qu.ingosched.active);
+	} else {
+		/*
+		 * Prevent a too long timeslice allowing a task to monopolize
+		 * the CPU. We do this by splitting up the timeslice into
+		 * smaller pieces.
+		 *
+		 * Note: this does not mean the task's timeslices expire or
+		 * get lost in any way, they just might be preempted by
+		 * another task of equal priority. (one with higher
+		 * priority would have preempted this task already.) We
+		 * requeue this task to the end of the list on this priority
+		 * level, which is in essence a round-robin of tasks with
+		 * equal priority.
+		 *
+		 * This only applies to tasks in the interactive
+		 * delta range with at least TIMESLICE_GRANULARITY to requeue.
+		 */
+		if (TASK_INTERACTIVE(p) && !((task_timeslice(p) -
+			p->sdu.ingosched.time_slice) % TIMESLICE_GRANULARITY(p)) &&
+			(p->sdu.ingosched.time_slice >= TIMESLICE_GRANULARITY(p)) &&
+			(p->sdu.ingosched.array == rq->qu.ingosched.active)) {
+
+			requeue_task(p, rq->qu.ingosched.active);
+			set_tsk_need_resched(p);
+		}
+	}
+out_unlock:
+	spin_unlock(&rq->lock);
+out:
+	rebalance_tick(cpu, rq, NOT_IDLE);
+}
+
+#ifdef CONFIG_SCHED_SMT
+static struct task_struct *ingo_head_of_queue(union runqueue_queue *rqq)
+{
+	prio_array_t *array = rqq->ingosched.active;
+
+	if (!array->nr_active)
+		array = rqq->ingosched.expired;
+	BUG_ON(!array->nr_active);
+
+	return list_entry(array->queue[sched_find_first_bit(array->bitmap)].next,
+		task_t, run_list);
+}
+
+/*
+ * number of 'lost' timeslices this task won't be able to fully
+ * utilize, if another task runs on a sibling. This models the
+ * slowdown effect of other tasks running on siblings:
+ */
+static inline unsigned long smt_slice(const task_t *p, struct sched_domain *sd)
+{
+	return p->sdu.ingosched.time_slice * (100 - sd->per_cpu_gain) / 100;
+}
+
+static int ingo_dependent_sleeper_trumps(const struct task_struct *p1,
+	const struct task_struct * p2, struct sched_domain *sd)
+{
+	return smt_slice(p1, sd) > task_timeslice(p2);
+}
+#endif
+
+/*
+ * schedule() is the main scheduler function.
+ */
+static void ingo_schedule(void)
+{
+	long *switch_count;
+	prio_array_t *array;
+	unsigned long run_time;
+	int cpu, idx, new_prio;
+	struct task_struct *prev = current, *next;
+	struct list_head *queue;
+	struct runqueue *rq = this_rq();
+	unsigned long long now = sched_clock();
+
+	if (likely((long long)(now - prev->timestamp) < NS_MAX_SLEEP_AVG)) {
+		run_time = now - prev->timestamp;
+		if (unlikely((long long)(now - prev->timestamp) < 0))
+			run_time = 0;
+	} else
+		run_time = NS_MAX_SLEEP_AVG;
+
+	/*
+	 * Tasks charged proportionately less run_time at high sleep_avg to
+	 * delay them losing their interactive status
+	 */
+	run_time /= (CURRENT_BONUS(prev) ? : 1);
+
+	spin_lock_irq(&rq->lock);
+
+	if (unlikely(prev->flags & PF_DEAD))
+		prev->state = EXIT_DEAD;
+
+	switch_count = &prev->nivcsw;
+	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+		switch_count = &prev->nvcsw;
+		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+				unlikely(signal_pending(prev))))
+			prev->state = TASK_RUNNING;
+		else {
+			if (prev->state == TASK_UNINTERRUPTIBLE)
+				rq->nr_uninterruptible++;
+			deactivate_task(prev, rq);
+		}
+	}
+
+	cpu = smp_processor_id();
+	if (unlikely(!rq->nr_running)) {
+go_idle:
+		idle_balance(cpu, rq);
+		if (!rq->nr_running) {
+			next = rq->idle;
+			rq->qu.ingosched.expired_timestamp = 0;
+			wake_sleeping_dependent(cpu, rq);
+			/*
+			 * wake_sleeping_dependent() might have released
+			 * the runqueue, so break out if we got new
+			 * tasks meanwhile:
+			 */
+			if (!rq->nr_running)
+				goto switch_tasks;
+		}
+	} else {
+		if (dependent_sleeper(cpu, rq)) {
+			next = rq->idle;
+			goto switch_tasks;
+		}
+		/*
+		 * dependent_sleeper() releases and reacquires the runqueue
+		 * lock, hence go into the idle loop if the rq went
+		 * empty meanwhile:
+		 */
+		if (unlikely(!rq->nr_running))
+			goto go_idle;
+	}
+
+	array = rq->qu.ingosched.active;
+	if (unlikely(!array->nr_active)) {
+		/*
+		 * Switch the active and expired arrays.
+		 */
+		schedstat_inc(rq, sched_switch);
+		rq->qu.ingosched.active = rq->qu.ingosched.expired;
+		rq->qu.ingosched.expired = array;
+		array = rq->qu.ingosched.active;
+		rq->qu.ingosched.expired_timestamp = 0;
+		rq->qu.ingosched.best_expired_prio = INGO_MAX_PRIO;
+	}
+
+	idx = sched_find_first_bit(array->bitmap);
+	queue = array->queue + idx;
+	next = list_entry(queue->next, task_t, run_list);
+
+	if (!rt_task(next) && next->sdu.ingosched.activated > 0) {
+		unsigned long long delta = now - next->timestamp;
+		if (unlikely((long long)(now - next->timestamp) < 0))
+			delta = 0;
+
+		if (next->sdu.ingosched.activated == 1)
+			delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
+
+		array = next->sdu.ingosched.array;
+		new_prio = recalc_task_prio(next, next->timestamp + delta);
+
+		if (unlikely(next->prio != new_prio)) {
+			dequeue_task(next, array);
+			next->prio = new_prio;
+			enqueue_task(next, array);
+		} else
+			requeue_task(next, array);
+	}
+	next->sdu.ingosched.activated = 0;
+switch_tasks:
+	if (next == rq->idle)
+		schedstat_inc(rq, sched_goidle);
+	prefetch(next);
+	prefetch_stack(next);
+	clear_tsk_need_resched(prev);
+	rcu_qsctr_inc(task_cpu(prev));
+
+	update_cpu_clock(prev, rq, now);
+
+	prev->sdu.ingosched.sleep_avg -= run_time;
+	if ((long)prev->sdu.ingosched.sleep_avg <= 0)
+		prev->sdu.ingosched.sleep_avg = 0;
+	prev->timestamp = prev->last_ran = now;
+
+	sched_info_switch(prev, next);
+	if (likely(prev != next)) {
+		next->timestamp = now;
+		rq->nr_switches++;
+		rq->curr = next;
+		++*switch_count;
+
+		prepare_task_switch(rq, next);
+		prev = context_switch(rq, prev, next);
+		barrier();
+		/*
+		 * this_rq must be evaluated again because prev may have moved
+		 * CPUs since it called schedule(), thus the 'rq' on its stack
+		 * frame will be invalid.
+		 */
+		finish_task_switch(this_rq(), prev);
+	} else
+		spin_unlock_irq(&rq->lock);
+}
+
+static void ingo_set_normal_task_nice(task_t *p, long nice)
+{
+	prio_array_t *array;
+	int old_prio, new_prio, delta;
+	struct runqueue *rq = task_rq(p);
+
+	array = p->sdu.ingosched.array;
+	if (array) {
+		dequeue_task(p, array);
+		dec_prio_bias(rq, p);
+	}
+
+	old_prio = p->prio;
+	new_prio = NICE_TO_PRIO(nice);
+	delta = new_prio - old_prio;
+	p->static_prio = NICE_TO_PRIO(nice);
+	set_bias_prio(p);
+	p->prio += delta;
+
+	if (array) {
+		enqueue_task(p, array);
+		inc_prio_bias(rq, p);
+		/*
+		 * If the task increased its priority or is running and
+		 * lowered its priority, then reschedule its CPU:
+		 */
+		if (delta < 0 || (delta > 0 && task_running(rq, p)))
+			resched_task(rq->curr);
+	}
+}
+
+/*
+ * setscheduler - change the scheduling policy and/or RT priority of a thread.
+ */
+static void ingo_setscheduler(task_t *p, int policy, int prio)
+{
+	int oldprio;
+	prio_array_t *array;
+	runqueue_t *rq = task_rq(p);
+
+	array = p->sdu.ingosched.array;
+	if (array)
+		deactivate_task(p, rq);
+	oldprio = p->prio;
+	__setscheduler(p, policy, prio);
+	if (array) {
+		__activate_task(p, rq);
+		/*
+		 * Reschedule if we are currently running on this runqueue and
+		 * our priority decreased, or if we are not currently running on
+		 * this runqueue and our priority is higher than the current's
+		 */
+		if (task_running(rq, p)) {
+			if (p->prio > oldprio)
+				resched_task(rq->curr);
+		} else if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+	}
+}
+
+/**
+ * sys_sched_yield - yield the current processor to other threads.
+ *
+ * this function yields the current CPU by moving the calling thread
+ * to the expired array. If there are no other threads running on this
+ * CPU then this function will return.
+ */
+
+static long ingo_sys_yield(void)
+{
+	runqueue_t *rq = this_rq_lock();
+	prio_array_t *array = current->sdu.ingosched.array;
+	prio_array_t *target = rq->qu.ingosched.expired;
+
+	schedstat_inc(rq, yld_cnt);
+	/*
+	 * We implement yielding by moving the task into the expired
+	 * queue.
+	 *
+	 * (special rule: RT tasks will just roundrobin in the active
+	 *  array.)
+	 */
+	if (rt_task(current))
+		target = rq->qu.ingosched.active;
+
+	if (array->nr_active == 1) {
+		schedstat_inc(rq, yld_act_empty);
+		if (!rq->qu.ingosched.expired->nr_active)
+			schedstat_inc(rq, yld_both_empty);
+	} else if (!rq->qu.ingosched.expired->nr_active)
+		schedstat_inc(rq, yld_exp_empty);
+
+	if (array != target) {
+		dequeue_task(current, array);
+		enqueue_task(current, target);
+	} else
+		/*
+		 * requeue_task is cheaper so perform that if possible.
+		 */
+		requeue_task(current, array);
+
+	/*
+	 * Since we are going to call schedule() anyway, there's
+	 * no need to preempt or enable interrupts:
+	 */
+	__release(rq->lock);
+	_raw_spin_unlock(&rq->lock);
+	preempt_enable_no_resched();
+
+	schedule();
+
+	return 0;
+}
+
+static void ingo_yield(void)
+{
+	set_current_state(TASK_RUNNING);
+	ingo_sys_yield();
+}
+
+static void ingo_init_idle(task_t *idle, int cpu)
+{
+	idle->sdu.ingosched.sleep_avg = 0;
+	idle->sdu.ingosched.array = NULL;
+	idle->prio = INGO_MAX_PRIO;
+}
+
+#ifdef CONFIG_SMP
+/* source and destination queues will be already locked */
+static void ingo_migrate_queued_task(struct task_struct *p, int dest_cpu)
+{
+	struct runqueue *rq_src = task_rq(p);
+	struct runqueue *rq_dest = cpu_rq(dest_cpu);
+
+	/*
+	 * Sync timestamp with rq_dest's before activating.
+	 * The same thing could be achieved by doing this step
+	 * afterwards, and pretending it was a local activate.
+	 * This way is cleaner and logically correct.
+	 */
+	p->timestamp = p->timestamp - rq_src->timestamp_last_tick
+			+ rq_dest->timestamp_last_tick;
+	deactivate_task(p, rq_src);
+	set_task_cpu(p, dest_cpu);
+	activate_task(p, rq_dest, 0);
+	if (TASK_PREEMPTS_CURR(p, rq_dest))
+		resched_task(rq_dest->curr);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static	void ingo_set_select_idle_first(struct runqueue *rq)
+{
+	__setscheduler(rq->idle, SCHED_FIFO, MAX_RT_PRIO-1);
+	/* Add idle task to _front_ of its priority queue */
+	__activate_idle_task(rq->idle, rq);
+}
+
+static	void ingo_set_select_idle_last(struct runqueue *rq)
+{
+	deactivate_task(rq->idle, rq);
+	rq->idle->static_prio = INGO_MAX_PRIO;
+	__setscheduler(rq->idle, SCHED_NORMAL, 0);
+}
+
+static void ingo_migrate_dead_tasks(unsigned int dead_cpu)
+{
+	unsigned arr, i;
+	struct runqueue *rq = cpu_rq(dead_cpu);
+
+	for (arr = 0; arr < 2; arr++) {
+		for (i = 0; i < INGO_MAX_PRIO; i++) {
+			struct list_head *list = &rq->qu.ingosched.arrays[arr].queue[i];
+			while (!list_empty(list))
+				migrate_dead(dead_cpu,
+					     list_entry(list->next, task_t,
+							run_list));
+		}
+	}
+}
+#endif
+#endif
+
+static void ingo_sched_init(void)
+{
+	init_task.sdu.ingosched.time_slice = HZ;
+	init_task.sdu.ingosched.array = NULL;
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static void ingo_normalize_rt_task(struct task_struct *p)
+{
+	prio_array_t *array;
+	unsigned long flags;
+	runqueue_t *rq;
+
+	rq = task_rq_lock(p, &flags);
+
+	array = p->sdu.ingosched.array;
+	if (array)
+		deactivate_task(p, rq);
+	__setscheduler(p, SCHED_NORMAL, 0);
+	if (array) {
+		__activate_task(p, rq);
+		resched_task(rq->curr);
+	}
+
+	task_rq_unlock(rq, &flags);
+}
+#endif
+
+const struct sched_drv ingo_sched_drv = {
+	.name = "ingosched",
+	.init_runqueue_queue = ingo_init_runqueue_queue,
+	.set_oom_time_slice = ingo_set_oom_time_slice,
+	.task_timeslice = task_timeslice,
+	.wake_up_task = ingo_wake_up_task,
+	.fork = ingo_fork,
+	.wake_up_new_task = ingo_wake_up_new_task,
+	.exit = ingo_exit,
+#ifdef CONFIG_SMP
+	.move_tasks = ingo_move_tasks,
+#endif
+	.tick = ingo_tick,
+#ifdef CONFIG_SCHED_SMT
+	.head_of_queue = ingo_head_of_queue,
+	.dependent_sleeper_trumps = ingo_dependent_sleeper_trumps,
+#endif
+	.schedule = ingo_schedule,
+	.set_normal_task_nice = ingo_set_normal_task_nice,
+	.setscheduler = ingo_setscheduler,
+	.sys_yield = ingo_sys_yield,
+	.yield = ingo_yield,
+	.init_idle = ingo_init_idle,
+	.sched_init = ingo_sched_init,
+#ifdef CONFIG_SMP
+	.migrate_queued_task = ingo_migrate_queued_task,
+#ifdef CONFIG_HOTPLUG_CPU
+	.set_select_idle_first = ingo_set_select_idle_first,
+	.set_select_idle_last = ingo_set_select_idle_last,
+	.migrate_dead_tasks = ingo_migrate_dead_tasks,
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	.normalize_rt_task = ingo_normalize_rt_task,
+#endif
+	.attrs = NULL,
+};
diff -urN oldtree/kernel/nicksched.c newtree/kernel/nicksched.c
--- oldtree/kernel/nicksched.c	1970-01-01 00:00:00.000000000 +0000
+++ newtree/kernel/nicksched.c	2006-02-04 18:03:15.595983624 +0000
@@ -0,0 +1,1010 @@
+/*
+ *  kernel/nicksched.c
+ *  Copyright (C) 1991-2005  Linus Torvalds
+ *
+ *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
+ *		hybrid priority-list and round-robin design with
+ *		an array-switch method of distributing timeslices
+ *		and per-CPU runqueues.  Cleanups and useful suggestions
+ *		by Davide Libenzi, preemptible kernel bits by Robert Love.
+ */
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/rcupdate.h>
+#include <linux/security.h>
+#include <linux/cpu.h>
+#include <linux/hardirq.h>
+#include <linux/sched_pvt.h>
+#include <linux/sched_runq.h>
+
+static void nick_init_runqueue_queue(union runqueue_queue *rqq)
+{
+	int j;
+
+	rqq->nicksched.active = rqq->nicksched.arrays;
+	rqq->nicksched.expired = rqq->nicksched.arrays + 1;
+
+	for (j = 0; j < 2; j++) {
+		int k;
+		struct nick_prio_array *array = rqq->nicksched.arrays + j;
+
+		array->min_prio = NICK_MAX_PRIO;
+		for (k = 0; k < NICK_MAX_PRIO; k++) {
+			INIT_LIST_HEAD(array->queue + k);
+			__clear_bit(k, array->bitmap);
+		}
+		// delimiter for bitsearch
+		__set_bit(NICK_MAX_PRIO, array->bitmap);
+		array->nr_active = 0;
+	}
+
+	rqq->nicksched.array_sequence = 0;
+}
+
+static void nick_set_oom_time_slice(struct task_struct *p, unsigned long t)
+{
+}
+
+/*
+ * 'User priority' is the nice value converted to something we
+ * can work with better when scaling various scheduler parameters,
+ * it's a [ 0 ... 39 ] range.
+ */
+#define USER_PRIO(p)		((p) - MAX_RT_PRIO)
+#define MAX_USER_PRIO		(USER_PRIO(NICK_MAX_PRIO))
+/*
+ * Correct for fact that p->static_prio has normal mapping
+ */
+#define STATIC_USER_PRIO(p)	((p)->static_prio - MAX_RT_PRIO + 10)
+
+/*
+ * Some helpers for converting microsecond timing to jiffy resolution
+ */
+#define US_TO_JIFFIES(x)	((x) * HZ / 1000000)
+#define JIFFIES_TO_US(x)	((x) * 1000000 / HZ)
+
+static int base_timeslice = 256;
+#define min_base_timeslice 1
+#define max_base_timeslice 10000
+
+#define RT_TIMESLICE		(50 * 1000 / HZ)		/* 50ms */
+#define BASE_TIMESLICE		(base_timeslice)
+#define MIN_TIMESLICE		(base_timeslice / 16 ?: 1)
+
+/* Maximum amount of history that will be used to calculate priority */
+#define MAX_SLEEP_SHIFT		19
+#define MAX_SLEEP		(1UL << MAX_SLEEP_SHIFT)	/* ~0.52s */
+
+/*
+ * Maximum effect that 1 block of activity (run/sleep/etc) can have. This
+ * will moderate/discard freak events (e.g. SIGSTOP).
+ */
+#define MAX_SLEEP_AFFECT	(MAX_SLEEP/4)
+
+/*
+ * The amount of history can be decreased (on fork for example). This puts a
+ * lower bound on it.
+ */
+#define MIN_HISTORY		(MAX_SLEEP/8)
+#define FORKED_TS_MAX		(US_TO_JIFFIES(MIN_HISTORY) ?: 1)
+
+/*
+ * SLEEP_FACTOR is a fixed point factor used to scale history tracking things.
+ * In particular: total_time, sleep_time, sleep_avg.
+ */
+#define SLEEP_FACTOR		1024
+
+/*
+ *  The scheduler classifies a process as performing one of the following
+ *  activities
+ */
+#define STIME_SLEEP		1	/* Sleeping */
+#define STIME_RUN		2	/* Using CPU */
+
+#define TASK_PREEMPTS_CURR(p, rq) \
+	((p)->prio < (rq)->curr->prio)
+
+/*
+ * Adding/removing a task to/from a priority array:
+ */
+static void dequeue_task(struct task_struct *p, struct nick_prio_array *array)
+{
+	array->nr_active--;
+	list_del_init(&p->run_list);
+	if (list_empty(array->queue + p->prio))
+		__clear_bit(p->prio, array->bitmap);
+}
+
+static void enqueue_task(struct task_struct *p, struct nick_prio_array *array)
+{
+	struct list_head *entry = array->queue + p->prio;
+
+	sched_info_queued(p);
+	if (!rt_task(p)) {
+		/*
+		 * Cycle tasks on the same priority level. This reduces their
+		 * timeslice fluctuations due to higher priority tasks expiring.
+		 */
+		if (!list_empty(entry))
+			entry = entry->next;
+	}
+	list_add_tail(&p->run_list, entry);
+	__set_bit(p->prio, array->bitmap);
+	array->nr_active++;
+	p->sdu.nicksched.array = array;
+}
+
+static inline void enqueue_task_head(struct task_struct *p, struct nick_prio_array *array)
+{
+	list_add(&p->run_list, array->queue + p->prio);
+	__set_bit(p->prio, array->bitmap);
+	array->nr_active++;
+	p->sdu.nicksched.array = array;
+}
+
+#define NS_TO_APPROX_US(t) ((t) >> 10)
+
+/*
+ * add_task_time updates a task @p after @time of doing the specified @type
+ * of activity. See STIME_*. This is used for priority calculation.
+ */
+static inline void add_task_time(task_t *p, unsigned long long time, unsigned long type)
+{
+	unsigned long ratio;
+	unsigned long long tmp;
+	unsigned long t;
+	if (type == STIME_SLEEP) {
+		if (time > MAX_SLEEP_AFFECT*4)
+			time = MAX_SLEEP_AFFECT*4;
+		t = ((unsigned long)time + 3) / 4;
+	} else {
+		unsigned long div = 60 - STATIC_USER_PRIO(p);
+		t = (unsigned long)time * 30;
+		t = t / div;
+		t = t * 30;
+		t = t / div;
+	}
+
+	ratio = MAX_SLEEP - t;
+	tmp = (unsigned long long)ratio * p->sdu.nicksched.total_time + MAX_SLEEP/2;
+	tmp >>= MAX_SLEEP_SHIFT;
+	p->sdu.nicksched.total_time = (unsigned long)tmp;
+
+	tmp = (unsigned long long)ratio * p->sdu.nicksched.sleep_time + MAX_SLEEP/2;
+	tmp >>= MAX_SLEEP_SHIFT;
+	p->sdu.nicksched.sleep_time = (unsigned long)tmp;
+
+	p->sdu.nicksched.total_time += t;
+	if (type == STIME_SLEEP)
+		p->sdu.nicksched.sleep_time += t;
+}
+
+static unsigned long task_sleep_avg(task_t *p)
+{
+	return (SLEEP_FACTOR * p->sdu.nicksched.sleep_time) / (p->sdu.nicksched.total_time + 1);
+}
+
+/*
+ * The higher a thread's priority, the bigger timeslices
+ * it gets during one round of execution. But even the lowest
+ * priority thread gets MIN_TIMESLICE worth of execution time.
+ *
+ * Timeslices are scaled, so if only low priority processes are running,
+ * they will all get long timeslices.
+ */
+
+static int task_timeslice(const task_t *p, runqueue_t *rq)
+{
+	int idx, base, delta;
+	int timeslice;
+
+	if (rt_task(p))
+		return RT_TIMESLICE;
+
+	idx = min(p->prio, rq->qu.nicksched.expired->min_prio);
+	delta = p->prio - idx;
+	base = BASE_TIMESLICE * (MAX_USER_PRIO + 1) / (delta + 2);
+	base = base * (MAX_USER_PRIO + 1) / (delta + 2);
+
+	base = base * 40 / (70 - USER_PRIO(idx));
+	base = base * 40 / (70 - USER_PRIO(idx));
+
+	timeslice = base >> 10;
+	timeslice = timeslice * HZ / 1000;
+	if (timeslice < MIN_TIMESLICE)
+		timeslice = MIN_TIMESLICE;
+
+	return timeslice;
+}
+
+/*
+ * task_priority: calculates a task's priority based on previous running
+ * history (see add_task_time). The priority is just a simple linear function
+ * based on sleep_avg and static_prio.
+ */
+static int task_priority(task_t *p)
+{
+	unsigned long sleep_avg;
+ 	int bonus, prio;
+
+ 	if (rt_task(p))
+ 		return p->prio;
+
+	sleep_avg = task_sleep_avg(p);
+
+	prio = STATIC_USER_PRIO(p) + 10;
+	bonus = (((MAX_USER_PRIO + 1) / 3) * sleep_avg + (SLEEP_FACTOR / 2))
+					/ SLEEP_FACTOR;
+	prio = MAX_RT_PRIO + prio - bonus;
+
+ 	if (prio < MAX_RT_PRIO)
+		return MAX_RT_PRIO;
+ 	if (prio > NICK_MAX_PRIO-1)
+		return NICK_MAX_PRIO-1;
+
+ 	return prio;
+}
+
+/*
+ * __activate_task - move a task to the runqueue.
+ */
+static inline void __activate_task(task_t *p, runqueue_t *rq, struct nick_prio_array *array)
+{
+	enqueue_task(p, array);
+	inc_nr_running(p, rq);
+	if (!rt_task(p)) {
+		if (p->prio < array->min_prio)
+			array->min_prio = p->prio;
+	}
+}
+
+/*
+ * activate_task - move a task to the runqueue and do priority recalculation
+ *
+ * Update all the scheduling statistics stuff. (sleep average
+ * calculation, priority modifiers, etc.)
+ */
+static void activate_task(task_t *p, runqueue_t *rq, int local)
+{
+	unsigned long long now, sleep;
+	struct nick_prio_array *array;
+
+	now = sched_clock();
+#ifdef CONFIG_SMP
+	if (!local) {
+		/* Compensate for drifting sched_clock */
+		runqueue_t *this_rq = this_rq();
+		now = (now - this_rq->timestamp_last_tick)
+			+ rq->timestamp_last_tick;
+	}
+#endif
+
+	/*
+	 * If we have slept through an active/expired array switch, restart
+	 * our timeslice too.
+	 */
+	sleep = NS_TO_APPROX_US(now - p->timestamp);
+	p->timestamp = now;
+	add_task_time(p, sleep, STIME_SLEEP);
+	p->prio = task_priority(p);
+
+	array = rq->qu.nicksched.active;
+	if (rq->qu.nicksched.array_sequence != p->sdu.nicksched.array_sequence) {
+		p->sdu.nicksched.used_slice = 0;
+	} else if (unlikely(p->sdu.nicksched.used_slice == -1)) {
+		p->sdu.nicksched.used_slice = 0;
+		array = rq->qu.nicksched.expired;
+	}
+
+	__activate_task(p, rq, array);
+}
+
+/*
+ * __activate_idle_task - move idle task to the _front_ of runqueue.
+ */
+static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
+{
+	enqueue_task_head(p, rq->qu.nicksched.active);
+	inc_nr_running(p, rq);
+}
+
+/*
+ * deactivate_task - remove a task from the runqueue.
+ */
+static inline void deactivate_task(struct task_struct *p, runqueue_t *rq)
+{
+	p->sdu.nicksched.array_sequence = rq->qu.nicksched.array_sequence;
+	dec_nr_running(p, rq);
+	dequeue_task(p, p->sdu.nicksched.array);
+	p->sdu.nicksched.array = NULL;
+}
+
+/***
+ * try_to_wake_up - wake up a thread
+ * @p: the to-be-woken-up thread
+ * @old_state: the task's state before being woken
+ * @sync: do a synchronous wakeup?
+ * @rq: The run queue on which the task is to be placed (already locked)
+ */
+static void nick_wake_up_task(struct task_struct *p, struct runqueue *rq, unsigned int old_state, int sync)
+{
+	int same_cpu = (rq == this_rq());
+
+	if (old_state == TASK_UNINTERRUPTIBLE)
+		rq->nr_uninterruptible--;
+
+	/*
+	 * Sync wakeups (i.e. those types of wakeups where the waker
+	 * has indicated that it will leave the CPU in short order)
+	 * don't trigger a preemption, if the woken up task will run on
+	 * this cpu. (in this case the 'I will reschedule' promise of
+	 * the waker guarantees that the freshly woken up task is going
+	 * to be considered on this CPU.)
+	 */
+	activate_task(p, rq, same_cpu);
+	if (!sync || !same_cpu) {
+		if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+	}
+}
+
+/*
+ * Perform scheduler related setup for a newly forked process p.
+ * p is forked by current.
+ */
+static void nick_fork(task_t *p)
+{
+	unsigned long sleep_avg;
+	runqueue_t *rq;
+
+	p->sdu.nicksched.array = NULL;
+
+	p->timestamp = sched_clock();
+	p->sdu.nicksched.used_slice = 0;
+	if (rt_task(p)) {
+		BUG_ON(!rt_task(current));
+		return;
+	}
+
+	preempt_disable();
+	rq = this_rq();
+	/* Get MIN_HISTORY of history with the same sleep_avg as parent. */
+	sleep_avg = task_sleep_avg(current);
+	p->sdu.nicksched.total_time = MIN_HISTORY;
+	p->sdu.nicksched.sleep_time = p->sdu.nicksched.total_time * sleep_avg / SLEEP_FACTOR;
+
+	/* Parent loses 1/4 of sleep time for forking */
+	current->sdu.nicksched.sleep_time = 3 * current->sdu.nicksched.sleep_time / 4;
+
+	local_irq_disable();
+	if (unlikely(current->sdu.nicksched.used_slice == -1 || current == rq->idle))
+		p->sdu.nicksched.used_slice = -1;
+	else {
+		int ts = task_timeslice(current, rq);
+		current->sdu.nicksched.used_slice += (ts + 3) / 4;
+		if (current->sdu.nicksched.used_slice >= ts) {
+			current->sdu.nicksched.used_slice = -1;
+			set_need_resched();
+		}
+	}
+	local_irq_enable();
+	preempt_enable();
+}
+
+/*
+ * wake_up_new_task - wake up a newly created task for the first time.
+ *
+ * This function will do some initial scheduler statistics housekeeping
+ * that must be done for every newly created context, then puts the task
+ * on the runqueue and wakes it.
+ */
+static void nick_wake_up_new_task(task_t * p, unsigned long clone_flags)
+{
+	unsigned long flags;
+	int this_cpu, cpu;
+	runqueue_t *rq;
+	struct nick_prio_array *array;
+
+	rq = task_rq_lock(p, &flags);
+
+	BUG_ON(p->state != TASK_RUNNING);
+
+	cpu = task_cpu(p);
+	this_cpu = smp_processor_id();
+
+	array = rq->qu.nicksched.active;
+	if (!rt_task(p)) {
+		if (unlikely(p->sdu.nicksched.used_slice == -1)) {
+			p->sdu.nicksched.used_slice = 0;
+			array = rq->qu.nicksched.expired;
+		} else {
+			int total = task_timeslice(p, rq);
+			int ts = max((total + 3) / 4, MIN_TIMESLICE);
+			ts = min(ts, (int)FORKED_TS_MAX);
+			p->sdu.nicksched.used_slice = total - ts;
+		}
+	}
+
+	if (likely(cpu == this_cpu)) {
+		if (!(clone_flags & CLONE_VM) && likely(array == rq->qu.nicksched.active)) {
+			/*
+			 * The VM isn't cloned, so we're in a good position to
+			 * do child-runs-first in anticipation of an exec. This
+			 * usually avoids a lot of COW overhead.
+			 */
+			if (p->prio >= current->prio) {
+				p->prio = current->prio;
+				list_add_tail(&p->run_list, &current->run_list);
+				p->sdu.nicksched.array = current->sdu.nicksched.array;
+				p->sdu.nicksched.array->nr_active++;
+				inc_nr_running(p, rq);
+			} else {
+				p->prio = task_priority(p);
+				__activate_task(p, rq, array);
+			}
+			set_need_resched();
+		} else {
+			/* Run child last */
+			p->prio = task_priority(p);
+			__activate_task(p, rq, array);
+		}
+#ifdef CONFIG_SMP
+	} else {
+		runqueue_t *this_rq = cpu_rq(this_cpu);
+
+		/*
+		 * Not the local CPU - must adjust timestamp. This should
+		 * get optimised away in the !CONFIG_SMP case.
+		 */
+		p->timestamp = (p->timestamp - this_rq->timestamp_last_tick)
+					+ rq->timestamp_last_tick;
+		p->prio = task_priority(p);
+		__activate_task(p, rq, array);
+		if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+#endif
+	}
+
+ 	task_rq_unlock(rq, &flags);
+}
+
+/*
+ * Potentially available exiting-child timeslices are
+ * retrieved here - this way the parent does not get
+ * penalized for creating too many threads.
+ *
+ * (this cannot be used to 'generate' timeslices
+ * artificially, because any timeslice recovered here
+ * was given away by the parent in the first place.)
+ */
+static void nick_exit(task_t * p)
+{
+}
+
+#ifdef CONFIG_SMP
+/*
+ * pull_task - move a task from a remote runqueue to the local runqueue.
+ * Both runqueues must be locked.
+ */
+static inline
+void pull_task(runqueue_t *src_rq, struct nick_prio_array *src_array, task_t *p,
+	       runqueue_t *this_rq, struct nick_prio_array *this_array, int this_cpu)
+{
+	dequeue_task(p, src_array);
+	dec_nr_running(p, src_rq);
+	set_task_cpu(p, this_cpu);
+	inc_nr_running(p, this_rq);
+	enqueue_task(p, this_array);
+	p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
+				+ this_rq->timestamp_last_tick;
+	/*
+	 * Note that idle threads have a prio of NICK_MAX_PRIO, so this test
+	 * will always be true for them.
+	 */
+	if (TASK_PREEMPTS_CURR(p, this_rq))
+		resched_task(this_rq->curr);
+}
+
+/*
+ * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq,
+ * as part of a balancing operation within "domain". Returns the number of
+ * tasks moved.
+ *
+ * Called with both runqueues locked.
+ */
+static int nick_move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
+		      unsigned long max_nr_move, long max_bias_move,
+		      struct sched_domain *sd, enum idle_type idle,
+		      int *all_pinned)
+{
+	struct nick_prio_array *array, *dst_array;
+	struct list_head *head, *curr;
+	int idx, pulled = 0, pinned = 0;
+	task_t *tmp;
+
+	if (max_nr_move == 0 || max_bias_move == 0)
+		goto out;
+
+	pinned = 1;
+
+	/*
+	 * We first consider expired tasks. Those will likely not be
+	 * executed in the near future, and they are most likely to
+	 * be cache-cold, thus switching CPUs has the least effect
+	 * on them.
+	 */
+	if (busiest->qu.nicksched.expired->nr_active) {
+		array = busiest->qu.nicksched.expired;
+		dst_array = this_rq->qu.nicksched.expired;
+	} else {
+		array = busiest->qu.nicksched.active;
+		dst_array = this_rq->qu.nicksched.active;
+	}
+
+new_array:
+	/* Start searching at priority 0: */
+	idx = 0;
+skip_bitmap:
+	if (!idx)
+		idx = sched_find_first_bit(array->bitmap);
+	else
+		idx = find_next_bit(array->bitmap, NICK_MAX_PRIO, idx);
+	if (idx >= NICK_MAX_PRIO) {
+		if (array == busiest->qu.nicksched.expired && busiest->qu.nicksched.active->nr_active) {
+			array = busiest->qu.nicksched.active;
+			dst_array = this_rq->qu.nicksched.active;
+			goto new_array;
+		}
+		goto out;
+	}
+
+	head = array->queue + idx;
+	curr = head->prev;
+skip_queue:
+	tmp = list_entry(curr, task_t, run_list);
+
+	curr = curr->prev;
+
+	if (tmp->bias_prio > max_bias_move ||
+	    !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+
+#ifdef CONFIG_SCHEDSTATS
+	if (task_hot(tmp, busiest->timestamp_last_tick, sd))
+		schedstat_inc(sd, lb_hot_gained[idle]);
+#endif
+
+	pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
+	pulled++;
+	max_bias_move -= tmp->bias_prio;
+
+	/*
+	 * We only want to steal up to the prescribed number of tasks
+	 * and the prescribed amount of biased load.
+	 */
+	if (pulled < max_nr_move && max_bias_move > 0) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+out:
+	if (all_pinned)
+		*all_pinned = pinned;
+
+	return pulled;
+}
+#endif
+
+/*
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled.
+ *
+ * It also gets called by the fork code, when changing the parent's
+ * timeslices.
+ */
+static void nick_tick(struct task_struct *p, struct runqueue *rq, unsigned long long now)
+{
+	enum idle_type cpu_status;
+	int ts;
+
+	if (p == rq->idle) {
+		cpu_status = SCHED_IDLE;
+		goto out;
+	}
+
+	cpu_status = NOT_IDLE;
+	/* Task might have expired already, but not scheduled off yet */
+	if  (unlikely(p->sdu.nicksched.used_slice == -1))
+		goto out;
+
+	if (unlikely(p->policy == SCHED_FIFO))
+		goto out;
+
+	/* p was running during this tick. Update its time slice counter. */
+	p->sdu.nicksched.used_slice++;
+	ts = task_timeslice(p, rq);
+	if (unlikely(p->sdu.nicksched.used_slice >= ts)) {
+		p->sdu.nicksched.used_slice = -1;
+		set_tsk_need_resched(p);
+	}
+out:
+	rebalance_tick(smp_processor_id(), rq, cpu_status);
+}
+
+#ifdef CONFIG_SCHED_SMT
+/* these should never get called */
+static struct task_struct *nick_head_of_queue(union runqueue_queue *rqq)
+{
+	struct nick_prio_array *array = rqq->nicksched.active;
+
+	if (!array->nr_active)
+		array = rqq->nicksched.expired;
+	BUG_ON(!array->nr_active);
+
+	return list_entry(array->queue[sched_find_first_bit(array->bitmap)].next,
+		task_t, run_list);
+}
+
+static int nick_dependent_sleeper_trumps(const struct task_struct *p1,
+	const struct task_struct * p2, struct sched_domain *sd)
+{
+	return 0;
+}
+#endif
+
+/*
+ * schedule() is the main scheduler function.
+ */
+static void nick_schedule(void)
+{
+	long *switch_count;
+	struct nick_prio_array *array;
+	unsigned long run_time;
+	int cpu, idx;
+	struct task_struct *prev = current, *next;
+	struct list_head *queue;
+	struct runqueue *rq = this_rq();
+	unsigned long long now = sched_clock();
+
+	run_time = NS_TO_APPROX_US(now - prev->timestamp);
+	update_cpu_clock(prev, rq, now);
+	prev->timestamp = prev->last_ran = now;
+	add_task_time(prev, run_time, STIME_RUN);
+
+	spin_lock_irq(&rq->lock);
+
+	if (unlikely(prev->flags & PF_DEAD))
+		prev->state = EXIT_DEAD;
+
+	switch_count = &prev->nivcsw;
+	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+		switch_count = &prev->nvcsw;
+		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+				unlikely(signal_pending(prev))))
+			prev->state = TASK_RUNNING;
+		else {
+			if (prev->state == TASK_UNINTERRUPTIBLE)
+				rq->nr_uninterruptible++;
+			deactivate_task(prev, rq);
+			goto no_check_expired;
+		}
+	}
+
+	if (unlikely(prev->sdu.nicksched.used_slice == -1)) {
+		dequeue_task(prev, prev->sdu.nicksched.array);
+		if (rt_task(prev)) {
+			/* SCHED_FIFO can come in here too, from sched_yield */
+			array = rq->qu.nicksched.active;
+		} else {
+			array = rq->qu.nicksched.expired;
+			prev->prio = task_priority(prev);
+			if (prev->prio < rq->qu.nicksched.expired->min_prio)
+				rq->qu.nicksched.expired->min_prio = prev->prio;
+ 		}
+		enqueue_task(prev, array);
+		prev->sdu.nicksched.used_slice = 0;
+	}
+no_check_expired:
+
+	cpu = smp_processor_id();
+	if (unlikely(!rq->nr_running)) {
+		rq->qu.nicksched.array_sequence++;
+		idle_balance(cpu, rq);
+		if (!rq->nr_running) {
+			next = rq->idle;
+			rq->qu.nicksched.arrays[0].min_prio = NICK_MAX_PRIO;
+			rq->qu.nicksched.arrays[1].min_prio = NICK_MAX_PRIO;
+			goto switch_tasks;
+		}
+	}
+
+	array = rq->qu.nicksched.active;
+	if (unlikely(!array->nr_active)) {
+		/*
+		 * Switch the active and expired arrays.
+		 */
+		schedstat_inc(rq, sched_switch);
+		rq->qu.nicksched.array_sequence++;
+		rq->qu.nicksched.active = rq->qu.nicksched.expired;
+		rq->qu.nicksched.expired = array;
+		rq->qu.nicksched.expired->min_prio = NICK_MAX_PRIO;
+		array = rq->qu.nicksched.active;
+	}
+
+	idx = sched_find_first_bit(array->bitmap);
+	queue = array->queue + idx;
+	next = list_entry(queue->next, task_t, run_list);
+
+switch_tasks:
+	if (next == rq->idle)
+		schedstat_inc(rq, sched_goidle);
+	prefetch(next);
+	prefetch_stack(next);
+	clear_tsk_need_resched(prev);
+	rcu_qsctr_inc(cpu);
+
+	sched_info_switch(prev, next);
+	if (likely(prev != next)) {
+		next->timestamp = now;
+		rq->nr_switches++;
+		rq->curr = next;
+		++*switch_count;
+
+		prepare_task_switch(rq, next);
+		prev = context_switch(rq, prev, next);
+		barrier();
+		/*
+		 * this_rq must be evaluated again because prev may have moved
+		 * CPUs since it called schedule(), thus the 'rq' on its stack
+		 * frame will be invalid.
+		 */
+		finish_task_switch(this_rq(), prev);
+	} else
+		spin_unlock_irq(&rq->lock);
+}
+
+static void nick_set_normal_task_nice(task_t *p, long nice)
+{
+	struct nick_prio_array *array;
+	int old_prio, new_prio, delta;
+
+	array = p->sdu.nicksched.array;
+	if (array) {
+		dequeue_task(p, array);
+		dec_prio_bias(task_rq(p), p);
+	}
+
+	old_prio = p->prio;
+	new_prio = NICE_TO_PRIO(nice);
+	delta = new_prio - old_prio;
+	p->static_prio = NICE_TO_PRIO(nice);
+	set_bias_prio(p);
+	p->prio = task_priority(p);
+
+	if (array) {
+		struct runqueue *rq = task_rq(p);
+
+		inc_prio_bias(task_rq(p), p);
+		enqueue_task(p, array);
+		/*
+		 * If the task increased its priority or is running and
+		 * lowered its priority, then reschedule its CPU:
+		 */
+		if (delta < 0 || (delta > 0 && task_running(rq, p)))
+			resched_task(rq->curr);
+	}
+}
+
+/*
+ * setscheduler - change the scheduling policy and/or RT priority of a thread.
+ */
+static void nick_setscheduler(task_t *p, int policy, int prio)
+{
+	int oldprio;
+	struct nick_prio_array *array;
+	runqueue_t *rq = task_rq(p);
+
+	array = p->sdu.nicksched.array;
+	if (array)
+		deactivate_task(p, rq);
+	oldprio = p->prio;
+	__setscheduler(p, policy, prio);
+	if (policy == SCHED_FIFO || policy == SCHED_RR)
+		p->sdu.nicksched.used_slice = 0;
+
+	if (array) {
+		__activate_task(p, rq, rq->qu.nicksched.active);
+		/*
+		 * Reschedule if we are currently running on this runqueue and
+		 * our priority decreased, or if we are not currently running on
+		 * this runqueue and our priority is higher than the current's
+		 */
+		if (task_running(rq, p)) {
+			if (p->prio > oldprio)
+				resched_task(rq->curr);
+		} else if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+	}
+}
+
+/**
+ * sys_sched_yield - yield the current processor to other threads.
+ *
+ * this function yields the current CPU by moving the calling thread
+ * to the expired array. If there are no other threads running on this
+ * CPU then this function will return.
+ */
+
+static long nick_sys_yield(void)
+{
+	local_irq_disable();
+#ifdef CONFIG_SCHEDSTATS
+	schedstat_inc(this_rq(), yld_cnt);
+#endif
+	current->sdu.nicksched.used_slice = -1;
+	set_need_resched();
+	local_irq_enable();
+
+	return 0;
+}
+
+static void nick_yield(void)
+{
+	set_current_state(TASK_RUNNING);
+	nick_sys_yield();
+#ifndef CONFIG_PREEMPT
+	/*
+	 * Kernel-space yield won't follow the schedule upon
+	 * return from syscall path. Must call schedule() here.
+	 */
+	schedule();
+#endif
+}
+
+static void nick_init_idle(task_t *idle, int cpu)
+{
+	idle->sdu.nicksched.used_slice = 0;
+	idle->sdu.nicksched.array = NULL;
+	idle->prio = NICK_MAX_PRIO;
+}
+
+#ifdef CONFIG_SMP
+/* source and destination queues will be already locked */
+static void nick_migrate_queued_task(struct task_struct *p, int dest_cpu)
+{
+	struct runqueue *rq_src = task_rq(p);
+	struct runqueue *rq_dest = cpu_rq(dest_cpu);
+
+	/*
+	 * Sync timestamp with rq_dest's before activating.
+	 * The same thing could be achieved by doing this step
+	 * afterwards, and pretending it was a local activate.
+	 * This way is cleaner and logically correct.
+	 */
+	p->timestamp = p->timestamp - rq_src->timestamp_last_tick
+			+ rq_dest->timestamp_last_tick;
+	deactivate_task(p, rq_src);
+	set_task_cpu(p, dest_cpu);
+	activate_task(p, rq_dest, 0);
+	if (TASK_PREEMPTS_CURR(p, rq_dest))
+		resched_task(rq_dest->curr);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void nick_set_select_idle_first(struct runqueue *rq)
+{
+	__setscheduler(rq->idle, SCHED_FIFO, MAX_RT_PRIO-1);
+	/* Add idle task to _front_ of its priority queue */
+	__activate_idle_task(rq->idle, rq);
+}
+
+static void nick_set_select_idle_last(struct runqueue *rq)
+{
+	deactivate_task(rq->idle, rq);
+	rq->idle->static_prio = NICK_MAX_PRIO;
+	__setscheduler(rq->idle, SCHED_NORMAL, 0);
+}
+
+static void nick_migrate_dead_tasks(unsigned int dead_cpu)
+{
+	unsigned arr, i;
+	struct runqueue *rq = cpu_rq(dead_cpu);
+
+	for (arr = 0; arr < 2; arr++) {
+		for (i = 0; i < NICK_MAX_PRIO; i++) {
+			struct list_head *list = &rq->qu.nicksched.arrays[arr].queue[i];
+			while (!list_empty(list))
+				migrate_dead(dead_cpu,
+					     list_entry(list->next, task_t,
+							run_list));
+		}
+	}
+}
+#endif
+#endif
+
+static void nick_sched_init(void)
+{
+	init_task.sdu.nicksched.used_slice = 0;
+	init_task.sdu.nicksched.array = NULL;
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static void nick_normalize_rt_task(struct task_struct *p)
+{
+	struct nick_prio_array *array;
+	unsigned long flags;
+	runqueue_t *rq;
+
+	rq = task_rq_lock(p, &flags);
+
+	array = p->sdu.nicksched.array;
+	if (array)
+		deactivate_task(p, rq);
+	__setscheduler(p, SCHED_NORMAL, 0);
+	if (array) {
+		__activate_task(p, rq, array);
+		resched_task(rq->curr);
+	}
+
+	task_rq_unlock(rq, &flags);
+}
+#endif
+
+static unsigned int nick_task_timeslice(const struct task_struct *p)
+{
+	return task_timeslice(p, task_rq(p));
+}
+
+#ifdef CONFIG_SYSFS
+#define no_change(a) (a)
+SCHED_DRV_SYSFS_UINT_RW(base_timeslice, no_change, no_change, min_base_timeslice, max_base_timeslice);
+
+static struct attribute *nick_attrs[] = {
+	&SCHED_DRV_SYSFS_ATTR(base_timeslice),
+	NULL,
+};
+#endif
+
+const struct sched_drv nick_sched_drv = {
+	.name = "nicksched",
+	.init_runqueue_queue = nick_init_runqueue_queue,
+	.set_oom_time_slice = nick_set_oom_time_slice,
+	.task_timeslice = nick_task_timeslice,
+	.wake_up_task = nick_wake_up_task,
+	.fork = nick_fork,
+	.wake_up_new_task = nick_wake_up_new_task,
+	.exit = nick_exit,
+#ifdef CONFIG_SMP
+	.move_tasks = nick_move_tasks,
+#endif
+	.tick = nick_tick,
+#ifdef CONFIG_SCHED_SMT
+	.head_of_queue = nick_head_of_queue,
+	.dependent_sleeper_trumps = nick_dependent_sleeper_trumps,
+#endif
+	.schedule = nick_schedule,
+	.set_normal_task_nice = nick_set_normal_task_nice,
+	.setscheduler = nick_setscheduler,
+	.sys_yield = nick_sys_yield,
+	.yield = nick_yield,
+	.init_idle = nick_init_idle,
+	.sched_init = nick_sched_init,
+#ifdef CONFIG_SMP
+	.migrate_queued_task = nick_migrate_queued_task,
+#ifdef CONFIG_HOTPLUG_CPU
+	.set_select_idle_first = nick_set_select_idle_first,
+	.set_select_idle_last = nick_set_select_idle_last,
+	.migrate_dead_tasks = nick_migrate_dead_tasks,
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	.normalize_rt_task = nick_normalize_rt_task,
+#endif
+	.attrs = nick_attrs,
+};
diff -urN oldtree/kernel/sched.c newtree/kernel/sched.c
--- oldtree/kernel/sched.c	2006-01-03 03:21:10.000000000 +0000
+++ newtree/kernel/sched.c	2006-02-04 18:03:15.685969944 +0000
@@ -51,215 +51,19 @@
 
 #include <asm/unistd.h>
 
-/*
- * Convert user-nice values [ -20 ... 0 ... 19 ]
- * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
- * and back.
- */
-#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
-#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
-#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)
-
-/*
- * 'User priority' is the nice value converted to something we
- * can work with better when scaling various scheduler parameters,
- * it's a [ 0 ... 39 ] range.
- */
-#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
-#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
-#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))
-
-/*
- * Some helpers for converting nanosecond timing to jiffy resolution
- */
-#define NS_TO_JIFFIES(TIME)	((TIME) / (1000000000 / HZ))
-#define JIFFIES_TO_NS(TIME)	((TIME) * (1000000000 / HZ))
-
-/*
- * These are the 'tuning knobs' of the scheduler:
- *
- * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
- * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
- * Timeslices get refilled after they expire.
- */
-#define MIN_TIMESLICE		max(5 * HZ / 1000, 1)
-#define DEF_TIMESLICE		(100 * HZ / 1000)
-#define ON_RUNQUEUE_WEIGHT	 30
-#define CHILD_PENALTY		 95
-#define PARENT_PENALTY		100
-#define EXIT_WEIGHT		  3
-#define PRIO_BONUS_RATIO	 25
-#define MAX_BONUS		(MAX_USER_PRIO * PRIO_BONUS_RATIO / 100)
-#define INTERACTIVE_DELTA	  2
-#define MAX_SLEEP_AVG		(DEF_TIMESLICE * MAX_BONUS)
-#define STARVATION_LIMIT	(MAX_SLEEP_AVG)
-#define NS_MAX_SLEEP_AVG	(JIFFIES_TO_NS(MAX_SLEEP_AVG))
-
-/*
- * If a task is 'interactive' then we reinsert it in the active
- * array after it has expired its current timeslice. (it will not
- * continue to run immediately, it will still roundrobin with
- * other interactive tasks.)
- *
- * This part scales the interactivity limit depending on niceness.
- *
- * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
- * Here are a few examples of different nice levels:
- *
- *  TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
- *  TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
- *  TASK_INTERACTIVE(  0): [1,1,1,1,0,0,0,0,0,0,0]
- *  TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
- *  TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
- *
- * (the X axis represents the possible -5 ... 0 ... +5 dynamic
- *  priority range a task can explore, a value of '1' means the
- *  task is rated interactive.)
- *
- * Ie. nice +19 tasks can never get 'interactive' enough to be
- * reinserted into the active array. And only heavily CPU-hog nice -20
- * tasks will be expired. Default nice 0 tasks are somewhere between,
- * it takes some effort for them to get interactive, but it's not
- * too hard.
- */
-
-#define CURRENT_BONUS(p) \
-	(NS_TO_JIFFIES((p)->sleep_avg) * MAX_BONUS / \
-		MAX_SLEEP_AVG)
-
-#define GRANULARITY	(10 * HZ / 1000 ? : 1)
-
-#ifdef CONFIG_SMP
-#define TIMESLICE_GRANULARITY(p)	(GRANULARITY * \
-		(1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)) * \
-			num_online_cpus())
-#else
-#define TIMESLICE_GRANULARITY(p)	(GRANULARITY * \
-		(1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)))
-#endif
-
-#define SCALE(v1,v1_max,v2_max) \
-	(v1) * (v2_max) / (v1_max)
-
-#define DELTA(p) \
-	(SCALE(TASK_NICE(p), 40, MAX_BONUS) + INTERACTIVE_DELTA)
-
-#define TASK_INTERACTIVE(p) \
-	((p)->prio <= (p)->static_prio - DELTA(p))
+#include <linux/sched_runq.h>
+#include <linux/sched_pvt.h>
+#include <linux/sched_drv.h>
 
-#define INTERACTIVE_SLEEP(p) \
-	(JIFFIES_TO_NS(MAX_SLEEP_AVG * \
-		(MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))
-
-#define TASK_PREEMPTS_CURR(p, rq) \
-	((p)->prio < (rq)->curr->prio)
-
-/*
- * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
- * to time slice values: [800ms ... 100ms ... 5ms]
- *
- * The higher a thread's priority, the bigger timeslices
- * it gets during one round of execution. But even the lowest
- * priority thread gets MIN_TIMESLICE worth of execution time.
- */
-
-#define SCALE_PRIO(x, prio) \
-	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE)
-
-static unsigned int task_timeslice(task_t *p)
+static inline unsigned int task_timeslice(const task_t *p)
 {
-	if (p->static_prio < NICE_TO_PRIO(0))
-		return SCALE_PRIO(DEF_TIMESLICE*4, p->static_prio);
-	else
-		return SCALE_PRIO(DEF_TIMESLICE, p->static_prio);
+	return sched_drvp->task_timeslice(p);
 }
-#define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran)	\
-				< (long long) (sd)->cache_hot_time)
 
 /*
  * These are the runqueue data structures:
  */
-
-#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
-
-typedef struct runqueue runqueue_t;
-
-struct prio_array {
-	unsigned int nr_active;
-	unsigned long bitmap[BITMAP_SIZE];
-	struct list_head queue[MAX_PRIO];
-};
-
-/*
- * This is the main, per-CPU runqueue data structure.
- *
- * Locking rule: those places that want to lock multiple runqueues
- * (such as the load balancing or the thread migration code), lock
- * acquire operations must be ordered by ascending &runqueue.
- */
-struct runqueue {
-	spinlock_t lock;
-
-	/*
-	 * nr_running and cpu_load should be in the same cacheline because
-	 * remote CPUs use both these fields when doing load calculation.
-	 */
-	unsigned long nr_running;
-#ifdef CONFIG_SMP
-	unsigned long prio_bias;
-	unsigned long cpu_load[3];
-#endif
-	unsigned long long nr_switches;
-
-	/*
-	 * This is part of a global counter where only the total sum
-	 * over all CPUs matters. A task can increase this counter on
-	 * one CPU and if it got migrated afterwards it may decrease
-	 * it on another CPU. Always updated under the runqueue lock:
-	 */
-	unsigned long nr_uninterruptible;
-
-	unsigned long expired_timestamp;
-	unsigned long long timestamp_last_tick;
-	task_t *curr, *idle;
-	struct mm_struct *prev_mm;
-	prio_array_t *active, *expired, arrays[2];
-	int best_expired_prio;
-	atomic_t nr_iowait;
-
-#ifdef CONFIG_SMP
-	struct sched_domain *sd;
-
-	/* For active balancing */
-	int active_balance;
-	int push_cpu;
-
-	task_t *migration_thread;
-	struct list_head migration_queue;
-#endif
-
-#ifdef CONFIG_SCHEDSTATS
-	/* latency stats */
-	struct sched_info rq_sched_info;
-
-	/* sys_sched_yield() stats */
-	unsigned long yld_exp_empty;
-	unsigned long yld_act_empty;
-	unsigned long yld_both_empty;
-	unsigned long yld_cnt;
-
-	/* schedule() stats */
-	unsigned long sched_switch;
-	unsigned long sched_cnt;
-	unsigned long sched_goidle;
-
-	/* try_to_wake_up() stats */
-	unsigned long ttwu_cnt;
-	unsigned long ttwu_local;
-#endif
-};
-
-static DEFINE_PER_CPU(struct runqueue, runqueues);
+DEFINE_PER_CPU(struct runqueue, runqueues);
 
 /*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
@@ -271,108 +75,6 @@
 #define for_each_domain(cpu, domain) \
 for (domain = rcu_dereference(cpu_rq(cpu)->sd); domain; domain = domain->parent)
 
-#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
-#define this_rq()		(&__get_cpu_var(runqueues))
-#define task_rq(p)		cpu_rq(task_cpu(p))
-#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
-
-#ifndef prepare_arch_switch
-# define prepare_arch_switch(next)	do { } while (0)
-#endif
-#ifndef finish_arch_switch
-# define finish_arch_switch(prev)	do { } while (0)
-#endif
-
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
-static inline int task_running(runqueue_t *rq, task_t *p)
-{
-	return rq->curr == p;
-}
-
-static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
-{
-}
-
-static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
-{
-#ifdef CONFIG_DEBUG_SPINLOCK
-	/* this is a valid case when another task releases the spinlock */
-	rq->lock.owner = current;
-#endif
-	spin_unlock_irq(&rq->lock);
-}
-
-#else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline int task_running(runqueue_t *rq, task_t *p)
-{
-#ifdef CONFIG_SMP
-	return p->oncpu;
-#else
-	return rq->curr == p;
-#endif
-}
-
-static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
-{
-#ifdef CONFIG_SMP
-	/*
-	 * We can optimise this out completely for !SMP, because the
-	 * SMP rebalancing from interrupt is the only thing that cares
-	 * here.
-	 */
-	next->oncpu = 1;
-#endif
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-	spin_unlock_irq(&rq->lock);
-#else
-	spin_unlock(&rq->lock);
-#endif
-}
-
-static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
-{
-#ifdef CONFIG_SMP
-	/*
-	 * After ->oncpu is cleared, the task can be moved to a different CPU.
-	 * We must ensure this doesn't happen until the switch is completely
-	 * finished.
-	 */
-	smp_wmb();
-	prev->oncpu = 0;
-#endif
-#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-	local_irq_enable();
-#endif
-}
-#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
-
-/*
- * task_rq_lock - lock the runqueue a given task resides on and disable
- * interrupts.  Note the ordering: we can safely lookup the task_rq without
- * explicitly disabling preemption.
- */
-static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
-	__acquires(rq->lock)
-{
-	struct runqueue *rq;
-
-repeat_lock_task:
-	local_irq_save(*flags);
-	rq = task_rq(p);
-	spin_lock(&rq->lock);
-	if (unlikely(rq != task_rq(p))) {
-		spin_unlock_irqrestore(&rq->lock, *flags);
-		goto repeat_lock_task;
-	}
-	return rq;
-}
-
-static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
-	__releases(rq->lock)
-{
-	spin_unlock_irqrestore(&rq->lock, *flags);
-}
-
 #ifdef CONFIG_SCHEDSTATS
 /*
  * bump this up when changing the output format or the meaning of an existing
@@ -464,398 +166,12 @@
 	.release = single_release,
 };
 
-# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
 # define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
 #else /* !CONFIG_SCHEDSTATS */
-# define schedstat_inc(rq, field)	do { } while (0)
 # define schedstat_add(rq, field, amt)	do { } while (0)
 #endif
 
 /*
- * rq_lock - lock a given runqueue and disable interrupts.
- */
-static inline runqueue_t *this_rq_lock(void)
-	__acquires(rq->lock)
-{
-	runqueue_t *rq;
-
-	local_irq_disable();
-	rq = this_rq();
-	spin_lock(&rq->lock);
-
-	return rq;
-}
-
-#ifdef CONFIG_SCHEDSTATS
-/*
- * Called when a process is dequeued from the active array and given
- * the cpu.  We should note that with the exception of interactive
- * tasks, the expired queue will become the active queue after the active
- * queue is empty, without explicitly dequeuing and requeuing tasks in the
- * expired queue.  (Interactive tasks may be requeued directly to the
- * active queue, thus delaying tasks in the expired queue from running;
- * see scheduler_tick()).
- *
- * This function is only called from sched_info_arrive(), rather than
- * dequeue_task(). Even though a task may be queued and dequeued multiple
- * times as it is shuffled about, we're really interested in knowing how
- * long it was from the *first* time it was queued to the time that it
- * finally hit a cpu.
- */
-static inline void sched_info_dequeued(task_t *t)
-{
-	t->sched_info.last_queued = 0;
-}
-
-/*
- * Called when a task finally hits the cpu.  We can now calculate how
- * long it was waiting to run.  We also note when it began so that we
- * can keep stats on how long its timeslice is.
- */
-static inline void sched_info_arrive(task_t *t)
-{
-	unsigned long now = jiffies, diff = 0;
-	struct runqueue *rq = task_rq(t);
-
-	if (t->sched_info.last_queued)
-		diff = now - t->sched_info.last_queued;
-	sched_info_dequeued(t);
-	t->sched_info.run_delay += diff;
-	t->sched_info.last_arrival = now;
-	t->sched_info.pcnt++;
-
-	if (!rq)
-		return;
-
-	rq->rq_sched_info.run_delay += diff;
-	rq->rq_sched_info.pcnt++;
-}
-
-/*
- * Called when a process is queued into either the active or expired
- * array.  The time is noted and later used to determine how long we
- * had to wait for us to reach the cpu.  Since the expired queue will
- * become the active queue after active queue is empty, without dequeuing
- * and requeuing any tasks, we are interested in queuing to either. It
- * is unusual but not impossible for tasks to be dequeued and immediately
- * requeued in the same or another array: this can happen in sched_yield(),
- * set_user_nice(), and even load_balance() as it moves tasks from runqueue
- * to runqueue.
- *
- * This function is only called from enqueue_task(), but also only updates
- * the timestamp if it is already not set.  It's assumed that
- * sched_info_dequeued() will clear that stamp when appropriate.
- */
-static inline void sched_info_queued(task_t *t)
-{
-	if (!t->sched_info.last_queued)
-		t->sched_info.last_queued = jiffies;
-}
-
-/*
- * Called when a process ceases being the active-running process, either
- * voluntarily or involuntarily.  Now we can calculate how long we ran.
- */
-static inline void sched_info_depart(task_t *t)
-{
-	struct runqueue *rq = task_rq(t);
-	unsigned long diff = jiffies - t->sched_info.last_arrival;
-
-	t->sched_info.cpu_time += diff;
-
-	if (rq)
-		rq->rq_sched_info.cpu_time += diff;
-}
-
-/*
- * Called when tasks are switched involuntarily due, typically, to expiring
- * their time slice.  (This may also be called when switching to or from
- * the idle task.)  We are only called when prev != next.
- */
-static inline void sched_info_switch(task_t *prev, task_t *next)
-{
-	struct runqueue *rq = task_rq(prev);
-
-	/*
-	 * prev now departs the cpu.  It's not interesting to record
-	 * stats about how efficient we were at scheduling the idle
-	 * process, however.
-	 */
-	if (prev != rq->idle)
-		sched_info_depart(prev);
-
-	if (next != rq->idle)
-		sched_info_arrive(next);
-}
-#else
-#define sched_info_queued(t)		do { } while (0)
-#define sched_info_switch(t, next)	do { } while (0)
-#endif /* CONFIG_SCHEDSTATS */
-
-/*
- * Adding/removing a task to/from a priority array:
- */
-static void dequeue_task(struct task_struct *p, prio_array_t *array)
-{
-	array->nr_active--;
-	list_del(&p->run_list);
-	if (list_empty(array->queue + p->prio))
-		__clear_bit(p->prio, array->bitmap);
-}
-
-static void enqueue_task(struct task_struct *p, prio_array_t *array)
-{
-	sched_info_queued(p);
-	list_add_tail(&p->run_list, array->queue + p->prio);
-	__set_bit(p->prio, array->bitmap);
-	array->nr_active++;
-	p->array = array;
-}
-
-/*
- * Put task to the end of the run list without the overhead of dequeue
- * followed by enqueue.
- */
-static void requeue_task(struct task_struct *p, prio_array_t *array)
-{
-	list_move_tail(&p->run_list, array->queue + p->prio);
-}
-
-static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
-{
-	list_add(&p->run_list, array->queue + p->prio);
-	__set_bit(p->prio, array->bitmap);
-	array->nr_active++;
-	p->array = array;
-}
-
-/*
- * effective_prio - return the priority that is based on the static
- * priority but is modified by bonuses/penalties.
- *
- * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
- * into the -5 ... 0 ... +5 bonus/penalty range.
- *
- * We use 25% of the full 0...39 priority range so that:
- *
- * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
- * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
- *
- * Both properties are important to certain workloads.
- */
-static int effective_prio(task_t *p)
-{
-	int bonus, prio;
-
-	if (rt_task(p))
-		return p->prio;
-
-	bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
-
-	prio = p->static_prio - bonus;
-	if (prio < MAX_RT_PRIO)
-		prio = MAX_RT_PRIO;
-	if (prio > MAX_PRIO-1)
-		prio = MAX_PRIO-1;
-	return prio;
-}
-
-#ifdef CONFIG_SMP
-static inline void inc_prio_bias(runqueue_t *rq, int prio)
-{
-	rq->prio_bias += MAX_PRIO - prio;
-}
-
-static inline void dec_prio_bias(runqueue_t *rq, int prio)
-{
-	rq->prio_bias -= MAX_PRIO - prio;
-}
-
-static inline void inc_nr_running(task_t *p, runqueue_t *rq)
-{
-	rq->nr_running++;
-	if (rt_task(p)) {
-		if (p != rq->migration_thread)
-			/*
-			 * The migration thread does the actual balancing. Do
-			 * not bias by its priority as the ultra high priority
-			 * will skew balancing adversely.
-			 */
-			inc_prio_bias(rq, p->prio);
-	} else
-		inc_prio_bias(rq, p->static_prio);
-}
-
-static inline void dec_nr_running(task_t *p, runqueue_t *rq)
-{
-	rq->nr_running--;
-	if (rt_task(p)) {
-		if (p != rq->migration_thread)
-			dec_prio_bias(rq, p->prio);
-	} else
-		dec_prio_bias(rq, p->static_prio);
-}
-#else
-static inline void inc_prio_bias(runqueue_t *rq, int prio)
-{
-}
-
-static inline void dec_prio_bias(runqueue_t *rq, int prio)
-{
-}
-
-static inline void inc_nr_running(task_t *p, runqueue_t *rq)
-{
-	rq->nr_running++;
-}
-
-static inline void dec_nr_running(task_t *p, runqueue_t *rq)
-{
-	rq->nr_running--;
-}
-#endif
-
-/*
- * __activate_task - move a task to the runqueue.
- */
-static inline void __activate_task(task_t *p, runqueue_t *rq)
-{
-	enqueue_task(p, rq->active);
-	inc_nr_running(p, rq);
-}
-
-/*
- * __activate_idle_task - move idle task to the _front_ of runqueue.
- */
-static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
-{
-	enqueue_task_head(p, rq->active);
-	inc_nr_running(p, rq);
-}
-
-static int recalc_task_prio(task_t *p, unsigned long long now)
-{
-	/* Caller must always ensure 'now >= p->timestamp' */
-	unsigned long long __sleep_time = now - p->timestamp;
-	unsigned long sleep_time;
-
-	if (__sleep_time > NS_MAX_SLEEP_AVG)
-		sleep_time = NS_MAX_SLEEP_AVG;
-	else
-		sleep_time = (unsigned long)__sleep_time;
-
-	if (likely(sleep_time > 0)) {
-		/*
-		 * User tasks that sleep a long time are categorised as
-		 * idle and will get just interactive status to stay active &
-		 * prevent them suddenly becoming cpu hogs and starving
-		 * other processes.
-		 */
-		if (p->mm && p->activated != -1 &&
-			sleep_time > INTERACTIVE_SLEEP(p)) {
-				p->sleep_avg = JIFFIES_TO_NS(MAX_SLEEP_AVG -
-						DEF_TIMESLICE);
-		} else {
-			/*
-			 * The lower the sleep avg a task has the more
-			 * rapidly it will rise with sleep time.
-			 */
-			sleep_time *= (MAX_BONUS - CURRENT_BONUS(p)) ? : 1;
-
-			/*
-			 * Tasks waking from uninterruptible sleep are
-			 * limited in their sleep_avg rise as they
-			 * are likely to be waiting on I/O
-			 */
-			if (p->activated == -1 && p->mm) {
-				if (p->sleep_avg >= INTERACTIVE_SLEEP(p))
-					sleep_time = 0;
-				else if (p->sleep_avg + sleep_time >=
-						INTERACTIVE_SLEEP(p)) {
-					p->sleep_avg = INTERACTIVE_SLEEP(p);
-					sleep_time = 0;
-				}
-			}
-
-			/*
-			 * This code gives a bonus to interactive tasks.
-			 *
-			 * The boost works by updating the 'average sleep time'
-			 * value here, based on ->timestamp. The more time a
-			 * task spends sleeping, the higher the average gets -
-			 * and the higher the priority boost gets as well.
-			 */
-			p->sleep_avg += sleep_time;
-
-			if (p->sleep_avg > NS_MAX_SLEEP_AVG)
-				p->sleep_avg = NS_MAX_SLEEP_AVG;
-		}
-	}
-
-	return effective_prio(p);
-}
-
-/*
- * activate_task - move a task to the runqueue and do priority recalculation
- *
- * Update all the scheduling statistics stuff. (sleep average
- * calculation, priority modifiers, etc.)
- */
-static void activate_task(task_t *p, runqueue_t *rq, int local)
-{
-	unsigned long long now;
-
-	now = sched_clock();
-#ifdef CONFIG_SMP
-	if (!local) {
-		/* Compensate for drifting sched_clock */
-		runqueue_t *this_rq = this_rq();
-		now = (now - this_rq->timestamp_last_tick)
-			+ rq->timestamp_last_tick;
-	}
-#endif
-
-	if (!rt_task(p))
-		p->prio = recalc_task_prio(p, now);
-
-	/*
-	 * This checks to make sure it's not an uninterruptible task
-	 * that is now waking up.
-	 */
-	if (!p->activated) {
-		/*
-		 * Tasks which were woken up by interrupts (ie. hw events)
-		 * are most likely of interactive nature. So we give them
-		 * the credit of extending their sleep time to the period
-		 * of time they spend on the runqueue, waiting for execution
-		 * on a CPU, first time around:
-		 */
-		if (in_interrupt())
-			p->activated = 2;
-		else {
-			/*
-			 * Normal first-time wakeups get a credit too for
-			 * on-runqueue time, but it will be weighted down:
-			 */
-			p->activated = 1;
-		}
-	}
-	p->timestamp = now;
-
-	__activate_task(p, rq);
-}
-
-/*
- * deactivate_task - remove a task from the runqueue.
- */
-static void deactivate_task(struct task_struct *p, runqueue_t *rq)
-{
-	dec_nr_running(p, rq);
-	dequeue_task(p, p->array);
-	p->array = NULL;
-}
-
-/*
  * resched_task - mark a task 'to be rescheduled now'.
  *
  * On UP this means the setting of the need_resched flag, on SMP it
@@ -863,7 +179,7 @@
  * the target CPU.
  */
 #ifdef CONFIG_SMP
-static void resched_task(task_t *p)
+void resched_task(task_t *p)
 {
 	int cpu;
 
@@ -883,12 +199,6 @@
 	if (!test_tsk_thread_flag(p, TIF_POLLING_NRFLAG))
 		smp_send_reschedule(cpu);
 }
-#else
-static inline void resched_task(task_t *p)
-{
-	assert_spin_locked(&task_rq(p)->lock);
-	set_tsk_need_resched(p);
-}
 #endif
 
 /**
@@ -922,7 +232,7 @@
 	 * If the task is not on a runqueue (and not running), then
 	 * it is sufficient to simply update the task's cpu field.
 	 */
-	if (!p->array && !task_running(rq, p)) {
+	if (!task_is_queued(p) && !task_running(rq, p)) {
 		set_task_cpu(p, dest_cpu);
 		return 0;
 	}
@@ -952,7 +262,7 @@
 repeat:
 	rq = task_rq_lock(p, &flags);
 	/* Must be off runqueue entirely, not preempted. */
-	if (unlikely(p->array || task_running(rq, p))) {
+	if (unlikely(task_is_queued(p) || task_running(rq, p))) {
 		/* If it's preempted, we yield.  It could be a while. */
 		preempted = !task_running(rq, p);
 		task_rq_unlock(rq, &flags);
@@ -994,61 +304,29 @@
  * We want to under-estimate the load of migration sources, to
  * balance conservatively.
  */
-static inline unsigned long __source_load(int cpu, int type, enum idle_type idle)
+static inline unsigned long source_load(int cpu, int type)
 {
 	runqueue_t *rq = cpu_rq(cpu);
-	unsigned long running = rq->nr_running;
-	unsigned long source_load, cpu_load = rq->cpu_load[type-1],
-		load_now = running * SCHED_LOAD_SCALE;
+	unsigned long load_now = rq->prio_bias * SCHED_LOAD_SCALE;
 
 	if (type == 0)
-		source_load = load_now;
-	else
-		source_load = min(cpu_load, load_now);
-
-	if (running > 1 || (idle == NOT_IDLE && running))
-		/*
-		 * If we are busy rebalancing the load is biased by
-		 * priority to create 'nice' support across cpus. When
-		 * idle rebalancing we should only bias the source_load if
-		 * there is more than one task running on that queue to
-		 * prevent idle rebalance from trying to pull tasks from a
-		 * queue with only one running task.
-		 */
-		source_load = source_load * rq->prio_bias / running;
+		return load_now;
 
-	return source_load;
-}
-
-static inline unsigned long source_load(int cpu, int type)
-{
-	return __source_load(cpu, type, NOT_IDLE);
+	return min(rq->cpu_load[type-1], load_now);
 }
 
 /*
  * Return a high guess at the load of a migration-target cpu
  */
-static inline unsigned long __target_load(int cpu, int type, enum idle_type idle)
+static inline unsigned long target_load(int cpu, int type)
 {
 	runqueue_t *rq = cpu_rq(cpu);
-	unsigned long running = rq->nr_running;
-	unsigned long target_load, cpu_load = rq->cpu_load[type-1],
-		load_now = running * SCHED_LOAD_SCALE;
+	unsigned long load_now = rq->prio_bias * SCHED_LOAD_SCALE;
 
 	if (type == 0)
-		target_load = load_now;
-	else
-		target_load = max(cpu_load, load_now);
+		return load_now;
 
-	if (running > 1 || (idle == NOT_IDLE && running))
-		target_load = target_load * rq->prio_bias / running;
-
-	return target_load;
-}
-
-static inline unsigned long target_load(int cpu, int type)
-{
-	return __target_load(cpu, type, NOT_IDLE);
+	return max(rq->cpu_load[type-1], load_now);
 }
 
 /*
@@ -1255,7 +533,7 @@
 	if (!(old_state & state))
 		goto out;
 
-	if (p->array)
+	if (task_is_queued(p))
 		goto out_running;
 
 	cpu = task_cpu(p);
@@ -1306,7 +584,7 @@
 			 * of the current CPU:
 			 */
 			if (sync)
-				tl -= SCHED_LOAD_SCALE;
+				tl -= p->bias_prio * SCHED_LOAD_SCALE;
 
 			if ((tl <= load &&
 				tl + target_load(cpu, idx) <= SCHED_LOAD_SCALE) ||
@@ -1344,7 +622,7 @@
 		old_state = p->state;
 		if (!(old_state & state))
 			goto out;
-		if (p->array)
+		if (task_is_queued(p))
 			goto out_running;
 
 		this_cpu = smp_processor_id();
@@ -1353,37 +631,7 @@
 
 out_activate:
 #endif /* CONFIG_SMP */
-	if (old_state == TASK_UNINTERRUPTIBLE) {
-		rq->nr_uninterruptible--;
-		/*
-		 * Tasks on involuntary sleep don't earn
-		 * sleep_avg beyond just interactive state.
-		 */
-		p->activated = -1;
-	}
-
-	/*
-	 * Tasks that have marked their sleep as noninteractive get
-	 * woken up without updating their sleep average. (i.e. their
-	 * sleep is handled in a priority-neutral manner, no priority
-	 * boost and no penalty.)
-	 */
-	if (old_state & TASK_NONINTERACTIVE)
-		__activate_task(p, rq);
-	else
-		activate_task(p, rq, cpu == this_cpu);
-	/*
-	 * Sync wakeups (i.e. those types of wakeups where the waker
-	 * has indicated that it will leave the CPU in short order)
-	 * don't trigger a preemption, if the woken up task will run on
-	 * this cpu. (in this case the 'I will reschedule' promise of
-	 * the waker guarantees that the freshly woken up task is going
-	 * to be considered on this CPU.)
-	 */
-	if (!sync || cpu != this_cpu) {
-		if (TASK_PREEMPTS_CURR(p, rq))
-			resched_task(rq->curr);
-	}
+	sched_drvp->wake_up_task(p, rq, old_state, sync);
 	success = 1;
 
 out_running:
@@ -1400,243 +648,63 @@
 				 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
 }
 
-EXPORT_SYMBOL(wake_up_process);
-
-int fastcall wake_up_state(task_t *p, unsigned int state)
-{
-	return try_to_wake_up(p, state, 0);
-}
-
-/*
- * Perform scheduler related setup for a newly forked process p.
- * p is forked by current.
- */
-void fastcall sched_fork(task_t *p, int clone_flags)
-{
-	int cpu = get_cpu();
-
-#ifdef CONFIG_SMP
-	cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
-#endif
-	set_task_cpu(p, cpu);
-
-	/*
-	 * We mark the process as running here, but have not actually
-	 * inserted it onto the runqueue yet. This guarantees that
-	 * nobody will actually run it, and a signal or other external
-	 * event cannot wake it up and insert it on the runqueue either.
-	 */
-	p->state = TASK_RUNNING;
-	INIT_LIST_HEAD(&p->run_list);
-	p->array = NULL;
-#ifdef CONFIG_SCHEDSTATS
-	memset(&p->sched_info, 0, sizeof(p->sched_info));
-#endif
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
-	p->oncpu = 0;
-#endif
-#ifdef CONFIG_PREEMPT
-	/* Want to start with kernel preemption disabled. */
-	task_thread_info(p)->preempt_count = 1;
-#endif
-	/*
-	 * Share the timeslice between parent and child, thus the
-	 * total amount of pending timeslices in the system doesn't change,
-	 * resulting in more scheduling fairness.
-	 */
-	local_irq_disable();
-	p->time_slice = (current->time_slice + 1) >> 1;
-	/*
-	 * The remainder of the first timeslice might be recovered by
-	 * the parent if the child exits early enough.
-	 */
-	p->first_time_slice = 1;
-	current->time_slice >>= 1;
-	p->timestamp = sched_clock();
-	if (unlikely(!current->time_slice)) {
-		/*
-		 * This case is rare, it happens when the parent has only
-		 * a single jiffy left from its timeslice. Taking the
-		 * runqueue lock is not a problem.
-		 */
-		current->time_slice = 1;
-		scheduler_tick();
-	}
-	local_irq_enable();
-	put_cpu();
-}
-
-/*
- * wake_up_new_task - wake up a newly created task for the first time.
- *
- * This function will do some initial scheduler statistics housekeeping
- * that must be done for every newly created context, then puts the task
- * on the runqueue and wakes it.
- */
-void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
-{
-	unsigned long flags;
-	int this_cpu, cpu;
-	runqueue_t *rq, *this_rq;
-
-	rq = task_rq_lock(p, &flags);
-	BUG_ON(p->state != TASK_RUNNING);
-	this_cpu = smp_processor_id();
-	cpu = task_cpu(p);
-
-	/*
-	 * We decrease the sleep average of forking parents
-	 * and children as well, to keep max-interactive tasks
-	 * from forking tasks that are max-interactive. The parent
-	 * (current) is done further down, under its lock.
-	 */
-	p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
-		CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
-
-	p->prio = effective_prio(p);
-
-	if (likely(cpu == this_cpu)) {
-		if (!(clone_flags & CLONE_VM)) {
-			/*
-			 * The VM isn't cloned, so we're in a good position to
-			 * do child-runs-first in anticipation of an exec. This
-			 * usually avoids a lot of COW overhead.
-			 */
-			if (unlikely(!current->array))
-				__activate_task(p, rq);
-			else {
-				p->prio = current->prio;
-				list_add_tail(&p->run_list, &current->run_list);
-				p->array = current->array;
-				p->array->nr_active++;
-				inc_nr_running(p, rq);
-			}
-			set_need_resched();
-		} else
-			/* Run child last */
-			__activate_task(p, rq);
-		/*
-		 * We skip the following code due to cpu == this_cpu
-	 	 *
-		 *   task_rq_unlock(rq, &flags);
-		 *   this_rq = task_rq_lock(current, &flags);
-		 */
-		this_rq = rq;
-	} else {
-		this_rq = cpu_rq(this_cpu);
-
-		/*
-		 * Not the local CPU - must adjust timestamp. This should
-		 * get optimised away in the !CONFIG_SMP case.
-		 */
-		p->timestamp = (p->timestamp - this_rq->timestamp_last_tick)
-					+ rq->timestamp_last_tick;
-		__activate_task(p, rq);
-		if (TASK_PREEMPTS_CURR(p, rq))
-			resched_task(rq->curr);
-
-		/*
-		 * Parent and child are on different CPUs, now get the
-		 * parent runqueue to update the parent's ->sleep_avg:
-		 */
-		task_rq_unlock(rq, &flags);
-		this_rq = task_rq_lock(current, &flags);
-	}
-	current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
-		PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
-	task_rq_unlock(this_rq, &flags);
+EXPORT_SYMBOL(wake_up_process);
+
+int fastcall wake_up_state(task_t *p, unsigned int state)
+{
+	return try_to_wake_up(p, state, 0);
 }
 
 /*
- * Potentially available exiting-child timeslices are
- * retrieved here - this way the parent does not get
- * penalized for creating too many threads.
- *
- * (this cannot be used to 'generate' timeslices
- * artificially, because any timeslice recovered here
- * was given away by the parent in the first place.)
+ * Perform scheduler related setup for a newly forked process p.
+ * p is forked by current.
  */
-void fastcall sched_exit(task_t *p)
+void fastcall sched_fork(task_t *p, int clone_flags)
 {
-	unsigned long flags;
-	runqueue_t *rq;
+	int cpu = get_cpu();
+
+#ifdef CONFIG_SMP
+	cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
+#endif
+	set_task_cpu(p, cpu);
 
 	/*
-	 * If the child was a (relative-) CPU hog then decrease
-	 * the sleep_avg of the parent as well.
+	 * We mark the process as running here, but have not actually
+	 * inserted it onto the runqueue yet. This guarantees that
+	 * nobody will actually run it, and a signal or other external
+	 * event cannot wake it up and insert it on the runqueue either.
 	 */
-	rq = task_rq_lock(p->parent, &flags);
-	if (p->first_time_slice && task_cpu(p) == task_cpu(p->parent)) {
-		p->parent->time_slice += p->time_slice;
-		if (unlikely(p->parent->time_slice > task_timeslice(p)))
-			p->parent->time_slice = task_timeslice(p);
-	}
-	if (p->sleep_avg < p->parent->sleep_avg)
-		p->parent->sleep_avg = p->parent->sleep_avg /
-		(EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg /
-		(EXIT_WEIGHT + 1);
-	task_rq_unlock(rq, &flags);
+	p->state = TASK_RUNNING;
+	INIT_LIST_HEAD(&p->run_list);
+#ifdef CONFIG_SCHEDSTATS
+	memset(&p->sched_info, 0, sizeof(p->sched_info));
+#endif
+#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+	p->oncpu = 0;
+#endif
+#ifdef CONFIG_PREEMPT
+	/* Want to start with kernel preemption disabled. */
+	task_thread_info(p)->preempt_count = 1;
+#endif
+	sched_drvp->fork(p);
+	put_cpu();
 }
 
-/**
- * prepare_task_switch - prepare to switch tasks
- * @rq: the runqueue preparing to switch
- * @next: the task we are going to switch to.
- *
- * This is called with the rq lock held and interrupts off. It must
- * be paired with a subsequent finish_task_switch after the context
- * switch.
+/*
+ * wake_up_new_task - wake up a newly created task for the first time.
  *
- * prepare_task_switch sets up locking and calls architecture specific
- * hooks.
+ * This function will do some initial scheduler statistics housekeeping
+ * that must be done for every newly created context, then puts the task
+ * on the runqueue and wakes it.
  */
-static inline void prepare_task_switch(runqueue_t *rq, task_t *next)
+void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
 {
-	prepare_lock_switch(rq, next);
-	prepare_arch_switch(next);
+	sched_drvp->wake_up_new_task(p, clone_flags);
 }
 
-/**
- * finish_task_switch - clean up after a task-switch
- * @rq: runqueue associated with task-switch
- * @prev: the thread we just switched away from.
- *
- * finish_task_switch must be called after the context switch, paired
- * with a prepare_task_switch call before the context switch.
- * finish_task_switch will reconcile locking set up by prepare_task_switch,
- * and do any other architecture-specific cleanup actions.
- *
- * Note that we may have delayed dropping an mm in context_switch(). If
- * so, we finish that here outside of the runqueue lock.  (Doing it
- * with the lock held can cause deadlocks; see schedule() for
- * details.)
- */
-static inline void finish_task_switch(runqueue_t *rq, task_t *prev)
-	__releases(rq->lock)
+void fastcall sched_exit(task_t *p)
 {
-	struct mm_struct *mm = rq->prev_mm;
-	unsigned long prev_task_flags;
-
-	rq->prev_mm = NULL;
-
-	/*
-	 * A task struct has one reference for the use as "current".
-	 * If a task dies, then it sets EXIT_ZOMBIE in tsk->exit_state and
-	 * calls schedule one last time. The schedule call will never return,
-	 * and the scheduled task must drop that reference.
-	 * The test for EXIT_ZOMBIE must occur while the runqueue locks are
-	 * still held, otherwise prev could be scheduled on another cpu, die
-	 * there before we look at prev->state, and then the reference would
-	 * be dropped twice.
-	 *		Manfred Spraul <manfred@colorfullife.com>
-	 */
-	prev_task_flags = prev->flags;
-	finish_arch_switch(prev);
-	finish_lock_switch(rq, prev);
-	if (mm)
-		mmdrop(mm);
-	if (unlikely(prev_task_flags & PF_DEAD))
-		put_task_struct(prev);
+	sched_drvp->exit(p);
 }
 
 /**
@@ -1657,35 +725,6 @@
 }
 
 /*
- * context_switch - switch to the new MM and the new
- * thread's register state.
- */
-static inline
-task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
-{
-	struct mm_struct *mm = next->mm;
-	struct mm_struct *oldmm = prev->active_mm;
-
-	if (unlikely(!mm)) {
-		next->active_mm = oldmm;
-		atomic_inc(&oldmm->mm_count);
-		enter_lazy_tlb(oldmm, next);
-	} else
-		switch_mm(oldmm, mm, next);
-
-	if (unlikely(!prev->mm)) {
-		prev->active_mm = NULL;
-		WARN_ON(rq->prev_mm);
-		rq->prev_mm = oldmm;
-	}
-
-	/* Here we just switch the register state and the stack. */
-	switch_to(prev, next, prev);
-
-	return prev;
-}
-
-/*
  * nr_running, nr_uninterruptible and nr_context_switches:
  *
  * externally visible scheduler statistics: current number of runnable
@@ -1846,64 +885,6 @@
 }
 
 /*
- * pull_task - move a task from a remote runqueue to the local runqueue.
- * Both runqueues must be locked.
- */
-static inline
-void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
-	       runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
-{
-	dequeue_task(p, src_array);
-	dec_nr_running(p, src_rq);
-	set_task_cpu(p, this_cpu);
-	inc_nr_running(p, this_rq);
-	enqueue_task(p, this_array);
-	p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
-				+ this_rq->timestamp_last_tick;
-	/*
-	 * Note that idle threads have a prio of MAX_PRIO, for this test
-	 * to be always true for them.
-	 */
-	if (TASK_PREEMPTS_CURR(p, this_rq))
-		resched_task(this_rq->curr);
-}
-
-/*
- * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
- */
-static inline
-int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
-		     struct sched_domain *sd, enum idle_type idle,
-		     int *all_pinned)
-{
-	/*
-	 * We do not migrate tasks that are:
-	 * 1) running (obviously), or
-	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
-	 * 3) are cache-hot on their current CPU.
-	 */
-	if (!cpu_isset(this_cpu, p->cpus_allowed))
-		return 0;
-	*all_pinned = 0;
-
-	if (task_running(rq, p))
-		return 0;
-
-	/*
-	 * Aggressive migration if:
-	 * 1) task is cache cold, or
-	 * 2) too many balance attempts have failed.
-	 */
-
-	if (sd->nr_balance_failed > sd->cache_nice_tries)
-		return 1;
-
-	if (task_hot(p, rq->timestamp_last_tick, sd))
-		return 0;
-	return 1;
-}
-
-/*
  * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq,
  * as part of a balancing operation within "domain". Returns the number of
  * tasks moved.
@@ -1911,95 +892,25 @@
  * Called with both runqueues locked.
  */
 static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
-		      unsigned long max_nr_move, struct sched_domain *sd,
-		      enum idle_type idle, int *all_pinned)
+		      unsigned long max_nr_move, long max_bias_move,
+		      struct sched_domain *sd, enum idle_type idle,
+		      int *all_pinned)
 {
-	prio_array_t *array, *dst_array;
-	struct list_head *head, *curr;
-	int idx, pulled = 0, pinned = 0;
-	task_t *tmp;
-
-	if (max_nr_move == 0)
-		goto out;
-
-	pinned = 1;
+	int pulled = sched_drvp->move_tasks(this_rq, this_cpu, busiest, max_nr_move, max_bias_move, sd, idle, all_pinned);
 
 	/*
-	 * We first consider expired tasks. Those will likely not be
-	 * executed in the near future, and they are most likely to
-	 * be cache-cold, thus switching CPUs has the least effect
-	 * on them.
-	 */
-	if (busiest->expired->nr_active) {
-		array = busiest->expired;
-		dst_array = this_rq->expired;
-	} else {
-		array = busiest->active;
-		dst_array = this_rq->active;
-	}
-
-new_array:
-	/* Start searching at priority 0: */
-	idx = 0;
-skip_bitmap:
-	if (!idx)
-		idx = sched_find_first_bit(array->bitmap);
-	else
-		idx = find_next_bit(array->bitmap, MAX_PRIO, idx);
-	if (idx >= MAX_PRIO) {
-		if (array == busiest->expired && busiest->active->nr_active) {
-			array = busiest->active;
-			dst_array = this_rq->active;
-			goto new_array;
-		}
-		goto out;
-	}
-
-	head = array->queue + idx;
-	curr = head->prev;
-skip_queue:
-	tmp = list_entry(curr, task_t, run_list);
-
-	curr = curr->prev;
-
-	if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
-		if (curr != head)
-			goto skip_queue;
-		idx++;
-		goto skip_bitmap;
-	}
-
-#ifdef CONFIG_SCHEDSTATS
-	if (task_hot(tmp, busiest->timestamp_last_tick, sd))
-		schedstat_inc(sd, lb_hot_gained[idle]);
-#endif
-
-	pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
-	pulled++;
-
-	/* We only want to steal up to the prescribed number of tasks. */
-	if (pulled < max_nr_move) {
-		if (curr != head)
-			goto skip_queue;
-		idx++;
-		goto skip_bitmap;
-	}
-out:
-	/*
 	 * Right now, this is the only place pull_task() is called,
 	 * so we can safely collect pull_task() stats here rather than
 	 * inside pull_task().
 	 */
 	schedstat_add(sd, lb_gained[idle], pulled);
 
-	if (all_pinned)
-		*all_pinned = pinned;
 	return pulled;
 }
 
 /*
  * find_busiest_group finds and returns the busiest CPU group within the
- * domain. It calculates and returns the number of tasks which should be
+ * domain. It calculates and returns the amount of biased load which should be
  * moved to restore balance via the imbalance parameter.
  */
 static struct sched_group *
@@ -2035,9 +946,9 @@
 
 			/* Bias balancing toward cpus of our domain */
 			if (local_group)
-				load = __target_load(i, load_idx, idle);
+				load = target_load(i, load_idx);
 			else
-				load = __source_load(i, load_idx, idle);
+				load = source_load(i, load_idx);
 
 			avg_load += load;
 		}
@@ -2092,7 +1003,7 @@
 		unsigned long tmp;
 
 		if (max_load - this_load >= SCHED_LOAD_SCALE*2) {
-			*imbalance = 1;
+			*imbalance = NICE_TO_BIAS_PRIO(0);
 			return busiest;
 		}
 
@@ -2125,7 +1036,7 @@
 		if (pwr_move <= pwr_now)
 			goto out_balanced;
 
-		*imbalance = 1;
+		*imbalance = NICE_TO_BIAS_PRIO(0);
 		return busiest;
 	}
 
@@ -2142,15 +1053,14 @@
 /*
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
-static runqueue_t *find_busiest_queue(struct sched_group *group,
-	enum idle_type idle)
+static runqueue_t *find_busiest_queue(struct sched_group *group)
 {
 	unsigned long load, max_load = 0;
 	runqueue_t *busiest = NULL;
 	int i;
 
 	for_each_cpu_mask(i, group->cpumask) {
-		load = __source_load(i, 0, idle);
+		load = source_load(i, 0);
 
 		if (load > max_load) {
 			max_load = load;
@@ -2167,6 +1077,7 @@
  */
 #define MAX_PINNED_INTERVAL	512
 
+#define minus_1_or_zero(n) ((n) > 0 ? (n) - 1 : 0)
 /*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
@@ -2194,7 +1105,7 @@
 		goto out_balanced;
 	}
 
-	busiest = find_busiest_queue(group, idle);
+	busiest = find_busiest_queue(group);
 	if (!busiest) {
 		schedstat_inc(sd, lb_nobusyq[idle]);
 		goto out_balanced;
@@ -2214,6 +1125,7 @@
 		 */
 		double_rq_lock(this_rq, busiest);
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
+					minus_1_or_zero(busiest->nr_running),
 					imbalance, sd, idle, &all_pinned);
 		double_rq_unlock(this_rq, busiest);
 
@@ -2317,7 +1229,7 @@
 		goto out_balanced;
 	}
 
-	busiest = find_busiest_queue(group, NEWLY_IDLE);
+	busiest = find_busiest_queue(group);
 	if (!busiest) {
 		schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
 		goto out_balanced;
@@ -2332,6 +1244,7 @@
 		/* Attempt to move tasks */
 		double_lock_balance(this_rq, busiest);
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
+					minus_1_or_zero(busiest->nr_running),
 					imbalance, sd, NEWLY_IDLE, NULL);
 		spin_unlock(&busiest->lock);
 	}
@@ -2357,7 +1270,7 @@
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  */
-static inline void idle_balance(int this_cpu, runqueue_t *this_rq)
+void idle_balance(int this_cpu, runqueue_t *this_rq)
 {
 	struct sched_domain *sd;
 
@@ -2412,7 +1325,8 @@
 
 	schedstat_inc(sd, alb_cnt);
 
-	if (move_tasks(target_rq, target_cpu, busiest_rq, 1, sd, SCHED_IDLE, NULL))
+	if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
+			RTPRIO_TO_BIAS_PRIO(100), sd, SCHED_IDLE, NULL))
 		schedstat_inc(sd, alb_pushed);
 	else
 		schedstat_inc(sd, alb_failed);
@@ -2432,15 +1346,14 @@
 /* Don't have all balancing operations going off at once */
 #define CPU_OFFSET(cpu) (HZ * cpu / NR_CPUS)
 
-static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
-			   enum idle_type idle)
+void rebalance_tick(int this_cpu, runqueue_t *this_rq, enum idle_type idle)
 {
 	unsigned long old_load, this_load;
 	unsigned long j = jiffies + CPU_OFFSET(this_cpu);
 	struct sched_domain *sd;
 	int i;
 
-	this_load = this_rq->nr_running * SCHED_LOAD_SCALE;
+	this_load = this_rq->prio_bias * SCHED_LOAD_SCALE;
 	/* Update our load */
 	for (i = 0; i < 3; i++) {
 		unsigned long new_load = this_load;
@@ -2484,22 +1397,13 @@
 		}
 	}
 }
-#else
-/*
- * on UP we do not need to balance between CPUs:
- */
-static inline void rebalance_tick(int cpu, runqueue_t *rq, enum idle_type idle)
-{
-}
-static inline void idle_balance(int cpu, runqueue_t *rq)
-{
-}
 #endif
 
-static inline int wake_priority_sleeper(runqueue_t *rq)
+#ifdef CONFIG_SCHED_SMT
+int wake_priority_sleeper(runqueue_t *rq)
 {
 	int ret = 0;
-#ifdef CONFIG_SCHED_SMT
+
 	spin_lock(&rq->lock);
 	/*
 	 * If an SMT sibling task has been put to sleep for priority
@@ -2510,26 +1414,16 @@
 		ret = 1;
 	}
 	spin_unlock(&rq->lock);
-#endif
+
 	return ret;
 }
+#endif
 
 DEFINE_PER_CPU(struct kernel_stat, kstat);
 
 EXPORT_PER_CPU_SYMBOL(kstat);
 
 /*
- * This is called on clock ticks and on context switches.
- * Bank in p->sched_time the ns elapsed since the last tick or switch.
- */
-static inline void update_cpu_clock(task_t *p, runqueue_t *rq,
-				    unsigned long long now)
-{
-	unsigned long long last = max(p->timestamp, rq->timestamp_last_tick);
-	p->sched_time += now - last;
-}
-
-/*
  * Return current->sched_time plus any more ns on the sched_clock
  * that have not yet been banked.
  */
@@ -2545,22 +1439,6 @@
 }
 
 /*
- * We place interactive tasks back into the active array, if possible.
- *
- * To guarantee that this does not starve expired tasks we ignore the
- * interactivity of a task if the first expired task had to wait more
- * than a 'reasonable' amount of time. This deadline timeout is
- * load-dependent, as the frequency of array switched decreases with
- * increasing number of running tasks. We also ignore the interactivity
- * if a better static_prio task has expired:
- */
-#define EXPIRED_STARVING(rq) \
-	((STARVATION_LIMIT && ((rq)->expired_timestamp && \
-		(jiffies - (rq)->expired_timestamp >= \
-			STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
-			((rq)->curr->static_prio > (rq)->best_expired_prio))
-
-/*
  * Account user cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @hardirq_offset: the offset to subtract from hardirq_count()
@@ -2642,7 +1520,6 @@
  */
 void scheduler_tick(void)
 {
-	int cpu = smp_processor_id();
 	runqueue_t *rq = this_rq();
 	task_t *p = current;
 	unsigned long long now = sched_clock();
@@ -2651,86 +1528,7 @@
 
 	rq->timestamp_last_tick = now;
 
-	if (p == rq->idle) {
-		if (wake_priority_sleeper(rq))
-			goto out;
-		rebalance_tick(cpu, rq, SCHED_IDLE);
-		return;
-	}
-
-	/* Task might have expired already, but not scheduled off yet */
-	if (p->array != rq->active) {
-		set_tsk_need_resched(p);
-		goto out;
-	}
-	spin_lock(&rq->lock);
-	/*
-	 * The task was running during this tick - update the
-	 * time slice counter. Note: we do not update a thread's
-	 * priority until it either goes to sleep or uses up its
-	 * timeslice. This makes it possible for interactive tasks
-	 * to use up their timeslices at their highest priority levels.
-	 */
-	if (rt_task(p)) {
-		/*
-		 * RR tasks need a special form of timeslice management.
-		 * FIFO tasks have no timeslices.
-		 */
-		if ((p->policy == SCHED_RR) && !--p->time_slice) {
-			p->time_slice = task_timeslice(p);
-			p->first_time_slice = 0;
-			set_tsk_need_resched(p);
-
-			/* put it at the end of the queue: */
-			requeue_task(p, rq->active);
-		}
-		goto out_unlock;
-	}
-	if (!--p->time_slice) {
-		dequeue_task(p, rq->active);
-		set_tsk_need_resched(p);
-		p->prio = effective_prio(p);
-		p->time_slice = task_timeslice(p);
-		p->first_time_slice = 0;
-
-		if (!rq->expired_timestamp)
-			rq->expired_timestamp = jiffies;
-		if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
-			enqueue_task(p, rq->expired);
-			if (p->static_prio < rq->best_expired_prio)
-				rq->best_expired_prio = p->static_prio;
-		} else
-			enqueue_task(p, rq->active);
-	} else {
-		/*
-		 * Prevent a too long timeslice allowing a task to monopolize
-		 * the CPU. We do this by splitting up the timeslice into
-		 * smaller pieces.
-		 *
-		 * Note: this does not mean the task's timeslices expire or
-		 * get lost in any way, they just might be preempted by
-		 * another task of equal priority. (one with higher
-		 * priority would have preempted this task already.) We
-		 * requeue this task to the end of the list on this priority
-		 * level, which is in essence a round-robin of tasks with
-		 * equal priority.
-		 *
-		 * This only applies to tasks in the interactive
-		 * delta range with at least TIMESLICE_GRANULARITY to requeue.
-		 */
-		if (TASK_INTERACTIVE(p) && !((task_timeslice(p) -
-			p->time_slice) % TIMESLICE_GRANULARITY(p)) &&
-			(p->time_slice >= TIMESLICE_GRANULARITY(p)) &&
-			(p->array == rq->active)) {
-
-			requeue_task(p, rq->active);
-			set_tsk_need_resched(p);
-		}
-	}
-out_unlock:
-	spin_unlock(&rq->lock);
-out:
-	rebalance_tick(cpu, rq, NOT_IDLE);
+	sched_drvp->tick(p, rq, now);
 }
 
 #ifdef CONFIG_SCHED_SMT
@@ -2741,7 +1539,7 @@
 		resched_task(rq->idle);
 }
 
-static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
+void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
 {
 	struct sched_domain *tmp, *sd = NULL;
 	cpumask_t sibling_map;
@@ -2785,21 +1583,14 @@
 	 */
 }
 
-/*
- * number of 'lost' timeslices this task wont be able to fully
- * utilize, if another task runs on a sibling. This models the
- * slowdown effect of other tasks running on siblings:
- */
-static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd)
-{
-	return p->time_slice * (100 - sd->per_cpu_gain) / 100;
-}
+#define SMT_RT_TIME_CHUNK (100 * HZ / 1000)
+#define dependent_sleeper_trumps(p1, p2, sd) \
+	sched_drvp->dependent_sleeper_trumps(p1, p2, sd)
 
-static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
+int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
 {
 	struct sched_domain *tmp, *sd = NULL;
 	cpumask_t sibling_map;
-	prio_array_t *array;
 	int ret = 0, i;
 	task_t *p;
 
@@ -2826,13 +1617,8 @@
 	 */
 	if (!this_rq->nr_running)
 		goto out_unlock;
-	array = this_rq->active;
-	if (!array->nr_active)
-		array = this_rq->expired;
-	BUG_ON(!array->nr_active);
 
-	p = list_entry(array->queue[sched_find_first_bit(array->bitmap)].next,
-		task_t, run_list);
+	p = sched_drvp->head_of_queue(&this_rq->qu);
 
 	for_each_cpu_mask(i, sibling_map) {
 		runqueue_t *smt_rq = cpu_rq(i);
@@ -2855,13 +1641,13 @@
 			 * With real time tasks we run non-rt tasks only
 			 * per_cpu_gain% of the time.
 			 */
-			if ((jiffies % DEF_TIMESLICE) >
-				(sd->per_cpu_gain * DEF_TIMESLICE / 100))
+			if ((jiffies % SMT_RT_TIME_CHUNK) >
+				(sd->per_cpu_gain * SMT_RT_TIME_CHUNK / 100))
 					ret = 1;
 		} else
 			if (smt_curr->static_prio < p->static_prio &&
 				!TASK_PREEMPTS_CURR(p, smt_rq) &&
-				smt_slice(smt_curr, sd) > task_timeslice(p))
+				dependent_sleeper_trumps(smt_curr, p, sd))
 					ret = 1;
 
 check_smt_task:
@@ -2879,12 +1665,12 @@
 		 * sleep for priority reasons to see if it should run now.
 		 */
 		if (rt_task(p)) {
-			if ((jiffies % DEF_TIMESLICE) >
-				(sd->per_cpu_gain * DEF_TIMESLICE / 100))
+			if ((jiffies % SMT_RT_TIME_CHUNK) >
+				(sd->per_cpu_gain * SMT_RT_TIME_CHUNK / 100))
 					resched_task(smt_curr);
 		} else {
 			if (TASK_PREEMPTS_CURR(p, smt_rq) &&
-				smt_slice(p, sd) > task_timeslice(smt_curr))
+				dependent_sleeper_trumps(p, smt_curr, sd))
 					resched_task(smt_curr);
 			else
 				wakeup_busy_runqueue(smt_rq);
@@ -2895,15 +1681,6 @@
 		spin_unlock(&cpu_rq(i)->lock);
 	return ret;
 }
-#else
-static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
-{
-}
-
-static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
-{
-	return 0;
-}
 #endif
 
 #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
@@ -2943,14 +1720,8 @@
  */
 asmlinkage void __sched schedule(void)
 {
-	long *switch_count;
-	task_t *prev, *next;
+	task_t *prev;
 	runqueue_t *rq;
-	prio_array_t *array;
-	struct list_head *queue;
-	unsigned long long now;
-	unsigned long run_time;
-	int cpu, idx, new_prio;
 
 	/*
 	 * Test if we are atomic.  Since do_exit() needs to call into
@@ -2984,137 +1755,8 @@
 	}
 
 	schedstat_inc(rq, sched_cnt);
-	now = sched_clock();
-	if (likely((long long)(now - prev->timestamp) < NS_MAX_SLEEP_AVG)) {
-		run_time = now - prev->timestamp;
-		if (unlikely((long long)(now - prev->timestamp) < 0))
-			run_time = 0;
-	} else
-		run_time = NS_MAX_SLEEP_AVG;
-
-	/*
-	 * Tasks charged proportionately less run_time at high sleep_avg to
-	 * delay them losing their interactive status
-	 */
-	run_time /= (CURRENT_BONUS(prev) ? : 1);
-
-	spin_lock_irq(&rq->lock);
-
-	if (unlikely(prev->flags & PF_DEAD))
-		prev->state = EXIT_DEAD;
-
-	switch_count = &prev->nivcsw;
-	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
-		switch_count = &prev->nvcsw;
-		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
-				unlikely(signal_pending(prev))))
-			prev->state = TASK_RUNNING;
-		else {
-			if (prev->state == TASK_UNINTERRUPTIBLE)
-				rq->nr_uninterruptible++;
-			deactivate_task(prev, rq);
-		}
-	}
-
-	cpu = smp_processor_id();
-	if (unlikely(!rq->nr_running)) {
-go_idle:
-		idle_balance(cpu, rq);
-		if (!rq->nr_running) {
-			next = rq->idle;
-			rq->expired_timestamp = 0;
-			wake_sleeping_dependent(cpu, rq);
-			/*
-			 * wake_sleeping_dependent() might have released
-			 * the runqueue, so break out if we got new
-			 * tasks meanwhile:
-			 */
-			if (!rq->nr_running)
-				goto switch_tasks;
-		}
-	} else {
-		if (dependent_sleeper(cpu, rq)) {
-			next = rq->idle;
-			goto switch_tasks;
-		}
-		/*
-		 * dependent_sleeper() releases and reacquires the runqueue
-		 * lock, hence go into the idle loop if the rq went
-		 * empty meanwhile:
-		 */
-		if (unlikely(!rq->nr_running))
-			goto go_idle;
-	}
 
-	array = rq->active;
-	if (unlikely(!array->nr_active)) {
-		/*
-		 * Switch the active and expired arrays.
-		 */
-		schedstat_inc(rq, sched_switch);
-		rq->active = rq->expired;
-		rq->expired = array;
-		array = rq->active;
-		rq->expired_timestamp = 0;
-		rq->best_expired_prio = MAX_PRIO;
-	}
-
-	idx = sched_find_first_bit(array->bitmap);
-	queue = array->queue + idx;
-	next = list_entry(queue->next, task_t, run_list);
-
-	if (!rt_task(next) && next->activated > 0) {
-		unsigned long long delta = now - next->timestamp;
-		if (unlikely((long long)(now - next->timestamp) < 0))
-			delta = 0;
-
-		if (next->activated == 1)
-			delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
-
-		array = next->array;
-		new_prio = recalc_task_prio(next, next->timestamp + delta);
-
-		if (unlikely(next->prio != new_prio)) {
-			dequeue_task(next, array);
-			next->prio = new_prio;
-			enqueue_task(next, array);
-		} else
-			requeue_task(next, array);
-	}
-	next->activated = 0;
-switch_tasks:
-	if (next == rq->idle)
-		schedstat_inc(rq, sched_goidle);
-	prefetch(next);
-	prefetch_stack(next);
-	clear_tsk_need_resched(prev);
-	rcu_qsctr_inc(task_cpu(prev));
-
-	update_cpu_clock(prev, rq, now);
-
-	prev->sleep_avg -= run_time;
-	if ((long)prev->sleep_avg <= 0)
-		prev->sleep_avg = 0;
-	prev->timestamp = prev->last_ran = now;
-
-	sched_info_switch(prev, next);
-	if (likely(prev != next)) {
-		next->timestamp = now;
-		rq->nr_switches++;
-		rq->curr = next;
-		++*switch_count;
-
-		prepare_task_switch(rq, next);
-		prev = context_switch(rq, prev, next);
-		barrier();
-		/*
-		 * this_rq must be evaluated again because prev may have moved
-		 * CPUs since it called schedule(), thus the 'rq' on its stack
-		 * frame will be invalid.
-		 */
-		finish_task_switch(this_rq(), prev);
-	} else
-		spin_unlock_irq(&rq->lock);
+	sched_drvp->schedule();
 
 	prev = current;
 	if (unlikely(reacquire_kernel_lock(prev) < 0))
@@ -3528,9 +2170,7 @@
 void set_user_nice(task_t *p, long nice)
 {
 	unsigned long flags;
-	prio_array_t *array;
 	runqueue_t *rq;
-	int old_prio, new_prio, delta;
 
 	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
 		return;
@@ -3549,28 +2189,8 @@
 		p->static_prio = NICE_TO_PRIO(nice);
 		goto out_unlock;
 	}
-	array = p->array;
-	if (array) {
-		dequeue_task(p, array);
-		dec_prio_bias(rq, p->static_prio);
-	}
 
-	old_prio = p->prio;
-	new_prio = NICE_TO_PRIO(nice);
-	delta = new_prio - old_prio;
-	p->static_prio = NICE_TO_PRIO(nice);
-	p->prio += delta;
-
-	if (array) {
-		enqueue_task(p, array);
-		inc_prio_bias(rq, p->static_prio);
-		/*
-		 * If the task increased its priority or is running and
-		 * lowered its priority, then reschedule its CPU:
-		 */
-		if (delta < 0 || (delta > 0 && task_running(rq, p)))
-			resched_task(rq->curr);
-	}
+	sched_drvp->set_normal_task_nice(p, nice);
 out_unlock:
 	task_rq_unlock(rq, &flags);
 }
@@ -3684,15 +2304,16 @@
 }
 
 /* Actually do priority change: must hold rq lock. */
-static void __setscheduler(struct task_struct *p, int policy, int prio)
+void __setscheduler(struct task_struct *p, int policy, int prio)
 {
-	BUG_ON(p->array);
+	BUG_ON(task_is_queued(p));
 	p->policy = policy;
 	p->rt_priority = prio;
 	if (policy != SCHED_NORMAL)
 		p->prio = MAX_RT_PRIO-1 - p->rt_priority;
 	else
 		p->prio = p->static_prio;
+	set_bias_prio(p);
 }
 
 /**
@@ -3706,8 +2327,7 @@
 		       struct sched_param *param)
 {
 	int retval;
-	int oldprio, oldpolicy = -1;
-	prio_array_t *array;
+	int oldpolicy = -1;
 	unsigned long flags;
 	runqueue_t *rq;
 
@@ -3763,24 +2383,9 @@
 		task_rq_unlock(rq, &flags);
 		goto recheck;
 	}
-	array = p->array;
-	if (array)
-		deactivate_task(p, rq);
-	oldprio = p->prio;
-	__setscheduler(p, policy, param->sched_priority);
-	if (array) {
-		__activate_task(p, rq);
-		/*
-		 * Reschedule if we are currently running on this runqueue and
-		 * our priority decreased, or if we are not currently running on
-		 * this runqueue and our priority is higher than the current's
-		 */
-		if (task_running(rq, p)) {
-			if (p->prio > oldprio)
-				resched_task(rq->curr);
-		} else if (TASK_PREEMPTS_CURR(p, rq))
-			resched_task(rq->curr);
-	}
+
+	sched_drvp->setscheduler(p, policy, param->sched_priority);
+
 	task_rq_unlock(rq, &flags);
 	return 0;
 }
@@ -4039,48 +2644,7 @@
  */
 asmlinkage long sys_sched_yield(void)
 {
-	runqueue_t *rq = this_rq_lock();
-	prio_array_t *array = current->array;
-	prio_array_t *target = rq->expired;
-
-	schedstat_inc(rq, yld_cnt);
-	/*
-	 * We implement yielding by moving the task into the expired
-	 * queue.
-	 *
-	 * (special rule: RT tasks will just roundrobin in the active
-	 *  array.)
-	 */
-	if (rt_task(current))
-		target = rq->active;
-
-	if (array->nr_active == 1) {
-		schedstat_inc(rq, yld_act_empty);
-		if (!rq->expired->nr_active)
-			schedstat_inc(rq, yld_both_empty);
-	} else if (!rq->expired->nr_active)
-		schedstat_inc(rq, yld_exp_empty);
-
-	if (array != target) {
-		dequeue_task(current, array);
-		enqueue_task(current, target);
-	} else
-		/*
-		 * requeue_task is cheaper so perform that if possible.
-		 */
-		requeue_task(current, array);
-
-	/*
-	 * Since we are going to call schedule() anyway, there's
-	 * no need to preempt or enable interrupts:
-	 */
-	__release(rq->lock);
-	_raw_spin_unlock(&rq->lock);
-	preempt_enable_no_resched();
-
-	schedule();
-
-	return 0;
+	return sched_drvp->sys_yield();
 }
 
 static inline void __cond_resched(void)
@@ -4164,8 +2728,7 @@
  */
 void __sched yield(void)
 {
-	set_current_state(TASK_RUNNING);
-	sys_sched_yield();
+	sched_drvp->yield();
 }
 
 EXPORT_SYMBOL(yield);
@@ -4394,9 +2957,7 @@
 	runqueue_t *rq = cpu_rq(cpu);
 	unsigned long flags;
 
-	idle->sleep_avg = 0;
-	idle->array = NULL;
-	idle->prio = MAX_PRIO;
+	sched_drvp->init_idle(idle, cpu);
 	idle->state = TASK_RUNNING;
 	idle->cpus_allowed = cpumask_of_cpu(cpu);
 	set_task_cpu(idle, cpu);
@@ -4511,21 +3072,10 @@
 	if (!cpu_isset(dest_cpu, p->cpus_allowed))
 		goto out;
 
-	set_task_cpu(p, dest_cpu);
-	if (p->array) {
-		/*
-		 * Sync timestamp with rq_dest's before activating.
-		 * The same thing could be achieved by doing this step
-		 * afterwards, and pretending it was a local activate.
-		 * This way is cleaner and logically correct.
-		 */
-		p->timestamp = p->timestamp - rq_src->timestamp_last_tick
-				+ rq_dest->timestamp_last_tick;
-		deactivate_task(p, rq_src);
-		activate_task(p, rq_dest, 0);
-		if (TASK_PREEMPTS_CURR(p, rq_dest))
-			resched_task(rq_dest->curr);
-	}
+	if (task_is_queued(p))
+		sched_drvp->migrate_queued_task(p, dest_cpu);
+	else
+		set_task_cpu(p, dest_cpu);
 
 out:
 	double_rq_unlock(rq_src, rq_dest);
@@ -4674,7 +3224,6 @@
 {
 	int cpu = smp_processor_id();
 	runqueue_t *rq = this_rq();
-	struct task_struct *p = rq->idle;
 	unsigned long flags;
 
 	/* cpu has to be offline */
@@ -4685,9 +3234,7 @@
 	 */
 	spin_lock_irqsave(&rq->lock, flags);
 
-	__setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
-	/* Add idle task to _front_ of it's priority queue */
-	__activate_idle_task(p, rq);
+	sched_drvp->set_select_idle_first(rq);
 
 	spin_unlock_irqrestore(&rq->lock, flags);
 }
@@ -4706,7 +3253,7 @@
 	mmdrop(mm);
 }
 
-static void migrate_dead(unsigned int dead_cpu, task_t *tsk)
+void migrate_dead(unsigned int dead_cpu, task_t *tsk)
 {
 	struct runqueue *rq = cpu_rq(dead_cpu);
 
@@ -4731,20 +3278,9 @@
 }
 
 /* release_task() removes task from tasklist, so we won't find dead tasks. */
-static void migrate_dead_tasks(unsigned int dead_cpu)
+static inline void migrate_dead_tasks(unsigned int dead_cpu)
 {
-	unsigned arr, i;
-	struct runqueue *rq = cpu_rq(dead_cpu);
-
-	for (arr = 0; arr < 2; arr++) {
-		for (i = 0; i < MAX_PRIO; i++) {
-			struct list_head *list = &rq->arrays[arr].queue[i];
-			while (!list_empty(list))
-				migrate_dead(dead_cpu,
-					     list_entry(list->next, task_t,
-							run_list));
-		}
-	}
+	sched_drvp->migrate_dead_tasks(dead_cpu);
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
@@ -4792,9 +3328,7 @@
 		rq->migration_thread = NULL;
 		/* Idle task back to normal (off runqueue, low prio) */
 		rq = task_rq_lock(rq->idle, &flags);
-		deactivate_task(rq->idle, rq);
-		rq->idle->static_prio = MAX_PRIO;
-		__setscheduler(rq->idle, SCHED_NORMAL, 0);
+		sched_drvp->set_select_idle_last(rq);
 		migrate_dead_tasks(cpu);
 		task_rq_unlock(rq, &flags);
 		migrate_nr_uninterruptible(rq);
@@ -5597,20 +4131,26 @@
 		&& addr < (unsigned long)__sched_text_end);
 }
 
+void set_oom_time_slice(struct task_struct *p, unsigned long t)
+{
+	sched_drvp->set_oom_time_slice(p, t);
+}
+
 void __init sched_init(void)
 {
 	runqueue_t *rq;
-	int i, j, k;
+	int i;
+
+	sched_drvp->sched_init();
 
 	for (i = 0; i < NR_CPUS; i++) {
-		prio_array_t *array;
+#ifdef CONFIG_SMP
+		int j;
+#endif
 
 		rq = cpu_rq(i);
 		spin_lock_init(&rq->lock);
 		rq->nr_running = 0;
-		rq->active = rq->arrays;
-		rq->expired = rq->arrays + 1;
-		rq->best_expired_prio = MAX_PRIO;
 
 #ifdef CONFIG_SMP
 		rq->sd = NULL;
@@ -5623,17 +4163,10 @@
 #endif
 		atomic_set(&rq->nr_iowait, 0);
 
-		for (j = 0; j < 2; j++) {
-			array = rq->arrays + j;
-			for (k = 0; k < MAX_PRIO; k++) {
-				INIT_LIST_HEAD(array->queue + k);
-				__clear_bit(k, array->bitmap);
-			}
-			// delimiter for bitsearch
-			__set_bit(MAX_PRIO, array->bitmap);
-		}
+		sched_drvp->init_runqueue_queue(&rq->qu);
 	}
 
+	set_bias_prio(&init_task);
 	/*
 	 * The boot idle thread does lazy MMU switching as well:
 	 */
@@ -5675,27 +4208,11 @@
 void normalize_rt_tasks(void)
 {
 	struct task_struct *p;
-	prio_array_t *array;
-	unsigned long flags;
-	runqueue_t *rq;
 
 	read_lock_irq(&tasklist_lock);
 	for_each_process (p) {
-		if (!rt_task(p))
-			continue;
-
-		rq = task_rq_lock(p, &flags);
-
-		array = p->array;
-		if (array)
-			deactivate_task(p, task_rq(p));
-		__setscheduler(p, SCHED_NORMAL, 0);
-		if (array) {
-			__activate_task(p, task_rq(p));
-			resched_task(rq->curr);
-		}
-
-		task_rq_unlock(rq, &flags);
+		if (rt_task(p))
+			sched_drvp->normalize_rt_task(p);
 	}
 	read_unlock_irq(&tasklist_lock);
 }
diff -urN oldtree/kernel/sched_cpustats.c newtree/kernel/sched_cpustats.c
--- oldtree/kernel/sched_cpustats.c	1970-01-01 00:00:00.000000000 +0000
+++ newtree/kernel/sched_cpustats.c	2006-02-04 18:03:15.686969792 +0000
@@ -0,0 +1,656 @@
+/*
+ *  kernel/sched_cpustats.c
+ *
+ *  Kernel high resolution cpu statistics for use by schedulers
+ *
+ *  Copyright (C) 2004  Aurema Pty Ltd
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/sched_pvt.h>
+#include <linux/module.h>
+#include <linux/sched_spa.h>
+
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+DEFINE_PER_CPU(struct runq_cpustats, cpustats_runqs);
+
+static unsigned int runq_stats_inited = 0;
+
+void init_runq_cpustats(unsigned int cpu)
+{
+	struct runq_cpustats *csrq = &per_cpu(cpustats_runqs, cpu);
+
+	csrq->total_delay = 0;
+	csrq->total_sinbin = 0;
+	csrq->total_rt_delay = 0;
+	csrq->total_intr_delay = 0;
+	csrq->total_rt_intr_delay = 0;
+	csrq->total_fork_delay = 0;
+	csrq->total_latency = 0;
+	cpu_rq(cpu)->timestamp_last_tick = INITIAL_CPUSTATS_TIMESTAMP;
+	runq_stats_inited++;
+}
+#endif
+
+#ifdef CONFIG_SMP
+unsigned long long adjusted_sched_clock(const task_t *p)
+{
+	return sched_clock() + (task_rq(p)->timestamp_last_tick - this_rq()->timestamp_last_tick);
+}
+#endif
+
+void initialize_cpustats(struct task_struct *p, unsigned long long now)
+{
+	TASK_CPUSTATS(p).avg_sleep_per_cycle = 0;
+	TASK_CPUSTATS(p).avg_ia_sleep_per_cycle = 0;
+	TASK_CPUSTATS(p).avg_delay_per_cycle = 0;
+	TASK_CPUSTATS(p).avg_cpu_per_cycle = 0;
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+	TASK_CPUSTATS(p).avg_latency = 0;
+	TASK_CPUSTATS(p).total_sleep = 0;
+	TASK_CPUSTATS(p).total_ia_sleep = 0;
+	TASK_CPUSTATS(p).total_delay = 0;
+	TASK_CPUSTATS(p).total_sinbin = 0;
+	TASK_CPUSTATS(p).total_cpu = 0;
+	TASK_CPUSTATS(p).intr_wake_ups = 0;
+	TASK_CPUSTATS(p).total_latency = 0;
+#endif
+	TASK_CPUSTATS(p).total_wake_ups = 0;
+	TASK_CPUSTATS(p).avg_cycle_length = 0;
+#ifdef CPUSCHED_AUTODETECT_MEDIA
+	TASK_CPUSTATS(p).avg_wake_interval = 0;
+	TASK_CPUSTATS(p).var_wake_interval = 0;
+	TASK_CPUSTATS(p).last_wake = p->timestamp = now;
+#endif
+	TASK_CPUSTATS(p).flags = CPUSTATS_JUST_FORKED_FL;
+}
+
+void delta_sleep_cpustats(struct task_struct *p, unsigned long long now)
+{
+	unsigned long long delta;
+
+	/* sched_clock() is not guaranteed monotonic */
+	if (now <= p->timestamp) {
+		p->timestamp = now;
+		return;
+	}
+
+	delta = now - p->timestamp;
+	p->timestamp = now;
+	TASK_CPUSTATS(p).avg_sleep_per_cycle += delta;
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+	TASK_CPUSTATS(p).total_sleep += delta;
+#endif
+	if (task_is_in_ia_sleep(p)) {
+		TASK_CPUSTATS(p).avg_ia_sleep_per_cycle += delta;
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+		TASK_CPUSTATS(p).total_ia_sleep += delta;
+#endif
+	}
+}
+
+void delta_cpu_cpustats(struct task_struct *p, unsigned long long now)
+{
+	unsigned long long delta;
+
+	/* sched_clock() is not guaranteed monotonic */
+	if (now <= p->timestamp) {
+		p->timestamp = now;
+		return;
+	}
+
+	delta = now - p->timestamp;
+	p->timestamp = now;
+	TASK_CPUSTATS(p).avg_cpu_per_cycle += delta;
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+	TASK_CPUSTATS(p).total_cpu += delta;
+#endif
+}
+
+void delta_delay_cpustats(struct task_struct *p, unsigned long long now)
+{
+	unsigned long long delta;
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+	struct runq_cpustats *rq_stats = &per_cpu(cpustats_runqs, task_cpu(p));
+#endif
+
+	/* sched_clock() is not guaranteed monotonic */
+	if (now <= p->timestamp)
+		delta = 0;
+	else
+		delta = now - p->timestamp;
+
+	p->timestamp = now;
+	TASK_CPUSTATS(p).avg_delay_per_cycle += delta;
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+	TASK_CPUSTATS(p).total_delay += delta;
+	rq_stats->total_delay += delta;
+	if (task_is_sinbinned(p)) {
+		TASK_CPUSTATS(p).total_sinbin += delta;
+		rq_stats->total_sinbin += delta;
+	} else if (rt_task(p)) { /* rt tasks are never sinbinned */
+		rq_stats->total_rt_delay += delta;
+		if (TASK_CPUSTATS(p).flags & CPUSTATS_WOKEN_FOR_INTR_FL)
+			rq_stats->total_rt_intr_delay += delta;
+	}
+	if (TASK_CPUSTATS(p).flags & CPUSTATS_JUST_WOKEN_FL) {
+		rq_stats->total_latency += delta;
+		TASK_CPUSTATS(p).total_latency += delta;
+		if (likely(TASK_CPUSTATS(p).total_wake_ups > 1))
+			TASK_CPUSTATS(p).avg_latency += delta;
+		else
+			TASK_CPUSTATS(p).avg_latency = SCHED_AVG_REAL(delta);
+		if (TASK_CPUSTATS(p).flags & CPUSTATS_WOKEN_FOR_INTR_FL)
+			rq_stats->total_intr_delay += delta;
+#endif
+		TASK_CPUSTATS(p).flags &=
+			 ~(CPUSTATS_WOKEN_FOR_INTR_FL|CPUSTATS_JUST_WOKEN_FL);
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+	}
+#endif
+}
+
+#define SCHED_AVG_ALPHA ((1 << SCHED_AVG_OFFSET) - 1)
+static inline void sched_avg_first_sample(unsigned long long *valp)
+{
+	*valp <<= SCHED_AVG_OFFSET;
+}
+
+static inline void apply_sched_avg_decay(unsigned long long *valp)
+{
+	*valp *= SCHED_AVG_ALPHA;
+	*valp >>= SCHED_AVG_OFFSET;
+}
+
+static inline void decay_cpustats_for_cycle(struct task_struct *p)
+{
+	if (unlikely(TASK_CPUSTATS(p).flags & CPUSTATS_JUST_FORKED_FL)) {
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+		struct runq_cpustats *rq_stats = &per_cpu(cpustats_runqs, task_cpu(p));
+
+		rq_stats->total_fork_delay += TASK_CPUSTATS(p).total_delay;
+#endif
+		/* set the average to be equal to the first sample */
+		sched_avg_first_sample(&TASK_CPUSTATS(p).avg_sleep_per_cycle);
+		sched_avg_first_sample(&TASK_CPUSTATS(p).avg_ia_sleep_per_cycle);
+		sched_avg_first_sample(&TASK_CPUSTATS(p).avg_delay_per_cycle);
+		sched_avg_first_sample(&TASK_CPUSTATS(p).avg_cpu_per_cycle);
+		TASK_CPUSTATS(p).flags &= ~CPUSTATS_JUST_FORKED_FL;
+	}
+	else {
+		apply_sched_avg_decay(&TASK_CPUSTATS(p).avg_sleep_per_cycle);
+		apply_sched_avg_decay(&TASK_CPUSTATS(p).avg_ia_sleep_per_cycle);
+		apply_sched_avg_decay(&TASK_CPUSTATS(p).avg_delay_per_cycle);
+		apply_sched_avg_decay(&TASK_CPUSTATS(p).avg_cpu_per_cycle);
+	}
+	/*
+	 * ia_sleepiness is an estimate of the task's sleep rate waiting
+	 * interactively if it suffered no delays
+	 */
+	TASK_CPUSTATS(p).avg_cycle_length = TASK_CPUSTATS(p).avg_sleep_per_cycle +
+		TASK_CPUSTATS(p).avg_cpu_per_cycle;
+	/*
+	 * take short cut and avoid possible divide by zero below
+	 * NB avg_sleep_per_cycle >= avg_ia_sleep_per_cycle
+	 */
+	if (TASK_CPUSTATS(p).avg_sleep_per_cycle == 0)
+		TASK_CPUSTATS(p).ia_sleepiness = 0;
+	else
+		TASK_CPUSTATS(p).ia_sleepiness =  calc_proportion(TASK_CPUSTATS(p).avg_ia_sleep_per_cycle,
+							       TASK_CPUSTATS(p).avg_cycle_length);
+	TASK_CPUSTATS(p).avg_cycle_length += TASK_CPUSTATS(p).avg_delay_per_cycle;
+	/* take short cut and avoid possible divide by zero below */
+	if (TASK_CPUSTATS(p).avg_cpu_per_cycle == 0)
+		TASK_CPUSTATS(p).cpu_usage_rate = 0;
+	else
+		TASK_CPUSTATS(p).cpu_usage_rate =  calc_proportion(TASK_CPUSTATS(p).avg_cpu_per_cycle,
+								   TASK_CPUSTATS(p).avg_cycle_length);
+}
+
+void update_cpustats_at_wake_up(struct task_struct *p, unsigned long long now)
+{
+#ifdef CPUSCHED_AUTODETECT_MEDIA
+	unsigned long long interval, abs_diff;
+	unsigned long long cai = SCHED_AVG_RND(TASK_CPUSTATS(p).avg_wake_interval);
+#endif
+
+	delta_sleep_cpustats(p, now);
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+	apply_sched_avg_decay(&TASK_CPUSTATS(p).avg_latency);
+#endif
+	TASK_CPUSTATS(p).flags |= CPUSTATS_JUST_WOKEN_FL;
+	if (in_interrupt()) {
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+		TASK_CPUSTATS(p).intr_wake_ups++;
+#endif
+		TASK_CPUSTATS(p).flags |= CPUSTATS_WOKEN_FOR_INTR_FL;
+	}
+	TASK_CPUSTATS(p).total_wake_ups++;
+	decay_cpustats_for_cycle(p);
+
+#ifdef CPUSCHED_AUTODETECT_MEDIA
+	if (likely(now > TASK_CPUSTATS(p).last_wake)) {
+		interval = now - TASK_CPUSTATS(p).last_wake;
+		/*
+		 * Calculating variances and standard deviation properly will
+		 * be too expensive as it involves squaring and square roots
+		 * so we'll use a running average of the absolute difference
+		 * between the mean and the samples
+		 */
+		if (interval > cai)
+			abs_diff = interval - cai;
+		else
+			abs_diff = cai - interval;
+	}
+	else {
+		interval = 0;
+		abs_diff = cai;
+	}
+#endif
+
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+	apply_sched_avg_decay(&TASK_CPUSTATS(p).avg_latency);
+#endif
+#ifdef CPUSCHED_AUTODETECT_MEDIA
+	TASK_CPUSTATS(p).last_wake = now;
+	apply_sched_avg_decay(&TASK_CPUSTATS(p).avg_wake_interval);
+	apply_sched_avg_decay(&TASK_CPUSTATS(p).var_wake_interval);
+
+	/*
+	 * To reduce/eliminate ramp up in the estimation of the running
+	 * averages we'll initialize the average to the first observed value
+	 */
+	if (likely(TASK_CPUSTATS(p).total_wake_ups > 2)) {
+		TASK_CPUSTATS(p).avg_wake_interval += interval;
+		TASK_CPUSTATS(p).var_wake_interval += abs_diff;
+
+		return;
+	}
+	if (TASK_CPUSTATS(p).total_wake_ups == 2) {
+		TASK_CPUSTATS(p).avg_wake_interval += interval;
+		TASK_CPUSTATS(p).var_wake_interval = SCHED_AVG_REAL(abs_diff);
+
+		return;
+	}
+	if (TASK_CPUSTATS(p).total_wake_ups == 1) {
+		/* No FREE starts :-) */
+		TASK_CPUSTATS(p).avg_wake_interval = SCHED_AVG_REAL(interval);
+		TASK_CPUSTATS(p).var_wake_interval = 0;
+	}
+#endif
+}
+
+void update_cpustats_at_end_of_ts(struct task_struct *p, unsigned long long now)
+{
+	delta_cpu_cpustats(p, now);
+	decay_cpustats_for_cycle(p);
+}
+
+#ifndef task_is_queued
+#define task_is_queued(p) (!list_empty(&(p)->run_list))
+#endif
+
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+int get_task_cpustats(struct task_struct *tsk, struct task_cpustats *stats, unsigned long long *timestamp)
+{
+	int on_runq = 0;
+	int on_cpu = 0;
+	int is_sinbinned = 0;
+	int in_ia_sleep = 0;
+	unsigned long long rq_timestamp;
+	unsigned long flags;
+	struct runqueue *rq;
+
+	if (!runq_stats_inited)
+		return -ENOSYS;
+
+	rq = task_rq_lock(tsk, &flags);
+
+	rq_timestamp = rq->timestamp_last_tick;
+	*stats  = TASK_CPUSTATS(tsk);
+	*timestamp = tsk->timestamp;
+	is_sinbinned = task_is_sinbinned(tsk);
+	if ((on_runq = task_is_queued(tsk)))
+		on_cpu = task_running(rq, tsk);
+	else
+		in_ia_sleep = task_is_in_ia_sleep(tsk);
+
+	task_rq_unlock(rq, &flags);
+
+	/*
+	 * Update values to the previous tick (only)
+	 */
+	if (rq_timestamp > *timestamp) {
+		unsigned long long delta = rq_timestamp - *timestamp;
+
+		*timestamp = rq_timestamp;
+		if (on_cpu) {
+			stats->total_cpu += delta;
+			stats->avg_cpu_per_cycle += delta;
+		} else if (on_runq || is_sinbinned) {
+			stats->total_delay += delta;
+			stats->avg_delay_per_cycle += delta;
+			if (is_sinbinned)
+				stats->total_sinbin += delta;
+			if (stats->flags & CPUSTATS_JUST_WOKEN_FL) {
+				stats->total_latency += delta;
+				stats->avg_latency += delta;
+			}
+		} else {
+			stats->total_sleep += delta;
+			stats->avg_sleep_per_cycle += delta;
+			if (in_ia_sleep) {
+				stats->avg_ia_sleep_per_cycle += delta;
+				stats->total_ia_sleep += delta;
+			}
+		}
+		stats->avg_cycle_length += delta;
+		stats->cpu_usage_rate = calc_proportion(stats->avg_cpu_per_cycle, stats->avg_cycle_length);
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(get_task_cpustats);
+
+/*
+ * Get scheduling statistics for the nominated CPU
+ */
+int get_cpu_cpustats(unsigned int cpu, struct cpu_cpustats *stats)
+{
+	int idle;
+	unsigned long long idle_timestamp;
+	struct runqueue *rq = cpu_rq(cpu);
+	struct runq_cpustats *csrq;
+
+	if (!runq_stats_inited)
+		return -ENOSYS;
+
+	/*
+	 * No need to crash the whole machine if they've asked for stats for
+	 * a non-existent CPU.
+	 */
+	if ((csrq = cpu_runq_cpustats(cpu)) == NULL)
+		return -EFAULT;
+
+	local_irq_disable();
+	spin_lock(&rq->lock);
+	idle = rq->curr == rq->idle;
+#ifdef CONFIG_SMP
+	if (rq->timestamp_last_tick > rq->curr->timestamp)
+		stats->timestamp = rq->timestamp_last_tick;
+	else
+#endif
+		stats->timestamp = rq->curr->timestamp;
+	idle_timestamp = rq->idle->timestamp;
+	if (idle_timestamp > stats->timestamp)
+		stats->timestamp = idle_timestamp;
+	stats->total_idle = rq->idle->sdu.spa.cpustats.total_cpu;
+	stats->total_busy = rq->idle->sdu.spa.cpustats.total_delay;
+	stats->total_delay = csrq->total_delay;
+	stats->total_rt_delay = csrq->total_rt_delay;
+	stats->total_intr_delay = csrq->total_intr_delay;
+	stats->total_rt_intr_delay = csrq->total_rt_intr_delay;
+	stats->total_fork_delay = csrq->total_fork_delay;
+	stats->total_sinbin = csrq->total_sinbin;
+	stats->total_latency = csrq->total_latency;
+	stats->nr_switches = rq->nr_switches;
+	spin_unlock_irq(&rq->lock);
+
+	/*
+	 * Update idle/busy time to the current tick
+	 */
+	if (idle)
+		stats->total_idle += (stats->timestamp - idle_timestamp);
+	else
+		stats->total_busy += (stats->timestamp - idle_timestamp);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(get_cpu_cpustats);
+
+int task_sched_cpustats(struct task_struct *p, char *buffer)
+{
+	struct task_cpustats stats;
+	unsigned long nvcsw, nivcsw; /* context switch counts */
+	int result;
+	unsigned long long timestamp;
+
+	read_lock(&tasklist_lock);
+	result = get_task_cpustats(p, &stats, &timestamp);
+	nvcsw = p->nvcsw;
+	nivcsw = p-> nivcsw;
+	read_unlock(&tasklist_lock);
+	if (result)
+		return sprintf(buffer, "Data unavailable\n");
+	return sprintf(buffer,
+		"%llu (%llu) %llu (%llu) %llu (%llu) %llu (%llu) %llu (%llu) %llu %llu %llu %lu %lu @ %llu\n",
+		stats.total_sleep,
+		SCHED_AVG_RND(stats.avg_sleep_per_cycle),
+		stats.total_ia_sleep,
+		SCHED_AVG_RND(stats.avg_ia_sleep_per_cycle),
+		stats.total_cpu,
+		SCHED_AVG_RND(stats.avg_cpu_per_cycle),
+		stats.total_delay,
+		SCHED_AVG_RND(stats.avg_delay_per_cycle),
+		stats.total_latency,
+		SCHED_AVG_RND(stats.avg_latency),
+		stats.total_sinbin,
+		stats.total_wake_ups,
+		stats.intr_wake_ups,
+		nvcsw, nivcsw,
+		timestamp);
+}
+
+int show_cpustats(char *page)
+{
+	int i;
+	int len = 0;
+	int avail = 1;
+	struct cpu_cpustats total = {0, };
+	unsigned long long timestamp = (unsigned long long)-1LL;
+
+	for_each_online_cpu(i) {
+		struct cpu_cpustats stats;
+
+		if (get_cpu_cpustats(i, &stats) != 0) {
+			avail = 0;
+			break;
+		}
+		if (stats.timestamp < timestamp)
+			timestamp = stats.timestamp;
+		total.total_idle += stats.total_idle;
+		total.total_busy += stats.total_busy;
+		total.total_delay += stats.total_delay;
+		total.total_rt_delay += stats.total_rt_delay;
+		total.total_intr_delay += stats.total_intr_delay;
+		total.total_rt_intr_delay += stats.total_rt_intr_delay;
+		total.total_fork_delay += stats.total_fork_delay;
+		total.total_sinbin += stats.total_sinbin;
+		total.total_latency += stats.total_latency;
+		total.nr_switches += stats.nr_switches;
+	}
+	if (avail)
+		len = sprintf(page, "%llu %llu %llu %llu %llu %llu %llu %llu %llu %llu @ %llu\n",
+			total.total_idle,
+			total.total_busy,
+			total.total_delay,
+			total.total_intr_delay,
+			total.total_rt_delay,
+			total.total_rt_intr_delay,
+			total.total_fork_delay,
+			total.total_latency,
+			total.total_sinbin,
+			total.nr_switches,
+			timestamp);
+	else
+		len = sprintf(page, "Data unavailable\n");
+
+	return len;
+}
+#endif
+
+static inline unsigned long long sched_div_64(unsigned long long a, unsigned long long b)
+{
+#if BITS_PER_LONG < 64
+	/*
+	 * Assume that there's no 64 bit divide available
+	 */
+	if (a < b)
+		return 0;
+	/*
+	 * Scale down until b less than 32 bits so that we can do
+	 * a divide using do_div()
+	 */
+	while (b > ULONG_MAX) { a >>= 1; b >>= 1; }
+
+	(void)do_div(a, (unsigned long)b);
+
+	return a;
+#else
+	return a / b;
+#endif
+}
+
+unsigned long long cpustats_avg_in_jiffies(unsigned long long avg)
+{
+	return sched_div_64(SCHED_AVG_RND(avg) * HZ, 1000000000);
+}
+
+unsigned long long msecs_to_nsecs_avg(unsigned long long msecs)
+{
+	return SCHED_AVG_REAL(msecs * 1000000);
+}
+
+unsigned long long nsecs_avg_to_msecs(unsigned long long ansecs)
+{
+	return SCHED_AVG_RND(sched_div_64(ansecs, 1000000));
+}
+
+/*
+ * CPU usage rate is estimated as a proportion of a CPU using fixed denominator
+ * rational numbers.
+ */
+#define PROPORTION_OVERFLOW ((1ULL << (64 - PROPORTION_OFFSET)) - 1)
+
+/*
+ * Convert a / b to a proportion in the range 0 to PROPORTION_ONE
+ * Requires a <= b or may get a divide by zero exception
+ */
+unsigned long calc_proportion(unsigned long long a, unsigned long long b)
+{
+	if (unlikely(a == b))
+		return PROPORTION_ONE;
+
+	while (a > PROPORTION_OVERFLOW) { a >>= 1; b >>= 1; }
+
+	return sched_div_64(a << PROPORTION_OFFSET, b);
+}
+
+/*
+ * Find the square root of a proportion
+ * Require: x <= PROPORTION_ONE
+ */
+unsigned long proportion_sqrt(unsigned long x)
+{
+	/* use 64 bits to avoid overflow */
+	unsigned long long res, b, ulx;
+	int bshift;
+
+	/*
+	 * Take shortcut AND prevent overflow
+	 */
+	if (x == PROPORTION_ONE)
+		return PROPORTION_ONE;
+
+	res = 0;
+	b = (1UL << (PROPORTION_OFFSET - 1));
+	bshift = PROPORTION_OFFSET - 1;
+	ulx = x << PROPORTION_OFFSET;
+
+	for (; ulx && b; b >>= 1, bshift--) {
+		unsigned long long temp = (((res << 1) + b) << bshift);
+
+		if (ulx >= temp) {
+			res += b;
+                        ulx -= temp;
+		}
+        }
+
+	return res;
+}
+
+/* WANT: proportion_to_ppt(ppt_to_proportion(x)) == x
+ */
+unsigned long proportion_to_ppt(unsigned long proportion)
+{
+	return ((unsigned long long)proportion * 2001ULL) >> (PROPORTION_OFFSET + 1);
+}
+
+unsigned long ppt_to_proportion(unsigned long ppt)
+{
+	return sched_div_64((unsigned long long)ppt * PROPORTION_ONE, 1000);
+}
+
+unsigned long avg_cpu_usage_rate(const struct task_struct *p)
+{
+	return TASK_CPUSTATS(p).cpu_usage_rate;
+}
+
+unsigned long avg_sleep_rate(const struct task_struct *p)
+{
+	/* take short cut and avoid possible divide by zero below */
+	if (TASK_CPUSTATS(p).avg_sleep_per_cycle == 0)
+		return 0;
+
+	return calc_proportion(TASK_CPUSTATS(p).avg_sleep_per_cycle, TASK_CPUSTATS(p).avg_cycle_length);
+}
+
+unsigned long avg_cpu_delay_rate(const struct task_struct *p)
+{
+	/* take short cut and avoid possible divide by zero below */
+	if (TASK_CPUSTATS(p).avg_delay_per_cycle == 0)
+		return 0;
+
+	return calc_proportion(TASK_CPUSTATS(p).avg_delay_per_cycle, TASK_CPUSTATS(p).avg_cycle_length);
+}
+
+unsigned long delay_in_jiffies_for_usage(const struct task_struct *p, unsigned long rur)
+{
+	unsigned long long acpc_jiffies, abl_jiffies, res;
+
+	if (rur == 0)
+		return ULONG_MAX;
+
+	acpc_jiffies = cpustats_avg_in_jiffies(TASK_CPUSTATS(p).avg_cpu_per_cycle);
+	abl_jiffies = cpustats_avg_in_jiffies(TASK_CPUSTATS(p).avg_sleep_per_cycle) + acpc_jiffies;
+
+	/*
+	 * we have to be careful about overflow and/or underflow
+	 */
+	while (unlikely(acpc_jiffies > PROPORTION_OVERFLOW)) {
+		acpc_jiffies >>= 1;
+		if (unlikely((rur >>= 1) == 0))
+			return ULONG_MAX;
+	}
+
+	res = sched_div_64(acpc_jiffies << PROPORTION_OFFSET, rur);
+	if (res > abl_jiffies)
+		return res - abl_jiffies;
+	else
+		return 0;
+}
diff -urN oldtree/kernel/sched_drv.c newtree/kernel/sched_drv.c
--- oldtree/kernel/sched_drv.c	1970-01-01 00:00:00.000000000 +0000
+++ newtree/kernel/sched_drv.c	2006-02-04 18:03:15.686969792 +0000
@@ -0,0 +1,153 @@
+/*
+ *  kernel/sched_drv.c
+ *
+ *  Kernel scheduler device implementation
+ */
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/syscalls.h>
+#include <linux/sched_drv.h>
+#include <linux/sched_pvt.h>
+
+/*
+ * All private per scheduler entries in task_struct are defined as
+ * separate structs and placed into the cpusched union in task_struct.
+ */
+
+/* Ingosched */
+#ifdef CONFIG_CPUSCHED_INGO
+extern const struct sched_drv ingo_sched_drv;
+#endif
+
+/* Staircase */
+#ifdef CONFIG_CPUSCHED_STAIRCASE
+extern const struct sched_drv staircase_sched_drv;
+#endif
+
+/* Single priority array (SPA) schedulers */
+#ifdef CONFIG_CPUSCHED_SPA_NF
+extern const struct sched_drv spa_nf_sched_drv;
+#endif
+#ifdef CONFIG_CPUSCHED_SPA_WS
+extern const struct sched_drv spa_ws_sched_drv;
+#endif
+#ifdef CONFIG_CPUSCHED_SPA_SVR
+extern const struct sched_drv spa_svr_sched_drv;
+#endif
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+extern const struct sched_drv zaphod_sched_drv;
+#endif
+
+/* Nicksched */
+#ifdef CONFIG_CPUSCHED_NICK
+extern const struct sched_drv nick_sched_drv;
+#endif
+
+const struct sched_drv *sched_drvp =
+#if defined(CONFIG_CPUSCHED_DEFAULT_INGO)
+	&ingo_sched_drv;
+#elif defined(CONFIG_CPUSCHED_DEFAULT_STAIRCASE)
+	&staircase_sched_drv;
+#elif defined(CONFIG_CPUSCHED_DEFAULT_SPA_NF)
+	&spa_nf_sched_drv;
+#elif defined(CONFIG_CPUSCHED_DEFAULT_SPA_WS)
+	&spa_ws_sched_drv;
+#elif defined(CONFIG_CPUSCHED_DEFAULT_SPA_SVR)
+	&spa_svr_sched_drv;
+#elif defined(CONFIG_CPUSCHED_DEFAULT_ZAPHOD)
+	&zaphod_sched_drv;
+#elif defined(CONFIG_CPUSCHED_DEFAULT_NICK)
+	&nick_sched_drv;
+#else
+	NULL;
+#error "You must have at least 1 cpu scheduler selected"
+#endif
+
+extern struct task_struct base_init_task;
+
+#define CPUSCHED_CHECK_SELECT(drv) \
+do { \
+	if (!strcmp(str, (drv).name)) { \
+		sched_drvp = &(drv); \
+		return 1; \
+	} \
+} while (0)
+
+static int __init sched_drv_setup(char *str)
+{
+#if defined(CONFIG_CPUSCHED_INGO)
+	CPUSCHED_CHECK_SELECT(ingo_sched_drv);
+#endif
+#if defined(CONFIG_CPUSCHED_STAIRCASE)
+	CPUSCHED_CHECK_SELECT(staircase_sched_drv);
+#endif
+#if defined(CONFIG_CPUSCHED_SPA_NF)
+	CPUSCHED_CHECK_SELECT(spa_nf_sched_drv);
+#endif
+#if defined(CONFIG_CPUSCHED_SPA_WS)
+	CPUSCHED_CHECK_SELECT(spa_ws_sched_drv);
+#endif
+#if defined(CONFIG_CPUSCHED_SPA_SVR)
+	CPUSCHED_CHECK_SELECT(spa_svr_sched_drv);
+#endif
+#if defined(CONFIG_CPUSCHED_ZAPHOD)
+	CPUSCHED_CHECK_SELECT(zaphod_sched_drv);
+#endif
+#if defined(CONFIG_CPUSCHED_NICK)
+	CPUSCHED_CHECK_SELECT(nick_sched_drv);
+#endif
+	return 1;
+}
+
+__setup ("cpusched=", sched_drv_setup);
+
+static ssize_t show_attribute(struct kobject *kobj, struct attribute *attr, char *page)
+{
+	struct sched_drv_sysfs_entry *e = to_sched_drv_sysfs_entry(attr);
+
+	if (!e->show)
+		return 0;
+
+	return e->show(page);
+}
+
+static ssize_t store_attribute(struct kobject *kobj, struct attribute *attr, const char *page, size_t length)
+{
+	struct sched_drv_sysfs_entry *e = to_sched_drv_sysfs_entry(attr);
+
+	if (!e->show)
+		return -EBADF;
+
+	return e->store(page, length);
+}
+
+struct sysfs_ops sched_drv_sysfs_ops = {
+	.show = show_attribute,
+	.store = store_attribute,
+};
+
+static struct kobj_type sched_drv_ktype = {
+	.sysfs_ops = &sched_drv_sysfs_ops,
+	.default_attrs = NULL,
+};
+
+static struct kobject sched_drv_kobj = {
+	.ktype = &sched_drv_ktype
+};
+
+decl_subsys(cpusched, NULL, NULL);
+
+void __init sched_drv_sysfs_init(void)
+{
+	if (subsystem_register(&cpusched_subsys) == 0) {
+		if (sched_drvp->attrs == NULL)
+			return;
+
+		sched_drv_ktype.default_attrs = sched_drvp->attrs;
+		strncpy(sched_drv_kobj.name, sched_drvp->name, KOBJ_NAME_LEN);
+		sched_drv_kobj.kset = &cpusched_subsys.kset;
+		(void)kobject_register(&sched_drv_kobj);
+ 	}
+}
diff -urN oldtree/kernel/sched_spa.c newtree/kernel/sched_spa.c
--- oldtree/kernel/sched_spa.c	1970-01-01 00:00:00.000000000 +0000
+++ newtree/kernel/sched_spa.c	2006-02-04 18:03:15.688969488 +0000
@@ -0,0 +1,1306 @@
+/*
+ *  kernel/sched_spa.c
+ *  Copyright (C) 1991-2005  Linus Torvalds
+ *
+ *  2005-01-11 Single priority array scheduler (no frills)
+ */
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/rcupdate.h>
+#include <linux/security.h>
+#include <linux/cpu.h>
+#include <linux/hardirq.h>
+#include <linux/sched_pvt.h>
+#include <linux/sched_runq.h>
+#include <linux/module.h>
+#include <linux/sched_spa.h>
+
+static inline void adjust_timestamp(struct task_struct *tsk, struct runqueue *tsk_rq, struct runqueue *other_rq)
+{
+#ifdef CONFIG_SMP
+	tsk->timestamp += (tsk_rq->timestamp_last_tick - other_rq->timestamp_last_tick);
+#endif
+}
+
+extern const struct sched_drv spa_nf_sched_drv;
+extern struct sched_spa_child spa_nf_child;
+
+struct sched_spa_child *spa_sched_child = &spa_nf_child;
+
+/*
+ * Some of our exported functions could be called when other schedulers are
+ * in charge with catastrophic results if not handled properly.
+ * So we need to know whether one of our schedulers is in charge
+ */
+static int spa_in_charge = 0;
+
+void spa_init_runqueue_queue(union runqueue_queue *qup)
+{
+	int k;
+
+	for (k = 0; k < SPA_IDLE_PRIO; k++) {
+		qup->spa.queue[k].prio = k;
+		INIT_LIST_HEAD(&qup->spa.queue[k].list);
+	}
+	bitmap_zero(qup->spa.bitmap, SPA_NUM_PRIO_SLOTS);
+	// delimiter for bitsearch
+	__set_bit(SPA_IDLE_PRIO, qup->spa.bitmap);
+	qup->spa.next_prom_due = ULONG_MAX;
+	qup->spa.pcount = 0;
+}
+
+void spa_set_oom_time_slice(struct task_struct *p, unsigned long t)
+{
+	p->sdu.spa.time_slice = t;
+}
+
+/*
+ * These are the 'tuning knobs' of the scheduler:
+ *
+ * Default configurable timeslice is 40 msecs, maximum configurable
+ * timeslice is 1000 msecs and minumum configurable timeslice is 1 jiffy.
+ * Timeslices get renewed on task creation, on wake up and after they expire.
+ */
+#define MIN_TIMESLICE		1
+#define DEF_TIMESLICE		((40 * HZ / 1000) ? : MIN_TIMESLICE)
+#define MAX_TIMESLICE		((1000 * HZ / 1000) ? : MIN_TIMESLICE)
+
+static unsigned long time_slice = DEF_TIMESLICE;
+static unsigned long sched_rr_time_slice = DEF_TIMESLICE;
+
+/*
+ * Background tasks may have longer time slices as compensation
+ */
+static unsigned int bgnd_time_slice_multiplier = 1;
+
+#define TASK_PREEMPTS_CURR(p, rq) \
+	((p)->prio < (rq)->curr->prio)
+
+static inline unsigned int normal_task_timeslice(const task_t *p)
+{
+	if (unlikely(p->prio == SPA_BGND_PRIO))
+		return time_slice * bgnd_time_slice_multiplier;
+
+	return time_slice;
+}
+
+static inline unsigned int hard_cap_timeslice(const task_t *p)
+{
+	unsigned int cpu_avg = cpustats_avg_in_jiffies(TASK_CPUSTATS(p).avg_cpu_per_cycle);
+
+	return (cpu_avg / 2) ? (cpu_avg / 2) : 1;
+}
+
+/*
+ * task_timeslice() is the interface that is used internally by the scheduler.
+ */
+static inline unsigned int task_timeslice(const task_t *p)
+{
+	if (rt_task(p))
+		return sched_rr_time_slice;
+
+	return normal_task_timeslice(p);
+}
+
+unsigned int spa_task_timeslice(const task_t *p)
+{
+	return task_timeslice(p);
+}
+
+/*
+ * Adding/removing a task to/from a priority array:
+ */
+static void dequeue_task(struct task_struct *p, struct spa_runqueue_queue *rqq)
+{
+	/*
+	 * Initialize after removal from the list so that list_empty() works
+	 * as a means for testing whether the task is runnable
+	 * If p is the last task in this priority slot then slotp will be
+	 * a pointer to the head of the list in the runqueue structure
+	 * NB we can't use p->prio as is for bitmap as task may have
+	 * been promoted so we update it.
+	 */
+	struct list_head *slotp = p->run_list.next;
+
+	list_del_init(&p->run_list);
+	if (list_empty(slotp)) {
+		p->prio = list_entry(slotp, struct spa_prio_slot, list)->prio;
+		__clear_bit(p->prio, rqq->bitmap);
+	}
+}
+
+static void enqueue_task(struct task_struct *p, struct spa_runqueue_queue *rqq)
+{
+	sched_info_queued(p);
+	list_add_tail(&p->run_list, &rqq->queue[p->prio].list);
+	__set_bit(p->prio, rqq->bitmap);
+}
+
+/*
+ * Used by the migration code - we pull tasks from the head of the
+ * remote queue so we want these tasks to show up at the head of the
+ * local queue:
+ */
+static inline void enqueue_task_head(struct task_struct *p, struct spa_runqueue_queue *rqq)
+{
+	list_add(&p->run_list, &rqq->queue[p->prio].list);
+	__set_bit(p->prio, rqq->bitmap);
+}
+
+/*
+ * Control value for promotion mechanism NB this controls severity of "nice"
+ */
+unsigned long base_prom_interval = ((DEF_TIMESLICE * 15) / 10);
+
+#define PROMOTION_FLOOR MAX_RT_PRIO
+#define PROMOTION_CEILING SPA_BGND_PRIO
+#define in_promotable_range(prio) \
+	((prio) > PROMOTION_FLOOR && (prio) < PROMOTION_CEILING)
+
+static inline void restart_promotions(struct runqueue *rq)
+{
+	rq->qu.spa.next_prom_due = jiffies + base_prom_interval;
+	rq->qu.spa.pcount = 2;
+}
+
+#define check_restart_promotions(rq) \
+do { \
+	if (rq->nr_running == 2) \
+		restart_promotions(rq); \
+} while (0)
+
+/* make it (relatively) easy to switch to using a timer */
+static inline void stop_promotions(struct runqueue *rq)
+{
+}
+
+#define check_stop_promotions(rq) \
+do { \
+	if (rq->nr_running == 1) \
+		stop_promotions(rq); \
+} while (0)
+
+/*
+ * Are promotions due?
+ */
+static inline int promotions_due(const struct runqueue *rq)
+{
+	return unlikely(time_after_eq(jiffies, rq->qu.spa.next_prom_due));
+}
+
+static inline void update_curr_prio_for_promotion(struct runqueue *rq)
+{
+	if (likely(in_promotable_range(rq->curr->prio)))
+		rq->curr->prio--;
+}
+
+/*
+ * Assume spa_runq lock is NOT already held.
+ */
+static void do_promotions(struct runqueue *rq)
+{
+	int idx = PROMOTION_FLOOR;
+
+	spin_lock(&rq->lock);
+	if (unlikely(rq->nr_running < 2))
+		goto out_unlock;
+	if (rq->nr_running > rq->qu.spa.pcount) {
+		rq->qu.spa.pcount++;
+		goto out_unlock;
+	}
+	for (;;) {
+		int new_prio;
+		idx = find_next_bit(rq->qu.spa.bitmap, PROMOTION_CEILING, idx + 1);
+		if (idx > (PROMOTION_CEILING - 1))
+			break;
+
+		new_prio = idx - 1;
+		__list_splice(&rq->qu.spa.queue[idx].list, rq->qu.spa.queue[new_prio].list.prev);
+		INIT_LIST_HEAD(&rq->qu.spa.queue[idx].list);
+		__clear_bit(idx, rq->qu.spa.bitmap);
+		__set_bit(new_prio, rq->qu.spa.bitmap);
+	}
+	/* The only prio field that needs update is the current task's */
+	update_curr_prio_for_promotion(rq);
+	rq->qu.spa.pcount = 2;
+out_unlock:
+	rq->qu.spa.next_prom_due = jiffies + base_prom_interval;
+	spin_unlock(&rq->lock);
+}
+
+static inline unsigned int spa_soft_cap_penalty(const task_t *p)
+{
+	unsigned long rd = delay_in_jiffies_for_usage(p, p->sdu.spa.min_cpu_rate_cap);
+
+	return (rd + base_prom_interval) / base_prom_interval;
+}
+
+int spa_pb_soft_cap_priority(const task_t *p, int base_prio)
+{
+	struct spa_runqueue_queue *rqq = &task_rq(p)->qu.spa;
+	int prio = find_next_bit(rqq->bitmap, SPA_IDLE_PRIO, base_prio);
+
+	if (prio == SPA_IDLE_PRIO)
+		prio = base_prio;
+
+	prio += spa_soft_cap_penalty(p);
+
+	if (prio > SPA_SOFT_CAP_PRIO)
+		return SPA_SOFT_CAP_PRIO;
+
+	return prio;
+}
+
+static inline int spa_nf_soft_cap_effective_prio(const struct task_struct *p)
+{
+	return spa_pb_soft_cap_priority(p, p->static_prio);
+}
+
+static inline int spa_nf_normal_effective_prio(const struct task_struct *p)
+{
+	return p->static_prio;
+}
+
+/*
+ * effective_prio - return the priority that is based on the static
+ * priority
+ */
+#define should_run_in_background(p) \
+	(task_is_bgnd(p) && !((p)->sdu.spa.flags & SPAF_UISLEEP))
+#define exceeding_cap(p) \
+	(TASK_CPUSTATS(p).cpu_usage_rate > (p)->sdu.spa.min_cpu_rate_cap)
+static inline int effective_prio(const task_t *p)
+{
+	if (rt_task(p))
+		return p->prio;
+
+	if (task_is_bgnd(p))
+		return (p->sdu.spa.flags & SPAF_UISLEEP) ?
+			SPA_SOFT_CAP_PRIO : SPA_BGND_PRIO;
+
+	/* using the minimum of the hard and soft caps makes things smoother */
+	if (unlikely(exceeding_cap(p)))
+		return  spa_sched_child->soft_cap_effective_prio(p);
+
+	return spa_sched_child->normal_effective_prio(p);
+}
+
+/*
+ * __activate_task - move a task to the runqueue.
+ */
+static inline void __activate_task(task_t *p, runqueue_t *rq)
+{
+	struct spa_runqueue_queue *rqq = &rq->qu.spa;
+
+	enqueue_task(p, rqq);
+	inc_nr_running(p, rq);
+	check_restart_promotions(rq);
+}
+
+static inline void do_nothing_to_task(task_t *p) {}
+
+/*
+ * activate_task - move a task to the runqueue and do priority recalculation
+ */
+static void activate_task(task_t *p, runqueue_t *rq)
+{
+	if (rt_task(p))
+		p->sdu.spa.time_slice = sched_rr_time_slice;
+	else {
+		spa_sched_child->reassess_at_activation(p);
+		p->prio = effective_prio(p);
+		/* hard capped tasks that never use their full time slice evade
+		 * the sinbin so we need to reduce the size of their time slice
+		 * to reduce the size of the hole that they slip through.
+		 * It would be unwise to close it completely.
+		 */
+		if (unlikely(TASK_CPUSTATS(p).cpu_usage_rate > p->sdu.spa.cpu_rate_hard_cap))
+			p->sdu.spa.time_slice = hard_cap_timeslice(p);
+		else
+			p->sdu.spa.time_slice = normal_task_timeslice(p);
+	}
+	p->sdu.spa.flags &= ~(SPAF_UISLEEP | SPAF_NONIASLEEP);
+	__activate_task(p, rq);
+}
+
+/*
+ * deactivate_task - remove a task from the runqueue.
+ */
+static inline void deactivate_task(struct task_struct *p, runqueue_t *rq)
+{
+	dec_nr_running(p, rq);
+	dequeue_task(p, &rq->qu.spa);
+	check_stop_promotions(rq);
+}
+
+/*
+ * Check to see if p preempts rq->curr and resched if it does. In compute
+ * mode we do not preempt for at least cache_delay and set rq->preempted.
+ */
+static inline void preempt_if_warranted(task_t *p, struct runqueue *rq)
+{
+	if (TASK_PREEMPTS_CURR(p, rq))
+		resched_task(rq->curr);
+}
+
+/***
+ * try_to_wake_up - wake up a thread
+ * @p: the to-be-woken-up thread
+ * @old_state: the task's state before being woken
+ * @sync: do a synchronous wakeup?
+ * @rq: The run queue on which the task is to be placed (already locked)
+ */
+void spa_wake_up_task(struct task_struct *p, struct runqueue *rq, unsigned int old_state, int sync)
+{
+	/*
+	 * Tasks waking from (declared) non interactive sleep will not receive
+	 * any interactive bonus.
+	 */
+	if (old_state & TASK_NONINTERACTIVE)
+		p->sdu.spa.flags |= SPAF_NONIASLEEP;
+
+	/*
+	 * This is the end of one scheduling cycle and the start of the next
+	 */
+	update_cpustats_at_wake_up(p, adjusted_sched_clock(p));
+
+	if (old_state == TASK_UNINTERRUPTIBLE)
+		rq->nr_uninterruptible--;
+
+	/*
+	 * Sync wakeups (i.e. those types of wakeups where the waker
+	 * has indicated that it will leave the CPU in short order)
+	 * don't trigger a preemption, if the woken up task will run on
+	 * this cpu. (in this case the 'I will reschedule' promise of
+	 * the waker guarantees that the freshly woken up task is going
+	 * to be considered on this CPU.)
+	 */
+	activate_task(p, rq);
+	if (!sync || (rq != this_rq()))
+		preempt_if_warranted(p, rq);
+}
+
+/*
+ * Perform scheduler related setup for a newly forked process p.
+ * p is forked by current.
+ */
+void spa_fork(task_t *p)
+{
+	unsigned long long now;
+
+	init_timer(&p->sdu.spa.sinbin_timer);
+	p->sdu.spa.sinbin_timer.data = (unsigned long) p;
+	/*
+	 * Give the task a new timeslice.
+	 */
+	p->sdu.spa.time_slice = task_timeslice(p);
+	local_irq_disable();
+	now = sched_clock();
+	local_irq_enable();
+	/*
+	 * Initialize the scheduling statistics
+	 */
+	initialize_cpustats(p, now);
+	spa_sched_child->fork_extras(p);
+}
+
+/*
+ * wake_up_new_task - wake up a newly created task for the first time.
+ *
+ * This function will do some initial scheduler statistics housekeeping
+ * that must be done for every newly created context, then puts the task
+ * on the runqueue and wakes it.
+ */
+#ifdef CONFIG_SMP
+#define rq_is_this_rq(rq) (likely((rq) == this_rq()))
+#else
+#define rq_is_this_rq(rq) 1
+#endif
+void spa_wake_up_new_task(task_t * p, unsigned long clone_flags)
+{
+	unsigned long flags;
+	runqueue_t *rq;
+
+	rq = task_rq_lock(p, &flags);
+
+	BUG_ON(p->state != TASK_RUNNING);
+
+	if (rq_is_this_rq(rq)) {
+		if (!(clone_flags & CLONE_VM)) {
+			/*
+			 * The VM isn't cloned, so we're in a good position to
+			 * do child-runs-first in anticipation of an exec. This
+			 * usually avoids a lot of COW overhead.
+			 */
+			if (unlikely(!task_is_queued(current))) {
+				p->prio = effective_prio(p);
+				__activate_task(p, rq);
+			} else {
+				p->prio = current->prio;
+				list_add_tail(&p->run_list, &current->run_list);
+				inc_nr_running(p, rq);
+				check_restart_promotions(rq);
+			}
+			set_need_resched();
+		} else {
+			p->prio = effective_prio(p);
+			/* Run child last */
+			__activate_task(p, rq);
+		}
+	} else {
+		p->prio = effective_prio(p);
+		__activate_task(p, rq);
+		preempt_if_warranted(p, rq);
+	}
+
+	task_rq_unlock(rq, &flags);
+}
+
+/*
+ * (Optionally) log scheduler statistics at exit.
+ */
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+static int log_at_exit = 0;
+#endif
+void spa_exit(task_t * p)
+{
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+	struct task_cpustats stats;
+	unsigned long long timestamp;
+
+	if (!log_at_exit)
+		return;
+
+	get_task_cpustats(p, &stats, &timestamp);
+	/* printk() lines should carry an explicit log level */
+	printk(KERN_INFO "SCHED_EXIT[%d] (%s) %llu %llu %llu %llu %llu %llu %llu %llu %lu %lu @ %llu\n",
+		p->pid, p->comm,
+	        stats.total_sleep, stats.total_ia_sleep, stats.total_cpu,
+	        stats.total_delay, stats.total_latency,
+		stats.total_sinbin, stats.total_wake_ups, stats.intr_wake_ups,
+		p->nvcsw, p->nivcsw, timestamp);
+#endif
+}
+
+/*
+ * pull_task - move a task from a remote runqueue to the local runqueue.
+ * Both runqueues must be locked.
+ */
+static inline
+void pull_task(runqueue_t *src_rq, task_t *p, runqueue_t *this_rq, int this_cpu)
+{
+	dequeue_task(p, &src_rq->qu.spa);
+	dec_nr_running(p, src_rq);
+	check_stop_promotions(src_rq);
+	/* not the current task on its cpu so increment delay stats */
+	delta_delay_cpustats(p, adjusted_sched_clock(p));
+	set_task_cpu(p, this_cpu);
+	adjust_timestamp(p, this_rq, src_rq);
+	inc_nr_running(p, this_rq);
+	enqueue_task(p, &this_rq->qu.spa);
+	check_restart_promotions(this_rq);
+	preempt_if_warranted(p, this_rq);
+}
+
+#ifdef CONFIG_SMP
+/*
+ * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq,
+ * as part of a balancing operation within "domain". Returns the number of
+ * tasks moved.
+ *
+ * Called with both runqueues locked.
+ */
+int spa_move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
+		      unsigned long max_nr_move, long max_bias_move,
+		      struct sched_domain *sd, enum idle_type idle,
+		      int *all_pinned)
+{
+	struct list_head *head, *curr;
+	int idx, pulled = 0, pinned = 0;
+	struct task_struct *tmp;
+
+	if (max_nr_move == 0 || max_bias_move == 0)
+		goto out;
+
+	pinned = 1;
+
+	/* Start searching at priority 0: */
+	idx = 0;
+skip_bitmap:
+	if (!idx)
+		idx = sched_find_first_bit(busiest->qu.spa.bitmap);
+	else
+		idx = find_next_bit(busiest->qu.spa.bitmap, SPA_IDLE_PRIO, idx);
+	if (idx >= SPA_IDLE_PRIO)
+		goto out;
+
+	head = &busiest->qu.spa.queue[idx].list;
+	curr = head->prev;
+skip_queue:
+	tmp = list_entry(curr, task_t, run_list);
+	/* Take the opportunity to update task's prio field just in
+	 * in case it's been promoted.  This makes sure that the task doesn't
+	 * lose any promotions it has received during the move.
+	 */
+	tmp->prio = idx;
+
+	curr = curr->prev;
+
+	if (tmp->bias_prio > max_bias_move ||
+	    !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+
+#ifdef CONFIG_SCHEDSTATS
+	if (task_hot(tmp, busiest->timestamp_last_tick, sd))
+		schedstat_inc(sd, lb_hot_gained[idle]);
+#endif
+
+	pull_task(busiest, tmp, this_rq, this_cpu);
+	pulled++;
+	max_bias_move -= tmp->bias_prio;
+
+	/*
+	 * We only want to steal up to the prescribed number of tasks
+	 * and the prescribed amount of biased load.
+	 */
+	if (pulled < max_nr_move && max_bias_move > 0) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+out:
+	if (all_pinned)
+		*all_pinned = pinned;
+
+	return pulled;
+}
+#endif
+
+static inline void spa_nf_runq_data_tick(unsigned int cpu, unsigned long numr)
+{
+}
+
+/*
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled.
+ */
+void spa_tick(struct task_struct *p, struct runqueue *rq, unsigned long long now)
+{
+	int cpu = smp_processor_id();
+	struct spa_runqueue_queue *rqq = &rq->qu.spa;
+
+	spa_sched_child->runq_data_tick(cpu, rq->nr_running);
+
+	if (p == rq->idle) {
+		if (wake_priority_sleeper(rq))
+			goto out;
+		rebalance_tick(cpu, rq, SCHED_IDLE);
+		return;
+	}
+
+	/*
+	 * SCHED_FIFO tasks never run out of timeslice.
+	 */
+	if (unlikely(p->policy == SCHED_FIFO))
+		goto out;
+
+	spin_lock(&rq->lock);
+	/*
+	 * The task was running during this tick - update the
+	 * time slice counter. Note: we do not update a thread's
+	 * priority until it either goes to sleep or uses up its
+	 * timeslice. This makes it possible for interactive tasks
+	 * to use up their timeslices at their highest priority levels.
+	 */
+	if (!--p->sdu.spa.time_slice) {
+		dequeue_task(p, rqq);
+		set_tsk_need_resched(p);
+		update_cpustats_at_end_of_ts(p, now);
+		if (unlikely(p->policy == SCHED_RR))
+			p->sdu.spa.time_slice = sched_rr_time_slice;
+		else {
+			spa_sched_child->reassess_at_end_of_ts(p);
+			p->prio = effective_prio(p);
+			p->sdu.spa.time_slice = normal_task_timeslice(p);
+		}
+		enqueue_task(p, rqq);
+	}
+	spin_unlock(&rq->lock);
+out:
+	if (unlikely(promotions_due(rq)))
+		do_promotions(rq);
+	rebalance_tick(cpu, rq, NOT_IDLE);
+}
+
+/*
+ * Take an active task off the runqueue for a short while
+ * Assumes that the task's runqueue is already locked
+ */
+static inline void put_task_in_sinbin(struct task_struct *p, unsigned long durn)
+{
+	if (durn == 0)
+		return;
+	deactivate_task(p, task_rq(p));
+	p->sdu.spa.flags |= SPAF_SINBINNED;
+	p->sdu.spa.sinbin_timer.expires = jiffies + durn;
+	add_timer(&p->sdu.spa.sinbin_timer);
+}
+
+/*
+ * Release a task from the sinbin
+ */
+void sinbin_release_fn(unsigned long arg)
+{
+	unsigned long flags;
+	struct task_struct *p = (struct task_struct*)arg;
+	struct runqueue *rq = task_rq_lock(p, &flags);
+
+	/*
+	 * Sinbin time is included in delay time
+	 */
+	delta_delay_cpustats(p, adjusted_sched_clock(p));
+	p->sdu.spa.flags &= ~SPAF_SINBINNED;
+	TASK_CPUSTATS(p).cpu_usage_rate = p->sdu.spa.cpu_rate_hard_cap;
+	if (!rt_task(p)) {
+		spa_sched_child->reassess_at_sinbin_release(p);
+		p->prio = effective_prio(p);
+	}
+	__activate_task(p, rq);
+
+	task_rq_unlock(rq, &flags);
+}
+
+static inline int task_needs_sinbinning(const struct task_struct *p)
+{
+	/* PF_EXITING is a task_struct::flags bit, not an SPAF_* bit in
+	 * sdu.spa.flags, so it must be tested against p->flags. */
+	return unlikely(TASK_CPUSTATS(p).cpu_usage_rate > p->sdu.spa.cpu_rate_hard_cap) &&
+		(p->state == TASK_RUNNING) && !rt_task(p) &&
+		((p->flags & PF_EXITING) == 0);
+}
+
+static inline unsigned long required_sinbin_durn(const struct task_struct *p)
+{
+	return delay_in_jiffies_for_usage(p, p->sdu.spa.cpu_rate_hard_cap);
+}
+
+#ifdef CONFIG_SCHED_SMT
+struct task_struct *spa_head_of_queue(union runqueue_queue *rqq)
+{
+	struct task_struct *tmp;
+	int idx = sched_find_first_bit(rqq->spa.bitmap);
+
+	tmp = list_entry(rqq->spa.queue[idx].list.next, task_t, run_list);
+	/* Take the opportunity to update task's prio field just
+	 * in case it's been promoted.
+	 */
+	tmp->prio = idx;
+
+	return tmp;
+}
+
+/* maximum expected priority difference for SCHED_NORMAL tasks */
+#define MAX_SN_PD (SPA_IDLE_PRIO - MAX_RT_PRIO)
+int spa_dependent_sleeper_trumps(const struct task_struct *p1,
+	const struct task_struct *p2, struct sched_domain *sd)
+{
+	int dp = p2->static_prio - p1->static_prio;
+
+	if ((dp > 0) && (sd->per_cpu_gain < 100)) {
+		unsigned long rq_ts_rm;
+
+		rq_ts_rm = ((MAX_SN_PD - dp) * time_slice * sd->per_cpu_gain) /
+			(100 * MAX_SN_PD);
+
+		return p1->sdu.spa.time_slice > rq_ts_rm;
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * schedule() is the main scheduler function.
+ */
+void spa_schedule(void)
+{
+	long *switch_count;
+	int cpu, idx;
+	struct task_struct *prev = current, *next;
+	struct runqueue *rq = this_rq();
+	unsigned long long now = sched_clock();
+	struct list_head *queue;
+
+	spin_lock_irq(&rq->lock);
+
+	if (unlikely(current->flags & PF_DEAD))
+		current->state = EXIT_DEAD;
+	/*
+	 * if entering off of a kernel preemption go straight
+	 * to picking the next task.
+	 */
+	switch_count = &prev->nivcsw;
+	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+		switch_count = &prev->nvcsw;
+		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+				unlikely(signal_pending(prev))))
+			prev->state = TASK_RUNNING;
+		else {
+			if (prev->state == TASK_UNINTERRUPTIBLE) {
+				rq->nr_uninterruptible++;
+				prev->sdu.spa.flags |= SPAF_UISLEEP;
+			}
+			deactivate_task(prev, rq);
+		}
+	}
+
+	update_cpu_clock(prev, rq, now);
+	delta_cpu_cpustats(prev, now);
+	if (task_needs_sinbinning(prev) && likely(!signal_pending(prev)))
+		put_task_in_sinbin(prev, required_sinbin_durn(prev));
+
+	cpu = smp_processor_id();
+	if (unlikely(!rq->nr_running)) {
+go_idle:
+		idle_balance(cpu, rq);
+		if (!rq->nr_running) {
+			next = rq->idle;
+			wake_sleeping_dependent(cpu, rq);
+			/*
+			 * wake_sleeping_dependent() might have released
+			 * the runqueue, so break out if we got new
+			 * tasks meanwhile:
+			 */
+			if (!rq->nr_running)
+				goto switch_tasks;
+		}
+	} else {
+		if (dependent_sleeper(cpu, rq)) {
+			next = rq->idle;
+			goto switch_tasks;
+		}
+		/*
+		 * dependent_sleeper() releases and reacquires the runqueue
+		 * lock, hence go into the idle loop if the rq went
+		 * empty meanwhile:
+		 */
+		if (unlikely(!rq->nr_running))
+			goto go_idle;
+	}
+
+	idx = sched_find_first_bit(rq->qu.spa.bitmap);
+	queue = &rq->qu.spa.queue[idx].list;
+	next = list_entry(queue->next, task_t, run_list);
+	/* Take the opportunity to update task's prio field just in
+	 * in case it's been promoted.
+	 */
+	next->prio = idx;
+switch_tasks:
+	if (next == rq->idle)
+		schedstat_inc(rq, sched_goidle);
+	prefetch(next);
+	prefetch_stack(next);
+	clear_tsk_need_resched(prev);
+	rcu_qsctr_inc(task_cpu(prev));
+
+	prev->last_ran = now;
+
+	sched_info_switch(prev, next);
+	if (likely(prev != next)) {
+		delta_delay_cpustats(next, now);
+		rq->nr_switches++;
+		rq->curr = next;
+		++*switch_count;
+
+		prepare_task_switch(rq, next);
+		prev = context_switch(rq, prev, next);
+		barrier();
+		/*
+		 * this_rq must be evaluated again because prev may have moved
+		 * CPUs since it called schedule(), thus the 'rq' on its stack
+		 * frame will be invalid.
+		 */
+		finish_task_switch(this_rq(), prev);
+	} else
+		spin_unlock_irq(&rq->lock);
+}
+
+void spa_set_normal_task_nice(task_t *p, long nice)
+{
+	int old_static_prio, delta;
+	struct runqueue *rq = task_rq(p);
+	struct spa_runqueue_queue *rqq = &rq->qu.spa;
+
+	old_static_prio = p->static_prio;
+	p->static_prio = NICE_TO_PRIO(nice);
+	spa_sched_child->reassess_at_renice(p);
+
+	if (p->prio == SPA_BGND_PRIO)
+		return;
+
+	delta = p->static_prio - old_static_prio;
+	if (delta == 0)
+		return;
+
+	if (task_is_queued(p)) {
+		dec_prio_bias(rq, p);
+		set_bias_prio(p);
+		inc_prio_bias(rq, p);
+		dequeue_task(p, rqq);
+		/* This check is done here rather than outside the if statement
+		 * as there is a need to avoid a race condition with p->prio in
+		 * dequeue_task()
+		 */
+		if (unlikely(delta > (SPA_SOFT_CAP_PRIO - p->prio)))
+			delta = (SPA_SOFT_CAP_PRIO - p->prio);
+		else if (unlikely(delta < (MAX_RT_PRIO - p->prio)))
+			delta = (MAX_RT_PRIO - p->prio);
+		p->prio += delta;
+		enqueue_task(p, rqq);
+		/*
+		 * If the task increased its priority or is running and
+		 * lowered its priority, then reschedule its CPU:
+		 */
+		if (delta < 0 || (delta > 0 && task_running(rq, p)))
+			resched_task(rq->curr);
+	} else {
+		set_bias_prio(p);
+		/* See comment in other branch of if statement */
+		if (unlikely(delta > (SPA_SOFT_CAP_PRIO - p->prio)))
+			delta = (SPA_SOFT_CAP_PRIO - p->prio);
+		else if (unlikely(delta < (MAX_RT_PRIO - p->prio)))
+			delta = (MAX_RT_PRIO - p->prio);
+		p->prio += delta;
+	}
+}
+
+/*
+ * setscheduler - change the scheduling policy and/or RT priority of a thread.
+ */
+void spa_setscheduler(task_t *p, int policy, int prio)
+{
+	int oldprio;
+	int queued;
+	runqueue_t *rq = task_rq(p);
+
+	queued = task_is_queued(p);
+	if (queued)
+		deactivate_task(p, rq);
+	oldprio = p->prio;
+	__setscheduler(p, policy, prio);
+	if (queued) {
+		__activate_task(p, rq);
+		/*
+		 * Reschedule if we are currently running on this runqueue and
+		 * our priority decreased, or if we are not currently running on
+		 * this runqueue and our priority is higher than the current's
+		 */
+		if (task_running(rq, p)) {
+			if (p->prio > oldprio)
+				resched_task(rq->curr);
+		} else
+			preempt_if_warranted(p, rq);
+	}
+}
+
+/*
+ * Require: 0 <= new_cap <= 1000
+ */
+int set_cpu_rate_cap(struct task_struct *p, unsigned long new_cap)
+{
+	int is_allowed;
+	unsigned long flags;
+	struct runqueue *rq;
+	long delta;
+
+	/* this function could be called when other schedulers are in
+	 * charge (with catastrophic results) so let's check
+	 */
+	if (!spa_in_charge)
+		return -ENOSYS;
+
+	if (new_cap > 1000)
+		return -EINVAL;
+	is_allowed = capable(CAP_SYS_NICE);
+	/*
+	 * We have to be careful, if called from /proc code,
+	 * the task might be in the middle of scheduling on another CPU.
+	 */
+	new_cap = ppt_to_proportion(new_cap);
+	rq = task_rq_lock(p, &flags);
+	delta = new_cap - p->sdu.spa.cpu_rate_cap;
+	if (!is_allowed) {
+		/*
+		 * Ordinary users can set/change caps on their own tasks
+		 * provided that the new setting is MORE constraining
+		 */
+		if (((current->euid != p->uid) && (current->uid != p->uid)) || (delta > 0)) {
+			task_rq_unlock(rq, &flags);
+			return -EPERM;
+		}
+	}
+	/*
+	 * The RT tasks don't have caps, but we still allow the caps to be
+	 * set - but as expected it wont have any effect on scheduling until
+	 * the task becomes SCHED_NORMAL:
+	 */
+	p->sdu.spa.cpu_rate_cap = new_cap;
+	if (p->sdu.spa.cpu_rate_cap < p->sdu.spa.cpu_rate_hard_cap)
+		p->sdu.spa.min_cpu_rate_cap = p->sdu.spa.cpu_rate_cap;
+	else
+		p->sdu.spa.min_cpu_rate_cap = p->sdu.spa.cpu_rate_hard_cap;
+
+	spa_sched_child->reassess_at_renice(p);
+
+	if (!rt_task(p) && task_is_queued(p)) {
+		int delta = -p->prio;
+		struct spa_runqueue_queue *rqq = &rq->qu.spa;
+
+		dequeue_task(p, rqq);
+		delta += p->prio = effective_prio(p);
+		enqueue_task(p, rqq);
+		/*
+		 * If the task increased its priority or is running and
+		 * lowered its priority, then reschedule its CPU:
+		 */
+		if (delta < 0 || (delta > 0 && task_running(rq, p)))
+			resched_task(rq->curr);
+	}
+	task_rq_unlock(rq, &flags);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(set_cpu_rate_cap);
+
+unsigned long get_cpu_rate_cap(struct task_struct *p)
+{
+	if (!spa_in_charge)
+		return 1000;
+
+	return proportion_to_ppt(p->sdu.spa.cpu_rate_cap);
+}
+
+EXPORT_SYMBOL(get_cpu_rate_cap);
+
+/*
+ * Require: 1 <= new_cap <= 1000
+ */
+int set_cpu_rate_hard_cap(struct task_struct *p, unsigned long new_cap)
+{
+	int is_allowed;
+	unsigned long flags;
+	struct runqueue *rq;
+	long delta;
+
+	/* this function could be called when other schedulers are in
+	 * charge (with catastrophic results) so let's check
+	 */
+	if (!spa_in_charge)
+		return -ENOSYS;
+
+	if ((new_cap > 1000) || (new_cap == 0)) /* zero hard caps are not allowed */
+		return -EINVAL;
+	is_allowed = capable(CAP_SYS_NICE);
+	new_cap = ppt_to_proportion(new_cap);
+	/*
+	 * We have to be careful, if called from /proc code,
+	 * the task might be in the middle of scheduling on another CPU.
+	 */
+	rq = task_rq_lock(p, &flags);
+	delta = new_cap - p->sdu.spa.cpu_rate_hard_cap;
+	if (!is_allowed) {
+		/*
+		 * Ordinary users can set/change caps on their own tasks
+		 * provided that the new setting is MORE constraining
+		 */
+		if (((current->euid != p->uid) && (current->uid != p->uid)) || (delta > 0)) {
+			task_rq_unlock(rq, &flags);
+			return -EPERM;
+		}
+	}
+	/*
+	 * The RT tasks don't have caps, but we still allow the caps to be
+	 * set - but as expected it wont have any effect on scheduling until
+	 * the task becomes SCHED_NORMAL:
+	 */
+	p->sdu.spa.cpu_rate_hard_cap = new_cap;
+	if (p->sdu.spa.cpu_rate_cap < p->sdu.spa.cpu_rate_hard_cap)
+		p->sdu.spa.min_cpu_rate_cap = p->sdu.spa.cpu_rate_cap;
+	else
+		p->sdu.spa.min_cpu_rate_cap = p->sdu.spa.cpu_rate_hard_cap;
+
+	spa_sched_child->reassess_at_renice(p);
+
+	/* (POSSIBLY) TODO: if it's sinbinned and the cap is relaxed then
+	 * release it from the sinbin
+	 */
+	task_rq_unlock(rq, &flags);
+	return 0;
+}
+
+EXPORT_SYMBOL(set_cpu_rate_hard_cap);
+
+unsigned long get_cpu_rate_hard_cap(struct task_struct *p)
+{
+	if (!spa_in_charge)
+		return 1000;
+
+	return proportion_to_ppt(p->sdu.spa.cpu_rate_hard_cap);
+}
+
+EXPORT_SYMBOL(get_cpu_rate_hard_cap);
+
+/**
+ * sys_sched_yield - yield the current processor to other threads.
+ *
+ * this function yields the current CPU by moving the calling thread
+ * to the expired array. If there are no other threads running on this
+ * CPU then this function will return.
+ */
+
+long spa_sys_yield(void)
+{
+	runqueue_t *rq = this_rq_lock();
+	struct spa_runqueue_queue *rqq = &rq->qu.spa;
+
+	schedstat_inc(rq, yld_cnt);
+	/* If there's other tasks on this CPU make sure that at least
+	 * one of them get some CPU before this task's next bite of the
+	 * cherry.  Dequeue before looking for the appropriate run
+	 * queue so that we don't find our queue if we were the sole
+	 * occupant of that queue.
+	 */
+	dequeue_task(current, rqq);
+	/*
+	 * special rule: RT tasks will just roundrobin.
+	 */
+	if (likely(!rt_task(current))) {
+		int idx = find_next_bit(rqq->bitmap, SPA_IDLE_PRIO, current->prio);
+
+		if (idx < SPA_IDLE_PRIO) {
+			if ((idx < SPA_BGND_PRIO) || task_is_bgnd(current))
+				current->prio = idx;
+			else
+				current->prio = SPA_BGND_PRIO - 1;
+		}
+	}
+	enqueue_task(current, rqq);
+
+	if (rq->nr_running == 1)
+		schedstat_inc(rq, yld_both_empty);
+
+	/*
+	 * Since we are going to call schedule() anyway, there's
+	 * no need to preempt or enable interrupts:
+	 */
+	__release(rq->lock);
+	_raw_spin_unlock(&rq->lock);
+	preempt_enable_no_resched();
+
+	schedule();
+
+	return 0;
+}
+
+void spa_yield(void)
+{
+	set_current_state(TASK_RUNNING);
+	spa_sys_yield();
+}
+
+void spa_init_idle(task_t *idle, int cpu)
+{
+	idle->prio = SPA_IDLE_PRIO;
+	/*
+	 * Initialize scheduling statistics counters as they may provide
+	 * valuable information about the CPU, e.g. avg_cpu_time_per_cycle
+	 * for the idle task will be an estimate of the average idle time.
+	 * sched_init() may not be ready so use INITIAL_JIFFIES instead.
+	 */
+	initialize_cpustats(idle, INITIAL_CPUSTATS_TIMESTAMP);
+}
+
+#ifdef CONFIG_SMP
+/* source and destination queues will be already locked */
+void spa_migrate_queued_task(struct task_struct *p, int dest_cpu)
+{
+	struct runqueue *rq_src = task_rq(p);
+	struct runqueue *rq_dest = cpu_rq(dest_cpu);
+
+	deactivate_task(p, rq_src);
+	/* not the current task on its cpu so increment delay stats */
+	delta_delay_cpustats(p, adjusted_sched_clock(p));
+	set_task_cpu(p, dest_cpu);
+	adjust_timestamp(p, rq_dest, rq_src);
+	activate_task(p, rq_dest);
+	preempt_if_warranted(p, rq_dest);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void spa_set_select_idle_first(struct runqueue *rq)
+{
+	__setscheduler(rq->idle, SCHED_FIFO, MAX_RT_PRIO - 1);
+	/* Add idle task to _front_ of it's priority queue */
+	enqueue_task_head(rq->idle, &rq->qu.spa);
+	inc_nr_running(rq->idle, rq);
+}
+
+void spa_set_select_idle_last(struct runqueue *rq)
+{
+	deactivate_task(rq->idle, rq);
+	rq->idle->static_prio = SPA_IDLE_PRIO;
+	__setscheduler(rq->idle, SCHED_NORMAL, 0);
+}
+
+void spa_migrate_dead_tasks(unsigned int dead_cpu)
+{
+	unsigned i;
+	struct runqueue *rq = cpu_rq(dead_cpu);
+
+	for (i = 0; i < SPA_IDLE_PRIO; i++) {
+		struct list_head *list = &rq->qu.spa.queue[i].list;
+		while (!list_empty(list))
+			migrate_dead(dead_cpu, list_entry(list->next, task_t, run_list));
+	}
+}
+#endif
+#endif
+
+void spa_sched_init(void)
+{
+	int i;
+
+	spa_in_charge = 1;
+
+	for (i = 0; i < NR_CPUS; i++)
+		init_runq_cpustats(i);
+
+	init_task.sdu.spa.time_slice = HZ;
+	init_task.sdu.spa.cpu_rate_cap = PROPORTION_ONE;
+	init_task.sdu.spa.cpu_rate_hard_cap = PROPORTION_ONE;
+	init_task.sdu.spa.min_cpu_rate_cap = PROPORTION_ONE;
+	init_task.sdu.spa.sinbin_timer.function = sinbin_release_fn;
+	init_task.sdu.spa.pre_bonus_priority = SPA_BGND_PRIO - 20;
+	init_task.sdu.spa.interactive_bonus = 0;
+	init_task.sdu.spa.throughput_bonus = 0;
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
+void spa_normalize_rt_task(struct task_struct *p)
+{
+	int queued;
+	unsigned long flags;
+	runqueue_t *rq;
+
+	rq = task_rq_lock(p, &flags);
+
+	queued = task_is_queued(p);
+	if (queued)
+		deactivate_task(p, rq);
+	__setscheduler(p, SCHED_NORMAL, 0);
+	if (queued) {
+		__activate_task(p, rq);
+		resched_task(rq->curr);
+	}
+
+	task_rq_unlock(rq, &flags);
+}
+#endif
+
+static inline unsigned long rnd_msecs_to_jiffies(unsigned long msecs)
+{
+	return (msecs * HZ + HZ / 2) / 1000;
+}
+
+/* Convert jiffies to milliseconds, rounding to nearest */
+static inline unsigned long rnd_jiffies_to_msecs(unsigned long jifs)
+{
+	return (jifs * 1000 + 500) / HZ;
+}
+
+#define no_change(a) (a)
+
+SCHED_DRV_SYSFS_UINT_RW(time_slice, rnd_msecs_to_jiffies, rnd_jiffies_to_msecs,
+			MIN_TIMESLICE, MAX_TIMESLICE);
+SCHED_DRV_SYSFS_UINT_RW(sched_rr_time_slice, rnd_msecs_to_jiffies,
+			rnd_jiffies_to_msecs, MIN_TIMESLICE, MAX_TIMESLICE);
+SCHED_DRV_SYSFS_UINT_RW(base_prom_interval, rnd_msecs_to_jiffies,
+			rnd_jiffies_to_msecs, MIN_TIMESLICE, ULONG_MAX);
+SCHED_DRV_SYSFS_UINT_RW(bgnd_time_slice_multiplier, no_change, no_change,
+			1, 100);
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+SCHED_DRV_SYSFS_UINT_RW(log_at_exit, no_change, no_change, 0, 1);
+
+struct sched_drv_sysfs_entry cpustats_sdse = {
+	.attr = { .name = "cpustats", .mode = S_IRUGO },
+	.show = show_cpustats,
+	.store = NULL,
+};
+#endif
+
+#ifdef CONFIG_CPUSCHED_SPA_NF
+static struct attribute *spa_nf_attrs[] = {
+	&SCHED_DRV_SYSFS_ATTR(time_slice),
+	&SCHED_DRV_SYSFS_ATTR(sched_rr_time_slice),
+	&SCHED_DRV_SYSFS_ATTR(bgnd_time_slice_multiplier),
+	&SCHED_DRV_SYSFS_ATTR(base_prom_interval),
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+	&SCHED_DRV_SYSFS_ATTR(log_at_exit),
+	&SCHED_DRV_SYSFS_ATTR(cpustats),
+#endif
+	NULL,
+};
+#endif
+
+struct sched_spa_child spa_nf_child = {
+	.soft_cap_effective_prio = spa_nf_soft_cap_effective_prio,
+	.normal_effective_prio = spa_nf_normal_effective_prio,
+	.reassess_at_activation = do_nothing_to_task,
+	.fork_extras = do_nothing_to_task,
+	.runq_data_tick = spa_nf_runq_data_tick,
+	.reassess_at_end_of_ts = do_nothing_to_task,
+	.reassess_at_sinbin_release = do_nothing_to_task,
+	.reassess_at_renice = do_nothing_to_task,
+};
+
+#ifdef CONFIG_CPUSCHED_SPA_NF
+const struct sched_drv spa_nf_sched_drv = {
+	.name = "spa_no_frills",
+	.init_runqueue_queue = spa_init_runqueue_queue,
+	.set_oom_time_slice = spa_set_oom_time_slice,
+	.task_timeslice = spa_task_timeslice,
+	.wake_up_task = spa_wake_up_task,
+	.fork = spa_fork,
+	.wake_up_new_task = spa_wake_up_new_task,
+	.exit = spa_exit,
+#ifdef CONFIG_SMP
+	.move_tasks = spa_move_tasks,
+#endif
+	.tick = spa_tick,
+#ifdef CONFIG_SCHED_SMT
+	.head_of_queue = spa_head_of_queue,
+	.dependent_sleeper_trumps = spa_dependent_sleeper_trumps,
+#endif
+	.schedule = spa_schedule,
+	.set_normal_task_nice = spa_set_normal_task_nice,
+	.setscheduler = spa_setscheduler,
+	.sys_yield = spa_sys_yield,
+	.yield = spa_yield,
+	.init_idle = spa_init_idle,
+	.sched_init = spa_sched_init,
+#ifdef CONFIG_SMP
+	.migrate_queued_task = spa_migrate_queued_task,
+#ifdef CONFIG_HOTPLUG_CPU
+	.set_select_idle_first = spa_set_select_idle_first,
+	.set_select_idle_last = spa_set_select_idle_last,
+	.migrate_dead_tasks = spa_migrate_dead_tasks,
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	.normalize_rt_task = spa_normalize_rt_task,
+#endif
+	.attrs = spa_nf_attrs,
+};
+#endif
diff -urN oldtree/kernel/sched_spa_svr.c newtree/kernel/sched_spa_svr.c
--- oldtree/kernel/sched_spa_svr.c	1970-01-01 00:00:00.000000000 +0000
+++ newtree/kernel/sched_spa_svr.c	2006-02-04 18:03:15.689969336 +0000
@@ -0,0 +1,183 @@
+/*
+ *  kernel/sched_spa_svr.c
+ *
+ *  CPU scheduler mode
+ *
+ *  Copyright (C) 2004  Aurema Pty Ltd
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/sched.h>
+#include <linux/sched_spa.h>
+
+#define MIN_VAL(a, b)		((a) < (b) ? (a) : (b))
+#define MAX_TOTAL_BONUS		(SPA_BGND_PRIO - (MAX_RT_PRIO + 40) - 1)
+#define MAX_MAX_TPT_BONUS	MAX_TOTAL_BONUS
+#define DEF_MAX_TPT_BONUS 	MIN_VAL(MAX_MAX_TPT_BONUS, 15)
+
+#define NRUN_AVG_OFFSET 7
+#define NRUN_AVG_ALPHA ((1 << NRUN_AVG_OFFSET) - 2)
+#define NRUN_AVG_INCR(a) ((a) << 1)
+#define NRUN_AVG_ONE (1UL << NRUN_AVG_OFFSET)
+#define NRUN_AVG_MUL(a, b) (((a) * (b)) >> NRUN_AVG_OFFSET)
+
+static unsigned int max_tpt_bonus = DEF_MAX_TPT_BONUS;
+
+static DEFINE_PER_CPU(unsigned long, rq_avg_tasks);
+
+static void spa_svr_runq_data_tick(unsigned int cpu, unsigned long numr)
+{
+	unsigned long nval = NRUN_AVG_MUL(per_cpu(rq_avg_tasks, cpu),
+					  NRUN_AVG_ALPHA);
+	nval += NRUN_AVG_INCR(numr);
+
+	per_cpu(rq_avg_tasks, cpu) = nval;
+}
+
+static void do_nothing_to_task(struct task_struct *p) { }
+
+static int spa_svr_effective_prio(const struct task_struct *p)
+{
+	unsigned int bonus = MAX_TOTAL_BONUS;
+
+	/* NB: unlike spa_ws this policy has no interactive bonus;
+	 * only the throughput bonus applies here */
+	/* no bonuses for tasks that have exceeded their cap */
+	if (likely(TASK_CPUSTATS(p).cpu_usage_rate < p->sdu.spa.min_cpu_rate_cap))
+		bonus -= p->sdu.spa.throughput_bonus;
+
+	return p->static_prio + bonus;
+}
+
+static inline int spa_svr_soft_cap_effective_prio(const struct task_struct *p)
+{
+	return spa_pb_soft_cap_priority(p, p->static_prio + MAX_TOTAL_BONUS);
+}
+
+static void spa_svr_fork(struct task_struct *p)
+{
+	p->sdu.spa.throughput_bonus = 0;
+}
+
+static void spa_svr_reassess_bonus(struct task_struct *p)
+{
+	unsigned long long ratio;
+	unsigned long long expected_delay;
+	unsigned long long adjusted_delay;
+	unsigned long long load;
+
+	p->sdu.spa.throughput_bonus = 0;
+	if (max_tpt_bonus == 0)
+		return;
+
+	load = per_cpu(rq_avg_tasks, task_cpu(p));
+	if (load <= NRUN_AVG_ONE)
+		expected_delay = 0;
+	else
+		expected_delay = NRUN_AVG_MUL(TASK_CPUSTATS(p).avg_cpu_per_cycle,
+					      (load - NRUN_AVG_ONE));
+	/*
+	 * No delay means no bonus, but
+	 * NB this test also avoids a possible divide by zero error if
+	 * cpu is also zero and negative bonuses
+	 */
+	if (TASK_CPUSTATS(p).avg_delay_per_cycle <= expected_delay)
+		return;
+
+	adjusted_delay  = TASK_CPUSTATS(p).avg_delay_per_cycle - expected_delay;
+	ratio = calc_proportion(adjusted_delay, adjusted_delay + TASK_CPUSTATS(p).avg_cpu_per_cycle);
+	ratio = proportion_sqrt(ratio);
+	p->sdu.spa.throughput_bonus = map_proportion_rnd(ratio, max_tpt_bonus);
+}
+
+static struct sched_spa_child spa_svr_child = {
+	.soft_cap_effective_prio = spa_svr_soft_cap_effective_prio,
+	.normal_effective_prio = spa_svr_effective_prio,
+	.reassess_at_activation = spa_svr_reassess_bonus,
+	.fork_extras = spa_svr_fork,
+	.runq_data_tick = spa_svr_runq_data_tick,
+	.reassess_at_end_of_ts = spa_svr_reassess_bonus,
+	.reassess_at_sinbin_release = do_nothing_to_task,
+	.reassess_at_renice = do_nothing_to_task,
+};
+
+static void spa_svr_sched_init(void)
+{
+	int i;
+
+	spa_sched_init();
+	spa_sched_child = &spa_svr_child;
+
+	for (i = 0; i < NR_CPUS; i++)
+		per_cpu(rq_avg_tasks, i) = 0;
+}
+
+#include <linux/sched_pvt.h>
+
+#define no_change(a) (a)
+SCHED_DRV_SYSFS_UINT_RW_STATIC(max_tpt_bonus, no_change, no_change,
+			       0, MAX_MAX_TPT_BONUS);
+
+static struct attribute *spa_svr_attrs[] = {
+	&SCHED_DRV_SYSFS_ATTR(time_slice),
+	&SCHED_DRV_SYSFS_ATTR(sched_rr_time_slice),
+	&SCHED_DRV_SYSFS_ATTR(bgnd_time_slice_multiplier),
+	&SCHED_DRV_SYSFS_ATTR(base_prom_interval),
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+	&SCHED_DRV_SYSFS_ATTR(log_at_exit),
+	&SCHED_DRV_SYSFS_ATTR(cpustats),
+#endif
+	&SCHED_DRV_SYSFS_ATTR(max_tpt_bonus),
+	NULL,
+};
+
+const struct sched_drv spa_svr_sched_drv = {
+	.name = "spa_svr",
+	.init_runqueue_queue = spa_init_runqueue_queue,
+	.set_oom_time_slice = spa_set_oom_time_slice,
+	.task_timeslice = spa_task_timeslice,
+	.wake_up_task = spa_wake_up_task,
+	.fork = spa_fork,
+	.wake_up_new_task = spa_wake_up_new_task,
+	.exit = spa_exit,
+
+#ifdef CONFIG_SMP
+	.move_tasks = spa_move_tasks,
+#endif
+	.tick = spa_tick,
+#ifdef CONFIG_SCHED_SMT
+	.head_of_queue = spa_head_of_queue,
+	.dependent_sleeper_trumps = spa_dependent_sleeper_trumps,
+#endif
+	.schedule = spa_schedule,
+	.set_normal_task_nice = spa_set_normal_task_nice,
+	.setscheduler = spa_setscheduler,
+	.sys_yield = spa_sys_yield,
+	.yield = spa_yield,
+	.init_idle = spa_init_idle,
+	.sched_init = spa_svr_sched_init,
+#ifdef CONFIG_SMP
+	.migrate_queued_task = spa_migrate_queued_task,
+#ifdef CONFIG_HOTPLUG_CPU
+	.set_select_idle_first = spa_set_select_idle_first,
+	.set_select_idle_last = spa_set_select_idle_last,
+	.migrate_dead_tasks = spa_migrate_dead_tasks,
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	.normalize_rt_task = spa_normalize_rt_task,
+#endif
+	.attrs = spa_svr_attrs,
+};
diff -urN oldtree/kernel/sched_spa_ws.c newtree/kernel/sched_spa_ws.c
--- oldtree/kernel/sched_spa_ws.c	1970-01-01 00:00:00.000000000 +0000
+++ newtree/kernel/sched_spa_ws.c	2006-02-04 18:03:15.689969336 +0000
@@ -0,0 +1,342 @@
+/*
+ *  kernel/sched_spa_ws.c
+ *
+ *  CPU scheduler mode
+ *
+ *  Copyright (C) 2004  Aurema Pty Ltd
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/sched.h>
+#include <linux/sched_spa.h>
+
+#define MIN_VAL(a, b)		((a) < (b) ? (a) : (b))
+#define MAX_TOTAL_BONUS		(SPA_BGND_PRIO - (MAX_RT_PRIO + 40) - 1)
+/* allow a slot for media streamers and 2 for wake up bonuses */
+#ifdef CPUSCHED_AUTODETECT_MEDIA
+#define MAX_MAX_IA_BONUS	(MAX_TOTAL_BONUS - 3)
+#else
+#define MAX_MAX_IA_BONUS	(MAX_TOTAL_BONUS - 2)
+#endif
+#define DEF_MAX_IA_BONUS 	MIN_VAL(MAX_MAX_IA_BONUS, 15)
+#define DEF_INITIAL_IA_BONUS	(DEF_MAX_IA_BONUS / 3)
+#define IA_BONUS_OFFSET		8
+
+#define IA_WAKE_UPS_THRESHOLD	8
+
+/* If the average sleep is extremely long this is probably not
+ * interactive and is in fact probably something annoying like a log
+ * rotator so let its interactive bonus die away
+ */
+#define WS_BIG_SLEEP SCHED_AVG_REAL(2 * 60 * 60LL * NSEC_PER_SEC)
+
+static unsigned int max_ia_bonus = DEF_MAX_IA_BONUS;
+static unsigned int initial_ia_bonus = DEF_INITIAL_IA_BONUS;
+
+static void spa_ws_runq_data_tick(unsigned int cpu, unsigned long numr) {}
+
+static void do_nothing_to_task(struct task_struct *p) { }
+
+/*
+ * Tasks more sleepy than this are considered interactive
+ */
+static unsigned long iab_incr_threshold = PROP_FM_PPT(900);
+
+/*
+ * Tasks less sleepy than this are considered NOT interactive
+ */
+static unsigned long iab_decr_threshold = PROP_FM_PPT(100);
+
+/*
+ * Tasks more active than this are considered NOT interactive
+ */
+static unsigned long cpu_hog_threshold = PROP_FM_PPT(900);
+
+#ifdef CPUSCHED_AUTODETECT_MEDIA
+/*
+ * Only give special treatment to media streamers whose cpu usage rate is less
+ * than this
+ */
+static unsigned long media_max_usage_rate = PROP_FM_PPT(500);
+#endif
+
+static inline int bonuses(const struct task_struct *p)
+{
+	int ret;
+
+#ifdef CPUSCHED_AUTODETECT_MEDIA
+	if (unlikely(p->sdu.spa.flags & SPAF_MEDIA) &&
+	    (TASK_CPUSTATS(p).cpu_usage_rate < media_max_usage_rate))
+		return MAX_TOTAL_BONUS;
+#endif
+
+	/* round during mapping */
+	ret = p->sdu.spa.interactive_bonus + (1UL << (IA_BONUS_OFFSET - 1));
+	ret >>= IA_BONUS_OFFSET;
+
+	if ((TASK_CPUSTATS(p).flags & CPUSTATS_JUST_WOKEN_FL) &&
+	    task_is_in_ia_sleep(p)) {
+		if (TASK_CPUSTATS(p).flags & CPUSTATS_WOKEN_FOR_INTR_FL)
+			ret += 2;
+		else
+			ret += 1;
+	}
+
+	return ret;
+}
+
+static inline void decr_interactive_bonus(struct task_struct *p)
+{
+	p->sdu.spa.interactive_bonus *= ((1UL << IA_BONUS_OFFSET) - 2);
+	p->sdu.spa.interactive_bonus >>= IA_BONUS_OFFSET;
+}
+
+static inline void incr_interactive_bonus(struct task_struct *p)
+{
+	decr_interactive_bonus(p);
+	p->sdu.spa.interactive_bonus += (max_ia_bonus << 1);
+}
+
+static inline void partial_decr_interactive_bonus(struct task_struct *p)
+{
+	decr_interactive_bonus(p);
+	p->sdu.spa.interactive_bonus += (initial_ia_bonus << 1);
+}
+
+static inline void zero_interactive_bonus(struct task_struct *p)
+{
+	p->sdu.spa.interactive_bonus = 0;
+}
+
+static int spa_ws_effective_prio(const struct task_struct *p)
+{
+	unsigned int bonus = MAX_TOTAL_BONUS;
+
+	/* interactive bonuses only count at wake up
+	 */
+	/* no bonuses for tasks that have exceeded their cap */
+	if (likely(TASK_CPUSTATS(p).cpu_usage_rate < p->sdu.spa.min_cpu_rate_cap))
+		bonus -= bonuses(p);
+
+	return p->static_prio + bonus;
+}
+
+static inline int spa_ws_soft_cap_effective_prio(const struct task_struct *p)
+{
+	return spa_pb_soft_cap_priority(p, p->static_prio + MAX_TOTAL_BONUS);
+}
+
+#ifdef CPUSCHED_AUTODETECT_MEDIA
+#define MSECS_TO_ANSECS(m) SCHED_AVG_REAL((unsigned long long)(m) * 1000000ULL)
+#define MEDIA_VAR_SCALE 1000
+ /* based on observation of RealPlayer */
+static unsigned int media_max_variability = 500;
+static unsigned long long media_min_interval = MSECS_TO_ANSECS(6);
+static unsigned long long media_max_interval = MSECS_TO_ANSECS(60);
+#endif
+
+static void spa_ws_fork(struct task_struct *p)
+{
+	p->sdu.spa.interactive_bonus = (max_ia_bonus >= initial_ia_bonus) ?
+				initial_ia_bonus : max_ia_bonus;
+	p->sdu.spa.interactive_bonus <<= IA_BONUS_OFFSET;
+}
+
+static inline int spa_ws_eligible(struct task_struct *p)
+{
+	if (unlikely(TASK_CPUSTATS(p).avg_sleep_per_cycle > WS_BIG_SLEEP))
+		return 0;
+
+#ifdef CPUSCHED_AUTODETECT_MEDIA
+	/* Interactive tasks won't have regular sleep/wake cycles except
+	 * for media streamers.  We want media streamers to get interactive
+	 * bonuses in case the occasional glitch causes the SPAF_MEDIA flag
+	 * not to be set
+	 */
+	if (p->sdu.spa.flags & SPAF_REGULAR)
+		return p->sdu.spa.flags & SPAF_MEDIA;
+#endif
+
+	return 1;
+}
+
+#ifdef CPUSCHED_AUTODETECT_MEDIA
+static inline int has_regular_usage_cycle(const task_t *p)
+{
+	unsigned long long a = TASK_CPUSTATS(p).var_wake_interval * MEDIA_VAR_SCALE;
+	unsigned long long b = TASK_CPUSTATS(p).avg_wake_interval *
+		(unsigned long long)media_max_variability;
+
+	return a < b;
+}
+#endif
+
+static void spa_ws_reassess_at_activation(struct task_struct *p)
+{
+#ifdef CPUSCHED_AUTODETECT_MEDIA
+	/* if it looks like a media streamer mark it as such */
+	if (has_regular_usage_cycle(p)) {
+	    	p->sdu.spa.flags |= SPAF_REGULAR;
+		if (TASK_CPUSTATS(p).avg_wake_interval > media_max_interval ||
+		    TASK_CPUSTATS(p).avg_wake_interval < media_min_interval)
+			p->sdu.spa.flags &= ~SPAF_MEDIA;
+		else
+			p->sdu.spa.flags |= SPAF_MEDIA;
+	}
+	else
+		p->sdu.spa.flags &= ~(SPAF_MEDIA|SPAF_REGULAR);
+#endif
+
+	if (TASK_CPUSTATS(p).ia_sleepiness > iab_incr_threshold) {
+		if (spa_ws_eligible(p))
+			incr_interactive_bonus(p);
+		else
+			partial_decr_interactive_bonus(p);
+	}
+	else if (TASK_CPUSTATS(p).cpu_usage_rate > cpu_hog_threshold)
+		decr_interactive_bonus(p);
+	else if (TASK_CPUSTATS(p).ia_sleepiness < iab_decr_threshold) {
+		if (TASK_CPUSTATS(p).total_wake_ups < IA_WAKE_UPS_THRESHOLD)
+			decr_interactive_bonus(p);
+		else
+			partial_decr_interactive_bonus(p);
+	}
+}
+
+static void spa_ws_reassess_at_end_of_ts(struct task_struct *p)
+{
+	/* tasks that use a full time slice in their first or second CPU burst
+	 * lose their initial bonus and have to start from scratch
+	 */
+	if (TASK_CPUSTATS(p).total_wake_ups < 2) {
+		zero_interactive_bonus(p);
+		return;
+	}
+
+	/* Don't punish tasks that have done a lot of sleeping for the
+	 * occasional run of short sleeps unless they become a cpu hog.
+	 */
+	if (TASK_CPUSTATS(p).cpu_usage_rate > cpu_hog_threshold)
+		decr_interactive_bonus(p);
+	else if (TASK_CPUSTATS(p).ia_sleepiness < iab_decr_threshold) {
+		if (TASK_CPUSTATS(p).total_wake_ups < IA_WAKE_UPS_THRESHOLD)
+			decr_interactive_bonus(p);
+		else
+			partial_decr_interactive_bonus(p);
+	}
+}
+
+static struct sched_spa_child spa_ws_child = {
+	.soft_cap_effective_prio = spa_ws_soft_cap_effective_prio,
+	.normal_effective_prio = spa_ws_effective_prio,
+	.reassess_at_activation = spa_ws_reassess_at_activation,
+	.fork_extras = spa_ws_fork,
+	.runq_data_tick = spa_ws_runq_data_tick,
+	.reassess_at_end_of_ts = spa_ws_reassess_at_end_of_ts,
+	.reassess_at_sinbin_release = do_nothing_to_task,
+	.reassess_at_renice = do_nothing_to_task,
+};
+
+static void spa_ws_sched_init(void)
+{
+	spa_sched_init();
+	spa_sched_child = &spa_ws_child;
+}
+
+#include <linux/sched_pvt.h>
+
+#define no_change(a) (a)
+SCHED_DRV_SYSFS_UINT_RW_STATIC(max_ia_bonus, no_change, no_change,
+			       0, MAX_MAX_IA_BONUS);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(initial_ia_bonus, no_change, no_change,
+			       0, MAX_MAX_IA_BONUS);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(iab_incr_threshold, ppt_to_proportion,
+			       proportion_to_ppt, 0, PROPORTION_ONE);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(iab_decr_threshold, ppt_to_proportion,
+			       proportion_to_ppt, 0, PROPORTION_ONE);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(cpu_hog_threshold, ppt_to_proportion,
+			       proportion_to_ppt, 0, PROPORTION_ONE);
+#ifdef CPUSCHED_AUTODETECT_MEDIA
+SCHED_DRV_SYSFS_UINT_RW_STATIC(media_max_usage_rate, ppt_to_proportion,
+			       proportion_to_ppt, 0, PROPORTION_ONE);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(media_max_variability, no_change,
+			       no_change, 0, MEDIA_VAR_SCALE);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(media_min_interval, msecs_to_nsecs_avg,
+			       nsecs_avg_to_msecs, 0, MSECS_TO_ANSECS(1000));
+SCHED_DRV_SYSFS_UINT_RW_STATIC(media_max_interval, msecs_to_nsecs_avg,
+			       nsecs_avg_to_msecs, 0, MSECS_TO_ANSECS(1000));
+#endif
+
+static struct attribute *spa_ws_attrs[] = {
+	&SCHED_DRV_SYSFS_ATTR(time_slice),
+	&SCHED_DRV_SYSFS_ATTR(sched_rr_time_slice),
+	&SCHED_DRV_SYSFS_ATTR(bgnd_time_slice_multiplier),
+	&SCHED_DRV_SYSFS_ATTR(base_prom_interval),
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+	&SCHED_DRV_SYSFS_ATTR(log_at_exit),
+	&SCHED_DRV_SYSFS_ATTR(cpustats),
+#endif
+	&SCHED_DRV_SYSFS_ATTR(max_ia_bonus),
+	&SCHED_DRV_SYSFS_ATTR(initial_ia_bonus),
+	&SCHED_DRV_SYSFS_ATTR(iab_incr_threshold),
+	&SCHED_DRV_SYSFS_ATTR(iab_decr_threshold),
+	&SCHED_DRV_SYSFS_ATTR(cpu_hog_threshold),
+#ifdef CPUSCHED_AUTODETECT_MEDIA
+	&SCHED_DRV_SYSFS_ATTR(media_max_usage_rate),
+	&SCHED_DRV_SYSFS_ATTR(media_max_variability),
+	&SCHED_DRV_SYSFS_ATTR(media_min_interval),
+	&SCHED_DRV_SYSFS_ATTR(media_max_interval),
+#endif
+	NULL,
+};
+
+const struct sched_drv spa_ws_sched_drv = {
+	.name = "spa_ws",
+	.init_runqueue_queue = spa_init_runqueue_queue,
+	.set_oom_time_slice = spa_set_oom_time_slice,
+	.task_timeslice = spa_task_timeslice,
+	.wake_up_task = spa_wake_up_task,
+	.fork = spa_fork,
+	.wake_up_new_task = spa_wake_up_new_task,
+	.exit = spa_exit,
+
+#ifdef CONFIG_SMP
+	.move_tasks = spa_move_tasks,
+#endif
+	.tick = spa_tick,
+#ifdef CONFIG_SCHED_SMT
+	.head_of_queue = spa_head_of_queue,
+	.dependent_sleeper_trumps = spa_dependent_sleeper_trumps,
+#endif
+	.schedule = spa_schedule,
+	.set_normal_task_nice = spa_set_normal_task_nice,
+	.setscheduler = spa_setscheduler,
+	.sys_yield = spa_sys_yield,
+	.yield = spa_yield,
+	.init_idle = spa_init_idle,
+	.sched_init = spa_ws_sched_init,
+#ifdef CONFIG_SMP
+	.migrate_queued_task = spa_migrate_queued_task,
+#ifdef CONFIG_HOTPLUG_CPU
+	.set_select_idle_first = spa_set_select_idle_first,
+	.set_select_idle_last = spa_set_select_idle_last,
+	.migrate_dead_tasks = spa_migrate_dead_tasks,
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	.normalize_rt_task = spa_normalize_rt_task,
+#endif
+	.attrs = spa_ws_attrs,
+};
diff -urN oldtree/kernel/sched_zaphod.c newtree/kernel/sched_zaphod.c
--- oldtree/kernel/sched_zaphod.c	1970-01-01 00:00:00.000000000 +0000
+++ newtree/kernel/sched_zaphod.c	2006-02-04 18:03:15.690969184 +0000
@@ -0,0 +1,563 @@
+/*
+ *  kernel/sched_zaphod.c
+ *
+ *  CPU scheduler mode
+ *
+ *  Copyright (C) 2004  Aurema Pty Ltd
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+#include <linux/sched_spa.h>
+
+#include <asm/uaccess.h>
+
+/*
+ * For entitlement based scheduling a task's shares will be determined from
+ * their "nice"ness
+ */
+#define EB_SHARES_PER_NICE 3
+#define DEFAULT_EB_SHARES (20 * EB_SHARES_PER_NICE)
+#define MAX_EB_SHARES (DEFAULT_EB_SHARES * DEFAULT_EB_SHARES)
+
+#define MIN_NORMAL_PRIO	MAX_RT_PRIO
+#define ZAPHOD_MAX_PRIO	(MIN_NORMAL_PRIO + 40)
+#define IDLE_PRIO	SPA_IDLE_PRIO
+#define BGND_PRIO	SPA_BGND_PRIO
+#define TASK_ZD(p) (p)->sdu.spa
+#define MIN_RATE_CAP(p) (p)->sdu.spa.min_cpu_rate_cap
+
+#define EB_YARDSTICK_DECAY_INTERVAL 100
+
+struct zaphod_mode {
+	const char *name;
+	void (*calculate_pre_bonus_priority)(task_t *p);
+	int (*soft_cap_effective_prio)(const task_t *p);
+};
+
+static void calculate_pb_pre_bonus_priority(task_t *p);
+static void calculate_eb_pre_bonus_priority(task_t *p);
+static inline int pb_soft_cap_effective_prio(const task_t *p)
+{
+	return spa_pb_soft_cap_priority(p, TASK_ZD(p).pre_bonus_priority);
+}
+static int zaphod_effective_prio(const task_t *p);
+
+static const struct zaphod_mode zaphod_modes[] = {
+	{ .name = "pb",
+	  .calculate_pre_bonus_priority = calculate_pb_pre_bonus_priority,
+	  .soft_cap_effective_prio = pb_soft_cap_effective_prio,
+	},
+	{ .name = "eb",
+	  .calculate_pre_bonus_priority = calculate_eb_pre_bonus_priority,
+	  .soft_cap_effective_prio = zaphod_effective_prio,
+	},
+	{ .name = NULL, }       /* end of list marker */
+};
+
+static const struct zaphod_mode *zm = &zaphod_modes[0];
+
+struct sched_zaphod_runq_data {
+	unsigned long avg_nr_running;
+	atomic_t eb_yardstick;
+	atomic_t eb_ticks_to_decay;
+};
+
+static DEFINE_PER_CPU(struct sched_zaphod_runq_data, zaphod_runqs);
+#define cpu_zrq(cpu)	(&per_cpu(zaphod_runqs, cpu))
+#define task_zrq(p)	cpu_zrq(task_cpu(p))
+/*
+ * Convert nice to shares
+ * Proportional symmetry is aimed for: i.e.
+ * (nice_to_shares(0) / nice_to_shares(19)) == (nice_to_shares(-20) / nice_to_shares(0))
+ * Make sure that this function is robust for variations of EB_SHARES_PER_NICE
+ */
+static inline unsigned int nice_to_shares(int nice)
+{
+	unsigned int result = DEFAULT_EB_SHARES;
+
+	if (nice > 0)
+		result -= (nice * (20 * EB_SHARES_PER_NICE - 1)) / 19;
+	else if (nice < 0)
+		result += (nice * nice * ((20 * EB_SHARES_PER_NICE - 1) * EB_SHARES_PER_NICE)) / 20;
+
+	return result;
+}
+
+static inline int shares_to_nice(unsigned int shares)
+{
+	int result = 0;
+
+	if (shares > DEFAULT_EB_SHARES)
+		result = -int_sqrt((20 * (shares - DEFAULT_EB_SHARES)) /
+			(EB_SHARES_PER_NICE * (20 * EB_SHARES_PER_NICE - 1)));
+	else if (shares < DEFAULT_EB_SHARES)
+		result = (19 * (DEFAULT_EB_SHARES - shares)) /
+			 (20 * EB_SHARES_PER_NICE - 1);
+
+	return result;
+}
+
+#define MAX_TOTAL_BONUS (BGND_PRIO - ZAPHOD_MAX_PRIO - 1)
+#define MAX_MAX_IA_BONUS ((MAX_TOTAL_BONUS + 1) / 2)
+#define MAX_MAX_TPT_BONUS (MAX_TOTAL_BONUS - MAX_MAX_IA_BONUS)
+#define DEFAULT_MAX_IA_BONUS ((MAX_MAX_IA_BONUS < 9) ? MAX_MAX_IA_BONUS : 9)
+#define DEFAULT_MAX_TPT_BONUS ((DEFAULT_MAX_IA_BONUS - 2) ? : 1)
+
+
+#define SCHED_IA_BONUS_OFFSET 8
+#define SCHED_IA_BONUS_ALPHA ((1 << SCHED_IA_BONUS_OFFSET) - 2)
+#define SCHED_IA_BONUS_INCR(a) ((a) << 1)
+#define SCHED_IA_BONUS_MUL(a, b) (((a) * (b)) >> SCHED_IA_BONUS_OFFSET)
+/*
+ * Get the rounded integer value of the interactive bonus
+ */
+#define SCHED_IA_BONUS_RND(x) \
+	(((x) + (1 << (SCHED_IA_BONUS_OFFSET - 1))) >> (SCHED_IA_BONUS_OFFSET))
+
+static unsigned int max_ia_bonus = DEFAULT_MAX_IA_BONUS;
+static unsigned int max_max_ia_bonus = MAX_MAX_IA_BONUS;
+static unsigned int initial_ia_bonus = 5;
+static unsigned int max_tpt_bonus = DEFAULT_MAX_TPT_BONUS;
+static unsigned int max_max_tpt_bonus = MAX_MAX_TPT_BONUS;
+
+/*
+ * Tasks that have a CPU usage rate greater than this threshold (in parts per
+ * thousand) are considered to be CPU bound and start to lose interactive bonus
+ * points
+ */
+#define DEFAULT_CPU_HOG_THRESHOLD 900
+static unsigned long cpu_hog_threshold = PROP_FM_PPT(DEFAULT_CPU_HOG_THRESHOLD);
+
+/*
+ * Tasks that would sleep for more than this many parts per thousand of the
+ * time if they had the CPU to themselves are considered to be interactive
+ * provided that their average sleep duration per scheduling cycle isn't too
+ * long
+ */
+#define DEFAULT_IA_THRESHOLD 900
+static unsigned long ia_threshold = PROP_FM_PPT(DEFAULT_IA_THRESHOLD);
+#define LOWER_MAX_IA_SLEEP SCHED_AVG_REAL(15 * 60LL * NSEC_PER_SEC)
+#define UPPER_MAX_IA_SLEEP SCHED_AVG_REAL(2 * 60 * 60LL * NSEC_PER_SEC)
+
+static inline void decay_sched_ia_bonus(struct task_struct *p)
+{
+	TASK_ZD(p).interactive_bonus *= SCHED_IA_BONUS_ALPHA;
+	TASK_ZD(p).interactive_bonus >>= SCHED_IA_BONUS_OFFSET;
+}
+
+/*
+ * Check whether a task with an interactive bonus still qualifies and if not
+ * decrease its bonus
+ * This never gets called on real time tasks
+ */
+static void reassess_cpu_boundness(task_t *p)
+{
+	if (max_ia_bonus == 0) {
+		TASK_ZD(p).interactive_bonus = 0;
+		return;
+	}
+
+	if (TASK_CPUSTATS(p).cpu_usage_rate > cpu_hog_threshold)
+		decay_sched_ia_bonus(p);
+	else {
+		if (TASK_CPUSTATS(p).ia_sleepiness < (PROPORTION_ONE - cpu_hog_threshold)) {
+			decay_sched_ia_bonus(p);
+			TASK_ZD(p).interactive_bonus +=
+				SCHED_IA_BONUS_INCR(map_proportion_rnd(TASK_CPUSTATS(p).ia_sleepiness,
+								       max_ia_bonus));
+		}
+	}
+}
+
+/*
+ * Check whether a task qualifies for an interactive bonus and if it does
+ * increase its bonus
+ * This never gets called on real time tasks
+ */
+static void reassess_interactiveness(task_t *p)
+{
+	if (max_ia_bonus == 0) {
+		TASK_ZD(p).interactive_bonus = 0;
+		return;
+	}
+	/*
+	 * No sleep means not interactive (in most cases), but
+	 */
+	if (unlikely(TASK_CPUSTATS(p).avg_sleep_per_cycle > LOWER_MAX_IA_SLEEP)) {
+		/*
+		 * Really long sleeps mean it's probably not interactive
+		 */
+		if (unlikely(TASK_CPUSTATS(p).avg_sleep_per_cycle > UPPER_MAX_IA_SLEEP))
+			decay_sched_ia_bonus(p);
+		return;
+	}
+
+	if (TASK_CPUSTATS(p).ia_sleepiness > ia_threshold) {
+		decay_sched_ia_bonus(p);
+		TASK_ZD(p).interactive_bonus += SCHED_IA_BONUS_INCR(max_ia_bonus);
+	}
+}
+
+/*
+ * Check whether a task qualifies for a throughput bonus and if it does
+ * give it one
+ * This never gets called on real time tasks
+ */
+#define NRUN_AVG_OFFSET 7
+#define NRUN_AVG_ALPHA ((1 << NRUN_AVG_OFFSET) - 2)
+#define NRUN_AVG_INCR(a) ((a) << 1)
+#define NRUN_AVG_ONE (1UL << NRUN_AVG_OFFSET)
+#define NRUN_AVG_MUL(a, b) (((a) * (b)) >> NRUN_AVG_OFFSET)
+static void recalc_throughput_bonus(task_t *p)
+{
+	unsigned long long ratio;
+	unsigned long long expected_delay;
+	unsigned long long adjusted_delay;
+	struct sched_zaphod_runq_data *zrq = task_zrq(p);
+	unsigned long long load = zrq->avg_nr_running;
+
+	TASK_ZD(p).throughput_bonus = 0;
+	if (max_tpt_bonus == 0)
+		return;
+
+	if (load <= NRUN_AVG_ONE)
+		expected_delay = 0;
+	else
+		expected_delay = NRUN_AVG_MUL(TASK_CPUSTATS(p).avg_cpu_per_cycle, (load - NRUN_AVG_ONE));
+
+	/*
+	 * No unexpected delay means no bonus, but
+	 * NB this test also avoids a possible divide by zero error if
+	 * cpu is also zero and negative bonuses
+	 */
+	if (TASK_CPUSTATS(p).avg_delay_per_cycle <= expected_delay)
+		return;
+
+	adjusted_delay  = TASK_CPUSTATS(p).avg_delay_per_cycle - expected_delay;
+	ratio = calc_proportion(adjusted_delay, adjusted_delay + TASK_CPUSTATS(p).avg_cpu_per_cycle);
+	ratio = proportion_sqrt(ratio);
+	TASK_ZD(p).throughput_bonus = map_proportion_rnd(ratio, max_tpt_bonus);
+}
+
+/*
+ * Calculate priority based priority (without bonuses).
+ * This never gets called on real time tasks
+ */
+static void calculate_pb_pre_bonus_priority(task_t *p)
+{
+	TASK_ZD(p).pre_bonus_priority = p->static_prio + MAX_TOTAL_BONUS;
+}
+
+/*
+ * We're just trying to protect a reading and writing of the yardstick.
+ * We're not too fussed about protecting the calculation so the following
+ * is adequate
+ */
+static inline void decay_eb_yardstick(struct sched_zaphod_runq_data *zrq)
+{
+	static const unsigned long decay_per_interval = PROP_FM_PPT(990);
+	unsigned long curry = atomic_read(&zrq->eb_yardstick);
+	unsigned long pny; /* potential new yardstick */
+	struct task_struct *p = current;
+
+	curry = proportion_mul(decay_per_interval, curry);
+	atomic_set(&zrq->eb_ticks_to_decay, EB_YARDSTICK_DECAY_INTERVAL);
+	if (unlikely(rt_task(p) || task_is_bgnd(p)))
+		goto out;
+	if (TASK_CPUSTATS(p).cpu_usage_rate < MIN_RATE_CAP(p))
+		pny = TASK_CPUSTATS(p).cpu_usage_rate / TASK_ZD(p).eb_shares;
+	else
+		pny = MIN_RATE_CAP(p) / TASK_ZD(p).eb_shares;
+	if (pny > curry)
+		curry = pny;
+out:
+	if (unlikely(curry >= PROPORTION_ONE))
+		curry = PROPORTION_ONE - 1;
+	atomic_set(&zrq->eb_yardstick, curry);
+}
+
+/*
+ * Calculate entitlement based (eb mode) priority (without bonuses).
+ * The task's CPU usage per share (of eb_shares) is ranked against the
+ * per runqueue "yardstick" to map it into the non RT priority range.
+ * This never gets called on real time tasks
+ */
+#define EB_PAR 19
+static void calculate_eb_pre_bonus_priority(task_t *p)
+{
+	/*
+	 * Prevent possible divide by zero and take shortcut
+	 */
+	if (unlikely(MIN_RATE_CAP(p) == 0)) {
+		/* a zero cap relegates the task to just above background */
+		TASK_ZD(p).pre_bonus_priority = BGND_PRIO - 1;
+	} else if (TASK_CPUSTATS(p).cpu_usage_rate > MIN_RATE_CAP(p)) {
+		/* over its cap: rank near the bottom, scaled by cap/usage */
+		struct sched_zaphod_runq_data *zrq = task_zrq(p);
+		unsigned long cap_per_share = MIN_RATE_CAP(p) / TASK_ZD(p).eb_shares;
+		unsigned long prop = calc_proportion(MIN_RATE_CAP(p), TASK_CPUSTATS(p).cpu_usage_rate);
+
+		TASK_ZD(p).pre_bonus_priority = (BGND_PRIO - 1);
+		TASK_ZD(p).pre_bonus_priority -= map_proportion_rnd(prop, EB_PAR + 1);
+		/* the cap per share may become the new yardstick */
+		if (cap_per_share > atomic_read(&zrq->eb_yardstick)) {
+			if (likely(cap_per_share < PROPORTION_ONE))
+				atomic_set(&zrq->eb_yardstick, cap_per_share);
+			else
+				atomic_set(&zrq->eb_yardstick, PROPORTION_ONE - 1);
+		}
+
+	} else {
+		struct sched_zaphod_runq_data *zrq = task_zrq(p);
+		unsigned long usage_per_share = TASK_CPUSTATS(p).cpu_usage_rate / TASK_ZD(p).eb_shares;
+
+		if (usage_per_share > atomic_read(&zrq->eb_yardstick)) {
+			/* heaviest usage seen: this task sets the yardstick */
+			if (likely(usage_per_share < PROPORTION_ONE))
+				atomic_set(&zrq->eb_yardstick, usage_per_share);
+			else
+				atomic_set(&zrq->eb_yardstick, PROPORTION_ONE - 1);
+			TASK_ZD(p).pre_bonus_priority = MAX_RT_PRIO + MAX_TOTAL_BONUS + EB_PAR;
+		} else {
+			unsigned long prop;
+
+			/* rank proportionally within EB_PAR slots of the best */
+			prop = calc_proportion(usage_per_share, atomic_read(&zrq->eb_yardstick));
+			TASK_ZD(p).pre_bonus_priority = MAX_RT_PRIO + MAX_TOTAL_BONUS;
+			TASK_ZD(p).pre_bonus_priority += map_proportion_rnd(prop, EB_PAR);
+		}
+	}
+}
+
+/* Dispatch to the currently selected zaphod mode's (zm) calculation */
+static inline void calculate_pre_bonus_priority(task_t *p)
+{
+	zm->calculate_pre_bonus_priority(p);
+}
+
+/* Reset one CPU's per runqueue zaphod data to its boot state */
+static void zaphod_init_cpu_runq_data(unsigned int cpu)
+{
+	struct sched_zaphod_runq_data *zrq = &per_cpu(zaphod_runqs, cpu);
+
+	zrq->avg_nr_running = 0;
+	atomic_set(&zrq->eb_yardstick, 0);
+	/* stagger the decay countdowns by cpu so CPUs don't decay in lockstep */
+	atomic_set(&zrq->eb_ticks_to_decay, EB_YARDSTICK_DECAY_INTERVAL + cpu);
+}
+
+/* Accessor used outside this file for a CPU's zaphod runqueue data */
+struct sched_zaphod_runq_data *zaphod_cpu_runq_data(unsigned int cpu)
+{
+	return cpu_zrq(cpu);
+}
+
+/*
+ * Per tick bookkeeping: fold numr into the decaying average of
+ * nr_running and, when the countdown reaches zero, decay the
+ * entitlement based yardstick.
+ */
+static void zaphod_runq_data_tick(unsigned int cpu, unsigned long numr)
+{
+	struct sched_zaphod_runq_data *zrq = cpu_zrq(cpu);
+	unsigned long nval = NRUN_AVG_MUL(zrq->avg_nr_running, NRUN_AVG_ALPHA);
+	nval += NRUN_AVG_INCR(numr);
+
+	zrq->avg_nr_running = nval;
+
+	if (atomic_dec_and_test(&zrq->eb_ticks_to_decay))
+		decay_eb_yardstick(zrq);
+}
+
+/*
+ * Initialise a child's zaphod bonuses at fork: it starts with the
+ * (clamped) initial interactive bonus and no throughput bonus.
+ */
+static void zaphod_fork(struct task_struct *p)
+{
+	TASK_ZD(p).interactive_bonus = (max_ia_bonus >= initial_ia_bonus) ?
+				initial_ia_bonus : max_ia_bonus;
+	TASK_ZD(p).interactive_bonus <<= SCHED_IA_BONUS_OFFSET;
+	TASK_ZD(p).throughput_bonus =  0;
+}
+
+/* Effective priority: pre bonus priority minus any earned bonuses */
+static int zaphod_effective_prio(const struct task_struct *p)
+{
+	unsigned int bonus = 0;
+
+	/* no bonuses for tasks that have exceeded their cap */
+	if (likely(TASK_CPUSTATS(p).cpu_usage_rate < MIN_RATE_CAP(p))) {
+		/* No IA bonus when waking from (declared) non IA sleep */
+		if ((p->sdu.spa.flags & SPAF_NONIASLEEP) == 0)
+			bonus = SCHED_IA_BONUS_RND(TASK_ZD(p).interactive_bonus);
+		bonus += TASK_ZD(p).throughput_bonus;
+	}
+
+	return TASK_ZD(p).pre_bonus_priority - bonus;
+}
+
+/* Soft cap handling is mode specific: defer to the current mode */
+static inline int zaphod_soft_cap_effective_prio(const struct task_struct *p)
+{
+	return zm->soft_cap_effective_prio(p);
+}
+
+/* Refresh both bonuses and the pre bonus priority on wake up */
+static void zaphod_reassess_at_activation(struct task_struct *p)
+{
+	recalc_throughput_bonus(p);
+	reassess_interactiveness(p);
+	calculate_pre_bonus_priority(p);
+}
+
+/*
+ * Reassess a task when it uses up a full time slice.
+ * NOTE(review): this is the only non static hook here — confirm whether
+ * it is referenced outside this file or could be made static.
+ */
+void zaphod_reassess_at_end_of_ts(struct task_struct *p)
+{
+	recalc_throughput_bonus(p);
+	/* if a whole time slice gets used during the first or second
+	 * CPU burst then the initial interactive bonus is forfeit and the
+	 * task starts again from scratch trying to establish its interactive
+	 * bona fides
+	 */
+	if (TASK_CPUSTATS(p).total_wake_ups < 2)
+		TASK_ZD(p).interactive_bonus = 0;
+	else
+		reassess_cpu_boundness(p);
+	/*
+	 * Interactive bonus is not updated here as long CPU bursts (greater
+	 * than a time slice) are atypical of interactive tasks
+	 */
+	calculate_pre_bonus_priority(p);
+}
+
+/* On release from the sinbin only the pre bonus priority needs refreshing */
+static void zaphod_reassess_at_sinbin_release(struct task_struct *p)
+{
+	calculate_pre_bonus_priority(p);
+}
+
+/* Renice changes the task's entitlement shares (and thus its priority) */
+static void zaphod_reassess_at_renice(struct task_struct *p)
+{
+	TASK_ZD(p).eb_shares = nice_to_shares(task_nice(p));
+	if (!rt_task(p))
+		calculate_pre_bonus_priority(p);
+}
+
+/* Hooks the generic SPA scheduler calls into this child scheduler */
+struct sched_spa_child zaphod_child = {
+	.soft_cap_effective_prio = zaphod_soft_cap_effective_prio,
+	.normal_effective_prio = zaphod_effective_prio,
+	.reassess_at_activation = zaphod_reassess_at_activation,
+	.fork_extras = zaphod_fork,
+	.runq_data_tick = zaphod_runq_data_tick,
+	.reassess_at_end_of_ts = zaphod_reassess_at_end_of_ts,
+	.reassess_at_sinbin_release = zaphod_reassess_at_sinbin_release,
+	.reassess_at_renice = zaphod_reassess_at_renice,
+};
+
+/*
+ * Boot time initialisation: set up the common SPA layer, reset every
+ * possible CPU's runqueue data and register ourselves as SPA's child.
+ */
+static void zaphod_sched_init(void)
+{
+	int i;
+
+	spa_sched_init();
+
+	for (i = 0; i < NR_CPUS; i++)
+		zaphod_init_cpu_runq_data(i);
+
+	spa_sched_child = &zaphod_child;
+	init_task.sdu.spa.eb_shares = DEFAULT_EB_SHARES;
+}
+
+#include <linux/sched_pvt.h>
+
+/*
+ * Sysfs tunables.  no_change() is the identity conversion for values
+ * stored exactly as written; the threshold values are converted
+ * between parts-per-thousand (user view) and internal proportions.
+ */
+#define no_change(a) (a)
+SCHED_DRV_SYSFS_UINT_RW_STATIC(max_ia_bonus, no_change, no_change,
+			       0, max_max_ia_bonus);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(initial_ia_bonus, no_change, no_change,
+			       0, max_max_ia_bonus);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(max_tpt_bonus, no_change, no_change, 0,
+			       max_max_tpt_bonus);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(ia_threshold, ppt_to_proportion,
+			       proportion_to_ppt, 0, PROPORTION_ONE);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(cpu_hog_threshold, ppt_to_proportion,
+			       proportion_to_ppt, 0, PROPORTION_ONE);
+
+/* sysfs: report the currently selected zaphod mode's name */
+static ssize_t show_zaphod_mode(char *page)
+{
+	return sprintf(page, "%s\n", zm->name);
+}
+
+/*
+ * sysfs: select the zaphod mode by name.  A trailing newline (as
+ * produced by "echo mode > ...") is ignored.  Returns -EINVAL if the
+ * string names no known mode.
+ */
+static ssize_t store_zaphod_mode(const char *page, size_t count)
+{
+	int i;
+	size_t clen = strlen(page);
+	const char *nlp = strrchr(page, '\n');
+
+	if (nlp != NULL)
+		clen = nlp - page;
+
+	/*
+	 * Require an exact name match.  The old bare strncmp() accepted
+	 * any prefix of a mode name and, for an empty write (clen == 0),
+	 * unconditionally selected the first mode.
+	 */
+	for (i = 0; zaphod_modes[i].name != NULL; i++)
+		if (strlen(zaphod_modes[i].name) == clen &&
+		    strncmp(page, zaphod_modes[i].name, clen) == 0)
+			break;
+	if (zaphod_modes[i].name == NULL)
+		return -EINVAL;
+
+	/* set the zaphod mode */
+	zm = &zaphod_modes[i];
+
+	return count;
+}
+
+/* sysfs entry binding the "mode" attribute to the show/store above */
+struct sched_drv_sysfs_entry zaphod_mode_sdse = {
+	.attr = { .name = "mode", .mode = S_IRUGO | S_IWUSR },
+	.show = show_zaphod_mode,
+	.store = store_zaphod_mode,
+};
+
+/* All sysfs attributes exported by the zaphod scheduler driver */
+static struct attribute *zaphod_attrs[] = {
+	&SCHED_DRV_SYSFS_ATTR(time_slice),
+	&SCHED_DRV_SYSFS_ATTR(sched_rr_time_slice),
+	&SCHED_DRV_SYSFS_ATTR(bgnd_time_slice_multiplier),
+	&SCHED_DRV_SYSFS_ATTR(base_prom_interval),
+/*
+ * Fixed: this guard was spelled CONFIG_CPU_SCHED_ACCRUED_STATS but the
+ * option is CONFIG_CPUSCHED_ACCRUED_STATS everywhere else in this patch
+ * (see the fs/proc/base.c hunks), so these attributes were never built.
+ */
+#ifdef CONFIG_CPUSCHED_ACCRUED_STATS
+	&SCHED_DRV_SYSFS_ATTR(log_at_exit),
+	&SCHED_DRV_SYSFS_ATTR(cpustats),
+#endif
+	&SCHED_DRV_SYSFS_ATTR(max_ia_bonus),
+	&SCHED_DRV_SYSFS_ATTR(initial_ia_bonus),
+	&SCHED_DRV_SYSFS_ATTR(max_tpt_bonus),
+	&SCHED_DRV_SYSFS_ATTR(ia_threshold),
+	&SCHED_DRV_SYSFS_ATTR(cpu_hog_threshold),
+	&SCHED_DRV_SYSFS_ATTR(zaphod_mode),
+	NULL,
+};
+
+/*
+ * The zaphod scheduler driver: mostly the generic SPA implementations,
+ * with zaphod supplying only its own boot time initialisation.
+ * Fixed: .tick was initialised twice (once here, once after the SMP
+ * block); duplicate designated initialisers silently override and
+ * provoke a GCC warning, so the second one is removed.
+ */
+const struct sched_drv zaphod_sched_drv = {
+	.name = "zaphod",
+	.init_runqueue_queue = spa_init_runqueue_queue,
+	.set_oom_time_slice = spa_set_oom_time_slice,
+	.task_timeslice = spa_task_timeslice,
+	.wake_up_task = spa_wake_up_task,
+	.fork = spa_fork,
+	.wake_up_new_task = spa_wake_up_new_task,
+	.exit = spa_exit,
+	.tick = spa_tick,
+#ifdef CONFIG_SMP
+	.move_tasks = spa_move_tasks,
+#endif
+#ifdef CONFIG_SCHED_SMT
+	.head_of_queue = spa_head_of_queue,
+	.dependent_sleeper_trumps = spa_dependent_sleeper_trumps,
+#endif
+	.schedule = spa_schedule,
+	.set_normal_task_nice = spa_set_normal_task_nice,
+	.setscheduler = spa_setscheduler,
+	.yield = spa_yield,
+	.sys_yield = spa_sys_yield,
+	.init_idle = spa_init_idle,
+	.sched_init = zaphod_sched_init,
+#ifdef CONFIG_SMP
+	.migrate_queued_task = spa_migrate_queued_task,
+#ifdef CONFIG_HOTPLUG_CPU
+	.set_select_idle_first = spa_set_select_idle_first,
+	.set_select_idle_last = spa_set_select_idle_last,
+	.migrate_dead_tasks = spa_migrate_dead_tasks,
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	.normalize_rt_task = spa_normalize_rt_task,
+#endif
+	.attrs = zaphod_attrs,
+};
diff -urN oldtree/kernel/staircase.c newtree/kernel/staircase.c
--- oldtree/kernel/staircase.c	1970-01-01 00:00:00.000000000 +0000
+++ newtree/kernel/staircase.c	2006-02-04 18:03:15.691969032 +0000
@@ -0,0 +1,1058 @@
+/*
+ *  kernel/staircase.c
+ *  Copyright (C) 1991-2005  Linus Torvalds
+ *
+ * 2005-11-11 Staircase scheduler by Con Kolivas <kernel@kolivas.org>
+ *            Staircase v13
+ */
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/rcupdate.h>
+#include <linux/security.h>
+#include <linux/cpu.h>
+#include <linux/hardirq.h>
+#include <linux/sched_pvt.h>
+#include <linux/sched_runq.h>
+
+/*
+ * Unique staircase process flags used by scheduler.
+ */
+#define SF_NONSLEEP	0x00000001	/* Waiting on in kernel activity */
+#define SF_YIELDED	0x00000002	/* I have just yielded */
+
+/* Initialise an empty staircase priority array for one runqueue */
+static void staircase_init_runqueue_queue(union runqueue_queue *qup)
+{
+	int k;
+
+	qup->staircase.cache_ticks = 0;
+	qup->staircase.preempted = 0;
+
+	for (k = 0; k < STAIRCASE_MAX_PRIO; k++) {
+		INIT_LIST_HEAD(qup->staircase.queue + k);
+		__clear_bit(k, qup->staircase.bitmap);
+	}
+	/* delimiter for bitsearch */
+	__set_bit(STAIRCASE_MAX_PRIO, qup->staircase.bitmap);
+}
+
+/* Give an OOM-killed task a generous slice so it can exit quickly */
+static void staircase_set_oom_time_slice(struct task_struct *p,
+	unsigned long t)
+{
+	p->sdu.staircase.slice = p->sdu.staircase.time_slice = t;
+}
+
+/*
+ * 'User priority' is the nice value converted to something we
+ * can work with better when scaling various scheduler parameters,
+ * it's a [ 0 ... 39 ] range.
+ */
+#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
+#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
+#define MAX_USER_PRIO		(USER_PRIO(STAIRCASE_MAX_PRIO))
+
+/*
+ * Some helpers for converting nanosecond timing to jiffy resolution
+ */
+#define NS_TO_JIFFIES(TIME)	((TIME) / (1000000000 / HZ))
+#define JIFFIES_TO_NS(TIME)	((TIME) * (1000000000 / HZ))
+#define NSJIFFY			(1000000000 / HZ)	/* One jiffy in ns */
+
+int sched_compute = 0;
+/*
+ * This is the time all tasks within the same priority round robin.
+ * The compute setting is reserved for dedicated computational
+ * scheduling and has twenty times larger intervals (1 + 19 below).
+ * Set to a minimum of 5ms.
+ */
+#define _RR_INTERVAL		((5 * HZ / 1001) + 1)
+#define RR_INTERVAL()		(_RR_INTERVAL * (1 + 19 * sched_compute))
+
+#define TASK_PREEMPTS_CURR(p, rq) \
+	((p)->prio < (rq)->curr->prio)
+
+/*
+ * Get nanosecond clock difference without overflowing unsigned long.
+ * The result is clamped to [1, 2^31] so the unsigned long return type
+ * cannot truncate it on 32 bit machines.
+ */
+static inline unsigned long ns_diff(unsigned long long v1,
+	unsigned long long v2)
+{
+	unsigned long long vdiff;
+	if (likely(v1 > v2)) {
+		vdiff = v1 - v2;
+		/*
+		 * Fixed: "1 << 31" overflows a 32 bit signed int (UB) and,
+		 * converted to unsigned long long for the comparison, became
+		 * a huge bound that never clamped anything.
+		 */
+		if (vdiff > (1ULL << 31))
+			vdiff = 1ULL << 31;
+	} else
+		/*
+		 * Rarely the clock appears to go backwards. There should
+		 * always be a positive difference so return 1.
+		 */
+		vdiff = 1;
+	return (unsigned long)vdiff;
+}
+
+/*
+ * Adding/removing a task to/from a priority array:
+ */
+static inline void dequeue_task(struct task_struct *p,
+	struct staircase_runqueue_queue *rqq)
+{
+	list_del_init(&p->run_list);
+	/* clear the bitmap bit when the priority level becomes empty */
+	if (list_empty(rqq->queue + p->prio))
+		__clear_bit(p->prio, rqq->bitmap);
+	p->sdu.staircase.ns_debit = 0;
+}
+
+static void enqueue_task(struct task_struct *p,
+	struct staircase_runqueue_queue *rqq)
+{
+	sched_info_queued(p);
+	list_add_tail(&p->run_list, rqq->queue + p->prio);
+	__set_bit(p->prio, rqq->bitmap);
+}
+
+/* Move p to the tail of its (unchanged) priority level */
+static inline void requeue_task(struct task_struct *p,
+	struct staircase_runqueue_queue *rq)
+{
+	list_move_tail(&p->run_list, rq->queue + p->prio);
+}
+
+/*
+ * Used by the migration code - we pull tasks from the head of the
+ * remote queue so we want these tasks to show up at the head of the
+ * local queue:
+ */
+static inline void enqueue_task_head(struct task_struct *p,
+	struct staircase_runqueue_queue *rqq)
+{
+	list_add(&p->run_list, rqq->queue + p->prio);
+	__set_bit(p->prio, rqq->bitmap);
+}
+
+/*
+ * __activate_task - move a task to the runqueue.
+ */
+static inline void __activate_task(task_t *p, runqueue_t *rq)
+{
+	enqueue_task(p, &rq->qu.staircase);
+	inc_nr_running(p, rq);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * __activate_idle_task - move idle task to the _front_ of runqueue.
+ */
+static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
+{
+	enqueue_task_head(p, &rq->qu.staircase);
+	inc_nr_running(p, rq);
+}
+#endif
+
+/*
+ * Bonus - How much higher than its base priority an interactive task
+ * can run; equal to the task's user priority (0..39), so nicer tasks
+ * have smaller maximum bonuses.
+ */
+static inline unsigned int bonus(const task_t *p)
+{
+	return TASK_USER_PRIO(p);
+}
+
+/*
+ * We increase our bonus by sleeping more than the time we ran.
+ * The ratio of sleep to run gives us the cpu% that we last ran and determines
+ * the maximum bonus we can acquire.
+ */
+static void inc_bonus(task_t *p, unsigned long totalrun, unsigned long sleep)
+{
+	unsigned int best_bonus;
+
+	/* sleep/run ratio caps how far the bonus may climb */
+	best_bonus = sleep / (totalrun + 1);
+	if (p->sdu.staircase.bonus >= best_bonus)
+		return;
+
+	/* climb one step at a time, never past the static-prio ceiling */
+	p->sdu.staircase.bonus++;
+	best_bonus = bonus(p);
+	if (p->sdu.staircase.bonus > best_bonus)
+		p->sdu.staircase.bonus = best_bonus;
+}
+
+/* Lose one step of bonus (never below zero) */
+static void dec_bonus(task_t *p)
+{
+	if (p->sdu.staircase.bonus)
+		p->sdu.staircase.bonus--;
+}
+
+/* Round robin interval; negatively niced tasks get a slightly longer one */
+static inline unsigned int rr_interval(const task_t * p)
+{
+	unsigned int rr_interval = RR_INTERVAL();
+	int nice = TASK_NICE(p);
+
+	if (nice < 0 && !rt_task(p))
+		rr_interval += -(nice);
+
+	return rr_interval;
+}
+
+/*
+ * slice - the duration a task runs before getting requeued at its best
+ * priority and has its bonus decremented: one RR interval plus one
+ * more per step of attainable bonus.
+ */
+static inline unsigned int slice(const task_t *p)
+{
+	unsigned int slice, rr;
+
+	slice = rr = rr_interval(p);
+	if (likely(!rt_task(p)))
+		slice += bonus(p) * rr;
+
+	return slice;
+}
+
+/*
+ * sched_interactive - sysctl which allows interactive tasks to have bonuses
+ */
+int sched_interactive = 1;
+
+/*
+ * effective_prio - dynamic priority dependent on bonus.
+ * The priority normally decreases by one each RR_INTERVAL.
+ * As the bonus increases the initial priority starts at a higher "stair" or
+ * priority for longer.
+ */
+static int effective_prio(task_t *p)
+{
+	int prio;
+	unsigned int full_slice, used_slice = 0;
+	unsigned int best_bonus, rr;
+
+	/* real time priorities are managed elsewhere */
+	if (rt_task(p))
+		return p->prio;
+
+	full_slice = slice(p);
+	if (full_slice > p->sdu.staircase.slice)
+		used_slice = full_slice - p->sdu.staircase.slice;
+
+	/* start from the static priority's best stair ... */
+	best_bonus = bonus(p);
+	prio = MAX_RT_PRIO + best_bonus;
+	if (sched_interactive && !sched_compute)
+		prio -= p->sdu.staircase.bonus;
+
+	/* ... and descend one stair per RR interval already consumed */
+	rr = rr_interval(p);
+	prio += used_slice / rr;
+	if (prio > STAIRCASE_MAX_PRIO - 1)
+		prio = STAIRCASE_MAX_PRIO - 1;
+
+	return prio;
+}
+
+/*
+ * Resume the task's previous slice instead of granting a fresh one:
+ * subtract the time already run, demoting (dec_bonus) if a whole
+ * slice's worth has accumulated.
+ */
+static void continue_slice(task_t *p)
+{
+	unsigned long total_run = NS_TO_JIFFIES(p->sdu.staircase.totalrun);
+
+	if (total_run >= p->sdu.staircase.slice) {
+ 		p->sdu.staircase.totalrun -=
+ 			JIFFIES_TO_NS(p->sdu.staircase.slice);
+		dec_bonus(p);
+	} else {
+		unsigned int remainder;
+		p->sdu.staircase.slice -= total_run;
+		remainder = p->sdu.staircase.slice % rr_interval(p);
+		if (remainder)
+			p->sdu.staircase.time_slice = remainder;
+ 	}
+}
+
+/*
+ * recalc_task_prio - this checks for tasks that run ultra short timeslices
+ * or have just forked a thread/process and make them continue their old
+ * slice instead of starting a new one at high priority.
+ */
+static inline void recalc_task_prio(task_t *p, unsigned long long now,
+	unsigned long rq_running)
+{
+	unsigned long sleep_time = ns_diff(now, p->timestamp);
+
+	/*
+	 * Priority is elevated back to best by amount of sleep_time.
+	 */
+
+	p->sdu.staircase.totalrun += p->sdu.staircase.runtime;
+	/* ran at least a full slice but slept less than one: demote */
+	if (NS_TO_JIFFIES(p->sdu.staircase.totalrun) >=
+		p->sdu.staircase.slice && NS_TO_JIFFIES(sleep_time) <
+		p->sdu.staircase.slice) {
+			p->sdu.staircase.sflags &= ~SF_NONSLEEP;
+			dec_bonus(p);
+			p->sdu.staircase.totalrun -=
+				JIFFIES_TO_NS(p->sdu.staircase.slice);
+			if (sleep_time > p->sdu.staircase.totalrun)
+				p->sdu.staircase.totalrun = 0;
+			else
+				p->sdu.staircase.totalrun -= sleep_time;
+			goto out;
+	}
+
+	/* declared in-kernel (non interactive) sleep: resume the old slice */
+	if (p->sdu.staircase.sflags & SF_NONSLEEP) {
+		continue_slice(p);
+		p->sdu.staircase.sflags &= ~SF_NONSLEEP;
+		return;
+	}
+
+	/* compute mode never grants interactive elevation */
+	if (sched_compute) {
+		continue_slice(p);
+		return;
+	}
+
+	/* slept longer than it ran: may earn a bonus and starts afresh */
+	if (sleep_time >= p->sdu.staircase.totalrun) {
+		if (!(p->sdu.staircase.sflags & SF_NONSLEEP))
+			inc_bonus(p, p->sdu.staircase.totalrun, sleep_time);
+		p->sdu.staircase.totalrun = 0;
+		goto out;
+	}
+
+	p->sdu.staircase.totalrun -= sleep_time;
+	continue_slice(p);
+out:
+	return;
+}
+
+/*
+ * activate_task - move a task to the runqueue and do priority recalculation
+ *
+ * Update all the scheduling statistics stuff. (sleep average
+ * calculation, priority modifiers, etc.)
+ */
+static void activate_task(task_t *p, runqueue_t *rq, int local)
+{
+	unsigned long long now = sched_clock();
+	unsigned long rr = rr_interval(p);
+
+#ifdef CONFIG_SMP
+	if (!local) {
+		/* Compensate for drifting sched_clock */
+		runqueue_t *this_rq = this_rq();
+		now = (now - this_rq->timestamp_last_tick)
+			+ rq->timestamp_last_tick;
+	}
+#endif
+	p->sdu.staircase.slice = slice(p);
+	/* remainder of the slice, or a whole RR interval if it divides evenly */
+	p->sdu.staircase.time_slice = p->sdu.staircase.slice % rr ? : rr;
+	recalc_task_prio(p, now, rq->nr_running);
+	p->sdu.staircase.sflags &= ~SF_NONSLEEP;
+	p->prio = effective_prio(p);
+	p->timestamp = now;
+	__activate_task(p, rq);
+}
+
+/*
+ * deactivate_task - remove a task from the runqueue.
+ */
+static inline void deactivate_task(struct task_struct *p, runqueue_t *rq)
+{
+	dec_nr_running(p, rq);
+	dequeue_task(p, &rq->qu.staircase);
+}
+
+/*
+ * cache_delay is the time preemption is delayed in sched_compute mode
+ * and is set to 5*cache_decay_ticks on SMP or a nominal 10ms on UP.
+ * NOTE(review): only the nominal 10ms initialiser is visible here —
+ * confirm the SMP override is applied elsewhere.
+ */
+static int cache_delay = 10 * HZ / 1000;
+
+/*
+ * Check to see if p preempts rq->curr and resched if it does. In compute
+ * mode we do not preempt for at least cache_delay and set rq->preempted.
+ *
+ * Fixed: a second "p->prio >= rq->curr->prio" check here was dead code,
+ * being exactly the negation of TASK_PREEMPTS_CURR() tested above.
+ */
+static void preempt(task_t *p, struct runqueue *rq)
+{
+	if (!TASK_PREEMPTS_CURR(p, rq))
+		return;
+
+	if (!sched_compute || rq->qu.staircase.cache_ticks >= cache_delay ||
+		!p->mm || rt_task(p))
+			resched_task(rq->curr);
+	rq->qu.staircase.preempted = 1;
+}
+
+/***
+ * try_to_wake_up - wake up a thread
+ * @p: the to-be-woken-up thread
+ * @old_state: the task's state before being woken
+ * @sync: do a synchronous wakeup?
+ * @rq: The run queue on which the task is to be placed (already locked)
+ */
+static void staircase_wake_up_task(struct task_struct *p, struct runqueue *rq,
+	unsigned int old_state, int sync)
+{
+	int same_cpu = (rq == this_rq());
+
+	if (old_state == TASK_UNINTERRUPTIBLE)
+		rq->nr_uninterruptible--;
+
+	/*
+	 * Tasks that have marked their sleep as noninteractive get
+	 * woken up without their sleep counting.
+	 */
+	if (old_state & TASK_NONINTERACTIVE)
+		p->sdu.staircase.sflags |= SF_NONSLEEP;
+
+	/*
+	 * Sync wakeups (i.e. those types of wakeups where the waker
+	 * has indicated that it will leave the CPU in short order)
+	 * don't trigger a preemption, if the woken up task will run on
+	 * this cpu. (in this case the 'I will reschedule' promise of
+	 * the waker guarantees that the freshly woken up task is going
+	 * to be considered on this CPU.)
+	 */
+	activate_task(p, rq, same_cpu);
+	if (!sync || !same_cpu)
+		preempt(p, rq);
+}
+
+/*
+ * Perform scheduler related setup for a newly forked process p.
+ * p is forked by current.  Staircase needs no per-fork state here:
+ * the child's bonus is reset in staircase_wake_up_new_task().
+ */
+static void staircase_fork(task_t *p)
+{
+}
+
+/*
+ * wake_up_new_task - wake up a newly created task for the first time.
+ *
+ * This function will do some initial scheduler statistics housekeeping
+ * that must be done for every newly created context, then puts the task
+ * on the runqueue and wakes it.
+ */
+static void staircase_wake_up_new_task(task_t * p, unsigned long clone_flags)
+{
+	unsigned long flags;
+	int this_cpu, cpu;
+	runqueue_t *rq, *this_rq;
+
+	rq = task_rq_lock(p, &flags);
+	cpu = task_cpu(p);
+	this_cpu = smp_processor_id();
+
+	BUG_ON(p->state != TASK_RUNNING);
+
+	/*
+	 * Forked process gets no bonus to prevent fork bombs.
+	 */
+	p->sdu.staircase.bonus = 0;
+
+	if (likely(cpu == this_cpu)) {
+		current->sdu.staircase.sflags |= SF_NONSLEEP;
+		activate_task(p, rq, 1);
+		if (!(clone_flags & CLONE_VM))
+			/*
+			 * The VM isn't cloned, so we're in a good position to
+			 * do child-runs-first in anticipation of an exec. This
+			 * usually avoids a lot of COW overhead.
+			 */
+			set_need_resched();
+		/*
+		 * We skip the following code due to cpu == this_cpu
+	 	 *
+		 *   task_rq_unlock(rq, &flags);
+		 *   this_rq = task_rq_lock(current, &flags);
+		 */
+		this_rq = rq;
+	} else {
+		this_rq = cpu_rq(this_cpu);
+
+		/*
+		 * Not the local CPU - must adjust timestamp. This should
+		 * get optimised away in the !CONFIG_SMP case.
+		 */
+		p->timestamp = (p->timestamp - this_rq->timestamp_last_tick)
+					+ rq->timestamp_last_tick;
+		p->prio = effective_prio(p);
+		__activate_task(p, rq);
+		preempt(p, rq);
+
+		/*
+		 * Parent and child are on different CPUs, now get the
+		 * parent's runqueue to mark the parent's sleep as an
+		 * in-kernel (SF_NONSLEEP) one:
+		 */
+		task_rq_unlock(rq, &flags);
+		this_rq = task_rq_lock(current, &flags);
+		current->sdu.staircase.sflags |= SF_NONSLEEP;
+	}
+
+	task_rq_unlock(this_rq, &flags);
+}
+
+/*
+ * Scheduler exit hook.  Unlike the vanilla scheduler, staircase does
+ * not hand timeslices back from an exiting child to its parent, so
+ * there is nothing to do here.
+ */
+static void staircase_exit(task_t * p)
+{
+}
+
+/*
+ * pull_task - move a task from a remote runqueue to the local runqueue.
+ * Both runqueues must be locked.
+ */
+static inline void pull_task(runqueue_t *src_rq, task_t *p,
+	runqueue_t *this_rq, int this_cpu)
+{
+	dequeue_task(p, &src_rq->qu.staircase);
+	dec_nr_running(p, src_rq);
+	set_task_cpu(p, this_cpu);
+	inc_nr_running(p, this_rq);
+	enqueue_task(p, &this_rq->qu.staircase);
+	/* translate the timestamp between the two CPUs' drifting clocks */
+	p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
+				+ this_rq->timestamp_last_tick;
+	/*
+	 * Note that idle threads have a prio of STAIRCASE_MAX_PRIO, for this
+	 * test to be always true for them.
+	 */
+	preempt(p, this_rq);
+}
+
+#ifdef CONFIG_SMP
+/*
+ * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq,
+ * as part of a balancing operation within "domain". Returns the number of
+ * tasks moved.
+ *
+ * Called with both runqueues locked.
+ */
+static int staircase_move_tasks(runqueue_t *this_rq, int this_cpu,
+	runqueue_t *busiest, unsigned long max_nr_move, long max_bias_move,
+	struct sched_domain *sd, enum idle_type idle, int *all_pinned)
+{
+	struct list_head *head, *curr;
+	int idx, pulled = 0, pinned = 0;
+	task_t *tmp;
+
+	if (max_nr_move == 0 || max_bias_move == 0)
+		goto out;
+
+	pinned = 1;
+
+	/* Start searching at priority 0: */
+	idx = 0;
+skip_bitmap:
+	if (!idx)
+		idx = sched_find_first_bit(busiest->qu.staircase.bitmap);
+	else
+		idx = find_next_bit(busiest->qu.staircase.bitmap,
+			STAIRCASE_MAX_PRIO, idx);
+	if (idx >= STAIRCASE_MAX_PRIO)
+		goto out;
+
+	head = busiest->qu.staircase.queue + idx;
+	/* walk each level tail-first: oldest queued tasks move first */
+	curr = head->prev;
+skip_queue:
+	tmp = list_entry(curr, task_t, run_list);
+
+	curr = curr->prev;
+
+	if (tmp->bias_prio > max_bias_move ||
+	    !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+
+#ifdef CONFIG_SCHEDSTATS
+	if (task_hot(tmp, busiest->timestamp_last_tick, sd))
+		schedstat_inc(sd, lb_hot_gained[idle]);
+#endif
+
+	pull_task(busiest, tmp, this_rq, this_cpu);
+	pulled++;
+	max_bias_move -= tmp->bias_prio;
+
+	/*
+	 * We only want to steal up to the prescribed number of tasks
+	 * and the prescribed amount of biased load.
+	 */
+	if (pulled < max_nr_move && max_bias_move > 0) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+out:
+	if (all_pinned)
+		*all_pinned = pinned;
+
+	return pulled;
+}
+#endif
+
+/*
+ * Requeue p at its (recomputed, usually lower) effective priority with
+ * a fresh RR interval, and ask for a reschedule.
+ */
+static void time_slice_expired(task_t *p, runqueue_t *rq)
+{
+	struct staircase_runqueue_queue *rqq = &rq->qu.staircase;
+
+	set_tsk_need_resched(p);
+	dequeue_task(p, rqq);
+	p->prio = effective_prio(p);
+	p->sdu.staircase.time_slice = rr_interval(p);
+	enqueue_task(p, rqq);
+}
+
+/*
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled.
+ */
+static void staircase_tick(struct task_struct *p, struct runqueue *rq,
+	unsigned long long now)
+{
+	int cpu = smp_processor_id();
+	unsigned long debit, expired_balance = rq->nr_running;
+
+	if (p == rq->idle) {
+		if (wake_priority_sleeper(rq))
+			goto out;
+		rebalance_tick(cpu, rq, SCHED_IDLE);
+		return;
+	}
+
+	/* Task might have expired already, but not scheduled off yet */
+	if (unlikely(!task_is_queued(p))) {
+		set_tsk_need_resched(p);
+		goto out;
+	}
+
+	/*
+	 * SCHED_FIFO tasks never run out of timeslice.
+	 */
+	if (unlikely(p->policy == SCHED_FIFO)) {
+		expired_balance = 0;
+		goto out;
+	}
+
+	spin_lock(&rq->lock);
+	/* accumulate sub-jiffy run time; only act on whole jiffies */
+	debit = ns_diff(rq->timestamp_last_tick, p->timestamp);
+	p->sdu.staircase.ns_debit += debit;
+	if (p->sdu.staircase.ns_debit < NSJIFFY)
+		goto out_unlock;
+	p->sdu.staircase.ns_debit %= NSJIFFY;
+	/*
+	 * Tasks lose bonus each time they use up a full slice().
+	 */
+	if (!--p->sdu.staircase.slice) {
+		dec_bonus(p);
+		p->sdu.staircase.slice = slice(p);
+		time_slice_expired(p, rq);
+		p->sdu.staircase.totalrun = 0;
+		goto out_unlock;
+	}
+	/*
+	 * Tasks that run out of time_slice but still have slice left get
+	 * requeued with a lower priority && RR_INTERVAL time_slice.
+	 */
+	if (!--p->sdu.staircase.time_slice) {
+		time_slice_expired(p, rq);
+		goto out_unlock;
+	}
+	/* compute mode: preempt only after cache_delay ticks have passed */
+	rq->qu.staircase.cache_ticks++;
+	if (rq->qu.staircase.preempted &&
+		rq->qu.staircase.cache_ticks >= cache_delay) {
+		set_tsk_need_resched(p);
+		goto out_unlock;
+	}
+	expired_balance = 0;
+out_unlock:
+	spin_unlock(&rq->lock);
+out:
+	if (expired_balance > 1)
+		rebalance_tick(cpu, rq, NOT_IDLE);
+}
+
+#ifdef CONFIG_SCHED_SMT
+/* Return the highest priority queued task (head of the best level) */
+static struct task_struct *staircase_head_of_queue(union runqueue_queue *rqq)
+{
+	return list_entry(rqq->staircase.queue[sched_find_first_bit(rqq->staircase.bitmap)].next,
+		task_t, run_list);
+}
+
+/*
+ * Does p1's remaining timeslice, discounted by the sibling gain,
+ * still exceed a full slice of p2?  Used for SMT dependent sleeping.
+ */
+static int staircase_dependent_sleeper_trumps(const struct task_struct *p1,
+	const struct task_struct * p2, struct sched_domain *sd)
+{
+	return (p1->sdu.staircase.time_slice * (100 - sd->per_cpu_gain) /
+		100) > slice(p2);
+}
+#endif
+
+/*
+ * schedule() is the main scheduler function.
+ */
+static void staircase_schedule(void)
+{
+	long *switch_count;
+	int cpu, idx;
+	struct task_struct *prev = current, *next;
+	struct runqueue *rq = this_rq();
+	unsigned long long now = sched_clock();
+	unsigned long debit;
+	struct list_head *queue;
+
+	spin_lock_irq(&rq->lock);
+
+	/* account the outgoing task's run time and sub-jiffy debit */
+	prev->sdu.staircase.runtime = ns_diff(now, prev->timestamp);
+	debit = ns_diff(now, rq->timestamp_last_tick) % NSJIFFY;
+	prev->sdu.staircase.ns_debit += debit;
+
+	if (unlikely(current->flags & PF_DEAD))
+		current->state = EXIT_DEAD;
+	/*
+	 * if entering off of a kernel preemption go straight
+	 * to picking the next task.
+	 */
+	switch_count = &prev->nivcsw;
+	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+		switch_count = &prev->nvcsw;
+		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+				unlikely(signal_pending(prev))))
+			prev->state = TASK_RUNNING;
+		else {
+			if (prev->state == TASK_UNINTERRUPTIBLE) {
+				rq->nr_uninterruptible++;
+				/* uninterruptible sleep earns no bonus */
+				prev->sdu.staircase.sflags |= SF_NONSLEEP;
+			}
+			deactivate_task(prev, rq);
+		}
+	}
+
+	cpu = smp_processor_id();
+	if (unlikely(!rq->nr_running)) {
+go_idle:
+		idle_balance(cpu, rq);
+		if (!rq->nr_running) {
+			next = rq->idle;
+			wake_sleeping_dependent(cpu, rq);
+			/*
+			 * wake_sleeping_dependent() might have released
+			 * the runqueue, so break out if we got new
+			 * tasks meanwhile:
+			 */
+			if (!rq->nr_running)
+				goto switch_tasks;
+		}
+	} else {
+		if (dependent_sleeper(cpu, rq)) {
+			next = rq->idle;
+			goto switch_tasks;
+		}
+		/*
+		 * dependent_sleeper() releases and reacquires the runqueue
+		 * lock, hence go into the idle loop if the rq went
+		 * empty meanwhile:
+		 */
+		if (unlikely(!rq->nr_running))
+			goto go_idle;
+	}
+
+	/* pick the first task on the best (lowest numbered) priority level */
+	idx = sched_find_first_bit(rq->qu.staircase.bitmap);
+	queue = rq->qu.staircase.queue + idx;
+	next = list_entry(queue->next, task_t, run_list);
+switch_tasks:
+	if (next == rq->idle)
+		schedstat_inc(rq, sched_goidle);
+	prefetch(next);
+	prefetch_stack(next);
+	clear_tsk_need_resched(prev);
+	rcu_qsctr_inc(task_cpu(prev));
+
+	update_cpu_clock(prev, rq, now);
+	prev->timestamp = prev->last_ran = now;
+	if (unlikely(next->sdu.staircase.sflags & SF_YIELDED)) {
+		/*
+		 * Tasks that have yield()ed get requeued at normal priority
+		 */
+		int newprio = effective_prio(next);
+		next->sdu.staircase.sflags &= ~SF_YIELDED;
+		if (newprio != next->prio) {
+			struct staircase_runqueue_queue *rqq =
+				&rq->qu.staircase;
+
+			dequeue_task(next, rqq);
+			next->prio = newprio;
+			enqueue_task_head(next, rqq);
+		}
+	}
+
+	sched_info_switch(prev, next);
+	if (likely(prev != next)) {
+		rq->qu.staircase.preempted = 0;
+		rq->qu.staircase.cache_ticks = 0;
+		next->timestamp = now;
+		rq->nr_switches++;
+		rq->curr = next;
+		++*switch_count;
+
+		prepare_task_switch(rq, next);
+		prev = context_switch(rq, prev, next);
+		barrier();
+		/*
+		 * this_rq must be evaluated again because prev may have moved
+		 * CPUs since it called schedule(), thus the 'rq' on its stack
+		 * frame will be invalid.
+		 */
+		finish_task_switch(this_rq(), prev);
+	} else
+		spin_unlock_irq(&rq->lock);
+}
+
+/*
+ * Apply a new nice value to a SCHED_NORMAL task, shifting its dynamic
+ * priority by the same delta and requeueing/rescheduling as needed.
+ */
+static void staircase_set_normal_task_nice(task_t *p, long nice)
+{
+	int queued;
+	int old_prio, new_prio, delta;
+	struct runqueue *rq = task_rq(p);
+	struct staircase_runqueue_queue *rqq = &rq->qu.staircase;
+
+	queued = task_is_queued(p);
+	if (queued) {
+		dequeue_task(p, rqq);
+		dec_prio_bias(rq, p);
+	}
+
+	old_prio = p->prio;
+	new_prio = NICE_TO_PRIO(nice);
+	delta = new_prio - old_prio;
+	p->static_prio = NICE_TO_PRIO(nice);
+	set_bias_prio(p);
+	p->prio += delta;
+
+	if (queued) {
+		inc_prio_bias(rq, p);
+		enqueue_task(p, rqq);
+		/*
+		 * If the task increased its priority or is running and
+		 * lowered its priority, then reschedule its CPU:
+		 */
+		if (delta < 0 || (delta > 0 && task_running(rq, p)))
+			resched_task(rq->curr);
+	}
+}
+
+/*
+ * setscheduler - change the scheduling policy and/or RT priority of a thread.
+ */
+static void staircase_setscheduler(task_t *p, int policy, int prio)
+{
+	int oldprio;
+	int queued;
+	runqueue_t *rq = task_rq(p);
+
+	queued = task_is_queued(p);
+	if (queued)
+		deactivate_task(p, rq);
+	oldprio = p->prio;
+	__setscheduler(p, policy, prio);
+	if (queued) {
+		__activate_task(p, rq);
+		/*
+		 * Reschedule if we are currently running on this runqueue and
+		 * our priority decreased, or if we are not currently running on
+		 * this runqueue and our priority is higher than the current's
+		 */
+		if (task_running(rq, p)) {
+			if (p->prio > oldprio)
+				resched_task(rq->curr);
+		} else
+			preempt(p, rq);
+	}
+}
+
+/**
+ * sys_sched_yield - yield the current processor to other threads.
+ *
+ * this function yields the current CPU by requeueing the calling
+ * thread at the lowest SCHED_NORMAL priority (RT tasks are simply
+ * requeued at their own priority). If there are no other threads
+ * running on this CPU then this function will return.
+ */
+
+static long staircase_sys_yield(void)
+{
+	int newprio;
+	runqueue_t *rq = this_rq_lock();
+	struct staircase_runqueue_queue *rqq = &rq->qu.staircase;
+
+	schedstat_inc(rq, yld_cnt);
+	newprio = current->prio;
+	current->sdu.staircase.slice = slice(current);
+	current->sdu.staircase.time_slice = rr_interval(current);
+	if (likely(!rt_task(current))) {
+		/* SF_YIELDED makes schedule() restore a fair priority later */
+		current->sdu.staircase.sflags |= SF_YIELDED;
+		newprio = STAIRCASE_MAX_PRIO - 1;
+	}
+
+	if (newprio != current->prio) {
+		dequeue_task(current, rqq);
+		current->prio = newprio;
+		enqueue_task(current, rqq);
+	} else
+		requeue_task(current, rqq);
+
+	if (rq->nr_running == 1)
+		schedstat_inc(rq, yld_both_empty);
+
+	/*
+	 * Since we are going to call schedule() anyway, there's
+	 * no need to preempt or enable interrupts:
+	 */
+	__release(rq->lock);
+	_raw_spin_unlock(&rq->lock);
+	preempt_enable_no_resched();
+
+	schedule();
+
+	return 0;
+}
+
+/*
+ * In-kernel yield: make sure we stay runnable, then reuse the
+ * sys_sched_yield() path above (its return value is ignored here).
+ */
+static void staircase_yield(void)
+{
+	set_current_state(TASK_RUNNING);
+	staircase_sys_yield();
+}
+
+/*
+ * Per-CPU idle task setup: park the idle task below every real priority
+ * level (STAIRCASE_MAX_PRIO is one past the lowest usable level).
+ */
+static void staircase_init_idle(task_t *idle, int cpu)
+{
+	idle->prio = STAIRCASE_MAX_PRIO;
+}
+
+#ifdef CONFIG_SMP
+/*
+ * Move a queued (not currently running) task to another CPU's runqueue.
+ * Source and destination queues will be already locked by the caller.
+ */
+static void staircase_migrate_queued_task(struct task_struct *p, int dest_cpu)
+{
+	struct runqueue *rq_src = task_rq(p);
+	struct runqueue *rq_dest = cpu_rq(dest_cpu);
+
+	/*
+	 * Sync timestamp with rq_dest's before activating.
+	 * The same thing could be achieved by doing this step
+	 * afterwards, and pretending it was a local activate.
+	 * This way is cleaner and logically correct.
+	 */
+	p->timestamp = p->timestamp - rq_src->timestamp_last_tick
+			+ rq_dest->timestamp_last_tick;
+	deactivate_task(p, rq_src);
+	set_task_cpu(p, dest_cpu);
+	activate_task(p, rq_dest, 0);
+	/* Kick the destination CPU if the migrated task should run now. */
+	preempt(p, rq_dest);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * CPU-hotplug helper: promote the idle task to the top RT priority so it
+ * is chosen ahead of everything else while the CPU is going down.
+ */
+static	void staircase_set_select_idle_first(struct runqueue *rq)
+{
+	__setscheduler(rq->idle, SCHED_FIFO, MAX_RT_PRIO-1);
+	/* Add idle task to _front_ of its priority queue */
+	__activate_idle_task(rq->idle, rq);
+}
+
+/*
+ * CPU-hotplug helper: undo set_select_idle_first() — dequeue the idle task
+ * and return it to normal policy at the idle priority level.
+ */
+static	void staircase_set_select_idle_last(struct runqueue *rq)
+{
+	deactivate_task(rq->idle, rq);
+	rq->idle->static_prio = STAIRCASE_MAX_PRIO;
+	__setscheduler(rq->idle, SCHED_NORMAL, 0);
+}
+
+/*
+ * Drain every priority list of a dead CPU's runqueue, handing each queued
+ * task to migrate_dead() until all lists are empty.
+ */
+static void staircase_migrate_dead_tasks(unsigned int dead_cpu)
+{
+	unsigned i;
+	struct runqueue *rq = cpu_rq(dead_cpu);
+
+	for (i = 0; i < STAIRCASE_MAX_PRIO; i++) {
+		struct list_head *list = &rq->qu.staircase.queue[i];
+		/* migrate_dead() unlinks the entry, so this terminates. */
+		while (!list_empty(list))
+			migrate_dead(dead_cpu, list_entry(list->next, task_t,
+				run_list));
+	}
+}
+#endif
+#endif
+
+/*
+ * Boot-time init: give the init task a full HZ worth of slice and
+ * round-robin interval before normal accounting takes over.
+ */
+static void staircase_sched_init(void)
+{
+	init_task.sdu.staircase.time_slice = HZ;
+	init_task.sdu.staircase.slice = HZ;
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
+/*
+ * SysRq-N handler hook: force an RT task back to SCHED_NORMAL, requeueing
+ * it if it was on a runqueue.  Takes the task's runqueue lock itself.
+ */
+static void staircase_normalize_rt_task(struct task_struct *p)
+{
+	int queued;
+	unsigned long flags;
+	runqueue_t *rq;
+
+	rq = task_rq_lock(p, &flags);
+
+	queued = task_is_queued(p);
+	if (queued)
+		deactivate_task(p, rq);
+	__setscheduler(p, SCHED_NORMAL, 0);
+	if (queued) {
+		__activate_task(p, rq);
+		/* The demoted task may no longer deserve the CPU. */
+		resched_task(rq->curr);
+	}
+
+	task_rq_unlock(rq, &flags);
+}
+#endif
+
+#ifdef CONFIG_SYSFS
+/* Identity converter for sysfs knobs that need no unit translation. */
+#define no_change(a) (a)
+/* Tunables exposed under sysfs: cache_delay in ms (stored as jiffies),
+ * plus the boolean compute/interactive mode switches (range 0..1). */
+SCHED_DRV_SYSFS_UINT_RW(cache_delay, msecs_to_jiffies, jiffies_to_msecs, 1, 1000);
+SCHED_DRV_SYSFS_UINT_RW(sched_compute, no_change, no_change, 0, 1);
+SCHED_DRV_SYSFS_UINT_RW(sched_interactive, no_change, no_change, 0, 1);
+
+/* NULL-terminated attribute list handed to the sched_drv sysfs core. */
+static struct attribute *staircase_attrs[] = {
+	&SCHED_DRV_SYSFS_ATTR(cache_delay),
+	&SCHED_DRV_SYSFS_ATTR(sched_compute),
+	&SCHED_DRV_SYSFS_ATTR(sched_interactive),
+	NULL,
+};
+#endif
+
+/*
+ * Operations table registering the staircase policy with the pluggable
+ * scheduler framework.  Optional hooks are compiled in only when the
+ * corresponding config option is enabled.
+ */
+const struct sched_drv staircase_sched_drv = {
+	.name = "staircase",
+	/* Runqueue / task lifecycle */
+	.init_runqueue_queue = staircase_init_runqueue_queue,
+	.set_oom_time_slice = staircase_set_oom_time_slice,
+	.task_timeslice = slice,
+	.wake_up_task = staircase_wake_up_task,
+	.fork = staircase_fork,
+	.wake_up_new_task = staircase_wake_up_new_task,
+	.exit = staircase_exit,
+#ifdef CONFIG_SMP
+	.move_tasks = staircase_move_tasks,
+#endif
+	/* Core scheduling */
+	.tick = staircase_tick,
+#ifdef CONFIG_SCHED_SMT
+	.head_of_queue = staircase_head_of_queue,
+	.dependent_sleeper_trumps = staircase_dependent_sleeper_trumps,
+#endif
+	.schedule = staircase_schedule,
+	.set_normal_task_nice = staircase_set_normal_task_nice,
+	.setscheduler = staircase_setscheduler,
+	.sys_yield = staircase_sys_yield,
+	.yield = staircase_yield,
+	.init_idle = staircase_init_idle,
+	.sched_init = staircase_sched_init,
+#ifdef CONFIG_SMP
+	/* Migration and CPU hotplug */
+	.migrate_queued_task = staircase_migrate_queued_task,
+#ifdef CONFIG_HOTPLUG_CPU
+	.set_select_idle_first = staircase_set_select_idle_first,
+	.set_select_idle_last = staircase_set_select_idle_last,
+	.migrate_dead_tasks = staircase_migrate_dead_tasks,
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	.normalize_rt_task = staircase_normalize_rt_task,
+#endif
+#ifdef CONFIG_SYSFS
+	.attrs = staircase_attrs,
+#endif
+};
diff -urN oldtree/mm/oom_kill.c newtree/mm/oom_kill.c
--- oldtree/mm/oom_kill.c	2006-01-03 03:21:10.000000000 +0000
+++ newtree/mm/oom_kill.c	2006-02-04 18:03:15.692968880 +0000
@@ -206,7 +206,7 @@
 	 * all the memory it needs. That way it should be able to
 	 * exit() and clear out its resources quickly...
 	 */
-	p->time_slice = HZ;
+	set_oom_time_slice(p, HZ);
 	set_tsk_thread_flag(p, TIF_MEMDIE);
 
 	force_sig(SIGKILL, p);
