diff -urN oldtree/fs/nfs/inode.c newtree/fs/nfs/inode.c
--- oldtree/fs/nfs/inode.c	2006-03-08 18:48:01.655982500 +0000
+++ newtree/fs/nfs/inode.c	2006-03-08 18:56:29.923747250 +0000
@@ -1027,7 +1027,8 @@
 
 	rpc_clnt_sigmask(clnt, &oldmask);
 	error = wait_on_bit_lock(&nfsi->flags, NFS_INO_REVALIDATING,
-					nfs_wait_schedule, TASK_INTERRUPTIBLE);
+					nfs_wait_schedule,
+					TASK_INTERRUPTIBLE|TASK_NONINTERACTIVE);
 	rpc_clnt_sigunmask(clnt, &oldmask);
 
 	return error;
diff -urN oldtree/fs/nfs/nfs4proc.c newtree/fs/nfs/nfs4proc.c
--- oldtree/fs/nfs/nfs4proc.c	2006-03-08 18:48:01.663983000 +0000
+++ newtree/fs/nfs/nfs4proc.c	2006-03-08 18:56:29.927747500 +0000
@@ -2692,7 +2692,7 @@
 	rpc_clnt_sigmask(clnt, &oldset);
 	res = wait_on_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER,
 			nfs4_wait_bit_interruptible,
-			TASK_INTERRUPTIBLE);
+			TASK_INTERRUPTIBLE|TASK_NONINTERACTIVE);
 	rpc_clnt_sigunmask(clnt, &oldset);
 	return res;
 }
diff -urN oldtree/fs/nfs/pagelist.c newtree/fs/nfs/pagelist.c
--- oldtree/fs/nfs/pagelist.c	2006-03-08 18:48:01.667983250 +0000
+++ newtree/fs/nfs/pagelist.c	2006-03-08 18:56:29.931747750 +0000
@@ -216,7 +216,8 @@
 	 */
 	rpc_clnt_sigmask(clnt, &oldmask);
 	ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
-			nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE);
+			nfs_wait_bit_interruptible,
+			TASK_INTERRUPTIBLE|TASK_NONINTERACTIVE);
 	rpc_clnt_sigunmask(clnt, &oldmask);
 out:
 	return ret;
diff -urN oldtree/fs/nfs/write.c newtree/fs/nfs/write.c
--- oldtree/fs/nfs/write.c	2006-03-08 18:48:01.671983500 +0000
+++ newtree/fs/nfs/write.c	2006-03-08 18:56:29.935748000 +0000
@@ -644,7 +644,8 @@
 		sigset_t oldset;
 
 		rpc_clnt_sigmask(clnt, &oldset);
-		prepare_to_wait(&nfs_write_congestion, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(&nfs_write_congestion, &wait,
+				TASK_INTERRUPTIBLE|TASK_NONINTERACTIVE);
 		if (bdi_write_congested(bdi)) {
 			if (signalled())
 				ret = -ERESTARTSYS;
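
The four NFS hunks above make the same change: the sleep state handed to the wait primitives gains TASK_NONINTERACTIVE, so time spent blocked on NFS I/O (bit waits, PG_BUSY page waits, write congestion) is not credited as interactive sleep by the scheduler's interactivity estimator. A minimal sketch of the same pattern for an open-coded wait follows; it is illustrative only, and wq, done and ret are placeholders rather than anything defined by this patch:

	DEFINE_WAIT(wait);
	int ret = 0;

	for (;;) {
		prepare_to_wait(&wq, &wait,
				TASK_INTERRUPTIBLE|TASK_NONINTERACTIVE);
		if (done)
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	finish_wait(&wq, &wait);
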
diff -urN oldtree/fs/proc/array.c newtree/fs/proc/array.c
--- oldtree/fs/proc/array.c	2006-03-08 18:47:15.261083000 +0000
+++ newtree/fs/proc/array.c	2006-03-08 18:56:29.939748250 +0000
@@ -165,7 +165,6 @@
 	read_lock(&tasklist_lock);
 	buffer += sprintf(buffer,
 		"State:\t%s\n"
-		"SleepAVG:\t%lu%%\n"
 		"Tgid:\t%d\n"
 		"Pid:\t%d\n"
 		"PPid:\t%d\n"
@@ -173,7 +172,6 @@
 		"Uid:\t%d\t%d\t%d\t%d\n"
 		"Gid:\t%d\t%d\t%d\t%d\n",
 		get_task_state(p),
-		(p->sleep_avg/1024)*100/(1020000000/1024),
 	       	p->tgid,
 		p->pid, pid_alive(p) ? p->group_leader->real_parent->tgid : 0,
 		pid_alive(p) && p->ptrace ? p->parent->pid : 0,
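
The removed "SleepAVG" line scaled the nanosecond-based p->sleep_avg against a ~1.02 s ceiling into a 0-100 percentage; for example, sleep_avg = 510000000 gives (510000000/1024)*100/(1020000000/1024), roughly 50. The field is dropped here, presumably because the scheduler configurations this patchset targets no longer maintain p->sleep_avg.
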
diff -urN oldtree/fs/proc/base.c newtree/fs/proc/base.c
--- oldtree/fs/proc/base.c	2006-03-08 18:48:01.735987500 +0000
+++ newtree/fs/proc/base.c	2006-03-08 18:56:29.951749000 +0000
@@ -70,6 +70,7 @@
 #include <linux/ptrace.h>
 #include <linux/seccomp.h>
 #include <linux/cpuset.h>
+#include <linux/sched_task.h>
 #include <linux/audit.h>
 #include <linux/poll.h>
 #include "internal.h"
@@ -166,6 +167,10 @@
 #ifdef CONFIG_CPUSETS
 	PROC_TID_CPUSET,
 #endif
+#ifdef CONFIG_CPUSCHED_SPA
+	PROC_TID_CPU_RATE_CAP,
+	PROC_TID_CPU_RATE_HARD_CAP,
+#endif
 #ifdef CONFIG_SECURITY
 	PROC_TID_ATTR,
 	PROC_TID_ATTR_CURRENT,
@@ -279,6 +284,10 @@
 #ifdef CONFIG_AUDITSYSCALL
 	E(PROC_TID_LOGINUID, "loginuid", S_IFREG|S_IWUSR|S_IRUGO),
 #endif
+#ifdef CONFIG_CPUSCHED_SPA
+	E(PROC_TID_CPU_RATE_CAP,  "cpu_rate_cap",   S_IFREG|S_IRUGO|S_IWUSR),
+	E(PROC_TID_CPU_RATE_HARD_CAP,  "cpu_rate_hard_cap",   S_IFREG|S_IRUGO|S_IWUSR),
+#endif
 	{0,0,NULL,0}
 };
 
@@ -1018,6 +1027,116 @@
 };
 #endif /* CONFIG_SECCOMP */
 
+#ifdef CONFIG_CPUSCHED_SPA
+static ssize_t cpu_rate_cap_read(struct file * file, char __user * buf,
+			size_t count, loff_t *ppos)
+{
+	struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
+	char buffer[64];
+	size_t len;
+	unsigned int cppt;
+
+	if (!task)
+		return -ESRCH;
+	cppt = get_cpu_rate_cap(task);
+	put_task_struct(task);
+	if (*ppos)
+		return 0;
+	*ppos = len = sprintf(buffer, "%u\n", cppt);
+	if (copy_to_user(buf, buffer, len))
+		return -EFAULT;
+
+	return len;
+}
+
+static ssize_t cpu_rate_cap_write(struct file * file, const char __user * buf,
+			 size_t count, loff_t *ppos)
+{
+	struct task_struct *task;
+	char buffer[64] = "";
+	char *endptr = NULL;
+	unsigned long cppt;
+	int res;
+
+	if ((count > sizeof(buffer) - 1) || *ppos)
+		return -EFBIG;
+	if (copy_from_user(buffer, buf, count))
+		return -EFAULT;
+	cppt = simple_strtoul(buffer, &endptr, 0);
+	if ((endptr == buffer) || (cppt == ULONG_MAX))
+		return -EINVAL;
+
+	task = get_proc_task(file->f_dentry->d_inode);
+	if (!task)
+		return -ESRCH;
+	res = set_cpu_rate_cap(task, cppt);
+	put_task_struct(task);
+	if (res != 0)
+		return res;
+
+	return count;
+}
+
+static struct file_operations proc_cpu_rate_cap_operations = {
+	.read		= cpu_rate_cap_read,
+	.write		= cpu_rate_cap_write,
+};
+
+static ssize_t cpu_rate_hard_cap_read(struct file * file, char __user * buf,
+			size_t count, loff_t *ppos)
+{
+	struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
+	char buffer[64];
+	size_t len;
+	unsigned int hcppt;
+
+	if (!task)
+		return -ESRCH;
+	hcppt = get_cpu_rate_hard_cap(task);
+	put_task_struct(task);
+	if (*ppos)
+		return 0;
+	*ppos = len = sprintf(buffer, "%u\n", hcppt);
+	if (copy_to_user(buf, buffer, len))
+		return -EFAULT;
+
+	return len;
+}
+
+static ssize_t cpu_rate_hard_cap_write(struct file * file, const char __user * buf,
+			 size_t count, loff_t *ppos)
+{
+	struct task_struct *task;
+	char buffer[64] = "";
+	char *endptr = NULL;
+	unsigned long hcppt;
+	int res;
+
+	if ((count > sizeof(buffer) - 1) || *ppos)
+		return -EFBIG;
+	if (copy_from_user(buffer, buf, count))
+		return -EFAULT;
+	hcppt = simple_strtoul(buffer, &endptr, 0);
+	if ((endptr == buffer) || (hcppt == ULONG_MAX))
+		return -EINVAL;
+
+	task = get_proc_task(file->f_dentry->d_inode);
+	if (!task)
+		return -ESRCH;
+	res = set_cpu_rate_hard_cap(task, hcppt);
+	put_task_struct(task);
+	if (res != 0)
+		return res;
+
+	return count;
+}
+
+static struct file_operations proc_cpu_rate_hard_cap_operations = {
+	.read		= cpu_rate_hard_cap_read,
+	.write		= cpu_rate_hard_cap_write,
+};
+#endif
+
 static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
 	struct inode *inode = dentry->d_inode;
@@ -1743,6 +1862,14 @@
 			inode->i_fop = &proc_loginuid_operations;
 			break;
 #endif
+#ifdef CONFIG_CPUSCHED_SPA
+		case PROC_TID_CPU_RATE_CAP:
+			inode->i_fop = &proc_cpu_rate_cap_operations;
+			break;
+		case PROC_TID_CPU_RATE_HARD_CAP:
+			inode->i_fop = &proc_cpu_rate_hard_cap_operations;
+			break;
+#endif
 		default:
 			printk("procfs: impossible type (%d)",p->type);
 			iput(inode);
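
The two cpu_rate_cap entries above are added to tid_base_stuff, so with CONFIG_CPUSCHED_SPA they appear per thread as /proc/<tgid>/task/<tid>/cpu_rate_cap and cpu_rate_hard_cap. A userspace usage sketch follows (for the main thread, tid == pid); the accepted range and units are whatever the SPA scheduler's set_cpu_rate_cap()/set_cpu_rate_hard_cap() enforce, which this patch does not include:

	#include <stdio.h>
	#include <unistd.h>

	/* Cap the calling process's main thread; returns 0 on success. */
	static int set_main_thread_cpu_rate_cap(unsigned int cap)
	{
		char path[64];
		FILE *f;
		int pid = getpid();

		snprintf(path, sizeof(path), "/proc/%d/task/%d/cpu_rate_cap",
			 pid, pid);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fprintf(f, "%u\n", cap);
		return fclose(f);
	}
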
diff -urN oldtree/fs/proc/base.c.orig newtree/fs/proc/base.c.orig
--- oldtree/fs/proc/base.c.orig	1970-01-01 00:00:00.000000000 +0000
+++ newtree/fs/proc/base.c.orig	2006-03-08 18:48:01.000000000 +0000
@@ -0,0 +1,2311 @@
+/*
+ *  linux/fs/proc/base.c
+ *
+ *  Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ *  proc base directory handling functions
+ *
+ *  1999, Al Viro. Rewritten. Now it covers the whole per-process part.
+ *  Instead of using magical inumbers to determine the kind of object
+ *  we allocate and fill in-core inodes upon lookup. They don't even
+ *  go into icache. We cache the reference to task_struct upon lookup too.
+ *  Eventually it should become a filesystem in its own. We don't use the
+ *  rest of procfs anymore.
+ *
+ *
+ *  Changelog:
+ *  17-Jan-2005
+ *  Allan Bezerra
+ *  Bruna Moreira <bruna.moreira@indt.org.br>
+ *  Edjard Mota <edjard.mota@indt.org.br>
+ *  Ilias Biris <ilias.biris@indt.org.br>
+ *  Mauricio Lin <mauricio.lin@indt.org.br>
+ *
+ *  Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
+ *
+ *  A new process specific entry (smaps) included in /proc. It shows the
+ *  size of rss for each memory area. The maps entry lacks information
+ *  about physical memory size (rss) for each mapped file, i.e.,
+ *  rss information for executables and library files.
+ *  This additional information is useful for any tools that need to know
+ *  about physical memory consumption for a process specific library.
+ *
+ *  Changelog:
+ *  21-Feb-2005
+ *  Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
+ *  Pud inclusion in the page table walking.
+ *
+ *  ChangeLog:
+ *  10-Mar-2005
+ *  10LE Instituto Nokia de Tecnologia - INdT:
+ *  A better way to walk through the page table, as suggested by Hugh Dickins.
+ *
+ *  Simo Piiroinen <simo.piiroinen@nokia.com>:
+ *  Smaps information related to shared, private, clean and dirty pages.
+ *
+ *  Paul Mundt <paul.mundt@nokia.com>:
+ *  Overall revision about smaps.
+ */
+
+#include <asm/uaccess.h>
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/init.h>
+#include <linux/capability.h>
+#include <linux/file.h>
+#include <linux/string.h>
+#include <linux/seq_file.h>
+#include <linux/namei.h>
+#include <linux/namespace.h>
+#include <linux/mm.h>
+#include <linux/smp_lock.h>
+#include <linux/rcupdate.h>
+#include <linux/kallsyms.h>
+#include <linux/mount.h>
+#include <linux/security.h>
+#include <linux/ptrace.h>
+#include <linux/seccomp.h>
+#include <linux/cpuset.h>
+#include <linux/audit.h>
+#include <linux/poll.h>
+#include "internal.h"
+
+/* NOTE:
+ *	Implementing inode permission operations in /proc is almost
+ *	certainly an error.  Permission checks need to happen during
+ *	each system call not at open time.  The reason is that most of
+ *	what we wish to check for permissions in /proc varies at runtime.
+ *
+ *	The classic example of a problem is opening file descriptors
+ *	in /proc for a task before it execs a suid executable.
+ */
+
+/*
+ * For hysterical raisins we keep the same inumbers as in the old procfs.
+ * Feel free to change the macro below - just keep the range distinct from
+ * inumbers of the rest of procfs (currently those are in 0x0000--0xffff).
+ * As soon as we'll get a separate superblock we will be able to forget
+ * about magical ranges too.
+ */
+
+#define fake_ino(pid,ino) (((pid)<<16)|(ino))
+
+enum pid_directory_inos {
+	PROC_TGID_INO = 2,
+	PROC_TGID_TASK,
+	PROC_TGID_STATUS,
+	PROC_TGID_MEM,
+#ifdef CONFIG_SECCOMP
+	PROC_TGID_SECCOMP,
+#endif
+	PROC_TGID_CWD,
+	PROC_TGID_ROOT,
+	PROC_TGID_EXE,
+	PROC_TGID_FD,
+	PROC_TGID_ENVIRON,
+	PROC_TGID_AUXV,
+	PROC_TGID_CMDLINE,
+	PROC_TGID_STAT,
+	PROC_TGID_STATM,
+	PROC_TGID_MAPS,
+	PROC_TGID_NUMA_MAPS,
+	PROC_TGID_MOUNTS,
+	PROC_TGID_MOUNTSTATS,
+	PROC_TGID_WCHAN,
+#ifdef CONFIG_MMU
+	PROC_TGID_SMAPS,
+#endif
+#ifdef CONFIG_SCHEDSTATS
+	PROC_TGID_SCHEDSTAT,
+#endif
+#ifdef CONFIG_CPUSETS
+	PROC_TGID_CPUSET,
+#endif
+#ifdef CONFIG_SECURITY
+	PROC_TGID_ATTR,
+	PROC_TGID_ATTR_CURRENT,
+	PROC_TGID_ATTR_PREV,
+	PROC_TGID_ATTR_EXEC,
+	PROC_TGID_ATTR_FSCREATE,
+#endif
+#ifdef CONFIG_AUDITSYSCALL
+	PROC_TGID_LOGINUID,
+#endif
+	PROC_TGID_OOM_SCORE,
+	PROC_TGID_OOM_ADJUST,
+	PROC_TID_INO,
+	PROC_TID_STATUS,
+	PROC_TID_MEM,
+#ifdef CONFIG_SECCOMP
+	PROC_TID_SECCOMP,
+#endif
+	PROC_TID_CWD,
+	PROC_TID_ROOT,
+	PROC_TID_EXE,
+	PROC_TID_FD,
+	PROC_TID_ENVIRON,
+	PROC_TID_AUXV,
+	PROC_TID_CMDLINE,
+	PROC_TID_STAT,
+	PROC_TID_STATM,
+	PROC_TID_MAPS,
+	PROC_TID_NUMA_MAPS,
+	PROC_TID_MOUNTS,
+	PROC_TID_MOUNTSTATS,
+	PROC_TID_WCHAN,
+#ifdef CONFIG_MMU
+	PROC_TID_SMAPS,
+#endif
+#ifdef CONFIG_SCHEDSTATS
+	PROC_TID_SCHEDSTAT,
+#endif
+#ifdef CONFIG_CPUSETS
+	PROC_TID_CPUSET,
+#endif
+#ifdef CONFIG_SECURITY
+	PROC_TID_ATTR,
+	PROC_TID_ATTR_CURRENT,
+	PROC_TID_ATTR_PREV,
+	PROC_TID_ATTR_EXEC,
+	PROC_TID_ATTR_FSCREATE,
+#endif
+#ifdef CONFIG_AUDITSYSCALL
+	PROC_TID_LOGINUID,
+#endif
+	PROC_TID_OOM_SCORE,
+	PROC_TID_OOM_ADJUST,
+
+	/* Add new entries before this */
+	PROC_TID_FD_DIR = 0x8000,	/* 0x8000-0xffff */
+};
+
+/* Worst case buffer size needed for holding an integer. */
+#define PROC_NUMBUF 10
+
+struct pid_entry {
+	int type;
+	int len;
+	char *name;
+	mode_t mode;
+};
+
+#define E(type,name,mode) {(type),sizeof(name)-1,(name),(mode)}
+
+static struct pid_entry tgid_base_stuff[] = {
+	E(PROC_TGID_TASK,      "task",    S_IFDIR|S_IRUGO|S_IXUGO),
+	E(PROC_TGID_FD,        "fd",      S_IFDIR|S_IRUSR|S_IXUSR),
+	E(PROC_TGID_ENVIRON,   "environ", S_IFREG|S_IRUSR),
+	E(PROC_TGID_AUXV,      "auxv",	  S_IFREG|S_IRUSR),
+	E(PROC_TGID_STATUS,    "status",  S_IFREG|S_IRUGO),
+	E(PROC_TGID_CMDLINE,   "cmdline", S_IFREG|S_IRUGO),
+	E(PROC_TGID_STAT,      "stat",    S_IFREG|S_IRUGO),
+	E(PROC_TGID_STATM,     "statm",   S_IFREG|S_IRUGO),
+	E(PROC_TGID_MAPS,      "maps",    S_IFREG|S_IRUGO),
+#ifdef CONFIG_NUMA
+	E(PROC_TGID_NUMA_MAPS, "numa_maps", S_IFREG|S_IRUGO),
+#endif
+	E(PROC_TGID_MEM,       "mem",     S_IFREG|S_IRUSR|S_IWUSR),
+#ifdef CONFIG_SECCOMP
+	E(PROC_TGID_SECCOMP,   "seccomp", S_IFREG|S_IRUSR|S_IWUSR),
+#endif
+	E(PROC_TGID_CWD,       "cwd",     S_IFLNK|S_IRWXUGO),
+	E(PROC_TGID_ROOT,      "root",    S_IFLNK|S_IRWXUGO),
+	E(PROC_TGID_EXE,       "exe",     S_IFLNK|S_IRWXUGO),
+	E(PROC_TGID_MOUNTS,    "mounts",  S_IFREG|S_IRUGO),
+	E(PROC_TGID_MOUNTSTATS, "mountstats", S_IFREG|S_IRUSR),
+#ifdef CONFIG_MMU
+	E(PROC_TGID_SMAPS,     "smaps",   S_IFREG|S_IRUGO),
+#endif
+#ifdef CONFIG_SECURITY
+	E(PROC_TGID_ATTR,      "attr",    S_IFDIR|S_IRUGO|S_IXUGO),
+#endif
+#ifdef CONFIG_KALLSYMS
+	E(PROC_TGID_WCHAN,     "wchan",   S_IFREG|S_IRUGO),
+#endif
+#ifdef CONFIG_SCHEDSTATS
+	E(PROC_TGID_SCHEDSTAT, "schedstat", S_IFREG|S_IRUGO),
+#endif
+#ifdef CONFIG_CPUSETS
+	E(PROC_TGID_CPUSET,    "cpuset",  S_IFREG|S_IRUGO),
+#endif
+	E(PROC_TGID_OOM_SCORE, "oom_score",S_IFREG|S_IRUGO),
+	E(PROC_TGID_OOM_ADJUST,"oom_adj", S_IFREG|S_IRUGO|S_IWUSR),
+#ifdef CONFIG_AUDITSYSCALL
+	E(PROC_TGID_LOGINUID, "loginuid", S_IFREG|S_IWUSR|S_IRUGO),
+#endif
+	{0,0,NULL,0}
+};
+static struct pid_entry tid_base_stuff[] = {
+	E(PROC_TID_FD,         "fd",      S_IFDIR|S_IRUSR|S_IXUSR),
+	E(PROC_TID_ENVIRON,    "environ", S_IFREG|S_IRUSR),
+	E(PROC_TID_AUXV,       "auxv",	  S_IFREG|S_IRUSR),
+	E(PROC_TID_STATUS,     "status",  S_IFREG|S_IRUGO),
+	E(PROC_TID_CMDLINE,    "cmdline", S_IFREG|S_IRUGO),
+	E(PROC_TID_STAT,       "stat",    S_IFREG|S_IRUGO),
+	E(PROC_TID_STATM,      "statm",   S_IFREG|S_IRUGO),
+	E(PROC_TID_MAPS,       "maps",    S_IFREG|S_IRUGO),
+#ifdef CONFIG_NUMA
+	E(PROC_TID_NUMA_MAPS,  "numa_maps",    S_IFREG|S_IRUGO),
+#endif
+	E(PROC_TID_MEM,        "mem",     S_IFREG|S_IRUSR|S_IWUSR),
+#ifdef CONFIG_SECCOMP
+	E(PROC_TID_SECCOMP,    "seccomp", S_IFREG|S_IRUSR|S_IWUSR),
+#endif
+	E(PROC_TID_CWD,        "cwd",     S_IFLNK|S_IRWXUGO),
+	E(PROC_TID_ROOT,       "root",    S_IFLNK|S_IRWXUGO),
+	E(PROC_TID_EXE,        "exe",     S_IFLNK|S_IRWXUGO),
+	E(PROC_TID_MOUNTS,     "mounts",  S_IFREG|S_IRUGO),
+#ifdef CONFIG_MMU
+	E(PROC_TID_SMAPS,      "smaps",   S_IFREG|S_IRUGO),
+#endif
+#ifdef CONFIG_SECURITY
+	E(PROC_TID_ATTR,       "attr",    S_IFDIR|S_IRUGO|S_IXUGO),
+#endif
+#ifdef CONFIG_KALLSYMS
+	E(PROC_TID_WCHAN,      "wchan",   S_IFREG|S_IRUGO),
+#endif
+#ifdef CONFIG_SCHEDSTATS
+	E(PROC_TID_SCHEDSTAT, "schedstat",S_IFREG|S_IRUGO),
+#endif
+#ifdef CONFIG_CPUSETS
+	E(PROC_TID_CPUSET,     "cpuset",  S_IFREG|S_IRUGO),
+#endif
+	E(PROC_TID_OOM_SCORE,  "oom_score",S_IFREG|S_IRUGO),
+	E(PROC_TID_OOM_ADJUST, "oom_adj", S_IFREG|S_IRUGO|S_IWUSR),
+#ifdef CONFIG_AUDITSYSCALL
+	E(PROC_TID_LOGINUID, "loginuid", S_IFREG|S_IWUSR|S_IRUGO),
+#endif
+	{0,0,NULL,0}
+};
+
+#ifdef CONFIG_SECURITY
+static struct pid_entry tgid_attr_stuff[] = {
+	E(PROC_TGID_ATTR_CURRENT,  "current",  S_IFREG|S_IRUGO|S_IWUGO),
+	E(PROC_TGID_ATTR_PREV,     "prev",     S_IFREG|S_IRUGO),
+	E(PROC_TGID_ATTR_EXEC,     "exec",     S_IFREG|S_IRUGO|S_IWUGO),
+	E(PROC_TGID_ATTR_FSCREATE, "fscreate", S_IFREG|S_IRUGO|S_IWUGO),
+	{0,0,NULL,0}
+};
+static struct pid_entry tid_attr_stuff[] = {
+	E(PROC_TID_ATTR_CURRENT,   "current",  S_IFREG|S_IRUGO|S_IWUGO),
+	E(PROC_TID_ATTR_PREV,      "prev",     S_IFREG|S_IRUGO),
+	E(PROC_TID_ATTR_EXEC,      "exec",     S_IFREG|S_IRUGO|S_IWUGO),
+	E(PROC_TID_ATTR_FSCREATE,  "fscreate", S_IFREG|S_IRUGO|S_IWUGO),
+	{0,0,NULL,0}
+};
+#endif
+
+#undef E
+
+static int proc_fd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
+{
+	struct task_struct *task = get_proc_task(inode);
+	struct files_struct *files = NULL;
+	struct file *file;
+	int fd = proc_fd(inode);
+
+	if (task) {
+		files = get_files_struct(task);
+		put_task_struct(task);
+	}
+	if (files) {
+		rcu_read_lock();
+		file = fcheck_files(files, fd);
+		if (file) {
+			*mnt = mntget(file->f_vfsmnt);
+			*dentry = dget(file->f_dentry);
+			rcu_read_unlock();
+			put_files_struct(files);
+			return 0;
+		}
+		rcu_read_unlock();
+		put_files_struct(files);
+	}
+	return -ENOENT;
+}
+
+static struct fs_struct *get_fs_struct(struct task_struct *task)
+{
+	struct fs_struct *fs;
+	task_lock(task);
+	fs = task->fs;
+	if(fs)
+		atomic_inc(&fs->count);
+	task_unlock(task);
+	return fs;
+}
+
+static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
+{
+	struct task_struct *task = get_proc_task(inode);
+	struct fs_struct *fs = NULL;
+	int result = -ENOENT;
+
+	if (task) {
+		fs = get_fs_struct(task);
+		put_task_struct(task);
+	}
+	if (fs) {
+		read_lock(&fs->lock);
+		*mnt = mntget(fs->pwdmnt);
+		*dentry = dget(fs->pwd);
+		read_unlock(&fs->lock);
+		result = 0;
+		put_fs_struct(fs);
+	}
+	return result;
+}
+
+static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
+{
+	struct task_struct *task = get_proc_task(inode);
+	struct fs_struct *fs = NULL;
+	int result = -ENOENT;
+
+	if (task) {
+		fs = get_fs_struct(task);
+		put_task_struct(task);
+	}
+	if (fs) {
+		read_lock(&fs->lock);
+		*mnt = mntget(fs->rootmnt);
+		*dentry = dget(fs->root);
+		read_unlock(&fs->lock);
+		result = 0;
+		put_fs_struct(fs);
+	}
+	return result;
+}
+
+#define MAY_PTRACE(task) \
+	(task == current || \
+	(task->parent == current && \
+	(task->ptrace & PT_PTRACED) && \
+	 (task->state == TASK_STOPPED || task->state == TASK_TRACED) && \
+	 security_ptrace(current,task) == 0))
+
+static int proc_pid_environ(struct task_struct *task, char * buffer)
+{
+	int res = 0;
+	struct mm_struct *mm = get_task_mm(task);
+	if (mm) {
+		unsigned int len = mm->env_end - mm->env_start;
+		if (len > PAGE_SIZE)
+			len = PAGE_SIZE;
+		res = access_process_vm(task, mm->env_start, buffer, len, 0);
+		if (!ptrace_may_attach(task))
+			res = -ESRCH;
+		mmput(mm);
+	}
+	return res;
+}
+
+static int proc_pid_cmdline(struct task_struct *task, char * buffer)
+{
+	int res = 0;
+	unsigned int len;
+	struct mm_struct *mm = get_task_mm(task);
+	if (!mm)
+		goto out;
+	if (!mm->arg_end)
+		goto out_mm;	/* Shh! No looking before we're done */
+
+ 	len = mm->arg_end - mm->arg_start;
+ 
+	if (len > PAGE_SIZE)
+		len = PAGE_SIZE;
+ 
+	res = access_process_vm(task, mm->arg_start, buffer, len, 0);
+
+	// If the nul at the end of args has been overwritten, then
+	// assume application is using setproctitle(3).
+	if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) {
+		len = strnlen(buffer, res);
+		if (len < res) {
+		    res = len;
+		} else {
+			len = mm->env_end - mm->env_start;
+			if (len > PAGE_SIZE - res)
+				len = PAGE_SIZE - res;
+			res += access_process_vm(task, mm->env_start, buffer+res, len, 0);
+			res = strnlen(buffer, res);
+		}
+	}
+out_mm:
+	mmput(mm);
+out:
+	return res;
+}
+
+static int proc_pid_auxv(struct task_struct *task, char *buffer)
+{
+	int res = 0;
+	struct mm_struct *mm = get_task_mm(task);
+	if (mm) {
+		unsigned int nwords = 0;
+		do
+			nwords += 2;
+		while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
+		res = nwords * sizeof(mm->saved_auxv[0]);
+		if (res > PAGE_SIZE)
+			res = PAGE_SIZE;
+		memcpy(buffer, mm->saved_auxv, res);
+		mmput(mm);
+	}
+	return res;
+}
+
+
+#ifdef CONFIG_KALLSYMS
+/*
+ * Provides a wchan file via kallsyms in a proper one-value-per-file format.
+ * Returns the resolved symbol.  If that fails, simply return the address.
+ */
+static int proc_pid_wchan(struct task_struct *task, char *buffer)
+{
+	char *modname;
+	const char *sym_name;
+	unsigned long wchan, size, offset;
+	char namebuf[KSYM_NAME_LEN+1];
+
+	wchan = get_wchan(task);
+
+	sym_name = kallsyms_lookup(wchan, &size, &offset, &modname, namebuf);
+	if (sym_name)
+		return sprintf(buffer, "%s", sym_name);
+	return sprintf(buffer, "%lu", wchan);
+}
+#endif /* CONFIG_KALLSYMS */
+
+#ifdef CONFIG_SCHEDSTATS
+/*
+ * Provides /proc/PID/schedstat
+ */
+static int proc_pid_schedstat(struct task_struct *task, char *buffer)
+{
+	return sprintf(buffer, "%lu %lu %lu\n",
+			task->sched_info.cpu_time,
+			task->sched_info.run_delay,
+			task->sched_info.pcnt);
+}
+#endif
+
+/* The badness from the OOM killer */
+unsigned long badness(struct task_struct *p, unsigned long uptime);
+static int proc_oom_score(struct task_struct *task, char *buffer)
+{
+	unsigned long points;
+	struct timespec uptime;
+
+	do_posix_clock_monotonic_gettime(&uptime);
+	points = badness(task, uptime.tv_sec);
+	return sprintf(buffer, "%lu\n", points);
+}
+
+/************************************************************************/
+/*                       Here the fs part begins                        */
+/************************************************************************/
+
+/* permission checks */
+static int proc_fd_access_allowed(struct inode *inode)
+{
+	struct task_struct *task;
+	int allowed = 0;
+	/* Allow access to a task's file descriptors if it is us or we
+	 * may use ptrace attach to the process and find out that
+	 * information.
+	 */
+	task = get_proc_task(inode);
+	if (task) {
+		allowed = ptrace_may_attach(task);
+		put_task_struct(task);
+	}
+	return allowed;
+}
+
+extern struct seq_operations mounts_op;
+struct proc_mounts {
+	struct seq_file m;
+	int event;
+};
+
+static int mounts_open(struct inode *inode, struct file *file)
+{
+	struct task_struct *task = get_proc_task(inode);
+	struct namespace *namespace = NULL;
+	struct proc_mounts *p;
+	int ret = -EINVAL;
+
+	if (task) {
+		task_lock(task);
+		namespace = task->namespace;
+		if (namespace)
+			get_namespace(namespace);
+		task_unlock(task);
+		put_task_struct(task);
+	}
+
+	if (namespace) {
+		ret = -ENOMEM;
+		p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
+		if (p) {
+			file->private_data = &p->m;
+			ret = seq_open(file, &mounts_op);
+			if (!ret) {
+				p->m.private = namespace;
+				p->event = namespace->event;
+				return 0;
+			}
+			kfree(p);
+		}
+		put_namespace(namespace);
+	}
+	return ret;
+}
+
+static int mounts_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *m = file->private_data;
+	struct namespace *namespace = m->private;
+	put_namespace(namespace);
+	return seq_release(inode, file);
+}
+
+static unsigned mounts_poll(struct file *file, poll_table *wait)
+{
+	struct proc_mounts *p = file->private_data;
+	struct namespace *ns = p->m.private;
+	unsigned res = 0;
+
+	poll_wait(file, &ns->poll, wait);
+
+	spin_lock(&vfsmount_lock);
+	if (p->event != ns->event) {
+		p->event = ns->event;
+		res = POLLERR;
+	}
+	spin_unlock(&vfsmount_lock);
+
+	return res;
+}
+
+static struct file_operations proc_mounts_operations = {
+	.open		= mounts_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= mounts_release,
+	.poll		= mounts_poll,
+};
+
+extern struct seq_operations mountstats_op;
+static int mountstats_open(struct inode *inode, struct file *file)
+{
+	int ret = seq_open(file, &mountstats_op);
+
+	if (!ret) {
+		struct seq_file *m = file->private_data;
+		struct namespace *namespace = NULL;
+		struct task_struct *task = get_proc_task(inode);
+
+		if (task) {
+			task_lock(task);
+			namespace = task->namespace;
+			if (namespace)
+				get_namespace(namespace);
+			task_unlock(task);
+			put_task_struct(task);
+		}
+
+		if (namespace)
+			m->private = namespace;
+		else {
+			seq_release(inode, file);
+			ret = -EINVAL;
+		}
+	}
+	return ret;
+}
+
+static struct file_operations proc_mountstats_operations = {
+	.open		= mountstats_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= mounts_release,
+};
+
+#define PROC_BLOCK_SIZE	(3*1024)		/* 4K page size but our output routines use some slack for overruns */
+
+static ssize_t proc_info_read(struct file * file, char __user * buf,
+			  size_t count, loff_t *ppos)
+{
+	struct inode * inode = file->f_dentry->d_inode;
+	unsigned long page;
+	ssize_t length;
+	struct task_struct *task = get_proc_task(inode);
+
+	length = -ESRCH;
+	if (!task)
+		goto out_no_task;
+
+	if (count > PROC_BLOCK_SIZE)
+		count = PROC_BLOCK_SIZE;
+
+	length = -ENOMEM;
+	if (!(page = __get_free_page(GFP_KERNEL)))
+		goto out;
+
+	length = PROC_I(inode)->op.proc_read(task, (char*)page);
+
+	if (length >= 0)
+		length = simple_read_from_buffer(buf, count, ppos, (char *)page, length);
+	free_page(page);
+out:
+	put_task_struct(task);
+out_no_task:
+	return length;
+}
+
+static struct file_operations proc_info_file_operations = {
+	.read		= proc_info_read,
+};
+
+static int mem_open(struct inode* inode, struct file* file)
+{
+	file->private_data = (void*)((long)current->self_exec_id);
+	return 0;
+}
+
+static ssize_t mem_read(struct file * file, char __user * buf,
+			size_t count, loff_t *ppos)
+{
+	struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
+	char *page;
+	unsigned long src = *ppos;
+	int ret = -ESRCH;
+	struct mm_struct *mm;
+
+	if (!task)
+		goto out_no_task;
+
+	if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
+		goto out;
+
+	ret = -ENOMEM;
+	page = (char *)__get_free_page(GFP_USER);
+	if (!page)
+		goto out;
+
+	ret = 0;
+ 
+	mm = get_task_mm(task);
+	if (!mm)
+		goto out_free;
+
+	ret = -EIO;
+ 
+	if (file->private_data != (void*)((long)current->self_exec_id))
+		goto out_put;
+
+	ret = 0;
+ 
+	while (count > 0) {
+		int this_len, retval;
+
+		this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
+		retval = access_process_vm(task, src, page, this_len, 0);
+		if (!retval || !MAY_PTRACE(task) || !ptrace_may_attach(task)) {
+			if (!ret)
+				ret = -EIO;
+			break;
+		}
+
+		if (copy_to_user(buf, page, retval)) {
+			ret = -EFAULT;
+			break;
+		}
+ 
+		ret += retval;
+		src += retval;
+		buf += retval;
+		count -= retval;
+	}
+	*ppos = src;
+
+out_put:
+	mmput(mm);
+out_free:
+	free_page((unsigned long) page);
+out:
+	put_task_struct(task);
+out_no_task:
+	return ret;
+}
+
+#define mem_write NULL
+
+#ifndef mem_write
+/* This is a security hazard */
+static ssize_t mem_write(struct file * file, const char * buf,
+			 size_t count, loff_t *ppos)
+{
+	int copied = 0;
+	char *page;
+	struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
+	unsigned long dst = *ppos;
+
+	copied = -ESRCH;
+	if (!task)
+		goto out_no_task;
+
+	if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
+		goto out;
+
+	copied = -ENOMEM;
+	page = (char *)__get_free_page(GFP_USER);
+	if (!page)
+		goto out;
+
+	while (count > 0) {
+		int this_len, retval;
+
+		this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
+		if (copy_from_user(page, buf, this_len)) {
+			copied = -EFAULT;
+			break;
+		}
+		retval = access_process_vm(task, dst, page, this_len, 1);
+		if (!retval) {
+			if (!copied)
+				copied = -EIO;
+			break;
+		}
+		copied += retval;
+		buf += retval;
+		dst += retval;
+		count -= retval;			
+	}
+	*ppos = dst;
+	free_page((unsigned long) page);
+out:
+	put_task_struct(task);
+out_no_task:
+	return copied;
+}
+#endif
+
+static loff_t mem_lseek(struct file * file, loff_t offset, int orig)
+{
+	switch (orig) {
+	case 0:
+		file->f_pos = offset;
+		break;
+	case 1:
+		file->f_pos += offset;
+		break;
+	default:
+		return -EINVAL;
+	}
+	force_successful_syscall_return();
+	return file->f_pos;
+}
+
+static struct file_operations proc_mem_operations = {
+	.llseek		= mem_lseek,
+	.read		= mem_read,
+	.write		= mem_write,
+	.open		= mem_open,
+};
+
+static ssize_t oom_adjust_read(struct file *file, char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
+	char buffer[PROC_NUMBUF];
+	size_t len;
+	int oom_adjust;
+	loff_t __ppos = *ppos;
+
+	if (!task)
+		return -ESRCH;
+	oom_adjust = task->oomkilladj;
+	put_task_struct(task);
+
+	len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust);
+	if (__ppos >= len)
+		return 0;
+	if (count > len-__ppos)
+		count = len-__ppos;
+	if (copy_to_user(buf, buffer + __ppos, count))
+		return -EFAULT;
+	*ppos = __ppos + count;
+	return count;
+}
+
+static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	struct task_struct *task;
+	char buffer[PROC_NUMBUF], *end;
+	int oom_adjust;
+
+	if (!capable(CAP_SYS_RESOURCE))
+		return -EPERM;
+	memset(buffer, 0, sizeof(buffer));
+	if (count > sizeof(buffer) - 1)
+		count = sizeof(buffer) - 1;
+	if (copy_from_user(buffer, buf, count))
+		return -EFAULT;
+	oom_adjust = simple_strtol(buffer, &end, 0);
+	if ((oom_adjust < -16 || oom_adjust > 15) && oom_adjust != OOM_DISABLE)
+		return -EINVAL;
+	if (*end == '\n')
+		end++;
+	task = get_proc_task(file->f_dentry->d_inode);
+	if (!task)
+		return -ESRCH;
+	task->oomkilladj = oom_adjust;
+	put_task_struct(task);
+	if (end - buffer == 0)
+		return -EIO;
+	return end - buffer;
+}
+
+static struct file_operations proc_oom_adjust_operations = {
+	.read		= oom_adjust_read,
+	.write		= oom_adjust_write,
+};
+
+#ifdef CONFIG_AUDITSYSCALL
+#define TMPBUFLEN 21
+static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
+				  size_t count, loff_t *ppos)
+{
+	struct inode * inode = file->f_dentry->d_inode;
+	struct task_struct *task = get_proc_task(inode);
+	ssize_t length;
+	char tmpbuf[TMPBUFLEN];
+
+	if (!task)
+		return -ESRCH;
+	length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
+				audit_get_loginuid(task->audit_context));
+	put_task_struct(task);
+	return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
+}
+
+static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
+				   size_t count, loff_t *ppos)
+{
+	struct inode * inode = file->f_dentry->d_inode;
+	char *page, *tmp;
+	ssize_t length;
+	uid_t loginuid;
+
+	if (!capable(CAP_AUDIT_CONTROL))
+		return -EPERM;
+
+	if (current != proc_tref(inode)->task)
+		return -EPERM;
+
+	if (count > PAGE_SIZE)
+		count = PAGE_SIZE;
+
+	if (*ppos != 0) {
+		/* No partial writes. */
+		return -EINVAL;
+	}
+	page = (char*)__get_free_page(GFP_USER);
+	if (!page)
+		return -ENOMEM;
+	length = -EFAULT;
+	if (copy_from_user(page, buf, count))
+		goto out_free_page;
+
+	loginuid = simple_strtoul(page, &tmp, 10);
+	if (tmp == page) {
+		length = -EINVAL;
+		goto out_free_page;
+
+	}
+	length = audit_set_loginuid(current, loginuid);
+	if (likely(length == 0))
+		length = count;
+
+out_free_page:
+	free_page((unsigned long) page);
+	return length;
+}
+
+static struct file_operations proc_loginuid_operations = {
+	.read		= proc_loginuid_read,
+	.write		= proc_loginuid_write,
+};
+#endif
+
+#ifdef CONFIG_SECCOMP
+static ssize_t seccomp_read(struct file *file, char __user *buf,
+			    size_t count, loff_t *ppos)
+{
+	struct task_struct *tsk = get_proc_task(file->f_dentry->d_inode);
+	char __buf[20];
+	loff_t __ppos = *ppos;
+	size_t len;
+
+	if (!tsk)
+		return -ESRCH;
+	/* no need to print the trailing zero, so use only len */
+	len = sprintf(__buf, "%u\n", tsk->seccomp.mode);
+	put_task_struct(tsk);
+	if (__ppos >= len)
+		return 0;
+	if (count > len - __ppos)
+		count = len - __ppos;
+	if (copy_to_user(buf, __buf + __ppos, count))
+		return -EFAULT;
+	*ppos = __ppos + count;
+	return count;
+}
+
+static ssize_t seccomp_write(struct file *file, const char __user *buf,
+			     size_t count, loff_t *ppos)
+{
+	struct task_struct *tsk = get_proc_task(file->f_dentry->d_inode);
+	char __buf[20], *end;
+	unsigned int seccomp_mode;
+	ssize_t result;
+
+	result = -ESRCH;
+	if (!tsk)
+		goto out_no_task;
+
+	/* can set it only once to be even more secure */
+	result = -EPERM;
+	if (unlikely(tsk->seccomp.mode))
+		goto out;
+
+	result = -EFAULT;
+	memset(__buf, 0, sizeof(__buf));
+	count = min(count, sizeof(__buf) - 1);
+	if (copy_from_user(__buf, buf, count))
+		goto out;
+
+	seccomp_mode = simple_strtoul(__buf, &end, 0);
+	if (*end == '\n')
+		end++;
+	result = -EINVAL;
+	if (seccomp_mode && seccomp_mode <= NR_SECCOMP_MODES) {
+		tsk->seccomp.mode = seccomp_mode;
+		set_tsk_thread_flag(tsk, TIF_SECCOMP);
+	} else
+		goto out;
+	result = -EIO;
+	if (unlikely(!(end - __buf)))
+		goto out;
+	result = end - __buf;
+out:
+	put_task_struct(tsk);
+out_no_task:
+	return result;
+}
+
+static struct file_operations proc_seccomp_operations = {
+	.read		= seccomp_read,
+	.write		= seccomp_write,
+};
+#endif /* CONFIG_SECCOMP */
+
+static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	struct inode *inode = dentry->d_inode;
+	int error = -EACCES;
+
+	/* We don't need a base pointer in the /proc filesystem */
+	path_release(nd);
+
+	/* Are we allowed to snoop on the tasks file descriptors? */
+	if (!proc_fd_access_allowed(inode))
+		goto out;
+
+	error = PROC_I(inode)->op.proc_get_link(inode, &nd->dentry, &nd->mnt);
+	nd->last_type = LAST_BIND;
+out:
+	return ERR_PTR(error);
+}
+
+static int do_proc_readlink(struct dentry *dentry, struct vfsmount *mnt,
+			    char __user *buffer, int buflen)
+{
+	struct inode * inode;
+	char *tmp = (char*)__get_free_page(GFP_KERNEL), *path;
+	int len;
+
+	if (!tmp)
+		return -ENOMEM;
+		
+	inode = dentry->d_inode;
+	path = d_path(dentry, mnt, tmp, PAGE_SIZE);
+	len = PTR_ERR(path);
+	if (IS_ERR(path))
+		goto out;
+	len = tmp + PAGE_SIZE - 1 - path;
+
+	if (len > buflen)
+		len = buflen;
+	if (copy_to_user(buffer, path, len))
+		len = -EFAULT;
+ out:
+	free_page((unsigned long)tmp);
+	return len;
+}
+
+static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int buflen)
+{
+	int error = -EACCES;
+	struct inode *inode = dentry->d_inode;
+	struct dentry *de;
+	struct vfsmount *mnt = NULL;
+
+	/* Are we allowed to snoop on the tasks file descriptors? */
+	if (!proc_fd_access_allowed(inode))
+		goto out;
+
+	error = PROC_I(inode)->op.proc_get_link(inode, &de, &mnt);
+	if (error)
+		goto out;
+
+	error = do_proc_readlink(de, mnt, buffer, buflen);
+	dput(de);
+	mntput(mnt);
+out:
+	return error;
+}
+
+static struct inode_operations proc_pid_link_inode_operations = {
+	.readlink	= proc_pid_readlink,
+	.follow_link	= proc_pid_follow_link
+};
+
+static int proc_readfd(struct file * filp, void * dirent, filldir_t filldir)
+{
+	struct dentry *dentry = filp->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	struct task_struct *p = get_proc_task(inode);
+	unsigned int fd, tid, ino;
+	int retval;
+	char buf[PROC_NUMBUF];
+	struct files_struct * files;
+	struct fdtable *fdt;
+
+	retval = -ENOENT;
+	if (!p)
+		goto out_no_task;
+	retval = 0;
+	tid = p->pid;
+
+	fd = filp->f_pos;
+	switch (fd) {
+		case 0:
+			if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
+				goto out;
+			filp->f_pos++;
+		case 1:
+			ino = parent_ino(dentry);
+			if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
+				goto out;
+			filp->f_pos++;
+		default:
+			files = get_files_struct(p);
+			if (!files)
+				goto out;
+			rcu_read_lock();
+			fdt = files_fdtable(files);
+			for (fd = filp->f_pos-2;
+			     fd < fdt->max_fds;
+			     fd++, filp->f_pos++) {
+				unsigned int i,j;
+
+				if (!fcheck_files(files, fd))
+					continue;
+				rcu_read_unlock();
+
+				j = PROC_NUMBUF;
+				i = fd;
+				do {
+					j--;
+					buf[j] = '0' + (i % 10);
+					i /= 10;
+				} while (i);
+
+				ino = fake_ino(tid, PROC_TID_FD_DIR + fd);
+				if (filldir(dirent, buf+j, PROC_NUMBUF-j, fd+2, ino, DT_LNK) < 0) {
+					rcu_read_lock();
+					break;
+				}
+				rcu_read_lock();
+			}
+			rcu_read_unlock();
+			put_files_struct(files);
+	}
+out:
+	put_task_struct(p);
+out_no_task:
+	return retval;
+}
+
+static int proc_pident_readdir(struct file *filp,
+		void *dirent, filldir_t filldir,
+		struct pid_entry *ents, unsigned int nents)
+{
+	int i;
+	int pid;
+	struct dentry *dentry = filp->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	struct task_struct *task = get_proc_task(inode);
+	struct pid_entry *p;
+	ino_t ino;
+	int ret;
+
+	ret = -ENOENT;
+	if (!task)
+		goto out;
+
+	ret = 0;
+	pid = task->pid;
+	put_task_struct(task);
+	i = filp->f_pos;
+	switch (i) {
+	case 0:
+		ino = inode->i_ino;
+		if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
+			goto out;
+		i++;
+		filp->f_pos++;
+		/* fall through */
+	case 1:
+		ino = parent_ino(dentry);
+		if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
+			goto out;
+		i++;
+		filp->f_pos++;
+		/* fall through */
+	default:
+		i -= 2;
+		if (i >= nents) {
+			ret = 1;
+			goto out;
+		}
+		p = ents + i;
+		while (p->name) {
+			if (filldir(dirent, p->name, p->len, filp->f_pos,
+				    fake_ino(pid, p->type), p->mode >> 12) < 0)
+				goto out;
+			filp->f_pos++;
+			p++;
+		}
+	}
+
+	ret = 1;
+out:
+	return ret;
+}
+
+static int proc_tgid_base_readdir(struct file * filp,
+			     void * dirent, filldir_t filldir)
+{
+	return proc_pident_readdir(filp,dirent,filldir,
+				   tgid_base_stuff,ARRAY_SIZE(tgid_base_stuff));
+}
+
+static int proc_tid_base_readdir(struct file * filp,
+			     void * dirent, filldir_t filldir)
+{
+	return proc_pident_readdir(filp,dirent,filldir,
+				   tid_base_stuff,ARRAY_SIZE(tid_base_stuff));
+}
+
+/* building an inode */
+
+static int task_dumpable(struct task_struct *task)
+{
+	int dumpable = 0;
+	struct mm_struct *mm;
+
+	task_lock(task);
+	mm = task->mm;
+	if (mm)
+		dumpable = mm->dumpable;
+	task_unlock(task);
+	if(dumpable == 1)
+		return 1;
+	return 0;
+}
+
+
+static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task, int ino)
+{
+	struct inode * inode;
+	struct proc_inode *ei;
+
+	/* We need a new inode */
+	
+	inode = new_inode(sb);
+	if (!inode)
+		goto out;
+
+	/* Common stuff */
+	ei = PROC_I(inode);
+	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+	inode->i_ino = fake_ino(task->pid, ino);
+
+	/*
+	 * grab the reference to task.
+	 */
+	ei->tref = tref_get_by_task(task);
+	if (!tref_task(ei->tref))
+		goto out_unlock;
+
+	inode->i_uid = 0;
+	inode->i_gid = 0;
+	if (task_dumpable(task)) {
+		inode->i_uid = task->euid;
+		inode->i_gid = task->egid;
+	}
+	security_task_to_inode(task, inode);
+
+out:
+	return inode;
+
+out_unlock:
+	iput(inode);
+	return NULL;
+}
+
+/* dentry stuff */
+
+/*
+ *	Exceptional case: normally we are not allowed to unhash a busy
+ * directory. In this case, however, we can do it - no aliasing problems
+ * due to the way we treat inodes.
+ *
+ * Rewrite the inode's ownerships here because the owning task may have
+ * performed a setuid(), etc.
+ */
+static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+	struct inode *inode = dentry->d_inode;
+	struct task_struct *task = get_proc_task(inode);
+	if (task) {
+		if (task_dumpable(task)) {
+			inode->i_uid = task->euid;
+			inode->i_gid = task->egid;
+		} else {
+			inode->i_uid = 0;
+			inode->i_gid = 0;
+		}
+		security_task_to_inode(task, inode);
+		put_task_struct(task);
+		return 1;
+	}
+	d_drop(dentry);
+	return 0;
+}
+
+static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+	struct inode *inode = dentry->d_inode;
+	struct task_struct *task = get_proc_task(inode);
+	int fd = proc_fd(inode);
+	struct files_struct *files;
+
+	if (task) {
+		files = get_files_struct(task);
+		if (files) {
+			rcu_read_lock();
+			if (fcheck_files(files, fd)) {
+				rcu_read_unlock();
+				put_files_struct(files);
+				if (task_dumpable(task)) {
+					inode->i_uid = task->euid;
+					inode->i_gid = task->egid;
+				} else {
+					inode->i_uid = 0;
+					inode->i_gid = 0;
+				}
+				security_task_to_inode(task, inode);
+				return 1;
+			}
+			rcu_read_unlock();
+			put_files_struct(files);
+		}
+		put_task_struct(task);
+	}
+	d_drop(dentry);
+	return 0;
+}
+
+static int pid_delete_dentry(struct dentry * dentry)
+{
+	/* Is the task we represent dead?
+	 * If so, then don't put the dentry on the lru list,
+	 * kill it immediately.
+	 */
+	return !proc_tref(dentry->d_inode)->task;
+}
+
+static struct dentry_operations tid_fd_dentry_operations =
+{
+	.d_revalidate	= tid_fd_revalidate,
+	.d_delete	= pid_delete_dentry,
+};
+
+static struct dentry_operations pid_dentry_operations =
+{
+	.d_revalidate	= pid_revalidate,
+	.d_delete	= pid_delete_dentry,
+};
+
+/* Lookups */
+
+static unsigned name_to_int(struct dentry *dentry)
+{
+	const char *name = dentry->d_name.name;
+	int len = dentry->d_name.len;
+	unsigned n = 0;
+
+	if (len > 1 && *name == '0')
+		goto out;
+	while (len-- > 0) {
+		unsigned c = *name++ - '0';
+		if (c > 9)
+			goto out;
+		if (n >= (~0U-9)/10)
+			goto out;
+		n *= 10;
+		n += c;
+	}
+	return n;
+out:
+	return ~0U;
+}
+
+/* SMP-safe */
+static struct dentry *proc_lookupfd(struct inode * dir, struct dentry * dentry, struct nameidata *nd)
+{
+	struct task_struct *task = get_proc_task(dir);
+	unsigned fd = name_to_int(dentry);
+	struct dentry *result = ERR_PTR(-ENOENT);
+	struct file * file;
+	struct files_struct * files;
+	struct inode *inode;
+	struct proc_inode *ei;
+
+	if (!task)
+		goto out_no_task;
+	if (fd == ~0U)
+		goto out;
+
+	inode = proc_pid_make_inode(dir->i_sb, task, PROC_TID_FD_DIR+fd);
+	if (!inode)
+		goto out;
+	ei = PROC_I(inode);
+	ei->fd = fd;
+	files = get_files_struct(task);
+	if (!files)
+		goto out_unlock;
+	inode->i_mode = S_IFLNK;
+	rcu_read_lock();
+	file = fcheck_files(files, fd);
+	if (!file)
+		goto out_unlock2;
+	if (file->f_mode & 1)
+		inode->i_mode |= S_IRUSR | S_IXUSR;
+	if (file->f_mode & 2)
+		inode->i_mode |= S_IWUSR | S_IXUSR;
+	rcu_read_unlock();
+	put_files_struct(files);
+	inode->i_op = &proc_pid_link_inode_operations;
+	inode->i_size = 64;
+	ei->op.proc_get_link = proc_fd_link;
+	dentry->d_op = &tid_fd_dentry_operations;
+	d_add(dentry, inode);
+	/* Close the race of the process dying before we return the dentry */
+	if (tid_fd_revalidate(dentry, NULL))
+		result = NULL;
+out:
+	put_task_struct(task);
+out_no_task:
+	return result;
+
+out_unlock2:
+	rcu_read_unlock();
+	put_files_struct(files);
+out_unlock:
+	iput(inode);
+	goto out;
+}
+
+static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir);
+static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd);
+static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
+
+static struct file_operations proc_fd_operations = {
+	.read		= generic_read_dir,
+	.readdir	= proc_readfd,
+};
+
+static struct file_operations proc_task_operations = {
+	.read		= generic_read_dir,
+	.readdir	= proc_task_readdir,
+};
+
+/*
+ * proc directories can do almost nothing..
+ */
+static struct inode_operations proc_fd_inode_operations = {
+	.lookup		= proc_lookupfd,
+};
+
+static struct inode_operations proc_task_inode_operations = {
+	.lookup		= proc_task_lookup,
+	.getattr	= proc_task_getattr,
+};
+
+#ifdef CONFIG_SECURITY
+static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
+				  size_t count, loff_t *ppos)
+{
+	struct inode * inode = file->f_dentry->d_inode;
+	unsigned long page;
+	ssize_t length;
+	struct task_struct *task = get_proc_task(inode);
+
+	length = -ESRCH;
+	if (!task)
+		goto out_no_task;
+
+	if (count > PAGE_SIZE)
+		count = PAGE_SIZE;
+	length = -ENOMEM;
+	if (!(page = __get_free_page(GFP_KERNEL)))
+		goto out;
+
+	length = security_getprocattr(task, 
+				      (char*)file->f_dentry->d_name.name, 
+				      (void*)page, count);
+	if (length >= 0)
+		length = simple_read_from_buffer(buf, count, ppos, (char *)page, length);
+	free_page(page);
+out:
+	put_task_struct(task);
+out_no_task:
+	return length;
+}
+
+static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
+				   size_t count, loff_t *ppos)
+{ 
+	struct inode * inode = file->f_dentry->d_inode;
+	char *page; 
+	ssize_t length; 
+	struct task_struct *task = get_proc_task(inode);
+
+	length = -ESRCH;
+	if (!task)
+		goto out_no_task;
+	if (count > PAGE_SIZE) 
+		count = PAGE_SIZE; 
+
+	/* No partial writes. */
+	length = -EINVAL;
+	if (*ppos != 0)
+		goto out;
+
+	length = -ENOMEM;
+	page = (char*)__get_free_page(GFP_USER); 
+	if (!page) 
+		goto out;
+
+	length = -EFAULT; 
+	if (copy_from_user(page, buf, count)) 
+		goto out_free;
+
+	length = security_setprocattr(task, 
+				      (char*)file->f_dentry->d_name.name, 
+				      (void*)page, count);
+out_free:
+	free_page((unsigned long) page);
+out:
+	put_task_struct(task);
+out_no_task:
+	return length;
+} 
+
+static struct file_operations proc_pid_attr_operations = {
+	.read		= proc_pid_attr_read,
+	.write		= proc_pid_attr_write,
+};
+
+static struct file_operations proc_tid_attr_operations;
+static struct inode_operations proc_tid_attr_inode_operations;
+static struct file_operations proc_tgid_attr_operations;
+static struct inode_operations proc_tgid_attr_inode_operations;
+#endif
+
+/* SMP-safe */
+static struct dentry *proc_pident_lookup(struct inode *dir, 
+					 struct dentry *dentry,
+					 struct pid_entry *ents)
+{
+	struct inode *inode;
+	struct dentry *error;
+	struct task_struct *task = get_proc_task(dir);
+	struct pid_entry *p;
+	struct proc_inode *ei;
+
+	error = ERR_PTR(-ENOENT);
+	inode = NULL;
+
+	if (!task)
+		goto out_no_task;
+
+	for (p = ents; p->name; p++) {
+		if (p->len != dentry->d_name.len)
+			continue;
+		if (!memcmp(dentry->d_name.name, p->name, p->len))
+			break;
+	}
+	if (!p->name)
+		goto out;
+
+	error = ERR_PTR(-EINVAL);
+	inode = proc_pid_make_inode(dir->i_sb, task, p->type);
+	if (!inode)
+		goto out;
+
+	ei = PROC_I(inode);
+	inode->i_mode = p->mode;
+	/*
+	 * Yes, it does not scale. And it should not. Don't add
+	 * new entries into /proc/<tgid>/ without very good reasons.
+	 */
+	switch(p->type) {
+		case PROC_TGID_TASK:
+			inode->i_nlink = 2;
+			inode->i_op = &proc_task_inode_operations;
+			inode->i_fop = &proc_task_operations;
+			break;
+		case PROC_TID_FD:
+		case PROC_TGID_FD:
+			inode->i_nlink = 2;
+			inode->i_op = &proc_fd_inode_operations;
+			inode->i_fop = &proc_fd_operations;
+			break;
+		case PROC_TID_EXE:
+		case PROC_TGID_EXE:
+			inode->i_op = &proc_pid_link_inode_operations;
+			ei->op.proc_get_link = proc_exe_link;
+			break;
+		case PROC_TID_CWD:
+		case PROC_TGID_CWD:
+			inode->i_op = &proc_pid_link_inode_operations;
+			ei->op.proc_get_link = proc_cwd_link;
+			break;
+		case PROC_TID_ROOT:
+		case PROC_TGID_ROOT:
+			inode->i_op = &proc_pid_link_inode_operations;
+			ei->op.proc_get_link = proc_root_link;
+			break;
+		case PROC_TID_ENVIRON:
+		case PROC_TGID_ENVIRON:
+			inode->i_fop = &proc_info_file_operations;
+			ei->op.proc_read = proc_pid_environ;
+			break;
+		case PROC_TID_AUXV:
+		case PROC_TGID_AUXV:
+			inode->i_fop = &proc_info_file_operations;
+			ei->op.proc_read = proc_pid_auxv;
+			break;
+		case PROC_TID_STATUS:
+		case PROC_TGID_STATUS:
+			inode->i_fop = &proc_info_file_operations;
+			ei->op.proc_read = proc_pid_status;
+			break;
+		case PROC_TID_STAT:
+			inode->i_fop = &proc_info_file_operations;
+			ei->op.proc_read = proc_tid_stat;
+			break;
+		case PROC_TGID_STAT:
+			inode->i_fop = &proc_info_file_operations;
+			ei->op.proc_read = proc_tgid_stat;
+			break;
+		case PROC_TID_CMDLINE:
+		case PROC_TGID_CMDLINE:
+			inode->i_fop = &proc_info_file_operations;
+			ei->op.proc_read = proc_pid_cmdline;
+			break;
+		case PROC_TID_STATM:
+		case PROC_TGID_STATM:
+			inode->i_fop = &proc_info_file_operations;
+			ei->op.proc_read = proc_pid_statm;
+			break;
+		case PROC_TID_MAPS:
+		case PROC_TGID_MAPS:
+			inode->i_fop = &proc_maps_operations;
+			break;
+#ifdef CONFIG_NUMA
+		case PROC_TID_NUMA_MAPS:
+		case PROC_TGID_NUMA_MAPS:
+			inode->i_fop = &proc_numa_maps_operations;
+			break;
+#endif
+		case PROC_TID_MEM:
+		case PROC_TGID_MEM:
+			inode->i_fop = &proc_mem_operations;
+			break;
+#ifdef CONFIG_SECCOMP
+		case PROC_TID_SECCOMP:
+		case PROC_TGID_SECCOMP:
+			inode->i_fop = &proc_seccomp_operations;
+			break;
+#endif /* CONFIG_SECCOMP */
+		case PROC_TID_MOUNTS:
+		case PROC_TGID_MOUNTS:
+			inode->i_fop = &proc_mounts_operations;
+			break;
+#ifdef CONFIG_MMU
+		case PROC_TID_SMAPS:
+		case PROC_TGID_SMAPS:
+			inode->i_fop = &proc_smaps_operations;
+			break;
+#endif
+		case PROC_TID_MOUNTSTATS:
+		case PROC_TGID_MOUNTSTATS:
+			inode->i_fop = &proc_mountstats_operations;
+			break;
+#ifdef CONFIG_SECURITY
+		case PROC_TID_ATTR:
+			inode->i_nlink = 2;
+			inode->i_op = &proc_tid_attr_inode_operations;
+			inode->i_fop = &proc_tid_attr_operations;
+			break;
+		case PROC_TGID_ATTR:
+			inode->i_nlink = 2;
+			inode->i_op = &proc_tgid_attr_inode_operations;
+			inode->i_fop = &proc_tgid_attr_operations;
+			break;
+		case PROC_TID_ATTR_CURRENT:
+		case PROC_TGID_ATTR_CURRENT:
+		case PROC_TID_ATTR_PREV:
+		case PROC_TGID_ATTR_PREV:
+		case PROC_TID_ATTR_EXEC:
+		case PROC_TGID_ATTR_EXEC:
+		case PROC_TID_ATTR_FSCREATE:
+		case PROC_TGID_ATTR_FSCREATE:
+			inode->i_fop = &proc_pid_attr_operations;
+			break;
+#endif
+#ifdef CONFIG_KALLSYMS
+		case PROC_TID_WCHAN:
+		case PROC_TGID_WCHAN:
+			inode->i_fop = &proc_info_file_operations;
+			ei->op.proc_read = proc_pid_wchan;
+			break;
+#endif
+#ifdef CONFIG_SCHEDSTATS
+		case PROC_TID_SCHEDSTAT:
+		case PROC_TGID_SCHEDSTAT:
+			inode->i_fop = &proc_info_file_operations;
+			ei->op.proc_read = proc_pid_schedstat;
+			break;
+#endif
+#ifdef CONFIG_CPUSETS
+		case PROC_TID_CPUSET:
+		case PROC_TGID_CPUSET:
+			inode->i_fop = &proc_cpuset_operations;
+			break;
+#endif
+		case PROC_TID_OOM_SCORE:
+		case PROC_TGID_OOM_SCORE:
+			inode->i_fop = &proc_info_file_operations;
+			ei->op.proc_read = proc_oom_score;
+			break;
+		case PROC_TID_OOM_ADJUST:
+		case PROC_TGID_OOM_ADJUST:
+			inode->i_fop = &proc_oom_adjust_operations;
+			break;
+#ifdef CONFIG_AUDITSYSCALL
+		case PROC_TID_LOGINUID:
+		case PROC_TGID_LOGINUID:
+			inode->i_fop = &proc_loginuid_operations;
+			break;
+#endif
+		default:
+			printk("procfs: impossible type (%d)",p->type);
+			iput(inode);
+			error = ERR_PTR(-EINVAL);
+			goto out;
+	}
+	dentry->d_op = &pid_dentry_operations;
+	d_add(dentry, inode);
+	/* Close the race of the process dying before we return the dentry */
+	if (pid_revalidate(dentry, NULL))
+		error = NULL;
+out:
+	put_task_struct(task);
+out_no_task:
+	return error;
+}
+
+static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){
+	return proc_pident_lookup(dir, dentry, tgid_base_stuff);
+}
+
+static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){
+	return proc_pident_lookup(dir, dentry, tid_base_stuff);
+}
+
+static struct file_operations proc_tgid_base_operations = {
+	.read		= generic_read_dir,
+	.readdir	= proc_tgid_base_readdir,
+};
+
+static struct file_operations proc_tid_base_operations = {
+	.read		= generic_read_dir,
+	.readdir	= proc_tid_base_readdir,
+};
+
+static struct inode_operations proc_tgid_base_inode_operations = {
+	.lookup		= proc_tgid_base_lookup,
+};
+
+static struct inode_operations proc_tid_base_inode_operations = {
+	.lookup		= proc_tid_base_lookup,
+};
+
+#ifdef CONFIG_SECURITY
+static int proc_tgid_attr_readdir(struct file * filp,
+			     void * dirent, filldir_t filldir)
+{
+	return proc_pident_readdir(filp,dirent,filldir,
+				   tgid_attr_stuff,ARRAY_SIZE(tgid_attr_stuff));
+}
+
+static int proc_tid_attr_readdir(struct file * filp,
+			     void * dirent, filldir_t filldir)
+{
+	return proc_pident_readdir(filp,dirent,filldir,
+				   tid_attr_stuff,ARRAY_SIZE(tid_attr_stuff));
+}
+
+static struct file_operations proc_tgid_attr_operations = {
+	.read		= generic_read_dir,
+	.readdir	= proc_tgid_attr_readdir,
+};
+
+static struct file_operations proc_tid_attr_operations = {
+	.read		= generic_read_dir,
+	.readdir	= proc_tid_attr_readdir,
+};
+
+static struct dentry *proc_tgid_attr_lookup(struct inode *dir,
+				struct dentry *dentry, struct nameidata *nd)
+{
+	return proc_pident_lookup(dir, dentry, tgid_attr_stuff);
+}
+
+static struct dentry *proc_tid_attr_lookup(struct inode *dir,
+				struct dentry *dentry, struct nameidata *nd)
+{
+	return proc_pident_lookup(dir, dentry, tid_attr_stuff);
+}
+
+static struct inode_operations proc_tgid_attr_inode_operations = {
+	.lookup		= proc_tgid_attr_lookup,
+};
+
+static struct inode_operations proc_tid_attr_inode_operations = {
+	.lookup		= proc_tid_attr_lookup,
+};
+#endif
+
+/*
+ * /proc/self:
+ */
+static int proc_self_readlink(struct dentry *dentry, char __user *buffer,
+			      int buflen)
+{
+	char tmp[PROC_NUMBUF];
+	sprintf(tmp, "%d", current->tgid);
+	return vfs_readlink(dentry,buffer,buflen,tmp);
+}
+
+static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	char tmp[PROC_NUMBUF];
+	sprintf(tmp, "%d", current->tgid);
+	return ERR_PTR(vfs_follow_link(nd,tmp));
+}	
+
+static struct inode_operations proc_self_inode_operations = {
+	.readlink	= proc_self_readlink,
+	.follow_link	= proc_self_follow_link,
+};
+
+/**
+ * proc_flush_task -  Remove dcache entries for @task from the /proc dcache.
+ *
+ * @task: task that should be flushed.
+ *
+ * Looks in the dcache for
+ * /proc/@pid
+ * /proc/@tgid/task/@pid
+ * if either directory is present, flushes it and all of its children
+ * from the dcache.
+ *
+ * It is safe and reasonable to cache /proc entries for a task until
+ * that task exits.  After that they just clog up the dcache with
+ * useless entries, possibly causing useful dcache entries to be
+ * flushed instead.  This routine is provided to flush those useless
+ * dcache entries at process exit time.
+ *
+ * NOTE: This routine is just an optimization, so it does not guarantee
+ *       that no dcache entries will exist at process exit time; it
+ *       just makes it very unlikely that any will persist.
+ */
+void proc_flush_task(struct task_struct *task)
+{
+	struct dentry *dentry, *leader, *dir;
+	char buf[PROC_NUMBUF];
+	struct qstr name;
+
+	name.name = buf;
+	name.len = snprintf(buf, sizeof(buf), "%d", task->pid);
+	dentry = d_hash_and_lookup(proc_mnt->mnt_root, &name);
+	if (dentry) {
+		shrink_dcache_parent(dentry);
+		d_drop(dentry);
+		dput(dentry);
+	}
+
+	if (thread_group_leader(task))
+		goto out;
+
+	name.name = buf;
+	name.len = snprintf(buf, sizeof(buf), "%d", task->tgid);
+	leader = d_hash_and_lookup(proc_mnt->mnt_root, &name);
+	if (!leader)
+		goto out;
+
+	name.name = "task";
+	name.len = strlen(name.name);
+	dir = d_hash_and_lookup(leader, &name);
+	if (!dir)
+		goto out_put_leader;
+
+	name.name = buf;
+	name.len = snprintf(buf, sizeof(buf), "%d", task->pid);
+	dentry = d_hash_and_lookup(dir, &name);
+	if (dentry) {
+		shrink_dcache_parent(dentry);
+		d_drop(dentry);
+		dput(dentry);
+	}
+
+	dput(dir);
+out_put_leader:
+	dput(leader);
+out:
+	return;
+}
+
+/* SMP-safe */
+struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
+{
+	struct dentry *result = ERR_PTR(-ENOENT);
+	struct task_struct *task;
+	struct inode *inode;
+	struct proc_inode *ei;
+	unsigned tgid;
+
+	if (dentry->d_name.len == 4 && !memcmp(dentry->d_name.name,"self",4)) {
+		inode = new_inode(dir->i_sb);
+		if (!inode)
+			return ERR_PTR(-ENOMEM);
+		ei = PROC_I(inode);
+		inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+		inode->i_ino = fake_ino(0, PROC_TGID_INO);
+		ei->pde = NULL;
+		inode->i_mode = S_IFLNK|S_IRWXUGO;
+		inode->i_uid = inode->i_gid = 0;
+		inode->i_size = 64;
+		inode->i_op = &proc_self_inode_operations;
+		d_add(dentry, inode);
+		return NULL;
+	}
+	tgid = name_to_int(dentry);
+	if (tgid == ~0U)
+		goto out;
+
+	read_lock(&tasklist_lock);
+	task = find_task_by_pid(tgid);
+	if (task)
+		get_task_struct(task);
+	read_unlock(&tasklist_lock);
+	if (!task)
+		goto out;
+
+	inode = proc_pid_make_inode(dir->i_sb, task, PROC_TGID_INO);
+	if (!inode)
+		goto out_put_task;
+
+	inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
+	inode->i_op = &proc_tgid_base_inode_operations;
+	inode->i_fop = &proc_tgid_base_operations;
+	inode->i_flags|=S_IMMUTABLE;
+#ifdef CONFIG_SECURITY
+	inode->i_nlink = 5;
+#else
+	inode->i_nlink = 4;
+#endif
+
+	dentry->d_op = &pid_dentry_operations;
+
+	d_add(dentry, inode);
+	/* Close the race of the process dying before we return the dentry */
+	if (pid_revalidate(dentry, NULL))
+		result = NULL;
+
+out_put_task:
+	put_task_struct(task);
+out:
+	return result;
+}
+
+/* SMP-safe */
+static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
+{
+	struct dentry *result = ERR_PTR(-ENOENT);
+	struct task_struct *task;
+	struct task_struct *leader = get_proc_task(dir);
+	struct inode *inode;
+	unsigned tid;
+
+	if (!leader)
+		goto out_no_task;
+
+	tid = name_to_int(dentry);
+	if (tid == ~0U)
+		goto out;
+
+	read_lock(&tasklist_lock);
+	task = find_task_by_pid(tid);
+	if (task)
+		get_task_struct(task);
+	read_unlock(&tasklist_lock);
+	if (!task)
+		goto out;
+	if (leader->tgid != task->tgid)
+		goto out_drop_task;
+
+	inode = proc_pid_make_inode(dir->i_sb, task, PROC_TID_INO);
+	if (!inode)
+		goto out_drop_task;
+	inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
+	inode->i_op = &proc_tid_base_inode_operations;
+	inode->i_fop = &proc_tid_base_operations;
+	inode->i_flags|=S_IMMUTABLE;
+#ifdef CONFIG_SECURITY
+	inode->i_nlink = 4;
+#else
+	inode->i_nlink = 3;
+#endif
+
+	dentry->d_op = &pid_dentry_operations;
+
+	d_add(dentry, inode);
+	/* Close the race of the process dying before we return the dentry */
+	if (pid_revalidate(dentry, NULL))
+		result = NULL;
+
+out_drop_task:
+	put_task_struct(task);
+out:
+	put_task_struct(leader);
+out_no_task:
+	return result;
+}
+
+/*
+ * Find the first tgid to return to user space.
+ *
+ * Usually this is just whatever follows &init_task, but if the user's
+ * buffer was too small to hold the full list or there was a seek into
+ * the middle of the directory we have more work to do.
+ *
+ * In the case of a short read we start with find_task_by_pid.
+ *
+ * In the case of a seek we start with &init_task and walk nr
+ * threads past it.
+ */
+static struct task_struct *first_tgid(int tgid, int nr)
+{
+	struct task_struct *pos = NULL;
+	read_lock(&tasklist_lock);
+	if (tgid && nr) {
+		pos = find_task_by_pid(tgid);
+		if (pos && !thread_group_leader(pos))
+			pos = NULL;
+		if (pos)
+			nr = 0;
+	}
+	/* If nr exceeds the number of processes get out quickly */
+	if (nr && nr >= nr_processes())
+		goto done;
+
+	/* If we haven't found our starting place yet start with
+	 * the init_task and walk nr tasks forward.
+	 */
+	if (!pos && (nr >= 0))
+		pos = next_task(&init_task);
+
+	/* The pid_alive test serves two purposes.
+	 * - The first is to verify the task is actually valid.
+	 * - The second is to ensure we don't go around the list
+	 *   of processes more than once.  pid_alive always
+	 *   fails for init_task as it has pid == 0 and is unhashed.
+	 */
+	for (; pos && pid_alive(pos); pos = next_task(pos)) {
+		if (--nr > 0)
+			continue;
+		get_task_struct(pos);
+		goto done;
+	}
+	pos = NULL;
+done:
+	read_unlock(&tasklist_lock);
+	return pos;
+}
+
+/*
+ * Find the next task in the task list.
+ * Return NULL if we loop or there is any error.
+ *
+ * The reference to the input task_struct is released.
+ */
+static struct task_struct *next_tgid(struct task_struct *start)
+{
+	struct task_struct *pos;
+	read_lock(&tasklist_lock);
+	pos = start;
+	if (pid_alive(start))
+		pos = next_task(start);
+	if (pid_alive(pos)) {
+		get_task_struct(pos);
+		goto done;
+	}
+	pos = NULL;
+done:
+	read_unlock(&tasklist_lock);
+	put_task_struct(start);
+	return pos;
+}
+
+/* for the /proc/ directory itself, after non-process stuff has been done */
+int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
+{
+	char buf[PROC_NUMBUF];
+	unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
+	struct task_struct *task;
+	int tgid;
+
+	if (!nr) {
+		ino_t ino = fake_ino(0,PROC_TGID_INO);
+		if (filldir(dirent, "self", 4, filp->f_pos, ino, DT_LNK) < 0)
+			return 0;
+		filp->f_pos++;
+		nr++;
+	}
+	nr -= 1;
+
+	/* f_version caches the tgid value that the last readdir call couldn't
+	 * return. lseek aka telldir automagically resets f_version to 0.
+	 */
+	tgid = filp->f_version;
+	filp->f_version = 0;
+	for (task = first_tgid(tgid, nr);
+	     task;
+	     task = next_tgid(task), filp->f_pos++) {
+		int len;
+		ino_t ino;
+		tgid = task->pid;
+		len = snprintf(buf, sizeof(buf), "%d", tgid);
+		ino = fake_ino(tgid, PROC_TGID_INO);
+		if (filldir(dirent, buf, len, filp->f_pos, ino, DT_DIR) < 0) {
+			/* returning this tgid failed, save it as the first
+			 * pid for the next readdir call */
+			filp->f_version = tgid;
+			put_task_struct(task);
+			break;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Find the first tid of a thread group to return to user space.
+ *
+ * Usually this is just the thread group leader, but if the user's
+ * buffer was too small or there was a seek into the middle of the
+ * directory we have more work to do.
+ *
+ * In the case of a short read we start with find_task_by_pid.
+ *
+ * In the case of a seek we start with the leader and walk nr
+ * threads past it.
+ */
+static struct task_struct *first_tid(struct task_struct *leader, int tid, int nr)
+{
+	struct task_struct *pos = NULL;
+	read_lock(&tasklist_lock);
+
+	/* Attempt to start with the pid of a thread */
+	if (tid && (nr > 0)) {
+		pos = find_task_by_pid(tid);
+		if (pos && (pos->group_leader != leader))
+			pos = NULL;
+		if (pos)
+			nr = 0;
+	}
+
+	/* If nr exceeds the number of threads there is nothing to do */
+	if (nr) {
+		int threads = 0;
+		task_lock(leader);
+		if (leader->signal)
+			threads = atomic_read(&leader->signal->count);
+		task_unlock(leader);
+		if (nr >= threads)
+			goto done;
+	}
+
+	/* If we haven't found our starting place yet start with the
+	 * leader and walk nr threads forward.
+	 */
+	if (!pos && (nr >= 0))
+		pos = leader;
+
+	for (; pos && pid_alive(pos); pos = next_thread(pos)) {
+		if (--nr > 0)
+			continue;
+		get_task_struct(pos);
+		goto done;
+	}
+	pos = NULL;
+done:
+	read_unlock(&tasklist_lock);
+	return pos;
+}
+
+/*
+ * Find the next thread in the thread list.
+ * Return NULL if there is an error or no next thread.
+ *
+ * The reference to the input task_struct is released.
+ */
+static struct task_struct *next_tid(struct task_struct *start)
+{
+	struct task_struct *pos;
+	read_lock(&tasklist_lock);
+	pos = start;
+	if (pid_alive(start))
+		pos = next_thread(start);
+	if (pid_alive(pos) && (pos != start->group_leader))
+		get_task_struct(pos);
+	else
+		pos = NULL;
+	read_unlock(&tasklist_lock);
+	put_task_struct(start);
+	return pos;
+}
+
+/* for the /proc/TGID/task/ directories */
+static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir)
+{
+	char buf[PROC_NUMBUF];
+	struct dentry *dentry = filp->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	struct task_struct *leader = get_proc_task(inode);
+	struct task_struct *task;
+	int retval = -ENOENT;
+	ino_t ino;
+	int tid;
+	unsigned long pos = filp->f_pos;  /* avoiding "long long" filp->f_pos */
+
+	if (!leader)
+		goto out_no_task;
+	retval = 0;
+
+	switch (pos) {
+	case 0:
+		ino = inode->i_ino;
+		if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
+			goto out;
+		pos++;
+		/* fall through */
+	case 1:
+		ino = parent_ino(dentry);
+		if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
+			goto out;
+		pos++;
+		/* fall through */
+	}
+
+	/* f_version caches the tid value that the last readdir call couldn't
+	 * return. lseek aka telldir automagically resets f_version to 0.
+	 */
+	tid = filp->f_version;
+	filp->f_version = 0;
+	for (task = first_tid(leader, tid, pos - 2);
+	     task;
+	     task = next_tid(task), pos++) {
+		int len;
+		tid = task->pid;
+		len = snprintf(buf, sizeof(buf), "%d", tid);
+		ino = fake_ino(tid, PROC_TID_INO);
+		if (filldir(dirent, buf, len, pos, ino, DT_DIR) < 0) {
+			/* returning this tid failed, save it as the first
+			 * tid for the next readdir call */
+			filp->f_version = tid;
+			put_task_struct(task);
+			break;
+		}
+	}
+out:
+	filp->f_pos = pos;
+	put_task_struct(leader);
+out_no_task:
+	return retval;
+}
+
+static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+{
+	struct inode *inode = dentry->d_inode;
+	struct task_struct *p = get_proc_task(inode);
+	generic_fillattr(inode, stat);
+
+	if (p) {
+		task_lock(p);
+		if (p->signal)
+			stat->nlink += atomic_read(&p->signal->count);
+		task_unlock(p);
+		put_task_struct(p);
+	}
+
+	return 0;
+}
diff -urN oldtree/fs/proc/proc_misc.c newtree/fs/proc/proc_misc.c
--- oldtree/fs/proc/proc_misc.c	2006-03-08 18:48:01.739987750 +0000
+++ newtree/fs/proc/proc_misc.c	2006-03-08 18:56:29.979750750 +0000
@@ -45,6 +45,7 @@
 #include <linux/jiffies.h>
 #include <linux/sysrq.h>
 #include <linux/vmalloc.h>
+#include <linux/sched_drv.h>
 #include <linux/crash_dump.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -243,6 +244,17 @@
 	return proc_calc_metrics(page, start, off, count, eof, len);
 }
 
+static int scheduler_read_proc(char *page, char **start, off_t off,
+				 int count, int *eof, void *data)
+{
+	int len;
+
+	strcpy(page, sched_drvp->name);
+	strcat(page, "\n");
+	len = strlen(page);
+	return proc_calc_metrics(page, start, off, count, eof, len);
+}
+
 extern struct seq_operations cpuinfo_op;
 static int cpuinfo_open(struct inode *inode, struct file *file)
 {
@@ -807,6 +819,7 @@
 		{"cmdline",	cmdline_read_proc},
 		{"locks",	locks_read_proc},
 		{"execdomains",	execdomains_read_proc},
+		{"scheduler",	scheduler_read_proc},
 		{NULL,}
 	};
 	for (p = simple_ones; p->name; p++)
diff -urN oldtree/include/asm-x86_64/system.h newtree/include/asm-x86_64/system.h
--- oldtree/include/asm-x86_64/system.h	2006-03-08 18:48:02.176015000 +0000
+++ newtree/include/asm-x86_64/system.h	2006-03-08 18:56:29.983751000 +0000
@@ -31,8 +31,6 @@
 		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
 		     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
 		     "call __switch_to\n\t"					  \
-		     ".globl thread_return\n"					\
-		     "thread_return:\n\t"					    \
 		     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"			  \
 		     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
 		     LOCK "btr  %[tif_fork],%P[ti_flags](%%r8)\n\t"		  \
diff -urN oldtree/include/linux/init_task.h newtree/include/linux/init_task.h
--- oldtree/include/linux/init_task.h	2006-03-08 18:48:02.304023000 +0000
+++ newtree/include/linux/init_task.h	2006-03-08 18:56:29.987751250 +0000
@@ -85,15 +85,14 @@
 	.usage		= ATOMIC_INIT(2),				\
 	.flags		= 0,						\
 	.lock_depth	= -1,						\
-	.prio		= MAX_PRIO-20,					\
-	.static_prio	= MAX_PRIO-20,					\
+	.prio		= NICE_TO_PRIO(0),				\
+	.static_prio	= NICE_TO_PRIO(0),				\
 	.policy		= SCHED_NORMAL,					\
 	.cpus_allowed	= CPU_MASK_ALL,					\
 	.mm		= NULL,						\
 	.active_mm	= &init_mm,					\
 	.run_list	= LIST_HEAD_INIT(tsk.run_list),			\
 	.ioprio		= 0,						\
-	.time_slice	= HZ,						\
 	.tasks		= LIST_HEAD_INIT(tsk.tasks),			\
 	.ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children),		\
 	.ptrace_list	= LIST_HEAD_INIT(tsk.ptrace_list),		\
diff -urN oldtree/include/linux/sched.h newtree/include/linux/sched.h
--- oldtree/include/linux/sched.h	2006-03-08 18:48:02.348025750 +0000
+++ newtree/include/linux/sched.h	2006-03-08 18:56:29.991751500 +0000
@@ -481,8 +481,6 @@
 #define MAX_USER_RT_PRIO	100
 #define MAX_RT_PRIO		MAX_USER_RT_PRIO
 
-#define MAX_PRIO		(MAX_RT_PRIO + 40)
-
 #define rt_task(p)		(unlikely((p)->prio < MAX_RT_PRIO))
 
 /*
@@ -684,12 +682,7 @@
 struct audit_context;		/* See audit.c */
 struct mempolicy;
 
-enum sleep_type {
-	SLEEP_NORMAL,
-	SLEEP_NONINTERACTIVE,
-	SLEEP_INTERACTIVE,
-	SLEEP_INTERRUPTED,
-};
+#include <linux/sched_task.h>
 
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
@@ -708,19 +701,16 @@
 #endif
 	int prio, static_prio;
 	struct list_head run_list;
-	prio_array_t *array;
+	union sched_drv_task sdu;
 
 	unsigned short ioprio;
 	unsigned int btrace_seq;
 
-	unsigned long sleep_avg;
 	unsigned long long timestamp, last_ran;
 	unsigned long long sched_time; /* sched_clock time spent running */
-	enum sleep_type sleep_type;
 
 	unsigned long policy;
 	cpumask_t cpus_allowed;
-	unsigned int time_slice, first_time_slice;
 
 #ifdef CONFIG_SCHEDSTATS
 	struct sched_info sched_info;
diff -urN oldtree/include/linux/sched_drv.h newtree/include/linux/sched_drv.h
--- oldtree/include/linux/sched_drv.h	1970-01-01 00:00:00.000000000 +0000
+++ newtree/include/linux/sched_drv.h	2006-03-08 18:56:29.995751750 +0000
@@ -0,0 +1,65 @@
+#ifndef _LINUX_SCHED_DRV_H
+#define _LINUX_SCHED_DRV_H
+/*
+ * include/linux/sched_drv.h
+ * This contains the definition of the driver struct for all the exported per
+ * runqueue scheduler functions, and the private per scheduler data in
+ * struct task_struct.
+ */
+#include <linux/kobject.h>
+
+#include <linux/sched.h>
+#include <linux/sched_runq.h>
+
+/*
+ * This is the main scheduler driver struct.
+ */
+struct sched_drv {
+	const char *name;
+	void (*init_runqueue_queue)(union runqueue_queue *);
+	void (*set_oom_time_slice)(struct task_struct *, unsigned long);
+#ifdef CONFIG_SMP
+	void (*set_load_weight)(struct task_struct *);
+#endif
+	unsigned int (*task_timeslice)(const task_t *);
+	void (*wake_up_task)(struct task_struct *, struct runqueue *, unsigned int, int);
+	void (*fork)(task_t *);
+	void (*wake_up_new_task)(task_t *, unsigned long);
+	void (*exit)(task_t *);
+#ifdef CONFIG_SMP
+	int (*move_tasks)(runqueue_t *, int, runqueue_t *, unsigned long, unsigned long,
+		 struct sched_domain *, enum idle_type, int *all_pinned);
+#endif
+	void (*tick)(struct task_struct*, struct runqueue *, unsigned long long);
+#ifdef CONFIG_SCHED_SMT
+	struct task_struct *(*head_of_queue)(union runqueue_queue *);
+	int (*dependent_sleeper_trumps)(const struct task_struct *,
+		const struct task_struct *, struct sched_domain *);
+#endif
+	void (*schedule)(void);
+	void (*set_normal_task_nice)(task_t *, long);
+	void (*setscheduler)(task_t *, int, int);
+	void (*init_batch_task)(task_t *);
+	long (*sys_yield)(void);
+	void (*yield)(void);
+	void (*init_idle)(task_t *, int);
+	void (*sched_init)(void);
+#ifdef CONFIG_SMP
+	void (*migrate_queued_task)(struct task_struct *, int);
+#ifdef CONFIG_HOTPLUG_CPU
+	void (*set_select_idle_first)(struct runqueue *);
+	void (*set_select_idle_last)(struct runqueue *);
+	void (*migrate_dead_tasks)(unsigned int);
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	void (*normalize_rt_task)(struct task_struct *);
+#endif
+	struct attribute **attrs;
+};
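+
+/*
+ * Each scheduler supplies one of these; the one actually used is chosen
+ * at boot (e.g. via the "cpusched=" parameter when CONFIG_CPUSCHED_CHOICE
+ * is enabled).  Purely illustrative sketch with hypothetical "foo" names:
+ *
+ *	const struct sched_drv foo_sched_drv = {
+ *		.name			= "foo",
+ *		.init_runqueue_queue	= foo_init_runqueue_queue,
+ *		.task_timeslice		= foo_task_timeslice,
+ *		.schedule		= foo_schedule,
+ *		...
+ *	};
+ */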
+
+extern const struct sched_drv *sched_drvp;
+
+extern void sched_drv_sysfs_init(void);
+
+#endif
diff -urN oldtree/include/linux/sched_pvt.h newtree/include/linux/sched_pvt.h
--- oldtree/include/linux/sched_pvt.h	1970-01-01 00:00:00.000000000 +0000
+++ newtree/include/linux/sched_pvt.h	2006-03-08 18:56:29.999752000 +0000
@@ -0,0 +1,492 @@
+#ifndef _LINUX_SCHED_PVT_H
+#define _LINUX_SCHED_PVT_H
+/*
+ * include/linux/sched_pvt.h
+ * This contains the definition of the CPU scheduler macros and function
+ * prototypes that are only of interest to scheduler implementations.
+ */
+
+#include <linux/sched_drv.h>
+#include <linux/stat.h> /* S_IRUGO etc on IA64 */
+#include <linux/kprobes.h>
+
+#include <asm/mmu_context.h>
+
+DECLARE_PER_CPU(struct runqueue, runqueues);
+
+#define TASK_PREEMPTS_CURR(p, rq) \
+	((p)->prio < (rq)->curr->prio)
+
+#define task_is_queued(p)	(!list_empty(&(p)->run_list))
+
+#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
+#define this_rq()		(&__get_cpu_var(runqueues))
+#define task_rq(p)		cpu_rq(task_cpu(p))
+#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
+
+/*
+ * Context-switch locking:
+ */
+#ifndef prepare_arch_switch
+# define prepare_arch_switch(next)	do { } while (0)
+#endif
+#ifndef finish_arch_switch
+# define finish_arch_switch(prev)	do { } while (0)
+#endif
+
+#ifndef __ARCH_WANT_UNLOCKED_CTXSW
+static inline int task_running(runqueue_t *rq, task_t *p)
+{
+	return rq->curr == p;
+}
+
+static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
+{
+}
+
+static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+	/* this is a valid case when another task releases the spinlock */
+	rq->lock.owner = current;
+#endif
+	spin_unlock_irq(&rq->lock);
+}
+#else /* __ARCH_WANT_UNLOCKED_CTXSW */
+static inline int task_running(runqueue_t *rq, task_t *p)
+{
+#ifdef CONFIG_SMP
+	return p->oncpu;
+#else
+	return rq->curr == p;
+#endif
+}
+
+static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
+{
+#ifdef CONFIG_SMP
+	/*
+	 * We can optimise this out completely for !SMP, because the
+	 * SMP rebalancing from interrupt is the only thing that cares
+	 * here.
+	 */
+	next->oncpu = 1;
+#endif
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	spin_unlock_irq(&rq->lock);
+#else
+	spin_unlock(&rq->lock);
+#endif
+}
+
+static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
+{
+#ifdef CONFIG_SMP
+	/*
+	 * After ->oncpu is cleared, the task can be moved to a different CPU.
+	 * We must ensure this doesn't happen until the switch is completely
+	 * finished.
+	 */
+	smp_wmb();
+	prev->oncpu = 0;
+#endif
+#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	local_irq_enable();
+#endif
+}
+#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
+
+/*
+ * task_rq_lock - lock the runqueue a given task resides on and disable
+ * interrupts.  Note the ordering: we can safely lookup the task_rq without
+ * explicitly disabling preemption.
+ */
+static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
+	__acquires(rq->lock)
+{
+	struct runqueue *rq;
+
+repeat_lock_task:
+	local_irq_save(*flags);
+	rq = task_rq(p);
+	spin_lock(&rq->lock);
+	if (unlikely(rq != task_rq(p))) {
+		spin_unlock_irqrestore(&rq->lock, *flags);
+		goto repeat_lock_task;
+	}
+	return rq;
+}
+
+static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
+	__releases(rq->lock)
+{
+	spin_unlock_irqrestore(&rq->lock, *flags);
+}
+
+/*
+ * rq_lock - lock a given runqueue and disable interrupts.
+ */
+static inline runqueue_t *this_rq_lock(void)
+	__acquires(rq->lock)
+{
+	runqueue_t *rq;
+
+	local_irq_disable();
+	rq = this_rq();
+	spin_lock(&rq->lock);
+
+	return rq;
+}
+
+/*
+ * Place scheduler attributes in sysfs
+ */
+struct sched_drv_sysfs_entry {
+	struct attribute attr;
+	ssize_t (*show)(char *);
+	ssize_t (*store)(const char *, size_t);
+};
+
+#define to_sched_drv_sysfs_entry(a) container_of((a), struct sched_drv_sysfs_entry, attr)
+
+/*
+ * Macros to help define the more common scheduler sysfs attribute types
+ */
+#define SCHED_DRV_SYSFS_UINT_RW_EV(sdse_vis, aname, conv_in, conv_out, MINV, MAXV) \
+static ssize_t show_ ## aname(char *page) \
+{ \
+	unsigned long long val = conv_out(aname); \
+ \
+	return sprintf(page, "%llu\n", val); \
+} \
+ \
+static ssize_t store_ ## aname(const char *page, size_t count) \
+{ \
+	unsigned long long val; \
+	char *end = NULL; \
+ \
+	val = simple_strtoull(page, &end, 10); \
+	if ((end == page) || ((*end != '\0') && (*end != '\n'))) \
+		return -EINVAL; \
+	val = conv_in(val); \
+	if (val < (MINV)) \
+		val = (MINV); \
+	else if (val > (MAXV)) \
+		val = (MAXV); \
+ \
+	aname = val; \
+ \
+	return count; \
+} \
+ \
+sdse_vis struct sched_drv_sysfs_entry aname ## _sdse = { \
+	.attr = { .name = # aname, .mode = S_IRUGO | S_IWUSR }, \
+	.show = show_ ## aname, \
+	.store = store_ ## aname, \
+}
+#define SCHED_DRV_SYSFS_UINT_RW(aname, conv_in, conv_out, MINV, MAXV) \
+	SCHED_DRV_SYSFS_UINT_RW_EV(, aname, conv_in, conv_out, MINV, MAXV)
+#define SCHED_DRV_SYSFS_UINT_RW_STATIC(aname, conv_in, conv_out, MINV, MAXV) \
+	SCHED_DRV_SYSFS_UINT_RW_EV(static, aname, conv_in, conv_out, MINV, MAXV)
+
+#define SCHED_DRV_SYSFS_UINT_RO_EV(sdse_vis, ev, aname, conv_out) \
+static ssize_t show_ ## aname(char *page) \
+{ \
+	unsigned long long val = conv_out(aname); \
+ \
+	return sprintf(page, "%llu\n", val); \
+} \
+ \
+sdse_vis struct sched_drv_sysfs_entry aname ## _sdse = { \
+	.attr = { .name = # aname, .mode = S_IRUGO }, \
+	.show = show_ ## aname, \
+	.store = NULL, \
+}
+
+#define SCHED_DRV_SYSFS_UINT_RO(sdse_vis, ev, aname, conv_out) \
+	SCHED_DRV_SYSFS_UINT_RO_EV(, ev, aname, conv_out)
+#define SCHED_DRV_SYSFS_UINT_RO_STATIC(sdse_vis, ev, aname, conv_out) \
+	SCHED_DRV_SYSFS_UINT_RO_EV(static, ev, aname, conv_out)
+
+#define SCHED_DRV_SYSFS_ATTR(aname) (aname ## _sdse.attr)
+#define SCHED_DRV_DECLARE_SYSFS_ENTRY(aname) \
+extern struct sched_drv_sysfs_entry aname ## _sdse
+
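+/*
+ * Illustrative use of the attribute helpers (the names below are
+ * hypothetical, not taken from any scheduler in this patch): export a
+ * tunable clamped to the range 1..1000 with no unit conversion (empty
+ * conv_in/conv_out arguments expand to nothing and leave the value
+ * untouched, as the non-_EV wrappers above already do for sdse_vis):
+ *
+ *	static unsigned long foo_interval = 100;
+ *	SCHED_DRV_SYSFS_UINT_RW_STATIC(foo_interval, , , 1, 1000);
+ *
+ * The resulting &SCHED_DRV_SYSFS_ATTR(foo_interval) can then be listed in
+ * the scheduler's sched_drv ->attrs array.
+ */
+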
+/*
+ * "Nice" biased load balancing
+ */
+#ifdef CONFIG_SMP
+static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p)
+{
+	rq->raw_weighted_load += p->load_weight;
+}
+
+static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p)
+{
+	rq->raw_weighted_load -= p->load_weight;
+}
+#else
+static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p)
+{
+}
+
+static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p)
+{
+}
+#endif
+
+static inline void inc_nr_running(task_t *p, runqueue_t *rq)
+{
+	rq->nr_running++;
+	inc_raw_weighted_load(rq, p);
+}
+
+static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+{
+	rq->nr_running--;
+	dec_raw_weighted_load(rq, p);
+}
+
+#ifdef CONFIG_SCHEDSTATS
+# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
+
+/*
+ * Called when a process is dequeued from the active array and given
+ * the cpu.  We should note that with the exception of interactive
+ * tasks, the expired queue will become the active queue after the active
+ * queue is empty, without explicitly dequeuing and requeuing tasks in the
+ * expired queue.  (Interactive tasks may be requeued directly to the
+ * active queue, thus delaying tasks in the expired queue from running;
+ * see scheduler_tick()).
+ *
+ * This function is only called from sched_info_arrive(), rather than
+ * dequeue_task(). Even though a task may be queued and dequeued multiple
+ * times as it is shuffled about, we're really interested in knowing how
+ * long it was from the *first* time it was queued to the time that it
+ * finally hit a cpu.
+ */
+static inline void sched_info_dequeued(task_t *t)
+{
+	t->sched_info.last_queued = 0;
+}
+
+/*
+ * Called when a task finally hits the cpu.  We can now calculate how
+ * long it was waiting to run.  We also note when it began so that we
+ * can keep stats on how long its timeslice is.
+ */
+void sched_info_arrive(task_t *t);
+
+/*
+ * Called when a process is queued into either the active or expired
+ * array.  The time is noted and later used to determine how long we
+ * had to wait for us to reach the cpu.  Since the expired queue will
+ * become the active queue after active queue is empty, without dequeuing
+ * and requeuing any tasks, we are interested in queuing to either. It
+ * is unusual but not impossible for tasks to be dequeued and immediately
+ * requeued in the same or another array: this can happen in sched_yield(),
+ * set_user_nice(), and even load_balance() as it moves tasks from runqueue
+ * to runqueue.
+ *
+ * This function is only called from enqueue_task(), but also only updates
+ * the timestamp if it is already not set.  It's assumed that
+ * sched_info_dequeued() will clear that stamp when appropriate.
+ */
+static inline void sched_info_queued(task_t *t)
+{
+	if (!t->sched_info.last_queued)
+		t->sched_info.last_queued = jiffies;
+}
+
+/*
+ * Called when a process ceases being the active-running process, either
+ * voluntarily or involuntarily.  Now we can calculate how long we ran.
+ */
+static inline void sched_info_depart(task_t *t)
+{
+	struct runqueue *rq = task_rq(t);
+	unsigned long diff = jiffies - t->sched_info.last_arrival;
+
+	t->sched_info.cpu_time += diff;
+
+	if (rq)
+		rq->rq_sched_info.cpu_time += diff;
+}
+
+/*
+ * Called when tasks are switched involuntarily due, typically, to expiring
+ * their time slice.  (This may also be called when switching to or from
+ * the idle task.)  We are only called when prev != next.
+ */
+static inline void sched_info_switch(task_t *prev, task_t *next)
+{
+	struct runqueue *rq = task_rq(prev);
+
+	/*
+	 * prev now departs the cpu.  It's not interesting to record
+	 * stats about how efficient we were at scheduling the idle
+	 * process, however.
+	 */
+	if (prev != rq->idle)
+		sched_info_depart(prev);
+
+	if (next != rq->idle)
+		sched_info_arrive(next);
+}
+#else
+#define schedstat_inc(rq, field)	do { } while (0)
+#define sched_info_queued(t)		do { } while (0)
+#define sched_info_switch(t, next)	do { } while (0)
+#endif /* CONFIG_SCHEDSTATS */
+
+/**
+ * prepare_task_switch - prepare to switch tasks
+ * @rq: the runqueue preparing to switch
+ * @next: the task we are going to switch to.
+ *
+ * This is called with the rq lock held and interrupts off. It must
+ * be paired with a subsequent finish_task_switch after the context
+ * switch.
+ *
+ * prepare_task_switch sets up locking and calls architecture specific
+ * hooks.
+ */
+static inline void prepare_task_switch(runqueue_t *rq, task_t *next)
+{
+	prepare_lock_switch(rq, next);
+	prepare_arch_switch(next);
+}
+
+/**
+ * finish_task_switch - clean up after a task-switch
+ * @rq: runqueue associated with task-switch
+ * @prev: the thread we just switched away from.
+ *
+ * finish_task_switch must be called after the context switch, paired
+ * with a prepare_task_switch call before the context switch.
+ * finish_task_switch will reconcile locking set up by prepare_task_switch,
+ * and do any other architecture-specific cleanup actions.
+ *
+ * Note that we may have delayed dropping an mm in context_switch(). If
+ * so, we finish that here outside of the runqueue lock.  (Doing it
+ * with the lock held can cause deadlocks; see schedule() for
+ * details.)
+ */
+static inline void finish_task_switch(runqueue_t *rq, task_t *prev)
+	__releases(rq->lock)
+{
+	struct mm_struct *mm = rq->prev_mm;
+	unsigned long prev_task_flags;
+
+	rq->prev_mm = NULL;
+
+	/*
+	 * A task struct has one reference for the use as "current".
+	 * If a task dies, then it sets EXIT_ZOMBIE in tsk->exit_state and
+	 * calls schedule one last time. The schedule call will never return,
+	 * and the scheduled task must drop that reference.
+	 * The test for EXIT_ZOMBIE must occur while the runqueue locks are
+	 * still held, otherwise prev could be scheduled on another cpu, die
+	 * there before we look at prev->state, and then the reference would
+	 * be dropped twice.
+	 *		Manfred Spraul <manfred@colorfullife.com>
+	 */
+	prev_task_flags = prev->flags;
+	finish_arch_switch(prev);
+	finish_lock_switch(rq, prev);
+	if (mm)
+		mmdrop(mm);
+	if (unlikely(prev_task_flags & PF_DEAD)) {
+		/*
+		 * Remove function-return probe instances associated with this
+		 * task and put them back on the free list.
+		 */
+		kprobe_flush_task(prev);
+		put_task_struct(prev);
+	}
+}
+
+/*
+ * context_switch - switch to the new MM and the new
+ * thread's register state.
+ */
+static inline
+task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
+{
+	struct mm_struct *mm = next->mm;
+	struct mm_struct *oldmm = prev->active_mm;
+
+	if (unlikely(!mm)) {
+		next->active_mm = oldmm;
+		atomic_inc(&oldmm->mm_count);
+		enter_lazy_tlb(oldmm, next);
+	} else
+		switch_mm(oldmm, mm, next);
+
+	if (unlikely(!prev->mm)) {
+		prev->active_mm = NULL;
+		WARN_ON(rq->prev_mm);
+		rq->prev_mm = oldmm;
+	}
+
+	/* Here we just switch the register state and the stack. */
+	switch_to(prev, next, prev);
+
+	return prev;
+}
+
+/*
+ * This is called on clock ticks and on context switches.
+ * Bank in p->sched_time the ns elapsed since the last tick or switch.
+ */
+static inline void update_cpu_clock(task_t *p, runqueue_t *rq,
+				    unsigned long long now)
+{
+	unsigned long long last = max(p->timestamp, rq->timestamp_last_tick);
+	p->sched_time += now - last;
+}
+
+/* Actually do priority change: must hold rq lock. */
+void __setscheduler(struct task_struct *, int, int);
+
+#ifdef CONFIG_SMP
+#define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran)	\
+				< (long long) (sd)->cache_hot_time)
+extern void resched_task(task_t *p);
+extern void idle_balance(int, runqueue_t *);
+extern void rebalance_tick(int, runqueue_t *, enum idle_type);
+
+/*
+ * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
+ */
+int can_migrate_task(task_t *, runqueue_t *, int, struct sched_domain *,
+	enum idle_type, int *);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern void migrate_dead(unsigned int, task_t *);
+#endif
+#else
+static inline void resched_task(task_t *p)
+{
+	assert_spin_locked(&task_rq(p)->lock);
+	set_tsk_need_resched(p);
+}
+
+/*
+ * on UP we do not need to balance between CPUs:
+ */
+static inline void idle_balance(int cpu, runqueue_t *rq) { }
+static inline void rebalance_tick(int cpu, runqueue_t *rq, enum idle_type idle) { }
+#endif
+
+#ifdef CONFIG_SCHED_SMT
+extern int wake_priority_sleeper(runqueue_t *);
+extern void wake_sleeping_dependent(int, runqueue_t *);
+extern int dependent_sleeper(int, runqueue_t *);
+#else
+static inline int wake_priority_sleeper(runqueue_t *rq) { return 0; }
+static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq) { }
+static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq) { return 0; }
+#endif
+
+#endif
diff -urN oldtree/include/linux/sched_runq.h newtree/include/linux/sched_runq.h
--- oldtree/include/linux/sched_runq.h	1970-01-01 00:00:00.000000000 +0000
+++ newtree/include/linux/sched_runq.h	2006-03-08 18:56:30.003752250 +0000
@@ -0,0 +1,171 @@
+#ifndef _LINUX_SCHED_RUNQ_H
+#define _LINUX_SCHED_RUNQ_H
+/*
+ * include/linux/sched_runq.h
+ * This contains the definition of the CPU scheduler run queue type.
+ * Modified to allow each scheduler to have its own private run queue data.
+ */
+
+/*
+ * These are the runqueue data structures:
+ */
+#if defined(CONFIG_CPUSCHED_INGO) || defined(CONFIG_CPUSCHED_INGO_LL)
+#define INGO_MAX_PRIO (MAX_RT_PRIO + 40)
+
+#define INGO_BITMAP_SIZE ((((INGO_MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
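+/*
+ * e.g. 3 longs on a 64-bit build (5 on 32-bit): room for INGO_MAX_PRIO+1
+ * == 141 bits, one per priority plus a delimiter bit for the bit search.
+ */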
+
+struct prio_array {
+	unsigned int nr_active;
+	unsigned long bitmap[INGO_BITMAP_SIZE];
+	struct list_head queue[INGO_MAX_PRIO];
+};
+
+struct ingo_runqueue_queue {
+	prio_array_t *active, *expired, arrays[2];
+	/*
+	 * expired_timestamp: set to 0 on init and on each array switch;
+	 * set to jiffies whenever a non-interactive job expires.
+	 */
+	unsigned long expired_timestamp;
+	int best_expired_prio;
+};
+#endif
+
+#ifdef CONFIG_CPUSCHED_STAIRCASE
+#define STAIRCASE_MAX_PRIO (MAX_RT_PRIO + 40)
+#define STAIRCASE_NUM_PRIO_SLOTS (STAIRCASE_MAX_PRIO + 1)
+
+struct staircase_runqueue_queue {
+	DECLARE_BITMAP(bitmap, STAIRCASE_NUM_PRIO_SLOTS);
+	struct list_head queue[STAIRCASE_NUM_PRIO_SLOTS - 1];
+	unsigned int cache_ticks;
+	unsigned int preempted;
+};
+#endif
+
+#ifdef CONFIG_CPUSCHED_SPA
+#define SPA_IDLE_PRIO 159
+#define SPA_NUM_PRIO_SLOTS (SPA_IDLE_PRIO + 1)
+
+struct spa_prio_slot {
+	unsigned int prio;
+	struct list_head list;
+};
+
+struct spa_runqueue_queue {
+	DECLARE_BITMAP(bitmap, SPA_NUM_PRIO_SLOTS);
+	struct spa_prio_slot queue[SPA_NUM_PRIO_SLOTS - 1];
+	unsigned long next_prom_due;
+	unsigned long pcount;
+	unsigned long nr_active_eb_shares;
+};
+#endif
+
+#ifdef CONFIG_CPUSCHED_NICK
+#define NICK_MAX_PRIO (MAX_RT_PRIO + 59)
+
+#define NICK_BITMAP_SIZE ((((NICK_MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
+
+struct nick_prio_array {
+	int min_prio;
+	unsigned int nr_active;
+	unsigned long bitmap[NICK_BITMAP_SIZE];
+	struct list_head queue[NICK_MAX_PRIO];
+};
+
+struct nick_runqueue_queue {
+	struct nick_prio_array *active, *expired, arrays[2];
+	/* incremented whenever the active and expired arrays are switched */
+	unsigned long array_sequence;
+};
+#endif
+
+typedef struct runqueue runqueue_t;
+
+union runqueue_queue {
+#ifdef CONFIG_CPUSCHED_INGO
+	struct ingo_runqueue_queue ingosched;
+#endif
+#ifdef CONFIG_CPUSCHED_STAIRCASE
+	struct staircase_runqueue_queue staircase;
+#endif
+#ifdef CONFIG_CPUSCHED_SPA
+	struct spa_runqueue_queue spa;
+#endif
+#ifdef CONFIG_CPUSCHED_NICK
+	struct nick_runqueue_queue nicksched;
+#endif
+};
+
+/*
+ * This is the main, per-CPU runqueue data structure.
+ *
+ * Locking rule: those places that want to lock multiple runqueues
+ * (such as the load balancing or the thread migration code), lock
+ * acquire operations must be ordered by ascending &runqueue.
+ */
+struct runqueue {
+	spinlock_t lock;
+
+	/*
+	 * nr_running and cpu_load should be in the same cacheline because
+	 * remote CPUs use both these fields when doing load calculation.
+	 */
+	unsigned long nr_running;
+#ifdef CONFIG_SMP
+	unsigned long raw_weighted_load;
+	unsigned long cpu_load[3];
+#endif
+	unsigned long long nr_switches;
+
+	/*
+	 * This is part of a global counter where only the total sum
+	 * over all CPUs matters. A task can increase this counter on
+	 * one CPU and if it got migrated afterwards it may decrease
+	 * it on another CPU. Always updated under the runqueue lock:
+	 */
+	unsigned long nr_uninterruptible;
+	union runqueue_queue qu;
+	unsigned long long timestamp_last_tick;
+	task_t *curr, *idle;
+	struct mm_struct *prev_mm;
+	atomic_t nr_iowait;
+
+#ifdef CONFIG_SMP
+	struct sched_domain *sd;
+
+	/* For active balancing */
+	int active_balance;
+	int push_cpu;
+
+	task_t *migration_thread;
+	struct list_head migration_queue;
+#endif
+
+#ifdef CONFIG_SCHEDSTATS
+	/* latency stats */
+	struct sched_info rq_sched_info;
+
+	/* sys_sched_yield() stats */
+	unsigned long yld_exp_empty;
+	unsigned long yld_act_empty;
+	unsigned long yld_both_empty;
+	unsigned long yld_cnt;
+
+	/* schedule() stats */
+	unsigned long sched_switch;
+	unsigned long sched_cnt;
+	unsigned long sched_goidle;
+
+	/* try_to_wake_up() stats */
+	unsigned long ttwu_cnt;
+	unsigned long ttwu_local;
+#endif
+};
+
+#endif
diff -urN oldtree/include/linux/sched_spa.h newtree/include/linux/sched_spa.h
--- oldtree/include/linux/sched_spa.h	1970-01-01 00:00:00.000000000 +0000
+++ newtree/include/linux/sched_spa.h	2006-03-08 18:56:30.007752500 +0000
@@ -0,0 +1,163 @@
+#ifndef _LINUX_SCHED_SPA_H
+#define _LINUX_SCHED_SPA_H
+
+#include <linux/sched_runq.h>
+#include <linux/sched_pvt.h>
+
+/*
+ * For entitlement based scheduling a task's shares are determined from its
+ * "nice" value: nice == 19 gives 1 share, nice == 0 gives 20 shares and
+ * nice == -19 gives 420 shares.
+ */
+#define DEFAULT_EB_SHARES 20
+#define MAX_EB_SHARES (DEFAULT_EB_SHARES * (DEFAULT_EB_SHARES + 1))
+
+/*
+ * Fixed denominator rational numbers for use by the CPU scheduler
+ */
+#define SPA_AVG_OFFSET 4
+/*
+ * Get the rounded integer value of a scheduling statistic average field
+ * i.e. those fields whose names begin with avg_
+ */
+#define SPA_AVG_RND(x) \
+	(((x) + (1 << (SPA_AVG_OFFSET - 1))) >> (SPA_AVG_OFFSET))
+#define SPA_AVG_REAL(a) ((a) << SPA_AVG_OFFSET)
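+/*
+ * e.g. with SPA_AVG_OFFSET == 4 the avg_* fields are scaled by 16, so
+ * SPA_AVG_REAL(3) == 48 and SPA_AVG_RND(52) == 3 (52/16 rounded to nearest).
+ */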
+
+#define SPA_BGND_PRIO		(SPA_IDLE_PRIO - 1)
+#define SPA_SOFT_CAP_PRIO	(SPA_BGND_PRIO - 1)
+
+#define SPAF_SINBINNED	(1 << 0)	/* I am sinbinned */
+#define SPAF_UISLEEP	(1 << 1)	/* Uninterruptible sleep */
+#define SPAF_NONIASLEEP	(1 << 2)	/* Non interactive sleep */
+#define SPAF_JUST_WOKEN	(1 << 3)	/* In first cycle after waking */
+#define SPAF_INTR_WOKEN	(1 << 4)	/* Woken to service interrupt */
+#define SPAF_JUST_FORK	(1 << 5)	/* In first cycle after forking */
+#define SPAF_IA_LATENCY	(1 << 6)	/* last latency was interactive */
+#define SPAF_FIRST_RUN	(1 << 7)	/* haven't slept since fork */
+
+#define task_is_sinbinned(p) \
+	(unlikely(((p)->sdu.spa.flags & SPAF_SINBINNED) != 0))
+#define task_is_bgnd(p) (unlikely((p)->sdu.spa.cpu_rate_cap == 0))
+#define task_was_in_ia_sleep(p) \
+	(((p)->sdu.spa.flags & (SPAF_NONIASLEEP | SPAF_UISLEEP)) == 0)
+#define latency_interactive(p) \
+	((p)->sdu.spa.flags & SPAF_IA_LATENCY)
+
+#define RATIO_EXCEEDS_PPT(a, b, ppt) \
+	(((a) * 1000) > ((b) * (ppt)))
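+/*
+ * i.e. true when a/b exceeds ppt parts per thousand.  For example, an
+ * avg_cpu_per_cycle of 600 against an avg_cycle_length of 1000 exceeds a
+ * 500 ppt cap, since 600 * 1000 > 1000 * 500.
+ */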
+
+static inline int spa_ia_sleepiness_exceeds_ppt(const struct task_struct *p,
+					    unsigned int ppt)
+{
+	return RATIO_EXCEEDS_PPT(p->sdu.spa.avg_ia_sleep_per_cycle,
+				 p->sdu.spa.avg_sleep_per_cycle +
+				 p->sdu.spa.avg_cpu_per_cycle,
+				 ppt);
+}
+
+static inline int spa_cpu_usage_rate_exceeds_ppt(const struct task_struct *p,
+						 unsigned int ppt)
+{
+	return RATIO_EXCEEDS_PPT(p->sdu.spa.avg_cpu_per_cycle,
+				 p->sdu.spa.avg_cycle_length,
+				 ppt);
+}
+
+static inline int spa_exceeding_cpu_rate_cap(const struct task_struct *p)
+{
+	return spa_cpu_usage_rate_exceeds_ppt(p, p->sdu.spa.min_cpu_rate_cap);
+}
+
+static inline int spa_exceeding_cpu_rate_hard_cap(const struct task_struct *p)
+{
+	return spa_cpu_usage_rate_exceeds_ppt(p, p->sdu.spa.cpu_rate_hard_cap);
+}
+
+/*
+ * Define a common interface for SPA based schedulers to allow maximum
+ * sharing of code.
+ */
+struct sched_spa_child {
+	int (*soft_cap_effective_prio)(const struct task_struct *);
+	int (*normal_effective_prio)(const struct task_struct *);
+	void (*reassess_at_activation)(struct task_struct *);
+	void (*fork_extras)(struct task_struct *);
+	void (*runq_data_tick)(unsigned int, struct runqueue *);
+	void (*reassess_at_end_of_ts)(struct task_struct *);
+	void (*reassess_at_sinbin_release)(struct task_struct *);
+	void (*reassess_at_renice)(struct task_struct *);
+};
+
+extern struct sched_spa_child *spa_sched_child;
+
+/*
+ * Common functions for use by child schedulers
+ */
+int spa_pb_soft_cap_priority(const task_t *, int);
+int spa_eb_soft_cap_priority(const task_t *, int);
+void spa_sched_init(void);
+void spa_init_runqueue_queue(union runqueue_queue *);
+void spa_set_oom_time_slice(struct task_struct *, unsigned long);
+#ifdef CONFIG_SMP
+void spa_set_load_weight(task_t *);
+#endif
+unsigned int spa_task_timeslice(const task_t *);
+void spa_wake_up_task(struct task_struct *, struct runqueue *, unsigned int,
+		      int);
+void spa_fork(task_t *);
+void spa_wake_up_new_task(task_t *, unsigned long);
+void spa_exit(task_t *);
+void spa_tick(struct task_struct *, struct runqueue *, unsigned long long);
+void spa_schedule(void);
+void spa_set_normal_task_nice(task_t *, long);
+void spa_setscheduler(task_t *, int, int);
+long spa_sys_yield(void);
+void spa_yield(void);
+void spa_init_idle(task_t *, int);
+void spa_init_batch_task(task_t *);
+#ifdef CONFIG_SMP
+int spa_move_tasks(runqueue_t *, int, runqueue_t *, unsigned long,
+	unsigned long, struct sched_domain *, enum idle_type, int *);
+void spa_migrate_queued_task(struct task_struct *, int);
+#ifdef CONFIG_HOTPLUG_CPU
+void spa_set_select_idle_first(struct runqueue *);
+void spa_set_select_idle_last(struct runqueue *);
+void spa_migrate_dead_tasks(unsigned int);
+#endif
+#endif
+#ifdef CONFIG_SCHED_SMT
+struct task_struct *spa_head_of_queue(union runqueue_queue *);
+int spa_dependent_sleeper_trumps(const struct task_struct *,
+				 const struct task_struct *,
+				 struct sched_domain *);
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+void spa_normalize_rt_task(struct task_struct *);
+#endif
+
+/*
+ * Make basic sysfs scheduling parameters available for export by child
+ * schedulers
+ */
+SCHED_DRV_DECLARE_SYSFS_ENTRY(time_slice);
+SCHED_DRV_DECLARE_SYSFS_ENTRY(sched_rr_time_slice);
+SCHED_DRV_DECLARE_SYSFS_ENTRY(bgnd_time_slice_multiplier);
+SCHED_DRV_DECLARE_SYSFS_ENTRY(base_prom_interval);
+SCHED_DRV_DECLARE_SYSFS_ENTRY(promotion_floor);
+
+/*
+ * Functions to allow child schedulers to get/set basic scheduling parameters
+ */
+unsigned long spa_get_time_slice_msecs(void);
+int spa_set_time_slice_msecs(unsigned long);
+unsigned long spa_get_sched_rr_time_slice_msecs(void);
+int spa_set_time_sched_rr_slice_msecs(unsigned long);
+unsigned int spa_get_bgnd_time_slice_multiplier(void);
+int spa_set_bgnd_time_slice_multiplier(unsigned int);
+unsigned long spa_get_base_prom_interval_msecs(void);
+int spa_set_base_prom_interval_msecs(unsigned long);
+unsigned int spa_get_promotion_floor(void);
+int spa_set_promotion_floor(unsigned int);
+
+#endif
diff -urN oldtree/include/linux/sched_task.h newtree/include/linux/sched_task.h
--- oldtree/include/linux/sched_task.h	1970-01-01 00:00:00.000000000 +0000
+++ newtree/include/linux/sched_task.h	2006-03-08 18:56:30.007752500 +0000
@@ -0,0 +1,113 @@
+#ifndef _LINUX_SCHED_TASK_H
+#define _LINUX_SCHED_TASK_H
+/*
+ * include/linux/sched_task.h
+ */
+
+/*
+ * Require that the relationship between 'nice' and 'static_prio' be the same
+ * for all schedulers.
+ * Convert user-nice values [ -20 ... 0 ... 19 ]
+ * to static priority [ MAX_RT_PRIO..(MAX_RT_PRIO + 39) ],
+ * and back.
+ */
+#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
+#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
+#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)
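+/*
+ * e.g. with MAX_RT_PRIO == 100: NICE_TO_PRIO(-20) == 100, NICE_TO_PRIO(0)
+ * == 120 and NICE_TO_PRIO(19) == 139; PRIO_TO_NICE() is the exact inverse.
+ */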
+
+#ifdef CONFIG_CPUSCHED_INGO
+enum sleep_type {
+	SLEEP_NORMAL,
+	SLEEP_NONINTERACTIVE,
+	SLEEP_INTERACTIVE,
+	SLEEP_INTERRUPTED,
+};
+
+struct ingo_sched_drv_task {
+	struct prio_array *array;
+	unsigned int time_slice;
+	unsigned int first_time_slice;
+	unsigned long sleep_avg;
+	enum sleep_type sleep_type;
+};
+#endif
+
+#ifdef CONFIG_CPUSCHED_INGO_LL
+struct ingo_ll_sched_drv_task {
+	struct prio_array *array;
+	unsigned int time_slice;
+	unsigned int first_time_slice;
+	unsigned int latency_bonus;
+	unsigned long long avg_latency;
+	unsigned long long avg_ia_latency;
+	unsigned long long avg_cpu_run;
+	int flags;
+};
+#endif
+
+#ifdef CONFIG_CPUSCHED_STAIRCASE
+struct staircase_sched_drv_task {
+	unsigned long sflags;
+	unsigned long runtime, totalrun, ns_debit;
+	unsigned int bonus;
+	unsigned int slice, time_slice;
+};
+#endif
+
+#ifdef CONFIG_CPUSCHED_SPA
+struct spa_sched_drv_task {
+	unsigned int time_slice;
+	unsigned long long avg_cpu_per_cycle;
+	unsigned long long avg_sleep_per_cycle;
+	unsigned long long avg_ia_sleep_per_cycle;
+	unsigned long long avg_delay_per_cycle;
+	unsigned long long avg_cycle_length;
+	unsigned long long avg_latency;
+	unsigned long long avg_ia_latency;
+	unsigned long cpu_rate_cap, min_cpu_rate_cap;
+	unsigned long cpu_rate_hard_cap;
+	struct timer_list sinbin_timer;
+	unsigned int flags;
+	/* fields needed by children such as zaphod */
+	unsigned long interactive_bonus;
+	unsigned long auxilary_bonus;
+	unsigned int pre_bonus_priority;
+	unsigned int eb_shares;
+};
+
+/* set/get cpu rate caps in parts per thousand */
+extern int set_cpu_rate_cap(struct task_struct *p, unsigned long new_cap);
+extern int set_cpu_rate_hard_cap(struct task_struct *p, unsigned long new_cap);
+extern unsigned long get_cpu_rate_cap(struct task_struct *p);
+extern unsigned long get_cpu_rate_hard_cap(struct task_struct *p);
+#endif
+
+#ifdef CONFIG_CPUSCHED_NICK
+struct nick_sched_drv_task {
+	struct nick_prio_array *array;
+	unsigned long array_sequence;
+	unsigned long total_time, sleep_time;
+	int used_slice;
+};
+#endif
+
+union sched_drv_task {
+#ifdef CONFIG_CPUSCHED_INGO
+	struct ingo_sched_drv_task ingosched;
+#endif
+#ifdef CONFIG_CPUSCHED_INGO_LL
+	struct ingo_ll_sched_drv_task ingo_ll;
+#endif
+#ifdef CONFIG_CPUSCHED_STAIRCASE
+	struct staircase_sched_drv_task staircase;
+#endif
+#ifdef CONFIG_CPUSCHED_SPA
+	struct spa_sched_drv_task spa;
+#endif
+#ifdef CONFIG_CPUSCHED_NICK
+	struct nick_sched_drv_task nicksched;
+#endif
+};
+
+void set_oom_time_slice(struct task_struct *p, unsigned long t);
+#endif
diff -urN oldtree/init/Kconfig newtree/init/Kconfig
--- oldtree/init/Kconfig	2006-03-08 18:48:02.928062000 +0000
+++ newtree/init/Kconfig	2006-03-08 18:56:30.011752750 +0000
@@ -279,6 +279,8 @@
 
 	  If unsure, say N.
 
+source "kernel/Kconfig.cpusched"
+
 menuconfig EMBEDDED
 	bool "Configure standard kernel features (for small systems)"
 	help
diff -urN oldtree/init/main.c newtree/init/main.c
--- oldtree/init/main.c	2006-03-08 18:48:02.928062000 +0000
+++ newtree/init/main.c	2006-03-08 18:56:30.015753000 +0000
@@ -48,6 +48,7 @@
 #include <linux/rmap.h>
 #include <linux/mempolicy.h>
 #include <linux/key.h>
+#include <linux/sched_drv.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -489,12 +490,6 @@
 	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */
 
 	/*
-	 * Set up the scheduler prior starting any interrupts (such as the
-	 * timer interrupt). Full topology setup happens at smp_init()
-	 * time - but meanwhile we still have a functioning scheduler.
-	 */
-	sched_init();
-	/*
 	 * Disable preemption - early bootup scheduling is extremely
 	 * fragile until we cpu_idle() for the first time.
 	 */
@@ -506,6 +501,16 @@
 	parse_args("Booting kernel", command_line, __start___param,
 		   __stop___param - __start___param,
 		   &unknown_bootoption);
+	/*
+	 * Set up the scheduler prior starting any interrupts (such as the
+	 * timer interrupt). Full topology setup happens at smp_init()
+	 * time - but meanwhile we still have a functioning scheduler.
+	 * But defer until after boot command line is parsed to avoid doing
+	 * this twice in the event that a different scheduler is selected.
+	 */
+	preempt_enable();
+	sched_init();
+	preempt_disable();
 	sort_main_extable();
 	trap_init();
 	rcu_init();
@@ -572,6 +577,7 @@
 
 	acpi_early_init(); /* before LAPIC and SMP init */
 
+	printk(KERN_INFO "Running with \"%s\" cpu scheduler.\n", sched_drvp->name);
 	/* Do the rest non-__init'ed, we're now alive */
 	rest_init();
 }
@@ -649,6 +655,7 @@
 #ifdef CONFIG_SYSCTL
 	sysctl_init();
 #endif
+	sched_drv_sysfs_init();
 
 	do_initcalls();
 }
diff -urN oldtree/kernel/Kconfig.cpusched newtree/kernel/Kconfig.cpusched
--- oldtree/kernel/Kconfig.cpusched	1970-01-01 00:00:00.000000000 +0000
+++ newtree/kernel/Kconfig.cpusched	2006-03-08 18:56:30.019753250 +0000
@@ -0,0 +1,220 @@
+
+menu "CPU schedulers"
+
+config CPUSCHED_SPA
+	bool
+	default n
+
+config CPUSCHED_CHOICE
+	bool "Support multiple CPU schedulers"
+	default y
+	---help---
+	  Say y here if you wish to be able to make a boot time selection
+	  of which CPU scheduler to use.  The CPU scheduler to be used may
+	  then be selected with the boot parameter "cpusched=".  In the
+	  absence of such a command line parameter, the scheduler selected
+	  at "Default CPU scheduler" will be used.
+
+	  The choice of which schedulers should be compiled into the
+	  kernel (and be available for boot time selection) can be made
+	  by enabling "Select which CPU schedulers to build in".
+
+	  If you say n here the single scheduler to be built into the
+	  kernel may be selected at "Default CPU scheduler".
+
+config CPUSCHED_CHOOSE_BUILTINS
+	bool "Select which CPU schedulers to build in" if CPUSCHED_CHOICE
+	default n
+	---help---
+	  Say y here if you want to be able to select which CPU schedulers
+	  are built into the kernel (for selection at boot time).
+
+config CPUSCHED_INGO
+	bool "Ingosched CPU scheduler" if CPUSCHED_CHOOSE_BUILTINS
+	depends on CPUSCHED_CHOICE
+	default y
+	---help---
+	  This is the standard CPU scheduler which is an O(1) dual priority
+	  array scheduler with a hybrid interactive design.
+	  To boot this CPU scheduler, if it is not the default, use the
+	  boot parameter "cpusched=ingosched".
+
+config CPUSCHED_INGO_LL
+	bool "Ingo Low Latency CPU scheduler" if CPUSCHED_CHOOSE_BUILTINS
+	depends on CPUSCHED_CHOICE
+	default y
+	---help---
+	  This is a variant of the standard O(1) dual priority array CPU
+	  scheduler with a modified hybrid interactive mechanism.
+	  To boot this CPU scheduler, if it is not the default, use the
+	  boot parameter "cpusched=ingo_ll".
+
+config CPUSCHED_STAIRCASE
+	bool "Staircase CPU scheduler" if CPUSCHED_CHOOSE_BUILTINS
+	depends on CPUSCHED_CHOICE
+	default y
+	---help---
+	  This scheduler is an O(1) single priority array with a foreground-
+	  background interactive design.
+	  To boot this CPU scheduler, if it is not the default, use the
+	  boot parameter "cpusched=staircase".
+
+config CPUSCHED_NICK
+	bool "Nicksched CPU scheduler" if CPUSCHED_CHOOSE_BUILTINS
+	depends on CPUSCHED_CHOICE
+	default y
+	---help---
+	  This is an O(1) dual priority array CPU scheduler with a hybrid
+	  interactive design as modified by Nick Piggin.
+	  To boot this CPU scheduler, if it is not the default, use the
+	  boot parameter "cpusched=nicksched".
+
+config CPUSCHED_SPA_NF
+	bool "SPA CPU scheduler (no frills)" if CPUSCHED_CHOOSE_BUILTINS
+	depends on CPUSCHED_CHOICE
+	select CPUSCHED_SPA
+	default y
+	---help---
+	  This scheduler is a simple round robin O(1) single priority array
+	  scheduler with NO extra scheduling "frills" except for soft and hard
+	  CPU usage rate caps.  This scheduler contains no extra mechanisms
+	  for enhancing interactive response and is best suited for server
+	  systems.
+	  To boot this CPU scheduler, if it is not the default, use the
+	  boot parameter "cpusched=spa_no_frills".
+
+config CPUSCHED_SPA_WS
+	bool "SPA CPU scheduler (work station)" if CPUSCHED_CHOOSE_BUILTINS
+	depends on CPUSCHED_CHOICE
+	select CPUSCHED_SPA
+	default y
+	---help---
+	  This is a scheduler with an O(1) single priority array intended for
+	  use on work stations.  In addition to soft and hard CPU usage rate
+	  caps, it has modifications to improve interactive responsiveness
+	  and media streamer latency.
+	  To boot this CPU scheduler, if it is not the default, use the
+	  boot parameter "cpusched=spa_ws".
+
+config CPUSCHED_SPA_SVR
+	bool "SPA CPU scheduler (server)" if CPUSCHED_CHOOSE_BUILTINS
+	depends on CPUSCHED_CHOICE
+	select CPUSCHED_SPA
+	default y
+	---help---
+	  This is a scheduler with an O(1) single priority array intended for
+	  use on servers.  In addition to soft and hard CPU usage rate
+	  caps, it has modifications to reduce CPU delay at moderate load
+	  levels.
+	  To boot this CPU scheduler, if it is not the default, use the
+	  boot parameter "cpusched=spa_svr".
+
+config CPUSCHED_SPA_EBS
+	bool "SPA CPU scheduler (entitlement based)" if CPUSCHED_CHOOSE_BUILTINS
+	depends on CPUSCHED_CHOICE
+	select CPUSCHED_SPA
+	default y
+	---help---
+	  This is an O(1) single priority array scheduler with an entitlement
+	  based interpretation of nice.  In addition it
+	  provides soft and hard CPU usage rate caps.
+	  To boot this CPU scheduler, if it is not the default, use the
+	  boot parameter "cpusched=spa_ebs".
+
+config CPUSCHED_ZAPHOD
+	bool "Zaphod CPU scheduler" if CPUSCHED_CHOOSE_BUILTINS
+	depends on CPUSCHED_CHOICE
+	select CPUSCHED_SPA
+	default y
+	---help---
+	  This scheduler is an O(1) single priority array with interactive
+	  bonus, throughput bonus, soft and hard CPU rate caps and a runtime
+	  choice between priority based and entitlement based interpretation
+	  of nice.
+	  To boot this CPU scheduler, if it is not the default, use the
+	  boot parameter "cpusched=zaphod".
+
+choice
+	prompt "Default CPU scheduler"
+	---help---
+	  This option allows you to choose which CPU scheduler shall be
+	  booted by default at startup if you have enabled CPUSCHED_CHOICE,
+	  or it will select the only scheduler to be built in otherwise.
+
+config CPUSCHED_DEFAULT_INGO
+	bool "Ingosched CPU scheduler"
+	select CPUSCHED_INGO
+	---help---
+	  This is the default CPU scheduler which is an O(1) dual priority
+	  array scheduler with a hybrid interactive design.
+
+config CPUSCHED_DEFAULT_INGO_LL
+	bool "Ingo Low Latency CPU scheduler"
+	select CPUSCHED_INGO_LL
+	---help---
+	  This is the default CPU scheduler which is an O(1) dual priority
+	  array scheduler with a modified hybrid interactive mechanism.
+
+config CPUSCHED_DEFAULT_STAIRCASE
+	bool "Staircase CPU scheduler"
+	select CPUSCHED_STAIRCASE
+	---help---
+	  This scheduler is an O(1) single priority array with a foreground-
+	  background interactive design.
+
+config CPUSCHED_DEFAULT_NICK
+	bool "Nicksched CPU scheduler"
+	select CPUSCHED_NICK
+	---help---
+	  This is the default CPU scheduler which is an O(1) dual priority
+	  array scheduler with a hybrid interactive design as modified by
+	  Nick Piggin.
+
+config CPUSCHED_DEFAULT_SPA_NF
+	bool "Single priority array (SPA) CPU scheduler (no frills)"
+	select CPUSCHED_SPA_NF
+	select CPUSCHED_SPA
+	---help---
+	  This is a simple round robin scheduler with an O(1) single priority
+	  array.
+
+config CPUSCHED_DEFAULT_SPA_WS
+	bool "Single priority array (SPA) CPU scheduler (work station)"
+	select CPUSCHED_SPA_WS
+	select CPUSCHED_SPA
+	---help---
+	  This is a scheduler with an O(1) single priority array intended for
+	  use on work stations.  It has modifications to improve interactive
+	  responsiveness and media streamer latency.
+
+config CPUSCHED_DEFAULT_SPA_SVR
+	bool "Single priority array (SPA) CPU scheduler (server)"
+	select CPUSCHED_SPA_SVR
+	select CPUSCHED_SPA
+	---help---
+	  This is a scheduler with an O(1) single priority array intended for
+	  use on servers.  It has modifications to reduce CPU delay at moderate
+	  levels of load.
+
+config CPUSCHED_DEFAULT_SPA_EBS
+	bool "Single priority array (SPA) CPU scheduler (entitlement based)"
+	select CPUSCHED_SPA_EBS
+	select CPUSCHED_SPA
+	---help---
+	  This scheduler is an O(1) single priority array with an
+	  entitlement based interpretation of nice.
+
+config CPUSCHED_DEFAULT_ZAPHOD
+	bool "Zaphod CPU scheduler"
+	select CPUSCHED_ZAPHOD
+	select CPUSCHED_SPA
+	---help---
+	  This scheduler is an O(1) single priority array with interactive
+	  bonus, throughput bonus, soft and hard CPU rate caps and a runtime
+	  choice between priority based and entitlement based interpretation
+	  of nice.
+
+endchoice
+
+endmenu
diff -urN oldtree/kernel/Makefile newtree/kernel/Makefile
--- oldtree/kernel/Makefile	2006-03-08 18:48:02.948063250 +0000
+++ newtree/kernel/Makefile	2006-03-08 18:56:30.019753250 +0000
@@ -8,8 +8,17 @@
 	    signal.o sys.o kmod.o workqueue.o pid.o task_ref.o \
 	    rcupdate.o extable.o params.o posix-timers.o \
 	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
-	    hrtimer.o
+	    hrtimer.o sched_drv.o
 
+obj-$(CONFIG_CPUSCHED_INGO) += ingosched.o
+obj-$(CONFIG_CPUSCHED_INGO_LL) += ingo_ll.o
+obj-$(CONFIG_CPUSCHED_STAIRCASE) += staircase.o
+obj-$(CONFIG_CPUSCHED_SPA) += sched_spa.o
+obj-$(CONFIG_CPUSCHED_SPA_WS) += sched_spa_ws.o
+obj-$(CONFIG_CPUSCHED_SPA_SVR) += sched_spa_svr.o
+obj-$(CONFIG_CPUSCHED_SPA_EBS) += sched_spa_ebs.o
+obj-$(CONFIG_CPUSCHED_ZAPHOD) += sched_zaphod.o
+obj-$(CONFIG_CPUSCHED_NICK) += nicksched.o
 obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
 obj-$(CONFIG_FUTEX) += futex.o
 ifeq ($(CONFIG_COMPAT),y)
diff -urN oldtree/kernel/ingo_ll.c newtree/kernel/ingo_ll.c
--- oldtree/kernel/ingo_ll.c	1970-01-01 00:00:00.000000000 +0000
+++ newtree/kernel/ingo_ll.c	2006-03-08 18:56:30.023753500 +0000
@@ -0,0 +1,1233 @@
+/*
+ *  kernel/ingo_ll.c
+ *  Copyright (C) 1991-2005  Linus Torvalds
+ *
+ *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
+ *		hybrid priority-list and round-robin design with
+ *		an array-switch method of distributing timeslices
+ *		and per-CPU runqueues.  Cleanups and useful suggestions
+ *		by Davide Libenzi, preemptible kernel bits by Robert Love.
+ *  2003-09-03	Interactivity tuning by Con Kolivas.
+ */
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/rcupdate.h>
+#include <linux/security.h>
+#include <linux/cpu.h>
+#include <linux/hardirq.h>
+#include <linux/sched_pvt.h>
+#include <linux/sched_runq.h>
+
+static void ingo_init_runqueue_queue(union runqueue_queue *rqq)
+{
+	int j;
+
+	rqq->ingosched.active = rqq->ingosched.arrays;
+	rqq->ingosched.expired = rqq->ingosched.arrays + 1;
+	rqq->ingosched.best_expired_prio = INGO_MAX_PRIO;
+
+	for (j = 0; j < 2; j++) {
+		int k;
+		prio_array_t *array = rqq->ingosched.arrays + j;
+
+		for (k = 0; k < INGO_MAX_PRIO; k++) {
+			INIT_LIST_HEAD(array->queue + k);
+			__clear_bit(k, array->bitmap);
+		}
+		// delimiter for bitsearch
+		__set_bit(INGO_MAX_PRIO, array->bitmap);
+		array->nr_active = 0;
+	}
+
+	rqq->ingosched.expired_timestamp = 0;
+}
+
+static void ingo_set_oom_time_slice(struct task_struct *p, unsigned long t)
+{
+	p->sdu.ingo_ll.time_slice = t;
+}
+
+/*
+ * 'User priority' is the nice value converted to something we
+ * can work with better when scaling various scheduler parameters,
+ * it's a [ 0 ... 39 ] range.
+ */
+#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
+#define MAX_USER_PRIO		(USER_PRIO(INGO_MAX_PRIO))
+
+/*
+ * Some helpers for converting nanosecond timing to jiffy resolution
+ */
+#define NS_TO_JIFFIES(TIME)	((TIME) / (1000000000 / HZ))
+#define JIFFIES_TO_NS(TIME)	((TIME) * (1000000000 / HZ))
+
+/*
+ * These are the 'tuning knobs' of the scheduler:
+ *
+ * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
+ * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
+ * Timeslices get refilled after they expire.
+ */
+#define MIN_TIMESLICE		max(5 * HZ / 1000, 1)
+#define DEF_TIMESLICE		(100 * HZ / 1000)
+#define PRIO_BONUS_RATIO	 25
+#define MAX_BONUS		(MAX_USER_PRIO * PRIO_BONUS_RATIO / 100)
+#define INTERACTIVE_DELTA	  2
+#define STARVATION_LIMIT	(DEF_TIMESLICE * MAX_BONUS)
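+/*
+ * e.g. at HZ == 1000: MIN_TIMESLICE == 5 jiffies, DEF_TIMESLICE == 100
+ * jiffies, MAX_BONUS == 40 * 25 / 100 == 10 and STARVATION_LIMIT == 1000
+ * jiffies (one second).
+ */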
+
+/*
+ * If a task is 'interactive' then we reinsert it in the active
+ * array after it has expired its current timeslice. (it will not
+ * continue to run immediately, it will still roundrobin with
+ * other interactive tasks.)
+ *
+ * This part scales the interactivity limit depending on niceness.
+ *
+ * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
+ * Here are a few examples of different nice levels:
+ *
+ *  TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
+ *  TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
+ *  TASK_INTERACTIVE(  0): [1,1,1,1,0,0,0,0,0,0,0]
+ *  TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
+ *  TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
+ *
+ * (the X axis represents the possible -5 ... 0 ... +5 dynamic
+ *  priority range a task can explore, a value of '1' means the
+ *  task is rated interactive.)
+ *
+ * Ie. nice +19 tasks can never get 'interactive' enough to be
+ * reinserted into the active array. And only heavily CPU-hog nice -20
+ * tasks will be expired. Default nice 0 tasks are somewhere between,
+ * it takes some effort for them to get interactive, but it's not
+ * too hard.
+ */
+
+#define CURRENT_BONUS(p) (just_woken_from_ia_sleep(p) ? \
+	(p)->sdu.ingo_ll.latency_bonus + 1 : (p)->sdu.ingo_ll.latency_bonus)
+
+#define GRANULARITY	(10 * HZ / 1000 ? : 1)
+
+#ifdef CONFIG_SMP
+#define TIMESLICE_GRANULARITY(p)	(GRANULARITY * \
+		(1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)) * \
+			num_online_cpus())
+#else
+#define TIMESLICE_GRANULARITY(p)	(GRANULARITY * \
+		(1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)))
+#endif
+
+#define SCALE(v1,v1_max,v2_max) \
+	(v1) * (v2_max) / (v1_max)
+
+#define DELTA(p) \
+	(SCALE(TASK_NICE(p) + 20, 40, MAX_BONUS) - 20 * MAX_BONUS / 40 + \
+		INTERACTIVE_DELTA)
+
+#define TASK_INTERACTIVE(p) \
+	((p)->prio <= (p)->static_prio - DELTA(p))
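+/*
+ * Worked example (assuming the usual MAX_RT_PRIO of 100 and a 40 level
+ * nice range, so MAX_BONUS evaluates to 10): for a nice 0 task DELTA is
+ * SCALE(20, 40, 10) - 5 + 2 == 2, so TASK_INTERACTIVE holds once its
+ * dynamic priority is at least two levels better than its static_prio,
+ * i.e. once CURRENT_BONUS reaches MAX_BONUS/2 + 2 == 7.
+ */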
+
+#define ILLF_JUST_WOKEN   0x01      /* just woken */
+#define ILLF_IA_WAKE_UP   0x02      /* just woken from interactive sleep */
+
+/*
+ * Fixed-denominator rational numbers used to estimate a task's average
+ * latencies and CPU usage per run
+ */
+#define ILL_AVG_OFFSET 4
+/*
+ * Get the rounded integer value of a scheduling statistic average field
+ */
+#define ILL_AVG_RND(x) \
+	(((x) + (1 << (ILL_AVG_OFFSET - 1))) >> (ILL_AVG_OFFSET))
+#define ILL_AVG_REAL(a) ((a) << ILL_AVG_OFFSET)
+#define ILL_AVG_ALPHA ((1 << ILL_AVG_OFFSET) - 1)
+
+unsigned long long unacceptable_ia_latency = ILL_AVG_REAL(800000UL);
+
+/* The range of acceptable interactive latencies in nanosecs */
+#define ACCEPTABLE(l) ((l) >> 8)
+#define UNACCEPTABLE_IA_LATENCY unacceptable_ia_latency
+#define ACCEPTABLE_IA_LATENCY ACCEPTABLE(UNACCEPTABLE_IA_LATENCY)
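+/*
+ * With ILL_AVG_OFFSET == 4 the stored averages carry an implicit factor
+ * of 16, so the default unacceptable_ia_latency above corresponds to a
+ * real average wakeup latency of about 800 microseconds, and the derived
+ * ACCEPTABLE_IA_LATENCY (>> 8) to roughly 3 microseconds.
+ */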
+
+static inline void incr_latency_bonus(task_t *p)
+{
+	/*
+	 * One bonus point is reserved for the extra point that all
+	 * interactive wakeups receive (see CURRENT_BONUS).
+	 */
+	if (p->sdu.ingo_ll.latency_bonus < (MAX_BONUS - 1))
+		++p->sdu.ingo_ll.latency_bonus;
+}
+
+static inline void decr_latency_bonus(task_t *p)
+{
+	if (p->sdu.ingo_ll.latency_bonus > 0)
+		--p->sdu.ingo_ll.latency_bonus;
+}
+
+static inline int just_woken(task_t *p)
+{
+	return p->sdu.ingo_ll.flags & ILLF_JUST_WOKEN;
+}
+
+static inline int just_woken_from_ia_sleep(task_t *p)
+{
+	return p->sdu.ingo_ll.flags & ILLF_IA_WAKE_UP;
+}
+
+static inline void decay_avg_value(unsigned long long *val)
+{
+	*val *= ILL_AVG_ALPHA;
+	*val >>= ILL_AVG_OFFSET;
+}
+
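+/*
+ * decay_avg_value() plus the "+= delta" steps below form an exponentially
+ * weighted moving average: new = old * 15/16 + sample.  For a steady
+ * stream of equal samples the stored value converges to 16 * sample,
+ * matching the ILL_AVG_REAL()/ILL_AVG_RND() scaling.
+ */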
+static void update_latency_bonus(task_t *p, runqueue_t *rq, unsigned long long now)
+{
+	long long delta = now - p->timestamp;
+
+	/* make allowance for sched_clock() not being monotonic */
+	if (unlikely(delta < 0))
+		delta = 0;
+
+	decay_avg_value(&p->sdu.ingo_ll.avg_latency);
+	p->sdu.ingo_ll.avg_latency += delta;
+
+	if (just_woken_from_ia_sleep(p)) {
+		decay_avg_value(&p->sdu.ingo_ll.avg_ia_latency);
+		p->sdu.ingo_ll.avg_ia_latency += delta;
+		/* Do this now rather than earlier so that average interactive
+		 * latency is available for display for all tasks.
+		 */
+		if (rt_task(p) || p->policy == SCHED_BATCH)
+			goto out;
+
+		if (p->sdu.ingo_ll.avg_ia_latency > UNACCEPTABLE_IA_LATENCY)
+			incr_latency_bonus(p);
+		else if (p->sdu.ingo_ll.avg_ia_latency < ACCEPTABLE_IA_LATENCY)
+			decr_latency_bonus(p);
+	} else if (!(rt_task(p) || p->policy == SCHED_BATCH)) {
+		unsigned long long ual = UNACCEPTABLE_IA_LATENCY;
+
+		/*
+		 * The more runnable tasks there are, the greater the acceptable
+		 * non-interactive delay.  In the interests of fairness, tasks
+		 * with short CPU runs have smaller acceptable latencies.
+		 */
+		if (likely(rq->nr_running > 0))
+			ual += p->sdu.ingo_ll.avg_cpu_run * (rq->nr_running - 1);
+
+		if (p->sdu.ingo_ll.avg_latency > ual)
+			incr_latency_bonus(p);
+		else if (p->sdu.ingo_ll.avg_latency < ACCEPTABLE(ual))
+			decr_latency_bonus(p);
+	}
+out:
+	p->sdu.ingo_ll.flags &= ~(ILLF_IA_WAKE_UP|ILLF_JUST_WOKEN);
+}
+
+/*
+ * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
+ * to time slice values: [800ms ... 100ms ... 5ms]
+ *
+ * The higher a thread's priority, the bigger timeslices
+ * it gets during one round of execution. But even the lowest
+ * priority thread gets MIN_TIMESLICE worth of execution time.
+ */
+
+#define SCALE_PRIO(x, prio) \
+	max(x * (INGO_MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE)
+
+static unsigned int static_prio_timeslice(int static_prio)
+{
+	if (static_prio < NICE_TO_PRIO(0))
+		return SCALE_PRIO(DEF_TIMESLICE*4, static_prio);
+	else
+		return SCALE_PRIO(DEF_TIMESLICE, static_prio);
+}
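+/*
+ * Worked example of the scaling above (assuming INGO_MAX_PRIO is
+ * MAX_RT_PRIO + 40): nice -20 (static_prio 100) gets 4 * DEF_TIMESLICE
+ * * 40/20 = 800ms worth of jiffies, nice 0 (static_prio 120) gets
+ * exactly DEF_TIMESLICE = 100ms, and nice +19 (static_prio 139) gets
+ * DEF_TIMESLICE / 20 = 5ms, clamped to at least MIN_TIMESLICE.
+ */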
+
+static inline unsigned int task_timeslice(const task_t *p)
+{
+	return static_prio_timeslice(p->static_prio);
+}
+
+/*
+ * Adding/removing a task to/from a priority array:
+ */
+static void dequeue_task(struct task_struct *p, prio_array_t *array)
+{
+	array->nr_active--;
+	list_del_init(&p->run_list);
+	if (list_empty(array->queue + p->prio))
+		__clear_bit(p->prio, array->bitmap);
+}
+
+static void enqueue_task(struct task_struct *p, prio_array_t *array)
+{
+	sched_info_queued(p);
+	list_add_tail(&p->run_list, array->queue + p->prio);
+	__set_bit(p->prio, array->bitmap);
+	array->nr_active++;
+	p->sdu.ingo_ll.array = array;
+}
+
+/*
+ * Put task to the end of the run list without the overhead of dequeue
+ * followed by enqueue.
+ */
+static void requeue_task(struct task_struct *p, prio_array_t *array)
+{
+	list_move_tail(&p->run_list, array->queue + p->prio);
+}
+
+static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
+{
+	list_add(&p->run_list, array->queue + p->prio);
+	__set_bit(p->prio, array->bitmap);
+	array->nr_active++;
+	p->sdu.ingo_ll.array = array;
+}
+
+/*
+ * effective_prio - return the priority that is based on the static
+ * priority but is modified by bonuses/penalties.
+ *
+ * We use 25% of the full 0...39 priority range so that:
+ *
+ * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
+ * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
+ *
+ * Both properties are important to certain workloads.
+ */
+static int effective_prio(task_t *p)
+{
+	int bonus, prio;
+
+	if (rt_task(p))
+		return p->prio;
+
+	bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
+
+	prio = p->static_prio - bonus;
+	if (prio < MAX_RT_PRIO)
+		prio = MAX_RT_PRIO;
+	if (prio > INGO_MAX_PRIO-1)
+		prio = INGO_MAX_PRIO-1;
+	return prio;
+}
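+/*
+ * e.g. (with MAX_BONUS == 10) a nice 0 task (static_prio 120) holding a
+ * latency_bonus of 7 runs at dynamic priority 118, which also satisfies
+ * TASK_INTERACTIVE() for nice 0, while a task with no bonus runs at 125.
+ */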
+
+#ifdef CONFIG_SMP
+/*
+ * To aid in avoiding the subversion of "niceness" due to uneven distribution
+ * of tasks with abnormal "nice" values across CPUs the contribution that
+ * each task makes to its run queue's load is weighted according to its
+ * scheduling class and "nice" value.  For SCHED_NORMAL tasks this is just a
+ * scaled version of the new time slice allocation that they receive on time
+ * slice expiry etc.
+ */
+
+/*
+ * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE
+ * If static_prio_timeslice() is ever changed to break this assumption then
+ * this code will need modification
+ */
+#define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
+#define LOAD_WEIGHT(lp) \
+	(((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
+#define PRIO_TO_LOAD_WEIGHT(prio) \
+	LOAD_WEIGHT(static_prio_timeslice(prio))
+#define RTPRIO_TO_LOAD_WEIGHT(rp) \
+	(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
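+/*
+ * e.g. a nice 0 task contributes exactly SCHED_LOAD_SCALE to its run
+ * queue's weighted load, a nice -20 task contributes 8 * SCHED_LOAD_SCALE
+ * (800ms vs. 100ms timeslice) and, with HZ=1000, a nice +19 task
+ * contributes SCHED_LOAD_SCALE / 20.
+ */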
+
+static inline void ingo_set_load_weight(task_t *p)
+{
+	if (rt_task(p)) {
+		if (p == task_rq(p)->migration_thread)
+			/*
+			 * The migration thread does the actual balancing.
+			 * Giving its load any weight will skew balancing
+			 * adversely.
+			 */
+			p->load_weight = 0;
+		else
+			p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
+	} else
+		p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
+}
+#else
+static inline void ingo_set_load_weight(task_t *p)
+{
+}
+#endif
+
+/*
+ * __activate_task - move a task to the runqueue.
+ */
+static inline void __activate_task(task_t *p, runqueue_t *rq)
+{
+	enqueue_task(p, rq->qu.ingosched.active);
+	inc_nr_running(p, rq);
+}
+
+/*
+ * activate_task - move a task to the runqueue and do priority recalculation
+ *
+ * Update all the scheduling statistics stuff. (sleep average
+ * calculation, priority modifiers, etc.)
+ */
+static void activate_task(task_t *p, runqueue_t *rq, int local)
+{
+	unsigned long long now;
+
+	now = sched_clock();
+#ifdef CONFIG_SMP
+	if (!local) {
+		/* Compensate for drifting sched_clock */
+		runqueue_t *this_rq = this_rq();
+		now = (now - this_rq->timestamp_last_tick)
+			+ rq->timestamp_last_tick;
+	}
+#endif
+
+	if (!rt_task(p))
+		p->prio = effective_prio(p);
+
+	p->timestamp = now;
+
+	__activate_task(p, rq);
+}
+
+/*
+ * __activate_idle_task - move idle task to the _front_ of runqueue.
+ */
+static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
+{
+	enqueue_task_head(p, rq->qu.ingosched.active);
+	inc_nr_running(p, rq);
+}
+
+/*
+ * deactivate_task - remove a task from the runqueue.
+ */
+static void deactivate_task(struct task_struct *p, runqueue_t *rq)
+{
+	dec_nr_running(p, rq);
+	dequeue_task(p, p->sdu.ingo_ll.array);
+	p->sdu.ingo_ll.array = NULL;
+}
+
+/***
+ * ingo_wake_up_task - wake up a thread
+ * @p: the to-be-woken-up thread
+ * @old_state: the task's state before being woken
+ * @sync: do a synchronous wakeup?
+ * @rq: The run queue on which the task is to be placed (already locked)
+ */
+static void ingo_wake_up_task(struct task_struct *p, struct runqueue *rq, unsigned int old_state, int sync)
+{
+	int same_cpu = (rq == this_rq());
+
+	if (old_state == TASK_UNINTERRUPTIBLE)
+		rq->nr_uninterruptible--;
+
+	/*
+	 * Uninterruptible sleeps are assumed to be non-interactive.
+	 * Interruptible sleeps are assumed to be interactive unless
+	 * tagged with the TASK_NONINTERACTIVE flag.
+	 */
+	if (old_state == TASK_INTERRUPTIBLE)
+		p->sdu.ingo_ll.flags |= ILLF_IA_WAKE_UP;
+	else
+		p->sdu.ingo_ll.flags &= ~ILLF_IA_WAKE_UP;
+
+	p->sdu.ingo_ll.flags |= ILLF_JUST_WOKEN;
+
+	activate_task(p, rq, same_cpu);
+	/*
+	 * Sync wakeups (i.e. those types of wakeups where the waker
+	 * has indicated that it will leave the CPU in short order)
+	 * don't trigger a preemption, if the woken up task will run on
+	 * this cpu. (in this case the 'I will reschedule' promise of
+	 * the waker guarantees that the freshly woken up task is going
+	 * to be considered on this CPU.)
+	 */
+	if (!sync || !same_cpu) {
+		if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+	}
+}
+
+/*
+ * Perform scheduler related setup for a newly forked process p.
+ * p is forked by current.
+ */
+static void ingo_fork(task_t *p)
+{
+	/*
+	 * Leave the latency bonus the same as the parent's.
+	 * This helps new tasks launched by media applications get off to a
+	 * good start when the system is under load. If they don't warrant
+	 * the bonus they'll soon lose it.
+	 */
+	p->sdu.ingo_ll.avg_ia_latency = 0;
+	p->sdu.ingo_ll.avg_latency = 0;
+	p->sdu.ingo_ll.avg_cpu_run = 0;
+
+	p->sdu.ingo_ll.array = NULL;
+	/*
+	 * Share the timeslice between parent and child, thus the
+	 * total amount of pending timeslices in the system doesn't change,
+	 * resulting in more scheduling fairness.
+	 */
+	local_irq_disable();
+	p->sdu.ingo_ll.time_slice = (current->sdu.ingo_ll.time_slice + 1) >> 1;
+	/*
+	 * The remainder of the first timeslice might be recovered by
+	 * the parent if the child exits early enough.
+	 */
+	p->sdu.ingo_ll.first_time_slice = 1;
+	current->sdu.ingo_ll.time_slice >>= 1;
+	p->timestamp = sched_clock();
+	if (unlikely(!current->sdu.ingo_ll.time_slice)) {
+		/*
+		 * This case is rare, it happens when the parent has only
+		 * a single jiffy left from its timeslice. Taking the
+		 * runqueue lock is not a problem.
+		 */
+		current->sdu.ingo_ll.time_slice = 1;
+		scheduler_tick();
+	}
+	local_irq_enable();
+}
+
+/*
+ * wake_up_new_task - wake up a newly created task for the first time.
+ *
+ * This function will do some initial scheduler statistics housekeeping
+ * that must be done for every newly created context, then puts the task
+ * on the runqueue and wakes it.
+ */
+static void ingo_wake_up_new_task(task_t * p, unsigned long clone_flags)
+{
+	unsigned long flags;
+	int this_cpu, cpu;
+	runqueue_t *rq;
+
+	rq = task_rq_lock(p, &flags);
+	BUG_ON(p->state != TASK_RUNNING);
+	this_cpu = smp_processor_id();
+	cpu = task_cpu(p);
+
+	p->prio = effective_prio(p);
+
+	if (likely(cpu == this_cpu)) {
+		if (!(clone_flags & CLONE_VM)) {
+			/*
+			 * The VM isn't cloned, so we're in a good position to
+			 * do child-runs-first in anticipation of an exec. This
+			 * usually avoids a lot of COW overhead.
+			 */
+			if (unlikely(!current->sdu.ingo_ll.array))
+				__activate_task(p, rq);
+			else {
+				p->prio = current->prio;
+				list_add_tail(&p->run_list, &current->run_list);
+				p->sdu.ingo_ll.array = current->sdu.ingo_ll.array;
+				p->sdu.ingo_ll.array->nr_active++;
+				inc_nr_running(p, rq);
+			}
+			set_need_resched();
+		} else
+			/* Run child last */
+			__activate_task(p, rq);
+	} else {
+		runqueue_t *this_rq = cpu_rq(this_cpu);
+
+		/*
+		 * Not the local CPU - must adjust timestamp. This should
+		 * get optimised away in the !CONFIG_SMP case.
+		 */
+		p->timestamp = (p->timestamp - this_rq->timestamp_last_tick)
+					+ rq->timestamp_last_tick;
+		__activate_task(p, rq);
+		if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+	}
+
+	task_rq_unlock(rq, &flags);
+}
+
+/*
+ * Potentially available exiting-child timeslices are
+ * retrieved here - this way the parent does not get
+ * penalized for creating too many threads.
+ *
+ * (this cannot be used to 'generate' timeslices
+ * artificially, because any timeslice recovered here
+ * was given away by the parent in the first place.)
+ */
+static void ingo_exit(task_t *p)
+{
+	unsigned long flags;
+	runqueue_t *rq;
+
+	/*
+	 * Reclaim the unused portion of the child's first timeslice
+	 * for the parent.
+	 */
+	rq = task_rq_lock(p->parent, &flags);
+	if (p->sdu.ingo_ll.first_time_slice && task_cpu(p) == task_cpu(p->parent)) {
+		p->parent->sdu.ingo_ll.time_slice += p->sdu.ingo_ll.time_slice;
+		if (unlikely(p->parent->sdu.ingo_ll.time_slice > task_timeslice(p)))
+			p->parent->sdu.ingo_ll.time_slice = task_timeslice(p);
+	}
+	task_rq_unlock(rq, &flags);
+}
+
+#ifdef CONFIG_SMP
+/*
+ * pull_task - move a task from a remote runqueue to the local runqueue.
+ * Both runqueues must be locked.
+ */
+static
+void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
+	       runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
+{
+	dequeue_task(p, src_array);
+	dec_nr_running(p, src_rq);
+	set_task_cpu(p, this_cpu);
+	inc_nr_running(p, this_rq);
+	enqueue_task(p, this_array);
+	p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
+				+ this_rq->timestamp_last_tick;
+	/*
+	 * Note that idle threads have a prio of INGO_MAX_PRIO, so this test
+	 * is always true for them.
+	 */
+	if (TASK_PREEMPTS_CURR(p, this_rq))
+		resched_task(this_rq->curr);
+}
+
+/*
+ * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq,
+ * as part of a balancing operation within "domain". Returns the number of
+ * tasks moved.
+ *
+ * Called with both runqueues locked.
+ */
+static int ingo_move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
+		      unsigned long max_nr_move, unsigned long max_load_move,
+		      struct sched_domain *sd, enum idle_type idle,
+		      int *all_pinned)
+{
+	prio_array_t *array, *dst_array;
+	struct list_head *head, *curr;
+	int idx, pulled = 0, pinned = 0;
+	long rem_load_move;
+	task_t *tmp;
+
+	if (max_nr_move == 0 || max_load_move == 0)
+		goto out;
+
+	rem_load_move = max_load_move;
+	pinned = 1;
+
+	/*
+	 * We first consider expired tasks. Those will likely not be
+	 * executed in the near future, and they are most likely to
+	 * be cache-cold, thus switching CPUs has the least effect
+	 * on them.
+	 */
+	if (busiest->qu.ingosched.expired->nr_active) {
+		array = busiest->qu.ingosched.expired;
+		dst_array = this_rq->qu.ingosched.expired;
+	} else {
+		array = busiest->qu.ingosched.active;
+		dst_array = this_rq->qu.ingosched.active;
+	}
+
+new_array:
+	/* Start searching at priority 0: */
+	idx = 0;
+skip_bitmap:
+	if (!idx)
+		idx = sched_find_first_bit(array->bitmap);
+	else
+		idx = find_next_bit(array->bitmap, INGO_MAX_PRIO, idx);
+	if (idx >= INGO_MAX_PRIO) {
+		if (array == busiest->qu.ingosched.expired && busiest->qu.ingosched.active->nr_active) {
+			array = busiest->qu.ingosched.active;
+			dst_array = this_rq->qu.ingosched.active;
+			goto new_array;
+		}
+		goto out;
+	}
+
+	head = array->queue + idx;
+	curr = head->prev;
+skip_queue:
+	tmp = list_entry(curr, task_t, run_list);
+
+	curr = curr->prev;
+
+	if (tmp->load_weight > rem_load_move ||
+	    !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+
+#ifdef CONFIG_SCHEDSTATS
+	if (task_hot(tmp, busiest->timestamp_last_tick, sd))
+		schedstat_inc(sd, lb_hot_gained[idle]);
+#endif
+
+	pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
+	pulled++;
+	rem_load_move -= tmp->load_weight;
+
+	/*
+	 * We only want to steal up to the prescribed number of tasks
+	 * and the prescribed amount of weighted load.
+	 */
+	if (pulled < max_nr_move && rem_load_move > 0) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+out:
+	if (all_pinned)
+		*all_pinned = pinned;
+
+	return pulled;
+}
+#endif
+
+/*
+ * We place interactive tasks back into the active array, if possible.
+ *
+ * To guarantee that this does not starve expired tasks we ignore the
+ * interactivity of a task if the first expired task had to wait more
+ * than a 'reasonable' amount of time. This deadline timeout is
+ * load-dependent, as the frequency of array switches decreases with
+ * increasing number of running tasks. We also ignore the interactivity
+ * if a better static_prio task has expired:
+ */
+#define EXPIRED_STARVING(rq) \
+	((STARVATION_LIMIT && ((rq)->qu.ingosched.expired_timestamp && \
+		(jiffies - (rq)->qu.ingosched.expired_timestamp >= \
+			STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
+			((rq)->curr->static_prio > (rq)->qu.ingosched.best_expired_prio))
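+/*
+ * With the defaults above (and MAX_BONUS == 10) STARVATION_LIMIT is about
+ * one second's worth of jiffies, so with N runnable tasks an array switch
+ * is forced once the first expired task has waited roughly N seconds.
+ */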
+
+/*
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled.
+ *
+ * It also gets called by the fork code, when changing the parent's
+ * timeslices.
+ */
+static void ingo_tick(struct task_struct *p, struct runqueue *rq, unsigned long long now)
+{
+	int cpu = smp_processor_id();
+
+	if (p == rq->idle) {
+		if (wake_priority_sleeper(rq))
+			goto out;
+		rebalance_tick(cpu, rq, SCHED_IDLE);
+		return;
+	}
+
+	/* Task might have expired already, but not scheduled off yet */
+	if (p->sdu.ingo_ll.array != rq->qu.ingosched.active) {
+		set_tsk_need_resched(p);
+		goto out;
+	}
+	spin_lock(&rq->lock);
+	/*
+	 * The task was running during this tick - update the
+	 * time slice counter. Note: we do not update a thread's
+	 * priority until it either goes to sleep or uses up its
+	 * timeslice. This makes it possible for interactive tasks
+	 * to use up their timeslices at their highest priority levels.
+	 */
+	if (rt_task(p)) {
+		/*
+		 * RR tasks need a special form of timeslice management.
+		 * FIFO tasks have no timeslices.
+		 */
+		if ((p->policy == SCHED_RR) && !--p->sdu.ingo_ll.time_slice) {
+			p->sdu.ingo_ll.time_slice = task_timeslice(p);
+			p->sdu.ingo_ll.first_time_slice = 0;
+			set_tsk_need_resched(p);
+
+			/* put it at the end of the queue: */
+			requeue_task(p, rq->qu.ingosched.active);
+		}
+		goto out_unlock;
+	}
+	if (!--p->sdu.ingo_ll.time_slice) {
+		dequeue_task(p, rq->qu.ingosched.active);
+		set_tsk_need_resched(p);
+		/* Make sure that tasks that obtain a latency_bonus but then
+		 * become CPU bound eventually lose it.
+		 */
+		decr_latency_bonus(p);
+		p->prio = effective_prio(p);
+		p->sdu.ingo_ll.time_slice = task_timeslice(p);
+		p->sdu.ingo_ll.first_time_slice = 0;
+
+		if (!rq->qu.ingosched.expired_timestamp)
+			rq->qu.ingosched.expired_timestamp = jiffies;
+		if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
+			enqueue_task(p, rq->qu.ingosched.expired);
+			if (p->static_prio < rq->qu.ingosched.best_expired_prio)
+				rq->qu.ingosched.best_expired_prio = p->static_prio;
+		} else
+			enqueue_task(p, rq->qu.ingosched.active);
+	} else {
+		/*
+		 * Prevent a too long timeslice allowing a task to monopolize
+		 * the CPU. We do this by splitting up the timeslice into
+		 * smaller pieces.
+		 *
+		 * Note: this does not mean the task's timeslices expire or
+		 * get lost in any way, they just might be preempted by
+		 * another task of equal priority. (one with higher
+		 * priority would have preempted this task already.) We
+		 * requeue this task to the end of the list on this priority
+		 * level, which is in essence a round-robin of tasks with
+		 * equal priority.
+		 *
+		 * This only applies to tasks in the interactive
+		 * delta range with at least TIMESLICE_GRANULARITY to requeue.
+		 */
+		if (TASK_INTERACTIVE(p) && !((task_timeslice(p) -
+			p->sdu.ingo_ll.time_slice) % TIMESLICE_GRANULARITY(p)) &&
+			(p->sdu.ingo_ll.time_slice >= TIMESLICE_GRANULARITY(p)) &&
+			(p->sdu.ingo_ll.array == rq->qu.ingosched.active)) {
+
+			requeue_task(p, rq->qu.ingosched.active);
+			set_tsk_need_resched(p);
+		}
+	}
+out_unlock:
+	spin_unlock(&rq->lock);
+out:
+	rebalance_tick(cpu, rq, NOT_IDLE);
+}
+
+#ifdef CONFIG_SCHED_SMT
+static struct task_struct *ingo_head_of_queue(union runqueue_queue *rqq)
+{
+	prio_array_t *array = rqq->ingosched.active;
+
+	if (!array->nr_active)
+		array = rqq->ingosched.expired;
+	BUG_ON(!array->nr_active);
+
+	return list_entry(array->queue[sched_find_first_bit(array->bitmap)].next,
+		task_t, run_list);
+}
+
+/*
+ * number of 'lost' timeslices this task won't be able to fully
+ * utilize, if another task runs on a sibling. This models the
+ * slowdown effect of other tasks running on siblings:
+ */
+static inline unsigned long smt_slice(const task_t *p, struct sched_domain *sd)
+{
+	return p->sdu.ingo_ll.time_slice * (100 - sd->per_cpu_gain) / 100;
+}
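+/*
+ * e.g. with the typical SMT per_cpu_gain of 25, smt_slice() is 75% of the
+ * task's remaining timeslice.
+ */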
+
+static int ingo_dependent_sleeper_trumps(const struct task_struct *p1,
+	const struct task_struct * p2, struct sched_domain *sd)
+{
+	return smt_slice(p1, sd) > task_timeslice(p2);
+}
+#endif
+
+/*
+ * schedule() is the main scheduler function.
+ */
+static void ingo_schedule(void)
+{
+	long *switch_count;
+	prio_array_t *array;
+	int cpu, idx;
+	struct task_struct *prev = current, *next;
+	struct list_head *queue;
+	struct runqueue *rq = this_rq();
+	unsigned long long now = sched_clock();
+
+	spin_lock_irq(&rq->lock);
+
+	if (likely(now > prev->timestamp))
+		prev->sdu.ingo_ll.avg_cpu_run += now - prev->timestamp;
+
+	if (unlikely(prev->flags & PF_DEAD))
+		prev->state = EXIT_DEAD;
+
+	switch_count = &prev->nivcsw;
+	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+		switch_count = &prev->nvcsw;
+		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+				unlikely(signal_pending(prev))))
+			prev->state = TASK_RUNNING;
+		else {
+			if (prev->state == TASK_UNINTERRUPTIBLE)
+				rq->nr_uninterruptible++;
+			deactivate_task(prev, rq);
+		}
+	}
+
+	cpu = smp_processor_id();
+	if (unlikely(!rq->nr_running)) {
+go_idle:
+		idle_balance(cpu, rq);
+		if (!rq->nr_running) {
+			next = rq->idle;
+			rq->qu.ingosched.expired_timestamp = 0;
+			wake_sleeping_dependent(cpu, rq);
+			/*
+			 * wake_sleeping_dependent() might have released
+			 * the runqueue, so break out if we got new
+			 * tasks meanwhile:
+			 */
+			if (!rq->nr_running)
+				goto switch_tasks;
+		}
+	} else {
+		if (dependent_sleeper(cpu, rq)) {
+			next = rq->idle;
+			goto switch_tasks;
+		}
+		/*
+		 * dependent_sleeper() releases and reacquires the runqueue
+		 * lock, hence go into the idle loop if the rq went
+		 * empty meanwhile:
+		 */
+		if (unlikely(!rq->nr_running))
+			goto go_idle;
+	}
+
+	array = rq->qu.ingosched.active;
+	if (unlikely(!array->nr_active)) {
+		/*
+		 * Switch the active and expired arrays.
+		 */
+		schedstat_inc(rq, sched_switch);
+		rq->qu.ingosched.active = rq->qu.ingosched.expired;
+		rq->qu.ingosched.expired = array;
+		array = rq->qu.ingosched.active;
+		rq->qu.ingosched.expired_timestamp = 0;
+		rq->qu.ingosched.best_expired_prio = INGO_MAX_PRIO;
+	}
+
+	idx = sched_find_first_bit(array->bitmap);
+	queue = array->queue + idx;
+	next = list_entry(queue->next, task_t, run_list);
+switch_tasks:
+	if (next == rq->idle)
+		schedstat_inc(rq, sched_goidle);
+	prefetch(next);
+	prefetch_stack(next);
+	clear_tsk_need_resched(prev);
+	rcu_qsctr_inc(task_cpu(prev));
+
+	update_cpu_clock(prev, rq, now);
+
+	prev->timestamp = prev->last_ran = now;
+
+	sched_info_switch(prev, next);
+	if (likely(prev != next)) {
+		decay_avg_value(&prev->sdu.ingo_ll.avg_cpu_run);
+		if (just_woken(next))
+			update_latency_bonus(next, rq, now);
+		next->timestamp = now;
+		rq->nr_switches++;
+		rq->curr = next;
+		++*switch_count;
+
+		prepare_task_switch(rq, next);
+		prev = context_switch(rq, prev, next);
+		barrier();
+		/*
+		 * this_rq must be evaluated again because prev may have moved
+		 * CPUs since it called schedule(), thus the 'rq' on its stack
+		 * frame will be invalid.
+		 */
+		finish_task_switch(this_rq(), prev);
+	} else
+		spin_unlock_irq(&rq->lock);
+}
+
+static void ingo_set_normal_task_nice(task_t *p, long nice)
+{
+	prio_array_t *array;
+	int old_prio, new_prio, delta;
+	struct runqueue *rq = task_rq(p);
+
+	array = p->sdu.ingo_ll.array;
+	if (array) {
+		dequeue_task(p, array);
+		dec_raw_weighted_load(rq, p);
+	}
+
+	old_prio = p->prio;
+	new_prio = NICE_TO_PRIO(nice);
+	delta = new_prio - old_prio;
+	p->static_prio = NICE_TO_PRIO(nice);
+	ingo_set_load_weight(p);
+	p->prio += delta;
+
+	if (array) {
+		enqueue_task(p, array);
+		inc_raw_weighted_load(rq, p);
+		/*
+		 * If the task increased its priority or is running and
+		 * lowered its priority, then reschedule its CPU:
+		 */
+		if (delta < 0 || (delta > 0 && task_running(rq, p)))
+			resched_task(rq->curr);
+	}
+}
+
+static void ingo_init_batch_task(task_t *p)
+{
+	p->sdu.ingo_ll.latency_bonus = 0;
+}
+
+/*
+ * setscheduler - change the scheduling policy and/or RT priority of a thread.
+ */
+static void ingo_setscheduler(task_t *p, int policy, int prio)
+{
+	int oldprio;
+	prio_array_t *array;
+	runqueue_t *rq = task_rq(p);
+
+	array = p->sdu.ingo_ll.array;
+	if (array)
+		deactivate_task(p, rq);
+	oldprio = p->prio;
+	__setscheduler(p, policy, prio);
+	if (array) {
+		__activate_task(p, rq);
+		/*
+		 * Reschedule if we are currently running on this runqueue and
+		 * our priority decreased, or if we are not currently running on
+		 * this runqueue and our priority is higher than the current's
+		 */
+		if (task_running(rq, p)) {
+			if (p->prio > oldprio)
+				resched_task(rq->curr);
+		} else if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+	}
+}
+
+/**
+ * sys_sched_yield - yield the current processor to other threads.
+ *
+ * this function yields the current CPU by moving the calling thread
+ * to the expired array. If there are no other threads running on this
+ * CPU then this function will return.
+ */
+
+static long ingo_sys_yield(void)
+{
+	runqueue_t *rq = this_rq_lock();
+	prio_array_t *array = current->sdu.ingo_ll.array;
+	prio_array_t *target = rq->qu.ingosched.expired;
+
+	schedstat_inc(rq, yld_cnt);
+	/*
+	 * We implement yielding by moving the task into the expired
+	 * queue.
+	 *
+	 * (special rule: RT tasks will just roundrobin in the active
+	 *  array.)
+	 */
+	if (rt_task(current))
+		target = rq->qu.ingosched.active;
+
+	if (array->nr_active == 1) {
+		schedstat_inc(rq, yld_act_empty);
+		if (!rq->qu.ingosched.expired->nr_active)
+			schedstat_inc(rq, yld_both_empty);
+	} else if (!rq->qu.ingosched.expired->nr_active)
+		schedstat_inc(rq, yld_exp_empty);
+
+	if (array != target) {
+		dequeue_task(current, array);
+		enqueue_task(current, target);
+	} else
+		/*
+		 * requeue_task is cheaper so perform that if possible.
+		 */
+		requeue_task(current, array);
+
+	/*
+	 * Since we are going to call schedule() anyway, there's
+	 * no need to preempt or enable interrupts:
+	 */
+	__release(rq->lock);
+	_raw_spin_unlock(&rq->lock);
+	preempt_enable_no_resched();
+
+	schedule();
+
+	return 0;
+}
+
+static void ingo_yield(void)
+{
+	set_current_state(TASK_RUNNING);
+	ingo_sys_yield();
+}
+
+static void ingo_init_idle(task_t *idle, int cpu)
+{
+	idle->sdu.ingo_ll.avg_ia_latency = 0;
+	idle->sdu.ingo_ll.avg_latency = 0;
+	idle->sdu.ingo_ll.avg_cpu_run = 0;
+	idle->sdu.ingo_ll.latency_bonus = 0;
+	idle->sdu.ingo_ll.array = NULL;
+	idle->prio = INGO_MAX_PRIO;
+}
+
+#ifdef CONFIG_SMP
+/* source and destination queues will be already locked */
+static void ingo_migrate_queued_task(struct task_struct *p, int dest_cpu)
+{
+	struct runqueue *rq_src = task_rq(p);
+	struct runqueue *rq_dest = cpu_rq(dest_cpu);
+
+	/*
+	 * Sync timestamp with rq_dest's before activating.
+	 * The same thing could be achieved by doing this step
+	 * afterwards, and pretending it was a local activate.
+	 * This way is cleaner and logically correct.
+	 */
+	p->timestamp = p->timestamp - rq_src->timestamp_last_tick
+			+ rq_dest->timestamp_last_tick;
+	deactivate_task(p, rq_src);
+	set_task_cpu(p, dest_cpu);
+	activate_task(p, rq_dest, 0);
+	if (TASK_PREEMPTS_CURR(p, rq_dest))
+		resched_task(rq_dest->curr);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static	void ingo_set_select_idle_first(struct runqueue *rq)
+{
+	__setscheduler(rq->idle, SCHED_FIFO, MAX_RT_PRIO-1);
+	/* Add idle task to _front_ of its priority queue */
+	__activate_idle_task(rq->idle, rq);
+}
+
+static	void ingo_set_select_idle_last(struct runqueue *rq)
+{
+	deactivate_task(rq->idle, rq);
+	rq->idle->static_prio = INGO_MAX_PRIO;
+	__setscheduler(rq->idle, SCHED_NORMAL, 0);
+}
+
+static void ingo_migrate_dead_tasks(unsigned int dead_cpu)
+{
+	unsigned arr, i;
+	struct runqueue *rq = cpu_rq(dead_cpu);
+
+	for (arr = 0; arr < 2; arr++) {
+		for (i = 0; i < INGO_MAX_PRIO; i++) {
+			struct list_head *list = &rq->qu.ingosched.arrays[arr].queue[i];
+			while (!list_empty(list))
+				migrate_dead(dead_cpu,
+					     list_entry(list->next, task_t,
+							run_list));
+		}
+	}
+}
+#endif
+#endif
+
+static void ingo_sched_init(void)
+{
+	init_task.sdu.ingo_ll.time_slice = HZ;
+	init_task.sdu.ingo_ll.array = NULL;
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static void ingo_normalize_rt_task(struct task_struct *p)
+{
+	prio_array_t *array;
+	unsigned long flags;
+	runqueue_t *rq;
+
+	rq = task_rq_lock(p, &flags);
+
+	array = p->sdu.ingo_ll.array;
+	if (array)
+		deactivate_task(p, rq);
+	__setscheduler(p, SCHED_NORMAL, 0);
+	if (array) {
+		__activate_task(p, rq);
+		resched_task(rq->curr);
+	}
+
+	task_rq_unlock(rq, &flags);
+}
+#endif
+
+SCHED_DRV_SYSFS_UINT_RW(unacceptable_ia_latency, ILL_AVG_REAL, ILL_AVG_RND,
+			0, ULONG_MAX);
+
+static struct attribute *ingo_ll_attrs[] = {
+	&SCHED_DRV_SYSFS_ATTR(unacceptable_ia_latency),
+	NULL,
+};
+
+const struct sched_drv ingo_ll_sched_drv = {
+	.name = "ingo_ll",
+	.init_runqueue_queue = ingo_init_runqueue_queue,
+	.set_oom_time_slice = ingo_set_oom_time_slice,
+#ifdef CONFIG_SMP
+	.set_load_weight = ingo_set_load_weight,
+#endif
+	.task_timeslice = task_timeslice,
+	.wake_up_task = ingo_wake_up_task,
+	.fork = ingo_fork,
+	.wake_up_new_task = ingo_wake_up_new_task,
+	.exit = ingo_exit,
+#ifdef CONFIG_SMP
+	.move_tasks = ingo_move_tasks,
+#endif
+	.tick = ingo_tick,
+#ifdef CONFIG_SCHED_SMT
+	.head_of_queue = ingo_head_of_queue,
+	.dependent_sleeper_trumps = ingo_dependent_sleeper_trumps,
+#endif
+	.schedule = ingo_schedule,
+	.set_normal_task_nice = ingo_set_normal_task_nice,
+	.init_batch_task = ingo_init_batch_task,
+	.setscheduler = ingo_setscheduler,
+	.sys_yield = ingo_sys_yield,
+	.yield = ingo_yield,
+	.init_idle = ingo_init_idle,
+	.sched_init = ingo_sched_init,
+#ifdef CONFIG_SMP
+	.migrate_queued_task = ingo_migrate_queued_task,
+#ifdef CONFIG_HOTPLUG_CPU
+	.set_select_idle_first = ingo_set_select_idle_first,
+	.set_select_idle_last = ingo_set_select_idle_last,
+	.migrate_dead_tasks = ingo_migrate_dead_tasks,
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	.normalize_rt_task = ingo_normalize_rt_task,
+#endif
+	.attrs = ingo_ll_attrs,
+};
diff -urN oldtree/kernel/ingosched.c newtree/kernel/ingosched.c
--- oldtree/kernel/ingosched.c	1970-01-01 00:00:00.000000000 +0000
+++ newtree/kernel/ingosched.c	2006-03-08 18:56:30.027753750 +0000
@@ -0,0 +1,1269 @@
+/*
+ *  kernel/ingosched.c
+ *  Copyright (C) 1991-2005  Linus Torvalds
+ *
+ *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
+ *		hybrid priority-list and round-robin design with
+ *		an array-switch method of distributing timeslices
+ *		and per-CPU runqueues.  Cleanups and useful suggestions
+ *		by Davide Libenzi, preemptible kernel bits by Robert Love.
+ *  2003-09-03	Interactivity tuning by Con Kolivas.
+ */
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/rcupdate.h>
+#include <linux/security.h>
+#include <linux/cpu.h>
+#include <linux/hardirq.h>
+#include <linux/sched_pvt.h>
+#include <linux/sched_runq.h>
+
+static void ingo_init_runqueue_queue(union runqueue_queue *rqq)
+{
+	int j;
+
+	rqq->ingosched.active = rqq->ingosched.arrays;
+	rqq->ingosched.expired = rqq->ingosched.arrays + 1;
+	rqq->ingosched.best_expired_prio = INGO_MAX_PRIO;
+
+	for (j = 0; j < 2; j++) {
+		int k;
+		prio_array_t *array = rqq->ingosched.arrays + j;
+
+		for (k = 0; k < INGO_MAX_PRIO; k++) {
+			INIT_LIST_HEAD(array->queue + k);
+			__clear_bit(k, array->bitmap);
+		}
+		/* delimiter for bitsearch */
+		__set_bit(INGO_MAX_PRIO, array->bitmap);
+		array->nr_active = 0;
+	}
+
+	rqq->ingosched.expired_timestamp = 0;
+}
+
+static void ingo_set_oom_time_slice(struct task_struct *p, unsigned long t)
+{
+	p->sdu.ingosched.time_slice = t;
+}
+
+/*
+ * 'User priority' is the nice value converted to something we
+ * can work with better when scaling various scheduler parameters,
+ * it's a [ 0 ... 39 ] range.
+ */
+#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
+#define MAX_USER_PRIO		(USER_PRIO(INGO_MAX_PRIO))
+
+/*
+ * Some helpers for converting nanosecond timing to jiffy resolution
+ */
+#define NS_TO_JIFFIES(TIME)	((TIME) / (1000000000 / HZ))
+#define JIFFIES_TO_NS(TIME)	((TIME) * (1000000000 / HZ))
+
+/*
+ * These are the 'tuning knobs' of the scheduler:
+ *
+ * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
+ * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
+ * Timeslices get refilled after they expire.
+ */
+#define MIN_TIMESLICE		max(5 * HZ / 1000, 1)
+#define DEF_TIMESLICE		(100 * HZ / 1000)
+#define ON_RUNQUEUE_WEIGHT	 30
+#define CHILD_PENALTY		 95
+#define PARENT_PENALTY		100
+#define EXIT_WEIGHT		  3
+#define PRIO_BONUS_RATIO	 25
+#define MAX_BONUS		(MAX_USER_PRIO * PRIO_BONUS_RATIO / 100)
+#define INTERACTIVE_DELTA	  2
+#define MAX_SLEEP_AVG		(DEF_TIMESLICE * MAX_BONUS)
+#define STARVATION_LIMIT	(MAX_SLEEP_AVG)
+#define NS_MAX_SLEEP_AVG	(JIFFIES_TO_NS(MAX_SLEEP_AVG))
+
+/*
+ * If a task is 'interactive' then we reinsert it in the active
+ * array after it has expired its current timeslice. (it will not
+ * continue to run immediately, it will still roundrobin with
+ * other interactive tasks.)
+ *
+ * This part scales the interactivity limit depending on niceness.
+ *
+ * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
+ * Here are a few examples of different nice levels:
+ *
+ *  TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
+ *  TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
+ *  TASK_INTERACTIVE(  0): [1,1,1,1,0,0,0,0,0,0,0]
+ *  TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
+ *  TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
+ *
+ * (the X axis represents the possible -5 ... 0 ... +5 dynamic
+ *  priority range a task can explore, a value of '1' means the
+ *  task is rated interactive.)
+ *
+ * Ie. nice +19 tasks can never get 'interactive' enough to be
+ * reinserted into the active array. And only heavily CPU-hog nice -20
+ * tasks will be expired. Default nice 0 tasks are somewhere between,
+ * it takes some effort for them to get interactive, but it's not
+ * too hard.
+ */
+
+#define CURRENT_BONUS(p) \
+	(NS_TO_JIFFIES((p)->sdu.ingosched.sleep_avg) * MAX_BONUS / \
+		MAX_SLEEP_AVG)
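+/*
+ * e.g. with HZ=1000 (and MAX_BONUS == 10) MAX_SLEEP_AVG is one second's
+ * worth of jiffies, so a task whose sleep_avg is 700ms has a
+ * CURRENT_BONUS of 7 and, via effective_prio() below, runs two priority
+ * levels better than its static_prio.
+ */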
+
+#define GRANULARITY	(10 * HZ / 1000 ? : 1)
+
+#ifdef CONFIG_SMP
+#define TIMESLICE_GRANULARITY(p)	(GRANULARITY * \
+		(1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)) * \
+			num_online_cpus())
+#else
+#define TIMESLICE_GRANULARITY(p)	(GRANULARITY * \
+		(1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)))
+#endif
+
+#define SCALE(v1,v1_max,v2_max) \
+	(v1) * (v2_max) / (v1_max)
+
+#define DELTA(p) \
+	(SCALE(TASK_NICE(p) + 20, 40, MAX_BONUS) - 20 * MAX_BONUS / 40 + \
+		INTERACTIVE_DELTA)
+
+#define TASK_INTERACTIVE(p) \
+	((p)->prio <= (p)->static_prio - DELTA(p))
+
+#define INTERACTIVE_SLEEP(p) \
+	(JIFFIES_TO_NS(MAX_SLEEP_AVG * \
+		(MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))
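+/*
+ * e.g. for a nice 0 task (DELTA == 2) and HZ=1000 INTERACTIVE_SLEEP()
+ * works out to roughly 800ms; recalc_task_prio() treats longer sleeps as
+ * "idle" and only tops sleep_avg up to MAX_SLEEP_AVG - DEF_TIMESLICE
+ * (about 900ms, i.e. a bonus of 9).
+ */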
+
+/*
+ * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
+ * to time slice values: [800ms ... 100ms ... 5ms]
+ *
+ * The higher a thread's priority, the bigger timeslices
+ * it gets during one round of execution. But even the lowest
+ * priority thread gets MIN_TIMESLICE worth of execution time.
+ */
+
+#define SCALE_PRIO(x, prio) \
+	max(x * (INGO_MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE)
+
+static unsigned int static_prio_timeslice(int static_prio)
+{
+	if (static_prio < NICE_TO_PRIO(0))
+		return SCALE_PRIO(DEF_TIMESLICE*4, static_prio);
+	else
+		return SCALE_PRIO(DEF_TIMESLICE, static_prio);
+}
+
+static inline unsigned int task_timeslice(const task_t *p)
+{
+	return static_prio_timeslice(p->static_prio);
+}
+
+/*
+ * Adding/removing a task to/from a priority array:
+ */
+static void dequeue_task(struct task_struct *p, prio_array_t *array)
+{
+	array->nr_active--;
+	list_del_init(&p->run_list);
+	if (list_empty(array->queue + p->prio))
+		__clear_bit(p->prio, array->bitmap);
+}
+
+static void enqueue_task(struct task_struct *p, prio_array_t *array)
+{
+	sched_info_queued(p);
+	list_add_tail(&p->run_list, array->queue + p->prio);
+	__set_bit(p->prio, array->bitmap);
+	array->nr_active++;
+	p->sdu.ingosched.array = array;
+}
+
+/*
+ * Put task to the end of the run list without the overhead of dequeue
+ * followed by enqueue.
+ */
+static void requeue_task(struct task_struct *p, prio_array_t *array)
+{
+	list_move_tail(&p->run_list, array->queue + p->prio);
+}
+
+static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
+{
+	list_add(&p->run_list, array->queue + p->prio);
+	__set_bit(p->prio, array->bitmap);
+	array->nr_active++;
+	p->sdu.ingosched.array = array;
+}
+
+/*
+ * effective_prio - return the priority that is based on the static
+ * priority but is modified by bonuses/penalties.
+ *
+ * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
+ * into the -5 ... 0 ... +5 bonus/penalty range.
+ *
+ * We use 25% of the full 0...39 priority range so that:
+ *
+ * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
+ * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
+ *
+ * Both properties are important to certain workloads.
+ */
+static int effective_prio(task_t *p)
+{
+	int bonus, prio;
+
+	if (rt_task(p))
+		return p->prio;
+
+	bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
+
+	prio = p->static_prio - bonus;
+	if (prio < MAX_RT_PRIO)
+		prio = MAX_RT_PRIO;
+	if (prio > INGO_MAX_PRIO-1)
+		prio = INGO_MAX_PRIO-1;
+	return prio;
+}
+
+#ifdef CONFIG_SMP
+/*
+ * To aid in avoiding the subversion of "niceness" due to uneven distribution
+ * of tasks with abnormal "nice" values across CPUs the contribution that
+ * each task makes to its run queue's load is weighted according to its
+ * scheduling class and "nice" value.  For SCHED_NORMAL tasks this is just a
+ * scaled version of the new time slice allocation that they receive on time
+ * slice expiry etc.
+ */
+
+/*
+ * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE
+ * If static_prio_timeslice() is ever changed to break this assumption then
+ * this code will need modification
+ */
+#define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
+#define LOAD_WEIGHT(lp) \
+	(((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
+#define PRIO_TO_LOAD_WEIGHT(prio) \
+	LOAD_WEIGHT(static_prio_timeslice(prio))
+#define RTPRIO_TO_LOAD_WEIGHT(rp) \
+	(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
+
+static inline void ingo_set_load_weight(task_t *p)
+{
+	if (rt_task(p)) {
+		if (p == task_rq(p)->migration_thread)
+			/*
+			 * The migration thread does the actual balancing.
+			 * Giving its load any weight will skew balancing
+			 * adversely.
+			 */
+			p->load_weight = 0;
+		else
+			p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
+	} else
+		p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
+}
+#else
+static inline void ingo_set_load_weight(task_t *p)
+{
+}
+#endif
+
+/*
+ * __activate_task - move a task to the runqueue.
+ */
+static inline void __activate_task(task_t *p, runqueue_t *rq)
+{
+	enqueue_task(p, rq->qu.ingosched.active);
+	inc_nr_running(p, rq);
+}
+
+static int recalc_task_prio(task_t *p, unsigned long long now)
+{
+	/* Caller must always ensure 'now >= p->timestamp' */
+	unsigned long long __sleep_time = now - p->timestamp;
+	unsigned long sleep_time;
+
+	if (unlikely(p->policy == SCHED_BATCH))
+		sleep_time = 0;
+	else {
+		if (__sleep_time > NS_MAX_SLEEP_AVG)
+			sleep_time = NS_MAX_SLEEP_AVG;
+		else
+			sleep_time = (unsigned long)__sleep_time;
+	}
+
+	if (likely(sleep_time > 0)) {
+		/*
+		 * User tasks that sleep a long time are categorised as
+		 * idle. They will only have their sleep_avg increased to a
+		 * level that makes them just interactive priority to stay
+		 * active yet prevent them suddenly becoming cpu hogs and
+		 * starving other processes.
+		 */
+		if (p->mm && sleep_time > INTERACTIVE_SLEEP(p)) {
+				unsigned long ceiling;
+
+				ceiling = JIFFIES_TO_NS(MAX_SLEEP_AVG -
+					DEF_TIMESLICE);
+				if (p->sdu.ingosched.sleep_avg < ceiling)
+					p->sdu.ingosched.sleep_avg = ceiling;
+		} else {
+
+			/*
+			 * The lower the sleep avg a task has the more
+			 * rapidly it will rise with sleep time. This enables
+			 * tasks to rapidly recover to a low latency priority.
+			 * If a task was sleeping with the noninteractive
+			 * label do not apply this non-linear boost
+			 */
+			if (p->sdu.ingosched.sleep_type != SLEEP_NONINTERACTIVE || !p->mm)
+				sleep_time *=
+					(MAX_BONUS - CURRENT_BONUS(p)) ? : 1;
+
+			/*
+			 * This code gives a bonus to interactive tasks.
+			 *
+			 * The boost works by updating the 'average sleep time'
+			 * value here, based on ->timestamp. The more time a
+			 * task spends sleeping, the higher the average gets -
+			 * and the higher the priority boost gets as well.
+			 */
+			p->sdu.ingosched.sleep_avg += sleep_time;
+
+			if (p->sdu.ingosched.sleep_avg > NS_MAX_SLEEP_AVG)
+				p->sdu.ingosched.sleep_avg = NS_MAX_SLEEP_AVG;
+		}
+	}
+
+	return effective_prio(p);
+}
+
+/*
+ * activate_task - move a task to the runqueue and do priority recalculation
+ *
+ * Update all the scheduling statistics stuff. (sleep average
+ * calculation, priority modifiers, etc.)
+ */
+static void activate_task(task_t *p, runqueue_t *rq, int local)
+{
+	unsigned long long now;
+
+	now = sched_clock();
+#ifdef CONFIG_SMP
+	if (!local) {
+		/* Compensate for drifting sched_clock */
+		runqueue_t *this_rq = this_rq();
+		now = (now - this_rq->timestamp_last_tick)
+			+ rq->timestamp_last_tick;
+	}
+#endif
+
+	if (!rt_task(p))
+		p->prio = recalc_task_prio(p, now);
+
+	if (p->sdu.ingosched.sleep_type != SLEEP_NONINTERACTIVE) {
+		/*
+		 * Tasks which were woken up by interrupts (ie. hw events)
+		 * are most likely of interactive nature. So we give them
+		 * the credit of extending their sleep time to the period
+		 * of time they spend on the runqueue, waiting for execution
+		 * on a CPU, first time around:
+		 */
+		if (in_interrupt())
+			p->sdu.ingosched.sleep_type = SLEEP_INTERRUPTED;
+		else {
+			/*
+			 * Normal first-time wakeups get a credit too for
+			 * on-runqueue time, but it will be weighted down:
+			 */
+			p->sdu.ingosched.sleep_type = SLEEP_INTERACTIVE;
+		}
+	}
+	p->timestamp = now;
+
+	__activate_task(p, rq);
+}
+
+/*
+ * __activate_idle_task - move idle task to the _front_ of runqueue.
+ */
+static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
+{
+	enqueue_task_head(p, rq->qu.ingosched.active);
+	inc_nr_running(p, rq);
+}
+
+/*
+ * deactivate_task - remove a task from the runqueue.
+ */
+static void deactivate_task(struct task_struct *p, runqueue_t *rq)
+{
+	dec_nr_running(p, rq);
+	dequeue_task(p, p->sdu.ingosched.array);
+	p->sdu.ingosched.array = NULL;
+}
+
+/***
+ * ingo_wake_up_task - wake up a thread
+ * @p: the to-be-woken-up thread
+ * @old_state: the task's state before being woken
+ * @sync: do a synchronous wakeup?
+ * @rq: The run queue on which the task is to be placed (already locked)
+ */
+static void ingo_wake_up_task(struct task_struct *p, struct runqueue *rq, unsigned int old_state, int sync)
+{
+	int same_cpu = (rq == this_rq());
+
+	if (old_state == TASK_UNINTERRUPTIBLE) {
+		rq->nr_uninterruptible--;
+		/*
+		 * Tasks waking from uninterruptible sleep are likely
+		 * to be sleeping involuntarily on I/O and are otherwise
+		 * cpu bound so label them as noninteractive.
+		 */
+		p->sdu.ingosched.sleep_type = SLEEP_NONINTERACTIVE;
+	} else
+		/*
+		 * Tasks that have marked their sleep as noninteractive get
+		 * woken up with their sleep average not weighted in an
+		 * interactive way.
+		 */
+		if (old_state & TASK_NONINTERACTIVE)
+			p->sdu.ingosched.sleep_type = SLEEP_NONINTERACTIVE;
+
+	activate_task(p, rq, same_cpu);
+	/*
+	 * Sync wakeups (i.e. those types of wakeups where the waker
+	 * has indicated that it will leave the CPU in short order)
+	 * don't trigger a preemption, if the woken up task will run on
+	 * this cpu. (in this case the 'I will reschedule' promise of
+	 * the waker guarantees that the freshly woken up task is going
+	 * to be considered on this CPU.)
+	 */
+	if (!sync || !same_cpu) {
+		if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+	}
+}
+
+/*
+ * Perform scheduler related setup for a newly forked process p.
+ * p is forked by current.
+ */
+static void ingo_fork(task_t *p)
+{
+	p->sdu.ingosched.array = NULL;
+	/*
+	 * Share the timeslice between parent and child, thus the
+	 * total amount of pending timeslices in the system doesn't change,
+	 * resulting in more scheduling fairness.
+	 */
+	local_irq_disable();
+	p->sdu.ingosched.time_slice = (current->sdu.ingosched.time_slice + 1) >> 1;
+	/*
+	 * The remainder of the first timeslice might be recovered by
+	 * the parent if the child exits early enough.
+	 */
+	p->sdu.ingosched.first_time_slice = 1;
+	current->sdu.ingosched.time_slice >>= 1;
+	p->timestamp = sched_clock();
+	if (unlikely(!current->sdu.ingosched.time_slice)) {
+		/*
+		 * This case is rare, it happens when the parent has only
+		 * a single jiffy left from its timeslice. Taking the
+		 * runqueue lock is not a problem.
+		 */
+		current->sdu.ingosched.time_slice = 1;
+		scheduler_tick();
+	}
+	local_irq_enable();
+}
+
+/*
+ * wake_up_new_task - wake up a newly created task for the first time.
+ *
+ * This function will do some initial scheduler statistics housekeeping
+ * that must be done for every newly created context, then puts the task
+ * on the runqueue and wakes it.
+ */
+static void ingo_wake_up_new_task(task_t * p, unsigned long clone_flags)
+{
+	unsigned long flags;
+	int this_cpu, cpu;
+	runqueue_t *rq, *this_rq;
+
+	rq = task_rq_lock(p, &flags);
+	BUG_ON(p->state != TASK_RUNNING);
+	this_cpu = smp_processor_id();
+	cpu = task_cpu(p);
+
+	/*
+	 * We decrease the sleep average of forking parents
+	 * and children as well, to keep max-interactive tasks
+	 * from forking tasks that are max-interactive. The parent
+	 * (current) is done further down, under its lock.
+	 */
+	p->sdu.ingosched.sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
+		CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
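+	/*
+	 * e.g. with HZ=1000 a parent at the maximum bonus (sleep_avg close
+	 * to one second, inherited by the child across fork): the assignment
+	 * above then starts the child at 10 * 95 / 100 = 9 bonus points'
+	 * worth of sleep_avg, i.e. roughly 900ms.
+	 */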
+
+	p->prio = effective_prio(p);
+
+	if (likely(cpu == this_cpu)) {
+		if (!(clone_flags & CLONE_VM)) {
+			/*
+			 * The VM isn't cloned, so we're in a good position to
+			 * do child-runs-first in anticipation of an exec. This
+			 * usually avoids a lot of COW overhead.
+			 */
+			if (unlikely(!current->sdu.ingosched.array))
+				__activate_task(p, rq);
+			else {
+				p->prio = current->prio;
+				list_add_tail(&p->run_list, &current->run_list);
+				p->sdu.ingosched.array = current->sdu.ingosched.array;
+				p->sdu.ingosched.array->nr_active++;
+				inc_nr_running(p, rq);
+			}
+			set_need_resched();
+		} else
+			/* Run child last */
+			__activate_task(p, rq);
+		/*
+		 * We skip the following code due to cpu == this_cpu
+		 *
+		 *   task_rq_unlock(rq, &flags);
+		 *   this_rq = task_rq_lock(current, &flags);
+		 */
+		this_rq = rq;
+	} else {
+		this_rq = cpu_rq(this_cpu);
+
+		/*
+		 * Not the local CPU - must adjust timestamp. This should
+		 * get optimised away in the !CONFIG_SMP case.
+		 */
+		p->timestamp = (p->timestamp - this_rq->timestamp_last_tick)
+					+ rq->timestamp_last_tick;
+		__activate_task(p, rq);
+		if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+
+		/*
+		 * Parent and child are on different CPUs, now get the
+		 * parent runqueue to update the parent's ->sdu.ingosched.sleep_avg:
+		 */
+		task_rq_unlock(rq, &flags);
+		this_rq = task_rq_lock(current, &flags);
+	}
+	current->sdu.ingosched.sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
+		PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
+	task_rq_unlock(this_rq, &flags);
+}
+
+/*
+ * Potentially available exiting-child timeslices are
+ * retrieved here - this way the parent does not get
+ * penalized for creating too many threads.
+ *
+ * (this cannot be used to 'generate' timeslices
+ * artificially, because any timeslice recovered here
+ * was given away by the parent in the first place.)
+ */
+static void ingo_exit(task_t *p)
+{
+	unsigned long flags;
+	runqueue_t *rq;
+
+	/*
+	 * If the child was a (relative-) CPU hog then decrease
+	 * the sleep_avg of the parent as well.
+	 */
+	rq = task_rq_lock(p->parent, &flags);
+	if (p->sdu.ingosched.first_time_slice && task_cpu(p) == task_cpu(p->parent)) {
+		p->parent->sdu.ingosched.time_slice += p->sdu.ingosched.time_slice;
+		if (unlikely(p->parent->sdu.ingosched.time_slice > task_timeslice(p)))
+			p->parent->sdu.ingosched.time_slice = task_timeslice(p);
+	}
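+	/*
+	 * With EXIT_WEIGHT == 3 the blend below is
+	 * 3/4 * parent_sleep_avg + 1/4 * child_sleep_avg, so an exiting
+	 * CPU-hog child pulls the parent's sleep_avg down by at most a
+	 * quarter of the difference.
+	 */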
+	if (p->sdu.ingosched.sleep_avg < p->parent->sdu.ingosched.sleep_avg)
+		p->parent->sdu.ingosched.sleep_avg = p->parent->sdu.ingosched.sleep_avg /
+		(EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sdu.ingosched.sleep_avg /
+		(EXIT_WEIGHT + 1);
+	task_rq_unlock(rq, &flags);
+}
+
+#ifdef CONFIG_SMP
+/*
+ * pull_task - move a task from a remote runqueue to the local runqueue.
+ * Both runqueues must be locked.
+ */
+static
+void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
+	       runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
+{
+	dequeue_task(p, src_array);
+	dec_nr_running(p, src_rq);
+	set_task_cpu(p, this_cpu);
+	inc_nr_running(p, this_rq);
+	enqueue_task(p, this_array);
+	p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
+				+ this_rq->timestamp_last_tick;
+	/*
+	 * Note that idle threads have a prio of INGO_MAX_PRIO, so this test
+	 * is always true for them.
+	 */
+	if (TASK_PREEMPTS_CURR(p, this_rq))
+		resched_task(this_rq->curr);
+}
+
+/*
+ * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq,
+ * as part of a balancing operation within "domain". Returns the number of
+ * tasks moved.
+ *
+ * Called with both runqueues locked.
+ */
+static int ingo_move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
+		      unsigned long max_nr_move, unsigned long max_load_move,
+		      struct sched_domain *sd, enum idle_type idle,
+		      int *all_pinned)
+{
+	prio_array_t *array, *dst_array;
+	struct list_head *head, *curr;
+	int idx, pulled = 0, pinned = 0;
+	long rem_load_move;
+	task_t *tmp;
+
+	if (max_nr_move == 0 || max_load_move == 0)
+		goto out;
+
+	rem_load_move = max_load_move;
+	pinned = 1;
+
+	/*
+	 * We first consider expired tasks. Those will likely not be
+	 * executed in the near future, and they are most likely to
+	 * be cache-cold, thus switching CPUs has the least effect
+	 * on them.
+	 */
+	if (busiest->qu.ingosched.expired->nr_active) {
+		array = busiest->qu.ingosched.expired;
+		dst_array = this_rq->qu.ingosched.expired;
+	} else {
+		array = busiest->qu.ingosched.active;
+		dst_array = this_rq->qu.ingosched.active;
+	}
+
+new_array:
+	/* Start searching at priority 0: */
+	idx = 0;
+skip_bitmap:
+	if (!idx)
+		idx = sched_find_first_bit(array->bitmap);
+	else
+		idx = find_next_bit(array->bitmap, INGO_MAX_PRIO, idx);
+	if (idx >= INGO_MAX_PRIO) {
+		if (array == busiest->qu.ingosched.expired && busiest->qu.ingosched.active->nr_active) {
+			array = busiest->qu.ingosched.active;
+			dst_array = this_rq->qu.ingosched.active;
+			goto new_array;
+		}
+		goto out;
+	}
+
+	head = array->queue + idx;
+	curr = head->prev;
+skip_queue:
+	tmp = list_entry(curr, task_t, run_list);
+
+	curr = curr->prev;
+
+	if (tmp->load_weight > rem_load_move ||
+	    !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+
+#ifdef CONFIG_SCHEDSTATS
+	if (task_hot(tmp, busiest->timestamp_last_tick, sd))
+		schedstat_inc(sd, lb_hot_gained[idle]);
+#endif
+
+	pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
+	pulled++;
+	rem_load_move -= tmp->load_weight;
+
+	/*
+	 * We only want to steal up to the prescribed number of tasks
+	 * and the prescribed amount of weighted load.
+	 */
+	if (pulled < max_nr_move && rem_load_move > 0) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+out:
+	if (all_pinned)
+		*all_pinned = pinned;
+
+	return pulled;
+}
+#endif
+
+/*
+ * We place interactive tasks back into the active array, if possible.
+ *
+ * To guarantee that this does not starve expired tasks we ignore the
+ * interactivity of a task if the first expired task had to wait more
+ * than a 'reasonable' amount of time. This deadline timeout is
+ * load-dependent, as the frequency of array switches decreases with
+ * increasing number of running tasks. We also ignore the interactivity
+ * if a better static_prio task has expired:
+ */
+#define EXPIRED_STARVING(rq) \
+	((STARVATION_LIMIT && ((rq)->qu.ingosched.expired_timestamp && \
+		(jiffies - (rq)->qu.ingosched.expired_timestamp >= \
+			STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
+			((rq)->curr->static_prio > (rq)->qu.ingosched.best_expired_prio))
+
+/*
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled.
+ *
+ * It also gets called by the fork code, when changing the parent's
+ * timeslices.
+ */
+static void ingo_tick(struct task_struct *p, struct runqueue *rq, unsigned long long now)
+{
+	int cpu = smp_processor_id();
+
+	if (p == rq->idle) {
+		if (wake_priority_sleeper(rq))
+			goto out;
+		rebalance_tick(cpu, rq, SCHED_IDLE);
+		return;
+	}
+
+	/* Task might have expired already, but not scheduled off yet */
+	if (p->sdu.ingosched.array != rq->qu.ingosched.active) {
+		set_tsk_need_resched(p);
+		goto out;
+	}
+	spin_lock(&rq->lock);
+	/*
+	 * The task was running during this tick - update the
+	 * time slice counter. Note: we do not update a thread's
+	 * priority until it either goes to sleep or uses up its
+	 * timeslice. This makes it possible for interactive tasks
+	 * to use up their timeslices at their highest priority levels.
+	 */
+	if (rt_task(p)) {
+		/*
+		 * RR tasks need a special form of timeslice management.
+		 * FIFO tasks have no timeslices.
+		 */
+		if ((p->policy == SCHED_RR) && !--p->sdu.ingosched.time_slice) {
+			p->sdu.ingosched.time_slice = task_timeslice(p);
+			p->sdu.ingosched.first_time_slice = 0;
+			set_tsk_need_resched(p);
+
+			/* put it at the end of the queue: */
+			requeue_task(p, rq->qu.ingosched.active);
+		}
+		goto out_unlock;
+	}
+	if (!--p->sdu.ingosched.time_slice) {
+		dequeue_task(p, rq->qu.ingosched.active);
+		set_tsk_need_resched(p);
+		p->prio = effective_prio(p);
+		p->sdu.ingosched.time_slice = task_timeslice(p);
+		p->sdu.ingosched.first_time_slice = 0;
+
+		if (!rq->qu.ingosched.expired_timestamp)
+			rq->qu.ingosched.expired_timestamp = jiffies;
+		if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
+			enqueue_task(p, rq->qu.ingosched.expired);
+			if (p->static_prio < rq->qu.ingosched.best_expired_prio)
+				rq->qu.ingosched.best_expired_prio = p->static_prio;
+		} else
+			enqueue_task(p, rq->qu.ingosched.active);
+	} else {
+		/*
+		 * Prevent a too-long timeslice from allowing a task to monopolize
+		 * the CPU. We do this by splitting up the timeslice into
+		 * smaller pieces.
+		 *
+		 * Note: this does not mean the task's timeslices expire or
+		 * get lost in any way, they just might be preempted by
+		 * another task of equal priority. (one with higher
+		 * priority would have preempted this task already.) We
+		 * requeue this task to the end of the list on this priority
+		 * level, which is in essence a round-robin of tasks with
+		 * equal priority.
+		 *
+		 * This only applies to tasks in the interactive
+		 * delta range with at least TIMESLICE_GRANULARITY to requeue.
+		 */
+		if (TASK_INTERACTIVE(p) && !((task_timeslice(p) -
+			p->sdu.ingosched.time_slice) % TIMESLICE_GRANULARITY(p)) &&
+			(p->sdu.ingosched.time_slice >= TIMESLICE_GRANULARITY(p)) &&
+			(p->sdu.ingosched.array == rq->qu.ingosched.active)) {
+
+			requeue_task(p, rq->qu.ingosched.active);
+			set_tsk_need_resched(p);
+		}
+	}
+out_unlock:
+	spin_unlock(&rq->lock);
+out:
+	rebalance_tick(cpu, rq, NOT_IDLE);
+}
+
+#ifdef CONFIG_SCHED_SMT
+static struct task_struct *ingo_head_of_queue(union runqueue_queue *rqq)
+{
+	prio_array_t *array = rqq->ingosched.active;
+
+	if (!array->nr_active)
+		array = rqq->ingosched.expired;
+	BUG_ON(!array->nr_active);
+
+	return list_entry(array->queue[sched_find_first_bit(array->bitmap)].next,
+		task_t, run_list);
+}
+
+/*
+ * Number of 'lost' timeslices this task won't be able to fully
+ * utilize if another task runs on a sibling. This models the
+ * slowdown effect of other tasks running on siblings:
+ */
+static inline unsigned long smt_slice(const task_t *p, struct sched_domain *sd)
+{
+	return p->sdu.ingosched.time_slice * (100 - sd->per_cpu_gain) / 100;
+}
+
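+/*
+ * True if p1's remaining timeslice, discounted for the SMT sibling slowdown
+ * by smt_slice(), still exceeds a full timeslice of p2.
+ */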
+static int ingo_dependent_sleeper_trumps(const struct task_struct *p1,
+	const struct task_struct * p2, struct sched_domain *sd)
+{
+	return smt_slice(p1, sd) > task_timeslice(p2);
+}
+#endif
+
+static inline int interactive_sleep(enum sleep_type sleep_type)
+{
+	return (sleep_type == SLEEP_INTERACTIVE ||
+		sleep_type == SLEEP_INTERRUPTED);
+}
+
+/*
+ * schedule() is the main scheduler function.
+ */
+static void ingo_schedule(void)
+{
+	long *switch_count;
+	prio_array_t *array;
+	unsigned long run_time;
+	int cpu, idx, new_prio;
+	struct task_struct *prev = current, *next;
+	struct list_head *queue;
+	struct runqueue *rq = this_rq();
+	unsigned long long now = sched_clock();
+
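+	/*
+	 * run_time: CPU time used since prev last started running, floored at
+	 * zero to guard against clock warps and capped at NS_MAX_SLEEP_AVG.
+	 */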
+	if (likely((long long)(now - prev->timestamp) < NS_MAX_SLEEP_AVG)) {
+		run_time = now - prev->timestamp;
+		if (unlikely((long long)(now - prev->timestamp) < 0))
+			run_time = 0;
+	} else
+		run_time = NS_MAX_SLEEP_AVG;
+
+	/*
+	 * Tasks are charged proportionately less run_time at high sleep_avg
+	 * to delay them losing their interactive status.
+	 */
+	run_time /= (CURRENT_BONUS(prev) ? : 1);
+
+	spin_lock_irq(&rq->lock);
+
+	if (unlikely(prev->flags & PF_DEAD))
+		prev->state = EXIT_DEAD;
+
+	switch_count = &prev->nivcsw;
+	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+		switch_count = &prev->nvcsw;
+		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+				unlikely(signal_pending(prev))))
+			prev->state = TASK_RUNNING;
+		else {
+			if (prev->state == TASK_UNINTERRUPTIBLE)
+				rq->nr_uninterruptible++;
+			deactivate_task(prev, rq);
+		}
+	}
+
+	cpu = smp_processor_id();
+	if (unlikely(!rq->nr_running)) {
+go_idle:
+		idle_balance(cpu, rq);
+		if (!rq->nr_running) {
+			next = rq->idle;
+			rq->qu.ingosched.expired_timestamp = 0;
+			wake_sleeping_dependent(cpu, rq);
+			/*
+			 * wake_sleeping_dependent() might have released
+			 * the runqueue, so break out if we got new
+			 * tasks meanwhile:
+			 */
+			if (!rq->nr_running)
+				goto switch_tasks;
+		}
+	} else {
+		if (dependent_sleeper(cpu, rq)) {
+			next = rq->idle;
+			goto switch_tasks;
+		}
+		/*
+		 * dependent_sleeper() releases and reacquires the runqueue
+		 * lock, hence go into the idle loop if the rq went
+		 * empty meanwhile:
+		 */
+		if (unlikely(!rq->nr_running))
+			goto go_idle;
+	}
+
+	array = rq->qu.ingosched.active;
+	if (unlikely(!array->nr_active)) {
+		/*
+		 * Switch the active and expired arrays.
+		 */
+		schedstat_inc(rq, sched_switch);
+		rq->qu.ingosched.active = rq->qu.ingosched.expired;
+		rq->qu.ingosched.expired = array;
+		array = rq->qu.ingosched.active;
+		rq->qu.ingosched.expired_timestamp = 0;
+		rq->qu.ingosched.best_expired_prio = INGO_MAX_PRIO;
+	}
+
+	idx = sched_find_first_bit(array->bitmap);
+	queue = array->queue + idx;
+	next = list_entry(queue->next, task_t, run_list);
+
+	if (!rt_task(next) && interactive_sleep(next->sdu.ingosched.sleep_type)) {
+		unsigned long long delta = now - next->timestamp;
+		if (unlikely((long long)(now - next->timestamp) < 0))
+			delta = 0;
+
+		if (next->sdu.ingosched.sleep_type == SLEEP_INTERACTIVE)
+			delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
+
+		array = next->sdu.ingosched.array;
+		new_prio = recalc_task_prio(next, next->timestamp + delta);
+
+		if (unlikely(next->prio != new_prio)) {
+			dequeue_task(next, array);
+			next->prio = new_prio;
+			enqueue_task(next, array);
+		}
+	}
+	next->sdu.ingosched.sleep_type = SLEEP_NORMAL;
+switch_tasks:
+	if (next == rq->idle)
+		schedstat_inc(rq, sched_goidle);
+	prefetch(next);
+	prefetch_stack(next);
+	clear_tsk_need_resched(prev);
+	rcu_qsctr_inc(task_cpu(prev));
+
+	update_cpu_clock(prev, rq, now);
+
+	prev->sdu.ingosched.sleep_avg -= run_time;
+	if ((long)prev->sdu.ingosched.sleep_avg <= 0)
+		prev->sdu.ingosched.sleep_avg = 0;
+	prev->timestamp = prev->last_ran = now;
+
+	sched_info_switch(prev, next);
+	if (likely(prev != next)) {
+		next->timestamp = now;
+		rq->nr_switches++;
+		rq->curr = next;
+		++*switch_count;
+
+		prepare_task_switch(rq, next);
+		prev = context_switch(rq, prev, next);
+		barrier();
+		/*
+		 * this_rq must be evaluated again because prev may have moved
+		 * CPUs since it called schedule(), thus the 'rq' on its stack
+		 * frame will be invalid.
+		 */
+		finish_task_switch(this_rq(), prev);
+	} else
+		spin_unlock_irq(&rq->lock);
+}
+
+static void ingo_set_normal_task_nice(task_t *p, long nice)
+{
+	prio_array_t *array;
+	int old_prio, new_prio, delta;
+	struct runqueue *rq = task_rq(p);
+
+	array = p->sdu.ingosched.array;
+	if (array) {
+		dequeue_task(p, array);
+		dec_raw_weighted_load(rq, p);
+	}
+
+	old_prio = p->prio;
+	new_prio = NICE_TO_PRIO(nice);
+	delta = new_prio - old_prio;
+	p->static_prio = NICE_TO_PRIO(nice);
+	ingo_set_load_weight(p);
+	p->prio += delta;
+
+	if (array) {
+		enqueue_task(p, array);
+		inc_raw_weighted_load(rq, p);
+		/*
+		 * If the task increased its priority or is running and
+		 * lowered its priority, then reschedule its CPU:
+		 */
+		if (delta < 0 || (delta > 0 && task_running(rq, p)))
+			resched_task(rq->curr);
+	}
+}
+
+static void ingo_init_batch_task(task_t *p)
+{
+	p->sdu.ingosched.sleep_avg = 0;
+}
+
+/*
+ * setscheduler - change the scheduling policy and/or RT priority of a thread.
+ */
+static void ingo_setscheduler(task_t *p, int policy, int prio)
+{
+	int oldprio;
+	prio_array_t *array;
+	runqueue_t *rq = task_rq(p);
+
+	array = p->sdu.ingosched.array;
+	if (array)
+		deactivate_task(p, rq);
+	oldprio = p->prio;
+	__setscheduler(p, policy, prio);
+	if (array) {
+		__activate_task(p, rq);
+		/*
+		 * Reschedule if we are currently running on this runqueue and
+		 * our priority decreased, or if we are not currently running on
+		 * this runqueue and our priority is higher than the current's
+		 */
+		if (task_running(rq, p)) {
+			if (p->prio > oldprio)
+				resched_task(rq->curr);
+		} else if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+	}
+}
+
+/**
+ * ingo_sys_yield - yield the current processor to other threads.
+ *
+ * This function yields the current CPU by moving the calling thread
+ * to the expired array. If there are no other threads running on this
+ * CPU then this function will return.
+ */
+
+static long ingo_sys_yield(void)
+{
+	runqueue_t *rq = this_rq_lock();
+	prio_array_t *array = current->sdu.ingosched.array;
+	prio_array_t *target = rq->qu.ingosched.expired;
+
+	schedstat_inc(rq, yld_cnt);
+	/*
+	 * We implement yielding by moving the task into the expired
+	 * queue.
+	 *
+	 * (special rule: RT tasks will just roundrobin in the active
+	 *  array.)
+	 */
+	if (rt_task(current))
+		target = rq->qu.ingosched.active;
+
+	if (array->nr_active == 1) {
+		schedstat_inc(rq, yld_act_empty);
+		if (!rq->qu.ingosched.expired->nr_active)
+			schedstat_inc(rq, yld_both_empty);
+	} else if (!rq->qu.ingosched.expired->nr_active)
+		schedstat_inc(rq, yld_exp_empty);
+
+	if (array != target) {
+		dequeue_task(current, array);
+		enqueue_task(current, target);
+	} else
+		/*
+		 * requeue_task is cheaper so perform that if possible.
+		 */
+		requeue_task(current, array);
+
+	/*
+	 * Since we are going to call schedule() anyway, there's
+	 * no need to preempt or enable interrupts:
+	 */
+	__release(rq->lock);
+	_raw_spin_unlock(&rq->lock);
+	preempt_enable_no_resched();
+
+	schedule();
+
+	return 0;
+}
+
+static void ingo_yield(void)
+{
+	set_current_state(TASK_RUNNING);
+	ingo_sys_yield();
+}
+
+static void ingo_init_idle(task_t *idle, int cpu)
+{
+	idle->sdu.ingosched.sleep_avg = 0;
+	idle->sdu.ingosched.array = NULL;
+	idle->prio = INGO_MAX_PRIO;
+}
+
+#ifdef CONFIG_SMP
+/* source and destination queues will be already locked */
+static void ingo_migrate_queued_task(struct task_struct *p, int dest_cpu)
+{
+	struct runqueue *rq_src = task_rq(p);
+	struct runqueue *rq_dest = cpu_rq(dest_cpu);
+
+	/*
+	 * Sync timestamp with rq_dest's before activating.
+	 * The same thing could be achieved by doing this step
+	 * afterwards, and pretending it was a local activate.
+	 * This way is cleaner and logically correct.
+	 */
+	p->timestamp = p->timestamp - rq_src->timestamp_last_tick
+			+ rq_dest->timestamp_last_tick;
+	deactivate_task(p, rq_src);
+	set_task_cpu(p, dest_cpu);
+	activate_task(p, rq_dest, 0);
+	if (TASK_PREEMPTS_CURR(p, rq_dest))
+		resched_task(rq_dest->curr);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void ingo_set_select_idle_first(struct runqueue *rq)
+{
+	__setscheduler(rq->idle, SCHED_FIFO, MAX_RT_PRIO-1);
+	/* Add idle task to the _front_ of its priority queue */
+	__activate_idle_task(rq->idle, rq);
+}
+
+static void ingo_set_select_idle_last(struct runqueue *rq)
+{
+	deactivate_task(rq->idle, rq);
+	rq->idle->static_prio = INGO_MAX_PRIO;
+	__setscheduler(rq->idle, SCHED_NORMAL, 0);
+}
+
+static void ingo_migrate_dead_tasks(unsigned int dead_cpu)
+{
+	unsigned arr, i;
+	struct runqueue *rq = cpu_rq(dead_cpu);
+
+	for (arr = 0; arr < 2; arr++) {
+		for (i = 0; i < INGO_MAX_PRIO; i++) {
+			struct list_head *list = &rq->qu.ingosched.arrays[arr].queue[i];
+			while (!list_empty(list))
+				migrate_dead(dead_cpu,
+					     list_entry(list->next, task_t,
+							run_list));
+		}
+	}
+}
+#endif
+#endif
+
+static void ingo_sched_init(void)
+{
+	init_task.sdu.ingosched.time_slice = HZ;
+	init_task.sdu.ingosched.array = NULL;
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static void ingo_normalize_rt_task(struct task_struct *p)
+{
+	prio_array_t *array;
+	unsigned long flags;
+	runqueue_t *rq;
+
+	rq = task_rq_lock(p, &flags);
+
+	array = p->sdu.ingosched.array;
+	if (array)
+		deactivate_task(p, rq);
+	__setscheduler(p, SCHED_NORMAL, 0);
+	if (array) {
+		__activate_task(p, rq);
+		resched_task(rq->curr);
+	}
+
+	task_rq_unlock(rq, &flags);
+}
+#endif
+
+const struct sched_drv ingo_sched_drv = {
+	.name = "ingosched",
+	.init_runqueue_queue = ingo_init_runqueue_queue,
+	.set_oom_time_slice = ingo_set_oom_time_slice,
+#ifdef CONFIG_SMP
+	.set_load_weight = ingo_set_load_weight,
+#endif
+	.task_timeslice = task_timeslice,
+	.wake_up_task = ingo_wake_up_task,
+	.fork = ingo_fork,
+	.wake_up_new_task = ingo_wake_up_new_task,
+	.exit = ingo_exit,
+#ifdef CONFIG_SMP
+	.move_tasks = ingo_move_tasks,
+#endif
+	.tick = ingo_tick,
+#ifdef CONFIG_SCHED_SMT
+	.head_of_queue = ingo_head_of_queue,
+	.dependent_sleeper_trumps = ingo_dependent_sleeper_trumps,
+#endif
+	.schedule = ingo_schedule,
+	.set_normal_task_nice = ingo_set_normal_task_nice,
+	.init_batch_task = ingo_init_batch_task,
+	.setscheduler = ingo_setscheduler,
+	.sys_yield = ingo_sys_yield,
+	.yield = ingo_yield,
+	.init_idle = ingo_init_idle,
+	.sched_init = ingo_sched_init,
+#ifdef CONFIG_SMP
+	.migrate_queued_task = ingo_migrate_queued_task,
+#ifdef CONFIG_HOTPLUG_CPU
+	.set_select_idle_first = ingo_set_select_idle_first,
+	.set_select_idle_last = ingo_set_select_idle_last,
+	.migrate_dead_tasks = ingo_migrate_dead_tasks,
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	.normalize_rt_task = ingo_normalize_rt_task,
+#endif
+	.attrs = NULL,
+};
diff -urN oldtree/kernel/nicksched.c newtree/kernel/nicksched.c
--- oldtree/kernel/nicksched.c	1970-01-01 00:00:00.000000000 +0000
+++ newtree/kernel/nicksched.c	2006-03-08 20:33:41.508198500 +0000
@@ -0,0 +1,1059 @@
+/*
+ *  kernel/nicksched.c
+ *  Copyright (C) 1991-2005  Linus Torvalds
+ *
+ *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
+ *		hybrid priority-list and round-robin design with
+ *		an array-switch method of distributing timeslices
+ *		and per-CPU runqueues.  Cleanups and useful suggestions
+ *		by Davide Libenzi, preemptible kernel bits by Robert Love.
+ */
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/rcupdate.h>
+#include <linux/security.h>
+#include <linux/cpu.h>
+#include <linux/hardirq.h>
+#include <linux/sched_pvt.h>
+#include <linux/sched_runq.h>
+
+static void nick_init_runqueue_queue(union runqueue_queue *rqq)
+{
+	int j;
+
+	rqq->nicksched.active = rqq->nicksched.arrays;
+	rqq->nicksched.expired = rqq->nicksched.arrays + 1;
+
+	for (j = 0; j < 2; j++) {
+		int k;
+		struct nick_prio_array *array = rqq->nicksched.arrays + j;
+
+		array->min_prio = NICK_MAX_PRIO;
+		for (k = 0; k < NICK_MAX_PRIO; k++) {
+			INIT_LIST_HEAD(array->queue + k);
+			__clear_bit(k, array->bitmap);
+		}
+		/* delimiter for bitsearch */
+		__set_bit(NICK_MAX_PRIO, array->bitmap);
+		array->nr_active = 0;
+	}
+
+	rqq->nicksched.array_sequence = 0;
+}
+
+static void nick_set_oom_time_slice(struct task_struct *p, unsigned long t)
+{
+}
+
+/*
+ * 'User priority' is the nice value converted to something we
+ * can work with better when scaling various scheduler parameters,
+ * it's a [ 0 ... 39 ] range.
+ */
+#define USER_PRIO(p)		((p) - MAX_RT_PRIO)
+#define MAX_USER_PRIO		(USER_PRIO(NICK_MAX_PRIO))
+/*
+ * Correct for fact that p->static_prio has normal mapping
+ */
+#define STATIC_USER_PRIO(p)	((p)->static_prio - MAX_RT_PRIO + 10)
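+/* e.g. a nice-0 task (static_prio == MAX_RT_PRIO + 20) maps to 30 here */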
+
+/*
+ * Some helpers for converting microsecond timing to jiffy resolution
+ */
+#define US_TO_JIFFIES(x)	((x) * HZ / 1000000)
+#define JIFFIES_TO_US(x)	((x) * 1000000 / HZ)
+
+static int base_timeslice = 128;
+#define min_base_timeslice 1
+#define max_base_timeslice 10000
+
+#define RT_TIMESLICE		(50 * 1000 / HZ)		/* 50ms */
+#define BASE_TIMESLICE		(base_timeslice)
+#define MIN_TIMESLICE		(base_timeslice / 16 ?: 1)
+
+/* Maximum amount of history that will be used to calculate priority */
+#define MAX_SLEEP_SHIFT		19
+#define MAX_SLEEP		(1UL << MAX_SLEEP_SHIFT)	/* ~0.52s */
+
+/*
+ * Maximum effect that one block of activity (run/sleep/etc.) can have.
+ * This moderates the impact of freak events (e.g. SIGSTOP).
+ */
+#define MAX_SLEEP_AFFECT	(MAX_SLEEP/4)
+
+/*
+ * The amount of history can be decreased (on fork for example). This puts a
+ * lower bound on it.
+ */
+#define MIN_HISTORY		(MAX_SLEEP/8)
+#define FORKED_TS_MAX		(US_TO_JIFFIES(MIN_HISTORY) ?: 1)
+
+/*
+ * SLEEP_FACTOR is a fixed point factor used to scale history tracking things.
+ * In particular: total_time, sleep_time, sleep_avg.
+ */
+#define SLEEP_FACTOR		1024
+
+/*
+ *  The scheduler classifies a process as performing one of the following
+ *  activities
+ */
+#define STIME_SLEEP		1	/* Sleeping */
+#define STIME_RUN		2	/* Using CPU */
+
+#define TASK_PREEMPTS_CURR(p, rq) \
+	((p)->prio < (rq)->curr->prio)
+
+/*
+ * Adding/removing a task to/from a priority array:
+ */
+static void dequeue_task(struct task_struct *p, struct nick_prio_array *array)
+{
+	array->nr_active--;
+	list_del_init(&p->run_list);
+	if (list_empty(array->queue + p->prio))
+		__clear_bit(p->prio, array->bitmap);
+}
+
+static void enqueue_task(struct task_struct *p, struct nick_prio_array *array)
+{
+	struct list_head *entry = array->queue + p->prio;
+
+	sched_info_queued(p);
+	if (!rt_task(p)) {
+		/*
+		 * Cycle tasks on the same priority level. This reduces their
+		 * timeslice fluctuations due to higher priority tasks expiring.
+		 */
+		if (!list_empty(entry))
+			entry = entry->next;
+	}
+	list_add_tail(&p->run_list, entry);
+	__set_bit(p->prio, array->bitmap);
+	array->nr_active++;
+	p->sdu.nicksched.array = array;
+}
+
+static inline void enqueue_task_head(struct task_struct *p, struct nick_prio_array *array)
+{
+	list_add(&p->run_list, array->queue + p->prio);
+	__set_bit(p->prio, array->bitmap);
+	array->nr_active++;
+	p->sdu.nicksched.array = array;
+}
+
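+/* Nanoseconds to approximate microseconds: >> 10 divides by 1024 rather than 1000 */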
+#define NS_TO_APPROX_US(t) ((t) >> 10)
+
+/*
+ * add_task_time updates a task @p after @time of doing the specified @type
+ * of activity. See STIME_*. This is used for priority calculation.
+ */
+static inline void add_task_time(task_t *p, unsigned long long time, unsigned long type)
+{
+	unsigned long ratio;
+	unsigned long long tmp;
+	unsigned long t;
+	if (type == STIME_SLEEP) {
+		if (time > MAX_SLEEP_AFFECT*4)
+			time = MAX_SLEEP_AFFECT*4;
+		t = ((unsigned long)time + 3) / 4;
+	} else {
+		unsigned long div = 60 - STATIC_USER_PRIO(p);
+		t = (unsigned long)time * 30;
+		t = t / div;
+		t = t * 30;
+		t = t / div;
+	}
+
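+	/*
+	 * Decay the existing history by (MAX_SLEEP - t)/MAX_SLEEP (rounded),
+	 * then credit the new interval t so recent behaviour dominates.
+	 */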
+	ratio = MAX_SLEEP - t;
+	tmp = (unsigned long long)ratio * p->sdu.nicksched.total_time + MAX_SLEEP/2;
+	tmp >>= MAX_SLEEP_SHIFT;
+	p->sdu.nicksched.total_time = (unsigned long)tmp;
+
+	tmp = (unsigned long long)ratio * p->sdu.nicksched.sleep_time + MAX_SLEEP/2;
+	tmp >>= MAX_SLEEP_SHIFT;
+	p->sdu.nicksched.sleep_time = (unsigned long)tmp;
+
+	p->sdu.nicksched.total_time += t;
+	if (type == STIME_SLEEP)
+		p->sdu.nicksched.sleep_time += t;
+}
+
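+/*
+ * Fraction of the tracked history spent sleeping, scaled to [0, SLEEP_FACTOR];
+ * the "+ 1" avoids a division by zero for tasks with no history yet.
+ */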
+static unsigned long task_sleep_avg(task_t *p)
+{
+	return (SLEEP_FACTOR * p->sdu.nicksched.sleep_time) / (p->sdu.nicksched.total_time + 1);
+}
+
+/*
+ * The higher a thread's priority, the bigger timeslices
+ * it gets during one round of execution. But even the lowest
+ * priority thread gets MIN_TIMESLICE worth of execution time.
+ *
+ * Timeslices are scaled, so if only low priority processes are running,
+ * they will all get long timeslices.
+ */
+
+static int task_timeslice(const task_t *p, runqueue_t *rq)
+{
+	int idx, base, delta;
+	int timeslice;
+
+	if (rt_task(p))
+		return RT_TIMESLICE;
+
+	idx = min(p->prio, rq->qu.nicksched.expired->min_prio);
+	delta = p->prio - idx;
+	base = BASE_TIMESLICE * (MAX_USER_PRIO + 1) / (delta + 2);
+	base = base * (MAX_USER_PRIO + 1) / (delta + 2);
+
+	base = base * 40 / (70 - USER_PRIO(idx));
+	base = base * 40 / (70 - USER_PRIO(idx));
+
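+	/*
+	 * Strip the fixed-point scaling (>> 10, roughly /1024); the HZ/1000
+	 * step below then treats the result as milliseconds and converts it
+	 * to jiffies, clamped at MIN_TIMESLICE.
+	 */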
+	timeslice = base >> 10;
+	timeslice = timeslice * HZ / 1000;
+	if (timeslice < MIN_TIMESLICE)
+		timeslice = MIN_TIMESLICE;
+
+	return timeslice;
+}
+
+#ifdef CONFIG_SMP
+/*
+ * To aid in avoiding the subversion of "niceness" due to uneven distribution
+ * of tasks with abnormal "nice" values across CPUs the contribution that
+ * each task makes to its run queue's load is weighted according to its
+ * scheduling class and "nice" value.
+ */
+#define NICE_TO_LP(nice) (((nice) >= 0) ? (20 - (nice)) : (20 + (nice) * (nice)))
+#define LOAD_WEIGHT(lp) \
+	(((lp) * SCHED_LOAD_SCALE) / NICE_TO_LP(0))
+#define PRIO_TO_LOAD_WEIGHT(prio) \
+	LOAD_WEIGHT(NICE_TO_LP(PRIO_TO_NICE(prio)))
+#define RTPRIO_TO_LOAD_WEIGHT(rp) \
+	(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
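+/* NICE_TO_LP(0) is 20, so a nice-0 task's load_weight is exactly SCHED_LOAD_SCALE */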
+
+static inline void nick_set_load_weight(task_t *p)
+{
+	if (rt_task(p)) {
+		if (p == task_rq(p)->migration_thread)
+			/*
+			 * The migration thread does the actual balancing.
+			 * Giving its load any weight will skew balancing
+			 * adversely.
+			 */
+			p->load_weight = 0;
+		else
+			p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
+	} else
+		p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
+}
+#else
+static inline void nick_set_load_weight(task_t *p)
+{
+}
+#endif
+
+/*
+ * task_priority: calculates a task's priority based on previous running
+ * history (see add_task_time). The priority is just a simple linear function
+ * based on sleep_avg and static_prio.
+ */
+static int task_priority(task_t *p)
+{
+	unsigned long sleep_avg;
+ 	int bonus, prio;
+
+ 	if (rt_task(p))
+ 		return p->prio;
+
+	sleep_avg = task_sleep_avg(p);
+
+	prio = STATIC_USER_PRIO(p) + 10;
+	if (p->policy == SCHED_BATCH)
+		bonus = 0;
+	else
+		bonus = (((MAX_USER_PRIO + 1) / 3) * sleep_avg +
+			 (SLEEP_FACTOR / 2)) / SLEEP_FACTOR;
+	prio = MAX_RT_PRIO + prio - bonus;
+
+ 	if (prio < MAX_RT_PRIO)
+		return MAX_RT_PRIO;
+ 	if (prio > NICK_MAX_PRIO-1)
+		return NICK_MAX_PRIO-1;
+
+ 	return prio;
+}
+
+/*
+ * __activate_task - move a task to the runqueue.
+ */
+static inline void __activate_task(task_t *p, runqueue_t *rq, struct nick_prio_array *array)
+{
+	enqueue_task(p, array);
+	inc_nr_running(p, rq);
+	if (!rt_task(p)) {
+		if (p->prio < array->min_prio)
+			array->min_prio = p->prio;
+	}
+}
+
+/*
+ * activate_task - move a task to the runqueue and do priority recalculation
+ *
+ * Update all the scheduling statistics stuff. (sleep average
+ * calculation, priority modifiers, etc.)
+ */
+static void activate_task(task_t *p, runqueue_t *rq, int local)
+{
+	unsigned long long now, sleep;
+	struct nick_prio_array *array;
+
+	now = sched_clock();
+#ifdef CONFIG_SMP
+	if (!local) {
+		/* Compensate for drifting sched_clock */
+		runqueue_t *this_rq = this_rq();
+		now = (now - this_rq->timestamp_last_tick)
+			+ rq->timestamp_last_tick;
+	}
+#endif
+
+	/*
+	 * If we have slept through an active/expired array switch, restart
+	 * our timeslice too.
+	 */
+	sleep = NS_TO_APPROX_US(now - p->timestamp);
+	p->timestamp = now;
+	add_task_time(p, sleep, STIME_SLEEP);
+	p->prio = task_priority(p);
+
+	array = rq->qu.nicksched.active;
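+	/*
+	 * used_slice == -1 means the slice was fully consumed before the task
+	 * slept; unless an array switch intervened, such a task wakes up on
+	 * the expired array with a fresh slice.
+	 */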
+	if (rq->qu.nicksched.array_sequence != p->sdu.nicksched.array_sequence) {
+		p->sdu.nicksched.used_slice = 0;
+	} else if (unlikely(p->sdu.nicksched.used_slice == -1)) {
+		p->sdu.nicksched.used_slice = 0;
+		array = rq->qu.nicksched.expired;
+	}
+
+	__activate_task(p, rq, array);
+}
+
+/*
+ * __activate_idle_task - move idle task to the _front_ of runqueue.
+ */
+static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
+{
+	enqueue_task_head(p, rq->qu.nicksched.active);
+	inc_nr_running(p, rq);
+}
+
+/*
+ * deactivate_task - remove a task from the runqueue.
+ */
+static inline void deactivate_task(struct task_struct *p, runqueue_t *rq)
+{
+	p->sdu.nicksched.array_sequence = rq->qu.nicksched.array_sequence;
+	dec_nr_running(p, rq);
+	dequeue_task(p, p->sdu.nicksched.array);
+	p->sdu.nicksched.array = NULL;
+}
+
+/***
+ * nick_wake_up_task - wake up a thread
+ * @p: the to-be-woken-up thread
+ * @old_state: the task's state before being woken
+ * @sync: do a synchronous wakeup?
+ * @rq: The run queue on which the task is to be placed (already locked)
+ */
+static void nick_wake_up_task(struct task_struct *p, struct runqueue *rq, unsigned int old_state, int sync)
+{
+	int same_cpu = (rq == this_rq());
+
+	if (old_state == TASK_UNINTERRUPTIBLE)
+		rq->nr_uninterruptible--;
+
+	/*
+	 * Sync wakeups (i.e. those types of wakeups where the waker
+	 * has indicated that it will leave the CPU in short order)
+	 * don't trigger a preemption if the woken-up task will run on
+	 * this CPU. (In this case the 'I will reschedule' promise of
+	 * the waker guarantees that the freshly woken up task is going
+	 * to be considered on this CPU.)
+	 */
+	activate_task(p, rq, same_cpu);
+	if (!sync || !same_cpu) {
+		if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+	}
+}
+
+/*
+ * Perform scheduler related setup for a newly forked process p.
+ * p is forked by current.
+ */
+static void nick_fork(task_t *p)
+{
+	unsigned long sleep_avg;
+	runqueue_t *rq;
+
+	p->sdu.nicksched.array = NULL;
+
+	p->timestamp = sched_clock();
+	p->sdu.nicksched.used_slice = 0;
+	if (rt_task(p)) {
+		BUG_ON(!rt_task(current));
+		return;
+	}
+
+	preempt_disable();
+	rq = this_rq();
+	/* Get MIN_HISTORY of history with the same sleep_avg as parent. */
+	sleep_avg = task_sleep_avg(current);
+	p->sdu.nicksched.total_time = MIN_HISTORY;
+	p->sdu.nicksched.sleep_time = p->sdu.nicksched.total_time * sleep_avg / SLEEP_FACTOR;
+
+	/* Parent loses 1/4 of sleep time for forking */
+	current->sdu.nicksched.sleep_time = 3 * current->sdu.nicksched.sleep_time / 4;
+
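+	/*
+	 * With interrupts off, charge the parent roughly a quarter of a
+	 * timeslice for the fork; if nothing is left, mark the slice expired
+	 * and reschedule.  An idle or already-expired parent passes the
+	 * expired mark straight on to the child.
+	 */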
+	local_irq_disable();
+	if (unlikely(current->sdu.nicksched.used_slice == -1 || current == rq->idle))
+		p->sdu.nicksched.used_slice = -1;
+	else {
+		int ts = task_timeslice(current, rq);
+		current->sdu.nicksched.used_slice += (ts + 3) / 4;
+		if (current->sdu.nicksched.used_slice >= ts) {
+			current->sdu.nicksched.used_slice = -1;
+			set_need_resched();
+		}
+	}
+	local_irq_enable();
+	preempt_enable();
+}
+
+/*
+ * wake_up_new_task - wake up a newly created task for the first time.
+ *
+ * This function will do some initial scheduler statistics housekeeping
+ * that must be done for every newly created context, then puts the task
+ * on the runqueue and wakes it.
+ */
+static void nick_wake_up_new_task(task_t * p, unsigned long clone_flags)
+{
+	unsigned long flags;
+	int this_cpu, cpu;
+	runqueue_t *rq;
+	struct nick_prio_array *array;
+
+	rq = task_rq_lock(p, &flags);
+
+	BUG_ON(p->state != TASK_RUNNING);
+
+	cpu = task_cpu(p);
+	this_cpu = smp_processor_id();
+
+	array = rq->qu.nicksched.active;
+	if (!rt_task(p)) {
+		if (unlikely(p->sdu.nicksched.used_slice == -1)) {
+			p->sdu.nicksched.used_slice = 0;
+			array = rq->qu.nicksched.expired;
+		} else {
+			int total = task_timeslice(p, rq);
+			int ts = max((total + 3) / 4, MIN_TIMESLICE);
+			ts = min(ts, (int)FORKED_TS_MAX);
+			p->sdu.nicksched.used_slice = total - ts;
+		}
+	}
+
+	if (likely(cpu == this_cpu)) {
+		if (!(clone_flags & CLONE_VM) && likely(array == rq->qu.nicksched.active)) {
+			/*
+			 * The VM isn't cloned, so we're in a good position to
+			 * do child-runs-first in anticipation of an exec. This
+			 * usually avoids a lot of COW overhead.
+			 */
+			if (p->prio >= current->prio) {
+				p->prio = current->prio;
+				list_add_tail(&p->run_list, &current->run_list);
+				p->sdu.nicksched.array = current->sdu.nicksched.array;
+				p->sdu.nicksched.array->nr_active++;
+				inc_nr_running(p, rq);
+			} else {
+				p->prio = task_priority(p);
+				__activate_task(p, rq, array);
+			}
+			set_need_resched();
+		} else {
+			/* Run child last */
+			p->prio = task_priority(p);
+			__activate_task(p, rq, array);
+		}
+#ifdef CONFIG_SMP
+	} else {
+		runqueue_t *this_rq = cpu_rq(this_cpu);
+
+		/*
+		 * Not the local CPU - must adjust timestamp. This should
+		 * get optimised away in the !CONFIG_SMP case.
+		 */
+		p->timestamp = (p->timestamp - this_rq->timestamp_last_tick)
+					+ rq->timestamp_last_tick;
+		p->prio = task_priority(p);
+		__activate_task(p, rq, array);
+		if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+#endif
+	}
+
+ 	task_rq_unlock(rq, &flags);
+}
+
+/*
+ * Scheduler hook called when a task exits.  The stock O(1) scheduler uses
+ * this to hand an exiting child's unused timeslice back to its parent so
+ * that the parent is not penalized for creating many threads; nicksched
+ * keeps no such debt, so there is nothing to do here.
+ */
+static void nick_exit(task_t * p)
+{
+}
+
+#ifdef CONFIG_SMP
+/*
+ * pull_task - move a task from a remote runqueue to the local runqueue.
+ * Both runqueues must be locked.
+ */
+static inline
+void pull_task(runqueue_t *src_rq, struct nick_prio_array *src_array, task_t *p,
+	       runqueue_t *this_rq, struct nick_prio_array *this_array, int this_cpu)
+{
+	dequeue_task(p, src_array);
+	dec_nr_running(p, src_rq);
+	set_task_cpu(p, this_cpu);
+	inc_nr_running(p, this_rq);
+	enqueue_task(p, this_array);
+	p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
+				+ this_rq->timestamp_last_tick;
+	/*
+	 * Note that idle threads have a prio of NICK_MAX_PRIO, so this test
+	 * is always true for them.
+	 */
+	if (TASK_PREEMPTS_CURR(p, this_rq))
+		resched_task(this_rq->curr);
+}
+
+/*
+ * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq,
+ * as part of a balancing operation within "domain". Returns the number of
+ * tasks moved.
+ *
+ * Called with both runqueues locked.
+ */
+static int nick_move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
+		      unsigned long max_nr_move, unsigned long max_load_move,
+		      struct sched_domain *sd, enum idle_type idle,
+		      int *all_pinned)
+{
+	struct nick_prio_array *array, *dst_array;
+	struct list_head *head, *curr;
+	int idx, pulled = 0, pinned = 0;
+	long rem_load_move;
+	task_t *tmp;
+
+	if (max_nr_move == 0 || max_load_move == 0)
+		goto out;
+
+	rem_load_move = max_load_move;
+	pinned = 1;
+
+	/*
+	 * We first consider expired tasks. Those will likely not be
+	 * executed in the near future, and they are most likely to
+	 * be cache-cold, thus switching CPUs has the least effect
+	 * on them.
+	 */
+	if (busiest->qu.nicksched.expired->nr_active) {
+		array = busiest->qu.nicksched.expired;
+		dst_array = this_rq->qu.nicksched.expired;
+	} else {
+		array = busiest->qu.nicksched.active;
+		dst_array = this_rq->qu.nicksched.active;
+	}
+
+new_array:
+	/* Start searching at priority 0: */
+	idx = 0;
+skip_bitmap:
+	if (!idx)
+		idx = sched_find_first_bit(array->bitmap);
+	else
+		idx = find_next_bit(array->bitmap, NICK_MAX_PRIO, idx);
+	if (idx >= NICK_MAX_PRIO) {
+		if (array == busiest->qu.nicksched.expired && busiest->qu.nicksched.active->nr_active) {
+			array = busiest->qu.nicksched.active;
+			dst_array = this_rq->qu.nicksched.active;
+			goto new_array;
+		}
+		goto out;
+	}
+
+	head = array->queue + idx;
+	curr = head->prev;
+skip_queue:
+	tmp = list_entry(curr, task_t, run_list);
+
+	curr = curr->prev;
+
+	if (tmp->load_weight > rem_load_move ||
+	    !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+
+#ifdef CONFIG_SCHEDSTATS
+	if (task_hot(tmp, busiest->timestamp_last_tick, sd))
+		schedstat_inc(sd, lb_hot_gained[idle]);
+#endif
+
+	pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
+	pulled++;
+	rem_load_move -= tmp->load_weight;
+
+	/*
+	 * We only want to steal up to the prescribed number of tasks
+	 * and the prescribed amount of weighted load.
+	 */
+	if (pulled < max_nr_move && rem_load_move > 0) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+out:
+	if (all_pinned)
+		*all_pinned = pinned;
+
+	return pulled;
+}
+#endif
+
+/*
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled.
+ *
+ * It also gets called by the fork code, when changing the parent's
+ * timeslices.
+ */
+static void nick_tick(struct task_struct *p, struct runqueue *rq, unsigned long long now)
+{
+	enum idle_type cpu_status;
+	int ts;
+
+	if (p == rq->idle) {
+		cpu_status = SCHED_IDLE;
+		goto out;
+	}
+
+	cpu_status = NOT_IDLE;
+	/* Task might have expired already, but not scheduled off yet */
+	if  (unlikely(p->sdu.nicksched.used_slice == -1))
+		goto out;
+
+	if (unlikely(p->policy == SCHED_FIFO))
+		goto out;
+
+	/* p was running during this tick. Update its time slice counter. */
+	p->sdu.nicksched.used_slice++;
+	ts = task_timeslice(p, rq);
+	if (unlikely(p->sdu.nicksched.used_slice >= ts)) {
+		p->sdu.nicksched.used_slice = -1;
+		set_tsk_need_resched(p);
+	}
+out:
+	rebalance_tick(smp_processor_id(), rq, cpu_status);
+}
+
+#ifdef CONFIG_SCHED_SMT
+/* these should never get called */
+static struct task_struct *nick_head_of_queue(union runqueue_queue *rqq)
+{
+	struct nick_prio_array *array = rqq->nicksched.active;
+
+	if (!array->nr_active)
+		array = rqq->nicksched.expired;
+	BUG_ON(!array->nr_active);
+
+	return list_entry(array->queue[sched_find_first_bit(array->bitmap)].next,
+		task_t, run_list);
+}
+
+static int nick_dependent_sleeper_trumps(const struct task_struct *p1,
+	const struct task_struct * p2, struct sched_domain *sd)
+{
+	return 0;
+}
+#endif
+
+/*
+ * schedule() is the main scheduler function.
+ */
+static void nick_schedule(void)
+{
+	long *switch_count;
+	struct nick_prio_array *array;
+	unsigned long run_time;
+	int cpu, idx;
+	struct task_struct *prev = current, *next;
+	struct list_head *queue;
+	struct runqueue *rq = this_rq();
+	unsigned long long now = sched_clock();
+
+	run_time = NS_TO_APPROX_US(now - prev->timestamp);
+	update_cpu_clock(prev, rq, now);
+	prev->timestamp = prev->last_ran = now;
+	add_task_time(prev, run_time, STIME_RUN);
+
+	spin_lock_irq(&rq->lock);
+
+	if (unlikely(prev->flags & PF_DEAD))
+		prev->state = EXIT_DEAD;
+
+	switch_count = &prev->nivcsw;
+	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+		switch_count = &prev->nvcsw;
+		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+				unlikely(signal_pending(prev))))
+			prev->state = TASK_RUNNING;
+		else {
+			if (prev->state == TASK_UNINTERRUPTIBLE)
+				rq->nr_uninterruptible++;
+			deactivate_task(prev, rq);
+			goto no_check_expired;
+		}
+	}
+
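+	/*
+	 * A task whose slice has run out (used_slice == -1) is requeued here:
+	 * RT tasks round-robin on the active array, everything else gets a
+	 * fresh priority and moves to the expired array, whose min_prio feeds
+	 * back into task_timeslice() scaling.
+	 */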
+	if (unlikely(prev->sdu.nicksched.used_slice == -1)) {
+		dequeue_task(prev, prev->sdu.nicksched.array);
+		if (rt_task(prev)) {
+			/* SCHED_FIFO can come in here too, from sched_yield */
+			array = rq->qu.nicksched.active;
+		} else {
+			array = rq->qu.nicksched.expired;
+			prev->prio = task_priority(prev);
+			if (prev->prio < rq->qu.nicksched.expired->min_prio)
+				rq->qu.nicksched.expired->min_prio = prev->prio;
+ 		}
+		enqueue_task(prev, array);
+		prev->sdu.nicksched.used_slice = 0;
+ 	}
+no_check_expired:
+
+	cpu = smp_processor_id();
+	if (unlikely(!rq->nr_running)) {
+		rq->qu.nicksched.array_sequence++;
+		idle_balance(cpu, rq);
+		if (!rq->nr_running) {
+			next = rq->idle;
+			rq->qu.nicksched.arrays[0].min_prio = NICK_MAX_PRIO;
+			rq->qu.nicksched.arrays[1].min_prio = NICK_MAX_PRIO;
+ 			goto switch_tasks;
+		}
+	}
+
+	array = rq->qu.nicksched.active;
+	if (unlikely(!array->nr_active)) {
+		/*
+		 * Switch the active and expired arrays.
+		 */
+		schedstat_inc(rq, sched_switch);
+		rq->qu.nicksched.array_sequence++;
+		rq->qu.nicksched.active = rq->qu.nicksched.expired;
+		rq->qu.nicksched.expired = array;
+		rq->qu.nicksched.expired->min_prio = NICK_MAX_PRIO;
+		array = rq->qu.nicksched.active;
+	}
+
+	idx = sched_find_first_bit(array->bitmap);
+	queue = array->queue + idx;
+	next = list_entry(queue->next, task_t, run_list);
+
+switch_tasks:
+	if (next == rq->idle)
+		schedstat_inc(rq, sched_goidle);
+	prefetch(next);
+	prefetch_stack(next);
+	clear_tsk_need_resched(prev);
+	rcu_qsctr_inc(cpu);
+
+	sched_info_switch(prev, next);
+	if (likely(prev != next)) {
+		next->timestamp = now;
+		rq->nr_switches++;
+		rq->curr = next;
+		++*switch_count;
+
+		prepare_task_switch(rq, next);
+		prev = context_switch(rq, prev, next);
+		barrier();
+		/*
+		 * this_rq must be evaluated again because prev may have moved
+		 * CPUs since it called schedule(), thus the 'rq' on its stack
+		 * frame will be invalid.
+		 */
+		finish_task_switch(this_rq(), prev);
+	} else
+		spin_unlock_irq(&rq->lock);
+}
+
+static void nick_set_normal_task_nice(task_t *p, long nice)
+{
+	struct nick_prio_array *array;
+	int old_prio, new_prio, delta;
+
+	array = p->sdu.nicksched.array;
+	if (array) {
+		dequeue_task(p, array);
+		dec_raw_weighted_load(task_rq(p), p);
+	}
+
+	old_prio = p->prio;
+	new_prio = NICE_TO_PRIO(nice);
+	delta = new_prio - old_prio;
+	p->static_prio = NICE_TO_PRIO(nice);
+	nick_set_load_weight(p);
+	p->prio = task_priority(p);
+
+	if (array) {
+		struct runqueue *rq = task_rq(p);
+
+		inc_raw_weighted_load(task_rq(p), p);
+		enqueue_task(p, array);
+		/*
+		 * If the task increased its priority or is running and
+		 * lowered its priority, then reschedule its CPU:
+		 */
+		if (delta < 0 || (delta > 0 && task_running(rq, p)))
+			resched_task(rq->curr);
+	}
+}
+
+static void nick_init_batch_task(task_t *p)
+{
+}
+
+/*
+ * setscheduler - change the scheduling policy and/or RT priority of a thread.
+ */
+static void nick_setscheduler(task_t *p, int policy, int prio)
+{
+	int oldprio;
+	struct nick_prio_array *array;
+	runqueue_t *rq = task_rq(p);
+
+	array = p->sdu.nicksched.array;
+	if (array)
+		deactivate_task(p, rq);
+	oldprio = p->prio;
+	__setscheduler(p, policy, prio);
+	if (policy == SCHED_FIFO || policy == SCHED_RR)
+		p->sdu.nicksched.used_slice = 0;
+
+	if (array) {
+		__activate_task(p, rq, rq->qu.nicksched.active);
+		/*
+		 * Reschedule if we are currently running on this runqueue and
+		 * our priority decreased, or if we are not currently running on
+		 * this runqueue and our priority is higher than the current's
+		 */
+		if (task_running(rq, p)) {
+			if (p->prio > oldprio)
+				resched_task(rq->curr);
+		} else if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+	}
+}
+
+/**
+ * nick_sys_yield - yield the current processor to other threads.
+ *
+ * This function yields the current CPU by marking the calling thread's
+ * timeslice as fully used, so that the next schedule() moves it to the
+ * expired array (RT tasks simply round-robin on the active array). If
+ * there are no other threads running on this CPU then this function
+ * will return.
+ */
+
+static long nick_sys_yield(void)
+{
+	local_irq_disable();
+#ifdef CONFIG_SCHEDSTATS
+	schedstat_inc(this_rq(), yld_cnt);
+#endif
+	current->sdu.nicksched.used_slice = -1;
+	set_need_resched();
+	local_irq_enable();
+
+	return 0;
+}
+
+static void nick_yield(void)
+{
+	set_current_state(TASK_RUNNING);
+	nick_sys_yield();
+#ifndef CONFIG_PREEMPT
+	/*
+	 * A kernel-space yield will not go through the reschedule-on-
+	 * syscall-return path, so we must call schedule() here.
+	 */
+	schedule();
+#endif
+}
+
+static void nick_init_idle(task_t *idle, int cpu)
+{
+	idle->sdu.nicksched.used_slice = 0;
+	idle->sdu.nicksched.array = NULL;
+	idle->prio = NICK_MAX_PRIO;
+}
+
+#ifdef CONFIG_SMP
+/* source and destination queues will be already locked */
+static void nick_migrate_queued_task(struct task_struct *p, int dest_cpu)
+{
+	struct runqueue *rq_src = task_rq(p);
+	struct runqueue *rq_dest = cpu_rq(dest_cpu);
+
+	/*
+	 * Sync timestamp with rq_dest's before activating.
+	 * The same thing could be achieved by doing this step
+	 * afterwards, and pretending it was a local activate.
+	 * This way is cleaner and logically correct.
+	 */
+	p->timestamp = p->timestamp - rq_src->timestamp_last_tick
+			+ rq_dest->timestamp_last_tick;
+	deactivate_task(p, rq_src);
+	set_task_cpu(p, dest_cpu);
+	activate_task(p, rq_dest, 0);
+	if (TASK_PREEMPTS_CURR(p, rq_dest))
+		resched_task(rq_dest->curr);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void nick_set_select_idle_first(struct runqueue *rq)
+{
+	__setscheduler(rq->idle, SCHED_FIFO, MAX_RT_PRIO-1);
+	/* Add idle task to the _front_ of its priority queue */
+	__activate_idle_task(rq->idle, rq);
+}
+
+static void nick_set_select_idle_last(struct runqueue *rq)
+{
+	deactivate_task(rq->idle, rq);
+	rq->idle->static_prio = NICK_MAX_PRIO;
+	__setscheduler(rq->idle, SCHED_NORMAL, 0);
+}
+
+static void nick_migrate_dead_tasks(unsigned int dead_cpu)
+{
+	unsigned arr, i;
+	struct runqueue *rq = cpu_rq(dead_cpu);
+
+	for (arr = 0; arr < 2; arr++) {
+		for (i = 0; i < NICK_MAX_PRIO; i++) {
+			struct list_head *list = &rq->qu.nicksched.arrays[arr].queue[i];
+			while (!list_empty(list))
+				migrate_dead(dead_cpu,
+					     list_entry(list->next, task_t,
+							run_list));
+		}
+	}
+}
+#endif
+#endif
+
+static void nick_sched_init(void)
+{
+	init_task.sdu.nicksched.used_slice = 0;
+	init_task.sdu.nicksched.array = NULL;
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static void nick_normalize_rt_task(struct task_struct *p)
+{
+	struct nick_prio_array *array;
+	unsigned long flags;
+	runqueue_t *rq;
+
+	rq = task_rq_lock(p, &flags);
+
+	array = p->sdu.nicksched.array;
+	if (array)
+		deactivate_task(p, rq);
+	__setscheduler(p, SCHED_NORMAL, 0);
+	if (array) {
+		__activate_task(p, rq, array);
+		resched_task(rq->curr);
+	}
+
+	task_rq_unlock(rq, &flags);
+}
+#endif
+
+static unsigned int nick_task_timeslice(const struct task_struct *p)
+{
+	return task_timeslice(p, task_rq(p));
+}
+
+#ifdef CONFIG_SYSFS
+#define no_change(a) (a)
+SCHED_DRV_SYSFS_UINT_RW(base_timeslice, no_change, no_change, min_base_timeslice, max_base_timeslice);
+
+static struct attribute *nick_attrs[] = {
+	&SCHED_DRV_SYSFS_ATTR(base_timeslice),
+	NULL,
+};
+#endif
+
+const struct sched_drv nick_sched_drv = {
+	.name = "nicksched",
+	.init_runqueue_queue = nick_init_runqueue_queue,
+	.set_oom_time_slice = nick_set_oom_time_slice,
+#ifdef CONFIG_SMP
+	.set_load_weight = nick_set_load_weight,
+#endif
+	.task_timeslice = nick_task_timeslice,
+	.wake_up_task = nick_wake_up_task,
+	.fork = nick_fork,
+	.wake_up_new_task = nick_wake_up_new_task,
+	.exit = nick_exit,
+#ifdef CONFIG_SMP
+	.move_tasks = nick_move_tasks,
+#endif
+	.tick = nick_tick,
+#ifdef CONFIG_SCHED_SMT
+	.head_of_queue = nick_head_of_queue,
+	.dependent_sleeper_trumps = nick_dependent_sleeper_trumps,
+#endif
+	.schedule = nick_schedule,
+	.set_normal_task_nice = nick_set_normal_task_nice,
+	.init_batch_task = nick_init_batch_task,
+	.setscheduler = nick_setscheduler,
+	.sys_yield = nick_sys_yield,
+	.yield = nick_yield,
+	.init_idle = nick_init_idle,
+	.sched_init = nick_sched_init,
+#ifdef CONFIG_SMP
+	.migrate_queued_task = nick_migrate_queued_task,
+#ifdef CONFIG_HOTPLUG_CPU
+	.set_select_idle_first = nick_set_select_idle_first,
+	.set_select_idle_last = nick_set_select_idle_last,
+	.migrate_dead_tasks = nick_migrate_dead_tasks,
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	.normalize_rt_task = nick_normalize_rt_task,
+#endif
+	.attrs = nick_attrs,
+};
diff -urN oldtree/kernel/sched.c newtree/kernel/sched.c
--- oldtree/kernel/sched.c	2006-03-08 18:48:02.972064750 +0000
+++ newtree/kernel/sched.c	2006-03-08 18:57:58.361274250 +0000
@@ -56,138 +56,15 @@
 
 #include <asm/unistd.h>
 
-/*
- * Convert user-nice values [ -20 ... 0 ... 19 ]
- * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
- * and back.
- */
-#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
-#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
-#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)
-
-/*
- * 'User priority' is the nice value converted to something we
- * can work with better when scaling various scheduler parameters,
- * it's a [ 0 ... 39 ] range.
- */
-#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
-#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
-#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))
-
-/*
- * Some helpers for converting nanosecond timing to jiffy resolution
- */
-#define NS_TO_JIFFIES(TIME)	((TIME) / (1000000000 / HZ))
-#define JIFFIES_TO_NS(TIME)	((TIME) * (1000000000 / HZ))
-
-/*
- * These are the 'tuning knobs' of the scheduler:
- *
- * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
- * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
- * Timeslices get refilled after they expire.
- */
-#define MIN_TIMESLICE		max(5 * HZ / 1000, 1)
-#define DEF_TIMESLICE		(100 * HZ / 1000)
-#define ON_RUNQUEUE_WEIGHT	 30
-#define CHILD_PENALTY		 95
-#define PARENT_PENALTY		100
-#define EXIT_WEIGHT		  3
-#define PRIO_BONUS_RATIO	 25
-#define MAX_BONUS		(MAX_USER_PRIO * PRIO_BONUS_RATIO / 100)
-#define INTERACTIVE_DELTA	  2
-#define MAX_SLEEP_AVG		(DEF_TIMESLICE * MAX_BONUS)
-#define STARVATION_LIMIT	(MAX_SLEEP_AVG)
-#define NS_MAX_SLEEP_AVG	(JIFFIES_TO_NS(MAX_SLEEP_AVG))
-
-/*
- * If a task is 'interactive' then we reinsert it in the active
- * array after it has expired its current timeslice. (it will not
- * continue to run immediately, it will still roundrobin with
- * other interactive tasks.)
- *
- * This part scales the interactivity limit depending on niceness.
- *
- * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
- * Here are a few examples of different nice levels:
- *
- *  TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
- *  TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
- *  TASK_INTERACTIVE(  0): [1,1,1,1,0,0,0,0,0,0,0]
- *  TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
- *  TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
- *
- * (the X axis represents the possible -5 ... 0 ... +5 dynamic
- *  priority range a task can explore, a value of '1' means the
- *  task is rated interactive.)
- *
- * Ie. nice +19 tasks can never get 'interactive' enough to be
- * reinserted into the active array. And only heavily CPU-hog nice -20
- * tasks will be expired. Default nice 0 tasks are somewhere between,
- * it takes some effort for them to get interactive, but it's not
- * too hard.
- */
-
-#define CURRENT_BONUS(p) \
-	(NS_TO_JIFFIES((p)->sleep_avg) * MAX_BONUS / \
-		MAX_SLEEP_AVG)
-
-#define GRANULARITY	(10 * HZ / 1000 ? : 1)
-
-#ifdef CONFIG_SMP
-#define TIMESLICE_GRANULARITY(p)	(GRANULARITY * \
-		(1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)) * \
-			num_online_cpus())
-#else
-#define TIMESLICE_GRANULARITY(p)	(GRANULARITY * \
-		(1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)))
-#endif
-
-#define SCALE(v1,v1_max,v2_max) \
-	(v1) * (v2_max) / (v1_max)
-
-#define DELTA(p) \
-	(SCALE(TASK_NICE(p) + 20, 40, MAX_BONUS) - 20 * MAX_BONUS / 40 + \
-		INTERACTIVE_DELTA)
-
-#define TASK_INTERACTIVE(p) \
-	((p)->prio <= (p)->static_prio - DELTA(p))
-
-#define INTERACTIVE_SLEEP(p) \
-	(JIFFIES_TO_NS(MAX_SLEEP_AVG * \
-		(MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))
-
-#define TASK_PREEMPTS_CURR(p, rq) \
-	((p)->prio < (rq)->curr->prio)
-
-/*
- * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
- * to time slice values: [800ms ... 100ms ... 5ms]
- *
- * The higher a thread's priority, the bigger timeslices
- * it gets during one round of execution. But even the lowest
- * priority thread gets MIN_TIMESLICE worth of execution time.
- */
+#include <linux/sched_runq.h>
+#include <linux/sched_pvt.h>
+#include <linux/sched_drv.h>
 
-#define SCALE_PRIO(x, prio) \
-	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE)
-
-static unsigned int static_prio_timeslice(int static_prio)
-{
-	if (static_prio < NICE_TO_PRIO(0))
-		return SCALE_PRIO(DEF_TIMESLICE*4, static_prio);
-	else
-		return SCALE_PRIO(DEF_TIMESLICE, static_prio);
-}
-
-static inline unsigned int task_timeslice(task_t *p)
+static inline unsigned int task_timeslice(const task_t *p)
 {
-	return static_prio_timeslice(p->static_prio);
+ 	return sched_drvp->task_timeslice(p);
 }
 
-#define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran)	\
-				< (long long) (sd)->cache_hot_time)
-
 void __put_task_struct_cb(struct rcu_head *rhp)
 {
 	__put_task_struct(container_of(rhp, struct task_struct, rcu));
@@ -198,87 +75,7 @@
 /*
  * These are the runqueue data structures:
  */
-
-#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
-
-typedef struct runqueue runqueue_t;
-
-struct prio_array {
-	unsigned int nr_active;
-	unsigned long bitmap[BITMAP_SIZE];
-	struct list_head queue[MAX_PRIO];
-};
-
-/*
- * This is the main, per-CPU runqueue data structure.
- *
- * Locking rule: those places that want to lock multiple runqueues
- * (such as the load balancing or the thread migration code), lock
- * acquire operations must be ordered by ascending &runqueue.
- */
-struct runqueue {
-	spinlock_t lock;
-
-	/*
-	 * nr_running and cpu_load should be in the same cacheline because
-	 * remote CPUs use both these fields when doing load calculation.
-	 */
-	unsigned long nr_running;
-#ifdef CONFIG_SMP
-	unsigned long raw_weighted_load;
-	unsigned long cpu_load[3];
-#endif
-	unsigned long long nr_switches;
-
-	/*
-	 * This is part of a global counter where only the total sum
-	 * over all CPUs matters. A task can increase this counter on
-	 * one CPU and if it got migrated afterwards it may decrease
-	 * it on another CPU. Always updated under the runqueue lock:
-	 */
-	unsigned long nr_uninterruptible;
-
-	unsigned long expired_timestamp;
-	unsigned long long timestamp_last_tick;
-	task_t *curr, *idle;
-	struct mm_struct *prev_mm;
-	prio_array_t *active, *expired, arrays[2];
-	int best_expired_prio;
-	atomic_t nr_iowait;
-
-#ifdef CONFIG_SMP
-	struct sched_domain *sd;
-
-	/* For active balancing */
-	int active_balance;
-	int push_cpu;
-
-	task_t *migration_thread;
-	struct list_head migration_queue;
-#endif
-
-#ifdef CONFIG_SCHEDSTATS
-	/* latency stats */
-	struct sched_info rq_sched_info;
-
-	/* sys_sched_yield() stats */
-	unsigned long yld_exp_empty;
-	unsigned long yld_act_empty;
-	unsigned long yld_both_empty;
-	unsigned long yld_cnt;
-
-	/* schedule() stats */
-	unsigned long sched_switch;
-	unsigned long sched_cnt;
-	unsigned long sched_goidle;
-
-	/* try_to_wake_up() stats */
-	unsigned long ttwu_cnt;
-	unsigned long ttwu_local;
-#endif
-};
-
-static DEFINE_PER_CPU(struct runqueue, runqueues);
+DEFINE_PER_CPU(struct runqueue, runqueues);
 
 /*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
@@ -290,108 +87,6 @@
 #define for_each_domain(cpu, domain) \
 for (domain = rcu_dereference(cpu_rq(cpu)->sd); domain; domain = domain->parent)
 
-#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
-#define this_rq()		(&__get_cpu_var(runqueues))
-#define task_rq(p)		cpu_rq(task_cpu(p))
-#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
-
-#ifndef prepare_arch_switch
-# define prepare_arch_switch(next)	do { } while (0)
-#endif
-#ifndef finish_arch_switch
-# define finish_arch_switch(prev)	do { } while (0)
-#endif
-
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
-static inline int task_running(runqueue_t *rq, task_t *p)
-{
-	return rq->curr == p;
-}
-
-static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
-{
-}
-
-static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
-{
-#ifdef CONFIG_DEBUG_SPINLOCK
-	/* this is a valid case when another task releases the spinlock */
-	rq->lock.owner = current;
-#endif
-	spin_unlock_irq(&rq->lock);
-}
-
-#else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline int task_running(runqueue_t *rq, task_t *p)
-{
-#ifdef CONFIG_SMP
-	return p->oncpu;
-#else
-	return rq->curr == p;
-#endif
-}
-
-static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
-{
-#ifdef CONFIG_SMP
-	/*
-	 * We can optimise this out completely for !SMP, because the
-	 * SMP rebalancing from interrupt is the only thing that cares
-	 * here.
-	 */
-	next->oncpu = 1;
-#endif
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-	spin_unlock_irq(&rq->lock);
-#else
-	spin_unlock(&rq->lock);
-#endif
-}
-
-static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
-{
-#ifdef CONFIG_SMP
-	/*
-	 * After ->oncpu is cleared, the task can be moved to a different CPU.
-	 * We must ensure this doesn't happen until the switch is completely
-	 * finished.
-	 */
-	smp_wmb();
-	prev->oncpu = 0;
-#endif
-#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-	local_irq_enable();
-#endif
-}
-#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
-
-/*
- * task_rq_lock - lock the runqueue a given task resides on and disable
- * interrupts.  Note the ordering: we can safely lookup the task_rq without
- * explicitly disabling preemption.
- */
-static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
-	__acquires(rq->lock)
-{
-	struct runqueue *rq;
-
-repeat_lock_task:
-	local_irq_save(*flags);
-	rq = task_rq(p);
-	spin_lock(&rq->lock);
-	if (unlikely(rq != task_rq(p))) {
-		spin_unlock_irqrestore(&rq->lock, *flags);
-		goto repeat_lock_task;
-	}
-	return rq;
-}
-
-static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
-	__releases(rq->lock)
-{
-	spin_unlock_irqrestore(&rq->lock, *flags);
-}
-
 #ifdef CONFIG_SCHEDSTATS
 /*
  * bump this up when changing the output format or the meaning of an existing
@@ -483,55 +178,18 @@
 	.release = single_release,
 };
 
-# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
 # define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
 #else /* !CONFIG_SCHEDSTATS */
-# define schedstat_inc(rq, field)	do { } while (0)
 # define schedstat_add(rq, field, amt)	do { } while (0)
 #endif
 
-/*
- * rq_lock - lock a given runqueue and disable interrupts.
- */
-static inline runqueue_t *this_rq_lock(void)
-	__acquires(rq->lock)
-{
-	runqueue_t *rq;
-
-	local_irq_disable();
-	rq = this_rq();
-	spin_lock(&rq->lock);
-
-	return rq;
-}
-
 #ifdef CONFIG_SCHEDSTATS
 /*
- * Called when a process is dequeued from the active array and given
- * the cpu.  We should note that with the exception of interactive
- * tasks, the expired queue will become the active queue after the active
- * queue is empty, without explicitly dequeuing and requeuing tasks in the
- * expired queue.  (Interactive tasks may be requeued directly to the
- * active queue, thus delaying tasks in the expired queue from running;
- * see scheduler_tick()).
- *
- * This function is only called from sched_info_arrive(), rather than
- * dequeue_task(). Even though a task may be queued and dequeued multiple
- * times as it is shuffled about, we're really interested in knowing how
- * long it was from the *first* time it was queued to the time that it
- * finally hit a cpu.
- */
-static inline void sched_info_dequeued(task_t *t)
-{
-	t->sched_info.last_queued = 0;
-}
-
-/*
  * Called when a task finally hits the cpu.  We can now calculate how
  * long it was waiting to run.  We also note when it began so that we
  * can keep stats on how long its timeslice is.
  */
-static void sched_info_arrive(task_t *t)
+void sched_info_arrive(task_t *t)
 {
 	unsigned long now = jiffies, diff = 0;
 	struct runqueue *rq = task_rq(t);
@@ -549,345 +207,25 @@
 	rq->rq_sched_info.run_delay += diff;
 	rq->rq_sched_info.pcnt++;
 }
-
-/*
- * Called when a process is queued into either the active or expired
- * array.  The time is noted and later used to determine how long we
- * had to wait for us to reach the cpu.  Since the expired queue will
- * become the active queue after active queue is empty, without dequeuing
- * and requeuing any tasks, we are interested in queuing to either. It
- * is unusual but not impossible for tasks to be dequeued and immediately
- * requeued in the same or another array: this can happen in sched_yield(),
- * set_user_nice(), and even load_balance() as it moves tasks from runqueue
- * to runqueue.
- *
- * This function is only called from enqueue_task(), but also only updates
- * the timestamp if it is already not set.  It's assumed that
- * sched_info_dequeued() will clear that stamp when appropriate.
- */
-static inline void sched_info_queued(task_t *t)
-{
-	if (!t->sched_info.last_queued)
-		t->sched_info.last_queued = jiffies;
-}
-
-/*
- * Called when a process ceases being the active-running process, either
- * voluntarily or involuntarily.  Now we can calculate how long we ran.
- */
-static inline void sched_info_depart(task_t *t)
-{
-	struct runqueue *rq = task_rq(t);
-	unsigned long diff = jiffies - t->sched_info.last_arrival;
-
-	t->sched_info.cpu_time += diff;
-
-	if (rq)
-		rq->rq_sched_info.cpu_time += diff;
-}
-
-/*
- * Called when tasks are switched involuntarily due, typically, to expiring
- * their time slice.  (This may also be called when switching to or from
- * the idle task.)  We are only called when prev != next.
- */
-static inline void sched_info_switch(task_t *prev, task_t *next)
-{
-	struct runqueue *rq = task_rq(prev);
-
-	/*
-	 * prev now departs the cpu.  It's not interesting to record
-	 * stats about how efficient we were at scheduling the idle
-	 * process, however.
-	 */
-	if (prev != rq->idle)
-		sched_info_depart(prev);
-
-	if (next != rq->idle)
-		sched_info_arrive(next);
-}
-#else
-#define sched_info_queued(t)		do { } while (0)
-#define sched_info_switch(t, next)	do { } while (0)
 #endif /* CONFIG_SCHEDSTATS */
 
-/*
- * Adding/removing a task to/from a priority array:
- */
-static void dequeue_task(struct task_struct *p, prio_array_t *array)
-{
-	array->nr_active--;
-	list_del(&p->run_list);
-	if (list_empty(array->queue + p->prio))
-		__clear_bit(p->prio, array->bitmap);
-}
-
-static void enqueue_task(struct task_struct *p, prio_array_t *array)
-{
-	sched_info_queued(p);
-	list_add_tail(&p->run_list, array->queue + p->prio);
-	__set_bit(p->prio, array->bitmap);
-	array->nr_active++;
-	p->array = array;
-}
-
-/*
- * Put task to the end of the run list without the overhead of dequeue
- * followed by enqueue.
- */
-static void requeue_task(struct task_struct *p, prio_array_t *array)
-{
-	list_move_tail(&p->run_list, array->queue + p->prio);
-}
-
-static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
-{
-	list_add(&p->run_list, array->queue + p->prio);
-	__set_bit(p->prio, array->bitmap);
-	array->nr_active++;
-	p->array = array;
-}
-
-/*
- * effective_prio - return the priority that is based on the static
- * priority but is modified by bonuses/penalties.
- *
- * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
- * into the -5 ... 0 ... +5 bonus/penalty range.
- *
- * We use 25% of the full 0...39 priority range so that:
- *
- * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
- * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
- *
- * Both properties are important to certain workloads.
- */
-static int effective_prio(task_t *p)
-{
-	int bonus, prio;
-
-	if (rt_task(p))
-		return p->prio;
-
-	bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
-
-	prio = p->static_prio - bonus;
-	if (prio < MAX_RT_PRIO)
-		prio = MAX_RT_PRIO;
-	if (prio > MAX_PRIO-1)
-		prio = MAX_PRIO-1;
-	return prio;
-}
-
 #ifdef CONFIG_SMP
 /*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution
  * of tasks with abnormal "nice" values across CPUs the contribution that
  * each task makes to its run queue's load is weighted according to its
- * scheduling class and "nice" value.  For SCHED_NORMAL tasks this is just a
- * scaled version of the new time slice allocation that they receive on time
- * slice expiry etc.
+ * scheduling class and "nice" value.
  */
-
-/*
- * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE
- * If static_prio_timeslice() is ever changed to break this assumption then
- * this code will need modification
- */
-#define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
-#define LOAD_WEIGHT(lp) \
-	(((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
-#define PRIO_TO_LOAD_WEIGHT(prio) \
-	LOAD_WEIGHT(static_prio_timeslice(prio))
-#define RTPRIO_TO_LOAD_WEIGHT(rp) \
-	(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
-
 static inline void set_load_weight(task_t *p)
 {
-	if (rt_task(p)) {
-		if (p == task_rq(p)->migration_thread)
-			/*
-			 * The migration thread does the actual balancing.
-			 * Giving its load any weight will skew balancing
-			 * adversely.
-			 */
-			p->load_weight = 0;
-		else
-			p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
-	} else
-		p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
-}
-
-static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p)
-{
-	rq->raw_weighted_load += p->load_weight;
-}
-
-static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p)
-{
-	rq->raw_weighted_load -= p->load_weight;
+	sched_drvp->set_load_weight(p);
 }
 #else
 static inline void set_load_weight(task_t *p)
 {
 }
-
-static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p)
-{
-}
-
-static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p)
-{
-}
 #endif
 
-static inline void inc_nr_running(task_t *p, runqueue_t *rq)
-{
-	rq->nr_running++;
-	inc_raw_weighted_load(rq, p);
-}
-
-static inline void dec_nr_running(task_t *p, runqueue_t *rq)
-{
-	rq->nr_running--;
-	dec_raw_weighted_load(rq, p);
-}
-
-/*
- * __activate_task - move a task to the runqueue.
- */
-static inline void __activate_task(task_t *p, runqueue_t *rq)
-{
-	enqueue_task(p, rq->active);
-	inc_nr_running(p, rq);
-}
-
-/*
- * __activate_idle_task - move idle task to the _front_ of runqueue.
- */
-static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
-{
-	enqueue_task_head(p, rq->active);
-	inc_nr_running(p, rq);
-}
-
-static int recalc_task_prio(task_t *p, unsigned long long now)
-{
-	/* Caller must always ensure 'now >= p->timestamp' */
-	unsigned long long __sleep_time = now - p->timestamp;
-	unsigned long sleep_time;
-
-	if (unlikely(p->policy == SCHED_BATCH))
-		sleep_time = 0;
-	else {
-		if (__sleep_time > NS_MAX_SLEEP_AVG)
-			sleep_time = NS_MAX_SLEEP_AVG;
-		else
-			sleep_time = (unsigned long)__sleep_time;
-	}
-
-	if (likely(sleep_time > 0)) {
-		/*
-		 * User tasks that sleep a long time are categorised as
-		 * idle. They will only have their sleep_avg increased to a
-		 * level that makes them just interactive priority to stay
-		 * active yet prevent them suddenly becoming cpu hogs and
-		 * starving other processes.
-		 */
-		if (p->mm && sleep_time > INTERACTIVE_SLEEP(p)) {
-				unsigned long ceiling;
-
-				ceiling = JIFFIES_TO_NS(MAX_SLEEP_AVG -
-					DEF_TIMESLICE);
-				if (p->sleep_avg < ceiling)
-					p->sleep_avg = ceiling;
-		} else {
-
-			/*
-			 * The lower the sleep avg a task has the more
-			 * rapidly it will rise with sleep time. This enables
-			 * tasks to rapidly recover to a low latency priority.
-			 * If a task was sleeping with the noninteractive
-			 * label do not apply this non-linear boost
-			 */
-			if (p->sleep_type != SLEEP_NONINTERACTIVE || !p->mm)
-				sleep_time *=
-					(MAX_BONUS - CURRENT_BONUS(p)) ? : 1;
-
-			/*
-			 * This code gives a bonus to interactive tasks.
-			 *
-			 * The boost works by updating the 'average sleep time'
-			 * value here, based on ->timestamp. The more time a
-			 * task spends sleeping, the higher the average gets -
-			 * and the higher the priority boost gets as well.
-			 */
-			p->sleep_avg += sleep_time;
-
-			if (p->sleep_avg > NS_MAX_SLEEP_AVG)
-				p->sleep_avg = NS_MAX_SLEEP_AVG;
-		}
-	}
-
-	return effective_prio(p);
-}
-
-/*
- * activate_task - move a task to the runqueue and do priority recalculation
- *
- * Update all the scheduling statistics stuff. (sleep average
- * calculation, priority modifiers, etc.)
- */
-static void activate_task(task_t *p, runqueue_t *rq, int local)
-{
-	unsigned long long now;
-
-	now = sched_clock();
-#ifdef CONFIG_SMP
-	if (!local) {
-		/* Compensate for drifting sched_clock */
-		runqueue_t *this_rq = this_rq();
-		now = (now - this_rq->timestamp_last_tick)
-			+ rq->timestamp_last_tick;
-	}
-#endif
-
-	if (!rt_task(p))
-		p->prio = recalc_task_prio(p, now);
-
-	if (p->sleep_type != SLEEP_NONINTERACTIVE) {
-		/*
-		 * Tasks which were woken up by interrupts (ie. hw events)
-		 * are most likely of interactive nature. So we give them
-		 * the credit of extending their sleep time to the period
-		 * of time they spend on the runqueue, waiting for execution
-		 * on a CPU, first time around:
-		 */
-		if (in_interrupt())
-			p->sleep_type = SLEEP_INTERRUPTED;
-		else {
-			/*
-			 * Normal first-time wakeups get a credit too for
-			 * on-runqueue time, but it will be weighted down:
-			 */
-			p->sleep_type = SLEEP_INTERACTIVE;
-		}
-	}
-	p->timestamp = now;
-
-	__activate_task(p, rq);
-}
-
-/*
- * deactivate_task - remove a task from the runqueue.
- */
-static void deactivate_task(struct task_struct *p, runqueue_t *rq)
-{
-	dec_nr_running(p, rq);
-	dequeue_task(p, p->array);
-	p->array = NULL;
-}
-
 /*
  * resched_task - mark a task 'to be rescheduled now'.
  *
@@ -896,7 +234,7 @@
  * the target CPU.
  */
 #ifdef CONFIG_SMP
-static void resched_task(task_t *p)
+void resched_task(task_t *p)
 {
 	int cpu;
 
@@ -912,15 +250,9 @@
 		return;
 
 	/* NEED_RESCHED must be visible before we test POLLING_NRFLAG */
-	smp_mb();
-	if (!test_tsk_thread_flag(p, TIF_POLLING_NRFLAG))
-		smp_send_reschedule(cpu);
-}
-#else
-static inline void resched_task(task_t *p)
-{
-	assert_spin_locked(&task_rq(p)->lock);
-	set_tsk_need_resched(p);
+	smp_mb();
+	if (!test_tsk_thread_flag(p, TIF_POLLING_NRFLAG))
+		smp_send_reschedule(cpu);
 }
 #endif
 
@@ -955,7 +287,7 @@
 	 * If the task is not on a runqueue (and not running), then
 	 * it is sufficient to simply update the task's cpu field.
 	 */
-	if (!p->array && !task_running(rq, p)) {
+	if (!task_is_queued(p) && !task_running(rq, p)) {
 		set_task_cpu(p, dest_cpu);
 		return 0;
 	}
@@ -985,7 +317,7 @@
 repeat:
 	rq = task_rq_lock(p, &flags);
 	/* Must be off runqueue entirely, not preempted. */
-	if (unlikely(p->array || task_running(rq, p))) {
+	if (unlikely(task_is_queued(p) || task_running(rq, p))) {
 		/* If it's preempted, we yield.  It could be a while. */
 		preempted = !task_running(rq, p);
 		task_rq_unlock(rq, &flags);
@@ -1267,7 +599,7 @@
 	if (!(old_state & state))
 		goto out;
 
-	if (p->array)
+	if (task_is_queued(p))
 		goto out_running;
 
 	cpu = task_cpu(p);
@@ -1358,7 +690,7 @@
 		old_state = p->state;
 		if (!(old_state & state))
 			goto out;
-		if (p->array)
+		if (task_is_queued(p))
 			goto out_running;
 
 		this_cpu = smp_processor_id();
@@ -1367,38 +699,7 @@
 
 out_activate:
 #endif /* CONFIG_SMP */
-	if (old_state == TASK_UNINTERRUPTIBLE) {
-		rq->nr_uninterruptible--;
-		/*
-		 * Tasks waking from uninterruptible sleep are likely
-		 * to be sleeping involuntarily on I/O and are otherwise
-		 * cpu bound so label them as noninteractive.
-		 */
-		p->sleep_type = SLEEP_NONINTERACTIVE;
-	} else
-
-	/*
-	 * Tasks that have marked their sleep as noninteractive get
-	 * woken up with their sleep average not weighted in an
-	 * interactive way.
-	 */
-		if (old_state & TASK_NONINTERACTIVE)
-			p->sleep_type = SLEEP_NONINTERACTIVE;
-
-
-	activate_task(p, rq, cpu == this_cpu);
-	/*
-	 * Sync wakeups (i.e. those types of wakeups where the waker
-	 * has indicated that it will leave the CPU in short order)
-	 * don't trigger a preemption, if the woken up task will run on
-	 * this cpu. (in this case the 'I will reschedule' promise of
-	 * the waker guarantees that the freshly woken up task is going
-	 * to be considered on this CPU.)
-	 */
-	if (!sync || cpu != this_cpu) {
-		if (TASK_PREEMPTS_CURR(p, rq))
-			resched_task(rq->curr);
-	}
+	sched_drvp->wake_up_task(p, rq, old_state, sync);
 	success = 1;
 
 out_running:
@@ -1443,7 +744,6 @@
 	 */
 	p->state = TASK_RUNNING;
 	INIT_LIST_HEAD(&p->run_list);
-	p->array = NULL;
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
@@ -1454,30 +754,7 @@
 	/* Want to start with kernel preemption disabled. */
 	task_thread_info(p)->preempt_count = 1;
 #endif
-	/*
-	 * Share the timeslice between parent and child, thus the
-	 * total amount of pending timeslices in the system doesn't change,
-	 * resulting in more scheduling fairness.
-	 */
-	local_irq_disable();
-	p->time_slice = (current->time_slice + 1) >> 1;
-	/*
-	 * The remainder of the first timeslice might be recovered by
-	 * the parent if the child exits early enough.
-	 */
-	p->first_time_slice = 1;
-	current->time_slice >>= 1;
-	p->timestamp = sched_clock();
-	if (unlikely(!current->time_slice)) {
-		/*
-		 * This case is rare, it happens when the parent has only
-		 * a single jiffy left from its timeslice. Taking the
-		 * runqueue lock is not a problem.
-		 */
-		current->time_slice = 1;
-		scheduler_tick();
-	}
-	local_irq_enable();
+	sched_drvp->fork(p);
 	put_cpu();
 }
 
@@ -1490,174 +767,12 @@
  */
 void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
 {
-	unsigned long flags;
-	int this_cpu, cpu;
-	runqueue_t *rq, *this_rq;
-
-	rq = task_rq_lock(p, &flags);
-	BUG_ON(p->state != TASK_RUNNING);
-	this_cpu = smp_processor_id();
-	cpu = task_cpu(p);
-
-	/*
-	 * We decrease the sleep average of forking parents
-	 * and children as well, to keep max-interactive tasks
-	 * from forking tasks that are max-interactive. The parent
-	 * (current) is done further down, under its lock.
-	 */
-	p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
-		CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
-
-	p->prio = effective_prio(p);
-
-	if (likely(cpu == this_cpu)) {
-		if (!(clone_flags & CLONE_VM)) {
-			/*
-			 * The VM isn't cloned, so we're in a good position to
-			 * do child-runs-first in anticipation of an exec. This
-			 * usually avoids a lot of COW overhead.
-			 */
-			if (unlikely(!current->array))
-				__activate_task(p, rq);
-			else {
-				p->prio = current->prio;
-				list_add_tail(&p->run_list, &current->run_list);
-				p->array = current->array;
-				p->array->nr_active++;
-				inc_nr_running(p, rq);
-			}
-			set_need_resched();
-		} else
-			/* Run child last */
-			__activate_task(p, rq);
-		/*
-		 * We skip the following code due to cpu == this_cpu
-	 	 *
-		 *   task_rq_unlock(rq, &flags);
-		 *   this_rq = task_rq_lock(current, &flags);
-		 */
-		this_rq = rq;
-	} else {
-		this_rq = cpu_rq(this_cpu);
-
-		/*
-		 * Not the local CPU - must adjust timestamp. This should
-		 * get optimised away in the !CONFIG_SMP case.
-		 */
-		p->timestamp = (p->timestamp - this_rq->timestamp_last_tick)
-					+ rq->timestamp_last_tick;
-		__activate_task(p, rq);
-		if (TASK_PREEMPTS_CURR(p, rq))
-			resched_task(rq->curr);
-
-		/*
-		 * Parent and child are on different CPUs, now get the
-		 * parent runqueue to update the parent's ->sleep_avg:
-		 */
-		task_rq_unlock(rq, &flags);
-		this_rq = task_rq_lock(current, &flags);
-	}
-	current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
-		PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
-	task_rq_unlock(this_rq, &flags);
+	sched_drvp->wake_up_new_task(p, clone_flags);
 }
 
-/*
- * Potentially available exiting-child timeslices are
- * retrieved here - this way the parent does not get
- * penalized for creating too many threads.
- *
- * (this cannot be used to 'generate' timeslices
- * artificially, because any timeslice recovered here
- * was given away by the parent in the first place.)
- */
 void fastcall sched_exit(task_t *p)
 {
-	unsigned long flags;
-	runqueue_t *rq;
-
-	/*
-	 * If the child was a (relative-) CPU hog then decrease
-	 * the sleep_avg of the parent as well.
-	 */
-	rq = task_rq_lock(p->parent, &flags);
-	if (p->first_time_slice && task_cpu(p) == task_cpu(p->parent)) {
-		p->parent->time_slice += p->time_slice;
-		if (unlikely(p->parent->time_slice > task_timeslice(p)))
-			p->parent->time_slice = task_timeslice(p);
-	}
-	if (p->sleep_avg < p->parent->sleep_avg)
-		p->parent->sleep_avg = p->parent->sleep_avg /
-		(EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg /
-		(EXIT_WEIGHT + 1);
-	task_rq_unlock(rq, &flags);
-}
-
-/**
- * prepare_task_switch - prepare to switch tasks
- * @rq: the runqueue preparing to switch
- * @next: the task we are going to switch to.
- *
- * This is called with the rq lock held and interrupts off. It must
- * be paired with a subsequent finish_task_switch after the context
- * switch.
- *
- * prepare_task_switch sets up locking and calls architecture specific
- * hooks.
- */
-static inline void prepare_task_switch(runqueue_t *rq, task_t *next)
-{
-	prepare_lock_switch(rq, next);
-	prepare_arch_switch(next);
-}
-
-/**
- * finish_task_switch - clean up after a task-switch
- * @rq: runqueue associated with task-switch
- * @prev: the thread we just switched away from.
- *
- * finish_task_switch must be called after the context switch, paired
- * with a prepare_task_switch call before the context switch.
- * finish_task_switch will reconcile locking set up by prepare_task_switch,
- * and do any other architecture-specific cleanup actions.
- *
- * Note that we may have delayed dropping an mm in context_switch(). If
- * so, we finish that here outside of the runqueue lock.  (Doing it
- * with the lock held can cause deadlocks; see schedule() for
- * details.)
- */
-static inline void finish_task_switch(runqueue_t *rq, task_t *prev)
-	__releases(rq->lock)
-{
-	struct mm_struct *mm = rq->prev_mm;
-	unsigned long prev_task_flags;
-
-	rq->prev_mm = NULL;
-
-	/*
-	 * A task struct has one reference for the use as "current".
-	 * If a task dies, then it sets EXIT_ZOMBIE in tsk->exit_state and
-	 * calls schedule one last time. The schedule call will never return,
-	 * and the scheduled task must drop that reference.
-	 * The test for EXIT_ZOMBIE must occur while the runqueue locks are
-	 * still held, otherwise prev could be scheduled on another cpu, die
-	 * there before we look at prev->state, and then the reference would
-	 * be dropped twice.
-	 *		Manfred Spraul <manfred@colorfullife.com>
-	 */
-	prev_task_flags = prev->flags;
-	finish_arch_switch(prev);
-	finish_lock_switch(rq, prev);
-	if (mm)
-		mmdrop(mm);
-	if (unlikely(prev_task_flags & PF_DEAD)) {
-		/*
-		 * Remove function-return probe instances associated with this
-		 * task and put them back on the free list.
-	 	 */
-		kprobe_flush_task(prev);
-		put_task_struct(prev);
-	}
+	sched_drvp->exit(p);
 }
 
 /**
@@ -1678,35 +793,6 @@
 }
 
 /*
- * context_switch - switch to the new MM and the new
- * thread's register state.
- */
-static inline
-task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
-{
-	struct mm_struct *mm = next->mm;
-	struct mm_struct *oldmm = prev->active_mm;
-
-	if (unlikely(!mm)) {
-		next->active_mm = oldmm;
-		atomic_inc(&oldmm->mm_count);
-		enter_lazy_tlb(oldmm, next);
-	} else
-		switch_mm(oldmm, mm, next);
-
-	if (unlikely(!prev->mm)) {
-		prev->active_mm = NULL;
-		WARN_ON(rq->prev_mm);
-		rq->prev_mm = oldmm;
-	}
-
-	/* Here we just switch the register state and the stack. */
-	switch_to(prev, next, prev);
-
-	return prev;
-}
-
-/*
  * nr_running, nr_uninterruptible and nr_context_switches:
  *
  * externally visible scheduler statistics: current number of runnable
@@ -1867,32 +953,8 @@
 }
 
 /*
- * pull_task - move a task from a remote runqueue to the local runqueue.
- * Both runqueues must be locked.
- */
-static
-void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
-	       runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
-{
-	dequeue_task(p, src_array);
-	dec_nr_running(p, src_rq);
-	set_task_cpu(p, this_cpu);
-	inc_nr_running(p, this_rq);
-	enqueue_task(p, this_array);
-	p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
-				+ this_rq->timestamp_last_tick;
-	/*
-	 * Note that idle threads have a prio of MAX_PRIO, for this test
-	 * to be always true for them.
-	 */
-	if (TASK_PREEMPTS_CURR(p, this_rq))
-		resched_task(this_rq->curr);
-}
-
-/*
  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
  */
-static
 int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
 		     struct sched_domain *sd, enum idle_type idle,
 		     int *all_pinned)
@@ -1936,84 +998,8 @@
 		      struct sched_domain *sd, enum idle_type idle,
 		      int *all_pinned)
 {
-	prio_array_t *array, *dst_array;
-	struct list_head *head, *curr;
-	int idx, pulled = 0, pinned = 0;
-	long rem_load_move;
-	task_t *tmp;
-
-	if (max_nr_move == 0 || max_load_move == 0)
-		goto out;
-
-	rem_load_move = max_load_move;
-	pinned = 1;
-
-	/*
-	 * We first consider expired tasks. Those will likely not be
-	 * executed in the near future, and they are most likely to
-	 * be cache-cold, thus switching CPUs has the least effect
-	 * on them.
-	 */
-	if (busiest->expired->nr_active) {
-		array = busiest->expired;
-		dst_array = this_rq->expired;
-	} else {
-		array = busiest->active;
-		dst_array = this_rq->active;
-	}
+	int pulled = sched_drvp->move_tasks(this_rq, this_cpu, busiest,
+					    max_nr_move, max_load_move,
+					    sd, idle, all_pinned);
 
-new_array:
-	/* Start searching at priority 0: */
-	idx = 0;
-skip_bitmap:
-	if (!idx)
-		idx = sched_find_first_bit(array->bitmap);
-	else
-		idx = find_next_bit(array->bitmap, MAX_PRIO, idx);
-	if (idx >= MAX_PRIO) {
-		if (array == busiest->expired && busiest->active->nr_active) {
-			array = busiest->active;
-			dst_array = this_rq->active;
-			goto new_array;
-		}
-		goto out;
-	}
-
-	head = array->queue + idx;
-	curr = head->prev;
-skip_queue:
-	tmp = list_entry(curr, task_t, run_list);
-
-	curr = curr->prev;
-
-	if (tmp->load_weight > rem_load_move ||
-	    !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
-		if (curr != head)
-			goto skip_queue;
-		idx++;
-		goto skip_bitmap;
-	}
-
-#ifdef CONFIG_SCHEDSTATS
-	if (task_hot(tmp, busiest->timestamp_last_tick, sd))
-		schedstat_inc(sd, lb_hot_gained[idle]);
-#endif
-
-	pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
-	pulled++;
-	rem_load_move -= tmp->load_weight;
-
-	/*
-	 * We only want to steal up to the prescribed number of tasks
-	 * and the prescribed amount of weighted load.
-	 */
-	if (pulled < max_nr_move && rem_load_move > 0) {
-		if (curr != head)
-			goto skip_queue;
-		idx++;
-		goto skip_bitmap;
-	}
-out:
 	/*
 	 * Right now, this is the only place pull_task() is called,
 	 * so we can safely collect pull_task() stats here rather than
@@ -2021,8 +1007,6 @@
 	 */
 	schedstat_add(sd, lb_gained[idle], pulled);
 
-	if (all_pinned)
-		*all_pinned = pinned;
 	return pulled;
 }
 
@@ -2415,7 +1399,7 @@
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  */
-static void idle_balance(int this_cpu, runqueue_t *this_rq)
+void idle_balance(int this_cpu, runqueue_t *this_rq)
 {
 	struct sched_domain *sd;
 
@@ -2471,7 +1455,7 @@
 	schedstat_inc(sd, alb_cnt);
 
 	if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
-			RTPRIO_TO_LOAD_WEIGHT(100), sd, SCHED_IDLE, NULL))
+			ULONG_MAX, sd, SCHED_IDLE, NULL))
 		schedstat_inc(sd, alb_pushed);
 	else
 		schedstat_inc(sd, alb_failed);
@@ -2491,8 +1475,7 @@
 /* Don't have all balancing operations going off at once */
 #define CPU_OFFSET(cpu) (HZ * cpu / NR_CPUS)
 
-static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
-			   enum idle_type idle)
+void rebalance_tick(int this_cpu, runqueue_t *this_rq, enum idle_type idle)
 {
 	unsigned long old_load, this_load;
 	unsigned long j = jiffies + CPU_OFFSET(this_cpu);
@@ -2543,22 +1526,13 @@
 		}
 	}
 }
-#else
-/*
- * on UP we do not need to balance between CPUs:
- */
-static inline void rebalance_tick(int cpu, runqueue_t *rq, enum idle_type idle)
-{
-}
-static inline void idle_balance(int cpu, runqueue_t *rq)
-{
-}
 #endif
 
-static inline int wake_priority_sleeper(runqueue_t *rq)
+#ifdef CONFIG_SCHED_SMT
+int wake_priority_sleeper(runqueue_t *rq)
 {
 	int ret = 0;
-#ifdef CONFIG_SCHED_SMT
+
 	spin_lock(&rq->lock);
 	/*
 	 * If an SMT sibling task has been put to sleep for priority
@@ -2569,26 +1543,16 @@
 		ret = 1;
 	}
 	spin_unlock(&rq->lock);
-#endif
+
 	return ret;
 }
+#endif
 
 DEFINE_PER_CPU(struct kernel_stat, kstat);
 
 EXPORT_PER_CPU_SYMBOL(kstat);
 
 /*
- * This is called on clock ticks and on context switches.
- * Bank in p->sched_time the ns elapsed since the last tick or switch.
- */
-static inline void update_cpu_clock(task_t *p, runqueue_t *rq,
-				    unsigned long long now)
-{
-	unsigned long long last = max(p->timestamp, rq->timestamp_last_tick);
-	p->sched_time += now - last;
-}
-
-/*
  * Return current->sched_time plus any more ns on the sched_clock
  * that have not yet been banked.
  */
@@ -2604,22 +1568,6 @@
 }
 
 /*
- * We place interactive tasks back into the active array, if possible.
- *
- * To guarantee that this does not starve expired tasks we ignore the
- * interactivity of a task if the first expired task had to wait more
- * than a 'reasonable' amount of time. This deadline timeout is
- * load-dependent, as the frequency of array switched decreases with
- * increasing number of running tasks. We also ignore the interactivity
- * if a better static_prio task has expired:
- */
-#define EXPIRED_STARVING(rq) \
-	((STARVATION_LIMIT && ((rq)->expired_timestamp && \
-		(jiffies - (rq)->expired_timestamp >= \
-			STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
-			((rq)->curr->static_prio > (rq)->best_expired_prio))
-
-/*
  * Account user cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @hardirq_offset: the offset to subtract from hardirq_count()
@@ -2701,7 +1649,6 @@
  */
 void scheduler_tick(void)
 {
-	int cpu = smp_processor_id();
 	runqueue_t *rq = this_rq();
 	task_t *p = current;
 	unsigned long long now = sched_clock();
@@ -2710,86 +1657,7 @@
 
 	rq->timestamp_last_tick = now;
 
-	if (p == rq->idle) {
-		if (wake_priority_sleeper(rq))
-			goto out;
-		rebalance_tick(cpu, rq, SCHED_IDLE);
-		return;
-	}
-
-	/* Task might have expired already, but not scheduled off yet */
-	if (p->array != rq->active) {
-		set_tsk_need_resched(p);
-		goto out;
-	}
-	spin_lock(&rq->lock);
-	/*
-	 * The task was running during this tick - update the
-	 * time slice counter. Note: we do not update a thread's
-	 * priority until it either goes to sleep or uses up its
-	 * timeslice. This makes it possible for interactive tasks
-	 * to use up their timeslices at their highest priority levels.
-	 */
-	if (rt_task(p)) {
-		/*
-		 * RR tasks need a special form of timeslice management.
-		 * FIFO tasks have no timeslices.
-		 */
-		if ((p->policy == SCHED_RR) && !--p->time_slice) {
-			p->time_slice = task_timeslice(p);
-			p->first_time_slice = 0;
-			set_tsk_need_resched(p);
-
-			/* put it at the end of the queue: */
-			requeue_task(p, rq->active);
-		}
-		goto out_unlock;
-	}
-	if (!--p->time_slice) {
-		dequeue_task(p, rq->active);
-		set_tsk_need_resched(p);
-		p->prio = effective_prio(p);
-		p->time_slice = task_timeslice(p);
-		p->first_time_slice = 0;
-
-		if (!rq->expired_timestamp)
-			rq->expired_timestamp = jiffies;
-		if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
-			enqueue_task(p, rq->expired);
-			if (p->static_prio < rq->best_expired_prio)
-				rq->best_expired_prio = p->static_prio;
-		} else
-			enqueue_task(p, rq->active);
-	} else {
-		/*
-		 * Prevent a too long timeslice allowing a task to monopolize
-		 * the CPU. We do this by splitting up the timeslice into
-		 * smaller pieces.
-		 *
-		 * Note: this does not mean the task's timeslices expire or
-		 * get lost in any way, they just might be preempted by
-		 * another task of equal priority. (one with higher
-		 * priority would have preempted this task already.) We
-		 * requeue this task to the end of the list on this priority
-		 * level, which is in essence a round-robin of tasks with
-		 * equal priority.
-		 *
-		 * This only applies to tasks in the interactive
-		 * delta range with at least TIMESLICE_GRANULARITY to requeue.
-		 */
-		if (TASK_INTERACTIVE(p) && !((task_timeslice(p) -
-			p->time_slice) % TIMESLICE_GRANULARITY(p)) &&
-			(p->time_slice >= TIMESLICE_GRANULARITY(p)) &&
-			(p->array == rq->active)) {
-
-			requeue_task(p, rq->active);
-			set_tsk_need_resched(p);
-		}
-	}
-out_unlock:
-	spin_unlock(&rq->lock);
-out:
-	rebalance_tick(cpu, rq, NOT_IDLE);
+	sched_drvp->tick(p, rq, now);
 }
 
 #ifdef CONFIG_SCHED_SMT
@@ -2800,7 +1668,7 @@
 		resched_task(rq->idle);
 }
 
-static void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
+void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
 {
 	struct sched_domain *tmp, *sd = NULL;
 	cpumask_t sibling_map;
@@ -2844,21 +1712,14 @@
 	 */
 }
 
-/*
- * number of 'lost' timeslices this task wont be able to fully
- * utilize, if another task runs on a sibling. This models the
- * slowdown effect of other tasks running on siblings:
- */
-static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd)
-{
-	return p->time_slice * (100 - sd->per_cpu_gain) / 100;
-}
+#define SMT_RT_TIME_CHUNK (100 * HZ / 1000)
+#define dependent_sleeper_trumps(p1, p2, sd) \
+	sched_drvp->dependent_sleeper_trumps(p1, p2, sd)
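+
+/*
+ * With HZ=1000, SMT_RT_TIME_CHUNK works out to 100 jiffies, so the
+ * (jiffies % SMT_RT_TIME_CHUNK) tests below let a non-RT task run next to
+ * an RT sibling only for roughly the first per_cpu_gain ticks of every
+ * 100-tick window, i.e. per_cpu_gain% of the time.
+ */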
 
-static int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
+int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
 {
 	struct sched_domain *tmp, *sd = NULL;
 	cpumask_t sibling_map;
-	prio_array_t *array;
 	int ret = 0, i;
 	task_t *p;
 
@@ -2885,13 +1746,8 @@
 	 */
 	if (!this_rq->nr_running)
 		goto out_unlock;
-	array = this_rq->active;
-	if (!array->nr_active)
-		array = this_rq->expired;
-	BUG_ON(!array->nr_active);
 
-	p = list_entry(array->queue[sched_find_first_bit(array->bitmap)].next,
-		task_t, run_list);
+	p = sched_drvp->head_of_queue(&this_rq->qu);
 
 	for_each_cpu_mask(i, sibling_map) {
 		runqueue_t *smt_rq = cpu_rq(i);
@@ -2914,13 +1770,13 @@
 			 * With real time tasks we run non-rt tasks only
 			 * per_cpu_gain% of the time.
 			 */
-			if ((jiffies % DEF_TIMESLICE) >
-				(sd->per_cpu_gain * DEF_TIMESLICE / 100))
+			if ((jiffies % SMT_RT_TIME_CHUNK) >
+				(sd->per_cpu_gain * SMT_RT_TIME_CHUNK / 100))
 					ret = 1;
 		} else
 			if (smt_curr->static_prio < p->static_prio &&
 				!TASK_PREEMPTS_CURR(p, smt_rq) &&
-				smt_slice(smt_curr, sd) > task_timeslice(p))
+				dependent_sleeper_trumps(smt_curr, p, sd))
 					ret = 1;
 
 check_smt_task:
@@ -2938,12 +1794,12 @@
 		 * sleep for priority reasons to see if it should run now.
 		 */
 		if (rt_task(p)) {
-			if ((jiffies % DEF_TIMESLICE) >
-				(sd->per_cpu_gain * DEF_TIMESLICE / 100))
+			if ((jiffies % SMT_RT_TIME_CHUNK) >
+				(sd->per_cpu_gain * SMT_RT_TIME_CHUNK / 100))
 					resched_task(smt_curr);
 		} else {
 			if (TASK_PREEMPTS_CURR(p, smt_rq) &&
-				smt_slice(p, sd) > task_timeslice(smt_curr))
+				dependent_sleeper_trumps(p, smt_curr, sd))
 					resched_task(smt_curr);
 			else
 				wakeup_busy_runqueue(smt_rq);
@@ -2954,15 +1810,6 @@
 		spin_unlock(&cpu_rq(i)->lock);
 	return ret;
 }
-#else
-static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
-{
-}
-
-static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
-{
-	return 0;
-}
 #endif
 
 #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
@@ -2997,25 +1844,13 @@
 
 #endif
 
-static inline int interactive_sleep(enum sleep_type sleep_type)
-{
-	return (sleep_type == SLEEP_INTERACTIVE ||
-		sleep_type == SLEEP_INTERRUPTED);
-}
-
 /*
  * schedule() is the main scheduler function.
  */
 asmlinkage void __sched schedule(void)
 {
-	long *switch_count;
-	task_t *prev, *next;
+	task_t *prev;
 	runqueue_t *rq;
-	prio_array_t *array;
-	struct list_head *queue;
-	unsigned long long now;
-	unsigned long run_time;
-	int cpu, idx, new_prio;
 
 	/*
 	 * Test if we are atomic.  Since do_exit() needs to call into
@@ -3049,136 +1884,8 @@
 	}
 
 	schedstat_inc(rq, sched_cnt);
-	now = sched_clock();
-	if (likely((long long)(now - prev->timestamp) < NS_MAX_SLEEP_AVG)) {
-		run_time = now - prev->timestamp;
-		if (unlikely((long long)(now - prev->timestamp) < 0))
-			run_time = 0;
-	} else
-		run_time = NS_MAX_SLEEP_AVG;
-
-	/*
-	 * Tasks charged proportionately less run_time at high sleep_avg to
-	 * delay them losing their interactive status
-	 */
-	run_time /= (CURRENT_BONUS(prev) ? : 1);
-
-	spin_lock_irq(&rq->lock);
-
-	if (unlikely(prev->flags & PF_DEAD))
-		prev->state = EXIT_DEAD;
-
-	switch_count = &prev->nivcsw;
-	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
-		switch_count = &prev->nvcsw;
-		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
-				unlikely(signal_pending(prev))))
-			prev->state = TASK_RUNNING;
-		else {
-			if (prev->state == TASK_UNINTERRUPTIBLE)
-				rq->nr_uninterruptible++;
-			deactivate_task(prev, rq);
-		}
-	}
-
-	cpu = smp_processor_id();
-	if (unlikely(!rq->nr_running)) {
-go_idle:
-		idle_balance(cpu, rq);
-		if (!rq->nr_running) {
-			next = rq->idle;
-			rq->expired_timestamp = 0;
-			wake_sleeping_dependent(cpu, rq);
-			/*
-			 * wake_sleeping_dependent() might have released
-			 * the runqueue, so break out if we got new
-			 * tasks meanwhile:
-			 */
-			if (!rq->nr_running)
-				goto switch_tasks;
-		}
-	} else {
-		if (dependent_sleeper(cpu, rq)) {
-			next = rq->idle;
-			goto switch_tasks;
-		}
-		/*
-		 * dependent_sleeper() releases and reacquires the runqueue
-		 * lock, hence go into the idle loop if the rq went
-		 * empty meanwhile:
-		 */
-		if (unlikely(!rq->nr_running))
-			goto go_idle;
-	}
 
-	array = rq->active;
-	if (unlikely(!array->nr_active)) {
-		/*
-		 * Switch the active and expired arrays.
-		 */
-		schedstat_inc(rq, sched_switch);
-		rq->active = rq->expired;
-		rq->expired = array;
-		array = rq->active;
-		rq->expired_timestamp = 0;
-		rq->best_expired_prio = MAX_PRIO;
-	}
-
-	idx = sched_find_first_bit(array->bitmap);
-	queue = array->queue + idx;
-	next = list_entry(queue->next, task_t, run_list);
-
-	if (!rt_task(next) && interactive_sleep(next->sleep_type)) {
-		unsigned long long delta = now - next->timestamp;
-		if (unlikely((long long)(now - next->timestamp) < 0))
-			delta = 0;
-
-		if (next->sleep_type == SLEEP_INTERACTIVE)
-			delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
-
-		array = next->array;
-		new_prio = recalc_task_prio(next, next->timestamp + delta);
-
-		if (unlikely(next->prio != new_prio)) {
-			dequeue_task(next, array);
-			next->prio = new_prio;
-			enqueue_task(next, array);
-		}
-	}
-	next->sleep_type = SLEEP_NORMAL;
-switch_tasks:
-	if (next == rq->idle)
-		schedstat_inc(rq, sched_goidle);
-	prefetch(next);
-	prefetch_stack(next);
-	clear_tsk_need_resched(prev);
-	rcu_qsctr_inc(task_cpu(prev));
-
-	update_cpu_clock(prev, rq, now);
-
-	prev->sleep_avg -= run_time;
-	if ((long)prev->sleep_avg <= 0)
-		prev->sleep_avg = 0;
-	prev->timestamp = prev->last_ran = now;
-
-	sched_info_switch(prev, next);
-	if (likely(prev != next)) {
-		next->timestamp = now;
-		rq->nr_switches++;
-		rq->curr = next;
-		++*switch_count;
-
-		prepare_task_switch(rq, next);
-		prev = context_switch(rq, prev, next);
-		barrier();
-		/*
-		 * this_rq must be evaluated again because prev may have moved
-		 * CPUs since it called schedule(), thus the 'rq' on its stack
-		 * frame will be invalid.
-		 */
-		finish_task_switch(this_rq(), prev);
-	} else
-		spin_unlock_irq(&rq->lock);
+	sched_drvp->schedule();
 
 	prev = current;
 	if (unlikely(reacquire_kernel_lock(prev) < 0))
@@ -3592,9 +2299,7 @@
 void set_user_nice(task_t *p, long nice)
 {
 	unsigned long flags;
-	prio_array_t *array;
 	runqueue_t *rq;
-	int old_prio, new_prio, delta;
 
 	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
 		return;
@@ -3613,29 +2318,8 @@
 		p->static_prio = NICE_TO_PRIO(nice);
 		goto out_unlock;
 	}
-	array = p->array;
-	if (array) {
-		dequeue_task(p, array);
-		dec_raw_weighted_load(rq, p);
-	}
-
-	old_prio = p->prio;
-	new_prio = NICE_TO_PRIO(nice);
-	delta = new_prio - old_prio;
-	p->static_prio = NICE_TO_PRIO(nice);
-	set_load_weight(p);
-	p->prio += delta;
 
-	if (array) {
-		enqueue_task(p, array);
-		inc_raw_weighted_load(rq, p);
-		/*
-		 * If the task increased its priority or is running and
-		 * lowered its priority, then reschedule its CPU:
-		 */
-		if (delta < 0 || (delta > 0 && task_running(rq, p)))
-			resched_task(rq->curr);
-	}
+	sched_drvp->set_normal_task_nice(p, nice);
 out_unlock:
 	task_rq_unlock(rq, &flags);
 }
@@ -3754,9 +2438,9 @@
 }
 
 /* Actually do priority change: must hold rq lock. */
-static void __setscheduler(struct task_struct *p, int policy, int prio)
+void __setscheduler(struct task_struct *p, int policy, int prio)
 {
-	BUG_ON(p->array);
+	BUG_ON(task_is_queued(p));
 	p->policy = policy;
 	p->rt_priority = prio;
 	if (policy != SCHED_NORMAL && policy != SCHED_BATCH) {
@@ -3767,7 +2451,7 @@
 		 * SCHED_BATCH tasks are treated as perpetual CPU hogs:
 		 */
 		if (policy == SCHED_BATCH)
-			p->sleep_avg = 0;
+			sched_drvp->init_batch_task(p);
 	}
 	set_load_weight(p);
 }
@@ -3783,8 +2467,7 @@
 		       struct sched_param *param)
 {
 	int retval;
-	int oldprio, oldpolicy = -1;
-	prio_array_t *array;
+	int oldpolicy = -1;
 	unsigned long flags;
 	runqueue_t *rq;
 
@@ -3846,24 +2529,9 @@
 		task_rq_unlock(rq, &flags);
 		goto recheck;
 	}
-	array = p->array;
-	if (array)
-		deactivate_task(p, rq);
-	oldprio = p->prio;
-	__setscheduler(p, policy, param->sched_priority);
-	if (array) {
-		__activate_task(p, rq);
-		/*
-		 * Reschedule if we are currently running on this runqueue and
-		 * our priority decreased, or if we are not currently running on
-		 * this runqueue and our priority is higher than the current's
-		 */
-		if (task_running(rq, p)) {
-			if (p->prio > oldprio)
-				resched_task(rq->curr);
-		} else if (TASK_PREEMPTS_CURR(p, rq))
-			resched_task(rq->curr);
-	}
+
+	sched_drvp->setscheduler(p, policy, param->sched_priority);
+
 	task_rq_unlock(rq, &flags);
 	return 0;
 }
@@ -4126,48 +2794,7 @@
  */
 asmlinkage long sys_sched_yield(void)
 {
-	runqueue_t *rq = this_rq_lock();
-	prio_array_t *array = current->array;
-	prio_array_t *target = rq->expired;
-
-	schedstat_inc(rq, yld_cnt);
-	/*
-	 * We implement yielding by moving the task into the expired
-	 * queue.
-	 *
-	 * (special rule: RT tasks will just roundrobin in the active
-	 *  array.)
-	 */
-	if (rt_task(current))
-		target = rq->active;
-
-	if (array->nr_active == 1) {
-		schedstat_inc(rq, yld_act_empty);
-		if (!rq->expired->nr_active)
-			schedstat_inc(rq, yld_both_empty);
-	} else if (!rq->expired->nr_active)
-		schedstat_inc(rq, yld_exp_empty);
-
-	if (array != target) {
-		dequeue_task(current, array);
-		enqueue_task(current, target);
-	} else
-		/*
-		 * requeue_task is cheaper so perform that if possible.
-		 */
-		requeue_task(current, array);
-
-	/*
-	 * Since we are going to call schedule() anyway, there's
-	 * no need to preempt or enable interrupts:
-	 */
-	__release(rq->lock);
-	_raw_spin_unlock(&rq->lock);
-	preempt_enable_no_resched();
-
-	schedule();
-
-	return 0;
+	return sched_drvp->sys_yield();
 }
 
 static inline void __cond_resched(void)
@@ -4253,8 +2880,7 @@
  */
 void __sched yield(void)
 {
-	set_current_state(TASK_RUNNING);
-	sys_sched_yield();
+	sched_drvp->yield();
 }
 
 EXPORT_SYMBOL(yield);
@@ -4487,9 +3113,7 @@
 	unsigned long flags;
 
 	idle->timestamp = sched_clock();
-	idle->sleep_avg = 0;
-	idle->array = NULL;
-	idle->prio = MAX_PRIO;
+	sched_drvp->init_idle(idle, cpu);
 	idle->state = TASK_RUNNING;
 	idle->cpus_allowed = cpumask_of_cpu(cpu);
 	set_task_cpu(idle, cpu);
@@ -4604,21 +3228,10 @@
 	if (!cpu_isset(dest_cpu, p->cpus_allowed))
 		goto out;
 
-	set_task_cpu(p, dest_cpu);
-	if (p->array) {
-		/*
-		 * Sync timestamp with rq_dest's before activating.
-		 * The same thing could be achieved by doing this step
-		 * afterwards, and pretending it was a local activate.
-		 * This way is cleaner and logically correct.
-		 */
-		p->timestamp = p->timestamp - rq_src->timestamp_last_tick
-				+ rq_dest->timestamp_last_tick;
-		deactivate_task(p, rq_src);
-		activate_task(p, rq_dest, 0);
-		if (TASK_PREEMPTS_CURR(p, rq_dest))
-			resched_task(rq_dest->curr);
-	}
+	if (task_is_queued(p))
+		sched_drvp->migrate_queued_task(p, dest_cpu);
+	else
+		set_task_cpu(p, dest_cpu);
 
 out:
 	double_rq_unlock(rq_src, rq_dest);
@@ -4767,7 +3380,6 @@
 {
 	int cpu = smp_processor_id();
 	runqueue_t *rq = this_rq();
-	struct task_struct *p = rq->idle;
 	unsigned long flags;
 
 	/* cpu has to be offline */
@@ -4778,9 +3390,7 @@
 	 */
 	spin_lock_irqsave(&rq->lock, flags);
 
-	__setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
-	/* Add idle task to _front_ of it's priority queue */
-	__activate_idle_task(p, rq);
+	sched_drvp->set_select_idle_first(rq);
 
 	spin_unlock_irqrestore(&rq->lock, flags);
 }
@@ -4799,7 +3409,7 @@
 	mmdrop(mm);
 }
 
-static void migrate_dead(unsigned int dead_cpu, task_t *tsk)
+void migrate_dead(unsigned int dead_cpu, task_t *tsk)
 {
 	struct runqueue *rq = cpu_rq(dead_cpu);
 
@@ -4824,20 +3434,9 @@
 }
 
 /* release_task() removes task from tasklist, so we won't find dead tasks. */
-static void migrate_dead_tasks(unsigned int dead_cpu)
+static inline void migrate_dead_tasks(unsigned int dead_cpu)
 {
-	unsigned arr, i;
-	struct runqueue *rq = cpu_rq(dead_cpu);
-
-	for (arr = 0; arr < 2; arr++) {
-		for (i = 0; i < MAX_PRIO; i++) {
-			struct list_head *list = &rq->arrays[arr].queue[i];
-			while (!list_empty(list))
-				migrate_dead(dead_cpu,
-					     list_entry(list->next, task_t,
-							run_list));
-		}
-	}
+	sched_drvp->migrate_dead_tasks(dead_cpu);
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
@@ -5008,9 +3607,7 @@
 		rq->migration_thread = NULL;
 		/* Idle task back to normal (off runqueue, low prio) */
 		rq = task_rq_lock(rq->idle, &flags);
-		deactivate_task(rq->idle, rq);
-		rq->idle->static_prio = MAX_PRIO;
-		__setscheduler(rq->idle, SCHED_NORMAL, 0);
+		sched_drvp->set_select_idle_last(rq);
 		migrate_dead_tasks(cpu);
 		task_rq_unlock(rq, &flags);
 		migrate_nr_uninterruptible(rq);
@@ -6353,20 +4950,26 @@
 		&& addr < (unsigned long)__sched_text_end);
 }
 
+void set_oom_time_slice(struct task_struct *p, unsigned long t)
+{
+	sched_drvp->set_oom_time_slice(p, t);
+}
+
 void __init sched_init(void)
 {
 	runqueue_t *rq;
-	int i, j, k;
+	int i;
+
+	sched_drvp->sched_init();
 
 	for_each_cpu(i) {
-		prio_array_t *array;
+#ifdef CONFIG_SMP
+		int j;
+#endif
 
 		rq = cpu_rq(i);
 		spin_lock_init(&rq->lock);
 		rq->nr_running = 0;
-		rq->active = rq->arrays;
-		rq->expired = rq->arrays + 1;
-		rq->best_expired_prio = MAX_PRIO;
 
 #ifdef CONFIG_SMP
 		rq->sd = NULL;
@@ -6379,15 +4982,7 @@
 #endif
 		atomic_set(&rq->nr_iowait, 0);
 
-		for (j = 0; j < 2; j++) {
-			array = rq->arrays + j;
-			for (k = 0; k < MAX_PRIO; k++) {
-				INIT_LIST_HEAD(array->queue + k);
-				__clear_bit(k, array->bitmap);
-			}
-			// delimiter for bitsearch
-			__set_bit(MAX_PRIO, array->bitmap);
-		}
+		sched_drvp->init_runqueue_queue(&rq->qu);
 	}
 
 	set_load_weight(&init_task);
@@ -6432,27 +5027,11 @@
 void normalize_rt_tasks(void)
 {
 	struct task_struct *p;
-	prio_array_t *array;
-	unsigned long flags;
-	runqueue_t *rq;
 
 	read_lock_irq(&tasklist_lock);
 	for_each_process (p) {
-		if (!rt_task(p))
-			continue;
-
-		rq = task_rq_lock(p, &flags);
-
-		array = p->array;
-		if (array)
-			deactivate_task(p, task_rq(p));
-		__setscheduler(p, SCHED_NORMAL, 0);
-		if (array) {
-			__activate_task(p, task_rq(p));
-			resched_task(rq->curr);
-		}
-
-		task_rq_unlock(rq, &flags);
+		if (rt_task(p))
+			sched_drvp->normalize_rt_task(p);
 	}
 	read_unlock_irq(&tasklist_lock);
 }
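
For orientation: the operations that kernel/sched.c now reaches through
sched_drvp are gathered in a single driver ops table.  The sketch below is an
approximation only - the member names come from the call sites above, but the
struct tag, argument types and qualifiers are inferred from those call sites;
the authoritative declaration is the scheduler-driver header added elsewhere
in this patch.

	struct sched_drv {
		void	(*fork)(task_t *p);
		void	(*exit)(task_t *p);
		void	(*wake_up_task)(task_t *p, runqueue_t *rq,
					unsigned long old_state, int sync);
		void	(*wake_up_new_task)(task_t *p, unsigned long clone_flags);
		void	(*tick)(task_t *p, runqueue_t *rq, unsigned long long now);
		void	(*schedule)(void);
		void	(*set_normal_task_nice)(task_t *p, long nice);
		void	(*setscheduler)(task_t *p, int policy, int prio);
		long	(*sys_yield)(void);
		void	(*yield)(void);
		void	(*init_idle)(task_t *idle, int cpu);
		void	(*sched_init)(void);
		/*
		 * ...plus the balancing and hotplug hooks used above:
		 * set_load_weight, move_tasks, head_of_queue,
		 * dependent_sleeper_trumps, migrate_queued_task,
		 * migrate_dead_tasks, set_select_idle_first/last,
		 * set_oom_time_slice, init_runqueue_queue, init_batch_task,
		 * normalize_rt_task.
		 */
	};
	extern struct sched_drv *sched_drvp;
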
diff -urN oldtree/kernel/sched.c.orig newtree/kernel/sched.c.orig
--- oldtree/kernel/sched.c.orig	1970-01-01 00:00:00.000000000 +0000
+++ newtree/kernel/sched.c.orig	2006-03-08 18:48:02.000000000 +0000
@@ -0,0 +1,6504 @@
+/*
+ *  kernel/sched.c
+ *
+ *  Kernel scheduler and related syscalls
+ *
+ *  Copyright (C) 1991-2002  Linus Torvalds
+ *
+ *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
+ *		make semaphores SMP safe
+ *  1998-11-19	Implemented schedule_timeout() and related stuff
+ *		by Andrea Arcangeli
+ *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
+ *		hybrid priority-list and round-robin design with
+ *		an array-switch method of distributing timeslices
+ *		and per-CPU runqueues.  Cleanups and useful suggestions
+ *		by Davide Libenzi, preemptible kernel bits by Robert Love.
+ *  2003-09-03	Interactivity tuning by Con Kolivas.
+ *  2004-04-02	Scheduler domains code by Nick Piggin
+ */
+
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/nmi.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+#include <linux/highmem.h>
+#include <linux/smp_lock.h>
+#include <asm/mmu_context.h>
+#include <linux/interrupt.h>
+#include <linux/capability.h>
+#include <linux/completion.h>
+#include <linux/kgdb.h>
+#include <linux/kernel_stat.h>
+#include <linux/security.h>
+#include <linux/notifier.h>
+#include <linux/profile.h>
+#include <linux/suspend.h>
+#include <linux/vmalloc.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/threads.h>
+#include <linux/timer.h>
+#include <linux/rcupdate.h>
+#include <linux/cpu.h>
+#include <linux/cpuset.h>
+#include <linux/percpu.h>
+#include <linux/kthread.h>
+#include <linux/seq_file.h>
+#include <linux/sysctl.h>
+#include <linux/syscalls.h>
+#include <linux/times.h>
+#include <linux/acct.h>
+#include <linux/kprobes.h>
+#include <asm/tlb.h>
+
+#include <asm/unistd.h>
+
+/*
+ * Convert user-nice values [ -20 ... 0 ... 19 ]
+ * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
+ * and back.
+ */
+#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
+#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
+#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)
+
+/*
+ * 'User priority' is the nice value converted to something we
+ * can work with better when scaling various scheduler parameters,
+ * it's a [ 0 ... 39 ] range.
+ */
+#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
+#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
+#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))
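+
+/*
+ * A quick worked example, assuming the usual MAX_RT_PRIO of 100 (and
+ * hence MAX_PRIO of 140): NICE_TO_PRIO() maps nice -20/0/+19 to static
+ * priorities 100/120/139, and TASK_USER_PRIO() maps those back down to
+ * user priorities 0/20/39 within the [ 0 ... 39 ] range described above.
+ */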
+
+/*
+ * Some helpers for converting nanosecond timing to jiffy resolution
+ */
+#define NS_TO_JIFFIES(TIME)	((TIME) / (1000000000 / HZ))
+#define JIFFIES_TO_NS(TIME)	((TIME) * (1000000000 / HZ))
+
+/*
+ * These are the 'tuning knobs' of the scheduler:
+ *
+ * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
+ * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
+ * Timeslices get refilled after they expire.
+ */
+#define MIN_TIMESLICE		max(5 * HZ / 1000, 1)
+#define DEF_TIMESLICE		(100 * HZ / 1000)
+#define ON_RUNQUEUE_WEIGHT	 30
+#define CHILD_PENALTY		 95
+#define PARENT_PENALTY		100
+#define EXIT_WEIGHT		  3
+#define PRIO_BONUS_RATIO	 25
+#define MAX_BONUS		(MAX_USER_PRIO * PRIO_BONUS_RATIO / 100)
+#define INTERACTIVE_DELTA	  2
+#define MAX_SLEEP_AVG		(DEF_TIMESLICE * MAX_BONUS)
+#define STARVATION_LIMIT	(MAX_SLEEP_AVG)
+#define NS_MAX_SLEEP_AVG	(JIFFIES_TO_NS(MAX_SLEEP_AVG))
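+
+/*
+ * For example, with HZ=1000 the knobs above work out to:
+ *
+ *   MIN_TIMESLICE    =    5 jiffies (5ms)
+ *   DEF_TIMESLICE    =  100 jiffies (100ms)
+ *   MAX_BONUS        =   10 (MAX_USER_PRIO * PRIO_BONUS_RATIO / 100)
+ *   MAX_SLEEP_AVG    = 1000 jiffies, NS_MAX_SLEEP_AVG = 1000000000ns (1s)
+ */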
+
+/*
+ * If a task is 'interactive' then we reinsert it in the active
+ * array after it has expired its current timeslice. (it will not
+ * continue to run immediately, it will still roundrobin with
+ * other interactive tasks.)
+ *
+ * This part scales the interactivity limit depending on niceness.
+ *
+ * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
+ * Here are a few examples of different nice levels:
+ *
+ *  TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
+ *  TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
+ *  TASK_INTERACTIVE(  0): [1,1,1,1,0,0,0,0,0,0,0]
+ *  TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
+ *  TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
+ *
+ * (the X axis represents the possible -5 ... 0 ... +5 dynamic
+ *  priority range a task can explore, a value of '1' means the
+ *  task is rated interactive.)
+ *
+ * Ie. nice +19 tasks can never get 'interactive' enough to be
+ * reinserted into the active array. And only heavily CPU-hog nice -20
+ * tasks will be expired. Default nice 0 tasks are somewhere between,
+ * it takes some effort for them to get interactive, but it's not
+ * too hard.
+ */
+
+#define CURRENT_BONUS(p) \
+	(NS_TO_JIFFIES((p)->sleep_avg) * MAX_BONUS / \
+		MAX_SLEEP_AVG)
+
+#define GRANULARITY	(10 * HZ / 1000 ? : 1)
+
+#ifdef CONFIG_SMP
+#define TIMESLICE_GRANULARITY(p)	(GRANULARITY * \
+		(1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)) * \
+			num_online_cpus())
+#else
+#define TIMESLICE_GRANULARITY(p)	(GRANULARITY * \
+		(1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)))
+#endif
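+
+/*
+ * Worked example (HZ=1000, two online CPUs): a task with a full sleep_avg
+ * has CURRENT_BONUS(p) == MAX_BONUS == 10, the shift factor collapses to 1
+ * and TIMESLICE_GRANULARITY(p) becomes 10 * 1 * 2 = 20 jiffies, i.e. such
+ * a task is round-robined with its equal-priority peers every 20ms.
+ */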
+
+#define SCALE(v1,v1_max,v2_max) \
+	(v1) * (v2_max) / (v1_max)
+
+#define DELTA(p) \
+	(SCALE(TASK_NICE(p) + 20, 40, MAX_BONUS) - 20 * MAX_BONUS / 40 + \
+		INTERACTIVE_DELTA)
+
+#define TASK_INTERACTIVE(p) \
+	((p)->prio <= (p)->static_prio - DELTA(p))
+
+#define INTERACTIVE_SLEEP(p) \
+	(JIFFIES_TO_NS(MAX_SLEEP_AVG * \
+		(MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))
+
+#define TASK_PREEMPTS_CURR(p, rq) \
+	((p)->prio < (rq)->curr->prio)
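+
+/*
+ * Concretely, with MAX_BONUS of 10 and HZ=1000 as above: a nice 0 task
+ * has DELTA(p) = 5 - 5 + 2 = 2, so it is rated interactive once its
+ * dynamic priority sits at least 2 below its static priority (a sleep
+ * bonus of +2 or more) - the first four '1' slots of the nice-0 row in
+ * the table above.  Its INTERACTIVE_SLEEP(p) threshold works out to
+ * JIFFIES_TO_NS(1000 * 8 / 10 - 1), i.e. just under 800ms of accumulated
+ * sleep.
+ */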
+
+/*
+ * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
+ * to time slice values: [800ms ... 100ms ... 5ms]
+ *
+ * The higher a thread's priority, the bigger timeslices
+ * it gets during one round of execution. But even the lowest
+ * priority thread gets MIN_TIMESLICE worth of execution time.
+ */
+
+#define SCALE_PRIO(x, prio) \
+	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE)
+
+static unsigned int static_prio_timeslice(int static_prio)
+{
+	if (static_prio < NICE_TO_PRIO(0))
+		return SCALE_PRIO(DEF_TIMESLICE*4, static_prio);
+	else
+		return SCALE_PRIO(DEF_TIMESLICE, static_prio);
+}
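+
+/*
+ * With HZ=1000 this yields the [800ms ... 100ms ... 5ms] range quoted
+ * above:
+ *
+ *   nice -20 (static_prio 100): SCALE_PRIO(DEF_TIMESLICE*4, 100) = 800ms
+ *   nice   0 (static_prio 120): SCALE_PRIO(DEF_TIMESLICE,   120) = 100ms
+ *   nice +19 (static_prio 139): SCALE_PRIO(DEF_TIMESLICE,   139) =   5ms
+ *                               (clamped to MIN_TIMESLICE)
+ */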
+
+static inline unsigned int task_timeslice(task_t *p)
+{
+	return static_prio_timeslice(p->static_prio);
+}
+
+#define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran)	\
+				< (long long) (sd)->cache_hot_time)
+
+void __put_task_struct_cb(struct rcu_head *rhp)
+{
+	__put_task_struct(container_of(rhp, struct task_struct, rcu));
+}
+
+EXPORT_SYMBOL_GPL(__put_task_struct_cb);
+
+/*
+ * These are the runqueue data structures:
+ */
+
+#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
+
+typedef struct runqueue runqueue_t;
+
+struct prio_array {
+	unsigned int nr_active;
+	unsigned long bitmap[BITMAP_SIZE];
+	struct list_head queue[MAX_PRIO];
+};
+
+/*
+ * This is the main, per-CPU runqueue data structure.
+ *
+ * Locking rule: those places that want to lock multiple runqueues
+ * (such as the load balancing or the thread migration code), lock
+ * acquire operations must be ordered by ascending &runqueue.
+ */
+struct runqueue {
+	spinlock_t lock;
+
+	/*
+	 * nr_running and cpu_load should be in the same cacheline because
+	 * remote CPUs use both these fields when doing load calculation.
+	 */
+	unsigned long nr_running;
+#ifdef CONFIG_SMP
+	unsigned long raw_weighted_load;
+	unsigned long cpu_load[3];
+#endif
+	unsigned long long nr_switches;
+
+	/*
+	 * This is part of a global counter where only the total sum
+	 * over all CPUs matters. A task can increase this counter on
+	 * one CPU and if it got migrated afterwards it may decrease
+	 * it on another CPU. Always updated under the runqueue lock:
+	 */
+	unsigned long nr_uninterruptible;
+
+	unsigned long expired_timestamp;
+	unsigned long long timestamp_last_tick;
+	task_t *curr, *idle;
+	struct mm_struct *prev_mm;
+	prio_array_t *active, *expired, arrays[2];
+	int best_expired_prio;
+	atomic_t nr_iowait;
+
+#ifdef CONFIG_SMP
+	struct sched_domain *sd;
+
+	/* For active balancing */
+	int active_balance;
+	int push_cpu;
+
+	task_t *migration_thread;
+	struct list_head migration_queue;
+#endif
+
+#ifdef CONFIG_SCHEDSTATS
+	/* latency stats */
+	struct sched_info rq_sched_info;
+
+	/* sys_sched_yield() stats */
+	unsigned long yld_exp_empty;
+	unsigned long yld_act_empty;
+	unsigned long yld_both_empty;
+	unsigned long yld_cnt;
+
+	/* schedule() stats */
+	unsigned long sched_switch;
+	unsigned long sched_cnt;
+	unsigned long sched_goidle;
+
+	/* try_to_wake_up() stats */
+	unsigned long ttwu_cnt;
+	unsigned long ttwu_local;
+#endif
+};
+
+static DEFINE_PER_CPU(struct runqueue, runqueues);
+
+/*
+ * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
+ * See detach_destroy_domains: synchronize_sched for details.
+ *
+ * The domain tree of any CPU may only be accessed from within
+ * preempt-disabled sections.
+ */
+#define for_each_domain(cpu, domain) \
+for (domain = rcu_dereference(cpu_rq(cpu)->sd); domain; domain = domain->parent)
+
+#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
+#define this_rq()		(&__get_cpu_var(runqueues))
+#define task_rq(p)		cpu_rq(task_cpu(p))
+#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
+
+#ifndef prepare_arch_switch
+# define prepare_arch_switch(next)	do { } while (0)
+#endif
+#ifndef finish_arch_switch
+# define finish_arch_switch(prev)	do { } while (0)
+#endif
+
+#ifndef __ARCH_WANT_UNLOCKED_CTXSW
+static inline int task_running(runqueue_t *rq, task_t *p)
+{
+	return rq->curr == p;
+}
+
+static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
+{
+}
+
+static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+	/* this is a valid case when another task releases the spinlock */
+	rq->lock.owner = current;
+#endif
+	spin_unlock_irq(&rq->lock);
+}
+
+#else /* __ARCH_WANT_UNLOCKED_CTXSW */
+static inline int task_running(runqueue_t *rq, task_t *p)
+{
+#ifdef CONFIG_SMP
+	return p->oncpu;
+#else
+	return rq->curr == p;
+#endif
+}
+
+static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
+{
+#ifdef CONFIG_SMP
+	/*
+	 * We can optimise this out completely for !SMP, because the
+	 * SMP rebalancing from interrupt is the only thing that cares
+	 * here.
+	 */
+	next->oncpu = 1;
+#endif
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	spin_unlock_irq(&rq->lock);
+#else
+	spin_unlock(&rq->lock);
+#endif
+}
+
+static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
+{
+#ifdef CONFIG_SMP
+	/*
+	 * After ->oncpu is cleared, the task can be moved to a different CPU.
+	 * We must ensure this doesn't happen until the switch is completely
+	 * finished.
+	 */
+	smp_wmb();
+	prev->oncpu = 0;
+#endif
+#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	local_irq_enable();
+#endif
+}
+#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
+
+/*
+ * task_rq_lock - lock the runqueue a given task resides on and disable
+ * interrupts.  Note the ordering: we can safely lookup the task_rq without
+ * explicitly disabling preemption.
+ */
+static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
+	__acquires(rq->lock)
+{
+	struct runqueue *rq;
+
+repeat_lock_task:
+	local_irq_save(*flags);
+	rq = task_rq(p);
+	spin_lock(&rq->lock);
+	if (unlikely(rq != task_rq(p))) {
+		spin_unlock_irqrestore(&rq->lock, *flags);
+		goto repeat_lock_task;
+	}
+	return rq;
+}
+
+static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
+	__releases(rq->lock)
+{
+	spin_unlock_irqrestore(&rq->lock, *flags);
+}
+
+#ifdef CONFIG_SCHEDSTATS
+/*
+ * bump this up when changing the output format or the meaning of an existing
+ * format, so that tools can adapt (or abort)
+ */
+#define SCHEDSTAT_VERSION 12
+
+static int show_schedstat(struct seq_file *seq, void *v)
+{
+	int cpu;
+
+	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
+	seq_printf(seq, "timestamp %lu\n", jiffies);
+	for_each_online_cpu(cpu) {
+		runqueue_t *rq = cpu_rq(cpu);
+#ifdef CONFIG_SMP
+		struct sched_domain *sd;
+		int dcnt = 0;
+#endif
+
+		/* runqueue-specific stats */
+		seq_printf(seq,
+		    "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
+		    cpu, rq->yld_both_empty,
+		    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_cnt,
+		    rq->sched_switch, rq->sched_cnt, rq->sched_goidle,
+		    rq->ttwu_cnt, rq->ttwu_local,
+		    rq->rq_sched_info.cpu_time,
+		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt);
+
+		seq_printf(seq, "\n");
+
+#ifdef CONFIG_SMP
+		/* domain-specific stats */
+		preempt_disable();
+		for_each_domain(cpu, sd) {
+			enum idle_type itype;
+			char mask_str[NR_CPUS];
+
+			cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
+			seq_printf(seq, "domain%d %s", dcnt++, mask_str);
+			for (itype = SCHED_IDLE; itype < MAX_IDLE_TYPES;
+					itype++) {
+				seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu",
+				    sd->lb_cnt[itype],
+				    sd->lb_balanced[itype],
+				    sd->lb_failed[itype],
+				    sd->lb_imbalance[itype],
+				    sd->lb_gained[itype],
+				    sd->lb_hot_gained[itype],
+				    sd->lb_nobusyq[itype],
+				    sd->lb_nobusyg[itype]);
+			}
+			seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
+			    sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
+			    sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
+			    sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
+			    sd->ttwu_wake_remote, sd->ttwu_move_affine, sd->ttwu_move_balance);
+		}
+		preempt_enable();
+#endif
+	}
+	return 0;
+}
+
+static int schedstat_open(struct inode *inode, struct file *file)
+{
+	unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
+	char *buf = kmalloc(size, GFP_KERNEL);
+	struct seq_file *m;
+	int res;
+
+	if (!buf)
+		return -ENOMEM;
+	res = single_open(file, show_schedstat, NULL);
+	if (!res) {
+		m = file->private_data;
+		m->buf = buf;
+		m->size = size;
+	} else
+		kfree(buf);
+	return res;
+}
+
+struct file_operations proc_schedstat_operations = {
+	.open    = schedstat_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+};
+
+# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
+# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
+#else /* !CONFIG_SCHEDSTATS */
+# define schedstat_inc(rq, field)	do { } while (0)
+# define schedstat_add(rq, field, amt)	do { } while (0)
+#endif
+
+/*
+ * rq_lock - lock a given runqueue and disable interrupts.
+ */
+static inline runqueue_t *this_rq_lock(void)
+	__acquires(rq->lock)
+{
+	runqueue_t *rq;
+
+	local_irq_disable();
+	rq = this_rq();
+	spin_lock(&rq->lock);
+
+	return rq;
+}
+
+#ifdef CONFIG_SCHEDSTATS
+/*
+ * Called when a process is dequeued from the active array and given
+ * the cpu.  We should note that with the exception of interactive
+ * tasks, the expired queue will become the active queue after the active
+ * queue is empty, without explicitly dequeuing and requeuing tasks in the
+ * expired queue.  (Interactive tasks may be requeued directly to the
+ * active queue, thus delaying tasks in the expired queue from running;
+ * see scheduler_tick()).
+ *
+ * This function is only called from sched_info_arrive(), rather than
+ * dequeue_task(). Even though a task may be queued and dequeued multiple
+ * times as it is shuffled about, we're really interested in knowing how
+ * long it was from the *first* time it was queued to the time that it
+ * finally hit a cpu.
+ */
+static inline void sched_info_dequeued(task_t *t)
+{
+	t->sched_info.last_queued = 0;
+}
+
+/*
+ * Called when a task finally hits the cpu.  We can now calculate how
+ * long it was waiting to run.  We also note when it began so that we
+ * can keep stats on how long its timeslice is.
+ */
+static void sched_info_arrive(task_t *t)
+{
+	unsigned long now = jiffies, diff = 0;
+	struct runqueue *rq = task_rq(t);
+
+	if (t->sched_info.last_queued)
+		diff = now - t->sched_info.last_queued;
+	sched_info_dequeued(t);
+	t->sched_info.run_delay += diff;
+	t->sched_info.last_arrival = now;
+	t->sched_info.pcnt++;
+
+	if (!rq)
+		return;
+
+	rq->rq_sched_info.run_delay += diff;
+	rq->rq_sched_info.pcnt++;
+}
+
+/*
+ * Called when a process is queued into either the active or expired
+ * array.  The time is noted and later used to determine how long the
+ * task had to wait to reach the cpu.  Since the expired queue will
+ * become the active queue after active queue is empty, without dequeuing
+ * and requeuing any tasks, we are interested in queuing to either. It
+ * is unusual but not impossible for tasks to be dequeued and immediately
+ * requeued in the same or another array: this can happen in sched_yield(),
+ * set_user_nice(), and even load_balance() as it moves tasks from runqueue
+ * to runqueue.
+ *
+ * This function is only called from enqueue_task(), but also only updates
+ * the timestamp if it is not already set.  It's assumed that
+ * sched_info_dequeued() will clear that stamp when appropriate.
+ */
+static inline void sched_info_queued(task_t *t)
+{
+	if (!t->sched_info.last_queued)
+		t->sched_info.last_queued = jiffies;
+}
+
+/*
+ * Called when a process ceases being the active-running process, either
+ * voluntarily or involuntarily.  Now we can calculate how long we ran.
+ */
+static inline void sched_info_depart(task_t *t)
+{
+	struct runqueue *rq = task_rq(t);
+	unsigned long diff = jiffies - t->sched_info.last_arrival;
+
+	t->sched_info.cpu_time += diff;
+
+	if (rq)
+		rq->rq_sched_info.cpu_time += diff;
+}
+
+/*
+ * Called when tasks are switched, typically involuntarily because their
+ * time slice expired.  (This may also be called when switching to or from
+ * the idle task.)  We are only called when prev != next.
+ */
+static inline void sched_info_switch(task_t *prev, task_t *next)
+{
+	struct runqueue *rq = task_rq(prev);
+
+	/*
+	 * prev now departs the cpu.  It's not interesting to record
+	 * stats about how efficient we were at scheduling the idle
+	 * process, however.
+	 */
+	if (prev != rq->idle)
+		sched_info_depart(prev);
+
+	if (next != rq->idle)
+		sched_info_arrive(next);
+}
+#else
+#define sched_info_queued(t)		do { } while (0)
+#define sched_info_switch(t, next)	do { } while (0)
+#endif /* CONFIG_SCHEDSTATS */
+
+/*
+ * Adding/removing a task to/from a priority array:
+ */
+static void dequeue_task(struct task_struct *p, prio_array_t *array)
+{
+	array->nr_active--;
+	list_del(&p->run_list);
+	if (list_empty(array->queue + p->prio))
+		__clear_bit(p->prio, array->bitmap);
+}
+
+static void enqueue_task(struct task_struct *p, prio_array_t *array)
+{
+	sched_info_queued(p);
+	list_add_tail(&p->run_list, array->queue + p->prio);
+	__set_bit(p->prio, array->bitmap);
+	array->nr_active++;
+	p->array = array;
+}
+
+/*
+ * Put task to the end of the run list without the overhead of dequeue
+ * followed by enqueue.
+ */
+static void requeue_task(struct task_struct *p, prio_array_t *array)
+{
+	list_move_tail(&p->run_list, array->queue + p->prio);
+}
+
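+/*
+ * enqueue_task_head - like enqueue_task() but adds the task to the
+ * _head_ of its priority queue; used below to put the idle task at the
+ * front of the runqueue.
+ */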
+static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
+{
+	list_add(&p->run_list, array->queue + p->prio);
+	__set_bit(p->prio, array->bitmap);
+	array->nr_active++;
+	p->array = array;
+}
+
+/*
+ * effective_prio - return the priority that is based on the static
+ * priority but is modified by bonuses/penalties.
+ *
+ * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
+ * into the -5 ... 0 ... +5 bonus/penalty range.
+ *
+ * We use 25% of the full 0...39 priority range so that:
+ *
+ * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
+ * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
+ *
+ * Both properties are important to certain workloads.
+ */
+static int effective_prio(task_t *p)
+{
+	int bonus, prio;
+
+	if (rt_task(p))
+		return p->prio;
+
+	bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
+
+	prio = p->static_prio - bonus;
+	if (prio < MAX_RT_PRIO)
+		prio = MAX_RT_PRIO;
+	if (prio > MAX_PRIO-1)
+		prio = MAX_PRIO-1;
+	return prio;
+}
+
+#ifdef CONFIG_SMP
+/*
+ * To aid in avoiding the subversion of "niceness" due to uneven distribution
+ * of tasks with abnormal "nice" values across CPUs, the contribution that
+ * each task makes to its run queue's load is weighted according to its
+ * scheduling class and "nice" value.  For SCHED_NORMAL tasks this is just a
+ * scaled version of the new time slice allocation that they receive on time
+ * slice expiry etc.
+ */
+
+/*
+ * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE
+ * If static_prio_timeslice() is ever changed to break this assumption then
+ * this code will need modification
+ */
+#define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
+#define LOAD_WEIGHT(lp) \
+	(((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
+#define PRIO_TO_LOAD_WEIGHT(prio) \
+	LOAD_WEIGHT(static_prio_timeslice(prio))
+#define RTPRIO_TO_LOAD_WEIGHT(rp) \
+	(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
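+
+/*
+ * Worked example, given the assumption above: a nice-0 SCHED_NORMAL task
+ * has a timeslice of DEF_TIMESLICE, so PRIO_TO_LOAD_WEIGHT(NICE_TO_PRIO(0))
+ * == LOAD_WEIGHT(DEF_TIMESLICE) == SCHED_LOAD_SCALE; tasks with larger
+ * timeslices contribute proportionally more to raw_weighted_load.
+ */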
+
+static inline void set_load_weight(task_t *p)
+{
+	if (rt_task(p)) {
+		if (p == task_rq(p)->migration_thread)
+			/*
+			 * The migration thread does the actual balancing.
+			 * Giving its load any weight will skew balancing
+			 * adversely.
+			 */
+			p->load_weight = 0;
+		else
+			p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
+	} else
+		p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
+}
+
+static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p)
+{
+	rq->raw_weighted_load += p->load_weight;
+}
+
+static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p)
+{
+	rq->raw_weighted_load -= p->load_weight;
+}
+#else
+static inline void set_load_weight(task_t *p)
+{
+}
+
+static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p)
+{
+}
+
+static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p)
+{
+}
+#endif
+
+static inline void inc_nr_running(task_t *p, runqueue_t *rq)
+{
+	rq->nr_running++;
+	inc_raw_weighted_load(rq, p);
+}
+
+static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+{
+	rq->nr_running--;
+	dec_raw_weighted_load(rq, p);
+}
+
+/*
+ * __activate_task - move a task to the runqueue.
+ */
+static inline void __activate_task(task_t *p, runqueue_t *rq)
+{
+	enqueue_task(p, rq->active);
+	inc_nr_running(p, rq);
+}
+
+/*
+ * __activate_idle_task - move idle task to the _front_ of runqueue.
+ */
+static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
+{
+	enqueue_task_head(p, rq->active);
+	inc_nr_running(p, rq);
+}
+
+static int recalc_task_prio(task_t *p, unsigned long long now)
+{
+	/* Caller must always ensure 'now >= p->timestamp' */
+	unsigned long long __sleep_time = now - p->timestamp;
+	unsigned long sleep_time;
+
+	if (unlikely(p->policy == SCHED_BATCH))
+		sleep_time = 0;
+	else {
+		if (__sleep_time > NS_MAX_SLEEP_AVG)
+			sleep_time = NS_MAX_SLEEP_AVG;
+		else
+			sleep_time = (unsigned long)__sleep_time;
+	}
+
+	if (likely(sleep_time > 0)) {
+		/*
+		 * User tasks that sleep a long time are categorised as
+		 * idle. They will only have their sleep_avg increased to a
+		 * level that makes them just interactive priority to stay
+		 * active yet prevent them suddenly becoming cpu hogs and
+		 * starving other processes.
+		 */
+		if (p->mm && sleep_time > INTERACTIVE_SLEEP(p)) {
+			unsigned long ceiling;
+
+			ceiling = JIFFIES_TO_NS(MAX_SLEEP_AVG -
+						DEF_TIMESLICE);
+			if (p->sleep_avg < ceiling)
+				p->sleep_avg = ceiling;
+		} else {
+
+			/*
+			 * The lower the sleep avg a task has the more
+			 * rapidly it will rise with sleep time. This enables
+			 * tasks to rapidly recover to a low latency priority.
+			 * If a task was sleeping with the noninteractive
+			 * label do not apply this non-linear boost
+			 */
+			if (p->sleep_type != SLEEP_NONINTERACTIVE || !p->mm)
+				sleep_time *=
+					(MAX_BONUS - CURRENT_BONUS(p)) ? : 1;
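+			/*
+			 * (GNU "?:" with an omitted middle operand: scale by
+			 * MAX_BONUS - CURRENT_BONUS(p), or by 1 if that
+			 * difference is zero.)
+			 */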
+
+			/*
+			 * This code gives a bonus to interactive tasks.
+			 *
+			 * The boost works by updating the 'average sleep time'
+			 * value here, based on ->timestamp. The more time a
+			 * task spends sleeping, the higher the average gets -
+			 * and the higher the priority boost gets as well.
+			 */
+			p->sleep_avg += sleep_time;
+
+			if (p->sleep_avg > NS_MAX_SLEEP_AVG)
+				p->sleep_avg = NS_MAX_SLEEP_AVG;
+		}
+	}
+
+	return effective_prio(p);
+}
+
+/*
+ * activate_task - move a task to the runqueue and do priority recalculation
+ *
+ * Update all the scheduling statistics stuff. (sleep average
+ * calculation, priority modifiers, etc.)
+ */
+static void activate_task(task_t *p, runqueue_t *rq, int local)
+{
+	unsigned long long now;
+
+	now = sched_clock();
+#ifdef CONFIG_SMP
+	if (!local) {
+		/* Compensate for drifting sched_clock */
+		runqueue_t *this_rq = this_rq();
+		now = (now - this_rq->timestamp_last_tick)
+			+ rq->timestamp_last_tick;
+	}
+#endif
+
+	if (!rt_task(p))
+		p->prio = recalc_task_prio(p, now);
+
+	if (p->sleep_type != SLEEP_NONINTERACTIVE) {
+		/*
+		 * Tasks which were woken up by interrupts (ie. hw events)
+		 * are most likely of interactive nature. So we give them
+		 * the credit of extending their sleep time to the period
+		 * of time they spend on the runqueue, waiting for execution
+		 * on a CPU, first time around:
+		 */
+		if (in_interrupt())
+			p->sleep_type = SLEEP_INTERRUPTED;
+		else {
+			/*
+			 * Normal first-time wakeups get a credit too for
+			 * on-runqueue time, but it will be weighted down:
+			 */
+			p->sleep_type = SLEEP_INTERACTIVE;
+		}
+	}
+	p->timestamp = now;
+
+	__activate_task(p, rq);
+}
+
+/*
+ * deactivate_task - remove a task from the runqueue.
+ */
+static void deactivate_task(struct task_struct *p, runqueue_t *rq)
+{
+	dec_nr_running(p, rq);
+	dequeue_task(p, p->array);
+	p->array = NULL;
+}
+
+/*
+ * resched_task - mark a task 'to be rescheduled now'.
+ *
+ * On UP this means the setting of the need_resched flag, on SMP it
+ * might also involve a cross-CPU call to trigger the scheduler on
+ * the target CPU.
+ */
+#ifdef CONFIG_SMP
+static void resched_task(task_t *p)
+{
+	int cpu;
+
+	assert_spin_locked(&task_rq(p)->lock);
+
+	if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
+		return;
+
+	set_tsk_thread_flag(p, TIF_NEED_RESCHED);
+
+	cpu = task_cpu(p);
+	if (cpu == smp_processor_id())
+		return;
+
+	/* NEED_RESCHED must be visible before we test POLLING_NRFLAG */
+	smp_mb();
+	if (!test_tsk_thread_flag(p, TIF_POLLING_NRFLAG))
+		smp_send_reschedule(cpu);
+}
+#else
+static inline void resched_task(task_t *p)
+{
+	assert_spin_locked(&task_rq(p)->lock);
+	set_tsk_need_resched(p);
+}
+#endif
+
+/**
+ * task_curr - is this task currently executing on a CPU?
+ * @p: the task in question.
+ */
+inline int task_curr(const task_t *p)
+{
+	return cpu_curr(task_cpu(p)) == p;
+}
+
+#ifdef CONFIG_SMP
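+/*
+ * A request to migrate a task to dest_cpu: queued on the source
+ * runqueue's migration_queue and serviced by that CPU's migration
+ * thread, while the requester waits on 'done'.
+ */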
+typedef struct {
+	struct list_head list;
+
+	task_t *task;
+	int dest_cpu;
+
+	struct completion done;
+} migration_req_t;
+
+/*
+ * The task's runqueue lock must be held.
+ * Returns true if you have to wait for the migration thread.
+ */
+static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
+{
+	runqueue_t *rq = task_rq(p);
+
+	/*
+	 * If the task is not on a runqueue (and not running), then
+	 * it is sufficient to simply update the task's cpu field.
+	 */
+	if (!p->array && !task_running(rq, p)) {
+		set_task_cpu(p, dest_cpu);
+		return 0;
+	}
+
+	init_completion(&req->done);
+	req->task = p;
+	req->dest_cpu = dest_cpu;
+	list_add(&req->list, &rq->migration_queue);
+	return 1;
+}
+
+/*
+ * wait_task_inactive - wait for a thread to unschedule.
+ *
+ * The caller must ensure that the task *will* unschedule sometime soon,
+ * else this function might spin for a *long* time. This function can't
+ * be called with interrupts off, or it may introduce deadlock with
+ * smp_call_function() if an IPI is sent by the same process we are
+ * waiting to become inactive.
+ */
+void wait_task_inactive(task_t *p)
+{
+	unsigned long flags;
+	runqueue_t *rq;
+	int preempted;
+
+repeat:
+	rq = task_rq_lock(p, &flags);
+	/* Must be off runqueue entirely, not preempted. */
+	if (unlikely(p->array || task_running(rq, p))) {
+		/* If it's preempted, we yield.  It could be a while. */
+		preempted = !task_running(rq, p);
+		task_rq_unlock(rq, &flags);
+		cpu_relax();
+		if (preempted)
+			yield();
+		goto repeat;
+	}
+	task_rq_unlock(rq, &flags);
+}
+
+/***
+ * kick_process - kick a running thread to enter/exit the kernel
+ * @p: the to-be-kicked thread
+ *
+ * Cause a process which is running on another CPU to enter
+ * kernel-mode, without any delay. (to get signals handled.)
+ *
+ * NOTE: this function doesn't have to take the runqueue lock,
+ * because all it wants to ensure is that the remote task enters
+ * the kernel. If the IPI races and the task has been migrated
+ * to another CPU then no harm is done and the purpose has been
+ * achieved as well.
+ */
+void kick_process(task_t *p)
+{
+	int cpu;
+
+	preempt_disable();
+	cpu = task_cpu(p);
+	if ((cpu != smp_processor_id()) && task_curr(p))
+		smp_send_reschedule(cpu);
+	preempt_enable();
+}
+
+/*
+ * Return a low guess at the load of a migration-source cpu weighted
+ * according to the scheduling class and "nice" value.
+ *
+ * We want to under-estimate the load of migration sources, to
+ * balance conservatively.
+ */
+static inline unsigned long source_load(int cpu, int type)
+{
+	runqueue_t *rq = cpu_rq(cpu);
+
+	if (type == 0)
+		return rq->raw_weighted_load;
+
+	return min(rq->cpu_load[type-1], rq->raw_weighted_load);
+}
+
+/*
+ * Return a high guess at the load of a migration-target cpu weighted
+ * according to the scheduling class and "nice" value.
+ */
+static inline unsigned long target_load(int cpu, int type)
+{
+	runqueue_t *rq = cpu_rq(cpu);
+
+	if (type == 0)
+		return rq->raw_weighted_load;
+
+	return max(rq->cpu_load[type-1], rq->raw_weighted_load);
+}
+
+/*
+ * Return the average load per task on the cpu's run queue
+ */
+static inline unsigned long cpu_avg_load_per_task(int cpu)
+{
+	runqueue_t *rq = cpu_rq(cpu);
+	unsigned long n = rq->nr_running;
+
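+	/*
+	 * An empty runqueue reports SCHED_LOAD_SCALE, i.e. roughly one
+	 * nice-0 task's worth of load, rather than dividing by zero.
+	 */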
+	return n ?  rq->raw_weighted_load / n : SCHED_LOAD_SCALE;
+}
+
+/*
+ * find_idlest_group finds and returns the least busy CPU group within the
+ * domain.
+ */
+static struct sched_group *
+find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
+{
+	struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
+	unsigned long min_load = ULONG_MAX, this_load = 0;
+	int load_idx = sd->forkexec_idx;
+	int imbalance = 100 + (sd->imbalance_pct-100)/2;
+
+	do {
+		unsigned long load, avg_load;
+		int local_group;
+		int i;
+
+		/* Skip over this group if it has no CPUs allowed */
+		if (!cpus_intersects(group->cpumask, p->cpus_allowed))
+			goto nextgroup;
+
+		local_group = cpu_isset(this_cpu, group->cpumask);
+
+		/* Tally up the load of all CPUs in the group */
+		avg_load = 0;
+
+		for_each_cpu_mask(i, group->cpumask) {
+			/* Bias balancing toward cpus of our domain */
+			if (local_group)
+				load = source_load(i, load_idx);
+			else
+				load = target_load(i, load_idx);
+
+			avg_load += load;
+		}
+
+		/* Adjust by relative CPU power of the group */
+		avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
+
+		if (local_group) {
+			this_load = avg_load;
+			this = group;
+		} else if (avg_load < min_load) {
+			min_load = avg_load;
+			idlest = group;
+		}
+nextgroup:
+		group = group->next;
+	} while (group != sd->groups);
+
+	if (!idlest || 100*this_load < imbalance*min_load)
+		return NULL;
+	return idlest;
+}
+
+/*
+ * find_idlest_cpu - find the idlest cpu among the cpus in the group.
+ */
+static int
+find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
+{
+	cpumask_t tmp;
+	unsigned long load, min_load = ULONG_MAX;
+	int idlest = -1;
+	int i;
+
+	/* Traverse only the allowed CPUs */
+	cpus_and(tmp, group->cpumask, p->cpus_allowed);
+
+	for_each_cpu_mask(i, tmp) {
+		load = source_load(i, 0);
+
+		if (load < min_load || (load == min_load && i == this_cpu)) {
+			min_load = load;
+			idlest = i;
+		}
+	}
+
+	return idlest;
+}
+
+/*
+ * sched_balance_self: balance the current task (running on cpu) in domains
+ * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
+ * SD_BALANCE_EXEC.
+ *
+ * Balance, ie. select the least loaded group.
+ *
+ * Returns the target CPU number, or the same CPU if no balancing is needed.
+ *
+ * preempt must be disabled.
+ */
+static int sched_balance_self(int cpu, int flag)
+{
+	struct task_struct *t = current;
+	struct sched_domain *tmp, *sd = NULL;
+
+	for_each_domain(cpu, tmp)
+		if (tmp->flags & flag)
+			sd = tmp;
+
+	while (sd) {
+		cpumask_t span;
+		struct sched_group *group;
+		int new_cpu;
+		int weight;
+
+		span = sd->span;
+		group = find_idlest_group(sd, t, cpu);
+		if (!group)
+			goto nextlevel;
+
+		new_cpu = find_idlest_cpu(group, t, cpu);
+		if (new_cpu == -1 || new_cpu == cpu)
+			goto nextlevel;
+
+		/* Now try balancing at a lower domain level */
+		cpu = new_cpu;
+nextlevel:
+		sd = NULL;
+		weight = cpus_weight(span);
+		for_each_domain(cpu, tmp) {
+			if (weight <= cpus_weight(tmp->span))
+				break;
+			if (tmp->flags & flag)
+				sd = tmp;
+		}
+		/* while loop will break here if sd == NULL */
+	}
+
+	return cpu;
+}
+
+#endif /* CONFIG_SMP */
+
+/*
+ * wake_idle() will wake a task on an idle cpu if task->cpu is
+ * not idle and an idle cpu is available.  The span of cpus to
+ * search starts with cpus closest then further out as needed,
+ * so we always favor a closer, idle cpu.
+ *
+ * Returns the CPU we should wake onto.
+ */
+#if defined(ARCH_HAS_SCHED_WAKE_IDLE)
+static int wake_idle(int cpu, task_t *p)
+{
+	cpumask_t tmp;
+	struct sched_domain *sd;
+	int i;
+
+	if (idle_cpu(cpu))
+		return cpu;
+
+	for_each_domain(cpu, sd) {
+		if (sd->flags & SD_WAKE_IDLE) {
+			cpus_and(tmp, sd->span, p->cpus_allowed);
+			for_each_cpu_mask(i, tmp) {
+				if (idle_cpu(i))
+					return i;
+			}
+		} else
+			break;
+	}
+	return cpu;
+}
+#else
+static inline int wake_idle(int cpu, task_t *p)
+{
+	return cpu;
+}
+#endif
+
+/***
+ * try_to_wake_up - wake up a thread
+ * @p: the to-be-woken-up thread
+ * @state: the mask of task states that can be woken
+ * @sync: do a synchronous wakeup?
+ *
+ * Put it on the run-queue if it's not already there. The "current"
+ * thread is always on the run-queue (except when the actual
+ * re-schedule is in progress), and as such you're allowed to do
+ * the simpler "current->state = TASK_RUNNING" to mark yourself
+ * runnable without the overhead of this.
+ *
+ * returns failure only if the task is already active.
+ */
+static int try_to_wake_up(task_t *p, unsigned int state, int sync)
+{
+	int cpu, this_cpu, success = 0;
+	unsigned long flags;
+	long old_state;
+	runqueue_t *rq;
+#ifdef CONFIG_SMP
+	unsigned long load, this_load;
+	struct sched_domain *sd, *this_sd = NULL;
+	int new_cpu;
+#endif
+
+	rq = task_rq_lock(p, &flags);
+	old_state = p->state;
+	if (!(old_state & state))
+		goto out;
+
+	if (p->array)
+		goto out_running;
+
+	cpu = task_cpu(p);
+	this_cpu = smp_processor_id();
+
+#ifdef CONFIG_SMP
+	if (unlikely(task_running(rq, p)))
+		goto out_activate;
+
+	new_cpu = cpu;
+
+	schedstat_inc(rq, ttwu_cnt);
+	if (cpu == this_cpu) {
+		schedstat_inc(rq, ttwu_local);
+		goto out_set_cpu;
+	}
+
+	for_each_domain(this_cpu, sd) {
+		if (cpu_isset(cpu, sd->span)) {
+			schedstat_inc(sd, ttwu_wake_remote);
+			this_sd = sd;
+			break;
+		}
+	}
+
+	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
+		goto out_set_cpu;
+
+	/*
+	 * Check for affine wakeup and passive balancing possibilities.
+	 */
+	if (this_sd) {
+		int idx = this_sd->wake_idx;
+		unsigned int imbalance;
+
+		imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
+
+		load = source_load(cpu, idx);
+		this_load = target_load(this_cpu, idx);
+
+		new_cpu = this_cpu; /* Wake to this CPU if we can */
+
+		if (this_sd->flags & SD_WAKE_AFFINE) {
+			unsigned long tl = this_load;
+			unsigned long tl_per_task = cpu_avg_load_per_task(this_cpu);
+
+			/*
+			 * If sync wakeup then subtract the (maximum possible)
+			 * effect of the currently running task from the load
+			 * of the current CPU:
+			 */
+			if (sync)
+				tl -= current->load_weight;
+
+			if ((tl <= load &&
+				tl + target_load(cpu, idx) <= tl_per_task) ||
+				100*(tl + p->load_weight) <= imbalance*load) {
+				/*
+				 * This domain has SD_WAKE_AFFINE and
+				 * p is cache cold in this domain, and
+				 * there is no bad imbalance.
+				 */
+				schedstat_inc(this_sd, ttwu_move_affine);
+				goto out_set_cpu;
+			}
+		}
+
+		/*
+		 * Start passive balancing when half the imbalance_pct
+		 * limit is reached.
+		 */
+		if (this_sd->flags & SD_WAKE_BALANCE) {
+			if (imbalance*this_load <= 100*load) {
+				schedstat_inc(this_sd, ttwu_move_balance);
+				goto out_set_cpu;
+			}
+		}
+	}
+
+	new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
+out_set_cpu:
+	new_cpu = wake_idle(new_cpu, p);
+	if (new_cpu != cpu) {
+		set_task_cpu(p, new_cpu);
+		task_rq_unlock(rq, &flags);
+		/* might preempt at this point */
+		rq = task_rq_lock(p, &flags);
+		old_state = p->state;
+		if (!(old_state & state))
+			goto out;
+		if (p->array)
+			goto out_running;
+
+		this_cpu = smp_processor_id();
+		cpu = task_cpu(p);
+	}
+
+out_activate:
+#endif /* CONFIG_SMP */
+	if (old_state == TASK_UNINTERRUPTIBLE) {
+		rq->nr_uninterruptible--;
+		/*
+		 * Tasks waking from uninterruptible sleep are likely
+		 * to be sleeping involuntarily on I/O and are otherwise
+		 * cpu bound so label them as noninteractive.
+		 */
+		p->sleep_type = SLEEP_NONINTERACTIVE;
+	} else {
+		/*
+		 * Tasks that have marked their sleep as noninteractive get
+		 * woken up with their sleep average not weighted in an
+		 * interactive way.
+		 */
+		if (old_state & TASK_NONINTERACTIVE)
+			p->sleep_type = SLEEP_NONINTERACTIVE;
+	}
+
+	activate_task(p, rq, cpu == this_cpu);
+	/*
+	 * Sync wakeups (i.e. those types of wakeups where the waker
+	 * has indicated that it will leave the CPU in short order)
+	 * don't trigger a preemption, if the woken up task will run on
+	 * this cpu. (in this case the 'I will reschedule' promise of
+	 * the waker guarantees that the freshly woken up task is going
+	 * to be considered on this CPU.)
+	 */
+	if (!sync || cpu != this_cpu) {
+		if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+	}
+	success = 1;
+
+out_running:
+	p->state = TASK_RUNNING;
+out:
+	task_rq_unlock(rq, &flags);
+
+	return success;
+}
+
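+/*
+ * wake_up_process - wake a task out of any of the normal stopped or
+ * sleeping states.  Returns non-zero if the task actually had to be
+ * woken up.
+ */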
+int fastcall wake_up_process(task_t *p)
+{
+	return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
+				 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+}
+
+EXPORT_SYMBOL(wake_up_process);
+
+int fastcall wake_up_state(task_t *p, unsigned int state)
+{
+	return try_to_wake_up(p, state, 0);
+}
+
+/*
+ * Perform scheduler related setup for a newly forked process p.
+ * p is forked by current.
+ */
+void fastcall sched_fork(task_t *p, int clone_flags)
+{
+	int cpu = get_cpu();
+
+#ifdef CONFIG_SMP
+	cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
+#endif
+	set_task_cpu(p, cpu);
+
+	/*
+	 * We mark the process as running here, but have not actually
+	 * inserted it onto the runqueue yet. This guarantees that
+	 * nobody will actually run it, and a signal or other external
+	 * event cannot wake it up and insert it on the runqueue either.
+	 */
+	p->state = TASK_RUNNING;
+	INIT_LIST_HEAD(&p->run_list);
+	p->array = NULL;
+#ifdef CONFIG_SCHEDSTATS
+	memset(&p->sched_info, 0, sizeof(p->sched_info));
+#endif
+#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+	p->oncpu = 0;
+#endif
+#ifdef CONFIG_PREEMPT
+	/* Want to start with kernel preemption disabled. */
+	task_thread_info(p)->preempt_count = 1;
+#endif
+	/*
+	 * Share the timeslice between parent and child, thus the
+	 * total amount of pending timeslices in the system doesn't change,
+	 * resulting in more scheduling fairness.
+	 */
+	local_irq_disable();
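+	/*
+	 * irqs off so the timer tick cannot update current->time_slice
+	 * while it is being split between parent and child below.
+	 */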
+	p->time_slice = (current->time_slice + 1) >> 1;
+	/*
+	 * The remainder of the first timeslice might be recovered by
+	 * the parent if the child exits early enough.
+	 */
+	p->first_time_slice = 1;
+	current->time_slice >>= 1;
+	p->timestamp = sched_clock();
+	if (unlikely(!current->time_slice)) {
+		/*
+		 * This case is rare, it happens when the parent has only
+		 * a single jiffy left from its timeslice. Taking the
+		 * runqueue lock is not a problem.
+		 */
+		current->time_slice = 1;
+		scheduler_tick();
+	}
+	local_irq_enable();
+	put_cpu();
+}
+
+/*
+ * wake_up_new_task - wake up a newly created task for the first time.
+ *
+ * This function will do some initial scheduler statistics housekeeping
+ * that must be done for every newly created context, then puts the task
+ * on the runqueue and wakes it.
+ */
+void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
+{
+	unsigned long flags;
+	int this_cpu, cpu;
+	runqueue_t *rq, *this_rq;
+
+	rq = task_rq_lock(p, &flags);
+	BUG_ON(p->state != TASK_RUNNING);
+	this_cpu = smp_processor_id();
+	cpu = task_cpu(p);
+
+	/*
+	 * We decrease the sleep average of forking parents
+	 * and children as well, to keep max-interactive tasks
+	 * from forking tasks that are max-interactive. The parent
+	 * (current) is done further down, under its lock.
+	 */
+	p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
+		CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
+
+	p->prio = effective_prio(p);
+
+	if (likely(cpu == this_cpu)) {
+		if (!(clone_flags & CLONE_VM)) {
+			/*
+			 * The VM isn't cloned, so we're in a good position to
+			 * do child-runs-first in anticipation of an exec. This
+			 * usually avoids a lot of COW overhead.
+			 */
+			if (unlikely(!current->array))
+				__activate_task(p, rq);
+			else {
+				p->prio = current->prio;
+				list_add_tail(&p->run_list, &current->run_list);
+				p->array = current->array;
+				p->array->nr_active++;
+				inc_nr_running(p, rq);
+			}
+			set_need_resched();
+		} else
+			/* Run child last */
+			__activate_task(p, rq);
+		/*
+		 * We skip the following code due to cpu == this_cpu
+	 	 *
+		 *   task_rq_unlock(rq, &flags);
+		 *   this_rq = task_rq_lock(current, &flags);
+		 */
+		this_rq = rq;
+	} else {
+		this_rq = cpu_rq(this_cpu);
+
+		/*
+		 * Not the local CPU - must adjust timestamp. This should
+		 * get optimised away in the !CONFIG_SMP case.
+		 */
+		p->timestamp = (p->timestamp - this_rq->timestamp_last_tick)
+					+ rq->timestamp_last_tick;
+		__activate_task(p, rq);
+		if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+
+		/*
+		 * Parent and child are on different CPUs, now get the
+		 * parent runqueue to update the parent's ->sleep_avg:
+		 */
+		task_rq_unlock(rq, &flags);
+		this_rq = task_rq_lock(current, &flags);
+	}
+	current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
+		PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
+	task_rq_unlock(this_rq, &flags);
+}
+
+/*
+ * Potentially available exiting-child timeslices are
+ * retrieved here - this way the parent does not get
+ * penalized for creating too many threads.
+ *
+ * (this cannot be used to 'generate' timeslices
+ * artificially, because any timeslice recovered here
+ * was given away by the parent in the first place.)
+ */
+void fastcall sched_exit(task_t *p)
+{
+	unsigned long flags;
+	runqueue_t *rq;
+
+	/*
+	 * If the child was a (relative-) CPU hog then decrease
+	 * the sleep_avg of the parent as well.
+	 */
+	rq = task_rq_lock(p->parent, &flags);
+	if (p->first_time_slice && task_cpu(p) == task_cpu(p->parent)) {
+		p->parent->time_slice += p->time_slice;
+		if (unlikely(p->parent->time_slice > task_timeslice(p)))
+			p->parent->time_slice = task_timeslice(p);
+	}
+	if (p->sleep_avg < p->parent->sleep_avg)
+		p->parent->sleep_avg = p->parent->sleep_avg /
+		(EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg /
+		(EXIT_WEIGHT + 1);
+	task_rq_unlock(rq, &flags);
+}
+
+/**
+ * prepare_task_switch - prepare to switch tasks
+ * @rq: the runqueue preparing to switch
+ * @next: the task we are going to switch to.
+ *
+ * This is called with the rq lock held and interrupts off. It must
+ * be paired with a subsequent finish_task_switch after the context
+ * switch.
+ *
+ * prepare_task_switch sets up locking and calls architecture specific
+ * hooks.
+ */
+static inline void prepare_task_switch(runqueue_t *rq, task_t *next)
+{
+	prepare_lock_switch(rq, next);
+	prepare_arch_switch(next);
+}
+
+/**
+ * finish_task_switch - clean up after a task-switch
+ * @rq: runqueue associated with task-switch
+ * @prev: the thread we just switched away from.
+ *
+ * finish_task_switch must be called after the context switch, paired
+ * with a prepare_task_switch call before the context switch.
+ * finish_task_switch will reconcile locking set up by prepare_task_switch,
+ * and do any other architecture-specific cleanup actions.
+ *
+ * Note that we may have delayed dropping an mm in context_switch(). If
+ * so, we finish that here outside of the runqueue lock.  (Doing it
+ * with the lock held can cause deadlocks; see schedule() for
+ * details.)
+ */
+static inline void finish_task_switch(runqueue_t *rq, task_t *prev)
+	__releases(rq->lock)
+{
+	struct mm_struct *mm = rq->prev_mm;
+	unsigned long prev_task_flags;
+
+	rq->prev_mm = NULL;
+
+	/*
+	 * A task struct has one reference for the use as "current".
+	 * If a task dies, then it sets EXIT_ZOMBIE in tsk->exit_state and
+	 * calls schedule one last time. The schedule call will never return,
+	 * and the scheduled task must drop that reference.
+	 * The test for EXIT_ZOMBIE must occur while the runqueue locks are
+	 * still held, otherwise prev could be scheduled on another cpu, die
+	 * there before we look at prev->state, and then the reference would
+	 * be dropped twice.
+	 *		Manfred Spraul <manfred@colorfullife.com>
+	 */
+	prev_task_flags = prev->flags;
+	finish_arch_switch(prev);
+	finish_lock_switch(rq, prev);
+	if (mm)
+		mmdrop(mm);
+	if (unlikely(prev_task_flags & PF_DEAD)) {
+		/*
+		 * Remove function-return probe instances associated with this
+		 * task and put them back on the free list.
+	 	 */
+		kprobe_flush_task(prev);
+		put_task_struct(prev);
+	}
+}
+
+/**
+ * schedule_tail - first thing a freshly forked thread must call.
+ * @prev: the thread we just switched away from.
+ */
+asmlinkage void schedule_tail(task_t *prev)
+	__releases(rq->lock)
+{
+	runqueue_t *rq = this_rq();
+	finish_task_switch(rq, prev);
+#ifdef __ARCH_WANT_UNLOCKED_CTXSW
+	/* In this case, finish_task_switch does not reenable preemption */
+	preempt_enable();
+#endif
+	if (current->set_child_tid)
+		put_user(current->pid, current->set_child_tid);
+}
+
+/*
+ * context_switch - switch to the new MM and the new
+ * thread's register state.
+ */
+static inline
+task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
+{
+	struct mm_struct *mm = next->mm;
+	struct mm_struct *oldmm = prev->active_mm;
+
+	if (unlikely(!mm)) {
+		next->active_mm = oldmm;
+		atomic_inc(&oldmm->mm_count);
+		enter_lazy_tlb(oldmm, next);
+	} else
+		switch_mm(oldmm, mm, next);
+
+	if (unlikely(!prev->mm)) {
+		prev->active_mm = NULL;
+		WARN_ON(rq->prev_mm);
+		rq->prev_mm = oldmm;
+	}
+
+	/* Here we just switch the register state and the stack. */
+	switch_to(prev, next, prev);
+
+	return prev;
+}
+
+/*
+ * nr_running, nr_uninterruptible and nr_context_switches:
+ *
+ * externally visible scheduler statistics: current number of runnable
+ * threads, current number of uninterruptible-sleeping threads, total
+ * number of context switches performed since bootup.
+ */
+unsigned long nr_running(void)
+{
+	unsigned long i, sum = 0;
+
+	for_each_online_cpu(i)
+		sum += cpu_rq(i)->nr_running;
+
+	return sum;
+}
+
+unsigned long nr_uninterruptible(void)
+{
+	unsigned long i, sum = 0;
+
+	for_each_cpu(i)
+		sum += cpu_rq(i)->nr_uninterruptible;
+
+	/*
+	 * Since we read the counters lockless, it might be slightly
+	 * inaccurate. Do not allow it to go below zero though:
+	 */
+	if (unlikely((long)sum < 0))
+		sum = 0;
+
+	return sum;
+}
+
+unsigned long long nr_context_switches(void)
+{
+	unsigned long long i, sum = 0;
+
+	for_each_cpu(i)
+		sum += cpu_rq(i)->nr_switches;
+
+	return sum;
+}
+
+unsigned long nr_iowait(void)
+{
+	unsigned long i, sum = 0;
+
+	for_each_cpu(i)
+		sum += atomic_read(&cpu_rq(i)->nr_iowait);
+
+	return sum;
+}
+
+#ifdef CONFIG_SMP
+
+/*
+ * double_rq_lock - safely lock two runqueues
+ *
+ * Note this does not disable interrupts like task_rq_lock,
+ * you need to do so manually before calling.
+ */
+static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
+	__acquires(rq1->lock)
+	__acquires(rq2->lock)
+{
+	if (rq1 == rq2) {
+		spin_lock(&rq1->lock);
+		__acquire(rq2->lock);	/* Fake it out ;) */
+	} else {
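+		/* Take the two locks in address order to avoid ABBA deadlock. */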
+		if (rq1 < rq2) {
+			spin_lock(&rq1->lock);
+			spin_lock(&rq2->lock);
+		} else {
+			spin_lock(&rq2->lock);
+			spin_lock(&rq1->lock);
+		}
+	}
+}
+
+/*
+ * double_rq_unlock - safely unlock two runqueues
+ *
+ * Note this does not restore interrupts like task_rq_unlock,
+ * you need to do so manually after calling.
+ */
+static void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
+	__releases(rq1->lock)
+	__releases(rq2->lock)
+{
+	spin_unlock(&rq1->lock);
+	if (rq1 != rq2)
+		spin_unlock(&rq2->lock);
+	else
+		__release(rq2->lock);
+}
+
+/*
+ * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
+ */
+static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)
+	__releases(this_rq->lock)
+	__acquires(busiest->lock)
+	__acquires(this_rq->lock)
+{
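+	/*
+	 * Trylock first; if that fails, fall back to the same address-based
+	 * ordering as double_rq_lock(), which may mean briefly dropping
+	 * this_rq->lock.
+	 */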
+	if (unlikely(!spin_trylock(&busiest->lock))) {
+		if (busiest < this_rq) {
+			spin_unlock(&this_rq->lock);
+			spin_lock(&busiest->lock);
+			spin_lock(&this_rq->lock);
+		} else
+			spin_lock(&busiest->lock);
+	}
+}
+
+/*
+ * If dest_cpu is allowed for this process, migrate the task to it.
+ * This is accomplished by forcing the cpu_allowed mask to only
+ * allow dest_cpu, which will force the cpu onto dest_cpu.  Then
+ * the cpu_allowed mask is restored.
+ */
+static void sched_migrate_task(task_t *p, int dest_cpu)
+{
+	migration_req_t req;
+	runqueue_t *rq;
+	unsigned long flags;
+
+	rq = task_rq_lock(p, &flags);
+	if (!cpu_isset(dest_cpu, p->cpus_allowed)
+	    || unlikely(cpu_is_offline(dest_cpu)))
+		goto out;
+
+	/* force the process onto the specified CPU */
+	if (migrate_task(p, dest_cpu, &req)) {
+		/* Need to wait for migration thread (might exit: take ref). */
+		struct task_struct *mt = rq->migration_thread;
+		get_task_struct(mt);
+		task_rq_unlock(rq, &flags);
+		wake_up_process(mt);
+		put_task_struct(mt);
+		wait_for_completion(&req.done);
+		return;
+	}
+out:
+	task_rq_unlock(rq, &flags);
+}
+
+/*
+ * sched_exec - execve() is a valuable balancing opportunity, because at
+ * this point the task has the smallest effective memory and cache footprint.
+ */
+void sched_exec(void)
+{
+	int new_cpu, this_cpu = get_cpu();
+	new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC);
+	put_cpu();
+	if (new_cpu != this_cpu)
+		sched_migrate_task(current, new_cpu);
+}
+
+/*
+ * pull_task - move a task from a remote runqueue to the local runqueue.
+ * Both runqueues must be locked.
+ */
+static
+void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
+	       runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
+{
+	dequeue_task(p, src_array);
+	dec_nr_running(p, src_rq);
+	set_task_cpu(p, this_cpu);
+	inc_nr_running(p, this_rq);
+	enqueue_task(p, this_array);
+	p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
+				+ this_rq->timestamp_last_tick;
+	/*
+	 * Note that idle threads have a prio of MAX_PRIO, for this test
+	 * to be always true for them.
+	 */
+	if (TASK_PREEMPTS_CURR(p, this_rq))
+		resched_task(this_rq->curr);
+}
+
+/*
+ * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
+ */
+static
+int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
+		     struct sched_domain *sd, enum idle_type idle,
+		     int *all_pinned)
+{
+	/*
+	 * We do not migrate tasks that are:
+	 * 1) running (obviously), or
+	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
+	 * 3) are cache-hot on their current CPU.
+	 */
+	if (!cpu_isset(this_cpu, p->cpus_allowed))
+		return 0;
+	*all_pinned = 0;
+
+	if (task_running(rq, p))
+		return 0;
+
+	/*
+	 * Aggressive migration if:
+	 * 1) task is cache cold, or
+	 * 2) too many balance attempts have failed.
+	 */
+
+	if (sd->nr_balance_failed > sd->cache_nice_tries)
+		return 1;
+
+	if (task_hot(p, rq->timestamp_last_tick, sd))
+		return 0;
+	return 1;
+}
+
+/*
+ * move_tasks tries to move up to max_nr_move tasks and max_load_move weighted
+ * load from busiest to this_rq, as part of a balancing operation within
+ * "domain". Returns the number of tasks moved.
+ *
+ * Called with both runqueues locked.
+ */
+static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
+		      unsigned long max_nr_move, unsigned long max_load_move,
+		      struct sched_domain *sd, enum idle_type idle,
+		      int *all_pinned)
+{
+	prio_array_t *array, *dst_array;
+	struct list_head *head, *curr;
+	int idx, pulled = 0, pinned = 0;
+	long rem_load_move;
+	task_t *tmp;
+
+	if (max_nr_move == 0 || max_load_move == 0)
+		goto out;
+
+	rem_load_move = max_load_move;
+	pinned = 1;
+
+	/*
+	 * We first consider expired tasks. Those will likely not be
+	 * executed in the near future, and they are most likely to
+	 * be cache-cold, thus switching CPUs has the least effect
+	 * on them.
+	 */
+	if (busiest->expired->nr_active) {
+		array = busiest->expired;
+		dst_array = this_rq->expired;
+	} else {
+		array = busiest->active;
+		dst_array = this_rq->active;
+	}
+
+new_array:
+	/* Start searching at priority 0: */
+	idx = 0;
+skip_bitmap:
+	if (!idx)
+		idx = sched_find_first_bit(array->bitmap);
+	else
+		idx = find_next_bit(array->bitmap, MAX_PRIO, idx);
+	if (idx >= MAX_PRIO) {
+		if (array == busiest->expired && busiest->active->nr_active) {
+			array = busiest->active;
+			dst_array = this_rq->active;
+			goto new_array;
+		}
+		goto out;
+	}
+
+	head = array->queue + idx;
+	curr = head->prev;
+skip_queue:
+	tmp = list_entry(curr, task_t, run_list);
+
+	curr = curr->prev;
+
+	if (tmp->load_weight > rem_load_move ||
+	    !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+
+#ifdef CONFIG_SCHEDSTATS
+	if (task_hot(tmp, busiest->timestamp_last_tick, sd))
+		schedstat_inc(sd, lb_hot_gained[idle]);
+#endif
+
+	pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
+	pulled++;
+	rem_load_move -= tmp->load_weight;
+
+	/*
+	 * We only want to steal up to the prescribed number of tasks
+	 * and the prescribed amount of weighted load.
+	 */
+	if (pulled < max_nr_move && rem_load_move > 0) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+out:
+	/*
+	 * Right now, this is the only place pull_task() is called,
+	 * so we can safely collect pull_task() stats here rather than
+	 * inside pull_task().
+	 */
+	schedstat_add(sd, lb_gained[idle], pulled);
+
+	if (all_pinned)
+		*all_pinned = pinned;
+	return pulled;
+}
+
+/*
+ * find_busiest_group finds and returns the busiest CPU group within the
+ * domain. It calculates and returns the amount of weighted load which should be
+ * moved to restore balance via the imbalance parameter.
+ */
+static struct sched_group *
+find_busiest_group(struct sched_domain *sd, int this_cpu,
+		   unsigned long *imbalance, enum idle_type idle, int *sd_idle)
+{
+	struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
+	unsigned long max_load, avg_load, total_load, this_load, total_pwr;
+	unsigned long max_pull;
+	unsigned long busiest_load_per_task, busiest_nr_running;
+	unsigned long this_load_per_task, this_nr_running;
+	int load_idx;
+
+	max_load = this_load = total_load = total_pwr = 0;
+	busiest_load_per_task = busiest_nr_running = 0;
+	this_load_per_task = this_nr_running = 0;
+	if (idle == NOT_IDLE)
+		load_idx = sd->busy_idx;
+	else if (idle == NEWLY_IDLE)
+		load_idx = sd->newidle_idx;
+	else
+		load_idx = sd->idle_idx;
+
+	do {
+		unsigned long load;
+		int local_group;
+		int i;
+		unsigned long sum_nr_running, sum_weighted_load;
+
+		local_group = cpu_isset(this_cpu, group->cpumask);
+
+		/* Tally up the load of all CPUs in the group */
+		sum_weighted_load = sum_nr_running = avg_load = 0;
+
+		for_each_cpu_mask(i, group->cpumask) {
+			runqueue_t *rq = cpu_rq(i);
+
+			if (*sd_idle && !idle_cpu(i))
+				*sd_idle = 0;
+
+			/* Bias balancing toward cpus of our domain */
+			if (local_group)
+				load = target_load(i, load_idx);
+			else
+				load = source_load(i, load_idx);
+
+			avg_load += load;
+			sum_nr_running += rq->nr_running;
+			sum_weighted_load += rq->raw_weighted_load;
+		}
+
+		total_load += avg_load;
+		total_pwr += group->cpu_power;
+
+		/* Adjust by relative CPU power of the group */
+		avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
+
+		if (local_group) {
+			this_load = avg_load;
+			this = group;
+			this_nr_running = sum_nr_running;
+			this_load_per_task = sum_weighted_load;
+		} else if (avg_load > max_load) {
+			max_load = avg_load;
+			busiest = group;
+			busiest_nr_running = sum_nr_running;
+			busiest_load_per_task = sum_weighted_load;
+		}
+		group = group->next;
+	} while (group != sd->groups);
+
+	if (!busiest || this_load >= max_load || busiest_nr_running <= 1)
+		goto out_balanced;
+
+	avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
+
+	if (this_load >= avg_load ||
+			100*max_load <= sd->imbalance_pct*this_load)
+		goto out_balanced;
+
+	busiest_load_per_task /= busiest_nr_running;
+	/*
+	 * We're trying to get all the cpus to the average_load, so we don't
+	 * want to push ourselves above the average load, nor do we wish to
+	 * reduce the max loaded cpu below the average load, as either of these
+	 * actions would just result in more rebalancing later, and ping-pong
+	 * tasks around. Thus we look for the minimum possible imbalance.
+	 * Negative imbalances (*we* are more loaded than anyone else) will
+	 * be counted as no imbalance for these purposes -- we can't fix that
+	 * by pulling tasks to us.  Be careful of negative numbers as they'll
+	 * appear as very large values with unsigned longs.
+	 */
+
+	/* Don't want to pull so many tasks that a group would go idle */
+	max_pull = min(max_load - avg_load, max_load - busiest_load_per_task);
+
+	/* How much load to actually move to equalise the imbalance */
+	*imbalance = min(max_pull * busiest->cpu_power,
+				(avg_load - this_load) * this->cpu_power)
+			/ SCHED_LOAD_SCALE;
+
+	/*
+	 * if *imbalance is less than the average load per runnable task
+	 * there is no guarantee that any tasks will be moved so we'll have
+	 * a think about bumping its value to force at least one task to be
+	 * moved
+	 */
+	if (*imbalance < busiest_load_per_task) {
+		unsigned long pwr_now = 0, pwr_move = 0;
+		unsigned long tmp;
+
+		if (max_load - this_load >= busiest_load_per_task*2) {
+			*imbalance = busiest_load_per_task;
+			return busiest;
+		}
+
+		/*
+		 * OK, we don't have enough imbalance to justify moving tasks,
+		 * however we may be able to increase total CPU power used by
+		 * moving them.
+		 */
+
+		pwr_now += busiest->cpu_power *
+			min(busiest_load_per_task, max_load);
+		if (this_nr_running)
+			this_load_per_task /= this_nr_running;
+		else
+			this_load_per_task = SCHED_LOAD_SCALE;
+		pwr_now += this->cpu_power *
+			min(this_load_per_task, this_load);
+		pwr_now /= SCHED_LOAD_SCALE;
+
+		/* Amount of load we'd subtract */
+		tmp = busiest_load_per_task*SCHED_LOAD_SCALE/busiest->cpu_power;
+		if (max_load > tmp)
+			pwr_move += busiest->cpu_power *
+				min(busiest_load_per_task, max_load - tmp);
+
+		/* Amount of load we'd add */
+		if (max_load*busiest->cpu_power <
+				busiest_load_per_task*SCHED_LOAD_SCALE)
+			tmp = max_load*busiest->cpu_power/this->cpu_power;
+		else
+			tmp = busiest_load_per_task*SCHED_LOAD_SCALE/this->cpu_power;
+		pwr_move += this->cpu_power*min(this_load_per_task, this_load + tmp);
+		pwr_move /= SCHED_LOAD_SCALE;
+
+		/*
+		 * Move if we gain throughput, or if there's a reasonable
+		 * chance that *imbalance is big enough to cause a move.
+		 */
+		if (pwr_move > pwr_now)
+			*imbalance = busiest_load_per_task;
+		else if (*imbalance <= busiest_load_per_task / 2)
+			goto out_balanced;
+	}
+
+	return busiest;
+
+out_balanced:
+
+	*imbalance = 0;
+	return NULL;
+}
+
+/*
+ * find_busiest_queue - find the busiest runqueue among the cpus in group.
+ */
+static runqueue_t *find_busiest_queue(struct sched_group *group,
+	enum idle_type idle)
+{
+	unsigned long load, max_load = 0;
+	runqueue_t *busiest = NULL;
+	int i;
+
+	for_each_cpu_mask(i, group->cpumask) {
+		load = source_load(i, 0);
+
+		if (load > max_load) {
+			max_load = load;
+			busiest = cpu_rq(i);
+		}
+	}
+
+	return busiest;
+}
+
+/*
+ * Max backoff if we encounter pinned tasks. Pretty arbitrary value; any
+ * sufficiently large value will do.
+ */
+#define MAX_PINNED_INTERVAL	512
+
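+/*
+ * minus_1_or_zero - used as the max_nr_move argument to move_tasks() so
+ * that balancing always leaves at least one task on the busiest runqueue
+ * (and never passes a negative count).
+ */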
+#define minus_1_or_zero(n) ((n) > 0 ? (n) - 1 : 0)
+/*
+ * Check this_cpu to ensure it is balanced within domain. Attempt to move
+ * tasks if there is an imbalance.
+ *
+ * Called with this_rq unlocked.
+ */
+static int load_balance(int this_cpu, runqueue_t *this_rq,
+			struct sched_domain *sd, enum idle_type idle)
+{
+	struct sched_group *group;
+	runqueue_t *busiest;
+	unsigned long imbalance;
+	int nr_moved, all_pinned = 0;
+	int active_balance = 0;
+	int sd_idle = 0;
+
+	if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER)
+		sd_idle = 1;
+
+	schedstat_inc(sd, lb_cnt[idle]);
+
+	group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle);
+	if (!group) {
+		schedstat_inc(sd, lb_nobusyg[idle]);
+		goto out_balanced;
+	}
+
+	busiest = find_busiest_queue(group, idle);
+	if (!busiest) {
+		schedstat_inc(sd, lb_nobusyq[idle]);
+		goto out_balanced;
+	}
+
+	BUG_ON(busiest == this_rq);
+
+	schedstat_add(sd, lb_imbalance[idle], imbalance);
+
+	nr_moved = 0;
+	if (busiest->nr_running > 1) {
+		/*
+		 * Attempt to move tasks. If find_busiest_group has found
+		 * an imbalance but busiest->nr_running <= 1, the group is
+		 * still unbalanced. nr_moved simply stays zero, so it is
+		 * correctly treated as an imbalance.
+		 */
+		double_rq_lock(this_rq, busiest);
+		nr_moved = move_tasks(this_rq, this_cpu, busiest,
+					minus_1_or_zero(busiest->nr_running),
+					imbalance, sd, idle, &all_pinned);
+		double_rq_unlock(this_rq, busiest);
+
+		/* All tasks on this runqueue were pinned by CPU affinity */
+		if (unlikely(all_pinned))
+			goto out_balanced;
+	}
+
+	if (!nr_moved) {
+		schedstat_inc(sd, lb_failed[idle]);
+		sd->nr_balance_failed++;
+
+		if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
+
+			spin_lock(&busiest->lock);
+
+			/* don't kick the migration_thread, if the curr
+			 * task on busiest cpu can't be moved to this_cpu
+			 */
+			if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
+				spin_unlock(&busiest->lock);
+				all_pinned = 1;
+				goto out_one_pinned;
+			}
+
+			if (!busiest->active_balance) {
+				busiest->active_balance = 1;
+				busiest->push_cpu = this_cpu;
+				active_balance = 1;
+			}
+			spin_unlock(&busiest->lock);
+			if (active_balance)
+				wake_up_process(busiest->migration_thread);
+
+			/*
+			 * We've kicked active balancing, reset the failure
+			 * counter.
+			 */
+			sd->nr_balance_failed = sd->cache_nice_tries+1;
+		}
+	} else
+		sd->nr_balance_failed = 0;
+
+	if (likely(!active_balance)) {
+		/* We were unbalanced, so reset the balancing interval */
+		sd->balance_interval = sd->min_interval;
+	} else {
+		/*
+		 * If we've begun active balancing, start to back off. This
+		 * case may not be covered by the all_pinned logic if there
+		 * is only 1 task on the busy runqueue (because we don't call
+		 * move_tasks).
+		 */
+		if (sd->balance_interval < sd->max_interval)
+			sd->balance_interval *= 2;
+	}
+
+	if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+		return -1;
+	return nr_moved;
+
+out_balanced:
+	schedstat_inc(sd, lb_balanced[idle]);
+
+	sd->nr_balance_failed = 0;
+
+out_one_pinned:
+	/* tune up the balancing interval */
+	if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
+			(sd->balance_interval < sd->max_interval))
+		sd->balance_interval *= 2;
+
+	if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+		return -1;
+	return 0;
+}
+
+/*
+ * Check this_cpu to ensure it is balanced within domain. Attempt to move
+ * tasks if there is an imbalance.
+ *
+ * Called from schedule when this_rq is about to become idle (NEWLY_IDLE).
+ * this_rq is locked.
+ */
+static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
+				struct sched_domain *sd)
+{
+	struct sched_group *group;
+	runqueue_t *busiest = NULL;
+	unsigned long imbalance;
+	int nr_moved = 0;
+	int sd_idle = 0;
+
+	if (sd->flags & SD_SHARE_CPUPOWER)
+		sd_idle = 1;
+
+	schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
+	group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE, &sd_idle);
+	if (!group) {
+		schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
+		goto out_balanced;
+	}
+
+	busiest = find_busiest_queue(group, NEWLY_IDLE);
+	if (!busiest) {
+		schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
+		goto out_balanced;
+	}
+
+	BUG_ON(busiest == this_rq);
+
+	schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
+
+	nr_moved = 0;
+	if (busiest->nr_running > 1) {
+		/* Attempt to move tasks */
+		double_lock_balance(this_rq, busiest);
+		nr_moved = move_tasks(this_rq, this_cpu, busiest,
+					minus_1_or_zero(busiest->nr_running),
+					imbalance, sd, NEWLY_IDLE, NULL);
+		spin_unlock(&busiest->lock);
+	}
+
+	if (!nr_moved) {
+		schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
+		if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+			return -1;
+	} else
+		sd->nr_balance_failed = 0;
+
+	return nr_moved;
+
+out_balanced:
+	schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
+	if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+		return -1;
+	sd->nr_balance_failed = 0;
+	return 0;
+}
+
+/*
+ * idle_balance is called by schedule() if this_cpu is about to become
+ * idle. Attempts to pull tasks from other CPUs.
+ */
+static void idle_balance(int this_cpu, runqueue_t *this_rq)
+{
+	struct sched_domain *sd;
+
+	for_each_domain(this_cpu, sd) {
+		if (sd->flags & SD_BALANCE_NEWIDLE) {
+			if (load_balance_newidle(this_cpu, this_rq, sd)) {
+				/* We've pulled tasks over so stop searching */
+				break;
+			}
+		}
+	}
+}
+
+/*
+ * active_load_balance is run by migration threads. It pushes running tasks
+ * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
+ * running on each physical CPU where possible, and avoids physical /
+ * logical imbalances.
+ *
+ * Called with busiest_rq locked.
+ */
+static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu)
+{
+	struct sched_domain *sd;
+	runqueue_t *target_rq;
+	int target_cpu = busiest_rq->push_cpu;
+
+	if (busiest_rq->nr_running <= 1)
+		/* no task to move */
+		return;
+
+	target_rq = cpu_rq(target_cpu);
+
+	/*
+	 * This condition is "impossible"; if it occurs
+	 * we need to fix it.  Originally reported by
+	 * Bjorn Helgaas on a 128-cpu setup.
+	 */
+	BUG_ON(busiest_rq == target_rq);
+
+	/* move a task from busiest_rq to target_rq */
+	double_lock_balance(busiest_rq, target_rq);
+
+	/* Search for an sd spanning us and the target CPU. */
+	for_each_domain(target_cpu, sd)
+		if ((sd->flags & SD_LOAD_BALANCE) &&
+			cpu_isset(busiest_cpu, sd->span))
+				break;
+
+	if (unlikely(sd == NULL))
+		goto out;
+
+	schedstat_inc(sd, alb_cnt);
+
+	if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
+			RTPRIO_TO_LOAD_WEIGHT(100), sd, SCHED_IDLE, NULL))
+		schedstat_inc(sd, alb_pushed);
+	else
+		schedstat_inc(sd, alb_failed);
+out:
+	spin_unlock(&target_rq->lock);
+}
+
+/*
+ * rebalance_tick will get called every timer tick, on every CPU.
+ *
+ * It checks each scheduling domain to see if it is due to be balanced,
+ * and initiates a balancing operation if so.
+ *
+ * Balancing parameters are set up in arch_init_sched_domains.
+ */
+
+/* Don't have all balancing operations going off at once */
+#define CPU_OFFSET(cpu) (HZ * cpu / NR_CPUS)
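+/*
+ * e.g. with HZ == 1000 and NR_CPUS == 4, cpu 1 checks its domains 250
+ * jiffies after cpu 0, cpu 2 another 250 jiffies later, and so on.
+ */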
+
+static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
+			   enum idle_type idle)
+{
+	unsigned long old_load, this_load;
+	unsigned long j = jiffies + CPU_OFFSET(this_cpu);
+	struct sched_domain *sd;
+	int i;
+
+	this_load = this_rq->raw_weighted_load;
+	/* Update our load */
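+	/*
+	 * cpu_load[i] is a damped average of raw_weighted_load: each tick
+	 * it moves 1/2^i of the remaining distance towards the current
+	 * value, so higher indices react more slowly.
+	 */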
+	for (i = 0; i < 3; i++) {
+		unsigned long new_load = this_load;
+		int scale = 1 << i;
+		old_load = this_rq->cpu_load[i];
+		/*
+		 * Round up the averaging division if load is increasing. This
+		 * prevents us from getting stuck on 9 if the load is 10, for
+		 * example.
+		 */
+		if (new_load > old_load)
+			new_load += scale-1;
+		this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) / scale;
+	}
+
+	for_each_domain(this_cpu, sd) {
+		unsigned long interval;
+
+		if (!(sd->flags & SD_LOAD_BALANCE))
+			continue;
+
+		interval = sd->balance_interval;
+		if (idle != SCHED_IDLE)
+			interval *= sd->busy_factor;
+
+		/* scale ms to jiffies */
+		interval = msecs_to_jiffies(interval);
+		if (unlikely(!interval))
+			interval = 1;
+
+		if (j - sd->last_balance >= interval) {
+			if (load_balance(this_cpu, this_rq, sd, idle)) {
+				/*
+				 * We've pulled tasks over so either we're no
+				 * longer idle, or one of our SMT siblings is
+				 * not idle.
+				 */
+				idle = NOT_IDLE;
+			}
+			sd->last_balance += interval;
+		}
+	}
+}
+#else
+/*
+ * on UP we do not need to balance between CPUs:
+ */
+static inline void rebalance_tick(int cpu, runqueue_t *rq, enum idle_type idle)
+{
+}
+static inline void idle_balance(int cpu, runqueue_t *rq)
+{
+}
+#endif
+
+static inline int wake_priority_sleeper(runqueue_t *rq)
+{
+	int ret = 0;
+#ifdef CONFIG_SCHED_SMT
+	spin_lock(&rq->lock);
+	/*
+	 * If an SMT sibling task has been put to sleep for priority
+	 * reasons, reschedule the idle task to see if it can now run.
+	 */
+	if (rq->nr_running) {
+		resched_task(rq->idle);
+		ret = 1;
+	}
+	spin_unlock(&rq->lock);
+#endif
+	return ret;
+}
+
+DEFINE_PER_CPU(struct kernel_stat, kstat);
+
+EXPORT_PER_CPU_SYMBOL(kstat);
+
+/*
+ * This is called on clock ticks and on context switches.
+ * Bank in p->sched_time the ns elapsed since the last tick or switch.
+ */
+static inline void update_cpu_clock(task_t *p, runqueue_t *rq,
+				    unsigned long long now)
+{
+	unsigned long long last = max(p->timestamp, rq->timestamp_last_tick);
+	p->sched_time += now - last;
+}
+
+/*
+ * Return current->sched_time plus any more ns on the sched_clock
+ * that have not yet been banked.
+ */
+unsigned long long current_sched_time(const task_t *tsk)
+{
+	unsigned long long ns;
+	unsigned long flags;
+	local_irq_save(flags);
+	ns = max(tsk->timestamp, task_rq(tsk)->timestamp_last_tick);
+	ns = tsk->sched_time + (sched_clock() - ns);
+	local_irq_restore(flags);
+	return ns;
+}
+
+/*
+ * We place interactive tasks back into the active array, if possible.
+ *
+ * To guarantee that this does not starve expired tasks we ignore the
+ * interactivity of a task if the first expired task had to wait more
+ * than a 'reasonable' amount of time. This deadline timeout is
+ * load-dependent, as the frequency of array switches decreases with
+ * increasing number of running tasks. We also ignore the interactivity
+ * if a better static_prio task has expired:
+ */
+#define EXPIRED_STARVING(rq) \
+	((STARVATION_LIMIT && ((rq)->expired_timestamp && \
+		(jiffies - (rq)->expired_timestamp >= \
+			STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
+			((rq)->curr->static_prio > (rq)->best_expired_prio))
+
+/*
+ * Account user cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+ * @cputime: the cpu time spent in user space since the last update
+ */
+void account_user_time(struct task_struct *p, cputime_t cputime)
+{
+	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+	cputime64_t tmp;
+
+	p->utime = cputime_add(p->utime, cputime);
+
+	/* Add user time to cpustat. */
+	tmp = cputime_to_cputime64(cputime);
+	if (TASK_NICE(p) > 0)
+		cpustat->nice = cputime64_add(cpustat->nice, tmp);
+	else
+		cpustat->user = cputime64_add(cpustat->user, tmp);
+}
+
+/*
+ * Account system cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+ * @hardirq_offset: the offset to subtract from hardirq_count()
+ * @cputime: the cpu time spent in kernel space since the last update
+ */
+void account_system_time(struct task_struct *p, int hardirq_offset,
+			 cputime_t cputime)
+{
+	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+	runqueue_t *rq = this_rq();
+	cputime64_t tmp;
+
+	p->stime = cputime_add(p->stime, cputime);
+
+	/* Add system time to cpustat. */
+	tmp = cputime_to_cputime64(cputime);
+	if (hardirq_count() - hardirq_offset)
+		cpustat->irq = cputime64_add(cpustat->irq, tmp);
+	else if (softirq_count())
+		cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
+	else if (p != rq->idle)
+		cpustat->system = cputime64_add(cpustat->system, tmp);
+	else if (atomic_read(&rq->nr_iowait) > 0)
+		cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
+	else
+		cpustat->idle = cputime64_add(cpustat->idle, tmp);
+	/* Account for system time used */
+	acct_update_integrals(p);
+}
+
+/*
+ * Account for involuntary wait time.
+ * @p: the process from which the cpu time has been stolen
+ * @steal: the cpu time spent in involuntary wait
+ */
+void account_steal_time(struct task_struct *p, cputime_t steal)
+{
+	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+	cputime64_t tmp = cputime_to_cputime64(steal);
+	runqueue_t *rq = this_rq();
+
+	if (p == rq->idle) {
+		p->stime = cputime_add(p->stime, steal);
+		if (atomic_read(&rq->nr_iowait) > 0)
+			cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
+		else
+			cpustat->idle = cputime64_add(cpustat->idle, tmp);
+	} else
+		cpustat->steal = cputime64_add(cpustat->steal, tmp);
+}
+
+/*
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled.
+ *
+ * It also gets called by the fork code, when changing the parent's
+ * timeslices.
+ */
+void scheduler_tick(void)
+{
+	int cpu = smp_processor_id();
+	runqueue_t *rq = this_rq();
+	task_t *p = current;
+	unsigned long long now = sched_clock();
+
+	update_cpu_clock(p, rq, now);
+
+	rq->timestamp_last_tick = now;
+
+	if (p == rq->idle) {
+		if (wake_priority_sleeper(rq))
+			goto out;
+		rebalance_tick(cpu, rq, SCHED_IDLE);
+		return;
+	}
+
+	/* Task might have expired already, but not scheduled off yet */
+	if (p->array != rq->active) {
+		set_tsk_need_resched(p);
+		goto out;
+	}
+	spin_lock(&rq->lock);
+	/*
+	 * The task was running during this tick - update the
+	 * time slice counter. Note: we do not update a thread's
+	 * priority until it either goes to sleep or uses up its
+	 * timeslice. This makes it possible for interactive tasks
+	 * to use up their timeslices at their highest priority levels.
+	 */
+	if (rt_task(p)) {
+		/*
+		 * RR tasks need a special form of timeslice management.
+		 * FIFO tasks have no timeslices.
+		 */
+		if ((p->policy == SCHED_RR) && !--p->time_slice) {
+			p->time_slice = task_timeslice(p);
+			p->first_time_slice = 0;
+			set_tsk_need_resched(p);
+
+			/* put it at the end of the queue: */
+			requeue_task(p, rq->active);
+		}
+		goto out_unlock;
+	}
+	if (!--p->time_slice) {
+		dequeue_task(p, rq->active);
+		set_tsk_need_resched(p);
+		p->prio = effective_prio(p);
+		p->time_slice = task_timeslice(p);
+		p->first_time_slice = 0;
+
+		if (!rq->expired_timestamp)
+			rq->expired_timestamp = jiffies;
+		if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
+			enqueue_task(p, rq->expired);
+			if (p->static_prio < rq->best_expired_prio)
+				rq->best_expired_prio = p->static_prio;
+		} else
+			enqueue_task(p, rq->active);
+	} else {
+		/*
+		 * Prevent a too long timeslice allowing a task to monopolize
+		 * the CPU. We do this by splitting up the timeslice into
+		 * smaller pieces.
+		 *
+		 * Note: this does not mean the task's timeslices expire or
+		 * get lost in any way, they just might be preempted by
+		 * another task of equal priority. (one with higher
+		 * priority would have preempted this task already.) We
+		 * requeue this task to the end of the list on this priority
+		 * level, which is in essence a round-robin of tasks with
+		 * equal priority.
+		 *
+		 * This only applies to tasks in the interactive
+		 * delta range with at least TIMESLICE_GRANULARITY to requeue.
+		 */
+		if (TASK_INTERACTIVE(p) && !((task_timeslice(p) -
+			p->time_slice) % TIMESLICE_GRANULARITY(p)) &&
+			(p->time_slice >= TIMESLICE_GRANULARITY(p)) &&
+			(p->array == rq->active)) {
+
+			requeue_task(p, rq->active);
+			set_tsk_need_resched(p);
+		}
+	}
+out_unlock:
+	spin_unlock(&rq->lock);
+out:
+	rebalance_tick(cpu, rq, NOT_IDLE);
+}
+
+#ifdef CONFIG_SCHED_SMT
+static inline void wakeup_busy_runqueue(runqueue_t *rq)
+{
+	/* If an SMT runqueue is sleeping due to priority reasons wake it up */
+	if (rq->curr == rq->idle && rq->nr_running)
+		resched_task(rq->idle);
+}
+
+static void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
+{
+	struct sched_domain *tmp, *sd = NULL;
+	cpumask_t sibling_map;
+	int i;
+
+	for_each_domain(this_cpu, tmp)
+		if (tmp->flags & SD_SHARE_CPUPOWER)
+			sd = tmp;
+
+	if (!sd)
+		return;
+
+	/*
+	 * Unlock the current runqueue because we have to lock in
+	 * CPU order to avoid deadlocks. Caller knows that we might
+	 * unlock. We keep IRQs disabled.
+	 */
+	spin_unlock(&this_rq->lock);
+
+	sibling_map = sd->span;
+
+	for_each_cpu_mask(i, sibling_map)
+		spin_lock(&cpu_rq(i)->lock);
+	/*
+	 * We clear this CPU from the mask. This both simplifies the
+	 * inner loop and keeps this_rq locked when we exit:
+	 */
+	cpu_clear(this_cpu, sibling_map);
+
+	for_each_cpu_mask(i, sibling_map) {
+		runqueue_t *smt_rq = cpu_rq(i);
+
+		wakeup_busy_runqueue(smt_rq);
+	}
+
+	for_each_cpu_mask(i, sibling_map)
+		spin_unlock(&cpu_rq(i)->lock);
+	/*
+	 * We exit with this_cpu's rq still held and IRQs
+	 * still disabled:
+	 */
+}
+
+/*
+ * number of 'lost' timeslices this task won't be able to fully
+ * utilize, if another task runs on a sibling. This models the
+ * slowdown effect of other tasks running on siblings:
+ */
+static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd)
+{
+	return p->time_slice * (100 - sd->per_cpu_gain) / 100;
+}
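+
+/*
+ * Example with assumed numbers (not taken from this patch): with
+ * sd->per_cpu_gain == 25 and p->time_slice == 100, smt_slice() returns
+ * 100 * (100 - 25) / 100 == 75, i.e. roughly three quarters of the
+ * timeslice is treated as lost to the busy sibling.
+ */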
+
+static int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
+{
+	struct sched_domain *tmp, *sd = NULL;
+	cpumask_t sibling_map;
+	prio_array_t *array;
+	int ret = 0, i;
+	task_t *p;
+
+	for_each_domain(this_cpu, tmp)
+		if (tmp->flags & SD_SHARE_CPUPOWER)
+			sd = tmp;
+
+	if (!sd)
+		return 0;
+
+	/*
+	 * The same locking rules and details apply as for
+	 * wake_sleeping_dependent():
+	 */
+	spin_unlock(&this_rq->lock);
+	sibling_map = sd->span;
+	for_each_cpu_mask(i, sibling_map)
+		spin_lock(&cpu_rq(i)->lock);
+	cpu_clear(this_cpu, sibling_map);
+
+	/*
+	 * Establish next task to be run - it might have gone away because
+	 * we released the runqueue lock above:
+	 */
+	if (!this_rq->nr_running)
+		goto out_unlock;
+	array = this_rq->active;
+	if (!array->nr_active)
+		array = this_rq->expired;
+	BUG_ON(!array->nr_active);
+
+	p = list_entry(array->queue[sched_find_first_bit(array->bitmap)].next,
+		task_t, run_list);
+
+	for_each_cpu_mask(i, sibling_map) {
+		runqueue_t *smt_rq = cpu_rq(i);
+		task_t *smt_curr = smt_rq->curr;
+
+		/* Kernel threads do not participate in dependent sleeping */
+		if (!p->mm || !smt_curr->mm || rt_task(p))
+			goto check_smt_task;
+
+		/*
+		 * If a user task with lower static priority than the
+		 * running task on the SMT sibling is trying to schedule,
+		 * delay it till there is proportionately less timeslice
+		 * left of the sibling task to prevent a lower priority
+		 * task from using an unfair proportion of the
+		 * physical cpu's resources. -ck
+		 */
+		if (rt_task(smt_curr)) {
+			/*
+			 * With real time tasks we run non-rt tasks only
+			 * per_cpu_gain% of the time.
+			 */
+			if ((jiffies % DEF_TIMESLICE) >
+				(sd->per_cpu_gain * DEF_TIMESLICE / 100))
+					ret = 1;
+		} else
+			if (smt_curr->static_prio < p->static_prio &&
+				!TASK_PREEMPTS_CURR(p, smt_rq) &&
+				smt_slice(smt_curr, sd) > task_timeslice(p))
+					ret = 1;
+
+check_smt_task:
+		if ((!smt_curr->mm && smt_curr != smt_rq->idle) ||
+			rt_task(smt_curr))
+				continue;
+		if (!p->mm) {
+			wakeup_busy_runqueue(smt_rq);
+			continue;
+		}
+
+		/*
+		 * Reschedule a lower priority task on the SMT sibling for
+		 * it to be put to sleep, or wake it up if it has been put to
+		 * sleep for priority reasons to see if it should run now.
+		 */
+		if (rt_task(p)) {
+			if ((jiffies % DEF_TIMESLICE) >
+				(sd->per_cpu_gain * DEF_TIMESLICE / 100))
+					resched_task(smt_curr);
+		} else {
+			if (TASK_PREEMPTS_CURR(p, smt_rq) &&
+				smt_slice(p, sd) > task_timeslice(smt_curr))
+					resched_task(smt_curr);
+			else
+				wakeup_busy_runqueue(smt_rq);
+		}
+	}
+out_unlock:
+	for_each_cpu_mask(i, sibling_map)
+		spin_unlock(&cpu_rq(i)->lock);
+	return ret;
+}
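+
+/*
+ * Numeric illustration of the jiffies window used in dependent_sleeper()
+ * above (values assumed): with sd->per_cpu_gain == 25 and
+ * DEF_TIMESLICE == 100 jiffies, a non-RT task sharing the package with
+ * an RT sibling is only allowed to run while jiffies % 100 <= 25, i.e.
+ * for roughly a quarter of every 100-jiffy window.
+ */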
+#else
+static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
+{
+}
+
+static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
+{
+	return 0;
+}
+#endif
+
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
+
+void fastcall add_preempt_count(int val)
+{
+	/*
+	 * Underflow?
+	 */
+	BUG_ON((preempt_count() < 0));
+	preempt_count() += val;
+	/*
+	 * Spinlock count overflowing soon?
+	 */
+	BUG_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10);
+}
+EXPORT_SYMBOL(add_preempt_count);
+
+void fastcall sub_preempt_count(int val)
+{
+	/*
+	 * Underflow?
+	 */
+	BUG_ON(val > preempt_count());
+	/*
+	 * Is the spinlock portion underflowing?
+	 */
+	BUG_ON((val < PREEMPT_MASK) && !(preempt_count() & PREEMPT_MASK));
+	preempt_count() -= val;
+}
+EXPORT_SYMBOL(sub_preempt_count);
+
+#endif
+
+static inline int interactive_sleep(enum sleep_type sleep_type)
+{
+	return (sleep_type == SLEEP_INTERACTIVE ||
+		sleep_type == SLEEP_INTERRUPTED);
+}
+
+/*
+ * schedule() is the main scheduler function.
+ */
+asmlinkage void __sched schedule(void)
+{
+	long *switch_count;
+	task_t *prev, *next;
+	runqueue_t *rq;
+	prio_array_t *array;
+	struct list_head *queue;
+	unsigned long long now;
+	unsigned long run_time;
+	int cpu, idx, new_prio;
+
+	/*
+	 * Test if we are atomic.  Since do_exit() needs to call into
+	 * schedule() atomically, we ignore that path for now.
+	 * Otherwise, whine if we are scheduling when we should not be.
+	 */
+	if (likely(!current->exit_state)) {
+		if (unlikely(in_atomic())) {
+			printk(KERN_ERR "BUG: scheduling while atomic: "
+				"%s/0x%08x/%d\n",
+				current->comm, preempt_count(), current->pid);
+			dump_stack();
+		}
+	}
+	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
+
+need_resched:
+	preempt_disable();
+	prev = current;
+	release_kernel_lock(prev);
+need_resched_nonpreemptible:
+	rq = this_rq();
+
+	/*
+	 * The idle thread is not allowed to schedule!
+	 * Remove this check after it has been exercised a bit.
+	 */
+	if (unlikely(prev == rq->idle) && prev->state != TASK_RUNNING) {
+		printk(KERN_ERR "bad: scheduling from the idle thread!\n");
+		dump_stack();
+	}
+
+	schedstat_inc(rq, sched_cnt);
+	now = sched_clock();
+	if (likely((long long)(now - prev->timestamp) < NS_MAX_SLEEP_AVG)) {
+		run_time = now - prev->timestamp;
+		if (unlikely((long long)(now - prev->timestamp) < 0))
+			run_time = 0;
+	} else
+		run_time = NS_MAX_SLEEP_AVG;
+
+	/*
+	 * Tasks are charged proportionately less run_time at high sleep_avg to
+	 * delay them losing their interactive status
+	 */
+	run_time /= (CURRENT_BONUS(prev) ? : 1);
+
+	spin_lock_irq(&rq->lock);
+
+	if (unlikely(prev->flags & PF_DEAD))
+		prev->state = EXIT_DEAD;
+
+	switch_count = &prev->nivcsw;
+	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+		switch_count = &prev->nvcsw;
+		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+				unlikely(signal_pending(prev))))
+			prev->state = TASK_RUNNING;
+		else {
+			if (prev->state == TASK_UNINTERRUPTIBLE)
+				rq->nr_uninterruptible++;
+			deactivate_task(prev, rq);
+		}
+	}
+
+	cpu = smp_processor_id();
+	if (unlikely(!rq->nr_running)) {
+go_idle:
+		idle_balance(cpu, rq);
+		if (!rq->nr_running) {
+			next = rq->idle;
+			rq->expired_timestamp = 0;
+			wake_sleeping_dependent(cpu, rq);
+			/*
+			 * wake_sleeping_dependent() might have released
+			 * the runqueue, so break out if we got new
+			 * tasks meanwhile:
+			 */
+			if (!rq->nr_running)
+				goto switch_tasks;
+		}
+	} else {
+		if (dependent_sleeper(cpu, rq)) {
+			next = rq->idle;
+			goto switch_tasks;
+		}
+		/*
+		 * dependent_sleeper() releases and reacquires the runqueue
+		 * lock, hence go into the idle loop if the rq went
+		 * empty meanwhile:
+		 */
+		if (unlikely(!rq->nr_running))
+			goto go_idle;
+	}
+
+	array = rq->active;
+	if (unlikely(!array->nr_active)) {
+		/*
+		 * Switch the active and expired arrays.
+		 */
+		schedstat_inc(rq, sched_switch);
+		rq->active = rq->expired;
+		rq->expired = array;
+		array = rq->active;
+		rq->expired_timestamp = 0;
+		rq->best_expired_prio = MAX_PRIO;
+	}
+
+	idx = sched_find_first_bit(array->bitmap);
+	queue = array->queue + idx;
+	next = list_entry(queue->next, task_t, run_list);
+
+	if (!rt_task(next) && interactive_sleep(next->sleep_type)) {
+		unsigned long long delta = now - next->timestamp;
+		if (unlikely((long long)(now - next->timestamp) < 0))
+			delta = 0;
+
+		if (next->sleep_type == SLEEP_INTERACTIVE)
+			delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
+
+		array = next->array;
+		new_prio = recalc_task_prio(next, next->timestamp + delta);
+
+		if (unlikely(next->prio != new_prio)) {
+			dequeue_task(next, array);
+			next->prio = new_prio;
+			enqueue_task(next, array);
+		}
+	}
+	next->sleep_type = SLEEP_NORMAL;
+switch_tasks:
+	if (next == rq->idle)
+		schedstat_inc(rq, sched_goidle);
+	prefetch(next);
+	prefetch_stack(next);
+	clear_tsk_need_resched(prev);
+	rcu_qsctr_inc(task_cpu(prev));
+
+	update_cpu_clock(prev, rq, now);
+
+	prev->sleep_avg -= run_time;
+	if ((long)prev->sleep_avg <= 0)
+		prev->sleep_avg = 0;
+	prev->timestamp = prev->last_ran = now;
+
+	sched_info_switch(prev, next);
+	if (likely(prev != next)) {
+		next->timestamp = now;
+		rq->nr_switches++;
+		rq->curr = next;
+		++*switch_count;
+
+		prepare_task_switch(rq, next);
+		prev = context_switch(rq, prev, next);
+		barrier();
+		/*
+		 * this_rq must be evaluated again because prev may have moved
+		 * CPUs since it called schedule(), thus the 'rq' on its stack
+		 * frame will be invalid.
+		 */
+		finish_task_switch(this_rq(), prev);
+	} else
+		spin_unlock_irq(&rq->lock);
+
+	prev = current;
+	if (unlikely(reacquire_kernel_lock(prev) < 0))
+		goto need_resched_nonpreemptible;
+	preempt_enable_no_resched();
+	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
+		goto need_resched;
+}
+
+EXPORT_SYMBOL(schedule);
+
+#ifdef CONFIG_PREEMPT
+/*
+ * this is the entry point to schedule() from in-kernel preemption
+ * off of preempt_enable.  Kernel preemptions off return from interrupt
+ * occur there and call schedule directly.
+ */
+asmlinkage void __sched preempt_schedule(void)
+{
+	struct thread_info *ti = current_thread_info();
+#ifdef CONFIG_PREEMPT_BKL
+	struct task_struct *task = current;
+	int saved_lock_depth;
+#endif
+	/*
+	 * If there is a non-zero preempt_count or interrupts are disabled,
+	 * we do not want to preempt the current task.  Just return..
+	 */
+	if (unlikely(ti->preempt_count || irqs_disabled()))
+		return;
+
+need_resched:
+	add_preempt_count(PREEMPT_ACTIVE);
+	/*
+	 * We keep the big kernel semaphore locked, but we
+	 * clear ->lock_depth so that schedule() doesn't
+	 * auto-release the semaphore:
+	 */
+#ifdef CONFIG_PREEMPT_BKL
+	saved_lock_depth = task->lock_depth;
+	task->lock_depth = -1;
+#endif
+	schedule();
+#ifdef CONFIG_PREEMPT_BKL
+	task->lock_depth = saved_lock_depth;
+#endif
+	sub_preempt_count(PREEMPT_ACTIVE);
+
+	/* we could miss a preemption opportunity between schedule and now */
+	barrier();
+	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
+		goto need_resched;
+}
+
+EXPORT_SYMBOL(preempt_schedule);
+
+/*
+ * this is the entry point to schedule() from kernel preemption
+ * off of irq context.
+ * Note that this is called and returns with irqs disabled. This
+ * protects us against recursive calls from irq context.
+ */
+asmlinkage void __sched preempt_schedule_irq(void)
+{
+	struct thread_info *ti = current_thread_info();
+#ifdef CONFIG_PREEMPT_BKL
+	struct task_struct *task = current;
+	int saved_lock_depth;
+#endif
+	/* Catch callers which need to be fixed */
+	BUG_ON(ti->preempt_count || !irqs_disabled());
+
+need_resched:
+	add_preempt_count(PREEMPT_ACTIVE);
+	/*
+	 * We keep the big kernel semaphore locked, but we
+	 * clear ->lock_depth so that schedule() doesn't
+	 * auto-release the semaphore:
+	 */
+#ifdef CONFIG_PREEMPT_BKL
+	saved_lock_depth = task->lock_depth;
+	task->lock_depth = -1;
+#endif
+	local_irq_enable();
+	schedule();
+	local_irq_disable();
+#ifdef CONFIG_PREEMPT_BKL
+	task->lock_depth = saved_lock_depth;
+#endif
+	sub_preempt_count(PREEMPT_ACTIVE);
+
+	/* we could miss a preemption opportunity between schedule and now */
+	barrier();
+	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
+		goto need_resched;
+}
+
+#endif /* CONFIG_PREEMPT */
+
+int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
+			  void *key)
+{
+	task_t *p = curr->private;
+	return try_to_wake_up(p, mode, sync);
+}
+
+EXPORT_SYMBOL(default_wake_function);
+
+/*
+ * The core wakeup function.  Non-exclusive wakeups (nr_exclusive == 0) just
+ * wake everything up.  If it's an exclusive wakeup (nr_exclusive == small +ve
+ * number) then we wake all the non-exclusive tasks and one exclusive task.
+ *
+ * There are circumstances in which we can try to wake a task which has already
+ * started to run but is not in state TASK_RUNNING.  try_to_wake_up() returns
+ * zero in this (rare) case, and we handle it by continuing to scan the queue.
+ */
+static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+			     int nr_exclusive, int sync, void *key)
+{
+	struct list_head *tmp, *next;
+
+	list_for_each_safe(tmp, next, &q->task_list) {
+		wait_queue_t *curr;
+		unsigned flags;
+		curr = list_entry(tmp, wait_queue_t, task_list);
+		flags = curr->flags;
+		if (curr->func(curr, mode, sync, key) &&
+		    (flags & WQ_FLAG_EXCLUSIVE) &&
+		    !--nr_exclusive)
+			break;
+	}
+}
+
+/**
+ * __wake_up - wake up threads blocked on a waitqueue.
+ * @q: the waitqueue
+ * @mode: which threads
+ * @nr_exclusive: how many wake-one or wake-many threads to wake up
+ * @key: is directly passed to the wakeup function
+ */
+void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
+			int nr_exclusive, void *key)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->lock, flags);
+	__wake_up_common(q, mode, nr_exclusive, 0, key);
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+
+EXPORT_SYMBOL(__wake_up);
+
+/*
+ * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
+ */
+void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
+{
+	__wake_up_common(q, mode, 1, 0, NULL);
+}
+
+/**
+ * __wake_up_sync - wake up threads blocked on a waitqueue.
+ * @q: the waitqueue
+ * @mode: which threads
+ * @nr_exclusive: how many wake-one or wake-many threads to wake up
+ *
+ * The sync wakeup differs in that the waker knows that it will schedule
+ * away soon, so while the target thread will be woken up, it will not
+ * be migrated to another CPU - ie. the two threads are 'synchronized'
+ * with each other. This can prevent needless bouncing between CPUs.
+ *
+ * On UP it can prevent extra preemption.
+ */
+void fastcall
+__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+{
+	unsigned long flags;
+	int sync = 1;
+
+	if (unlikely(!q))
+		return;
+
+	if (unlikely(!nr_exclusive))
+		sync = 0;
+
+	spin_lock_irqsave(&q->lock, flags);
+	__wake_up_common(q, mode, nr_exclusive, sync, NULL);
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
+
+void fastcall complete(struct completion *x)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&x->wait.lock, flags);
+	x->done++;
+	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
+			 1, 0, NULL);
+	spin_unlock_irqrestore(&x->wait.lock, flags);
+}
+EXPORT_SYMBOL(complete);
+
+void fastcall complete_all(struct completion *x)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&x->wait.lock, flags);
+	x->done += UINT_MAX/2;
+	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
+			 0, 0, NULL);
+	spin_unlock_irqrestore(&x->wait.lock, flags);
+}
+EXPORT_SYMBOL(complete_all);
+
+void fastcall __sched wait_for_completion(struct completion *x)
+{
+	might_sleep();
+	spin_lock_irq(&x->wait.lock);
+	if (!x->done) {
+		DECLARE_WAITQUEUE(wait, current);
+
+		wait.flags |= WQ_FLAG_EXCLUSIVE;
+		__add_wait_queue_tail(&x->wait, &wait);
+		do {
+			__set_current_state(TASK_UNINTERRUPTIBLE);
+			spin_unlock_irq(&x->wait.lock);
+			schedule();
+			spin_lock_irq(&x->wait.lock);
+		} while (!x->done);
+		__remove_wait_queue(&x->wait, &wait);
+	}
+	x->done--;
+	spin_unlock_irq(&x->wait.lock);
+}
+EXPORT_SYMBOL(wait_for_completion);
+
+unsigned long fastcall __sched
+wait_for_completion_timeout(struct completion *x, unsigned long timeout)
+{
+	might_sleep();
+
+	spin_lock_irq(&x->wait.lock);
+	if (!x->done) {
+		DECLARE_WAITQUEUE(wait, current);
+
+		wait.flags |= WQ_FLAG_EXCLUSIVE;
+		__add_wait_queue_tail(&x->wait, &wait);
+		do {
+			__set_current_state(TASK_UNINTERRUPTIBLE);
+			spin_unlock_irq(&x->wait.lock);
+			timeout = schedule_timeout(timeout);
+			spin_lock_irq(&x->wait.lock);
+			if (!timeout) {
+				__remove_wait_queue(&x->wait, &wait);
+				goto out;
+			}
+		} while (!x->done);
+		__remove_wait_queue(&x->wait, &wait);
+	}
+	x->done--;
+out:
+	spin_unlock_irq(&x->wait.lock);
+	return timeout;
+}
+EXPORT_SYMBOL(wait_for_completion_timeout);
+
+int fastcall __sched wait_for_completion_interruptible(struct completion *x)
+{
+	int ret = 0;
+
+	might_sleep();
+
+	spin_lock_irq(&x->wait.lock);
+	if (!x->done) {
+		DECLARE_WAITQUEUE(wait, current);
+
+		wait.flags |= WQ_FLAG_EXCLUSIVE;
+		__add_wait_queue_tail(&x->wait, &wait);
+		do {
+			if (signal_pending(current)) {
+				ret = -ERESTARTSYS;
+				__remove_wait_queue(&x->wait, &wait);
+				goto out;
+			}
+			__set_current_state(TASK_INTERRUPTIBLE);
+			spin_unlock_irq(&x->wait.lock);
+			schedule();
+			spin_lock_irq(&x->wait.lock);
+		} while (!x->done);
+		__remove_wait_queue(&x->wait, &wait);
+	}
+	x->done--;
+out:
+	spin_unlock_irq(&x->wait.lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(wait_for_completion_interruptible);
+
+unsigned long fastcall __sched
+wait_for_completion_interruptible_timeout(struct completion *x,
+					  unsigned long timeout)
+{
+	might_sleep();
+
+	spin_lock_irq(&x->wait.lock);
+	if (!x->done) {
+		DECLARE_WAITQUEUE(wait, current);
+
+		wait.flags |= WQ_FLAG_EXCLUSIVE;
+		__add_wait_queue_tail(&x->wait, &wait);
+		do {
+			if (signal_pending(current)) {
+				timeout = -ERESTARTSYS;
+				__remove_wait_queue(&x->wait, &wait);
+				goto out;
+			}
+			__set_current_state(TASK_INTERRUPTIBLE);
+			spin_unlock_irq(&x->wait.lock);
+			timeout = schedule_timeout(timeout);
+			spin_lock_irq(&x->wait.lock);
+			if (!timeout) {
+				__remove_wait_queue(&x->wait, &wait);
+				goto out;
+			}
+		} while (!x->done);
+		__remove_wait_queue(&x->wait, &wait);
+	}
+	x->done--;
+out:
+	spin_unlock_irq(&x->wait.lock);
+	return timeout;
+}
+EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
+
+
+#define	SLEEP_ON_VAR					\
+	unsigned long flags;				\
+	wait_queue_t wait;				\
+	init_waitqueue_entry(&wait, current);
+
+#define SLEEP_ON_HEAD					\
+	spin_lock_irqsave(&q->lock,flags);		\
+	__add_wait_queue(q, &wait);			\
+	spin_unlock(&q->lock);
+
+#define	SLEEP_ON_TAIL					\
+	spin_lock_irq(&q->lock);			\
+	__remove_wait_queue(q, &wait);			\
+	spin_unlock_irqrestore(&q->lock, flags);
+
+void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
+{
+	SLEEP_ON_VAR
+
+	current->state = TASK_INTERRUPTIBLE;
+
+	SLEEP_ON_HEAD
+	schedule();
+	SLEEP_ON_TAIL
+}
+
+EXPORT_SYMBOL(interruptible_sleep_on);
+
+long fastcall __sched
+interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
+{
+	SLEEP_ON_VAR
+
+	current->state = TASK_INTERRUPTIBLE;
+
+	SLEEP_ON_HEAD
+	timeout = schedule_timeout(timeout);
+	SLEEP_ON_TAIL
+
+	return timeout;
+}
+
+EXPORT_SYMBOL(interruptible_sleep_on_timeout);
+
+void fastcall __sched sleep_on(wait_queue_head_t *q)
+{
+	SLEEP_ON_VAR
+
+	current->state = TASK_UNINTERRUPTIBLE;
+
+	SLEEP_ON_HEAD
+	schedule();
+	SLEEP_ON_TAIL
+}
+
+EXPORT_SYMBOL(sleep_on);
+
+long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
+{
+	SLEEP_ON_VAR
+
+	current->state = TASK_UNINTERRUPTIBLE;
+
+	SLEEP_ON_HEAD
+	timeout = schedule_timeout(timeout);
+	SLEEP_ON_TAIL
+
+	return timeout;
+}
+
+EXPORT_SYMBOL(sleep_on_timeout);
+
+void set_user_nice(task_t *p, long nice)
+{
+	unsigned long flags;
+	prio_array_t *array;
+	runqueue_t *rq;
+	int old_prio, new_prio, delta;
+
+	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
+		return;
+	/*
+	 * We have to be careful, if called from sys_setpriority(),
+	 * the task might be in the middle of scheduling on another CPU.
+	 */
+	rq = task_rq_lock(p, &flags);
+	/*
+	 * The RT priorities are set via sched_setscheduler(), but we still
+	 * allow the 'normal' nice value to be set - but as expected
+	 * it won't have any effect on scheduling until the task becomes
+	 * SCHED_NORMAL/SCHED_BATCH:
+	 */
+	if (rt_task(p)) {
+		p->static_prio = NICE_TO_PRIO(nice);
+		goto out_unlock;
+	}
+	array = p->array;
+	if (array) {
+		dequeue_task(p, array);
+		dec_raw_weighted_load(rq, p);
+	}
+
+	old_prio = p->prio;
+	new_prio = NICE_TO_PRIO(nice);
+	delta = new_prio - old_prio;
+	p->static_prio = NICE_TO_PRIO(nice);
+	set_load_weight(p);
+	p->prio += delta;
+
+	if (array) {
+		enqueue_task(p, array);
+		inc_raw_weighted_load(rq, p);
+		/*
+		 * If the task increased its priority or is running and
+		 * lowered its priority, then reschedule its CPU:
+		 */
+		if (delta < 0 || (delta > 0 && task_running(rq, p)))
+			resched_task(rq->curr);
+	}
+out_unlock:
+	task_rq_unlock(rq, &flags);
+}
+
+EXPORT_SYMBOL(set_user_nice);
+
+/*
+ * can_nice - check if a task can reduce its nice value
+ * @p: task
+ * @nice: nice value
+ */
+int can_nice(const task_t *p, const int nice)
+{
+	/* convert nice value [19,-20] to rlimit style value [1,40] */
+	int nice_rlim = 20 - nice;
+	return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
+		capable(CAP_SYS_NICE));
+}
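+
+/*
+ * Worked example of the conversion above: a request for nice == -10
+ * maps to nice_rlim == 20 - (-10) == 30, so an unprivileged task needs
+ * RLIMIT_NICE >= 30 (or CAP_SYS_NICE) before can_nice() succeeds.
+ */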
+
+struct task_struct *kgdb_get_idle(int this_cpu)
+{
+	return cpu_rq(this_cpu)->idle;
+}
+
+#ifdef __ARCH_WANT_SYS_NICE
+
+/*
+ * sys_nice - change the priority of the current process.
+ * @increment: priority increment
+ *
+ * sys_setpriority is a more generic, but much slower function that
+ * does similar things.
+ */
+asmlinkage long sys_nice(int increment)
+{
+	int retval;
+	long nice;
+
+	/*
+	 * Setpriority might change our priority at the same moment.
+	 * We don't have to worry. Conceptually one call occurs first
+	 * and we have a single winner.
+	 */
+	if (increment < -40)
+		increment = -40;
+	if (increment > 40)
+		increment = 40;
+
+	nice = PRIO_TO_NICE(current->static_prio) + increment;
+	if (nice < -20)
+		nice = -20;
+	if (nice > 19)
+		nice = 19;
+
+	if (increment < 0 && !can_nice(current, nice))
+		return -EPERM;
+
+	retval = security_task_setnice(current, nice);
+	if (retval)
+		return retval;
+
+	set_user_nice(current, nice);
+	return 0;
+}
+
+#endif
+
+/**
+ * task_prio - return the priority value of a given task.
+ * @p: the task in question.
+ *
+ * This is the priority value as seen by users in /proc.
+ * RT tasks are offset by -200. Normal tasks are centered
+ * around 0, value goes from -16 to +15.
+ */
+int task_prio(const task_t *p)
+{
+	return p->prio - MAX_RT_PRIO;
+}
+
+/**
+ * task_nice - return the nice value of a given task.
+ * @p: the task in question.
+ */
+int task_nice(const task_t *p)
+{
+	return TASK_NICE(p);
+}
+EXPORT_SYMBOL_GPL(task_nice);
+
+/**
+ * idle_cpu - is a given cpu idle currently?
+ * @cpu: the processor in question.
+ */
+int idle_cpu(int cpu)
+{
+	return cpu_curr(cpu) == cpu_rq(cpu)->idle;
+}
+
+/**
+ * idle_task - return the idle task for a given cpu.
+ * @cpu: the processor in question.
+ */
+task_t *idle_task(int cpu)
+{
+	return cpu_rq(cpu)->idle;
+}
+
+/**
+ * find_process_by_pid - find a process with a matching PID value.
+ * @pid: the pid in question.
+ */
+static inline task_t *find_process_by_pid(pid_t pid)
+{
+	return pid ? find_task_by_pid(pid) : current;
+}
+
+/* Actually do priority change: must hold rq lock. */
+static void __setscheduler(struct task_struct *p, int policy, int prio)
+{
+	BUG_ON(p->array);
+	p->policy = policy;
+	p->rt_priority = prio;
+	if (policy != SCHED_NORMAL && policy != SCHED_BATCH) {
+		p->prio = MAX_RT_PRIO-1 - p->rt_priority;
+	} else {
+		p->prio = p->static_prio;
+		/*
+		 * SCHED_BATCH tasks are treated as perpetual CPU hogs:
+		 */
+		if (policy == SCHED_BATCH)
+			p->sleep_avg = 0;
+	}
+	set_load_weight(p);
+}
+
+/**
+ * sched_setscheduler - change the scheduling policy and/or RT priority of
+ * a thread.
+ * @p: the task in question.
+ * @policy: new policy.
+ * @param: structure containing the new RT priority.
+ */
+int sched_setscheduler(struct task_struct *p, int policy,
+		       struct sched_param *param)
+{
+	int retval;
+	int oldprio, oldpolicy = -1;
+	prio_array_t *array;
+	unsigned long flags;
+	runqueue_t *rq;
+
+recheck:
+	/* double check policy once rq lock held */
+	if (policy < 0)
+		policy = oldpolicy = p->policy;
+	else if (policy != SCHED_FIFO && policy != SCHED_RR &&
+			policy != SCHED_NORMAL && policy != SCHED_BATCH)
+		return -EINVAL;
+	/*
+	 * Valid priorities for SCHED_FIFO and SCHED_RR are
+	 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and
+	 * SCHED_BATCH is 0.
+	 */
+	if (param->sched_priority < 0 ||
+	    (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
+	    (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
+		return -EINVAL;
+	if ((policy == SCHED_NORMAL || policy == SCHED_BATCH)
+					!= (param->sched_priority == 0))
+		return -EINVAL;
+
+	/*
+	 * Allow unprivileged RT tasks to decrease priority:
+	 */
+	if (!capable(CAP_SYS_NICE)) {
+		/*
+		 * can't change policy, except between SCHED_NORMAL
+		 * and SCHED_BATCH:
+		 */
+		if (((policy != SCHED_NORMAL && p->policy != SCHED_BATCH) &&
+			(policy != SCHED_BATCH && p->policy != SCHED_NORMAL)) &&
+				!p->signal->rlim[RLIMIT_RTPRIO].rlim_cur)
+			return -EPERM;
+		/* can't increase priority */
+		if ((policy != SCHED_NORMAL && policy != SCHED_BATCH) &&
+		    param->sched_priority > p->rt_priority &&
+		    param->sched_priority >
+				p->signal->rlim[RLIMIT_RTPRIO].rlim_cur)
+			return -EPERM;
+		/* can't change other user's priorities */
+		if ((current->euid != p->euid) &&
+		    (current->euid != p->uid))
+			return -EPERM;
+	}
+
+	retval = security_task_setscheduler(p, policy, param);
+	if (retval)
+		return retval;
+	/*
+	 * To be able to change p->policy safely, the appropriate
+	 * runqueue lock must be held.
+	 */
+	rq = task_rq_lock(p, &flags);
+	/* recheck policy now with rq lock held */
+	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
+		policy = oldpolicy = -1;
+		task_rq_unlock(rq, &flags);
+		goto recheck;
+	}
+	array = p->array;
+	if (array)
+		deactivate_task(p, rq);
+	oldprio = p->prio;
+	__setscheduler(p, policy, param->sched_priority);
+	if (array) {
+		__activate_task(p, rq);
+		/*
+		 * Reschedule if we are currently running on this runqueue and
+		 * our priority decreased, or if we are not currently running on
+		 * this runqueue and our priority is higher than the current's
+		 */
+		if (task_running(rq, p)) {
+			if (p->prio > oldprio)
+				resched_task(rq->curr);
+		} else if (TASK_PREEMPTS_CURR(p, rq))
+			resched_task(rq->curr);
+	}
+	task_rq_unlock(rq, &flags);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sched_setscheduler);
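+
+/*
+ * In-kernel usage sketch (illustrative only, not part of this patch):
+ * turning a kernel thread 'p' into a real-time FIFO task could look
+ * roughly like:
+ *
+ *	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+ *	sched_setscheduler(p, SCHED_FIFO, &param);
+ *
+ * The caller needs a valid reference to 'p'; the runqueue lock is taken
+ * internally.
+ */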
+
+static int
+do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+{
+	int retval;
+	struct sched_param lparam;
+	struct task_struct *p;
+
+	if (!param || pid < 0)
+		return -EINVAL;
+	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
+		return -EFAULT;
+	read_lock_irq(&tasklist_lock);
+	p = find_process_by_pid(pid);
+	if (!p) {
+		read_unlock_irq(&tasklist_lock);
+		return -ESRCH;
+	}
+	retval = sched_setscheduler(p, policy, &lparam);
+	read_unlock_irq(&tasklist_lock);
+	return retval;
+}
+
+/**
+ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
+ * @pid: the pid in question.
+ * @policy: new policy.
+ * @param: structure containing the new RT priority.
+ */
+asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
+				       struct sched_param __user *param)
+{
+	/* negative values for policy are not valid */
+	if (policy < 0)
+		return -EINVAL;
+
+	return do_sched_setscheduler(pid, policy, param);
+}
+
+/**
+ * sys_sched_setparam - set/change the RT priority of a thread
+ * @pid: the pid in question.
+ * @param: structure containing the new RT priority.
+ */
+asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
+{
+	return do_sched_setscheduler(pid, -1, param);
+}
+
+/**
+ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
+ * @pid: the pid in question.
+ */
+asmlinkage long sys_sched_getscheduler(pid_t pid)
+{
+	int retval = -EINVAL;
+	task_t *p;
+
+	if (pid < 0)
+		goto out_nounlock;
+
+	retval = -ESRCH;
+	read_lock(&tasklist_lock);
+	p = find_process_by_pid(pid);
+	if (p) {
+		retval = security_task_getscheduler(p);
+		if (!retval)
+			retval = p->policy;
+	}
+	read_unlock(&tasklist_lock);
+
+out_nounlock:
+	return retval;
+}
+
+/**
+ * sys_sched_getparam - get the RT priority of a thread
+ * @pid: the pid in question.
+ * @param: structure containing the RT priority.
+ */
+asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
+{
+	struct sched_param lp;
+	int retval = -EINVAL;
+	task_t *p;
+
+	if (!param || pid < 0)
+		goto out_nounlock;
+
+	read_lock(&tasklist_lock);
+	p = find_process_by_pid(pid);
+	retval = -ESRCH;
+	if (!p)
+		goto out_unlock;
+
+	retval = security_task_getscheduler(p);
+	if (retval)
+		goto out_unlock;
+
+	lp.sched_priority = p->rt_priority;
+	read_unlock(&tasklist_lock);
+
+	/*
+	 * This one might sleep, we cannot do it with a spinlock held ...
+	 */
+	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
+
+out_nounlock:
+	return retval;
+
+out_unlock:
+	read_unlock(&tasklist_lock);
+	return retval;
+}
+
+long sched_setaffinity(pid_t pid, cpumask_t new_mask)
+{
+	task_t *p;
+	int retval;
+	cpumask_t cpus_allowed;
+
+	lock_cpu_hotplug();
+	read_lock(&tasklist_lock);
+
+	p = find_process_by_pid(pid);
+	if (!p) {
+		read_unlock(&tasklist_lock);
+		unlock_cpu_hotplug();
+		return -ESRCH;
+	}
+
+	/*
+	 * It is not safe to call set_cpus_allowed with the
+	 * tasklist_lock held.  We will bump the task_struct's
+	 * usage count and then drop tasklist_lock.
+	 */
+	get_task_struct(p);
+	read_unlock(&tasklist_lock);
+
+	retval = -EPERM;
+	if ((current->euid != p->euid) && (current->euid != p->uid) &&
+			!capable(CAP_SYS_NICE))
+		goto out_unlock;
+
+	cpus_allowed = cpuset_cpus_allowed(p);
+	cpus_and(new_mask, new_mask, cpus_allowed);
+	retval = set_cpus_allowed(p, new_mask);
+
+out_unlock:
+	put_task_struct(p);
+	unlock_cpu_hotplug();
+	return retval;
+}
+
+static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
+			     cpumask_t *new_mask)
+{
+	if (len < sizeof(cpumask_t)) {
+		memset(new_mask, 0, sizeof(cpumask_t));
+	} else if (len > sizeof(cpumask_t)) {
+		len = sizeof(cpumask_t);
+	}
+	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
+}
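+
+/*
+ * Example of the length handling above (sizes assumed): with a 64-byte
+ * kernel cpumask_t and a caller passing len == 8, the mask is zeroed
+ * first and only the first 8 bytes are copied in, leaving the higher
+ * CPUs cleared; with len == 128 the excess user bytes are simply
+ * ignored.
+ */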
+
+/**
+ * sys_sched_setaffinity - set the cpu affinity of a process
+ * @pid: pid of the process
+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
+ * @user_mask_ptr: user-space pointer to the new cpu mask
+ */
+asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
+				      unsigned long __user *user_mask_ptr)
+{
+	cpumask_t new_mask;
+	int retval;
+
+	retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask);
+	if (retval)
+		return retval;
+
+	return sched_setaffinity(pid, new_mask);
+}
+
+/*
+ * Represents all CPUs present in the system.
+ * In systems capable of hotplug, this map can grow dynamically as
+ * new CPUs are detected via a platform-specific method such as ACPI.
+ */
+
+cpumask_t cpu_present_map __read_mostly;
+EXPORT_SYMBOL(cpu_present_map);
+
+#ifndef CONFIG_SMP
+cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
+cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
+#endif
+
+long sched_getaffinity(pid_t pid, cpumask_t *mask)
+{
+	int retval;
+	task_t *p;
+
+	lock_cpu_hotplug();
+	read_lock(&tasklist_lock);
+
+	retval = -ESRCH;
+	p = find_process_by_pid(pid);
+	if (!p)
+		goto out_unlock;
+
+	retval = 0;
+	cpus_and(*mask, p->cpus_allowed, cpu_online_map);
+
+out_unlock:
+	read_unlock(&tasklist_lock);
+	unlock_cpu_hotplug();
+	if (retval)
+		return retval;
+
+	return 0;
+}
+
+/**
+ * sys_sched_getaffinity - get the cpu affinity of a process
+ * @pid: pid of the process
+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
+ * @user_mask_ptr: user-space pointer to hold the current cpu mask
+ */
+asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
+				      unsigned long __user *user_mask_ptr)
+{
+	int ret;
+	cpumask_t mask;
+
+	if (len < sizeof(cpumask_t))
+		return -EINVAL;
+
+	ret = sched_getaffinity(pid, &mask);
+	if (ret < 0)
+		return ret;
+
+	if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t)))
+		return -EFAULT;
+
+	return sizeof(cpumask_t);
+}
+
+/**
+ * sys_sched_yield - yield the current processor to other threads.
+ *
+ * this function yields the current CPU by moving the calling thread
+ * to the expired array. If there are no other threads running on this
+ * CPU then this function will return.
+ */
+asmlinkage long sys_sched_yield(void)
+{
+	runqueue_t *rq = this_rq_lock();
+	prio_array_t *array = current->array;
+	prio_array_t *target = rq->expired;
+
+	schedstat_inc(rq, yld_cnt);
+	/*
+	 * We implement yielding by moving the task into the expired
+	 * queue.
+	 *
+	 * (special rule: RT tasks will just roundrobin in the active
+	 *  array.)
+	 */
+	if (rt_task(current))
+		target = rq->active;
+
+	if (array->nr_active == 1) {
+		schedstat_inc(rq, yld_act_empty);
+		if (!rq->expired->nr_active)
+			schedstat_inc(rq, yld_both_empty);
+	} else if (!rq->expired->nr_active)
+		schedstat_inc(rq, yld_exp_empty);
+
+	if (array != target) {
+		dequeue_task(current, array);
+		enqueue_task(current, target);
+	} else
+		/*
+		 * requeue_task is cheaper so perform that if possible.
+		 */
+		requeue_task(current, array);
+
+	/*
+	 * Since we are going to call schedule() anyway, there's
+	 * no need to preempt or enable interrupts:
+	 */
+	__release(rq->lock);
+	_raw_spin_unlock(&rq->lock);
+	preempt_enable_no_resched();
+
+	schedule();
+
+	return 0;
+}
+
+static inline void __cond_resched(void)
+{
+	/*
+	 * The BKS might be reacquired before we have dropped
+	 * PREEMPT_ACTIVE, which could trigger a second
+	 * cond_resched() call.
+	 */
+	if (unlikely(preempt_count()))
+		return;
+	if (unlikely(system_state != SYSTEM_RUNNING))
+		return;
+	do {
+		add_preempt_count(PREEMPT_ACTIVE);
+		schedule();
+		sub_preempt_count(PREEMPT_ACTIVE);
+	} while (need_resched());
+}
+
+int __sched cond_resched(void)
+{
+	if (need_resched()) {
+		__cond_resched();
+		return 1;
+	}
+	return 0;
+}
+
+EXPORT_SYMBOL(cond_resched);
+
+/*
+ * cond_resched_lock() - if a reschedule is pending, drop the given lock,
+ * call schedule, and on return reacquire the lock.
+ *
+ * This works OK both with and without CONFIG_PREEMPT.  We do strange low-level
+ * operations here to prevent schedule() from being called twice (once via
+ * spin_unlock(), once by hand).
+ */
+int cond_resched_lock(spinlock_t *lock)
+{
+	int ret = 0;
+
+	if (need_lockbreak(lock)) {
+		spin_unlock(lock);
+		cpu_relax();
+		ret = 1;
+		spin_lock(lock);
+	}
+	if (need_resched()) {
+		_raw_spin_unlock(lock);
+		preempt_enable_no_resched();
+		__cond_resched();
+		ret = 1;
+		spin_lock(lock);
+	}
+	return ret;
+}
+
+EXPORT_SYMBOL(cond_resched_lock);
+
+int __sched cond_resched_softirq(void)
+{
+	BUG_ON(!in_softirq());
+
+	if (need_resched()) {
+		__local_bh_enable();
+		__cond_resched();
+		local_bh_disable();
+		return 1;
+	}
+	return 0;
+}
+
+EXPORT_SYMBOL(cond_resched_softirq);
+
+
+/**
+ * yield - yield the current processor to other threads.
+ *
+ * this is a shortcut for kernel-space yielding - it marks the
+ * thread runnable and calls sys_sched_yield().
+ */
+void __sched yield(void)
+{
+	set_current_state(TASK_RUNNING);
+	sys_sched_yield();
+}
+
+EXPORT_SYMBOL(yield);
+
+/*
+ * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
+ * that process accounting knows that this is a task in IO wait state.
+ *
+ * But don't do that if it is a deliberate, throttling IO wait (this task
+ * has set its backing_dev_info: the queue against which it should throttle)
+ */
+void __sched io_schedule(void)
+{
+	struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+
+	atomic_inc(&rq->nr_iowait);
+	schedule();
+	atomic_dec(&rq->nr_iowait);
+}
+
+EXPORT_SYMBOL(io_schedule);
+
+long __sched io_schedule_timeout(long timeout)
+{
+	struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+	long ret;
+
+	atomic_inc(&rq->nr_iowait);
+	ret = schedule_timeout(timeout);
+	atomic_dec(&rq->nr_iowait);
+	return ret;
+}
+
+/**
+ * sys_sched_get_priority_max - return maximum RT priority.
+ * @policy: scheduling class.
+ *
+ * this syscall returns the maximum rt_priority that can be used
+ * by a given scheduling class.
+ */
+asmlinkage long sys_sched_get_priority_max(int policy)
+{
+	int ret = -EINVAL;
+
+	switch (policy) {
+	case SCHED_FIFO:
+	case SCHED_RR:
+		ret = MAX_USER_RT_PRIO-1;
+		break;
+	case SCHED_NORMAL:
+	case SCHED_BATCH:
+		ret = 0;
+		break;
+	}
+	return ret;
+}
+
+/**
+ * sys_sched_get_priority_min - return minimum RT priority.
+ * @policy: scheduling class.
+ *
+ * this syscall returns the minimum rt_priority that can be used
+ * by a given scheduling class.
+ */
+asmlinkage long sys_sched_get_priority_min(int policy)
+{
+	int ret = -EINVAL;
+
+	switch (policy) {
+	case SCHED_FIFO:
+	case SCHED_RR:
+		ret = 1;
+		break;
+	case SCHED_NORMAL:
+	case SCHED_BATCH:
+		ret = 0;
+	}
+	return ret;
+}
+
+/**
+ * sys_sched_rr_get_interval - return the default timeslice of a process.
+ * @pid: pid of the process.
+ * @interval: userspace pointer to the timeslice value.
+ *
+ * this syscall writes the default timeslice value of a given process
+ * into the user-space timespec buffer. A value of '0' means infinity.
+ */
+asmlinkage
+long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
+{
+	int retval = -EINVAL;
+	struct timespec t;
+	task_t *p;
+
+	if (pid < 0)
+		goto out_nounlock;
+
+	retval = -ESRCH;
+	read_lock(&tasklist_lock);
+	p = find_process_by_pid(pid);
+	if (!p)
+		goto out_unlock;
+
+	retval = security_task_getscheduler(p);
+	if (retval)
+		goto out_unlock;
+
+	jiffies_to_timespec(p->policy & SCHED_FIFO ?
+				0 : task_timeslice(p), &t);
+	read_unlock(&tasklist_lock);
+	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
+out_nounlock:
+	return retval;
+out_unlock:
+	read_unlock(&tasklist_lock);
+	return retval;
+}
+
+static inline struct task_struct *eldest_child(struct task_struct *p)
+{
+	if (list_empty(&p->children)) return NULL;
+	return list_entry(p->children.next,struct task_struct,sibling);
+}
+
+static inline struct task_struct *older_sibling(struct task_struct *p)
+{
+	if (p->sibling.prev==&p->parent->children) return NULL;
+	return list_entry(p->sibling.prev,struct task_struct,sibling);
+}
+
+static inline struct task_struct *younger_sibling(struct task_struct *p)
+{
+	if (p->sibling.next==&p->parent->children) return NULL;
+	return list_entry(p->sibling.next,struct task_struct,sibling);
+}
+
+static void show_task(task_t *p)
+{
+	task_t *relative;
+	unsigned state;
+	unsigned long free = 0;
+	static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" };
+
+	printk("%-13.13s ", p->comm);
+	state = p->state ? __ffs(p->state) + 1 : 0;
+	if (state < ARRAY_SIZE(stat_nam))
+		printk(stat_nam[state]);
+	else
+		printk("?");
+#if (BITS_PER_LONG == 32)
+	if (state == TASK_RUNNING)
+		printk(" running ");
+	else
+		printk(" %08lX ", thread_saved_pc(p));
+#else
+	if (state == TASK_RUNNING)
+		printk("  running task   ");
+	else
+		printk(" %016lx ", thread_saved_pc(p));
+#endif
+#ifdef CONFIG_DEBUG_STACK_USAGE
+	{
+		unsigned long *n = end_of_stack(p);
+		while (!*n)
+			n++;
+		free = (unsigned long)n - (unsigned long)end_of_stack(p);
+	}
+#endif
+	printk("%5lu %5d %6d ", free, p->pid, p->parent->pid);
+	if ((relative = eldest_child(p)))
+		printk("%5d ", relative->pid);
+	else
+		printk("      ");
+	if ((relative = younger_sibling(p)))
+		printk("%7d", relative->pid);
+	else
+		printk("       ");
+	if ((relative = older_sibling(p)))
+		printk(" %5d", relative->pid);
+	else
+		printk("      ");
+	if (!p->mm)
+		printk(" (L-TLB)\n");
+	else
+		printk(" (NOTLB)\n");
+
+	if (state != TASK_RUNNING)
+		show_stack(p, NULL);
+}
+
+void show_state(void)
+{
+	task_t *g, *p;
+
+#if (BITS_PER_LONG == 32)
+	printk("\n"
+	       "                                               sibling\n");
+	printk("  task             PC      pid father child younger older\n");
+#else
+	printk("\n"
+	       "                                                       sibling\n");
+	printk("  task                 PC          pid father child younger older\n");
+#endif
+	read_lock(&tasklist_lock);
+	do_each_thread(g, p) {
+		/*
+		 * reset the NMI timeout: listing all tasks on a slow
+		 * console might take a lot of time:
+		 */
+		touch_nmi_watchdog();
+		show_task(p);
+	} while_each_thread(g, p);
+
+	read_unlock(&tasklist_lock);
+	mutex_debug_show_all_locks();
+}
+
+/**
+ * init_idle - set up an idle thread for a given CPU
+ * @idle: task in question
+ * @cpu: cpu the idle task belongs to
+ *
+ * NOTE: this function does not set the idle thread's NEED_RESCHED
+ * flag, to make booting more robust.
+ */
+void __devinit init_idle(task_t *idle, int cpu)
+{
+	runqueue_t *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	idle->timestamp = sched_clock();
+	idle->sleep_avg = 0;
+	idle->array = NULL;
+	idle->prio = MAX_PRIO;
+	idle->state = TASK_RUNNING;
+	idle->cpus_allowed = cpumask_of_cpu(cpu);
+	set_task_cpu(idle, cpu);
+
+	spin_lock_irqsave(&rq->lock, flags);
+	rq->curr = rq->idle = idle;
+#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+	idle->oncpu = 1;
+#endif
+	spin_unlock_irqrestore(&rq->lock, flags);
+
+	/* Set the preempt count _outside_ the spinlocks! */
+#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
+	task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
+#else
+	task_thread_info(idle)->preempt_count = 0;
+#endif
+}
+
+/*
+ * In a system that switches off the HZ timer nohz_cpu_mask
+ * indicates which cpus entered this state. This is used
+ * in the rcu update to wait only for active cpus. For systems
+ * which do not switch off the HZ timer nohz_cpu_mask should
+ * always be CPU_MASK_NONE.
+ */
+cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
+
+#ifdef CONFIG_SMP
+/*
+ * This is how migration works:
+ *
+ * 1) we queue a migration_req_t structure in the source CPU's
+ *    runqueue and wake up that CPU's migration thread.
+ * 2) we wait on the request's completion => thread blocks.
+ * 3) migration thread wakes up (implicitly it forces the migrated
+ *    thread off the CPU)
+ * 4) it gets the migration request and checks whether the migrated
+ *    task is still in the wrong runqueue.
+ * 5) if it's in the wrong runqueue then the migration thread removes
+ *    it and puts it into the right queue.
+ * 6) migration thread completes the request.
+ * 7) we wake up and the migration is done.
+ */
+
+/*
+ * Change a given task's CPU affinity. Migrate the thread to a
+ * proper CPU and schedule it away if the CPU it's executing on
+ * is removed from the allowed bitmask.
+ *
+ * NOTE: the caller must have a valid reference to the task, the
+ * task must not exit() & deallocate itself prematurely.  The
+ * call is not atomic; no spinlocks may be held.
+ */
+int set_cpus_allowed(task_t *p, cpumask_t new_mask)
+{
+	unsigned long flags;
+	int ret = 0;
+	migration_req_t req;
+	runqueue_t *rq;
+
+	rq = task_rq_lock(p, &flags);
+	if (!cpus_intersects(new_mask, cpu_online_map)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	p->cpus_allowed = new_mask;
+	/* Can the task run on the task's current CPU? If so, we're done */
+	if (cpu_isset(task_cpu(p), new_mask))
+		goto out;
+
+	if (migrate_task(p, any_online_cpu(new_mask), &req)) {
+		/* Need help from migration thread: drop lock and wait. */
+		task_rq_unlock(rq, &flags);
+		wake_up_process(rq->migration_thread);
+		wait_for_completion(&req.done);
+		tlb_migrate_finish(p->mm);
+		return 0;
+	}
+out:
+	task_rq_unlock(rq, &flags);
+	return ret;
+}
+
+EXPORT_SYMBOL_GPL(set_cpus_allowed);
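+
+/*
+ * Usage sketch (illustrative, not part of this patch): pinning a task
+ * 'p' that the caller holds a reference to onto CPU 0 could be done
+ * with:
+ *
+ *	set_cpus_allowed(p, cpumask_of_cpu(0));
+ *
+ * If 'p' is currently running elsewhere the migration thread pushes it
+ * over, and the call sleeps until the move has completed.
+ */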
+
+/*
+ * Move (not current) task off this cpu, onto dest cpu.  We're doing
+ * this because either it can't run here any more (set_cpus_allowed()
+ * away from this CPU, or CPU going down), or because we're
+ * attempting to rebalance this task on exec (sched_exec).
+ *
+ * So we race with normal scheduler movements, but that's OK, as long
+ * as the task is no longer on this CPU.
+ */
+static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
+{
+	runqueue_t *rq_dest, *rq_src;
+
+	if (unlikely(cpu_is_offline(dest_cpu)))
+		return;
+
+	rq_src = cpu_rq(src_cpu);
+	rq_dest = cpu_rq(dest_cpu);
+
+	double_rq_lock(rq_src, rq_dest);
+	/* Already moved. */
+	if (task_cpu(p) != src_cpu)
+		goto out;
+	/* Affinity changed (again). */
+	if (!cpu_isset(dest_cpu, p->cpus_allowed))
+		goto out;
+
+	set_task_cpu(p, dest_cpu);
+	if (p->array) {
+		/*
+		 * Sync timestamp with rq_dest's before activating.
+		 * The same thing could be achieved by doing this step
+		 * afterwards, and pretending it was a local activate.
+		 * This way is cleaner and logically correct.
+		 */
+		p->timestamp = p->timestamp - rq_src->timestamp_last_tick
+				+ rq_dest->timestamp_last_tick;
+		deactivate_task(p, rq_src);
+		activate_task(p, rq_dest, 0);
+		if (TASK_PREEMPTS_CURR(p, rq_dest))
+			resched_task(rq_dest->curr);
+	}
+
+out:
+	double_rq_unlock(rq_src, rq_dest);
+}
+
+/*
+ * migration_thread - this is a highprio system thread that performs
+ * thread migration by bumping thread off CPU then 'pushing' onto
+ * another runqueue.
+ */
+static int migration_thread(void *data)
+{
+	runqueue_t *rq;
+	int cpu = (long)data;
+
+	rq = cpu_rq(cpu);
+	BUG_ON(rq->migration_thread != current);
+
+	set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		struct list_head *head;
+		migration_req_t *req;
+
+		try_to_freeze();
+
+		spin_lock_irq(&rq->lock);
+
+		if (cpu_is_offline(cpu)) {
+			spin_unlock_irq(&rq->lock);
+			goto wait_to_die;
+		}
+
+		if (rq->active_balance) {
+			active_load_balance(rq, cpu);
+			rq->active_balance = 0;
+		}
+
+		head = &rq->migration_queue;
+
+		if (list_empty(head)) {
+			spin_unlock_irq(&rq->lock);
+			schedule();
+			set_current_state(TASK_INTERRUPTIBLE);
+			continue;
+		}
+		req = list_entry(head->next, migration_req_t, list);
+		list_del_init(head->next);
+
+		spin_unlock(&rq->lock);
+		__migrate_task(req->task, cpu, req->dest_cpu);
+		local_irq_enable();
+
+		complete(&req->done);
+	}
+	__set_current_state(TASK_RUNNING);
+	return 0;
+
+wait_to_die:
+	/* Wait for kthread_stop */
+	set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		schedule();
+		set_current_state(TASK_INTERRUPTIBLE);
+	}
+	__set_current_state(TASK_RUNNING);
+	return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/* Figure out where a task on a dead CPU should go; use force if necessary. */
+static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
+{
+	int dest_cpu;
+	cpumask_t mask;
+
+	/* On same node? */
+	mask = node_to_cpumask(cpu_to_node(dead_cpu));
+	cpus_and(mask, mask, tsk->cpus_allowed);
+	dest_cpu = any_online_cpu(mask);
+
+	/* On any allowed CPU? */
+	if (dest_cpu == NR_CPUS)
+		dest_cpu = any_online_cpu(tsk->cpus_allowed);
+
+	/* No more Mr. Nice Guy. */
+	if (dest_cpu == NR_CPUS) {
+		cpus_setall(tsk->cpus_allowed);
+		dest_cpu = any_online_cpu(tsk->cpus_allowed);
+
+		/*
+		 * Don't tell them about moving exiting tasks or
+		 * kernel threads (both mm NULL), since they never
+		 * leave kernel.
+		 */
+		if (tsk->mm && printk_ratelimit())
+			printk(KERN_INFO "process %d (%s) no "
+			       "longer affine to cpu%d\n",
+			       tsk->pid, tsk->comm, dead_cpu);
+	}
+	__migrate_task(tsk, dead_cpu, dest_cpu);
+}
+
+/*
+ * While a dead CPU has no uninterruptible tasks queued at this point,
+ * it might still have a nonzero ->nr_uninterruptible counter, because
+ * for performance reasons the counter is not strictly tracking tasks to
+ * their home CPUs. So we just add the counter to another CPU's counter,
+ * to keep the global sum constant after CPU-down:
+ */
+static void migrate_nr_uninterruptible(runqueue_t *rq_src)
+{
+	runqueue_t *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL));
+	unsigned long flags;
+
+	local_irq_save(flags);
+	double_rq_lock(rq_src, rq_dest);
+	rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
+	rq_src->nr_uninterruptible = 0;
+	double_rq_unlock(rq_src, rq_dest);
+	local_irq_restore(flags);
+}
+
+/* Run through task list and migrate tasks from the dead cpu. */
+static void migrate_live_tasks(int src_cpu)
+{
+	struct task_struct *tsk, *t;
+
+	write_lock_irq(&tasklist_lock);
+
+	do_each_thread(t, tsk) {
+		if (tsk == current)
+			continue;
+
+		if (task_cpu(tsk) == src_cpu)
+			move_task_off_dead_cpu(src_cpu, tsk);
+	} while_each_thread(t, tsk);
+
+	write_unlock_irq(&tasklist_lock);
+}
+
+/* Schedules idle task to be the next runnable task on current CPU.
+ * It does so by boosting its priority to highest possible and adding it to
+ * the _front_ of runqueue. Used by CPU offline code.
+ */
+void sched_idle_next(void)
+{
+	int cpu = smp_processor_id();
+	runqueue_t *rq = this_rq();
+	struct task_struct *p = rq->idle;
+	unsigned long flags;
+
+	/* cpu has to be offline */
+	BUG_ON(cpu_online(cpu));
+
+	/* Strictly not necessary since the rest of the CPUs are stopped by now
+	 * and interrupts disabled on current cpu.
+	 */
+	spin_lock_irqsave(&rq->lock, flags);
+
+	__setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
+	/* Add idle task to the _front_ of its priority queue */
+	__activate_idle_task(p, rq);
+
+	spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+/* Ensures that the idle task is using init_mm right before its cpu goes
+ * offline.
+ */
+void idle_task_exit(void)
+{
+	struct mm_struct *mm = current->active_mm;
+
+	BUG_ON(cpu_online(smp_processor_id()));
+
+	if (mm != &init_mm)
+		switch_mm(mm, &init_mm, current);
+	mmdrop(mm);
+}
+
+static void migrate_dead(unsigned int dead_cpu, task_t *tsk)
+{
+	struct runqueue *rq = cpu_rq(dead_cpu);
+
+	/* Must be exiting, otherwise would be on tasklist. */
+	BUG_ON(tsk->exit_state != EXIT_ZOMBIE && tsk->exit_state != EXIT_DEAD);
+
+	/* Cannot have done final schedule yet: would have vanished. */
+	BUG_ON(tsk->flags & PF_DEAD);
+
+	get_task_struct(tsk);
+
+	/*
+	 * Drop lock around migration; if someone else moves it,
+	 * that's OK.  No task can be added to this CPU, so iteration is
+	 * fine.
+	 */
+	spin_unlock_irq(&rq->lock);
+	move_task_off_dead_cpu(dead_cpu, tsk);
+	spin_lock_irq(&rq->lock);
+
+	put_task_struct(tsk);
+}
+
+/* release_task() removes task from tasklist, so we won't find dead tasks. */
+static void migrate_dead_tasks(unsigned int dead_cpu)
+{
+	unsigned arr, i;
+	struct runqueue *rq = cpu_rq(dead_cpu);
+
+	for (arr = 0; arr < 2; arr++) {
+		for (i = 0; i < MAX_PRIO; i++) {
+			struct list_head *list = &rq->arrays[arr].queue[i];
+			while (!list_empty(list))
+				migrate_dead(dead_cpu,
+					     list_entry(list->next, task_t,
+							run_list));
+		}
+	}
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#if defined(CONFIG_DEBUG_KERNEL) && defined(CONFIG_SYSCTL)
+static struct ctl_table sd_ctl_dir[] = {
+	{1, "sched_domain", NULL, 0, 0755, NULL, },
+	{0,},
+};
+
+static struct ctl_table sd_ctl_root[] = {
+	{1, "kernel", NULL, 0, 0755, sd_ctl_dir, },
+	{0,},
+};
+
+static char *sched_strdup(char *str)
+{
+	int n = strlen(str)+1;
+	char *s = kmalloc(n, GFP_KERNEL);
+	if (!s)
+		return NULL;
+	return strcpy(s, str);
+}
+
+static struct ctl_table *sd_alloc_ctl_entry(int n)
+{
+	struct ctl_table *entry =
+		kmalloc(n * sizeof(struct ctl_table), GFP_KERNEL);
+	BUG_ON(!entry);
+	memset(entry, 0, n * sizeof(struct ctl_table));
+	return entry;
+}
+
+static void set_table_entry(struct ctl_table *entry, int ctl_name,
+			const char *procname, void *data, int maxlen,
+			mode_t mode, proc_handler *proc_handler)
+{
+	entry->ctl_name = ctl_name;
+	entry->procname = procname;
+	entry->data = data;
+	entry->maxlen = maxlen;
+	entry->mode = mode;
+	entry->proc_handler = proc_handler;
+}
+
+static struct ctl_table *
+sd_alloc_ctl_domain_table(struct sched_domain *sd)
+{
+	struct ctl_table *table;
+	table = sd_alloc_ctl_entry(14);
+
+	set_table_entry(&table[0], 1, "min_interval", &sd->min_interval,
+		sizeof(long), 0644, proc_doulongvec_minmax);
+	set_table_entry(&table[1], 2, "max_interval", &sd->max_interval,
+		sizeof(long), 0644, proc_doulongvec_minmax);
+	set_table_entry(&table[2], 3, "busy_idx", &sd->busy_idx,
+		sizeof(int), 0644, proc_dointvec_minmax);
+	set_table_entry(&table[3], 4, "idle_idx", &sd->idle_idx,
+		sizeof(int), 0644, proc_dointvec_minmax);
+	set_table_entry(&table[4], 5, "newidle_idx", &sd->newidle_idx,
+		sizeof(int), 0644, proc_dointvec_minmax);
+	set_table_entry(&table[5], 6, "wake_idx", &sd->wake_idx,
+		sizeof(int), 0644, proc_dointvec_minmax);
+	set_table_entry(&table[6], 7, "forkexec_idx", &sd->forkexec_idx,
+		sizeof(int), 0644, proc_dointvec_minmax);
+	set_table_entry(&table[7], 8, "busy_factor", &sd->busy_factor,
+		sizeof(int), 0644, proc_dointvec_minmax);
+	set_table_entry(&table[8], 9, "imbalance_pct", &sd->imbalance_pct,
+		sizeof(int), 0644, proc_dointvec_minmax);
+	set_table_entry(&table[9], 10, "cache_hot_time", &sd->cache_hot_time,
+		sizeof(long long), 0644, proc_doulongvec_minmax);
+	set_table_entry(&table[10], 11, "cache_nice_tries", &sd->cache_nice_tries,
+		sizeof(int), 0644, proc_dointvec_minmax);
+	set_table_entry(&table[11], 12, "per_cpu_gain", &sd->per_cpu_gain,
+		sizeof(int), 0644, proc_dointvec_minmax);
+	set_table_entry(&table[12], 13, "flags", &sd->flags,
+		sizeof(int), 0644, proc_dointvec_minmax);
+	return table;
+}
+
+static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
+{
+	struct sched_domain *sd;
+	int domain_num = 0, i;
+	struct ctl_table *entry, *table;
+	char buf[32];
+	for_each_domain(cpu, sd)
+		domain_num++;
+	entry = table = sd_alloc_ctl_entry(domain_num + 1);
+
+	i = 0;
+	for_each_domain(cpu, sd) {
+		snprintf(buf, 32, "domain%d", i);
+		entry->ctl_name = i + 1;
+		entry->procname = sched_strdup(buf);
+		entry->mode = 0755;
+		entry->child = sd_alloc_ctl_domain_table(sd);
+		entry++;
+		i++;
+	}
+	return table;
+}
+
+static struct ctl_table_header *sd_sysctl_header;
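+/*
+ * The sysctl tree built below is rooted at /proc/sys/kernel/sched_domain/,
+ * with one cpu%d directory per online CPU and one domain%d directory per
+ * domain level underneath it; e.g. the level-0 balance interval of CPU 0
+ * shows up as /proc/sys/kernel/sched_domain/cpu0/domain0/min_interval.
+ */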
+static void init_sched_domain_sysctl(void)
+{
+	int i, cpu_num = num_online_cpus();
+	char buf[32];
+	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
+
+	sd_ctl_dir[0].child = entry;
+
+	for (i = 0; i < cpu_num; i++, entry++) {
+		snprintf(buf, 32, "cpu%d", i);
+		entry->ctl_name = i + 1;
+		entry->procname = sched_strdup(buf);
+		entry->mode = 0755;
+		entry->child = sd_alloc_ctl_cpu_table(i);
+	}
+	sd_sysctl_header = register_sysctl_table(sd_ctl_root, 0);
+}
+#else
+static void init_sched_domain_sysctl(void)
+{
+}
+#endif
+
+/*
+ * migration_call - callback that gets triggered when a CPU is added.
+ * Here we can start up the necessary migration thread for the new CPU.
+ */
+static int migration_call(struct notifier_block *nfb, unsigned long action,
+			  void *hcpu)
+{
+	int cpu = (long)hcpu;
+	struct task_struct *p;
+	struct runqueue *rq;
+	unsigned long flags;
+
+	switch (action) {
+	case CPU_UP_PREPARE:
+		p = kthread_create(migration_thread, hcpu, "migration/%d",cpu);
+		if (IS_ERR(p))
+			return NOTIFY_BAD;
+		p->flags |= PF_NOFREEZE;
+		kthread_bind(p, cpu);
+		/* Must be high prio: stop_machine expects to yield to it. */
+		rq = task_rq_lock(p, &flags);
+		__setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
+		task_rq_unlock(rq, &flags);
+		cpu_rq(cpu)->migration_thread = p;
+		break;
+	case CPU_ONLINE:
+		/* Strictly unnecessary, as first user will wake it. */
+		wake_up_process(cpu_rq(cpu)->migration_thread);
+		break;
+#ifdef CONFIG_HOTPLUG_CPU
+	case CPU_UP_CANCELED:
+		/* Unbind it from offline cpu so it can run.  Fall thru. */
+		kthread_bind(cpu_rq(cpu)->migration_thread,
+			     any_online_cpu(cpu_online_map));
+		kthread_stop(cpu_rq(cpu)->migration_thread);
+		cpu_rq(cpu)->migration_thread = NULL;
+		break;
+	case CPU_DEAD:
+		migrate_live_tasks(cpu);
+		rq = cpu_rq(cpu);
+		kthread_stop(rq->migration_thread);
+		rq->migration_thread = NULL;
+		/* Idle task back to normal (off runqueue, low prio) */
+		rq = task_rq_lock(rq->idle, &flags);
+		deactivate_task(rq->idle, rq);
+		rq->idle->static_prio = MAX_PRIO;
+		__setscheduler(rq->idle, SCHED_NORMAL, 0);
+		migrate_dead_tasks(cpu);
+		task_rq_unlock(rq, &flags);
+		migrate_nr_uninterruptible(rq);
+		BUG_ON(rq->nr_running != 0);
+
+		/* No need to migrate the tasks: it was best-effort if
+		 * they didn't do lock_cpu_hotplug().  Just wake up
+		 * the requestors. */
+		spin_lock_irq(&rq->lock);
+		while (!list_empty(&rq->migration_queue)) {
+			migration_req_t *req;
+			req = list_entry(rq->migration_queue.next,
+					 migration_req_t, list);
+			list_del_init(&req->list);
+			complete(&req->done);
+		}
+		spin_unlock_irq(&rq->lock);
+		break;
+#endif
+	}
+	return NOTIFY_OK;
+}
+
+/* Register at highest priority so that task migration (migrate_all_tasks)
+ * happens before everything else.
+ */
+static struct notifier_block __devinitdata migration_notifier = {
+	.notifier_call = migration_call,
+	.priority = 10
+};
+
+int __init migration_init(void)
+{
+	void *cpu = (void *)(long)smp_processor_id();
+	/* Start one for boot CPU. */
+	migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
+	migration_call(&migration_notifier, CPU_ONLINE, cpu);
+	register_cpu_notifier(&migration_notifier);
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_SMP
+#undef SCHED_DOMAIN_DEBUG
+#ifdef SCHED_DOMAIN_DEBUG
+static void sched_domain_debug(struct sched_domain *sd, int cpu)
+{
+	int level = 0;
+
+	if (!sd) {
+		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
+		return;
+	}
+
+	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
+
+	do {
+		int i;
+		char str[NR_CPUS];
+		struct sched_group *group = sd->groups;
+		cpumask_t groupmask;
+
+		cpumask_scnprintf(str, NR_CPUS, sd->span);
+		cpus_clear(groupmask);
+
+		printk(KERN_DEBUG);
+		for (i = 0; i < level + 1; i++)
+			printk(" ");
+		printk("domain %d: ", level);
+
+		if (!(sd->flags & SD_LOAD_BALANCE)) {
+			printk("does not load-balance\n");
+			if (sd->parent)
+				printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent");
+			break;
+		}
+
+		printk("span %s\n", str);
+
+		if (!cpu_isset(cpu, sd->span))
+			printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
+		if (!cpu_isset(cpu, group->cpumask))
+			printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
+
+		printk(KERN_DEBUG);
+		for (i = 0; i < level + 2; i++)
+			printk(" ");
+		printk("groups:");
+		do {
+			if (!group) {
+				printk("\n");
+				printk(KERN_ERR "ERROR: group is NULL\n");
+				break;
+			}
+
+			if (!group->cpu_power) {
+				printk("\n");
+				printk(KERN_ERR "ERROR: domain->cpu_power not set\n");
+			}
+
+			if (!cpus_weight(group->cpumask)) {
+				printk("\n");
+				printk(KERN_ERR "ERROR: empty group\n");
+			}
+
+			if (cpus_intersects(groupmask, group->cpumask)) {
+				printk("\n");
+				printk(KERN_ERR "ERROR: repeated CPUs\n");
+			}
+
+			cpus_or(groupmask, groupmask, group->cpumask);
+
+			cpumask_scnprintf(str, NR_CPUS, group->cpumask);
+			printk(" %s", str);
+
+			group = group->next;
+		} while (group != sd->groups);
+		printk("\n");
+
+		if (!cpus_equal(sd->span, groupmask))
+			printk(KERN_ERR "ERROR: groups don't span domain->span\n");
+
+		level++;
+		sd = sd->parent;
+
+		if (sd) {
+			if (!cpus_subset(groupmask, sd->span))
+				printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
+		}
+
+	} while (sd);
+}
+#else
+#define sched_domain_debug(sd, cpu) {}
+#endif
+
+static int sd_degenerate(struct sched_domain *sd)
+{
+	if (cpus_weight(sd->span) == 1)
+		return 1;
+
+	/* Following flags need at least 2 groups */
+	if (sd->flags & (SD_LOAD_BALANCE |
+			 SD_BALANCE_NEWIDLE |
+			 SD_BALANCE_FORK |
+			 SD_BALANCE_EXEC)) {
+		if (sd->groups != sd->groups->next)
+			return 0;
+	}
+
+	/* Following flags don't use groups */
+	if (sd->flags & (SD_WAKE_IDLE |
+			 SD_WAKE_AFFINE |
+			 SD_WAKE_BALANCE))
+		return 0;
+
+	return 1;
+}
+
+static int sd_parent_degenerate(struct sched_domain *sd,
+						struct sched_domain *parent)
+{
+	unsigned long cflags = sd->flags, pflags = parent->flags;
+
+	if (sd_degenerate(parent))
+		return 1;
+
+	if (!cpus_equal(sd->span, parent->span))
+		return 0;
+
+	/* Does parent contain flags not in child? */
+	/* WAKE_BALANCE is a subset of WAKE_AFFINE */
+	if (cflags & SD_WAKE_AFFINE)
+		pflags &= ~SD_WAKE_BALANCE;
+	/* Flags needing groups don't count if only 1 group in parent */
+	if (parent->groups == parent->groups->next) {
+		pflags &= ~(SD_LOAD_BALANCE |
+				SD_BALANCE_NEWIDLE |
+				SD_BALANCE_FORK |
+				SD_BALANCE_EXEC);
+	}
+	if (~cflags & pflags)
+		return 0;
+
+	return 1;
+}
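+/*
+ * The "~cflags & pflags" test above is plain bit arithmetic: e.g. with
+ * cflags == 0x3 and pflags == 0x7 it evaluates to 0x4, i.e. the parent
+ * carries a flag the child lacks, so it is not degenerate and is kept.
+ */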
+
+/*
+ * Attach the domain 'sd' to 'cpu' as its base domain.  Callers must
+ * hold the hotplug lock.
+ */
+static void cpu_attach_domain(struct sched_domain *sd, int cpu)
+{
+	runqueue_t *rq = cpu_rq(cpu);
+	struct sched_domain *tmp;
+
+	/* Remove the sched domains which do not contribute to scheduling. */
+	for (tmp = sd; tmp; tmp = tmp->parent) {
+		struct sched_domain *parent = tmp->parent;
+		if (!parent)
+			break;
+		if (sd_parent_degenerate(tmp, parent))
+			tmp->parent = parent->parent;
+	}
+
+	if (sd && sd_degenerate(sd))
+		sd = sd->parent;
+
+	sched_domain_debug(sd, cpu);
+
+	rcu_assign_pointer(rq->sd, sd);
+}
+
+/* cpus with isolated domains */
+static cpumask_t __devinitdata cpu_isolated_map = CPU_MASK_NONE;
+
+/* Setup the mask of cpus configured for isolated domains */
+static int __init isolated_cpu_setup(char *str)
+{
+	int ints[NR_CPUS], i;
+
+	str = get_options(str, ARRAY_SIZE(ints), ints);
+	cpus_clear(cpu_isolated_map);
+	for (i = 1; i <= ints[0]; i++)
+		if (ints[i] < NR_CPUS)
+			cpu_set(ints[i], cpu_isolated_map);
+	return 1;
+}
+
+__setup ("isolcpus=", isolated_cpu_setup);
+
+/*
+ * init_sched_build_groups takes an array of groups, the cpumask we wish
+ * to span, and a pointer to a function which identifies what group a CPU
+ * belongs to. The return value of group_fn must be a valid index into the
+ * groups[] array, and must be >= 0 and < NR_CPUS (due to the fact that we
+ * keep track of groups covered with a cpumask_t).
+ *
+ * init_sched_build_groups will build a circular linked list of the groups
+ * covered by the given span, and will set each group's ->cpumask correctly,
+ * and ->cpu_power to 0.
+ */
+static void init_sched_build_groups(struct sched_group groups[], cpumask_t span,
+				    int (*group_fn)(int cpu))
+{
+	struct sched_group *first = NULL, *last = NULL;
+	cpumask_t covered = CPU_MASK_NONE;
+	int i;
+
+	for_each_cpu_mask(i, span) {
+		int group = group_fn(i);
+		struct sched_group *sg = &groups[group];
+		int j;
+
+		if (cpu_isset(i, covered))
+			continue;
+
+		sg->cpumask = CPU_MASK_NONE;
+		sg->cpu_power = 0;
+
+		for_each_cpu_mask(j, span) {
+			if (group_fn(j) != group)
+				continue;
+
+			cpu_set(j, covered);
+			cpu_set(j, sg->cpumask);
+		}
+		if (!first)
+			first = sg;
+		if (last)
+			last->next = sg;
+		last = sg;
+	}
+	last->next = first;
+}
+
+#define SD_NODES_PER_DOMAIN 16
+
+/*
+ * Self-tuning task migration cost measurement between source and target CPUs.
+ *
+ * This is done by measuring the cost of manipulating buffers of varying
+ * sizes. For a given buffer-size here are the steps that are taken:
+ *
+ * 1) the source CPU reads+dirties a shared buffer
+ * 2) the target CPU reads+dirties the same shared buffer
+ *
+ * We measure how long they take, in the following 4 scenarios:
+ *
+ *  - source: CPU1, target: CPU2 | cost1
+ *  - source: CPU2, target: CPU1 | cost2
+ *  - source: CPU1, target: CPU1 | cost3
+ *  - source: CPU2, target: CPU2 | cost4
+ *
+ * We then calculate the cost1+cost2-cost3-cost4 difference - this is
+ * the cost of migration.
+ *
+ * We then start off from a small buffer-size and iterate up to larger
+ * buffer sizes, in 5% steps - measuring each buffer-size separately, and
+ * doing a maximum search for the cost. (The maximum cost for a migration
+ * normally occurs when the working set size is around the effective cache
+ * size.)
+ */
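+/*
+ * Worked example (illustrative numbers only): if the two same-CPU runs
+ * cost cost3 = 1.0 msec and cost4 = 1.1 msecs while the two cross-CPU
+ * runs cost cost1 = 1.6 msecs and cost2 = 1.7 msecs, the estimated
+ * migration cost for that buffer size is (1.6+1.7)-(1.0+1.1) = 1.2 msecs.
+ */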
+#define SEARCH_SCOPE		2
+#define MIN_CACHE_SIZE		(64*1024U)
+#define DEFAULT_CACHE_SIZE	(5*1024*1024U)
+#define ITERATIONS		1
+#define SIZE_THRESH		130
+#define COST_THRESH		130
+
+/*
+ * The migration cost is a function of 'domain distance'. Domain
+ * distance is the number of steps a CPU has to iterate down its
+ * domain tree to share a domain with the other CPU. The farther
+ * two CPUs are from each other, the larger the distance gets.
+ *
+ * Note that we use the distance only to cache measurement results;
+ * the distance value is not used numerically otherwise. When two
+ * CPUs have the same distance it is assumed that the migration
+ * cost is the same. (this is a simplification but quite practical)
+ */
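+/*
+ * For example, on an SMT system two siblings of the same core share their
+ * lowest-level domain and get distance 0, two cores in the same package
+ * typically get distance 1, and CPUs in different packages/nodes a larger
+ * distance (the exact values depend on the domain hierarchy that gets
+ * built for the machine).
+ */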
+#define MAX_DOMAIN_DISTANCE 32
+
+static unsigned long long migration_cost[MAX_DOMAIN_DISTANCE] =
+		{ [ 0 ... MAX_DOMAIN_DISTANCE-1 ] =
+/*
+ * Architectures may override the migration cost and thus avoid
+ * boot-time calibration. Unit is nanoseconds. Mostly useful for
+ * virtualized hardware:
+ */
+#ifdef CONFIG_DEFAULT_MIGRATION_COST
+			CONFIG_DEFAULT_MIGRATION_COST
+#else
+			-1LL
+#endif
+};
+
+/*
+ * Allow override of migration cost - in units of microseconds.
+ * E.g. migration_cost=1000,2000,3000 will set up a level-1 cost
+ * of 1 msec, level-2 cost of 2 msecs and level-3 cost of 3 msecs:
+ */
+static int __init migration_cost_setup(char *str)
+{
+	int ints[MAX_DOMAIN_DISTANCE+1], i;
+
+	str = get_options(str, ARRAY_SIZE(ints), ints);
+
+	printk("#ints: %d\n", ints[0]);
+	for (i = 1; i <= ints[0]; i++) {
+		migration_cost[i-1] = (unsigned long long)ints[i]*1000;
+		printk("migration_cost[%d]: %Ld\n", i-1, migration_cost[i-1]);
+	}
+	return 1;
+}
+
+__setup ("migration_cost=", migration_cost_setup);
+
+/*
+ * Global multiplier (divisor) for migration-cutoff values,
+ * in percent. E.g. use a value of 150 to get 1.5 times
+ * longer cache-hot cutoff times.
+ *
+ * (We scale it from 100 to 128 to make long long handling easier.)
+ */
+
+#define MIGRATION_FACTOR_SCALE 128
+
+static unsigned int migration_factor = MIGRATION_FACTOR_SCALE;
+
+static int __init setup_migration_factor(char *str)
+{
+	get_option(&str, &migration_factor);
+	migration_factor = migration_factor * MIGRATION_FACTOR_SCALE / 100;
+	return 1;
+}
+
+__setup("migration_factor=", setup_migration_factor);
+
+/*
+ * Estimated distance of two CPUs, measured via the number of domains
+ * we have to pass for the two CPUs to be in the same span:
+ */
+static unsigned long domain_distance(int cpu1, int cpu2)
+{
+	unsigned long distance = 0;
+	struct sched_domain *sd;
+
+	for_each_domain(cpu1, sd) {
+		WARN_ON(!cpu_isset(cpu1, sd->span));
+		if (cpu_isset(cpu2, sd->span))
+			return distance;
+		distance++;
+	}
+	if (distance >= MAX_DOMAIN_DISTANCE) {
+		WARN_ON(1);
+		distance = MAX_DOMAIN_DISTANCE-1;
+	}
+
+	return distance;
+}
+
+static unsigned int migration_debug;
+
+static int __init setup_migration_debug(char *str)
+{
+	get_option(&str, &migration_debug);
+	return 1;
+}
+
+__setup("migration_debug=", setup_migration_debug);
+
+/*
+ * Maximum cache-size that the scheduler should try to measure.
+ * Architectures with larger caches should tune this up during
+ * bootup. Gets used in the domain-setup code (i.e. during SMP
+ * bootup).
+ */
+unsigned int max_cache_size;
+
+static int __init setup_max_cache_size(char *str)
+{
+	get_option(&str, &max_cache_size);
+	return 1;
+}
+
+__setup("max_cache_size=", setup_max_cache_size);
+
+/*
+ * Dirty a big buffer in a hard-to-predict (for the L2 cache) way. This
+ * is the operation that is timed, so we try to generate unpredictable
+ * cachemisses that still end up filling the L2 cache:
+ */
+static void touch_cache(void *__cache, unsigned long __size)
+{
+	unsigned long size = __size/sizeof(long), chunk1 = size/3,
+			chunk2 = 2*size/3;
+	unsigned long *cache = __cache;
+	int i;
+
+	for (i = 0; i < size/6; i += 8) {
+		switch (i % 6) {
+			case 0: cache[i]++;
+			case 1: cache[size-1-i]++;
+			case 2: cache[chunk1-i]++;
+			case 3: cache[chunk1+i]++;
+			case 4: cache[chunk2-i]++;
+			case 5: cache[chunk2+i]++;
+		}
+	}
+}
+
+/*
+ * Measure the cache-cost of one task migration. Returns in units of nsec.
+ */
+static unsigned long long measure_one(void *cache, unsigned long size,
+				      int source, int target)
+{
+	cpumask_t mask, saved_mask;
+	unsigned long long t0, t1, t2, t3, cost;
+
+	saved_mask = current->cpus_allowed;
+
+	/*
+	 * Flush source caches to RAM and invalidate them:
+	 */
+	sched_cacheflush();
+
+	/*
+	 * Migrate to the source CPU:
+	 */
+	mask = cpumask_of_cpu(source);
+	set_cpus_allowed(current, mask);
+	WARN_ON(smp_processor_id() != source);
+
+	/*
+	 * Dirty the working set:
+	 */
+	t0 = sched_clock();
+	touch_cache(cache, size);
+	t1 = sched_clock();
+
+	/*
+	 * Migrate to the target CPU, dirty the L2 cache and access
+	 * the shared buffer (which represents the working set of a
+	 * migrated task):
+	 */
+	mask = cpumask_of_cpu(target);
+	set_cpus_allowed(current, mask);
+	WARN_ON(smp_processor_id() != target);
+
+	t2 = sched_clock();
+	touch_cache(cache, size);
+	t3 = sched_clock();
+
+	cost = t1-t0 + t3-t2;
+
+	if (migration_debug >= 2)
+		printk("[%d->%d]: %8Ld %8Ld %8Ld => %10Ld.\n",
+			source, target, t1-t0, t2-t1, t3-t2, cost);
+	/*
+	 * Flush target caches to RAM and invalidate them:
+	 */
+	sched_cacheflush();
+
+	set_cpus_allowed(current, saved_mask);
+
+	return cost;
+}
+
+/*
+ * Measure a series of task migrations and return the average
+ * result. Since this code runs early during bootup the system
+ * is 'undisturbed' and the average latency makes sense.
+ *
+ * The algorithm in essence auto-detects the relevant cache-size,
+ * so it will properly detect different cachesizes for different
+ * cache-hierarchies, depending on how the CPUs are connected.
+ *
+ * Architectures can prime the upper limit of the search range via
+ * max_cache_size, otherwise the search range defaults to 10MB...64K.
+ */
+static unsigned long long
+measure_cost(int cpu1, int cpu2, void *cache, unsigned int size)
+{
+	unsigned long long cost1, cost2;
+	int i;
+
+	/*
+	 * Measure the migration cost of 'size' bytes, averaged over
+	 * ITERATIONS runs in each direction:
+	 *
+	 * (We perturb the cache size by a small (0..4k)
+	 *  value to compensate for size/alignment related artifacts.
+	 *  We also subtract the cost of the operation done on
+	 *  the same CPU.)
+	 */
+	cost1 = 0;
+
+	/*
+	 * dry run, to make sure we start off cache-cold on cpu1,
+	 * and to get any vmalloc pagefaults in advance:
+	 */
+	measure_one(cache, size, cpu1, cpu2);
+	for (i = 0; i < ITERATIONS; i++)
+		cost1 += measure_one(cache, size - i*1024, cpu1, cpu2);
+
+	measure_one(cache, size, cpu2, cpu1);
+	for (i = 0; i < ITERATIONS; i++)
+		cost1 += measure_one(cache, size - i*1024, cpu2, cpu1);
+
+	/*
+	 * (We measure the non-migrating [cached] cost on both
+	 *  cpu1 and cpu2, to handle CPUs with different speeds)
+	 */
+	cost2 = 0;
+
+	measure_one(cache, size, cpu1, cpu1);
+	for (i = 0; i < ITERATIONS; i++)
+		cost2 += measure_one(cache, size - i*1024, cpu1, cpu1);
+
+	measure_one(cache, size, cpu2, cpu2);
+	for (i = 0; i < ITERATIONS; i++)
+		cost2 += measure_one(cache, size - i*1024, cpu2, cpu2);
+
+	/*
+	 * Get the per-iteration migration cost:
+	 */
+	do_div(cost1, 2*ITERATIONS);
+	do_div(cost2, 2*ITERATIONS);
+
+	return cost1 - cost2;
+}
+
+static unsigned long long measure_migration_cost(int cpu1, int cpu2)
+{
+	unsigned long long max_cost = 0, fluct = 0, avg_fluct = 0;
+	unsigned int max_size, size, size_found = 0;
+	long long cost = 0, prev_cost;
+	void *cache;
+
+	/*
+	 * Search from max_cache_size * SEARCH_SCOPE down to 64K - the real
+	 * relevant cache size has to lie somewhere in between.
+	 */
+	if (max_cache_size) {
+		max_size = max(max_cache_size * SEARCH_SCOPE, MIN_CACHE_SIZE);
+		size = max(max_cache_size / SEARCH_SCOPE, MIN_CACHE_SIZE);
+	} else {
+		/*
+		 * Since we have no estimate of the relevant
+		 * search range, fall back to the defaults:
+		 */
+		max_size = DEFAULT_CACHE_SIZE * SEARCH_SCOPE;
+		size = MIN_CACHE_SIZE;
+	}
+
+	if (!cpu_online(cpu1) || !cpu_online(cpu2)) {
+		printk("cpu %d and %d not both online!\n", cpu1, cpu2);
+		return 0;
+	}
+
+	/*
+	 * Allocate the working set:
+	 */
+	cache = vmalloc(max_size);
+	if (!cache) {
+		printk("could not vmalloc %d bytes for cache!\n", 2*max_size);
+		return 1000000; // return 1 msec on very small boxen
+	}
+
+	while (size <= max_size) {
+		prev_cost = cost;
+		cost = measure_cost(cpu1, cpu2, cache, size);
+
+		/*
+		 * Update the max:
+		 */
+		if (cost > 0) {
+			if (max_cost < cost) {
+				max_cost = cost;
+				size_found = size;
+			}
+		}
+		/*
+		 * Calculate average fluctuation, we use this to prevent
+		 * noise from triggering an early break out of the loop:
+		 */
+		fluct = abs(cost - prev_cost);
+		avg_fluct = (avg_fluct + fluct)/2;
+
+		if (migration_debug)
+			printk("-> [%d][%d][%7d] %3ld.%ld [%3ld.%ld] (%ld): (%8Ld %8Ld)\n",
+				cpu1, cpu2, size,
+				(long)cost / 1000000,
+				((long)cost / 100000) % 10,
+				(long)max_cost / 1000000,
+				((long)max_cost / 100000) % 10,
+				domain_distance(cpu1, cpu2),
+				cost, avg_fluct);
+
+		/*
+		 * If we iterated at least 30% past the previous maximum,
+		 * and the cost (plus fluctuation) has dropped below
+		 * max_cost/1.3 already, then we assume to have found the
+		 * maximum and break out of the loop early:
+		 */
+		if (size_found && (size*100 > size_found*SIZE_THRESH))
+			if (cost+avg_fluct <= 0 ||
+				max_cost*100 > (cost+avg_fluct)*COST_THRESH) {
+
+				if (migration_debug)
+					printk("-> found max.\n");
+				break;
+			}
+		/*
+		 * Increase the cachesize in 10% steps:
+		 */
+		size = size * 10 / 9;
+	}
+
+	if (migration_debug)
+		printk("[%d][%d] working set size found: %d, cost: %Ld\n",
+			cpu1, cpu2, size_found, max_cost);
+
+	vfree(cache);
+
+	/*
+	 * A task is considered 'cache cold' if at least 2 times
+	 * the worst-case cost of migration has passed.
+	 *
+	 * (this limit is only listened to if the load-balancing
+	 * situation is 'nice' - if there is a large imbalance we
+	 * ignore it for the sake of CPU utilization and
+	 * processing fairness.)
+	 */
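+	/*
+	 * For example, with the default migration_factor (scaled to 128) a
+	 * measured worst-case cost of 4,000,000 nsecs yields a cache-hot
+	 * cutoff of 2 * 4,000,000 * 128 / 128 = 8,000,000 nsecs (8 msecs).
+	 */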
+	return 2 * max_cost * migration_factor / MIGRATION_FACTOR_SCALE;
+}
+
+static void calibrate_migration_costs(const cpumask_t *cpu_map)
+{
+	int cpu1 = -1, cpu2 = -1, cpu, orig_cpu = raw_smp_processor_id();
+	unsigned long j0, j1, distance, max_distance = 0;
+	struct sched_domain *sd;
+
+	j0 = jiffies;
+
+	/*
+	 * First pass - calculate the cacheflush times:
+	 */
+	for_each_cpu_mask(cpu1, *cpu_map) {
+		for_each_cpu_mask(cpu2, *cpu_map) {
+			if (cpu1 == cpu2)
+				continue;
+			distance = domain_distance(cpu1, cpu2);
+			max_distance = max(max_distance, distance);
+			/*
+			 * No result cached yet?
+			 */
+			if (migration_cost[distance] == -1LL)
+				migration_cost[distance] =
+					measure_migration_cost(cpu1, cpu2);
+		}
+	}
+	/*
+	 * Second pass - update the sched domain hierarchy with
+	 * the new cache-hot-time estimations:
+	 */
+	for_each_cpu_mask(cpu, *cpu_map) {
+		distance = 0;
+		for_each_domain(cpu, sd) {
+			sd->cache_hot_time = migration_cost[distance];
+			distance++;
+		}
+	}
+	/*
+	 * Print the matrix:
+	 */
+	if (migration_debug)
+		printk("migration: max_cache_size: %d, cpu: %d MHz:\n",
+			max_cache_size,
+#ifdef CONFIG_X86
+			cpu_khz/1000
+#else
+			-1
+#endif
+		);
+	if (system_state == SYSTEM_BOOTING) {
+		printk("migration_cost=");
+		for (distance = 0; distance <= max_distance; distance++) {
+			if (distance)
+				printk(",");
+			printk("%ld", (long)migration_cost[distance] / 1000);
+		}
+		printk("\n");
+	}
+	j1 = jiffies;
+	if (migration_debug)
+		printk("migration: %ld seconds\n", (j1-j0)/HZ);
+
+	/*
+	 * Move back to the original CPU. NUMA-Q gets confused
+	 * if we migrate to another quad during bootup.
+	 */
+	if (raw_smp_processor_id() != orig_cpu) {
+		cpumask_t mask = cpumask_of_cpu(orig_cpu),
+			saved_mask = current->cpus_allowed;
+
+		set_cpus_allowed(current, mask);
+		set_cpus_allowed(current, saved_mask);
+	}
+}
+
+#ifdef CONFIG_NUMA
+
+/**
+ * find_next_best_node - find the next node to include in a sched_domain
+ * @node: node whose sched_domain we're building
+ * @used_nodes: nodes already in the sched_domain
+ *
+ * Find the next node to include in a given scheduling domain.  Simply
+ * finds the closest node not already in the @used_nodes map.
+ *
+ * Should use nodemask_t.
+ */
+static int find_next_best_node(int node, unsigned long *used_nodes)
+{
+	int i, n, val, min_val, best_node = 0;
+
+	min_val = INT_MAX;
+
+	for (i = 0; i < MAX_NUMNODES; i++) {
+		/* Start at @node */
+		n = (node + i) % MAX_NUMNODES;
+
+		if (!nr_cpus_node(n))
+			continue;
+
+		/* Skip already used nodes */
+		if (test_bit(n, used_nodes))
+			continue;
+
+		/* Simple min distance search */
+		val = node_distance(node, n);
+
+		if (val < min_val) {
+			min_val = val;
+			best_node = n;
+		}
+	}
+
+	set_bit(best_node, used_nodes);
+	return best_node;
+}
+
+/**
+ * sched_domain_node_span - get a cpumask for a node's sched_domain
+ * @node: node whose cpumask we're constructing
+ *
+ * Given a node, construct a good cpumask for its sched_domain to span.  It
+ * should be one that prevents unnecessary balancing, but also spreads tasks
+ * out optimally.
+ */
+static cpumask_t sched_domain_node_span(int node)
+{
+	int i;
+	cpumask_t span, nodemask;
+	DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
+
+	cpus_clear(span);
+	bitmap_zero(used_nodes, MAX_NUMNODES);
+
+	nodemask = node_to_cpumask(node);
+	cpus_or(span, span, nodemask);
+	set_bit(node, used_nodes);
+
+	for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
+		int next_node = find_next_best_node(node, used_nodes);
+		nodemask = node_to_cpumask(next_node);
+		cpus_or(span, span, nodemask);
+	}
+
+	return span;
+}
+#endif
+
+/*
+ * At the moment, CONFIG_SCHED_SMT is never defined, but leave it in so we
+ * can switch it on easily if needed.
+ */
+#ifdef CONFIG_SCHED_SMT
+static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
+static struct sched_group sched_group_cpus[NR_CPUS];
+static int cpu_to_cpu_group(int cpu)
+{
+	return cpu;
+}
+#endif
+
+#ifdef CONFIG_SCHED_MC
+static DEFINE_PER_CPU(struct sched_domain, core_domains);
+static struct sched_group sched_group_core[NR_CPUS];
+#endif
+
+#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
+static int cpu_to_core_group(int cpu)
+{
+	return first_cpu(cpu_sibling_map[cpu]);
+}
+#elif defined(CONFIG_SCHED_MC)
+static int cpu_to_core_group(int cpu)
+{
+	return cpu;
+}
+#endif
+
+static DEFINE_PER_CPU(struct sched_domain, phys_domains);
+static struct sched_group sched_group_phys[NR_CPUS];
+static int cpu_to_phys_group(int cpu)
+{
+#if defined(CONFIG_SCHED_MC)
+	cpumask_t mask = cpu_coregroup_map(cpu);
+	return first_cpu(mask);
+#elif defined(CONFIG_SCHED_SMT)
+	return first_cpu(cpu_sibling_map[cpu]);
+#else
+	return cpu;
+#endif
+}
+
+#ifdef CONFIG_NUMA
+/*
+ * The init_sched_build_groups can't handle what we want to do with node
+ * groups, so roll our own. Now each node has its own list of groups which
+ * gets dynamically allocated.
+ */
+static DEFINE_PER_CPU(struct sched_domain, node_domains);
+static struct sched_group **sched_group_nodes_bycpu[NR_CPUS];
+
+static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
+static struct sched_group *sched_group_allnodes_bycpu[NR_CPUS];
+
+static int cpu_to_allnodes_group(int cpu)
+{
+	return cpu_to_node(cpu);
+}
+static void init_numa_sched_groups_power(struct sched_group *group_head)
+{
+	struct sched_group *sg = group_head;
+	int j;
+
+	if (!sg)
+		return;
+next_sg:
+	for_each_cpu_mask(j, sg->cpumask) {
+		struct sched_domain *sd;
+
+		sd = &per_cpu(phys_domains, j);
+		if (j != first_cpu(sd->groups->cpumask)) {
+			/*
+			 * Only add "power" once for each
+			 * physical package.
+			 */
+			continue;
+		}
+
+		sg->cpu_power += sd->groups->cpu_power;
+	}
+	sg = sg->next;
+	if (sg != group_head)
+		goto next_sg;
+}
+#endif
+
+/*
+ * Build sched domains for a given set of cpus and attach the sched domains
+ * to the individual cpus
+ */
+void build_sched_domains(const cpumask_t *cpu_map)
+{
+	int i;
+#ifdef CONFIG_NUMA
+	struct sched_group **sched_group_nodes = NULL;
+	struct sched_group *sched_group_allnodes = NULL;
+
+	/*
+	 * Allocate the per-node list of sched groups
+	 */
+	sched_group_nodes = kmalloc(sizeof(struct sched_group*)*MAX_NUMNODES,
+					   GFP_ATOMIC);
+	if (!sched_group_nodes) {
+		printk(KERN_WARNING "Can not alloc sched group node list\n");
+		return;
+	}
+	sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
+#endif
+
+	/*
+	 * Set up domains for cpus specified by the cpu_map.
+	 */
+	for_each_cpu_mask(i, *cpu_map) {
+		int group;
+		struct sched_domain *sd = NULL, *p;
+		cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
+
+		cpus_and(nodemask, nodemask, *cpu_map);
+
+#ifdef CONFIG_NUMA
+		if (cpus_weight(*cpu_map)
+				> SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
+			if (!sched_group_allnodes) {
+				sched_group_allnodes
+					= kmalloc(sizeof(struct sched_group)
+							* MAX_NUMNODES,
+						  GFP_KERNEL);
+				if (!sched_group_allnodes) {
+					printk(KERN_WARNING
+					"Can not alloc allnodes sched group\n");
+					break;
+				}
+				sched_group_allnodes_bycpu[i]
+						= sched_group_allnodes;
+			}
+			sd = &per_cpu(allnodes_domains, i);
+			*sd = SD_ALLNODES_INIT;
+			sd->span = *cpu_map;
+			group = cpu_to_allnodes_group(i);
+			sd->groups = &sched_group_allnodes[group];
+			p = sd;
+		} else
+			p = NULL;
+
+		sd = &per_cpu(node_domains, i);
+		*sd = SD_NODE_INIT;
+		sd->span = sched_domain_node_span(cpu_to_node(i));
+		sd->parent = p;
+		cpus_and(sd->span, sd->span, *cpu_map);
+#endif
+
+		p = sd;
+		sd = &per_cpu(phys_domains, i);
+		group = cpu_to_phys_group(i);
+		*sd = SD_CPU_INIT;
+		sd->span = nodemask;
+		sd->parent = p;
+		sd->groups = &sched_group_phys[group];
+
+#ifdef CONFIG_SCHED_MC
+		p = sd;
+		sd = &per_cpu(core_domains, i);
+		group = cpu_to_core_group(i);
+		*sd = SD_MC_INIT;
+		sd->span = cpu_coregroup_map(i);
+		cpus_and(sd->span, sd->span, *cpu_map);
+		sd->parent = p;
+		sd->groups = &sched_group_core[group];
+#endif
+
+#ifdef CONFIG_SCHED_SMT
+		p = sd;
+		sd = &per_cpu(cpu_domains, i);
+		group = cpu_to_cpu_group(i);
+		*sd = SD_SIBLING_INIT;
+		sd->span = cpu_sibling_map[i];
+		cpus_and(sd->span, sd->span, *cpu_map);
+		sd->parent = p;
+		sd->groups = &sched_group_cpus[group];
+#endif
+	}
+
+#ifdef CONFIG_SCHED_SMT
+	/* Set up CPU (sibling) groups */
+	for_each_cpu_mask(i, *cpu_map) {
+		cpumask_t this_sibling_map = cpu_sibling_map[i];
+		cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
+		if (i != first_cpu(this_sibling_map))
+			continue;
+
+		init_sched_build_groups(sched_group_cpus, this_sibling_map,
+						&cpu_to_cpu_group);
+	}
+#endif
+
+#ifdef CONFIG_SCHED_MC
+	/* Set up multi-core groups */
+	for_each_cpu_mask(i, *cpu_map) {
+		cpumask_t this_core_map = cpu_coregroup_map(i);
+		cpus_and(this_core_map, this_core_map, *cpu_map);
+		if (i != first_cpu(this_core_map))
+			continue;
+		init_sched_build_groups(sched_group_core, this_core_map,
+					&cpu_to_core_group);
+	}
+#endif
+
+
+	/* Set up physical groups */
+	for (i = 0; i < MAX_NUMNODES; i++) {
+		cpumask_t nodemask = node_to_cpumask(i);
+
+		cpus_and(nodemask, nodemask, *cpu_map);
+		if (cpus_empty(nodemask))
+			continue;
+
+		init_sched_build_groups(sched_group_phys, nodemask,
+						&cpu_to_phys_group);
+	}
+
+#ifdef CONFIG_NUMA
+	/* Set up node groups */
+	if (sched_group_allnodes)
+		init_sched_build_groups(sched_group_allnodes, *cpu_map,
+					&cpu_to_allnodes_group);
+
+	for (i = 0; i < MAX_NUMNODES; i++) {
+		/* Set up node groups */
+		struct sched_group *sg, *prev;
+		cpumask_t nodemask = node_to_cpumask(i);
+		cpumask_t domainspan;
+		cpumask_t covered = CPU_MASK_NONE;
+		int j;
+
+		cpus_and(nodemask, nodemask, *cpu_map);
+		if (cpus_empty(nodemask)) {
+			sched_group_nodes[i] = NULL;
+			continue;
+		}
+
+		domainspan = sched_domain_node_span(i);
+		cpus_and(domainspan, domainspan, *cpu_map);
+
+		sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
+		sched_group_nodes[i] = sg;
+		for_each_cpu_mask(j, nodemask) {
+			struct sched_domain *sd;
+			sd = &per_cpu(node_domains, j);
+			sd->groups = sg;
+			if (sd->groups == NULL) {
+				/* Turn off balancing if we have no groups */
+				sd->flags = 0;
+			}
+		}
+		if (!sg) {
+			printk(KERN_WARNING
+			"Can not alloc domain group for node %d\n", i);
+			continue;
+		}
+		sg->cpu_power = 0;
+		sg->cpumask = nodemask;
+		cpus_or(covered, covered, nodemask);
+		prev = sg;
+
+		for (j = 0; j < MAX_NUMNODES; j++) {
+			cpumask_t tmp, notcovered;
+			int n = (i + j) % MAX_NUMNODES;
+
+			cpus_complement(notcovered, covered);
+			cpus_and(tmp, notcovered, *cpu_map);
+			cpus_and(tmp, tmp, domainspan);
+			if (cpus_empty(tmp))
+				break;
+
+			nodemask = node_to_cpumask(n);
+			cpus_and(tmp, tmp, nodemask);
+			if (cpus_empty(tmp))
+				continue;
+
+			sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
+			if (!sg) {
+				printk(KERN_WARNING
+				"Can not alloc domain group for node %d\n", j);
+				break;
+			}
+			sg->cpu_power = 0;
+			sg->cpumask = tmp;
+			cpus_or(covered, covered, tmp);
+			prev->next = sg;
+			prev = sg;
+		}
+		prev->next = sched_group_nodes[i];
+	}
+#endif
+
+	/* Calculate CPU power for physical packages and nodes */
+	for_each_cpu_mask(i, *cpu_map) {
+		int power;
+		struct sched_domain *sd;
+#ifdef CONFIG_SCHED_SMT
+		sd = &per_cpu(cpu_domains, i);
+		power = SCHED_LOAD_SCALE;
+		sd->groups->cpu_power = power;
+#endif
+#ifdef CONFIG_SCHED_MC
+		sd = &per_cpu(core_domains, i);
+		power = SCHED_LOAD_SCALE + (cpus_weight(sd->groups->cpumask)-1)
+					    * SCHED_LOAD_SCALE / 10;
+		sd->groups->cpu_power = power;
+
+		sd = &per_cpu(phys_domains, i);
+
+ 		/*
+ 		 * This has to be < 2 * SCHED_LOAD_SCALE
+ 		 * Lets keep it SCHED_LOAD_SCALE, so that
+ 		 * while calculating NUMA group's cpu_power
+ 		 * we can simply do
+ 		 *  numa_group->cpu_power += phys_group->cpu_power;
+ 		 *
+ 		 * See "only add power once for each physical pkg"
+ 		 * comment below
+ 		 */
+ 		sd->groups->cpu_power = SCHED_LOAD_SCALE;
+#else
+		sd = &per_cpu(phys_domains, i);
+		power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
+				(cpus_weight(sd->groups->cpumask)-1) / 10;
+		sd->groups->cpu_power = power;
+#endif
+	}
+
+#ifdef CONFIG_NUMA
+	for (i = 0; i < MAX_NUMNODES; i++)
+		init_numa_sched_groups_power(sched_group_nodes[i]);
+
+	init_numa_sched_groups_power(sched_group_allnodes);
+#endif
+
+	/* Attach the domains */
+	for_each_cpu_mask(i, *cpu_map) {
+		struct sched_domain *sd;
+#ifdef CONFIG_SCHED_SMT
+		sd = &per_cpu(cpu_domains, i);
+#elif defined(CONFIG_SCHED_MC)
+		sd = &per_cpu(core_domains, i);
+#else
+		sd = &per_cpu(phys_domains, i);
+#endif
+		cpu_attach_domain(sd, i);
+	}
+	/*
+	 * Tune cache-hot values:
+	 */
+	calibrate_migration_costs(cpu_map);
+}
+/*
+ * Set up scheduler domains and groups.  Callers must hold the hotplug lock.
+ */
+static void arch_init_sched_domains(const cpumask_t *cpu_map)
+{
+	cpumask_t cpu_default_map;
+
+	/*
+	 * Setup mask for cpus without special case scheduling requirements.
+	 * For now this just excludes isolated cpus, but could be used to
+	 * exclude other special cases in the future.
+	 */
+	cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
+
+	build_sched_domains(&cpu_default_map);
+}
+
+static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
+{
+#ifdef CONFIG_NUMA
+	int i;
+	int cpu;
+
+	for_each_cpu_mask(cpu, *cpu_map) {
+		struct sched_group *sched_group_allnodes
+			= sched_group_allnodes_bycpu[cpu];
+		struct sched_group **sched_group_nodes
+			= sched_group_nodes_bycpu[cpu];
+
+		if (sched_group_allnodes) {
+			kfree(sched_group_allnodes);
+			sched_group_allnodes_bycpu[cpu] = NULL;
+		}
+
+		if (!sched_group_nodes)
+			continue;
+
+		for (i = 0; i < MAX_NUMNODES; i++) {
+			cpumask_t nodemask = node_to_cpumask(i);
+			struct sched_group *oldsg, *sg = sched_group_nodes[i];
+
+			cpus_and(nodemask, nodemask, *cpu_map);
+			if (cpus_empty(nodemask))
+				continue;
+
+			if (sg == NULL)
+				continue;
+			sg = sg->next;
+next_sg:
+			oldsg = sg;
+			sg = sg->next;
+			kfree(oldsg);
+			if (oldsg != sched_group_nodes[i])
+				goto next_sg;
+		}
+		kfree(sched_group_nodes);
+		sched_group_nodes_bycpu[cpu] = NULL;
+	}
+#endif
+}
+
+/*
+ * Detach sched domains from a group of cpus specified in cpu_map
+ * These cpus will now be attached to the NULL domain
+ */
+static void detach_destroy_domains(const cpumask_t *cpu_map)
+{
+	int i;
+
+	for_each_cpu_mask(i, *cpu_map)
+		cpu_attach_domain(NULL, i);
+	synchronize_sched();
+	arch_destroy_sched_domains(cpu_map);
+}
+
+/*
+ * Partition sched domains as specified by the cpumasks below.
+ * This attaches all cpus from the cpumasks to the NULL domain,
+ * waits for a RCU quiescent period, recalculates sched
+ * domain information and then attaches them back to the
+ * correct sched domains
+ * Call with hotplug lock held
+ */
+void partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
+{
+	cpumask_t change_map;
+
+	cpus_and(*partition1, *partition1, cpu_online_map);
+	cpus_and(*partition2, *partition2, cpu_online_map);
+	cpus_or(change_map, *partition1, *partition2);
+
+	/* Detach sched domains from all of the affected cpus */
+	detach_destroy_domains(&change_map);
+	if (!cpus_empty(*partition1))
+		build_sched_domains(partition1);
+	if (!cpus_empty(*partition2))
+		build_sched_domains(partition2);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Force a reinitialization of the sched domains hierarchy.  The domains
+ * and groups cannot be updated in place without racing with the balancing
+ * code, so we temporarily attach all running cpus to the NULL domain
+ * which will prevent rebalancing while the sched domains are recalculated.
+ */
+static int update_sched_domains(struct notifier_block *nfb,
+				unsigned long action, void *hcpu)
+{
+	switch (action) {
+	case CPU_UP_PREPARE:
+	case CPU_DOWN_PREPARE:
+		detach_destroy_domains(&cpu_online_map);
+		return NOTIFY_OK;
+
+	case CPU_UP_CANCELED:
+	case CPU_DOWN_FAILED:
+	case CPU_ONLINE:
+	case CPU_DEAD:
+		/*
+		 * Fall through and re-initialise the domains.
+		 */
+		break;
+	default:
+		return NOTIFY_DONE;
+	}
+
+	/* The hotplug lock is already held by cpu_up/cpu_down */
+	arch_init_sched_domains(&cpu_online_map);
+
+	return NOTIFY_OK;
+}
+#endif
+
+void __init sched_init_smp(void)
+{
+	lock_cpu_hotplug();
+	arch_init_sched_domains(&cpu_online_map);
+	unlock_cpu_hotplug();
+	/* XXX: Theoretical race here - CPU may be hotplugged now */
+	hotcpu_notifier(update_sched_domains, 0);
+	init_sched_domain_sysctl();
+}
+#else
+void __init sched_init_smp(void)
+{
+}
+#endif /* CONFIG_SMP */
+
+int in_sched_functions(unsigned long addr)
+{
+	/* Linker adds these: start and end of __sched functions */
+	extern char __sched_text_start[], __sched_text_end[];
+	return in_lock_functions(addr) ||
+		(addr >= (unsigned long)__sched_text_start
+		&& addr < (unsigned long)__sched_text_end);
+}
+
+void __init sched_init(void)
+{
+	runqueue_t *rq;
+	int i, j, k;
+
+	for_each_cpu(i) {
+		prio_array_t *array;
+
+		rq = cpu_rq(i);
+		spin_lock_init(&rq->lock);
+		rq->nr_running = 0;
+		rq->active = rq->arrays;
+		rq->expired = rq->arrays + 1;
+		rq->best_expired_prio = MAX_PRIO;
+
+#ifdef CONFIG_SMP
+		rq->sd = NULL;
+		for (j = 1; j < 3; j++)
+			rq->cpu_load[j] = 0;
+		rq->active_balance = 0;
+		rq->push_cpu = 0;
+		rq->migration_thread = NULL;
+		INIT_LIST_HEAD(&rq->migration_queue);
+#endif
+		atomic_set(&rq->nr_iowait, 0);
+
+		for (j = 0; j < 2; j++) {
+			array = rq->arrays + j;
+			for (k = 0; k < MAX_PRIO; k++) {
+				INIT_LIST_HEAD(array->queue + k);
+				__clear_bit(k, array->bitmap);
+			}
+			// delimiter for bitsearch
+			__set_bit(MAX_PRIO, array->bitmap);
+		}
+	}
+
+	set_load_weight(&init_task);
+	/*
+	 * The boot idle thread does lazy MMU switching as well:
+	 */
+	atomic_inc(&init_mm.mm_count);
+	enter_lazy_tlb(&init_mm, current);
+
+	/*
+	 * Make us the idle thread. Technically, schedule() should not be
+	 * called from this thread, however somewhere below it might be,
+	 * but because we are the idle thread, we just pick up running again
+	 * when this runqueue becomes "idle".
+	 */
+	init_idle(current, smp_processor_id());
+}
+
+#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
+void __might_sleep(char *file, int line)
+{
+#if defined(in_atomic)
+	static unsigned long prev_jiffy;	/* ratelimiting */
+
+	if ((in_atomic() || irqs_disabled()) &&
+	    system_state == SYSTEM_RUNNING && !oops_in_progress) {
+		if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
+			return;
+		prev_jiffy = jiffies;
+		printk(KERN_ERR "BUG: sleeping function called from invalid"
+				" context at %s:%d\n", file, line);
+		printk("in_atomic():%d, irqs_disabled():%d\n",
+			in_atomic(), irqs_disabled());
+		dump_stack();
+	}
+#endif
+}
+EXPORT_SYMBOL(__might_sleep);
+#endif
+
+#ifdef CONFIG_MAGIC_SYSRQ
+void normalize_rt_tasks(void)
+{
+	struct task_struct *p;
+	prio_array_t *array;
+	unsigned long flags;
+	runqueue_t *rq;
+
+	read_lock_irq(&tasklist_lock);
+	for_each_process (p) {
+		if (!rt_task(p))
+			continue;
+
+		rq = task_rq_lock(p, &flags);
+
+		array = p->array;
+		if (array)
+			deactivate_task(p, task_rq(p));
+		__setscheduler(p, SCHED_NORMAL, 0);
+		if (array) {
+			__activate_task(p, task_rq(p));
+			resched_task(rq->curr);
+		}
+
+		task_rq_unlock(rq, &flags);
+	}
+	read_unlock_irq(&tasklist_lock);
+}
+
+#endif /* CONFIG_MAGIC_SYSRQ */
+
+#ifdef CONFIG_IA64
+/*
+ * These functions are only useful for the IA64 MCA handling.
+ *
+ * They can only be called when the whole system has been
+ * stopped - every CPU needs to be quiescent, and no scheduling
+ * activity can take place. Using them for anything else would
+ * be a serious bug, and as a result, they aren't even visible
+ * under any other configuration.
+ */
+
+/**
+ * curr_task - return the current task for a given cpu.
+ * @cpu: the processor in question.
+ *
+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
+ */
+task_t *curr_task(int cpu)
+{
+	return cpu_curr(cpu);
+}
+
+/**
+ * set_curr_task - set the current task for a given cpu.
+ * @cpu: the processor in question.
+ * @p: the task pointer to set.
+ *
+ * Description: This function must only be used when non-maskable interrupts
+ * are serviced on a separate stack.  It allows the architecture to switch the
+ * notion of the current task on a cpu in a non-blocking manner.  This function
+ * must be called with all CPUs synchronized, and interrupts disabled; the
+ * caller must save the original value of the current task (see
+ * curr_task() above) and restore that value before reenabling interrupts and
+ * re-starting the system.
+ *
+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
+ */
+void set_curr_task(int cpu, task_t *p)
+{
+	cpu_curr(cpu) = p;
+}
+
+#endif
diff -urN oldtree/kernel/sched_drv.c newtree/kernel/sched_drv.c
--- oldtree/kernel/sched_drv.c	1970-01-01 00:00:00.000000000 +0000
+++ newtree/kernel/sched_drv.c	2006-03-08 18:56:30.063756000 +0000
@@ -0,0 +1,171 @@
+/*
+ *  kernel/sched_drv.c
+ *
+ *  Kernel scheduler device implementation
+ */
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/syscalls.h>
+#include <linux/sched_drv.h>
+#include <linux/sched_pvt.h>
+
+/*
+ * All private per scheduler entries in task_struct are defined as
+ * separate structs and placed into the cpusched union in task_struct.
+ */
+
+/* Ingosched */
+#ifdef CONFIG_CPUSCHED_INGO
+extern const struct sched_drv ingo_sched_drv;
+#endif
+
+/* Ingo Low Latency */
+#ifdef CONFIG_CPUSCHED_INGO_LL
+extern const struct sched_drv ingo_ll_sched_drv;
+#endif
+
+/* Staircase */
+#ifdef CONFIG_CPUSCHED_STAIRCASE
+extern const struct sched_drv staircase_sched_drv;
+#endif
+
+/* Single priority array (SPA) schedulers */
+#ifdef CONFIG_CPUSCHED_SPA_NF
+extern const struct sched_drv spa_nf_sched_drv;
+#endif
+#ifdef CONFIG_CPUSCHED_SPA_WS
+extern const struct sched_drv spa_ws_sched_drv;
+#endif
+#ifdef CONFIG_CPUSCHED_SPA_SVR
+extern const struct sched_drv spa_svr_sched_drv;
+#endif
+#ifdef CONFIG_CPUSCHED_SPA_EBS
+extern const struct sched_drv spa_ebs_sched_drv;
+#endif
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+extern const struct sched_drv zaphod_sched_drv;
+#endif
+
+/* Nicksched */
+#ifdef CONFIG_CPUSCHED_NICK
+extern const struct sched_drv nick_sched_drv;
+#endif
+
+const struct sched_drv *sched_drvp =
+#if defined(CONFIG_CPUSCHED_DEFAULT_INGO)
+	&ingo_sched_drv;
+#elif defined(CONFIG_CPUSCHED_DEFAULT_INGO_LL)
+	&ingo_ll_sched_drv;
+#elif defined(CONFIG_CPUSCHED_DEFAULT_STAIRCASE)
+	&staircase_sched_drv;
+#elif defined(CONFIG_CPUSCHED_DEFAULT_SPA_NF)
+	&spa_nf_sched_drv;
+#elif defined(CONFIG_CPUSCHED_DEFAULT_SPA_WS)
+	&spa_ws_sched_drv;
+#elif defined(CONFIG_CPUSCHED_DEFAULT_SPA_SVR)
+	&spa_svr_sched_drv;
+#elif defined(CONFIG_CPUSCHED_DEFAULT_SPA_EBS)
+	&spa_ebs_sched_drv;
+#elif defined(CONFIG_CPUSCHED_DEFAULT_ZAPHOD)
+	&zaphod_sched_drv;
+#elif defined(CONFIG_CPUSCHED_DEFAULT_NICK)
+	&nick_sched_drv;
+#else
+	NULL;
+#error "You must have at least 1 cpu scheduler selected"
+#endif
+
+extern struct task_struct base_init_task;
+
+#define CPUSCHED_CHECK_SELECT(drv) \
+do { \
+	if (!strcmp(str, (drv).name)) { \
+		sched_drvp = &(drv); \
+		return 1; \
+	} \
+} while (0)
+
+static int __init sched_drv_setup(char *str)
+{
+#if defined(CONFIG_CPUSCHED_INGO)
+	CPUSCHED_CHECK_SELECT(ingo_sched_drv);
+#endif
+#if defined(CONFIG_CPUSCHED_INGO_LL)
+	CPUSCHED_CHECK_SELECT(ingo_ll_sched_drv);
+#endif
+#if defined(CONFIG_CPUSCHED_STAIRCASE)
+	CPUSCHED_CHECK_SELECT(staircase_sched_drv);
+#endif
+#if defined(CONFIG_CPUSCHED_SPA_NF)
+	CPUSCHED_CHECK_SELECT(spa_nf_sched_drv);
+#endif
+#if defined(CONFIG_CPUSCHED_SPA_WS)
+	CPUSCHED_CHECK_SELECT(spa_ws_sched_drv);
+#endif
+#if defined(CONFIG_CPUSCHED_SPA_SVR)
+	CPUSCHED_CHECK_SELECT(spa_svr_sched_drv);
+#endif
+#if defined(CONFIG_CPUSCHED_SPA_EBS)
+	CPUSCHED_CHECK_SELECT(spa_ebs_sched_drv);
+#endif
+#if defined(CONFIG_CPUSCHED_ZAPHOD)
+	CPUSCHED_CHECK_SELECT(zaphod_sched_drv);
+#endif
+#if defined(CONFIG_CPUSCHED_NICK)
+	CPUSCHED_CHECK_SELECT(nick_sched_drv);
+#endif
+	return 1;
+}
+
+__setup ("cpusched=", sched_drv_setup);
+
+static ssize_t show_attribute(struct kobject *kobj, struct attribute *attr, char *page)
+{
+	struct sched_drv_sysfs_entry *e = to_sched_drv_sysfs_entry(attr);
+
+	if (!e->show)
+		return 0;
+
+	return e->show(page);
+}
+
+static ssize_t store_attribute(struct kobject *kobj, struct attribute *attr, const char *page, size_t length)
+{
+	struct sched_drv_sysfs_entry *e = to_sched_drv_sysfs_entry(attr);
+
+	if (!e->store)
+		return -EBADF;
+
+	return e->store(page, length);
+}
+
+struct sysfs_ops sched_drv_sysfs_ops = {
+	.show = show_attribute,
+	.store = store_attribute,
+};
+
+static struct kobj_type sched_drv_ktype = {
+	.sysfs_ops = &sched_drv_sysfs_ops,
+	.default_attrs = NULL,
+};
+
+static struct kobject sched_drv_kobj = {
+	.ktype = &sched_drv_ktype
+};
+
+decl_subsys(cpusched, NULL, NULL);
+
+void __init sched_drv_sysfs_init(void)
+{
+	if (subsystem_register(&cpusched_subsys) == 0) {
+		if (sched_drvp->attrs == NULL)
+			return;
+
+		sched_drv_ktype.default_attrs = sched_drvp->attrs;
+		strncpy(sched_drv_kobj.name, sched_drvp->name, KOBJ_NAME_LEN);
+		sched_drv_kobj.kset = &cpusched_subsys.kset;
+		(void)kobject_register(&sched_drv_kobj);
+ 	}
+}
diff -urN oldtree/kernel/sched_spa.c newtree/kernel/sched_spa.c
--- oldtree/kernel/sched_spa.c	1970-01-01 00:00:00.000000000 +0000
+++ newtree/kernel/sched_spa.c	2006-03-08 18:56:30.067756250 +0000
@@ -0,0 +1,1637 @@
+/*
+ *  kernel/sched_spa.c
+ *  Copyright (C) 1991-2005  Linus Torvalds
+ *
+ *  2005-01-11 Single priority array scheduler (no frills)
+ */
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/rcupdate.h>
+#include <linux/security.h>
+#include <linux/cpu.h>
+#include <linux/hardirq.h>
+#include <linux/sched_pvt.h>
+#include <linux/sched_runq.h>
+#include <linux/module.h>
+#include <linux/sched_spa.h>
+
+#define INITIAL_TIMESTAMP \
+	((unsigned long long)INITIAL_JIFFIES * (1000000000ULL / HZ))
+
+#ifdef CONFIG_SMP
+static inline unsigned long long adjusted_sched_clock(const task_t *p)
+{
+	return sched_clock() + (task_rq(p)->timestamp_last_tick - this_rq()->timestamp_last_tick);
+}
+#else
+#define adjusted_sched_clock(p) sched_clock()
+#endif
+
+static inline void adjust_timestamp(struct task_struct *tsk, struct runqueue *tsk_rq, struct runqueue *other_rq)
+{
+#ifdef CONFIG_SMP
+	tsk->timestamp += (tsk_rq->timestamp_last_tick - other_rq->timestamp_last_tick);
+#endif
+}
+
+void initialize_stats(struct task_struct *p, unsigned long long now)
+{
+	p->sdu.spa.avg_sleep_per_cycle = 0;
+	p->sdu.spa.avg_ia_sleep_per_cycle = 0;
+	p->sdu.spa.avg_delay_per_cycle = 0;
+	p->sdu.spa.avg_latency = 0;
+	p->sdu.spa.avg_ia_latency = 0;
+	p->sdu.spa.avg_cpu_per_cycle = 0;
+	p->sdu.spa.avg_cycle_length = 0;
+	p->sdu.spa.flags = (SPAF_JUST_FORK|SPAF_FIRST_RUN);
+}
+
+static void delta_sleep_stats(struct task_struct *p, unsigned long long now)
+{
+	unsigned long long delta;
+
+	/* sched_clock() is not guaranteed monotonic */
+	if (now <= p->timestamp)
+		goto out;
+
+	delta = now - p->timestamp;
+	p->sdu.spa.avg_sleep_per_cycle += delta;
+	p->sdu.spa.avg_cycle_length += delta;
+	if (task_was_in_ia_sleep(p))
+		p->sdu.spa.avg_ia_sleep_per_cycle += delta;
+out:
+	p->timestamp = now;
+}
+
+static inline void delta_cpu_stats(struct task_struct *p,
+				      unsigned long long now)
+{
+	unsigned long long delta;
+
+	/* sched_clock() is not guaranteed monotonic */
+	if (now <= p->timestamp)
+		goto out;
+
+	delta = now - p->timestamp;
+	p->sdu.spa.avg_cpu_per_cycle += delta;
+	p->sdu.spa.avg_cycle_length += delta;
+out:
+	p->timestamp = now;
+}
+
+#define SPA_AVG_ALPHA ((1 << SPA_AVG_OFFSET) - 1)
+static inline void apply_spa_avg_decay(unsigned long long *valp)
+{
+	*valp *= SPA_AVG_ALPHA;
+	*valp >>= SPA_AVG_OFFSET;
+}
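+/*
+ * Each decay step scales the running average by SPA_AVG_ALPHA/2^SPA_AVG_OFFSET,
+ * i.e. by (2^SPA_AVG_OFFSET - 1)/2^SPA_AVG_OFFSET; if SPA_AVG_OFFSET were 3,
+ * for instance, every call would multiply the average by 7/8.
+ */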
+
+static void delta_delay_stats(struct task_struct *p, unsigned long long now)
+{
+	long long delta = now - p->timestamp;
+
+	/* sched_clock() is not guaranteed monotonic */
+	if (delta < 0)
+		delta = 0;
+
+	p->sdu.spa.avg_delay_per_cycle += delta;
+	p->sdu.spa.avg_cycle_length += delta;
+
+	if (p->sdu.spa.flags & SPAF_JUST_WOKEN) {
+		apply_spa_avg_decay(&p->sdu.spa.avg_latency);
+		p->sdu.spa.avg_latency += delta;
+		if (task_was_in_ia_sleep(p)) {
+			apply_spa_avg_decay(&p->sdu.spa.avg_ia_latency);
+			p->sdu.spa.avg_ia_latency += delta;
+			p->sdu.spa.flags |= SPAF_IA_LATENCY;
+		} else
+			p->sdu.spa.flags &= ~SPAF_IA_LATENCY;
+	}
+
+	p->timestamp = now;
+	p->sdu.spa.flags &= ~(SPAF_INTR_WOKEN|SPAF_JUST_WOKEN);
+}
+
+static inline void spa_avg_first_sample(unsigned long long *valp)
+{
+	*valp <<= SPA_AVG_OFFSET;
+}
+
+static void decay_stats_for_cycle(struct task_struct *p)
+{
+	if (unlikely(p->sdu.spa.flags & SPAF_JUST_FORK)) {
+		/* set the average to be equal to the first sample */
+		spa_avg_first_sample(&p->sdu.spa.avg_sleep_per_cycle);
+		spa_avg_first_sample(&p->sdu.spa.avg_ia_sleep_per_cycle);
+		spa_avg_first_sample(&p->sdu.spa.avg_delay_per_cycle);
+		spa_avg_first_sample(&p->sdu.spa.avg_cpu_per_cycle);
+		spa_avg_first_sample(&p->sdu.spa.avg_cycle_length);
+		p->sdu.spa.flags &= ~SPAF_JUST_FORK;
+	}
+	else {
+		apply_spa_avg_decay(&p->sdu.spa.avg_sleep_per_cycle);
+		apply_spa_avg_decay(&p->sdu.spa.avg_ia_sleep_per_cycle);
+		apply_spa_avg_decay(&p->sdu.spa.avg_delay_per_cycle);
+		apply_spa_avg_decay(&p->sdu.spa.avg_cpu_per_cycle);
+		apply_spa_avg_decay(&p->sdu.spa.avg_cycle_length);
+	}
+}
+
+static void update_stats_at_wake_up(struct task_struct *p,
+				       unsigned long long now)
+{
+	delta_sleep_stats(p, now);
+	p->sdu.spa.flags |= SPAF_JUST_WOKEN;
+	if (in_interrupt())
+		p->sdu.spa.flags |= SPAF_INTR_WOKEN;
+	decay_stats_for_cycle(p);
+	p->sdu.spa.flags &= ~SPAF_FIRST_RUN;
+}
+
+static inline void update_stats_at_end_of_ts(struct task_struct *p,
+						unsigned long long now)
+{
+	delta_cpu_stats(p, now);
+	decay_stats_for_cycle(p);
+}
+
+static inline unsigned long long spa_avg_in_jiffies(unsigned long long avg)
+{
+	unsigned long long tmp = SPA_AVG_RND(avg) * HZ;
+
+#if BITS_PER_LONG < 64
+	(void)do_div(tmp, 1000000000);
+#else
+	tmp /= 1000000000;
+#endif
+
+	return tmp;
+}
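+/*
+ * For example, with HZ=250 an average of 20,000,000 nsecs (20 msecs) of CPU
+ * time per cycle converts to 20,000,000 * 250 / 1,000,000,000 = 5 jiffies.
+ */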
+
+#define PPT_OVERFLOW ((1ULL << 63) / 1000 - 1)
+
+static unsigned long delay_in_jiffies_for_usage(const struct task_struct *p,
+						unsigned long rur)
+{
+	unsigned long long acpc_jiffies, abl_jiffies, res;
+
+	if (rur == 0)
+		return ULONG_MAX;
+
+	acpc_jiffies = spa_avg_in_jiffies(p->sdu.spa.avg_cpu_per_cycle);
+
+	/*
+	 * we have to be careful about overflow and/or underflow
+	 */
+	while (unlikely(acpc_jiffies > PPT_OVERFLOW)) {
+		acpc_jiffies >>= 1;
+		if (unlikely((rur >>= 1) == 0))
+			return ULONG_MAX;
+	}
+
+	abl_jiffies = spa_avg_in_jiffies(p->sdu.spa.avg_sleep_per_cycle) +
+		acpc_jiffies;
+	res = acpc_jiffies * 1000;
+#if BITS_PER_LONG < 64
+	(void)do_div(res, rur);
+#else
+	res /= rur;
+#endif
+	if (res > abl_jiffies)
+		return res - abl_jiffies;
+	else
+		return 0;
+}
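+/*
+ * Example (assuming rur is a usage rate in parts per thousand): a task
+ * averaging 10 jiffies of CPU and no sleep per cycle, capped at rur=500
+ * (i.e. 50%), needs a delay of 10*1000/500 - (10 + 0) = 10 jiffies.
+ */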
+
+static void update_shares(struct task_struct *p)
+{
+	int nice = TASK_NICE(p);
+
+	p->sdu.spa.eb_shares = DEFAULT_EB_SHARES;
+
+	if (nice > 0)
+		p->sdu.spa.eb_shares -= nice;
+	else if (nice < 0)
+		p->sdu.spa.eb_shares += nice * nice;
+}
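+/*
+ * Relative to DEFAULT_EB_SHARES this removes one share per positive nice
+ * level (nice 10 -> 10 fewer shares) and adds nice*nice shares for
+ * negative values (nice -10 -> 100 extra shares).
+ */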
+
+extern const struct sched_drv spa_nf_sched_drv;
+extern struct sched_spa_child spa_nf_child;
+
+struct sched_spa_child *spa_sched_child = &spa_nf_child;
+
+/*
+ * Some of our exported functions could be called when other schedulers are
+ * in charge, with catastrophic results if not handled properly, so we
+ * need to know whether one of our schedulers is in charge.
+ */
+static int spa_in_charge = 0;
+
+void spa_init_runqueue_queue(union runqueue_queue *qup)
+{
+	int k;
+
+	for (k = 0; k < SPA_IDLE_PRIO; k++) {
+		qup->spa.queue[k].prio = k;
+		INIT_LIST_HEAD(&qup->spa.queue[k].list);
+	}
+	bitmap_zero(qup->spa.bitmap, SPA_NUM_PRIO_SLOTS);
+	// delimiter for bitsearch
+	__set_bit(SPA_IDLE_PRIO, qup->spa.bitmap);
+	qup->spa.next_prom_due = ULONG_MAX;
+	qup->spa.pcount = 0;
+	qup->spa.nr_active_eb_shares = 0;
+}
+
+void spa_set_oom_time_slice(struct task_struct *p, unsigned long t)
+{
+	p->sdu.spa.time_slice = t;
+}
+
+/*
+ * These are the 'tuning knobs' of the scheduler:
+ *
+ * Default configurable timeslice is 120 msecs, maximum configurable
+ * timeslice is 1000 msecs and minimum configurable timeslice is 1 jiffy.
+ * Timeslices get renewed on task creation, on wake up and after they expire.
+ */
+#define MIN_TIMESLICE		1
+#define DEF_TIMESLICE		((120 * HZ / 1000) ? : MIN_TIMESLICE)
+#define MAX_TIMESLICE		((1000 * HZ / 1000) ? : MIN_TIMESLICE)
+
+static unsigned long time_slice = DEF_TIMESLICE;
+static unsigned long sched_rr_time_slice = DEF_TIMESLICE;
+
+/*
+ * Background tasks may have longer time slices as compensation
+ */
+#define MAX_BGND_TIME_SLICE_MULTIPLIER 100
+static unsigned int bgnd_time_slice_multiplier = 1;
+
+#define TASK_PREEMPTS_CURR(p, rq) \
+	((p)->prio < (rq)->curr->prio)
+
+static inline unsigned int normal_task_timeslice(const task_t *p)
+{
+	if (unlikely(p->prio == SPA_BGND_PRIO))
+		return time_slice * bgnd_time_slice_multiplier;
+
+	return time_slice;
+}
+
+static inline unsigned int hard_cap_timeslice(const task_t *p)
+{
+	unsigned int cpu_avg = spa_avg_in_jiffies(p->sdu.spa.avg_cpu_per_cycle);
+
+	return (cpu_avg / 2) ? (cpu_avg / 2) : 1;
+}
+
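+/*
+ * E.g. a hard capped task that averages 8 jiffies of CPU per cycle gets a
+ * 4 jiffy slice; tasks averaging less than 2 jiffies still get the 1 jiffy
+ * minimum so the slice never shrinks to nothing.
+ */
+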
+/*
+ * task_timeslice() is the interface that is used internally by the scheduler.
+ */
+static inline unsigned int task_timeslice(const task_t *p)
+{
+	if (rt_task(p))
+		return sched_rr_time_slice;
+
+	return normal_task_timeslice(p);
+}
+
+unsigned int spa_task_timeslice(const task_t *p)
+{
+	return task_timeslice(p);
+}
+
+#ifdef CONFIG_SMP
+/*
+ * To aid in avoiding the subversion of "niceness" due to uneven distribution
+ * of tasks with abnormal "nice" values across CPUs the contribution that
+ * each task makes to its run queue's load is weighted according to its
+ * scheduling class and "nice" value.
+ */
+#define NICE_TO_LP(nice) (((nice) >= 0) ? (20 - (nice)) : (20 + (nice) * (nice)))
+#define LOAD_WEIGHT(lp) \
+	(((lp) * SCHED_LOAD_SCALE) / NICE_TO_LP(0))
+#define PRIO_TO_LOAD_WEIGHT(prio) \
+	LOAD_WEIGHT(NICE_TO_LP(PRIO_TO_NICE(prio)))
+#define RTPRIO_TO_LOAD_WEIGHT(rp) \
+	(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
+
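+/*
+ * With this mapping NICE_TO_LP(0) = 20, so a nice 0 task contributes
+ * SCHED_LOAD_SCALE to its runqueue's weighted load, a nice +19 task
+ * contributes SCHED_LOAD_SCALE / 20 and a nice -20 task contributes
+ * 420 / 20 = 21 * SCHED_LOAD_SCALE.  Real time tasks are weighted on top
+ * of PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) so they outweigh any SCHED_NORMAL
+ * or SCHED_BATCH task.
+ */
+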
+void spa_set_load_weight(task_t *p)
+{
+	if (rt_task(p)) {
+		if (p == task_rq(p)->migration_thread)
+			/*
+			 * The migration thread does the actual balancing.
+			 * Giving its load any weight will skew balancing
+			 * adversely.
+			 */
+			p->load_weight = 0;
+		else
+			p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
+	} else {
+		p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
+
+		if (unlikely(p->sdu.spa.min_cpu_rate_cap < 1000)) {
+			unsigned int clw; /* load weight based on cap */
+
+			clw = (p->sdu.spa.min_cpu_rate_cap * SCHED_LOAD_SCALE) / 1000;
+			if (clw < p->load_weight)
+				p->load_weight = clw;
+		}
+	}
+}
+#else
+static inline void spa_set_load_weight(task_t *p)
+{
+}
+#endif
+
+/*
+ * Adding/removing a task to/from a priority array:
+ */
+static void dequeue_task(struct task_struct *p, struct spa_runqueue_queue *rqq)
+{
+	/*
+	 * Initialize after removal from the list so that list_empty() works
+	 * as a means for testing whether the task is runnable.
+	 * If p is the last task in this priority slot then slotp will be
+	 * a pointer to the head of the list in the runqueue structure.
+	 * NB we can't use p->prio as is for the bitmap as the task may have
+	 * been promoted, so we update it from the slot it actually occupied.
+	 */
+	struct list_head *slotp = p->run_list.next;
+
+	list_del_init(&p->run_list);
+	if (list_empty(slotp)) {
+		p->prio = list_entry(slotp, struct spa_prio_slot, list)->prio;
+		__clear_bit(p->prio, rqq->bitmap);
+	}
+}
+
+static void enqueue_task(struct task_struct *p, struct spa_runqueue_queue *rqq)
+{
+	sched_info_queued(p);
+	list_add_tail(&p->run_list, &rqq->queue[p->prio].list);
+	__set_bit(p->prio, rqq->bitmap);
+}
+
+/*
+ * Used by the migration code - we pull tasks from the head of the
+ * remote queue so we want these tasks to show up at the head of the
+ * local queue:
+ */
+static inline void enqueue_task_head(struct task_struct *p, struct spa_runqueue_queue *rqq)
+{
+	list_add(&p->run_list, &rqq->queue[p->prio].list);
+	__set_bit(p->prio, rqq->bitmap);
+}
+
+/*
+ * Control values for the promotion mechanism.  NB the interval also
+ * determines how severely differences in "nice" are felt.
+ */
+unsigned long base_prom_interval = ((DEF_TIMESLICE * 15) / 10);
+unsigned int promotion_floor = MAX_RT_PRIO;
+
+#define PROMOTION_CEILING SPA_BGND_PRIO
+#define in_promotable_range(prio) \
+	((prio) > promotion_floor && (prio) < PROMOTION_CEILING)
+
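+/*
+ * In other words, once a runqueue holds more than one task, roughly every
+ * base_prom_interval (1.5 default time slices, i.e. about 180 msecs) each
+ * queued non real time task with a priority between promotion_floor and
+ * SPA_BGND_PRIO is moved up one slot, so tasks languishing at low priority
+ * are guaranteed to get some CPU eventually.
+ */
+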
+static inline void restart_promotions(struct runqueue *rq)
+{
+	rq->qu.spa.next_prom_due = jiffies + base_prom_interval;
+	rq->qu.spa.pcount = 2;
+}
+
+#define check_restart_promotions(rq) \
+do { \
+	if (rq->nr_running == 2) \
+		restart_promotions(rq); \
+} while (0)
+
+/* make it (relatively) easy to switch to using a timer */
+static inline void stop_promotions(struct runqueue *rq)
+{
+}
+
+#define check_stop_promotions(rq) \
+do { \
+	if (rq->nr_running == 1) \
+		stop_promotions(rq); \
+} while (0)
+
+/*
+ * Are promotions due?
+ */
+static inline int promotions_due(const struct runqueue *rq)
+{
+	return unlikely(time_after_eq(jiffies, rq->qu.spa.next_prom_due));
+}
+
+static inline void update_curr_prio_for_promotion(struct runqueue *rq)
+{
+	if (likely(in_promotable_range(rq->curr->prio)))
+		rq->curr->prio--;
+}
+
+/*
+ * Assume spa_runq lock is NOT already held.
+ */
+static void do_promotions(struct runqueue *rq)
+{
+	int idx = promotion_floor;
+
+	spin_lock(&rq->lock);
+	if (unlikely(rq->nr_running < 2))
+		goto out_unlock;
+	if (rq->nr_running > rq->qu.spa.pcount) {
+		rq->qu.spa.pcount++;
+		goto out_unlock;
+	}
+	for (;;) {
+		int new_prio;
+		idx = find_next_bit(rq->qu.spa.bitmap, PROMOTION_CEILING, idx + 1);
+		if (idx > (PROMOTION_CEILING - 1))
+			break;
+
+		new_prio = idx - 1;
+		__list_splice(&rq->qu.spa.queue[idx].list, rq->qu.spa.queue[new_prio].list.prev);
+		INIT_LIST_HEAD(&rq->qu.spa.queue[idx].list);
+		__clear_bit(idx, rq->qu.spa.bitmap);
+		__set_bit(new_prio, rq->qu.spa.bitmap);
+	}
+	/* The only prio field that needs update is the current task's */
+	update_curr_prio_for_promotion(rq);
+	rq->qu.spa.pcount = 2;
+out_unlock:
+	rq->qu.spa.next_prom_due = jiffies + base_prom_interval;
+	spin_unlock(&rq->lock);
+}
+
+static inline unsigned int spa_soft_cap_penalty(const task_t *p)
+{
+	unsigned long rd = delay_in_jiffies_for_usage(p, p->sdu.spa.min_cpu_rate_cap);
+
+	return (rd + base_prom_interval) / base_prom_interval;
+}
+
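+/*
+ * The penalty amounts to roughly one priority slot per base_prom_interval
+ * of delay needed to hold the task at its cap, plus one.  E.g. if the
+ * required delay is three promotion intervals the task is pushed down four
+ * slots, and the promotion mechanism then lifts it back one slot per
+ * interval, which is about the rate needed to keep it at its cap.
+ */
+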
+int spa_pb_soft_cap_priority(const task_t *p, int base_prio)
+{
+	struct spa_runqueue_queue *rqq = &task_rq(p)->qu.spa;
+	int prio = find_next_bit(rqq->bitmap, SPA_IDLE_PRIO, base_prio);
+
+	if (prio == SPA_IDLE_PRIO)
+		prio = base_prio;
+
+	prio += spa_soft_cap_penalty(p);
+
+	if (prio > SPA_SOFT_CAP_PRIO)
+		return SPA_SOFT_CAP_PRIO;
+
+	return prio;
+}
+
+int spa_eb_soft_cap_priority(const task_t *p, int base_prio)
+{
+	int prio = base_prio + spa_soft_cap_penalty(p);
+
+	if (prio > SPA_SOFT_CAP_PRIO)
+		return SPA_SOFT_CAP_PRIO;
+
+	return prio;
+}
+
+static inline int spa_nf_soft_cap_effective_prio(const struct task_struct *p)
+{
+	return spa_pb_soft_cap_priority(p, p->static_prio);
+}
+
+static inline int spa_nf_normal_effective_prio(const struct task_struct *p)
+{
+	return p->static_prio;
+}
+
+/*
+ * effective_prio - return the priority that is based on the static
+ * priority
+ */
+#define should_run_in_background(p) \
+	(task_is_bgnd(p) && !((p)->sdu.spa.flags & SPAF_UISLEEP))
+static inline int effective_prio(const task_t *p)
+{
+	if (rt_task(p))
+		return p->prio;
+
+	if (task_is_bgnd(p))
+		return (p->sdu.spa.flags & SPAF_UISLEEP) ?
+			SPA_SOFT_CAP_PRIO : SPA_BGND_PRIO;
+
+	/* using the minimum of the hard and soft caps makes things smoother */
+	if (unlikely(spa_exceeding_cpu_rate_cap(p)))
+		return  spa_sched_child->soft_cap_effective_prio(p);
+
+	return spa_sched_child->normal_effective_prio(p);
+}
+
+static inline void spa_inc_nr_running(task_t *p, runqueue_t *rq)
+{
+	inc_nr_running(p, rq);
+	check_restart_promotions(rq);
+	if (!rt_task(p))
+		rq->qu.spa.nr_active_eb_shares += p->sdu.spa.eb_shares;
+}
+
+static inline void spa_dec_nr_running(task_t *p, runqueue_t *rq)
+{
+	dec_nr_running(p, rq);
+	check_stop_promotions(rq);
+	if (!rt_task(p))
+		rq->qu.spa.nr_active_eb_shares -= p->sdu.spa.eb_shares;
+}
+
+/*
+ * __activate_task - move a task to the runqueue.
+ */
+static inline void __activate_task(task_t *p, runqueue_t *rq)
+{
+	enqueue_task(p, &rq->qu.spa);
+	spa_inc_nr_running(p, rq);
+}
+
+static inline void do_nothing_to_task(task_t *p) {}
+
+/*
+ * activate_task - move a task to the runqueue and do priority recalculation
+ */
+static void activate_task(task_t *p, runqueue_t *rq)
+{
+	if (rt_task(p))
+		p->sdu.spa.time_slice = sched_rr_time_slice;
+	else {
+		spa_sched_child->reassess_at_activation(p);
+		p->prio = effective_prio(p);
+		/* hard capped tasks that never use their full time slice evade
+		 * the sinbin so we need to reduce the size of their time slice
+		 * to reduce the size of the hole that they slip through.
+		 * It would be unwise to close it completely.
+		 */
+		if (unlikely(spa_exceeding_cpu_rate_hard_cap(p)))
+			p->sdu.spa.time_slice = hard_cap_timeslice(p);
+		else
+			p->sdu.spa.time_slice = normal_task_timeslice(p);
+	}
+	__activate_task(p, rq);
+}
+
+/*
+ * deactivate_task - remove a task from the runqueue.
+ */
+static inline void deactivate_task(struct task_struct *p, runqueue_t *rq)
+{
+	struct spa_runqueue_queue *rqq = &rq->qu.spa;
+
+	spa_dec_nr_running(p, rq);
+	dequeue_task(p, rqq);
+}
+
+/*
+ * Check whether p preempts rq->curr and reschedule the current task if so.
+ */
+static inline void preempt_if_warranted(task_t *p, struct runqueue *rq)
+{
+	if (TASK_PREEMPTS_CURR(p, rq))
+		resched_task(rq->curr);
+}
+
+/***
+ * try_to_wake_up - wake up a thread
+ * @p: the to-be-woken-up thread
+ * @old_state: the task's state before being woken
+ * @sync: do a synchronous wakeup?
+ * @rq: The run queue on which the task is to be placed (already locked)
+ */
+void spa_wake_up_task(struct task_struct *p, struct runqueue *rq, unsigned int old_state, int sync)
+{
+	/*
+	 * Tasks waking from (declared) non interactive sleep will not receive
+	 * any interactive bonus.
+	 */
+	if (old_state & TASK_NONINTERACTIVE)
+		p->sdu.spa.flags |= SPAF_NONIASLEEP;
+
+	/*
+	 * This is the end of one scheduling cycle and the start of the next
+	 */
+	update_stats_at_wake_up(p, adjusted_sched_clock(p));
+
+	if (old_state == TASK_UNINTERRUPTIBLE)
+		rq->nr_uninterruptible--;
+
+	/*
+	 * Sync wakeups (i.e. those types of wakeups where the waker
+	 * has indicated that it will leave the CPU in short order)
+	 * don't trigger a preemption, if the woken up task will run on
+	 * this cpu. (in this case the 'I will reschedule' promise of
+	 * the waker guarantees that the freshly woken up task is going
+	 * to be considered on this CPU.)
+	 */
+	activate_task(p, rq);
+	if (!sync || (rq != this_rq()))
+		preempt_if_warranted(p, rq);
+}
+
+/*
+ * Perform scheduler related setup for a newly forked process p.
+ * p is forked by current.
+ */
+void spa_fork(task_t *p)
+{
+	unsigned long long now;
+
+	init_timer(&p->sdu.spa.sinbin_timer);
+	p->sdu.spa.sinbin_timer.data = (unsigned long) p;
+	/*
+	 * Give the task a new timeslice.
+	 */
+	p->sdu.spa.time_slice = task_timeslice(p);
+	local_irq_disable();
+	now = sched_clock();
+	local_irq_enable();
+	/*
+	 * Initialize the scheduling statistics
+	 */
+	initialize_stats(p, now);
+	spa_sched_child->fork_extras(p);
+}
+
+/*
+ * wake_up_new_task - wake up a newly created task for the first time.
+ *
+ * This function will do some initial scheduler statistics housekeeping
+ * that must be done for every newly created context, then puts the task
+ * on the runqueue and wakes it.
+ */
+#ifdef CONFIG_SMP
+#define rq_is_this_rq(rq) (likely((rq) == this_rq()))
+#else
+#define rq_is_this_rq(rq) 1
+#endif
+void spa_wake_up_new_task(task_t * p, unsigned long clone_flags)
+{
+	unsigned long flags;
+	runqueue_t *rq;
+
+	rq = task_rq_lock(p, &flags);
+
+	BUG_ON(p->state != TASK_RUNNING);
+
+	if (rq_is_this_rq(rq)) {
+		if (!(clone_flags & CLONE_VM)) {
+			/*
+			 * The VM isn't cloned, so we're in a good position to
+			 * do child-runs-first in anticipation of an exec. This
+			 * usually avoids a lot of COW overhead.
+			 */
+			if (unlikely(!task_is_queued(current))) {
+				p->prio = effective_prio(p);
+				__activate_task(p, rq);
+			} else {
+				p->prio = current->prio;
+				list_add_tail(&p->run_list, &current->run_list);
+				spa_inc_nr_running(p, rq);
+				check_restart_promotions(rq);
+			}
+			set_need_resched();
+		} else {
+			p->prio = effective_prio(p);
+			/* Run child last */
+			__activate_task(p, rq);
+		}
+	} else {
+		p->prio = effective_prio(p);
+		__activate_task(p, rq);
+		preempt_if_warranted(p, rq);
+	}
+
+	task_rq_unlock(rq, &flags);
+}
+
+void spa_exit(task_t * p)
+{
+}
+
+/*
+ * pull_task - move a task from a remote runqueue to the local runqueue.
+ * Both runqueues must be locked.
+ */
+static inline
+void pull_task(runqueue_t *src_rq, task_t *p, runqueue_t *this_rq, int this_cpu)
+{
+	dequeue_task(p, &src_rq->qu.spa);
+	spa_dec_nr_running(p, src_rq);
+	set_task_cpu(p, this_cpu);
+	adjust_timestamp(p, this_rq, src_rq);
+	spa_inc_nr_running(p, this_rq);
+	enqueue_task(p, &this_rq->qu.spa);
+	preempt_if_warranted(p, this_rq);
+}
+
+#ifdef CONFIG_SMP
+/*
+ * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq,
+ * as part of a balancing operation within "domain". Returns the number of
+ * tasks moved.
+ *
+ * Called with both runqueues locked.
+ */
+int spa_move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
+		      unsigned long max_nr_move, unsigned long max_load_move,
+		      struct sched_domain *sd, enum idle_type idle,
+		      int *all_pinned)
+{
+	struct list_head *head, *curr;
+	int idx, pulled = 0, pinned = 0;
+	long rem_load_move;
+	struct task_struct *tmp;
+
+	if (max_nr_move == 0 || max_load_move == 0)
+		goto out;
+
+	rem_load_move = max_load_move;
+	pinned = 1;
+
+	/* Start searching at priority 0: */
+	idx = 0;
+skip_bitmap:
+	if (!idx)
+		idx = sched_find_first_bit(busiest->qu.spa.bitmap);
+	else
+		idx = find_next_bit(busiest->qu.spa.bitmap, SPA_IDLE_PRIO, idx);
+	if (idx >= SPA_IDLE_PRIO)
+		goto out;
+
+	head = &busiest->qu.spa.queue[idx].list;
+	curr = head->prev;
+skip_queue:
+	tmp = list_entry(curr, task_t, run_list);
+	/* Take the opportunity to update the task's prio field just in
+	 * case it's been promoted.  This makes sure that the task doesn't
+	 * lose any promotions it has received during the move.
+	 */
+	tmp->prio = idx;
+
+	curr = curr->prev;
+
+	if (tmp->load_weight > rem_load_move ||
+	    !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+
+#ifdef CONFIG_SCHEDSTATS
+	if (task_hot(tmp, busiest->timestamp_last_tick, sd))
+		schedstat_inc(sd, lb_hot_gained[idle]);
+#endif
+
+	pull_task(busiest, tmp, this_rq, this_cpu);
+	pulled++;
+	rem_load_move -= tmp->load_weight;
+
+	/*
+	 * We only want to steal up to the prescribed number of tasks
+	 * and the prescribed amount of biased load.
+	 */
+	if (pulled < max_nr_move && rem_load_move > 0) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+out:
+	if (all_pinned)
+		*all_pinned = pinned;
+
+	return pulled;
+}
+#endif
+
+static void spa_nf_runq_data_tick(unsigned int cpu, struct runqueue *rq)
+{
+}
+
+/*
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled.
+ */
+void spa_tick(struct task_struct *p, struct runqueue *rq, unsigned long long now)
+{
+	int cpu = smp_processor_id();
+	struct spa_runqueue_queue *rqq = &rq->qu.spa;
+
+	spa_sched_child->runq_data_tick(cpu, rq);
+
+	if (p == rq->idle) {
+		if (wake_priority_sleeper(rq))
+			goto out;
+		rebalance_tick(cpu, rq, SCHED_IDLE);
+		return;
+	}
+
+	/*
+	 * SCHED_FIFO tasks never run out of timeslice.
+	 */
+	if (unlikely(p->policy == SCHED_FIFO))
+		goto out;
+
+	spin_lock(&rq->lock);
+	/*
+	 * The task was running during this tick - update the
+	 * time slice counter. Note: we do not update a thread's
+	 * priority until it either goes to sleep or uses up its
+	 * timeslice. This makes it possible for interactive tasks
+	 * to use up their timeslices at their highest priority levels.
+	 */
+	if (!--p->sdu.spa.time_slice) {
+		dequeue_task(p, rqq);
+		set_tsk_need_resched(p);
+		update_stats_at_end_of_ts(p, now);
+		if (unlikely(p->policy == SCHED_RR))
+			p->sdu.spa.time_slice = sched_rr_time_slice;
+		else {
+			spa_sched_child->reassess_at_end_of_ts(p);
+			p->prio = effective_prio(p);
+			p->sdu.spa.time_slice = normal_task_timeslice(p);
+		}
+		enqueue_task(p, rqq);
+	}
+	spin_unlock(&rq->lock);
+out:
+	if (unlikely(promotions_due(rq)))
+		do_promotions(rq);
+	rebalance_tick(cpu, rq, NOT_IDLE);
+}
+
+/*
+ * Take an active task off the runqueue for a short while.
+ * Assumes that the task's runqueue is already locked.
+ */
+static inline void put_task_in_sinbin(struct task_struct *p, unsigned long durn)
+{
+	if (durn == 0)
+		return;
+	deactivate_task(p, task_rq(p));
+	p->sdu.spa.flags |= SPAF_SINBINNED;
+	p->sdu.spa.sinbin_timer.expires = jiffies + durn;
+	add_timer(&p->sdu.spa.sinbin_timer);
+}
+
+/*
+ * Release a task from the sinbin
+ */
+void sinbin_release_fn(unsigned long arg)
+{
+	unsigned long flags;
+	struct task_struct *p = (struct task_struct*)arg;
+	struct runqueue *rq = task_rq_lock(p, &flags);
+
+	p->sdu.spa.flags &= ~SPAF_SINBINNED;
+	if (!rt_task(p)) {
+		spa_sched_child->reassess_at_sinbin_release(p);
+		p->prio = effective_prio(p);
+	}
+	__activate_task(p, rq);
+
+	task_rq_unlock(rq, &flags);
+}
+
+static inline int task_needs_sinbinning(const struct task_struct *p)
+{
+	return unlikely(spa_exceeding_cpu_rate_hard_cap(p)) &&
+		(p->state == TASK_RUNNING) && !rt_task(p) &&
+		((p->flags & PF_EXITING) == 0);
+}
+
+static inline unsigned long required_sinbin_durn(const struct task_struct *p)
+{
+	return delay_in_jiffies_for_usage(p, p->sdu.spa.cpu_rate_hard_cap);
+}
+
+#ifdef CONFIG_SCHED_SMT
+struct task_struct *spa_head_of_queue(union runqueue_queue *rqq)
+{
+	struct task_struct *tmp;
+	int idx = sched_find_first_bit(rqq->spa.bitmap);
+
+	tmp = list_entry(rqq->spa.queue[idx].list.next, task_t, run_list);
+	/* Take the opportunity to update the task's prio field just in
+	 * case it's been promoted.
+	 */
+	tmp->prio = idx;
+
+	return tmp;
+}
+
+/* maximum expected priority difference for SCHED_NORMAL/SCHED_BATCH tasks */
+#define MAX_SN_PD (SPA_IDLE_PRIO - MAX_RT_PRIO)
+int spa_dependent_sleeper_trumps(const struct task_struct *p1,
+	const struct task_struct *p2, struct sched_domain *sd)
+{
+	int dp = p2->static_prio - p1->static_prio;
+
+	if ((dp > 0) && (sd->per_cpu_gain < 100)) {
+		unsigned long rq_ts_rm;
+
+		rq_ts_rm = ((MAX_SN_PD - dp) * time_slice * sd->per_cpu_gain) /
+			(100 * MAX_SN_PD);
+
+		return p1->sdu.spa.time_slice > rq_ts_rm;
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * schedule() is the main scheduler function.
+ */
+void spa_schedule(void)
+{
+	long *switch_count;
+	int cpu, idx;
+	struct task_struct *prev = current, *next;
+	struct runqueue *rq = this_rq();
+	unsigned long long now = sched_clock();
+	struct list_head *queue;
+
+	spin_lock_irq(&rq->lock);
+
+	if (unlikely(current->flags & PF_DEAD))
+		current->state = EXIT_DEAD;
+	/*
+	 * if entering off of a kernel preemption go straight
+	 * to picking the next task.
+	 */
+	switch_count = &prev->nivcsw;
+	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+		switch_count = &prev->nvcsw;
+		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+				unlikely(signal_pending(prev))))
+			prev->state = TASK_RUNNING;
+		else {
+			if (prev->state == TASK_UNINTERRUPTIBLE) {
+				rq->nr_uninterruptible++;
+				prev->sdu.spa.flags |= SPAF_UISLEEP;
+			}
+			deactivate_task(prev, rq);
+		}
+	}
+
+	update_cpu_clock(prev, rq, now);
+	delta_cpu_stats(prev, now);
+	if (task_needs_sinbinning(prev) && likely(!signal_pending(prev)))
+		put_task_in_sinbin(prev, required_sinbin_durn(prev));
+
+	cpu = smp_processor_id();
+	if (unlikely(!rq->nr_running)) {
+go_idle:
+		idle_balance(cpu, rq);
+		if (!rq->nr_running) {
+			next = rq->idle;
+			wake_sleeping_dependent(cpu, rq);
+			/*
+			 * wake_sleeping_dependent() might have released
+			 * the runqueue, so break out if we got new
+			 * tasks meanwhile:
+			 */
+			if (!rq->nr_running)
+				goto switch_tasks;
+		}
+	} else {
+		if (dependent_sleeper(cpu, rq)) {
+			next = rq->idle;
+			goto switch_tasks;
+		}
+		/*
+		 * dependent_sleeper() releases and reacquires the runqueue
+		 * lock, hence go into the idle loop if the rq went
+		 * empty meanwhile:
+		 */
+		if (unlikely(!rq->nr_running))
+			goto go_idle;
+	}
+
+	idx = sched_find_first_bit(rq->qu.spa.bitmap);
+	queue = &rq->qu.spa.queue[idx].list;
+	next = list_entry(queue->next, task_t, run_list);
+	/* Take the opportunity to update the task's prio field just in
+	 * case it's been promoted.
+	 */
+	next->prio = idx;
+switch_tasks:
+	if (next == rq->idle)
+		schedstat_inc(rq, sched_goidle);
+	prefetch(next);
+	prefetch_stack(next);
+	clear_tsk_need_resched(prev);
+	rcu_qsctr_inc(task_cpu(prev));
+
+	prev->last_ran = now;
+
+	sched_info_switch(prev, next);
+	if (likely(prev != next)) {
+		delta_delay_stats(next, now);
+		next->sdu.spa.flags &= ~(SPAF_UISLEEP | SPAF_NONIASLEEP);
+		rq->nr_switches++;
+		rq->curr = next;
+		++*switch_count;
+
+		prepare_task_switch(rq, next);
+		prev = context_switch(rq, prev, next);
+		barrier();
+		/*
+		 * this_rq must be evaluated again because prev may have moved
+		 * CPUs since it called schedule(), thus the 'rq' on its stack
+		 * frame will be invalid.
+		 */
+		finish_task_switch(this_rq(), prev);
+	} else
+		spin_unlock_irq(&rq->lock);
+}
+
+void spa_set_normal_task_nice(task_t *p, long nice)
+{
+	int old_static_prio, delta;
+	struct runqueue *rq = task_rq(p);
+	struct spa_runqueue_queue *rqq = &rq->qu.spa;
+
+	old_static_prio = p->static_prio;
+	p->static_prio = NICE_TO_PRIO(nice);
+	spa_sched_child->reassess_at_renice(p);
+
+	if (p->prio == SPA_BGND_PRIO)
+		return;
+
+	delta = p->static_prio - old_static_prio;
+	if (delta == 0)
+		return;
+
+	if (task_is_queued(p)) {
+		dec_raw_weighted_load(rq, p);
+		spa_set_load_weight(p);
+		inc_raw_weighted_load(rq, p);
+		rqq->nr_active_eb_shares -= p->sdu.spa.eb_shares;
+		update_shares(p);
+		rqq->nr_active_eb_shares += p->sdu.spa.eb_shares;
+		dequeue_task(p, rqq);
+		/* This check is done here rather than outside the if statement
+		 * as there is a need to avoid a race condition with p->prio in
+		 * dequeue_task()
+		 */
+		if (unlikely(delta > (SPA_SOFT_CAP_PRIO - p->prio)))
+			delta = (SPA_SOFT_CAP_PRIO - p->prio);
+		else if (unlikely(delta < (MAX_RT_PRIO - p->prio)))
+			delta = (MAX_RT_PRIO - p->prio);
+		p->prio += delta;
+		enqueue_task(p, rqq);
+		/*
+		 * If the task increased its priority or is running and
+		 * lowered its priority, then reschedule its CPU:
+		 */
+		if (delta < 0 || (delta > 0 && task_running(rq, p)))
+			resched_task(rq->curr);
+	} else {
+		spa_set_load_weight(p);
+		update_shares(p);
+		/* See comment in other branch of if statement */
+		if (unlikely(delta > (SPA_SOFT_CAP_PRIO - p->prio)))
+			delta = (SPA_SOFT_CAP_PRIO - p->prio);
+		else if (unlikely(delta < (MAX_RT_PRIO - p->prio)))
+			delta = (MAX_RT_PRIO - p->prio);
+		p->prio += delta;
+	}
+}
+
+void spa_init_batch_task(task_t *p)
+{
+}
+
+/*
+ * setscheduler - change the scheduling policy and/or RT priority of a thread.
+ */
+void spa_setscheduler(task_t *p, int policy, int prio)
+{
+	int oldprio;
+	int queued;
+	runqueue_t *rq = task_rq(p);
+
+	queued = task_is_queued(p);
+	if (queued)
+		deactivate_task(p, rq);
+	oldprio = p->prio;
+	__setscheduler(p, policy, prio);
+	update_shares(p);
+	if (queued) {
+		__activate_task(p, rq);
+		/*
+		 * Reschedule if we are currently running on this runqueue and
+		 * our priority decreased, or if we are not currently running on
+		 * this runqueue and our priority is higher than the current's
+		 */
+		if (task_running(rq, p)) {
+			if (p->prio > oldprio)
+				resched_task(rq->curr);
+		} else
+			preempt_if_warranted(p, rq);
+	}
+}
+
+/*
+ * Require: 0 <= new_cap <= 1000
+ */
+int set_cpu_rate_cap(struct task_struct *p, unsigned long new_cap)
+{
+	int is_allowed;
+	unsigned long flags;
+	struct runqueue *rq;
+	long delta;
+
+	/* this function could be called when other schedulers are in
+	 * charge (with catastrophic results) so let's check
+	 */
+	if (!spa_in_charge)
+		return -ENOSYS;
+
+	if (new_cap > 1000)
+		return -EINVAL;
+	is_allowed = capable(CAP_SYS_NICE);
+	/*
+	 * We have to be careful: if called from /proc code,
+	 * the task might be in the middle of scheduling on another CPU.
+	 */
+	rq = task_rq_lock(p, &flags);
+	delta = new_cap - p->sdu.spa.cpu_rate_cap;
+	if (!is_allowed) {
+		/*
+		 * Ordinary users can set/change caps on their own tasks
+		 * provided that the new setting is MORE constraining
+		 */
+		if (((current->euid != p->uid) && (current->uid != p->uid)) || (delta > 0)) {
+			task_rq_unlock(rq, &flags);
+			return -EPERM;
+		}
+	}
+	/*
+	 * The RT tasks don't have caps, but we still allow the caps to be
+	 * set - but as expected it wont have any effect on scheduling until
+	 * set - but as expected it won't have any effect on scheduling until
+	 */
+	p->sdu.spa.cpu_rate_cap = new_cap;
+	if (p->sdu.spa.cpu_rate_cap < p->sdu.spa.cpu_rate_hard_cap)
+		p->sdu.spa.min_cpu_rate_cap = p->sdu.spa.cpu_rate_cap;
+	else
+		p->sdu.spa.min_cpu_rate_cap = p->sdu.spa.cpu_rate_hard_cap;
+
+	spa_sched_child->reassess_at_renice(p);
+
+	if (rt_task(p))
+		goto out;
+
+	if (task_is_queued(p)) {
+		int delta = -p->prio;
+		struct spa_runqueue_queue *rqq = &rq->qu.spa;
+
+		dequeue_task(p, rqq);
+		dec_raw_weighted_load(rq, p);
+		delta += p->prio = effective_prio(p);
+		spa_set_load_weight(p);
+		enqueue_task(p, rqq);
+		inc_raw_weighted_load(rq, p);
+		/*
+		 * If the task increased its priority or is running and
+		 * lowered its priority, then reschedule its CPU:
+		 */
+		if (delta < 0 || (delta > 0 && task_running(rq, p)))
+			resched_task(rq->curr);
+	} else
+		spa_set_load_weight(p);
+out:
+	task_rq_unlock(rq, &flags);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(set_cpu_rate_cap);
+
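+/*
+ * Caps are specified in parts per thousand of a single CPU.  E.g.
+ *
+ *	set_cpu_rate_cap(p, 250);	soft limits p to 25% of a CPU
+ *	set_cpu_rate_cap(p, 1000);	removes the soft cap
+ *	set_cpu_rate_cap(p, 1001);	fails with -EINVAL
+ *
+ * and unprivileged callers may only lower the caps on their own tasks.
+ */
+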
+unsigned long get_cpu_rate_cap(struct task_struct *p)
+{
+	if (!spa_in_charge)
+		return 1000;
+
+	return p->sdu.spa.cpu_rate_cap;
+}
+
+EXPORT_SYMBOL(get_cpu_rate_cap);
+
+/*
+ * Require: 1 <= new_cap <= 1000
+ */
+int set_cpu_rate_hard_cap(struct task_struct *p, unsigned long new_cap)
+{
+	int is_allowed;
+	unsigned long flags;
+	struct runqueue *rq;
+	long delta;
+
+	/* this function could be called when other schedulers are in
+	 * charge (with catastrophic results) so let's check
+	 */
+	if (!spa_in_charge)
+		return -ENOSYS;
+
+	if ((new_cap > 1000) || (new_cap == 0)) /* zero hard caps are not allowed */
+		return -EINVAL;
+	is_allowed = capable(CAP_SYS_NICE);
+	/*
+	 * We have to be careful: if called from /proc code,
+	 * the task might be in the middle of scheduling on another CPU.
+	 */
+	rq = task_rq_lock(p, &flags);
+	delta = new_cap - p->sdu.spa.cpu_rate_hard_cap;
+	if (!is_allowed) {
+		/*
+		 * Ordinary users can set/change caps on their own tasks
+		 * provided that the new setting is MORE constraining
+		 */
+		if (((current->euid != p->uid) && (current->uid != p->uid)) || (delta > 0)) {
+			task_rq_unlock(rq, &flags);
+			return -EPERM;
+		}
+	}
+	/*
+	 * The RT tasks don't have caps, but we still allow the caps to be
+	 * set - but as expected it won't have any effect on scheduling until
+	 * the task becomes SCHED_NORMAL/SCHED_BATCH:
+	 */
+	p->sdu.spa.cpu_rate_hard_cap = new_cap;
+	if (p->sdu.spa.cpu_rate_cap < p->sdu.spa.cpu_rate_hard_cap)
+		p->sdu.spa.min_cpu_rate_cap = p->sdu.spa.cpu_rate_cap;
+	else
+		p->sdu.spa.min_cpu_rate_cap = p->sdu.spa.cpu_rate_hard_cap;
+
+	spa_sched_child->reassess_at_renice(p);
+
+	if (rt_task(p))
+		goto out;
+
+	if (task_is_queued(p)) {
+		dec_raw_weighted_load(rq, p);
+		spa_set_load_weight(p);
+		inc_raw_weighted_load(rq, p);
+	} else
+		spa_set_load_weight(p);
+
+	/* (POSSIBLY) TODO: if it's sinbinned and the cap is relaxed then
+	 * release it from the sinbin
+	 */
+out:
+	task_rq_unlock(rq, &flags);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(set_cpu_rate_hard_cap);
+
+unsigned long get_cpu_rate_hard_cap(struct task_struct *p)
+{
+	if (!spa_in_charge)
+		return 1000;
+
+	return p->sdu.spa.cpu_rate_hard_cap;
+}
+
+EXPORT_SYMBOL(get_cpu_rate_hard_cap);
+
+/**
+ * sys_sched_yield - yield the current processor to other threads.
+ *
+ * This function yields the current CPU by moving the calling thread
+ * towards the back of its priority queue. If there are no other threads
+ * running on this CPU then this function will simply return.
+ */
+
+long spa_sys_yield(void)
+{
+	runqueue_t *rq = this_rq_lock();
+	struct spa_runqueue_queue *rqq = &rq->qu.spa;
+
+	schedstat_inc(rq, yld_cnt);
+	/* If there are other tasks on this CPU make sure that at least
+	 * one of them gets some CPU before this task's next bite of the
+	 * cherry.  Dequeue before looking for the appropriate run
+	 * queue so that we don't find our queue if we were the sole
+	 * occupant of that queue.
+	 */
+	dequeue_task(current, rqq);
+	/*
+	 * special rule: RT tasks will just roundrobin.
+	 */
+	if (likely(!rt_task(current))) {
+		int idx = find_next_bit(rqq->bitmap, SPA_IDLE_PRIO, current->prio);
+
+		if (idx < SPA_IDLE_PRIO) {
+			if ((idx < SPA_BGND_PRIO) || task_is_bgnd(current))
+				current->prio = idx;
+			else
+				current->prio = SPA_BGND_PRIO - 1;
+		}
+	}
+	enqueue_task(current, rqq);
+
+	if (rq->nr_running == 1)
+		schedstat_inc(rq, yld_both_empty);
+
+	/*
+	 * Since we are going to call schedule() anyway, there's
+	 * no need to preempt or enable interrupts:
+	 */
+	__release(rq->lock);
+	_raw_spin_unlock(&rq->lock);
+	preempt_enable_no_resched();
+
+	schedule();
+
+	return 0;
+}
+
+void spa_yield(void)
+{
+	set_current_state(TASK_RUNNING);
+	spa_sys_yield();
+}
+
+void spa_init_idle(task_t *idle, int cpu)
+{
+	idle->prio = SPA_IDLE_PRIO;
+	/*
+	 * Initialize the scheduling statistics counters as they may provide
+	 * valuable information about the CPU e.g. avg_cpu_per_cycle for the
+	 * idle task will be an estimate of the average time the CPU is idle.
+	 * sched_init() may not have completed yet so use INITIAL_TIMESTAMP.
+	 */
+	initialize_stats(idle, INITIAL_TIMESTAMP);
+}
+
+#ifdef CONFIG_SMP
+/* source and destination queues will be already locked */
+void spa_migrate_queued_task(struct task_struct *p, int dest_cpu)
+{
+	struct runqueue *rq_src = task_rq(p);
+	struct runqueue *rq_dest = cpu_rq(dest_cpu);
+
+	deactivate_task(p, rq_src);
+	set_task_cpu(p, dest_cpu);
+	adjust_timestamp(p, rq_dest, rq_src);
+	activate_task(p, rq_dest);
+	preempt_if_warranted(p, rq_dest);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void spa_set_select_idle_first(struct runqueue *rq)
+{
+	__setscheduler(rq->idle, SCHED_FIFO, MAX_RT_PRIO - 1);
+	/* Add idle task to _front_ of its priority queue */
+	enqueue_task_head(rq->idle, &rq->qu.spa);
+	spa_inc_nr_running(rq->idle, rq);
+}
+
+void spa_set_select_idle_last(struct runqueue *rq)
+{
+	deactivate_task(rq->idle, rq);
+	rq->idle->static_prio = SPA_IDLE_PRIO;
+	__setscheduler(rq->idle, SCHED_NORMAL, 0);
+}
+
+void spa_migrate_dead_tasks(unsigned int dead_cpu)
+{
+	unsigned i;
+	struct runqueue *rq = cpu_rq(dead_cpu);
+
+	for (i = 0; i < SPA_IDLE_PRIO; i++) {
+		struct list_head *list = &rq->qu.spa.queue[i].list;
+		while (!list_empty(list))
+			migrate_dead(dead_cpu, list_entry(list->next, task_t, run_list));
+	}
+}
+#endif
+#endif
+
+void spa_sched_init(void)
+{
+	spa_in_charge = 1;
+	init_task.sdu.spa.time_slice = HZ;
+	init_task.sdu.spa.cpu_rate_cap = 1000;
+	init_task.sdu.spa.cpu_rate_hard_cap = 1000;
+	init_task.sdu.spa.min_cpu_rate_cap = 1000;
+	init_task.sdu.spa.sinbin_timer.function = sinbin_release_fn;
+	init_task.sdu.spa.pre_bonus_priority = SPA_BGND_PRIO - 20;
+	init_task.sdu.spa.interactive_bonus = 0;
+	init_task.sdu.spa.auxilary_bonus = 0;
+	update_shares(&init_task);
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
+void spa_normalize_rt_task(struct task_struct *p)
+{
+	int queued;
+	unsigned long flags;
+	runqueue_t *rq;
+
+	rq = task_rq_lock(p, &flags);
+
+	queued = task_is_queued(p);
+	if (queued)
+		deactivate_task(p, rq);
+	__setscheduler(p, SCHED_NORMAL, 0);
+	update_shares(p);
+	if (queued) {
+		__activate_task(p, rq);
+		resched_task(rq->curr);
+	}
+
+	task_rq_unlock(rq, &flags);
+}
+#endif
+
+static inline unsigned long rnd_msecs_to_jiffies(unsigned long msecs)
+{
+	return (msecs * HZ + 500) / 1000;
+}
+
+static inline unsigned long rnd_jiffies_to_msecs(unsigned long jiffs)
+{
+	return (jiffs * 1000 + HZ/2) / HZ;
+}
+
+unsigned long spa_get_time_slice_msecs(void)
+{
+	return rnd_jiffies_to_msecs(time_slice);
+}
+
+int spa_set_time_slice_msecs(unsigned long msecs)
+{
+	unsigned long jiffs = rnd_msecs_to_jiffies(msecs);
+
+	if (jiffs < MIN_TIMESLICE || jiffs > MAX_TIMESLICE)
+		return -1;
+
+	time_slice = jiffs;
+
+	return 0;
+}
+
+unsigned long spa_get_sched_rr_time_slice_msecs(void)
+{
+	return rnd_jiffies_to_msecs(sched_rr_time_slice);
+}
+
+int spa_set_sched_rr_time_slice_msecs(unsigned long msecs)
+{
+	unsigned long jiffs = rnd_msecs_to_jiffies(msecs);
+
+	if (jiffs < MIN_TIMESLICE || jiffs > MAX_TIMESLICE)
+		return -1;
+
+	sched_rr_time_slice = jiffs;
+
+	return 0;
+}
+
+unsigned int spa_get_bgnd_time_slice_multiplier(void)
+{
+	return bgnd_time_slice_multiplier;
+}
+
+int spa_set_bgnd_time_slice_multiplier(unsigned int val)
+{
+	if (val < 1 || val > MAX_BGND_TIME_SLICE_MULTIPLIER)
+		return -1;
+
+	bgnd_time_slice_multiplier = val;
+
+	return 0;
+}
+
+unsigned long spa_get_base_prom_interval_msecs(void)
+{
+	return rnd_jiffies_to_msecs(base_prom_interval);
+}
+
+int spa_set_base_prom_interval_msecs(unsigned long msecs)
+{
+	unsigned long jiffs = rnd_msecs_to_jiffies(msecs);
+
+	if (jiffs < time_slice)
+		return -1;
+
+	base_prom_interval = jiffs;
+
+	return 0;
+}
+
+unsigned int spa_get_promotion_floor(void)
+{
+	return promotion_floor;
+}
+
+int spa_set_promotion_floor(unsigned int val)
+{
+	if (val < MAX_RT_PRIO || val > SPA_BGND_PRIO)
+		return -1;
+
+	promotion_floor = val;
+
+	return 0;
+}
+
+#define no_change(a) (a)
+
+SCHED_DRV_SYSFS_UINT_RW(time_slice, rnd_msecs_to_jiffies, rnd_jiffies_to_msecs,
+			MIN_TIMESLICE, MAX_TIMESLICE);
+SCHED_DRV_SYSFS_UINT_RW(sched_rr_time_slice, rnd_msecs_to_jiffies,
+			rnd_jiffies_to_msecs, MIN_TIMESLICE, MAX_TIMESLICE);
+SCHED_DRV_SYSFS_UINT_RW(base_prom_interval, rnd_msecs_to_jiffies,
+			rnd_jiffies_to_msecs, time_slice, ULONG_MAX);
+SCHED_DRV_SYSFS_UINT_RW(bgnd_time_slice_multiplier, no_change, no_change,
+			1, MAX_BGND_TIME_SLICE_MULTIPLIER);
+SCHED_DRV_SYSFS_UINT_RW(promotion_floor, no_change, no_change,
+			MAX_RT_PRIO, SPA_BGND_PRIO);
+
+#ifdef CONFIG_CPUSCHED_SPA_NF
+static struct attribute *spa_nf_attrs[] = {
+	&SCHED_DRV_SYSFS_ATTR(time_slice),
+	&SCHED_DRV_SYSFS_ATTR(sched_rr_time_slice),
+	&SCHED_DRV_SYSFS_ATTR(bgnd_time_slice_multiplier),
+	&SCHED_DRV_SYSFS_ATTR(base_prom_interval),
+	&SCHED_DRV_SYSFS_ATTR(promotion_floor),
+	NULL,
+};
+#endif
+
+struct sched_spa_child spa_nf_child = {
+	.soft_cap_effective_prio = spa_nf_soft_cap_effective_prio,
+	.normal_effective_prio = spa_nf_normal_effective_prio,
+	.reassess_at_activation = do_nothing_to_task,
+	.fork_extras = do_nothing_to_task,
+	.runq_data_tick = spa_nf_runq_data_tick,
+	.reassess_at_end_of_ts = do_nothing_to_task,
+	.reassess_at_sinbin_release = do_nothing_to_task,
+	.reassess_at_renice = do_nothing_to_task,
+};
+
+#ifdef CONFIG_CPUSCHED_SPA_NF
+const struct sched_drv spa_nf_sched_drv = {
+	.name = "spa_no_frills",
+	.init_runqueue_queue = spa_init_runqueue_queue,
+	.set_oom_time_slice = spa_set_oom_time_slice,
+#ifdef CONFIG_SMP
+	.set_load_weight = spa_set_load_weight,
+#endif
+	.task_timeslice = spa_task_timeslice,
+	.wake_up_task = spa_wake_up_task,
+	.fork = spa_fork,
+	.wake_up_new_task = spa_wake_up_new_task,
+	.exit = spa_exit,
+#ifdef CONFIG_SMP
+	.move_tasks = spa_move_tasks,
+#endif
+	.tick = spa_tick,
+#ifdef CONFIG_SCHED_SMT
+	.head_of_queue = spa_head_of_queue,
+	.dependent_sleeper_trumps = spa_dependent_sleeper_trumps,
+#endif
+	.schedule = spa_schedule,
+	.set_normal_task_nice = spa_set_normal_task_nice,
+	.init_batch_task = spa_init_batch_task,
+	.setscheduler = spa_setscheduler,
+	.sys_yield = spa_sys_yield,
+	.yield = spa_yield,
+	.init_idle = spa_init_idle,
+	.sched_init = spa_sched_init,
+#ifdef CONFIG_SMP
+	.migrate_queued_task = spa_migrate_queued_task,
+#ifdef CONFIG_HOTPLUG_CPU
+	.set_select_idle_first = spa_set_select_idle_first,
+	.set_select_idle_last = spa_set_select_idle_last,
+	.migrate_dead_tasks = spa_migrate_dead_tasks,
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	.normalize_rt_task = spa_normalize_rt_task,
+#endif
+	.attrs = spa_nf_attrs,
+};
+#endif
diff -urN oldtree/kernel/sched_spa_ebs.c newtree/kernel/sched_spa_ebs.c
--- oldtree/kernel/sched_spa_ebs.c	1970-01-01 00:00:00.000000000 +0000
+++ newtree/kernel/sched_spa_ebs.c	2006-03-08 18:56:30.071756500 +0000
@@ -0,0 +1,395 @@
+/*
+ *  kernel/sched_spa_ebs.c
+ *
+ *  CPU scheduler mode
+ *
+ *  Copyright (C) 2004  Aurema Pty Ltd
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+#include <linux/sched_spa.h>
+
+#include <asm/uaccess.h>
+
+#define MIN_VAL(a, b)		((a) < (b) ? (a) : (b))
+#define EB_RANGE		46
+#define MAX_TOTAL_BONUS		(SPA_BGND_PRIO - (MAX_RT_PRIO + EB_RANGE) - 1)
+/* allow a slot for media streamers and 2 for wake up bonuses */
+#define MAX_MAX_IA_BONUS	MAX_TOTAL_BONUS
+#define DEF_MAX_IA_BONUS 	MIN_VAL(MAX_MAX_IA_BONUS, 15)
+
+#define EB_BASE_PRIO	(MAX_RT_PRIO + MAX_TOTAL_BONUS)
+#define EB_PAR_PRIO	(EB_BASE_PRIO + EB_RANGE / 2)
+
+static unsigned int max_ia_bonus = DEF_MAX_IA_BONUS;
+
+/*
+ * Tasks more sleepy than this are considered interactive
+ */
+static unsigned int iab_incr_threshold = 900;
+
+/*
+ * Tasks less sleepy than this are considered NOT interactive
+ */
+static unsigned int iab_decr_threshold = 50;
+
+/*
+ * Because newly forked processes will get a very high priority we will
+ * give them a shorter initial time slice to prevent them causing problems
+ * if they're CPU hogs.
+ */
+static unsigned long initial_time_slice = ((10 * HZ / 1000) ? : 1);
+
+/*
+ * To avoid overflows during various calculations we need to scale
+ * the average time intervals being used down a bit.  This will bring them
+ * down to approximately microseconds.  The approximation doesn't matter as
+ * it's all local and not exposed to where it could confuse people.
+ */
+#define SCALE_OFFSET 11
+#define SCALE_DOWN(t) ((t) >> (SCALE_OFFSET + SPA_AVG_OFFSET))
+#define SCALED_NSEC_PER_TICK ((1000000000 / HZ) >> SCALE_OFFSET)
+
+struct sched_ebs_runq_data {
+	unsigned long long cpu_time;
+	unsigned long long cycle_len;
+	unsigned int shares;
+};
+
+static DEFINE_PER_CPU(struct sched_ebs_runq_data, ebs_runqs);
+#define cpu_zrq(cpu)	(&per_cpu(ebs_runqs, cpu))
+#define task_zrq(p)	cpu_zrq(task_cpu(p))
+
+/* Requires a <= b or else result could be outside range and divide by zero
+ * becomes a possibility.
+ */
+static inline unsigned int map_ratio(unsigned long long a,
+				     unsigned long long b,
+				     unsigned int range)
+{
+	/*
+	 * shortcut and avoid divide by zero later.
+	 * Relies on a <= b.
+	 */
+	if (a == 0)
+		return 0;
+
+#if BITS_PER_LONG < 64
+	/*
+	 * Assume that there's no 64 bit divide available
+	 */
+	a *= range;
+
+	if (a < b)
+		return 0;
+
+	/*
+	 * Scale down until b fits in 32 bits so that we can do
+	 * the divide using do_div()
+	 */
+	while (b > ULONG_MAX) { a >>= 1; b >>= 1; }
+
+	(void)do_div(a, (unsigned long)b);
+
+	return a;
+#else
+	return (a * range) / b;
+#endif
+}
+
+static inline unsigned int map_ratio_sqr(unsigned long long a,
+					 unsigned long long b,
+					 unsigned int range)
+{
+	unsigned int tmp;
+
+	if (unlikely(range == 0))
+		return 0;
+
+	tmp = map_ratio(a, b, range);
+
+	return (tmp * tmp) / range;
+}
+
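+/*
+ * E.g. with range = EB_RANGE / 2 = 23, a task using half of its entitlement
+ * maps to 23 / 2 = 11 via map_ratio() and then to 11 * 11 / 23 = 5 slots
+ * via map_ratio_sqr(), i.e. the squaring keeps tasks comfortably within
+ * their entitlement close to EB_BASE_PRIO and only bites hard as usage
+ * approaches the full entitlement.
+ */
+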
+static inline void decr_interactive_bonus(struct task_struct *p)
+{
+	if (p->sdu.spa.interactive_bonus > 0)
+		--p->sdu.spa.interactive_bonus;
+}
+
+static inline void fast_decr_interactive_bonus(struct task_struct *p)
+{
+	p->sdu.spa.interactive_bonus /= 2;
+}
+
+static inline void incr_interactive_bonus(struct task_struct *p)
+{
+	if (p->sdu.spa.interactive_bonus < max_ia_bonus)
+		++p->sdu.spa.interactive_bonus;
+	else
+		p->sdu.spa.interactive_bonus = max_ia_bonus;
+}
+
+static inline int ebs_interactive_bonus(const struct task_struct *p)
+{
+	if (p->policy == SCHED_BATCH)
+		return 0;
+
+	return p->sdu.spa.interactive_bonus;
+}
+
+/*
+ * Calculate the entitlement-based priority (without bonuses).
+ * This never gets called for real-time tasks.
+ */
+static void ebs_calculate_priority(task_t *p)
+{
+	/*
+	 * Prevent possible divide by zero and take shortcut
+	 */
+	if (unlikely(p->sdu.spa.min_cpu_rate_cap == 0)) {
+		p->sdu.spa.pre_bonus_priority = SPA_BGND_PRIO - 1;
+	} else if (spa_exceeding_cpu_rate_cap(p)) {
+		struct sched_ebs_runq_data *zrq = task_zrq(p);
+		unsigned long long lhs = p->sdu.spa.min_cpu_rate_cap *
+			zrq->cycle_len * zrq->shares;
+		unsigned long long rhs = p->sdu.spa.eb_shares * zrq->cpu_time *
+			1000;
+
+		if (lhs > rhs) {
+			unsigned long long sdacl = \
+				SCALE_DOWN(p->sdu.spa.avg_cycle_length);
+
+			/*
+			 * New yardstick: plausible values that match the
+			 * cap for this task.
+			 */
+			zrq->cpu_time = (sdacl * p->sdu.spa.min_cpu_rate_cap)
+				>> 11;
+			zrq->cycle_len = (sdacl * 1000) >> 11;
+			zrq->shares = p->sdu.spa.eb_shares;
+		}
+
+		p->sdu.spa.pre_bonus_priority =
+			spa_eb_soft_cap_priority(p, EB_PAR_PRIO);
+	} else {
+		struct sched_ebs_runq_data *zrq = task_zrq(p);
+		unsigned long long sdacl =
+			SCALE_DOWN(p->sdu.spa.avg_cycle_length);
+		unsigned long long sdacpc =
+			SCALE_DOWN(p->sdu.spa.avg_cpu_per_cycle);
+		unsigned long long lhs = sdacpc * zrq->cycle_len * zrq->shares;
+		unsigned long long rhs = sdacl * p->sdu.spa.eb_shares *
+			zrq->cpu_time;
+
+		if (lhs > rhs) {
+			/* new yardstick */
+			zrq->cpu_time = sdacpc;
+			zrq->cycle_len = sdacl;
+			zrq->shares = p->sdu.spa.eb_shares;
+			p->sdu.spa.pre_bonus_priority = EB_PAR_PRIO;
+		} else {
+			p->sdu.spa.pre_bonus_priority = EB_BASE_PRIO;
+			p->sdu.spa.pre_bonus_priority +=
+				map_ratio_sqr(lhs, rhs, EB_RANGE / 2);
+		}
+	}
+}
+
+static void update_interactive_bonus(task_t *p, unsigned long long tl,
+	unsigned long long bl)
+{
+	tl *= 1000;
+	if (tl > (bl * iab_incr_threshold))
+		incr_interactive_bonus(p);
+	else if (tl < (bl * iab_decr_threshold))
+		fast_decr_interactive_bonus(p);
+	else if (tl < (bl * (iab_incr_threshold + iab_decr_threshold) / 2))
+		decr_interactive_bonus(p);
+}
+
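+/*
+ * The ratio tl / bl is effectively the task's "sleepiness" in parts per
+ * thousand.  With the default thresholds a task sleeping for more than 90%
+ * of its cycle gains a point of interactive bonus, one sleeping for less
+ * than 5% has its bonus halved, one between 5% and 47.5% loses a point and
+ * anything in between is left alone.
+ */
+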
+static void ebs_reassess_at_activation(task_t *p)
+{
+	unsigned long long tl = p->sdu.spa.avg_ia_sleep_per_cycle;
+	unsigned long long bl = p->sdu.spa.avg_cpu_per_cycle;
+
+	if (latency_interactive(p))
+		tl += p->sdu.spa.avg_ia_latency;
+
+	update_interactive_bonus(p, tl, tl + bl);
+
+	ebs_calculate_priority(p);
+}
+
+static void ebs_reassess_at_end_of_ts(task_t *p)
+{
+	unsigned long long tl = p->sdu.spa.avg_ia_sleep_per_cycle;
+	unsigned long long bl = p->sdu.spa.avg_cpu_per_cycle;
+
+	update_interactive_bonus(p, tl, tl + bl);
+
+	ebs_calculate_priority(p);
+}
+
+static void ebs_init_cpu_runq_data(unsigned int cpu)
+{
+	struct sched_ebs_runq_data *zrq = &per_cpu(ebs_runqs, cpu);
+
+	zrq->cpu_time = 0;
+	zrq->cycle_len = 1;
+	zrq->shares = 1;
+}
+
+static void ebs_runq_data_tick(unsigned int cpu, runqueue_t *rq)
+{
+	struct sched_ebs_runq_data *zrq = cpu_zrq(cpu);
+
+	spin_lock(&rq->lock);
+	zrq->cycle_len += SCALED_NSEC_PER_TICK;
+	spin_unlock(&rq->lock);
+}
+
+static void ebs_fork(struct task_struct *p)
+{
+	/*
+	 * New processes start with no interactive bonus.  On the assumption
+	 * that they'll behave like their parents, threads keep their parent's.
+	 */
+	if (p->pid == p->tgid)
+		p->sdu.spa.interactive_bonus = 0;
+	if (p->sdu.spa.time_slice > initial_time_slice)
+		p->sdu.spa.time_slice = initial_time_slice;
+}
+
+static inline int ebs_soft_cap_effective_prio(const struct task_struct *p)
+{
+	return p->sdu.spa.pre_bonus_priority;
+}
+
+static inline int ebs_effective_prio(const struct task_struct *p)
+{
+	return p->sdu.spa.pre_bonus_priority - ebs_interactive_bonus(p);
+}
+
+static void ebs_reassess_at_renice(struct task_struct *p)
+{
+	if (!rt_task(p))
+		ebs_calculate_priority(p);
+}
+
+struct sched_spa_child ebs_child = {
+	.soft_cap_effective_prio = ebs_effective_prio,
+	.normal_effective_prio = ebs_effective_prio,
+	.reassess_at_activation = ebs_reassess_at_activation,
+	.fork_extras = ebs_fork,
+	.runq_data_tick = ebs_runq_data_tick,
+	.reassess_at_end_of_ts = ebs_reassess_at_end_of_ts,
+	.reassess_at_sinbin_release = ebs_calculate_priority,
+	.reassess_at_renice = ebs_reassess_at_renice,
+};
+
+static void ebs_sched_init(void)
+{
+	int i;
+
+	spa_sched_init();
+
+	for (i = 0; i < NR_CPUS; i++)
+		ebs_init_cpu_runq_data(i);
+
+	spa_sched_child = &ebs_child;
+	init_task.sdu.spa.eb_shares = DEFAULT_EB_SHARES;
+	spa_set_promotion_floor(EB_PAR_PRIO - MAX_TOTAL_BONUS);
+	spa_set_base_prom_interval_msecs(spa_get_time_slice_msecs() * 15);
+}
+
+#include <linux/sched_pvt.h>
+
+static inline unsigned long rnd_msecs_to_jiffies(unsigned long msecs)
+{
+	return (msecs * HZ + 500) / 1000;
+}
+
+static inline unsigned long rnd_jiffies_to_msecs(unsigned long jiffs)
+{
+	return (jiffs * 1000 + HZ/2) / HZ;
+}
+
+#define no_change(a) (a)
+SCHED_DRV_SYSFS_UINT_RW_STATIC(max_ia_bonus, no_change, no_change,
+			       0, MAX_MAX_IA_BONUS);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(iab_incr_threshold, no_change, no_change,
+			       0, 1000);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(iab_decr_threshold, no_change, no_change,
+			       0, 1000);
+SCHED_DRV_SYSFS_UINT_RW(initial_time_slice, rnd_msecs_to_jiffies, rnd_jiffies_to_msecs,
+			1, ((1000 * HZ / 1000) ? : 1));
+
+static struct attribute *ebs_attrs[] = {
+	&SCHED_DRV_SYSFS_ATTR(time_slice),
+	&SCHED_DRV_SYSFS_ATTR(initial_time_slice),
+	&SCHED_DRV_SYSFS_ATTR(sched_rr_time_slice),
+	&SCHED_DRV_SYSFS_ATTR(bgnd_time_slice_multiplier),
+	&SCHED_DRV_SYSFS_ATTR(base_prom_interval),
+	&SCHED_DRV_SYSFS_ATTR(promotion_floor),
+	&SCHED_DRV_SYSFS_ATTR(max_ia_bonus),
+	&SCHED_DRV_SYSFS_ATTR(iab_incr_threshold),
+	&SCHED_DRV_SYSFS_ATTR(iab_decr_threshold),
+	NULL,
+};
+
+const struct sched_drv spa_ebs_sched_drv = {
+	.name = "spa_ebs",
+	.init_runqueue_queue = spa_init_runqueue_queue,
+	.set_oom_time_slice = spa_set_oom_time_slice,
+#ifdef CONFIG_SMP
+	.set_load_weight = spa_set_load_weight,
+#endif
+	.task_timeslice = spa_task_timeslice,
+	.wake_up_task = spa_wake_up_task,
+	.fork = spa_fork,
+	.wake_up_new_task = spa_wake_up_new_task,
+	.exit = spa_exit,
+#ifdef CONFIG_SMP
+	.move_tasks = spa_move_tasks,
+#endif
+	.tick = spa_tick,
+#ifdef CONFIG_SCHED_SMT
+	.head_of_queue = spa_head_of_queue,
+	.dependent_sleeper_trumps = spa_dependent_sleeper_trumps,
+#endif
+	.schedule = spa_schedule,
+	.set_normal_task_nice = spa_set_normal_task_nice,
+	.setscheduler = spa_setscheduler,
+	.yield = spa_yield,
+	.sys_yield = spa_sys_yield,
+	.init_idle = spa_init_idle,
+	.sched_init = ebs_sched_init,
+#ifdef CONFIG_SMP
+	.migrate_queued_task = spa_migrate_queued_task,
+#ifdef CONFIG_HOTPLUG_CPU
+	.set_select_idle_first = spa_set_select_idle_first,
+	.set_select_idle_last = spa_set_select_idle_last,
+	.migrate_dead_tasks = spa_migrate_dead_tasks,
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	.normalize_rt_task = spa_normalize_rt_task,
+#endif
+	.attrs = ebs_attrs,
+};
diff -urN oldtree/kernel/sched_spa_svr.c newtree/kernel/sched_spa_svr.c
--- oldtree/kernel/sched_spa_svr.c	1970-01-01 00:00:00.000000000 +0000
+++ newtree/kernel/sched_spa_svr.c	2006-03-08 18:56:30.071756500 +0000
@@ -0,0 +1,196 @@
+/*
+ *  kernel/sched_spa_svr.c
+ *
+ *  CPU scheduler mode
+ *
+ *  Copyright (C) 2004  Aurema Pty Ltd
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/sched.h>
+#include <linux/sched_spa.h>
+
+#define MIN_VAL(a, b)		((a) < (b) ? (a) : (b))
+#define MAX_TOTAL_BONUS		(SPA_BGND_PRIO - (MAX_RT_PRIO + 40) - 1)
+#define MAX_MAX_TPT_BONUS	MAX_TOTAL_BONUS
+#define DEF_MAX_TPT_BONUS 	MIN_VAL(MAX_MAX_TPT_BONUS, 15)
+
+#define NRUN_AVG_OFFSET 7
+#define NRUN_AVG_ALPHA ((1 << NRUN_AVG_OFFSET) - 2)
+#define NRUN_AVG_INCR(a) ((a) << 1)
+#define NRUN_AVG_ONE (1UL << NRUN_AVG_OFFSET)
+#define NRUN_AVG_MUL(a, b) (((a) * (b)) >> NRUN_AVG_OFFSET)
+
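+/*
+ * These implement a fixed point (scaled by NRUN_AVG_ONE = 128) exponentially
+ * decaying average of nr_running with a decay factor of 126/128 per tick.
+ * E.g. with a steady nr_running of 3 the per CPU average settles at
+ * 3 << NRUN_AVG_OFFSET = 384.
+ */
+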
+static unsigned int max_tpt_bonus = DEF_MAX_TPT_BONUS;
+
+static DEFINE_PER_CPU(unsigned long, rq_avg_tasks);
+
+static void spa_svr_runq_data_tick(unsigned int cpu, runqueue_t *rq)
+{
+	unsigned long nval = NRUN_AVG_MUL(per_cpu(rq_avg_tasks, cpu),
+					  NRUN_AVG_ALPHA);
+	nval += NRUN_AVG_INCR(rq->nr_running);
+
+	per_cpu(rq_avg_tasks, cpu) = nval;
+}
+
+static void do_nothing_to_task(struct task_struct *p) { }
+
+static inline void decr_throughput_bonus(struct task_struct *p)
+{
+	if (p->sdu.spa.auxilary_bonus > 0)
+		--p->sdu.spa.auxilary_bonus;
+}
+
+static inline void incr_throughput_bonus(struct task_struct *p, unsigned int n)
+{
+	if ((p->sdu.spa.auxilary_bonus + n) > max_tpt_bonus)
+		p->sdu.spa.auxilary_bonus = max_tpt_bonus;
+	else
+		p->sdu.spa.auxilary_bonus += n;
+}
+
+static int spa_svr_effective_prio(const struct task_struct *p)
+{
+	unsigned int bonus = MAX_TOTAL_BONUS;
+
+	/* No bonuses for tasks that have exceeded their cap. */
+	if (likely(!spa_exceeding_cpu_rate_cap(p)))
+		bonus -= p->sdu.spa.auxilary_bonus;
+
+	return p->static_prio + bonus;
+}
+
+static inline int spa_svr_soft_cap_effective_prio(const struct task_struct *p)
+{
+	return spa_pb_soft_cap_priority(p, p->static_prio + MAX_TOTAL_BONUS);
+}
+
+static void spa_svr_fork(struct task_struct *p)
+{
+	p->sdu.spa.auxilary_bonus = 0;
+}
+
+static void spa_svr_reassess_bonus(struct task_struct *p)
+{
+	unsigned long long expected_delay;
+	unsigned long long load;
+
+	load = per_cpu(rq_avg_tasks, task_cpu(p));
+	if (load <= NRUN_AVG_ONE)
+		expected_delay = 0;
+	else
+		expected_delay = NRUN_AVG_MUL(p->sdu.spa.avg_cpu_per_cycle,
+					      (load - NRUN_AVG_ONE));
+
+	if (p->sdu.spa.avg_delay_per_cycle > expected_delay) {
+		unsigned long acr;
+		unsigned long long n;
+
+		/*
+		 * Rounded integer average cpu per cycle should fit into even
+		 * a 32 bit long. Same is not necessarily true of delay times
+		 * so we're stuck with a 64 bit divide.
+		 */
+		acr = SPA_AVG_RND(p->sdu.spa.avg_cpu_per_cycle) ? : 1;
+		n = SPA_AVG_RND(p->sdu.spa.avg_delay_per_cycle - expected_delay);
+
+		(void)do_div(n, acr);
+		incr_throughput_bonus(p, n + 1);
+	} else
+		decr_throughput_bonus(p);
+}
+
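+/*
+ * A worked example: with an average of two runnable tasks on the CPU the
+ * expected delay is one average CPU burst per cycle.  A task that has
+ * actually been delayed for three bursts per cycle gets its throughput
+ * bonus bumped by (3 - 1) + 1 = 3 (capped at max_tpt_bonus), while a task
+ * delayed no more than expected loses one point per reassessment.
+ */
+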
+static struct sched_spa_child spa_svr_child = {
+	.soft_cap_effective_prio = spa_svr_soft_cap_effective_prio,
+	.normal_effective_prio = spa_svr_effective_prio,
+	.reassess_at_activation = spa_svr_reassess_bonus,
+	.fork_extras = spa_svr_fork,
+	.runq_data_tick = spa_svr_runq_data_tick,
+	.reassess_at_end_of_ts = spa_svr_reassess_bonus,
+	.reassess_at_sinbin_release = do_nothing_to_task,
+	.reassess_at_renice = do_nothing_to_task,
+};
+
+static void spa_svr_sched_init(void)
+{
+	int i;
+
+	spa_sched_init();
+	spa_sched_child = &spa_svr_child;
+
+	for (i = 0; i < NR_CPUS; i++)
+		per_cpu(rq_avg_tasks, i) = 0;
+}
+
+#include <linux/sched_pvt.h>
+
+#define no_change(a) (a)
+SCHED_DRV_SYSFS_UINT_RW_STATIC(max_tpt_bonus, no_change, no_change,
+			       0, MAX_MAX_TPT_BONUS);
+
+static struct attribute *spa_svr_attrs[] = {
+	&SCHED_DRV_SYSFS_ATTR(time_slice),
+	&SCHED_DRV_SYSFS_ATTR(sched_rr_time_slice),
+	&SCHED_DRV_SYSFS_ATTR(bgnd_time_slice_multiplier),
+	&SCHED_DRV_SYSFS_ATTR(base_prom_interval),
+	&SCHED_DRV_SYSFS_ATTR(max_tpt_bonus),
+	NULL,
+};
+
+const struct sched_drv spa_svr_sched_drv = {
+	.name = "spa_svr",
+	.init_runqueue_queue = spa_init_runqueue_queue,
+	.set_oom_time_slice = spa_set_oom_time_slice,
+#ifdef CONFIG_SMP
+	.set_load_weight = spa_set_load_weight,
+#endif
+	.task_timeslice = spa_task_timeslice,
+	.wake_up_task = spa_wake_up_task,
+	.fork = spa_fork,
+	.wake_up_new_task = spa_wake_up_new_task,
+	.exit = spa_exit,
+#ifdef CONFIG_SMP
+	.move_tasks = spa_move_tasks,
+#endif
+	.tick = spa_tick,
+#ifdef CONFIG_SCHED_SMT
+	.head_of_queue = spa_head_of_queue,
+	.dependent_sleeper_trumps = spa_dependent_sleeper_trumps,
+#endif
+	.schedule = spa_schedule,
+	.set_normal_task_nice = spa_set_normal_task_nice,
+	.init_batch_task = spa_init_batch_task,
+	.setscheduler = spa_setscheduler,
+	.sys_yield = spa_sys_yield,
+	.yield = spa_yield,
+	.init_idle = spa_init_idle,
+	.sched_init = spa_svr_sched_init,
+#ifdef CONFIG_SMP
+	.migrate_queued_task = spa_migrate_queued_task,
+#ifdef CONFIG_HOTPLUG_CPU
+	.set_select_idle_first = spa_set_select_idle_first,
+	.set_select_idle_last = spa_set_select_idle_last,
+	.migrate_dead_tasks = spa_migrate_dead_tasks,
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	.normalize_rt_task = spa_normalize_rt_task,
+#endif
+	.attrs = spa_svr_attrs,
+};
diff -urN oldtree/kernel/sched_spa_ws.c newtree/kernel/sched_spa_ws.c
--- oldtree/kernel/sched_spa_ws.c	1970-01-01 00:00:00.000000000 +0000
+++ newtree/kernel/sched_spa_ws.c	2006-03-08 18:56:30.071756500 +0000
@@ -0,0 +1,343 @@
+/*
+ *  kernel/sched_spa_ws.c
+ *
+ *  CPU scheduler mode
+ *
+ *  Copyright (C) 2004  Aurema Pty Ltd
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/sched.h>
+#include <linux/sched_spa.h>
+
+#define MIN_VAL(a, b)		((a) < (b) ? (a) : (b))
+#define MAX_TOTAL_BONUS		(SPA_BGND_PRIO - (MAX_RT_PRIO + 40) - 1)
+/* allow a slot for media streamers and 2 for wake up bonuses */
+#define MAX_MAX_IA_BONUS	((MAX_TOTAL_BONUS + 1) / 2)
+#define DEF_MAX_IA_BONUS 	MIN_VAL(MAX_MAX_IA_BONUS, 9)
+#define DEF_INITIAL_IA_BONUS	((DEF_MAX_IA_BONUS / 6) ? : 1)
+#define MAX_MAX_FAIRNESS_BONUS	(MAX_TOTAL_BONUS - MAX_MAX_IA_BONUS)
+#define DEF_MAX_FAIRNESS_BONUS	((DEF_MAX_IA_BONUS - 2) ? : 1)
+
+/* If the average sleep is extremely long the task is probably not
+ * interactive and is more likely something annoying like a log
+ * rotator, so let its interactive bonus die away.
+ */
+#define WS_BIG_SLEEP SPA_AVG_REAL(2 * 60 * 60LL * NSEC_PER_SEC)
+
+static unsigned int max_ia_bonus = DEF_MAX_IA_BONUS;
+static unsigned int initial_ia_bonus = DEF_INITIAL_IA_BONUS;
+
+#define LSHARES_AVG_OFFSET 7
+#define LSHARES_AVG_ALPHA ((1 << LSHARES_AVG_OFFSET) - 2)
+#define LSHARES_AVG_INCR(a) ((a) << 1)
+#define LSHARES_AVG_REAL(s) ((s) << LSHARES_AVG_OFFSET)
+#define LSHARES_ALMOST_ONE (LSHARES_AVG_REAL(1UL) - 1)
+#define LSHARES_AVG_MUL(a, b) (((a) * (b)) >> LSHARES_AVG_OFFSET)
+
+static unsigned int max_fairness_bonus = DEF_MAX_FAIRNESS_BONUS;
+
+static inline void decr_fairness_bonus(struct task_struct *p)
+{
+	if (p->sdu.spa.auxilary_bonus > 0)
+		--p->sdu.spa.auxilary_bonus;
+}
+
+static inline void incr_fairness_bonus(struct task_struct *p, unsigned int n)
+{
+	if ((p->sdu.spa.auxilary_bonus + n) > max_fairness_bonus)
+		p->sdu.spa.auxilary_bonus = max_fairness_bonus;
+	else
+		p->sdu.spa.auxilary_bonus += n;
+}
+
+static inline int fairness_bonus(const struct task_struct *p)
+{
+	return p->sdu.spa.auxilary_bonus;
+}
+
+static DEFINE_PER_CPU(unsigned long, rq_avg_lshares);
+
+static void spa_ws_runq_data_tick(unsigned int cpu, runqueue_t *rq)
+{
+	unsigned long nval = LSHARES_AVG_MUL(per_cpu(rq_avg_lshares, cpu),
+					  LSHARES_AVG_ALPHA);
+	nval += LSHARES_AVG_INCR(rq->qu.spa.nr_active_eb_shares);
+
+	per_cpu(rq_avg_lshares, cpu) = nval;
+}
+
+static void do_nothing_to_task(struct task_struct *p) { }
+
+/*
+ * Tasks more sleepy than this are considered interactive
+ */
+static unsigned int iab_incr_threshold = 900;
+
+/*
+ * Tasks less sleepy than this are considered NOT interactive
+ */
+static unsigned int iab_decr_threshold = 50;
+
+static inline int current_ia_bonus(const struct task_struct *p)
+{
+	return p->sdu.spa.interactive_bonus;
+}
+
+static inline void decr_interactive_bonus(struct task_struct *p)
+{
+	if (p->sdu.spa.interactive_bonus > 0)
+		--p->sdu.spa.interactive_bonus;
+}
+
+static inline void incr_interactive_bonus(struct task_struct *p)
+{
+	if (p->sdu.spa.interactive_bonus < max_ia_bonus)
+		++p->sdu.spa.interactive_bonus;
+	else
+		p->sdu.spa.interactive_bonus = max_ia_bonus;
+}
+
+static inline void partial_decr_interactive_bonus(struct task_struct *p)
+{
+	if (current_ia_bonus(p) > initial_ia_bonus)
+		decr_interactive_bonus(p);
+}
+
+static inline void partial_incr_interactive_bonus(struct task_struct *p)
+{
+	if (current_ia_bonus(p) < initial_ia_bonus)
+		incr_interactive_bonus(p);
+}
+
+static inline int bonuses(const struct task_struct *p)
+{
+	int bonuses = fairness_bonus(p);
+
+	if (p->policy != SCHED_BATCH)
+		bonuses += current_ia_bonus(p);
+
+	return bonuses;
+}
+
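+/*
+ * The effective priority is the static priority plus whatever part of the
+ * MAX_TOTAL_BONUS band the interactive and fairness bonuses have not
+ * claimed, so bigger bonuses give a numerically lower (better) dynamic
+ * priority and capped tasks get no bonus at all.
+ */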
+static int spa_ws_effective_prio(const struct task_struct *p)
+{
+	unsigned int bonus = MAX_TOTAL_BONUS;
+
+	/* interactive bonuses only count at wake up */
+	/* no bonuses for tasks that have exceeded their cap */
+	if (likely(!spa_exceeding_cpu_rate_cap(p)))
+		bonus -= bonuses(p);
+
+	return p->static_prio + bonus;
+}
+
+static inline int spa_ws_soft_cap_effective_prio(const struct task_struct *p)
+{
+	return spa_pb_soft_cap_priority(p, p->static_prio + MAX_TOTAL_BONUS);
+}
+
+static void spa_ws_fork(struct task_struct *p)
+{
+	p->sdu.spa.auxilary_bonus = 0;
+	/*
+	 * If this is a thread leave it with the same priority as the parent.
+	 * This ensures that media streamers that launch new threads for each
+	 * track have the new thread get off to a good start.
+	 */
+	if (p->tgid != p->pid)
+		return;
+
+	if (max_ia_bonus > initial_ia_bonus)
+		p->sdu.spa.interactive_bonus = initial_ia_bonus;
+	else
+		p->sdu.spa.interactive_bonus = max_ia_bonus;
+}
+
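+/*
+ * A task earns fairness bonus points when the delay it actually sees per
+ * scheduling cycle exceeds what it could reasonably expect given the share
+ * weighted number of runnable tasks (roughly avg_cpu_per_cycle multiplied
+ * by the number of competing share equivalents minus one), less any time it
+ * spent sleeping.  The excess delay, expressed as a multiple of the task's
+ * average CPU burst, determines how many points are added.
+ */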
+static void spa_ws_reassess_fairness_bonus(struct task_struct *p)
+{
+	unsigned long long expected_delay;
+	unsigned long long wanr; /* weighted average number running */
+	unsigned long lshares = max(LSHARES_AVG_REAL(task_rq(p)->qu.spa.nr_active_eb_shares), per_cpu(rq_avg_lshares, task_cpu(p)));
+
+	wanr = lshares / p->sdu.spa.eb_shares;
+	if (wanr <= LSHARES_ALMOST_ONE)
+		expected_delay = 0;
+	else
+		expected_delay = LSHARES_AVG_MUL(p->sdu.spa.avg_cpu_per_cycle,
+						(wanr - LSHARES_ALMOST_ONE));
+
+	if (p->sdu.spa.avg_sleep_per_cycle > expected_delay)
+		expected_delay = 0;
+	else
+		expected_delay -= p->sdu.spa.avg_sleep_per_cycle;
+
+	if (p->sdu.spa.avg_delay_per_cycle > expected_delay) {
+		unsigned long acr;
+		unsigned long long n;
+
+		/*
+		 * Rounded integer average cpu per cycle should fit into even
+		 * a 32 bit long. Same is not necessarily true of delay times
+		 * so we're stuck with a 64 bit divide.
+		 */
+		acr = SPA_AVG_RND(p->sdu.spa.avg_cpu_per_cycle) ? : 1;
+		n = SPA_AVG_RND(p->sdu.spa.avg_delay_per_cycle - expected_delay);
+
+		(void)do_div(n, acr);
+		incr_fairness_bonus(p, n + 1);
+	} else
+		decr_fairness_bonus(p);
+}
+
+static inline int spa_ws_eligible(struct task_struct *p)
+{
+	return p->sdu.spa.avg_sleep_per_cycle < WS_BIG_SLEEP;
+}
+
+static inline int spa_sleepiness_exceeds_ppt(const struct task_struct *p,
+					    unsigned int ppt)
+{
+	return RATIO_EXCEEDS_PPT(p->sdu.spa.avg_sleep_per_cycle,
+				 p->sdu.spa.avg_sleep_per_cycle +
+				 p->sdu.spa.avg_cpu_per_cycle,
+				 ppt);
+}
+
+static void spa_ws_reassess_at_activation(struct task_struct *p)
+{
+	spa_ws_reassess_fairness_bonus(p);
+	if (spa_ia_sleepiness_exceeds_ppt(p, iab_incr_threshold)) {
+		if (spa_ws_eligible(p))
+			incr_interactive_bonus(p);
+		else
+			partial_incr_interactive_bonus(p);
+	}
+	else if (!spa_sleepiness_exceeds_ppt(p, iab_decr_threshold))
+		decr_interactive_bonus(p);
+	else if (!spa_ia_sleepiness_exceeds_ppt(p, (iab_decr_threshold + iab_incr_threshold) / 2))
+		partial_decr_interactive_bonus(p);
+}
+
+static void spa_ws_reassess_at_end_of_ts(struct task_struct *p)
+{
+	if (p->sdu.spa.avg_sleep_per_cycle == 0)
+		p->sdu.spa.auxilary_bonus = 0;
+	else
+		spa_ws_reassess_fairness_bonus(p);
+	/* tasks that use a full time slice in their first CPU burst
+	 * lose their initial bonus and have to start from scratch
+	 */
+	if (p->sdu.spa.flags & SPAF_FIRST_RUN) {
+		p->sdu.spa.interactive_bonus = 0;
+		return;
+	}
+
+	/* Don't punish tasks that have done a lot of sleeping for the
+	 * occasional run of short sleeps unless they become a cpu hog.
+	 */
+	if (!spa_sleepiness_exceeds_ppt(p, iab_decr_threshold))
+		decr_interactive_bonus(p);
+	else if (!spa_ia_sleepiness_exceeds_ppt(p, (iab_decr_threshold + iab_incr_threshold) / 2))
+		partial_decr_interactive_bonus(p);
+}
+
+static struct sched_spa_child spa_ws_child = {
+	.soft_cap_effective_prio = spa_ws_soft_cap_effective_prio,
+	.normal_effective_prio = spa_ws_effective_prio,
+	.reassess_at_activation = spa_ws_reassess_at_activation,
+	.fork_extras = spa_ws_fork,
+	.runq_data_tick = spa_ws_runq_data_tick,
+	.reassess_at_end_of_ts = spa_ws_reassess_at_end_of_ts,
+	.reassess_at_sinbin_release = do_nothing_to_task,
+	.reassess_at_renice = do_nothing_to_task,
+};
+
+static void spa_ws_sched_init(void)
+{
+	spa_sched_init();
+	spa_sched_child = &spa_ws_child;
+}
+
+#include <linux/sched_pvt.h>
+
+#define no_change(a) (a)
+SCHED_DRV_SYSFS_UINT_RW_STATIC(max_ia_bonus, no_change, no_change,
+			       0, MAX_MAX_IA_BONUS);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(initial_ia_bonus, no_change, no_change,
+			       0, MAX_MAX_IA_BONUS);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(iab_incr_threshold, no_change, no_change,
+			       0, 1000);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(iab_decr_threshold, no_change, no_change,
+			       0, 1000);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(max_fairness_bonus, no_change, no_change,
+			       0, MAX_MAX_FAIRNESS_BONUS);
+
+static struct attribute *spa_ws_attrs[] = {
+	&SCHED_DRV_SYSFS_ATTR(time_slice),
+	&SCHED_DRV_SYSFS_ATTR(sched_rr_time_slice),
+	&SCHED_DRV_SYSFS_ATTR(bgnd_time_slice_multiplier),
+	&SCHED_DRV_SYSFS_ATTR(base_prom_interval),
+	&SCHED_DRV_SYSFS_ATTR(max_ia_bonus),
+	&SCHED_DRV_SYSFS_ATTR(initial_ia_bonus),
+	&SCHED_DRV_SYSFS_ATTR(iab_incr_threshold),
+	&SCHED_DRV_SYSFS_ATTR(iab_decr_threshold),
+	&SCHED_DRV_SYSFS_ATTR(max_fairness_bonus),
+	NULL,
+};
+
+const struct sched_drv spa_ws_sched_drv = {
+	.name = "spa_ws",
+	.init_runqueue_queue = spa_init_runqueue_queue,
+	.set_oom_time_slice = spa_set_oom_time_slice,
+#ifdef CONFIG_SMP
+	.set_load_weight = spa_set_load_weight,
+#endif
+	.task_timeslice = spa_task_timeslice,
+	.wake_up_task = spa_wake_up_task,
+	.fork = spa_fork,
+	.wake_up_new_task = spa_wake_up_new_task,
+	.exit = spa_exit,
+#ifdef CONFIG_SMP
+	.move_tasks = spa_move_tasks,
+#endif
+	.tick = spa_tick,
+#ifdef CONFIG_SCHED_SMT
+	.head_of_queue = spa_head_of_queue,
+	.dependent_sleeper_trumps = spa_dependent_sleeper_trumps,
+#endif
+	.schedule = spa_schedule,
+	.set_normal_task_nice = spa_set_normal_task_nice,
+	.init_batch_task = spa_init_batch_task,
+	.setscheduler = spa_setscheduler,
+	.sys_yield = spa_sys_yield,
+	.yield = spa_yield,
+	.init_idle = spa_init_idle,
+	.sched_init = spa_ws_sched_init,
+#ifdef CONFIG_SMP
+	.migrate_queued_task = spa_migrate_queued_task,
+#ifdef CONFIG_HOTPLUG_CPU
+	.set_select_idle_first = spa_set_select_idle_first,
+	.set_select_idle_last = spa_set_select_idle_last,
+	.migrate_dead_tasks = spa_migrate_dead_tasks,
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	.normalize_rt_task = spa_normalize_rt_task,
+#endif
+	.attrs = spa_ws_attrs,
+};
diff -urN oldtree/kernel/sched_zaphod.c newtree/kernel/sched_zaphod.c
--- oldtree/kernel/sched_zaphod.c	1970-01-01 00:00:00.000000000 +0000
+++ newtree/kernel/sched_zaphod.c	2006-03-08 18:56:30.075756750 +0000
@@ -0,0 +1,633 @@
+/*
+ *  kernel/sched_zaphod.c
+ *
+ *  CPU scheduler mode
+ *
+ *  Copyright (C) 2004  Aurema Pty Ltd
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+#include <linux/sched_spa.h>
+
+#include <asm/uaccess.h>
+
+#define MIN_NORMAL_PRIO	MAX_RT_PRIO
+#define ZAPHOD_MAX_PRIO	(MIN_NORMAL_PRIO + 40)
+#define IDLE_PRIO	SPA_IDLE_PRIO
+#define BGND_PRIO	SPA_BGND_PRIO
+#define TASK_ZD(p) (p)->sdu.spa
+#define MIN_RATE_CAP(p) (p)->sdu.spa.min_cpu_rate_cap
+
+#define EB_YARDSTICK_DECAY_INTERVAL 100
+
+struct zaphod_mode {
+	const char *name;
+	void (*calculate_pre_bonus_priority)(task_t *p);
+	int (*soft_cap_effective_prio)(const task_t *p);
+};
+
+static void calculate_pb_pre_bonus_priority(task_t *p);
+static void calculate_eb_pre_bonus_priority(task_t *p);
+static inline int pb_soft_cap_effective_prio(const task_t *p)
+{
+	return spa_pb_soft_cap_priority(p, TASK_ZD(p).pre_bonus_priority);
+}
+static int zaphod_effective_prio(const task_t *p);
+
+static const struct zaphod_mode zaphod_modes[] = {
+	{ .name = "pb",
+	  .calculate_pre_bonus_priority = calculate_pb_pre_bonus_priority,
+	  .soft_cap_effective_prio = pb_soft_cap_effective_prio,
+	},
+	{ .name = "eb",
+	  .calculate_pre_bonus_priority = calculate_eb_pre_bonus_priority,
+	  .soft_cap_effective_prio = zaphod_effective_prio,
+	},
+	{ .name = NULL, }       /* end of list marker */
+};
+
+static const struct zaphod_mode *zm = &zaphod_modes[0];
+
+struct sched_zaphod_runq_data {
+	unsigned long avg_nr_running;
+	atomic_t eb_yardstick;
+	atomic_t eb_ticks_to_decay;
+};
+
+static DEFINE_PER_CPU(struct sched_zaphod_runq_data, zaphod_runqs);
+#define cpu_zrq(cpu)	(&per_cpu(zaphod_runqs, cpu))
+#define task_zrq(p)	cpu_zrq(task_cpu(p))
+
+#define MAX_TOTAL_BONUS (BGND_PRIO - ZAPHOD_MAX_PRIO - 1)
+#define MAX_MAX_IA_BONUS ((MAX_TOTAL_BONUS + 1) / 2)
+#define MAX_MAX_TPT_BONUS (MAX_TOTAL_BONUS - MAX_MAX_IA_BONUS)
+#define DEFAULT_MAX_IA_BONUS ((MAX_MAX_IA_BONUS < 9) ? MAX_MAX_IA_BONUS : 9)
+#define DEFAULT_MAX_TPT_BONUS ((DEFAULT_MAX_IA_BONUS - 2) ? : 1)
+
+#define SCHED_IA_BONUS_OFFSET 8
+#define SCHED_IA_BONUS_ALPHA ((1 << SCHED_IA_BONUS_OFFSET) - 2)
+#define SCHED_IA_BONUS_INCR(a) ((a) << 1)
+#define SCHED_IA_BONUS_MUL(a, b) (((a) * (b)) >> SCHED_IA_BONUS_OFFSET)
+/*
+ * Get the rounded integer value of the interactive bonus
+ */
+#define SCHED_IA_BONUS_RND(x) \
+	(((x) + (1 << (SCHED_IA_BONUS_OFFSET - 1))) >> (SCHED_IA_BONUS_OFFSET))
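+/*
+ * The interactive bonus is held as a fixed point value scaled by 2^8.  Each
+ * reassessment decays it by 254/256 and, when the task still qualifies as
+ * interactive, adds 2 * max_ia_bonus, so a consistently interactive task's
+ * rounded bonus converges on max_ia_bonus.
+ */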
+
+static unsigned int max_ia_bonus = DEFAULT_MAX_IA_BONUS;
+static unsigned int max_max_ia_bonus = MAX_MAX_IA_BONUS;
+static unsigned int initial_ia_bonus = 5;
+static unsigned int max_tpt_bonus = DEFAULT_MAX_TPT_BONUS;
+static unsigned int max_max_tpt_bonus = MAX_MAX_TPT_BONUS;
+
+/*
+ * Tasks that have a CPU usage rate greater than this threshold (in parts per
+ * thousand) are considered to be CPU bound and start to lose interactive bonus
+ * points
+ */
+static unsigned long cpu_hog_threshold = 900;
+
+/*
+ * Tasks that would sleep for more than this many parts per thousand of the
+ * time if they had the CPU to themselves are considered to be interactive
+ * provided that their average sleep duration per scheduling cycle isn't too
+ * long
+ */
+static unsigned int ia_threshold = 900;
+#define LOWER_MAX_IA_SLEEP SPA_AVG_REAL(15 * 60LL * NSEC_PER_SEC)
+#define UPPER_MAX_IA_SLEEP SPA_AVG_REAL(2 * 60 * 60LL * NSEC_PER_SEC)
+
+static inline void decay_sched_ia_bonus(struct task_struct *p)
+{
+	TASK_ZD(p).interactive_bonus *= SCHED_IA_BONUS_ALPHA;
+	TASK_ZD(p).interactive_bonus >>= SCHED_IA_BONUS_OFFSET;
+}
+
+/*
+ * CPU rate statistics are estimated as proportions (i.e. real numbers in the
+ * range 0 to 1 inclusive) using fixed denominator rational numbers.
+ * The offset needs to be small enough that a proportion multiplied by a
+ * bonus (up to 20) still fits within a 32 bit integer.
+ */
+#define PROPORTION_OFFSET	26
+/* for static initializations */
+#define PROPORTION_ONE		(1UL << PROPORTION_OFFSET)
+#define PROP_FM_PPT(a) \
+	(((unsigned long long)(a) * PROPORTION_ONE) / 1000)
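+/*
+ * With PROPORTION_OFFSET of 26, PROPORTION_ONE is 2^26 and, for example,
+ * PROP_FM_PPT(990) is the fixed point form of 0.99.  With bonuses of up to
+ * 20 the largest product formed from a proportion is around 2^26 * 41,
+ * which still fits in an unsigned 32 bit value.
+ */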
+
+/*
+ * CPU usage rate is estimated as a proportion of a CPU using fixed denominator
+ * rational numbers.
+ */
+#define PROPORTION_OVERFLOW ((1ULL << (64 - PROPORTION_OFFSET)) - 1)
+
+static inline unsigned long long sched_div_64(unsigned long long a,
+					      unsigned long long b)
+{
+#if BITS_PER_LONG < 64
+	/*
+	 * Assume that there's no 64 bit divide available
+	 */
+	if (a < b)
+		return 0;
+	/*
+	 * Scale down until b less than 32 bits so that we can do
+	 * a divide using do_div()
+	 */
+	while (b > ULONG_MAX) { a >>= 1; b >>= 1; }
+
+	(void)do_div(a, (unsigned long)b);
+
+	return a;
+#else
+	return a / b;
+#endif
+}
+
+/*
+ * Convert a / b to a proportion in the range 0 to PROPORTION_ONE
+ * Requires a <= b or may get a divide by zero exception
+ */
+static unsigned long calc_proportion(unsigned long long a, unsigned long long b)
+{
+	if (unlikely(a == b))
+		return PROPORTION_ONE;
+
+	while (a > PROPORTION_OVERFLOW) { a >>= 1; b >>= 1; }
+
+	return sched_div_64(a << PROPORTION_OFFSET, b);
+}
+
+/* Multiply two proportions to give a proportion, or multiply a proportion
+ * by an integer to give an integer
+ */
+static inline  unsigned long proportion_mul(unsigned long a,
+					    unsigned long b)
+{
+	return ((unsigned long long)a * (unsigned long long)b) >> PROPORTION_OFFSET;
+}
+
+/*
+ * Map a proportion onto a small integer range (rounded)
+ * Require: range < 31 (to avoid overflow)
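+ * Note: p * (2r + 1) >> (PROPORTION_OFFSET + 1) is (p / PROPORTION_ONE)
+ * multiplied by (r + 0.5) and truncated, which amounts to exact rounding
+ * when p is PROPORTION_ONE.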
+ */
+static inline unsigned long map_proportion_rnd(unsigned long p,
+					       unsigned long r)
+{
+	return (p * ((r << 1) + 1)) >> (PROPORTION_OFFSET + 1);
+}
+
+/*
+ * Find the square root of a proportion
+ * Require: x <= PROPORTION_ONE
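+ * Uses the standard bit by bit method: b is the candidate bit of the root
+ * and temp is the amount the square would grow by if that bit were set,
+ * i.e. (res + b)^2 - res^2, evaluated in the fixed point frame.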
+ */
+static unsigned long proportion_sqrt(unsigned long x)
+{
+	/* use 64 bits to avoid overflow */
+	unsigned long long res, b, ulx;
+	int bshift;
+
+	/*
+	 * Take shortcut AND prevent overflow
+	 */
+	if (x == PROPORTION_ONE)
+		return PROPORTION_ONE;
+
+	res = 0;
+	b = (1UL << (PROPORTION_OFFSET - 1));
+	bshift = PROPORTION_OFFSET - 1;
+	ulx = x << PROPORTION_OFFSET;
+
+	for (; ulx && b; b >>= 1, bshift--) {
+		unsigned long long temp = (((res << 1) + b) << bshift);
+
+		if (ulx >= temp) {
+			res += b;
+			ulx -= temp;
+		}
+	}
+
+	return res;
+}
+
+static inline unsigned long avg_cpu_usage_rate(const struct task_struct *p)
+{
+	return calc_proportion(p->sdu.spa.avg_cpu_per_cycle, p->sdu.spa.avg_cycle_length);
+}
+
+/*
+ * Check whether a task with an interactive bonus still qualifies and if not
+ * decrease its bonus
+ * This never gets called on real time tasks
+ */
+static void reassess_cpu_boundness(task_t *p)
+{
+	if (p->policy == SCHED_BATCH || max_ia_bonus == 0) {
+		TASK_ZD(p).interactive_bonus = 0;
+		return;
+	}
+
+	if (spa_cpu_usage_rate_exceeds_ppt(p, cpu_hog_threshold))
+		decay_sched_ia_bonus(p);
+	else if (!spa_ia_sleepiness_exceeds_ppt(p, (1000 - cpu_hog_threshold))) {
+		unsigned long ia_sleepiness;
+		unsigned long long bl = p->sdu.spa.avg_cpu_per_cycle +
+			p->sdu.spa.avg_sleep_per_cycle;
+
+		ia_sleepiness = calc_proportion(p->sdu.spa.avg_ia_sleep_per_cycle, bl);
+		decay_sched_ia_bonus(p);
+		TASK_ZD(p).interactive_bonus +=
+			SCHED_IA_BONUS_INCR(map_proportion_rnd(ia_sleepiness,
+							       max_ia_bonus));
+	}
+}
+
+/*
+ * Check whether a task qualifies for an interactive bonus and if it does
+ * increase its bonus
+ * This never gets called on real time tasks
+ */
+static void reassess_interactiveness(task_t *p)
+{
+	if (p->policy == SCHED_BATCH || max_ia_bonus == 0) {
+		TASK_ZD(p).interactive_bonus = 0;
+		return;
+	}
+	/*
+	 * Sleep is normally what marks a task as interactive, but extremely
+	 * long average sleeps are more likely a periodic daemon, so don't
+	 * grow the bonus for them.
+	 */
+	if (unlikely(p->sdu.spa.avg_sleep_per_cycle > LOWER_MAX_IA_SLEEP)) {
+		/*
+		 * Really long sleeps mean it's probably not interactive
+		 */
+		if (unlikely(p->sdu.spa.avg_sleep_per_cycle > UPPER_MAX_IA_SLEEP))
+			decay_sched_ia_bonus(p);
+		return;
+	}
+
+	if (spa_ia_sleepiness_exceeds_ppt(p, ia_threshold)) {
+		decay_sched_ia_bonus(p);
+		TASK_ZD(p).interactive_bonus += SCHED_IA_BONUS_INCR(max_ia_bonus);
+	}
+}
+
+/*
+ * Check whether a task qualifies for a throughput bonus and if it does
+ * give it one
+ * This never gets called on real time tasks
+ */
+#define NRUN_AVG_OFFSET 7
+#define NRUN_AVG_ALPHA ((1 << NRUN_AVG_OFFSET) - 2)
+#define NRUN_AVG_INCR(a) ((a) << 1)
+#define NRUN_AVG_ONE (1UL << NRUN_AVG_OFFSET)
+#define NRUN_AVG_MUL(a, b) (((a) * (b)) >> NRUN_AVG_OFFSET)
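+/*
+ * avg_nr_running is another 2^7 scaled moving average: each tick it is
+ * multiplied by 126/128 and 2 * nr_running is added, so NRUN_AVG_ONE (128)
+ * corresponds to one runnable task at steady state.
+ */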
+static void recalc_throughput_bonus(task_t *p)
+{
+	unsigned long long ratio;
+	unsigned long long expected_delay;
+	unsigned long long adjusted_delay;
+	struct sched_zaphod_runq_data *zrq = task_zrq(p);
+	unsigned long long load = zrq->avg_nr_running;
+
+	TASK_ZD(p).auxilary_bonus = 0;
+	if (max_tpt_bonus == 0)
+		return;
+
+	if (load <= NRUN_AVG_ONE)
+		expected_delay = 0;
+	else
+		expected_delay = NRUN_AVG_MUL(p->sdu.spa.avg_cpu_per_cycle, (load - NRUN_AVG_ONE));
+
+	/*
+	 * No unexpected delay means no bonus.  NB this test also avoids a
+	 * possible divide by zero (when the unexpected delay and the average
+	 * cpu per cycle are both zero) and negative bonuses.
+	 */
+	if (p->sdu.spa.avg_delay_per_cycle <= expected_delay)
+		return;
+
+	adjusted_delay  = p->sdu.spa.avg_delay_per_cycle - expected_delay;
+	ratio = calc_proportion(adjusted_delay, adjusted_delay + p->sdu.spa.avg_cpu_per_cycle);
+	ratio = proportion_sqrt(ratio);
+	TASK_ZD(p).auxilary_bonus = map_proportion_rnd(ratio, max_tpt_bonus);
+}
+
+/*
+ * Calculate priority based priority (without bonuses).
+ * This never gets called on real time tasks
+ */
+static void calculate_pb_pre_bonus_priority(task_t *p)
+{
+	TASK_ZD(p).pre_bonus_priority = p->static_prio + MAX_TOTAL_BONUS;
+}
+
+/*
+ * We're just trying to protect the reading and writing of the yardstick.
+ * We're not too fussed about protecting the calculation so the following is
+ * adequate.
+ */
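+/*
+ * Every EB_YARDSTICK_DECAY_INTERVAL ticks the yardstick is decayed to 99%
+ * of its value and then raised to the current task's (possibly capped)
+ * usage per entitlement share if that is higher, so it tracks the busiest
+ * recent entitlement user on this CPU.
+ */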
+static inline void decay_eb_yardstick(struct sched_zaphod_runq_data *zrq)
+{
+	static const unsigned long decay_per_interval = PROP_FM_PPT(990);
+	unsigned long curry = atomic_read(&zrq->eb_yardstick);
+	unsigned long pny; /* potential new yardstick */
+	struct task_struct *p = current;
+
+	curry = proportion_mul(decay_per_interval, curry);
+	atomic_set(&zrq->eb_ticks_to_decay, EB_YARDSTICK_DECAY_INTERVAL);
+	if (unlikely(rt_task(p) || task_is_bgnd(p)))
+		goto out;
+	if (!spa_exceeding_cpu_rate_cap(p))
+		pny = avg_cpu_usage_rate(p) / TASK_ZD(p).eb_shares;
+	else
+		pny = MIN_RATE_CAP(p) / TASK_ZD(p).eb_shares;
+	if (pny > curry)
+		curry = pny;
+out:
+	if (unlikely(curry >= PROPORTION_ONE))
+		curry = PROPORTION_ONE - 1;
+	atomic_set(&zrq->eb_yardstick, curry);
+}
+
+/*
+ * Calculate entitlement based priority (without bonuses).
+ * This never gets called on real time tasks
+ */
+#define EB_PAR 19
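+/*
+ * Entitlement based pre bonus priority: a task's CPU usage rate per
+ * entitlement share is compared with the run queue's yardstick (roughly the
+ * highest such rate seen recently) and the ratio selects one of EB_PAR + 1
+ * slots starting at the best pre bonus priority, so heavier users per share
+ * get numerically higher (worse) values.  Tasks over their CPU rate cap are
+ * instead placed near the background priority, improved in proportion to
+ * cap / usage.
+ */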
+static void calculate_eb_pre_bonus_priority(task_t *p)
+{
+	/*
+	 * Prevent possible divide by zero and take shortcut
+	 */
+	if (unlikely(MIN_RATE_CAP(p) == 0)) {
+		TASK_ZD(p).pre_bonus_priority = BGND_PRIO - 1;
+	} else if (spa_exceeding_cpu_rate_cap(p)) {
+		struct sched_zaphod_runq_data *zrq = task_zrq(p);
+		unsigned long cap_per_share = MIN_RATE_CAP(p) / TASK_ZD(p).eb_shares;
+		unsigned long prop = calc_proportion(MIN_RATE_CAP(p), avg_cpu_usage_rate(p));
+
+		TASK_ZD(p).pre_bonus_priority = (BGND_PRIO - 1);
+		TASK_ZD(p).pre_bonus_priority -= map_proportion_rnd(prop, EB_PAR + 1);
+		if (cap_per_share > atomic_read(&zrq->eb_yardstick)) {
+			if (likely(cap_per_share < PROPORTION_ONE))
+				atomic_set(&zrq->eb_yardstick, cap_per_share);
+			else
+				atomic_set(&zrq->eb_yardstick, PROPORTION_ONE - 1);
+		}
+
+	} else {
+		struct sched_zaphod_runq_data *zrq = task_zrq(p);
+		unsigned long usage_per_share = avg_cpu_usage_rate(p) / TASK_ZD(p).eb_shares;
+
+		if (usage_per_share > atomic_read(&zrq->eb_yardstick)) {
+			if (likely(usage_per_share < PROPORTION_ONE))
+				atomic_set(&zrq->eb_yardstick, usage_per_share);
+			else
+				atomic_set(&zrq->eb_yardstick, PROPORTION_ONE - 1);
+			TASK_ZD(p).pre_bonus_priority = MAX_RT_PRIO + MAX_TOTAL_BONUS + EB_PAR;
+		} else {
+			unsigned long prop;
+
+			prop = calc_proportion(usage_per_share, atomic_read(&zrq->eb_yardstick));
+			TASK_ZD(p).pre_bonus_priority = MAX_RT_PRIO + MAX_TOTAL_BONUS;
+			TASK_ZD(p).pre_bonus_priority += map_proportion_rnd(prop, EB_PAR);
+		}
+	}
+}
+
+static inline void calculate_pre_bonus_priority(task_t *p)
+{
+	zm->calculate_pre_bonus_priority(p);
+}
+
+static void zaphod_init_cpu_runq_data(unsigned int cpu)
+{
+	struct sched_zaphod_runq_data *zrq = &per_cpu(zaphod_runqs, cpu);
+
+	zrq->avg_nr_running = 0;
+	atomic_set(&zrq->eb_yardstick, 0);
+	atomic_set(&zrq->eb_ticks_to_decay, EB_YARDSTICK_DECAY_INTERVAL + cpu);
+}
+
+struct sched_zaphod_runq_data *zaphod_cpu_runq_data(unsigned int cpu)
+{
+	return cpu_zrq(cpu);
+}
+
+static void zaphod_runq_data_tick(unsigned int cpu, runqueue_t *rq)
+{
+	struct sched_zaphod_runq_data *zrq = cpu_zrq(cpu);
+	unsigned long nval = NRUN_AVG_MUL(zrq->avg_nr_running, NRUN_AVG_ALPHA);
+	nval += NRUN_AVG_INCR(rq->nr_running);
+
+	zrq->avg_nr_running = nval;
+
+	if (atomic_dec_and_test(&zrq->eb_ticks_to_decay))
+		decay_eb_yardstick(zrq);
+}
+
+static void zaphod_fork(struct task_struct *p)
+{
+	TASK_ZD(p).interactive_bonus = (max_ia_bonus >= initial_ia_bonus) ?
+				initial_ia_bonus : max_ia_bonus;
+	TASK_ZD(p).interactive_bonus <<= SCHED_IA_BONUS_OFFSET;
+	TASK_ZD(p).auxilary_bonus =  0;
+}
+
+static int zaphod_effective_prio(const struct task_struct *p)
+{
+	unsigned int bonus = 0;
+
+	/* no bonuses for tasks that have exceeded their cap */
+	if (likely(!spa_exceeding_cpu_rate_cap(p))) {
+		/* No IA bonus when waking from (declared) non AI sleep */
+		if ((p->sdu.spa.flags & SPAF_NONIASLEEP) == 0)
+			bonus = SCHED_IA_BONUS_RND(TASK_ZD(p).interactive_bonus);
+		bonus += TASK_ZD(p).auxilary_bonus;
+	}
+
+	return TASK_ZD(p).pre_bonus_priority - bonus;
+}
+
+static inline int zaphod_soft_cap_effective_prio(const struct task_struct *p)
+{
+	return zm->soft_cap_effective_prio(p);
+}
+
+static void zaphod_reassess_at_activation(struct task_struct *p)
+{
+	recalc_throughput_bonus(p);
+	reassess_interactiveness(p);
+	calculate_pre_bonus_priority(p);
+}
+
+static void zaphod_reassess_at_end_of_ts(struct task_struct *p)
+{
+	recalc_throughput_bonus(p);
+	/* if a whole time slice gets used during the first or second
+	 * CPU burst then the initial interactive bonus is forfeit and the
+	 * task starts again from scratch trying to establish its interactive
+	 * bona fides
+	 */
+	if (p->sdu.spa.flags & SPAF_FIRST_RUN)
+		TASK_ZD(p).interactive_bonus = 0;
+	else
+		reassess_cpu_boundness(p);
+	/*
+	 * Interactive bonus is not updated here as long CPU bursts (greater
+	 * than a time slice) are atypical of interactive tasks
+	 */
+	calculate_pre_bonus_priority(p);
+}
+
+static void zaphod_reassess_at_sinbin_release(struct task_struct *p)
+{
+	calculate_pre_bonus_priority(p);
+}
+
+static void zaphod_reassess_at_renice(struct task_struct *p)
+{
+	if (!rt_task(p))
+		calculate_pre_bonus_priority(p);
+}
+
+struct sched_spa_child zaphod_child = {
+	.soft_cap_effective_prio = zaphod_soft_cap_effective_prio,
+	.normal_effective_prio = zaphod_effective_prio,
+	.reassess_at_activation = zaphod_reassess_at_activation,
+	.fork_extras = zaphod_fork,
+	.runq_data_tick = zaphod_runq_data_tick,
+	.reassess_at_end_of_ts = zaphod_reassess_at_end_of_ts,
+	.reassess_at_sinbin_release = zaphod_reassess_at_sinbin_release,
+	.reassess_at_renice = zaphod_reassess_at_renice,
+};
+
+static void zaphod_sched_init(void)
+{
+	int i;
+
+	spa_sched_init();
+
+	for (i = 0; i < NR_CPUS; i++)
+		zaphod_init_cpu_runq_data(i);
+
+	spa_sched_child = &zaphod_child;
+}
+
+#include <linux/sched_pvt.h>
+
+#define no_change(a) (a)
+SCHED_DRV_SYSFS_UINT_RW_STATIC(max_ia_bonus, no_change, no_change,
+			       0, max_max_ia_bonus);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(initial_ia_bonus, no_change, no_change,
+			       0, max_max_ia_bonus);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(max_tpt_bonus, no_change, no_change, 0,
+			       max_max_tpt_bonus);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(ia_threshold, no_change, no_change, 0, 1000);
+SCHED_DRV_SYSFS_UINT_RW_STATIC(cpu_hog_threshold, no_change, no_change,
+			       0, 1000);
+
+static ssize_t show_zaphod_mode(char *page)
+{
+	return sprintf(page, "%s\n", zm->name);
+}
+
+static ssize_t store_zaphod_mode(const char *page, size_t count)
+{
+	int i;
+	int clen = strlen(page);
+
+	{
+		char *nlp = strrchr(page, '\n');
+
+		if (nlp != NULL)
+			clen = nlp - page;
+	}
+
+	for (i = 0; zaphod_modes[i].name != NULL; i++)
+		if (strncmp(page, zaphod_modes[i].name, clen) == 0)
+			break;
+	if (zaphod_modes[i].name == NULL)
+		return -EINVAL;
+	else /* set the zaphod mode */
+		zm = &zaphod_modes[i];
+
+	return count;
+}
+
+struct sched_drv_sysfs_entry zaphod_mode_sdse = {
+	.attr = { .name = "mode", .mode = S_IRUGO | S_IWUSR },
+	.show = show_zaphod_mode,
+	.store = store_zaphod_mode,
+};
+
+static struct attribute *zaphod_attrs[] = {
+	&SCHED_DRV_SYSFS_ATTR(time_slice),
+	&SCHED_DRV_SYSFS_ATTR(sched_rr_time_slice),
+	&SCHED_DRV_SYSFS_ATTR(bgnd_time_slice_multiplier),
+	&SCHED_DRV_SYSFS_ATTR(base_prom_interval),
+	&SCHED_DRV_SYSFS_ATTR(max_ia_bonus),
+	&SCHED_DRV_SYSFS_ATTR(initial_ia_bonus),
+	&SCHED_DRV_SYSFS_ATTR(max_tpt_bonus),
+	&SCHED_DRV_SYSFS_ATTR(ia_threshold),
+	&SCHED_DRV_SYSFS_ATTR(cpu_hog_threshold),
+	&SCHED_DRV_SYSFS_ATTR(zaphod_mode),
+	NULL,
+};
+
+const struct sched_drv zaphod_sched_drv = {
+	.name = "zaphod",
+	.init_runqueue_queue = spa_init_runqueue_queue,
+	.set_oom_time_slice = spa_set_oom_time_slice,
+#ifdef CONFIG_SMP
+	.set_load_weight = spa_set_load_weight,
+#endif
+	.task_timeslice = spa_task_timeslice,
+	.wake_up_task = spa_wake_up_task,
+	.fork = spa_fork,
+	.wake_up_new_task = spa_wake_up_new_task,
+	.exit = spa_exit,
+#ifdef CONFIG_SMP
+	.move_tasks = spa_move_tasks,
+#endif
+	.tick = spa_tick,
+#ifdef CONFIG_SCHED_SMT
+	.head_of_queue = spa_head_of_queue,
+	.dependent_sleeper_trumps = spa_dependent_sleeper_trumps,
+#endif
+	.schedule = spa_schedule,
+	.set_normal_task_nice = spa_set_normal_task_nice,
+	.init_batch_task = spa_init_batch_task,
+	.setscheduler = spa_setscheduler,
+	.yield = spa_yield,
+	.sys_yield = spa_sys_yield,
+	.init_idle = spa_init_idle,
+	.sched_init = zaphod_sched_init,
+#ifdef CONFIG_SMP
+	.migrate_queued_task = spa_migrate_queued_task,
+#ifdef CONFIG_HOTPLUG_CPU
+	.set_select_idle_first = spa_set_select_idle_first,
+	.set_select_idle_last = spa_set_select_idle_last,
+	.migrate_dead_tasks = spa_migrate_dead_tasks,
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	.normalize_rt_task = spa_normalize_rt_task,
+#endif
+	.attrs = zaphod_attrs,
+};
diff -urN oldtree/kernel/staircase.c newtree/kernel/staircase.c
--- oldtree/kernel/staircase.c	1970-01-01 00:00:00.000000000 +0000
+++ newtree/kernel/staircase.c	2006-03-08 18:56:30.079757000 +0000
@@ -0,0 +1,1074 @@
+/*
+ *  kernel/staircase.c
+ *  Copyright (C) 2002-2006 Con Kolivas
+ *
+ * 2006-02-22 Staircase scheduler by Con Kolivas <kernel@kolivas.org>
+ *            Staircase v14.1
+ */
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/rcupdate.h>
+#include <linux/security.h>
+#include <linux/cpu.h>
+#include <linux/hardirq.h>
+#include <linux/sched_pvt.h>
+#include <linux/sched_runq.h>
+
+/*
+ * Unique staircase process flags used by scheduler.
+ */
+#define SF_NONSLEEP	0x00000001	/* Waiting on in kernel activity */
+
+static void staircase_init_runqueue_queue(union runqueue_queue *qup)
+{
+	int k;
+
+	qup->staircase.cache_ticks = 0;
+	qup->staircase.preempted = 0;
+
+	for (k = 0; k < STAIRCASE_MAX_PRIO; k++) {
+		INIT_LIST_HEAD(qup->staircase.queue + k);
+		__clear_bit(k, qup->staircase.bitmap);
+	}
+	// delimiter for bitsearch
+	__set_bit(STAIRCASE_MAX_PRIO, qup->staircase.bitmap);
+}
+
+static void staircase_set_oom_time_slice(struct task_struct *p,
+	unsigned long t)
+{
+	p->sdu.staircase.slice = p->sdu.staircase.time_slice = t;
+}
+
+/*
+ * 'User priority' is the nice value converted to something we
+ * can work with better when scaling various scheduler parameters,
+ * it's a [ 0 ... 39 ] range.
+ */
+#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
+#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
+#define MAX_USER_PRIO		(USER_PRIO(STAIRCASE_MAX_PRIO))
+
+/*
+ * Some helpers for converting nanosecond timing to jiffy resolution
+ */
+#define NS_TO_JIFFIES(TIME)	((TIME) / (1000000000 / HZ))
+#define JIFFIES_TO_NS(TIME)	((TIME) * (1000000000 / HZ))
+#define NSJIFFY			(1000000000 / HZ)	/* One jiffy in ns */
+
+int sched_compute __read_mostly = 0;
+/*
+ * This is the time all tasks within the same priority round robin.
+ * The compute setting is reserved for dedicated computational scheduling
+ * and uses seventeen times larger intervals. Set to a minimum of 6ms.
+ */
+#define _RR_INTERVAL		((6 * HZ / 1001) + 1)
+#define RR_INTERVAL()		(_RR_INTERVAL * (1 + 16 * sched_compute))
+#define DEF_TIMESLICE		(RR_INTERVAL() * 19)
+
+#define TASK_PREEMPTS_CURR(p, rq) \
+	((p)->prio < (rq)->curr->prio)
+
+/*
+ * Get nanosecond clock difference without overflowing unsigned long.
+ */
+static unsigned long ns_diff(const unsigned long long v1,
+	const unsigned long long v2)
+{
+	unsigned long long vdiff;
+	if (likely(v1 > v2)) {
+		vdiff = v1 - v2;
+#if BITS_PER_LONG < 64
+		if (vdiff > (1UL << 31))
+			vdiff = 1UL << 31;
+#endif
+	} else {
+		/*
+		 * Rarely the clock appears to go backwards. There should
+		 * always be a positive difference so return 1.
+		 */
+		vdiff = 1;
+	}
+	return (unsigned long)vdiff;
+}
+
+/*
+ * Adding/removing a task to/from a priority array:
+ */
+static inline void dequeue_task(struct task_struct *p,
+	struct staircase_runqueue_queue *rqq)
+{
+	list_del_init(&p->run_list);
+	if (list_empty(rqq->queue + p->prio))
+		__clear_bit(p->prio, rqq->bitmap);
+	p->sdu.staircase.ns_debit = 0;
+}
+
+static void enqueue_task(struct task_struct *p,
+	struct staircase_runqueue_queue *rqq)
+{
+	sched_info_queued(p);
+	list_add_tail(&p->run_list, rqq->queue + p->prio);
+	__set_bit(p->prio, rqq->bitmap);
+}
+
+static inline void requeue_task(struct task_struct *p,
+	struct staircase_runqueue_queue *rq)
+{
+	list_move_tail(&p->run_list, rq->queue + p->prio);
+}
+
+/*
+ * Used by the migration code - we pull tasks from the head of the
+ * remote queue so we want these tasks to show up at the head of the
+ * local queue:
+ */
+static inline void enqueue_task_head(struct task_struct *p,
+	struct staircase_runqueue_queue *rqq)
+{
+	list_add(&p->run_list, rqq->queue + p->prio);
+	__set_bit(p->prio, rqq->bitmap);
+}
+
+/*
+ * __activate_task - move a task to the runqueue.
+ */
+static inline void __activate_task(task_t *p, runqueue_t *rq)
+{
+	enqueue_task(p, &rq->qu.staircase);
+	inc_nr_running(p, rq);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * __activate_idle_task - move idle task to the _front_ of runqueue.
+ */
+static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
+{
+	enqueue_task_head(p, &rq->qu.staircase);
+	inc_nr_running(p, rq);
+}
+#endif
+
+/*
+ * Bonus - How much higher than its base priority an interactive task can run.
+ */
+static inline unsigned int bonus(const task_t *p)
+{
+	return TASK_USER_PRIO(p);
+}
+
+static unsigned int fastcall rr_interval(const task_t * p)
+{
+	unsigned int rr_interval = RR_INTERVAL();
+	int nice = TASK_NICE(p);
+
+	if (nice < 0 && !rt_task(p))
+		rr_interval += -(nice);
+
+	return rr_interval;
+}
+
+/*
+ * slice - the duration a task runs before getting requeued at its best
+ * priority and has its bonus decremented.
+ */
+static unsigned int slice(const task_t *p)
+{
+	unsigned int slice, rr;
+
+	slice = rr = rr_interval(p);
+	if (likely(!rt_task(p)))
+		slice += (39 - TASK_USER_PRIO(p)) * rr;
+	return slice;
+}
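+/*
+ * For example, at HZ=1000 with sched_compute off RR_INTERVAL() is 6 jiffies,
+ * so a nice 0 task gets a 6ms round robin interval and a total slice of
+ * 6 + 19 * 6 = 120 jiffies before its bonus is decremented.
+ */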
+
+#ifdef CONFIG_SMP
+/*
+ * To aid in avoiding the subversion of "niceness" due to uneven distribution
+ * of tasks with abnormal "nice" values across CPUs the contribution that
+ * each task makes to its run queue's load is weighted according to its
+ * scheduling class and "nice" value.
+ */
+
+static unsigned int static_prio_timeslice(const int sp)
+{
+	unsigned int ts, nice = PRIO_TO_NICE(sp);
+
+	ts = RR_INTERVAL() - nice;
+	ts += (19 - nice) * ts;
+
+	return ts;
+}
+
+#define TIME_SLICE_NICE_ZERO (20 * RR_INTERVAL())
+#define LOAD_WEIGHT(lp) \
+	(((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
+#define PRIO_TO_LOAD_WEIGHT(prio) \
+	LOAD_WEIGHT(static_prio_timeslice(prio))
+#define RTPRIO_TO_LOAD_WEIGHT(rp) \
+	(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
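+/*
+ * static_prio_timeslice() yields 20 * RR_INTERVAL() for nice 0, i.e.
+ * TIME_SLICE_NICE_ZERO, so LOAD_WEIGHT() gives a nice 0 task exactly
+ * SCHED_LOAD_SCALE and other nice values scale in proportion to their
+ * notional timeslice.
+ */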
+
+static inline void staircase_set_load_weight(task_t *p)
+{
+	if (rt_task(p)) {
+		if (p == task_rq(p)->migration_thread)
+			/*
+			 * The migration thread does the actual balancing.
+			 * Giving its load any weight will skew balancing
+			 * adversely.
+			 */
+			p->load_weight = 0;
+		else
+			p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
+	} else
+		p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
+}
+#else
+static inline void staircase_set_load_weight(task_t *p)
+{
+}
+#endif
+
+/*
+ * We increase our bonus by sleeping more than the time we ran.
+ * The ratio of sleep to run gives us the cpu% that we last ran and determines
+ * the maximum bonus we can acquire.
+ */
+static void inc_bonus(task_t *p, const unsigned long totalrun,
+	const unsigned long sleep)
+{
+	unsigned int best_bonus;
+
+	best_bonus = sleep / (totalrun + 1);
+	if (p->sdu.staircase.bonus >= best_bonus)
+		return;
+
+	p->sdu.staircase.bonus++;
+	best_bonus = bonus(p);
+	if (p->sdu.staircase.bonus > best_bonus)
+		p->sdu.staircase.bonus = best_bonus;
+}
+
+static void dec_bonus(task_t *p)
+{
+	if (p->sdu.staircase.bonus)
+		p->sdu.staircase.bonus--;
+}
+
+/*
+ * sched_interactive - tunable which allows interactive tasks to have bonuses
+ */
+int sched_interactive __read_mostly = 1;
+
+/*
+ * effective_prio - dynamic priority dependent on bonus.
+ * The priority normally decreases by one each RR_INTERVAL.
+ * As the bonus increases the initial priority starts at a higher "stair" or
+ * priority for longer.
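+ * e.g. a nice 0 task with no interactive bonus starts each slice at
+ * MAX_RT_PRIO + 20 and moves one step closer to STAIRCASE_MAX_PRIO - 1 for
+ * every rr_interval() of that slice it has already used.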
+ */
+static int effective_prio(task_t *p)
+{
+	int prio;
+	unsigned int full_slice, used_slice = 0;
+	unsigned int best_bonus, rr;
+
+	if (rt_task(p))
+		return p->prio;
+
+	full_slice = slice(p);
+	if (full_slice > p->sdu.staircase.slice)
+		used_slice = full_slice - p->sdu.staircase.slice;
+
+	best_bonus = bonus(p);
+	prio = MAX_RT_PRIO + best_bonus;
+	if (sched_interactive && !sched_compute && p->policy != SCHED_BATCH)
+		prio -= p->sdu.staircase.bonus;
+
+	rr = rr_interval(p);
+	prio += used_slice / rr;
+	if (prio > STAIRCASE_MAX_PRIO - 1)
+		prio = STAIRCASE_MAX_PRIO - 1;
+	return prio;
+}
+
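+/*
+ * continue_slice - rather than granting a completely fresh slice, deduct
+ * the run time accumulated since the last full slice from the newly granted
+ * one so the task effectively resumes where it left off; if a whole slice's
+ * worth has already been used a bonus point is forfeited instead.
+ */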
+static inline void continue_slice(task_t *p)
+{
+	unsigned long total_run = NS_TO_JIFFIES(p->sdu.staircase.totalrun);
+
+	if (total_run >= p->sdu.staircase.slice) {
+ 		p->sdu.staircase.totalrun -=
+ 			JIFFIES_TO_NS(p->sdu.staircase.slice);
+		dec_bonus(p);
+	} else {
+		unsigned int remainder;
+
+		p->sdu.staircase.slice -= total_run;
+		remainder = p->sdu.staircase.slice % rr_interval(p);
+		if (remainder)
+			p->sdu.staircase.time_slice = remainder;
+ 	}
+}
+
+/*
+ * recalc_task_prio - this checks for tasks that run ultra short timeslices
+ * or have just forked a thread/process and make them continue their old
+ * slice instead of starting a new one at high priority.
+ */
+static inline void recalc_task_prio(task_t *p, const unsigned long long now)
+{
+	unsigned long sleep_time = ns_diff(now, p->timestamp);
+
+	/*
+	 * Add the total for this last scheduled run (p->runtime) to the
+	 * running total so far used (p->totalrun).
+	 */
+	p->sdu.staircase.totalrun += p->sdu.staircase.runtime;
+
+	/*
+	 * If we sleep longer than our running total and have not set the
+	 * SF_NONSLEEP flag we gain a bonus.
+	 */
+	if (sleep_time >= p->sdu.staircase.totalrun &&
+		!(p->sdu.staircase.sflags & SF_NONSLEEP) &&
+		!sched_compute) {
+			inc_bonus(p, p->sdu.staircase.totalrun, sleep_time);
+			p->sdu.staircase.totalrun = 0;
+			return;
+	}
+
+	/*
+	 * If we have not set the SF_NONSLEEP flag we elevate priority by the
+	 * amount of time we slept.
+	 */
+	if (p->sdu.staircase.sflags & SF_NONSLEEP)
+		p->sdu.staircase.sflags &= ~SF_NONSLEEP;
+	else
+		p->sdu.staircase.totalrun -= sleep_time;
+
+	continue_slice(p);
+}
+
+/*
+ * activate_task - move a task to the runqueue and do priority recalculation
+ *
+ * Update all the scheduling statistics stuff. (sleep average
+ * calculation, priority modifiers, etc.)
+ */
+static void activate_task(task_t *p, runqueue_t *rq, const int local)
+{
+	unsigned long long now = sched_clock();
+	unsigned long rr = rr_interval(p);
+
+#ifdef CONFIG_SMP
+	if (!local) {
+		/* Compensate for drifting sched_clock */
+		runqueue_t *this_rq = this_rq();
+		now = (now - this_rq->timestamp_last_tick)
+			+ rq->timestamp_last_tick;
+	}
+#endif
+	p->sdu.staircase.slice = slice(p);
+	p->sdu.staircase.time_slice = p->sdu.staircase.slice % rr ? : rr;
+	if (!rt_task(p)) {
+		recalc_task_prio(p, now);
+		p->sdu.staircase.sflags &= ~SF_NONSLEEP;
+		p->prio = effective_prio(p);
+	}
+	p->timestamp = now;
+	__activate_task(p, rq);
+}
+
+/*
+ * deactivate_task - remove a task from the runqueue.
+ */
+static void fastcall deactivate_task(task_t *p, runqueue_t *rq)
+{
+	dec_nr_running(p, rq);
+	dequeue_task(p, &rq->qu.staircase);
+}
+
+/*
+ * CACHE_DELAY is the time preemption is delayed in sched_compute mode
+ * and is set to a nominal 10ms.
+ */
+#define CACHE_DELAY	(10 * (HZ) / 1001 + 1)
+
+/*
+ * Check to see if p preempts rq->curr and resched if it does. In compute
+ * mode we do not preempt for at least CACHE_DELAY and set rq->preempted.
+ */
+static void fastcall preempt(const task_t *p, runqueue_t *rq)
+{
+	if (p->prio >= rq->curr->prio)
+		return;
+	if (!sched_compute || rq->qu.staircase.cache_ticks >= CACHE_DELAY ||
+		!p->mm || rt_task(p))
+			resched_task(rq->curr);
+	rq->qu.staircase.preempted = 1;
+}
+
+/***
+ * staircase_wake_up_task - wake up a thread
+ * @p: the to-be-woken-up thread
+ * @old_state: the task's state before being woken
+ * @sync: do a synchronous wakeup?
+ * @rq: The run queue on which the task is to be placed (already locked)
+ */
+static void staircase_wake_up_task(task_t *p, runqueue_t *rq,
+	unsigned int old_state, const int sync)
+{
+	int same_cpu = (rq == this_rq());
+
+	if (old_state == TASK_UNINTERRUPTIBLE)
+		rq->nr_uninterruptible--;
+
+	/*
+	 * Sync wakeups (i.e. those types of wakeups where the waker
+	 * has indicated that it will leave the CPU in short order)
+	 * don't trigger a preemption, if the woken up task will run on
+	 * this cpu. (in this case the 'I will reschedule' promise of
+	 * the waker guarantees that the freshly woken up task is going
+	 * to be considered on this CPU.)
+	 */
+	activate_task(p, rq, same_cpu);
+	if (!sync || !same_cpu)
+		preempt(p, rq);
+}
+
+/*
+ * Perform scheduler related setup for a newly forked process p.
+ * p is forked by current.
+ */
+static void staircase_fork(task_t *__unused)
+{
+}
+
+/*
+ * wake_up_new_task - wake up a newly created task for the first time.
+ *
+ * This function will do some initial scheduler statistics housekeeping
+ * that must be done for every newly created context, then puts the task
+ * on the runqueue and wakes it.
+ */
+static void staircase_wake_up_new_task(task_t *p,
+	const unsigned long clone_flags)
+{
+	unsigned long flags;
+	int this_cpu, cpu;
+	runqueue_t *rq, *this_rq;
+
+	rq = task_rq_lock(p, &flags);
+	BUG_ON(p->state != TASK_RUNNING);
+	this_cpu = smp_processor_id();
+	cpu = task_cpu(p);
+
+	/*
+	 * Forked process gets no bonus to prevent fork bombs.
+	 */
+	p->sdu.staircase.bonus = 0;
+
+	if (likely(cpu == this_cpu)) {
+		current->sdu.staircase.sflags |= SF_NONSLEEP;
+		activate_task(p, rq, 1);
+		if (!(clone_flags & CLONE_VM))
+			/*
+			 * The VM isn't cloned, so we're in a good position to
+			 * do child-runs-first in anticipation of an exec. This
+			 * usually avoids a lot of COW overhead.
+			 */
+			set_need_resched();
+		/*
+		 * We skip the following code due to cpu == this_cpu
+	 	 *
+		 *   task_rq_unlock(rq, &flags);
+		 *   this_rq = task_rq_lock(current, &flags);
+		 */
+		this_rq = rq;
+	} else {
+		this_rq = cpu_rq(this_cpu);
+
+		/*
+		 * Not the local CPU - must adjust timestamp. This should
+		 * get optimised away in the !CONFIG_SMP case.
+		 */
+		p->timestamp = (p->timestamp - this_rq->timestamp_last_tick)
+					+ rq->timestamp_last_tick;
+		activate_task(p, rq, 0);
+		preempt(p, rq);
+
+		/*
+		 * Parent and child are on different CPUs; now lock the parent's
+		 * runqueue so its SF_NONSLEEP flag can be set:
+		 */
+		task_rq_unlock(rq, &flags);
+		this_rq = task_rq_lock(current, &flags);
+		current->sdu.staircase.sflags |= SF_NONSLEEP;
+	}
+
+	task_rq_unlock(this_rq, &flags);
+}
+
+/*
+ * Potentially available exiting-child timeslices are
+ * retrieved here - this way the parent does not get
+ * penalized for creating too many threads.
+ *
+ * (this cannot be used to 'generate' timeslices
+ * artificially, because any timeslice recovered here
+ * was given away by the parent in the first place.)
+ */
+static void staircase_exit(task_t *__unused)
+{
+}
+
+#ifdef CONFIG_SMP
+/*
+ * pull_task - move a task from a remote runqueue to the local runqueue.
+ * Both runqueues must be locked.
+ */
+static void pull_task(runqueue_t *src_rq, task_t *p, runqueue_t *this_rq,
+	const int this_cpu)
+{
+	dequeue_task(p, &src_rq->qu.staircase);
+	dec_nr_running(p, src_rq);
+	set_task_cpu(p, this_cpu);
+	inc_nr_running(p, this_rq);
+	enqueue_task(p, &this_rq->qu.staircase);
+	p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
+				+ this_rq->timestamp_last_tick;
+	/*
+	 * Note that idle threads have a prio of STAIRCASE_MAX_PRIO so the
+	 * preemption test in preempt() always succeeds against them.
+	 */
+	preempt(p, this_rq);
+}
+
+/*
+ * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq,
+ * as part of a balancing operation within "domain". Returns the number of
+ * tasks moved.
+ *
+ * Called with both runqueues locked.
+ */
+static int staircase_move_tasks(runqueue_t *this_rq, const int this_cpu,
+	runqueue_t *busiest, unsigned long max_nr_move, unsigned long max_load_move,
+	struct sched_domain *sd, const enum idle_type idle, int *all_pinned)
+{
+	struct list_head *head, *curr;
+	int idx, pulled = 0, pinned = 0;
+	long rem_load_move;
+	task_t *tmp;
+
+	if (max_nr_move == 0 || max_load_move == 0)
+		goto out;
+
+	rem_load_move = max_load_move;
+	pinned = 1;
+
+	/* Start searching at priority 0: */
+	idx = 0;
+skip_bitmap:
+	if (!idx)
+		idx = sched_find_first_bit(busiest->qu.staircase.bitmap);
+	else
+		idx = find_next_bit(busiest->qu.staircase.bitmap,
+			STAIRCASE_MAX_PRIO, idx);
+	if (idx >= STAIRCASE_MAX_PRIO)
+		goto out;
+
+	head = busiest->qu.staircase.queue + idx;
+	curr = head->prev;
+skip_queue:
+	tmp = list_entry(curr, task_t, run_list);
+
+	curr = curr->prev;
+
+	if (tmp->load_weight > rem_load_move ||
+	    !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+
+#ifdef CONFIG_SCHEDSTATS
+	if (task_hot(tmp, busiest->timestamp_last_tick, sd))
+		schedstat_inc(sd, lb_hot_gained[idle]);
+#endif
+
+	pull_task(busiest, tmp, this_rq, this_cpu);
+	pulled++;
+	rem_load_move -= tmp->load_weight;
+
+	/*
+	 * We only want to steal up to the prescribed number of tasks
+	 * and the prescribed amount of biased load.
+	 */
+	if (pulled < max_nr_move && rem_load_move > 0) {
+		if (curr != head)
+			goto skip_queue;
+		idx++;
+		goto skip_bitmap;
+	}
+out:
+	if (all_pinned)
+		*all_pinned = pinned;
+
+	return pulled;
+}
+#endif
+
+static void time_slice_expired(task_t *p, runqueue_t *rq)
+{
+	struct staircase_runqueue_queue *rqq = &rq->qu.staircase;
+
+	set_tsk_need_resched(p);
+	dequeue_task(p, rqq);
+	p->prio = effective_prio(p);
+	p->sdu.staircase.time_slice = rr_interval(p);
+	enqueue_task(p, rqq);
+}
+
+/*
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled.
+ */
+static void staircase_tick(struct task_struct *p, struct runqueue *rq,
+	unsigned long long now)
+{
+	int cpu = smp_processor_id();
+	unsigned long debit, expired_balance = rq->nr_running;
+
+	if (p == rq->idle) {
+		if (wake_priority_sleeper(rq))
+			goto out;
+		rebalance_tick(cpu, rq, SCHED_IDLE);
+		return;
+	}
+
+	/* Task might have expired already, but not scheduled off yet */
+	if (unlikely(!task_is_queued(p))) {
+		set_tsk_need_resched(p);
+		goto out;
+	}
+
+	/*
+	 * SCHED_FIFO tasks never run out of timeslice.
+	 */
+	if (unlikely(p->policy == SCHED_FIFO)) {
+		expired_balance = 0;
+		goto out;
+	}
+
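+	/*
+	 * Track the clock offset between this task's timestamp and the last
+	 * tick as debit; slice and time_slice below are only consumed once at
+	 * least a full jiffy of debit has accumulated.
+	 */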
+	spin_lock(&rq->lock);
+	debit = ns_diff(rq->timestamp_last_tick, p->timestamp);
+	p->sdu.staircase.ns_debit += debit;
+	if (p->sdu.staircase.ns_debit < NSJIFFY)
+		goto out_unlock;
+	p->sdu.staircase.ns_debit %= NSJIFFY;
+	/*
+	 * Tasks lose bonus each time they use up a full slice().
+	 */
+	if (!--p->sdu.staircase.slice) {
+		dec_bonus(p);
+		p->sdu.staircase.slice = slice(p);
+		time_slice_expired(p, rq);
+		p->sdu.staircase.totalrun = 0;
+		goto out_unlock;
+	}
+	/*
+	 * Tasks that run out of time_slice but still have slice left get
+	 * requeued with a lower priority and an RR_INTERVAL time_slice.
+	 */
+	if (!--p->sdu.staircase.time_slice) {
+		time_slice_expired(p, rq);
+		goto out_unlock;
+	}
+	rq->qu.staircase.cache_ticks++;
+	if (rq->qu.staircase.preempted &&
+		rq->qu.staircase.cache_ticks >= CACHE_DELAY) {
+		set_tsk_need_resched(p);
+		goto out_unlock;
+	}
+	expired_balance = 0;
+out_unlock:
+	spin_unlock(&rq->lock);
+out:
+	if (expired_balance > 1)
+		rebalance_tick(cpu, rq, NOT_IDLE);
+}
+
+#ifdef CONFIG_SCHED_SMT
+static struct task_struct *staircase_head_of_queue(union runqueue_queue *rqq)
+{
+	return list_entry(rqq->staircase.queue[sched_find_first_bit(rqq->staircase.bitmap)].next,
+		task_t, run_list);
+}
+
+static int staircase_dependent_sleeper_trumps(const struct task_struct *p1,
+	const struct task_struct * p2, struct sched_domain *sd)
+{
+	return (p1->sdu.staircase.time_slice * (100 - sd->per_cpu_gain) /
+		100) > slice(p2);
+}
+#endif
+
+/*
+ * schedule() is the main scheduler function.
+ */
+static void staircase_schedule(void)
+{
+	long *switch_count;
+	int cpu, idx;
+	struct task_struct *prev = current, *next;
+	struct runqueue *rq = this_rq();
+	unsigned long long now = sched_clock();
+	unsigned long debit;
+	struct list_head *queue;
+
+	spin_lock_irq(&rq->lock);
+
+	prev->sdu.staircase.runtime = ns_diff(now, prev->timestamp);
+	debit = ns_diff(now, rq->timestamp_last_tick) % NSJIFFY;
+	prev->sdu.staircase.ns_debit += debit;
+
+	if (unlikely(current->flags & PF_DEAD))
+		current->state = EXIT_DEAD;
+	/*
+	 * if entering off of a kernel preemption go straight
+	 * to picking the next task.
+	 */
+	switch_count = &prev->nivcsw;
+	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+		switch_count = &prev->nvcsw;
+		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+				unlikely(signal_pending(prev))))
+			prev->state = TASK_RUNNING;
+		else {
+			if (prev->state == TASK_UNINTERRUPTIBLE) {
+				rq->nr_uninterruptible++;
+				prev->sdu.staircase.sflags |= SF_NONSLEEP;
+			}
+			deactivate_task(prev, rq);
+		}
+	}
+
+	cpu = smp_processor_id();
+	if (unlikely(!rq->nr_running)) {
+go_idle:
+		idle_balance(cpu, rq);
+		if (!rq->nr_running) {
+			next = rq->idle;
+			wake_sleeping_dependent(cpu, rq);
+			/*
+			 * wake_sleeping_dependent() might have released
+			 * the runqueue, so break out if we got new
+			 * tasks meanwhile:
+			 */
+			if (!rq->nr_running)
+				goto switch_tasks;
+		}
+	} else {
+		if (dependent_sleeper(cpu, rq)) {
+			next = rq->idle;
+			goto switch_tasks;
+		}
+		/*
+		 * dependent_sleeper() releases and reacquires the runqueue
+		 * lock, hence go into the idle loop if the rq went
+		 * empty meanwhile:
+		 */
+		if (unlikely(!rq->nr_running))
+			goto go_idle;
+	}
+
+	idx = sched_find_first_bit(rq->qu.staircase.bitmap);
+	queue = rq->qu.staircase.queue + idx;
+	next = list_entry(queue->next, task_t, run_list);
+
+switch_tasks:
+	if (next == rq->idle)
+		schedstat_inc(rq, sched_goidle);
+	prev->timestamp = now;
+
+	prefetch(next);
+	prefetch_stack(next);
+	clear_tsk_need_resched(prev);
+	rcu_qsctr_inc(task_cpu(prev));
+
+	update_cpu_clock(prev, rq, now);
+
+	sched_info_switch(prev, next);
+	if (likely(prev != next)) {
+		rq->qu.staircase.preempted = 0;
+		rq->qu.staircase.cache_ticks = 0;
+		next->timestamp = now;
+		rq->nr_switches++;
+		rq->curr = next;
+		++*switch_count;
+
+		prepare_task_switch(rq, next);
+		prev = context_switch(rq, prev, next);
+		barrier();
+		/*
+		 * this_rq must be evaluated again because prev may have moved
+		 * CPUs since it called schedule(), thus the 'rq' on its stack
+		 * frame will be invalid.
+		 */
+		finish_task_switch(this_rq(), prev);
+	} else
+		spin_unlock_irq(&rq->lock);
+}
+
+static void staircase_set_normal_task_nice(task_t *p, long nice)
+{
+	int queued;
+	int old_prio, new_prio, delta;
+	struct runqueue *rq = task_rq(p);
+	struct staircase_runqueue_queue *rqq = &rq->qu.staircase;
+
+	queued = task_is_queued(p);
+	if (queued) {
+		dequeue_task(p, rqq);
+		dec_raw_weighted_load(rq, p);
+	}
+
+	old_prio = p->prio;
+	new_prio = NICE_TO_PRIO(nice);
+	delta = new_prio - old_prio;
+	p->static_prio = NICE_TO_PRIO(nice);
+	staircase_set_load_weight(p);
+	p->prio += delta;
+
+	if (queued) {
+		inc_raw_weighted_load(rq, p);
+		enqueue_task(p, rqq);
+		/*
+		 * If the task increased its priority or is running and
+		 * lowered its priority, then reschedule its CPU:
+		 */
+		if (delta < 0 || (delta > 0 && task_running(rq, p)))
+			resched_task(rq->curr);
+	}
+}
+
+static void staircase_init_batch_task(task_t *__unused)
+{
+}
+
+/*
+ * setscheduler - change the scheduling policy and/or RT priority of a thread.
+ */
+static void staircase_setscheduler(task_t *p, int policy, int prio)
+{
+	int oldprio;
+	int queued;
+	runqueue_t *rq = task_rq(p);
+
+	queued = task_is_queued(p);
+	if (queued)
+		deactivate_task(p, rq);
+	oldprio = p->prio;
+	__setscheduler(p, policy, prio);
+	if (queued) {
+		__activate_task(p, rq);
+		/*
+		 * Reschedule if we are currently running on this runqueue and
+		 * our priority decreased, or if we are not currently running on
+		 * this runqueue and our priority is higher than the current's
+		 */
+		if (task_running(rq, p)) {
+			if (p->prio > oldprio)
+				resched_task(rq->curr);
+		} else
+			preempt(p, rq);
+	}
+}
+
+/**
+ * staircase_sys_yield - yield the current processor to other threads.
+ *
+ * This function yields the current CPU by requeueing the calling thread at
+ * the lowest priority (real time tasks are simply moved to the back of
+ * their queue). If there are no other threads running on this CPU then
+ * this function will return.
+ */
+
+static long staircase_sys_yield(void)
+{
+	int newprio;
+	runqueue_t *rq = this_rq_lock();
+	struct staircase_runqueue_queue *rqq = &rq->qu.staircase;
+
+	schedstat_inc(rq, yld_cnt);
+	newprio = current->prio;
+	current->sdu.staircase.slice = slice(current);
+	current->sdu.staircase.time_slice = rr_interval(current);
+	if (likely(!rt_task(current)))
+		newprio = STAIRCASE_MAX_PRIO - 1;
+
+	if (newprio != current->prio) {
+		dequeue_task(current, rqq);
+		current->prio = newprio;
+		enqueue_task(current, rqq);
+	} else
+		requeue_task(current, rqq);
+
+	/*
+	 * Since we are going to call schedule() anyway, there's
+	 * no need to preempt or enable interrupts:
+	 */
+	__release(rq->lock);
+	_raw_spin_unlock(&rq->lock);
+	preempt_enable_no_resched();
+
+	schedule();
+
+	return 0;
+}
+
+static void staircase_yield(void)
+{
+	set_current_state(TASK_RUNNING);
+	staircase_sys_yield();
+}
+
+static void staircase_init_idle(task_t *idle, int cpu)
+{
+	idle->prio = STAIRCASE_MAX_PRIO;
+}
+
+#ifdef CONFIG_SMP
+/* source and destination queues will be already locked */
+static void staircase_migrate_queued_task(struct task_struct *p, int dest_cpu)
+{
+	struct runqueue *rq_src = task_rq(p);
+	struct runqueue *rq_dest = cpu_rq(dest_cpu);
+
+	/*
+	 * Sync timestamp with rq_dest's before activating.
+	 * The same thing could be achieved by doing this step
+	 * afterwards, and pretending it was a local activate.
+	 * This way is cleaner and logically correct.
+	 */
+	p->timestamp = p->timestamp - rq_src->timestamp_last_tick
+			+ rq_dest->timestamp_last_tick;
+	deactivate_task(p, rq_src);
+	set_task_cpu(p, dest_cpu);
+	activate_task(p, rq_dest, 0);
+	preempt(p, rq_dest);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static	void staircase_set_select_idle_first(struct runqueue *rq)
+{
+	__setscheduler(rq->idle, SCHED_FIFO, MAX_RT_PRIO-1);
+	/* Add idle task to _front_ of it's priority queue */
+	__activate_idle_task(rq->idle, rq);
+}
+
+static	void staircase_set_select_idle_last(struct runqueue *rq)
+{
+	deactivate_task(rq->idle, rq);
+	rq->idle->static_prio = STAIRCASE_MAX_PRIO;
+	__setscheduler(rq->idle, SCHED_NORMAL, 0);
+}
+
+static void staircase_migrate_dead_tasks(unsigned int dead_cpu)
+{
+	unsigned i;
+	struct runqueue *rq = cpu_rq(dead_cpu);
+
+	for (i = 0; i < STAIRCASE_MAX_PRIO; i++) {
+		struct list_head *list = &rq->qu.staircase.queue[i];
+		while (!list_empty(list))
+			migrate_dead(dead_cpu, list_entry(list->next, task_t,
+				run_list));
+	}
+}
+#endif
+#endif
+
+static void staircase_sched_init(void)
+{
+	init_task.sdu.staircase.time_slice = HZ;
+	init_task.sdu.staircase.slice = HZ;
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static void staircase_normalize_rt_task(struct task_struct *p)
+{
+	int queued;
+	unsigned long flags;
+	runqueue_t *rq;
+
+	rq = task_rq_lock(p, &flags);
+
+	queued = task_is_queued(p);
+	if (queued)
+		deactivate_task(p, rq);
+	__setscheduler(p, SCHED_NORMAL, 0);
+	if (queued) {
+		__activate_task(p, rq);
+		resched_task(rq->curr);
+	}
+
+	task_rq_unlock(rq, &flags);
+}
+#endif
+
+#ifdef CONFIG_SYSFS
+#define no_change(a) (a)
+SCHED_DRV_SYSFS_UINT_RW(sched_compute, no_change, no_change, 0, 1);
+SCHED_DRV_SYSFS_UINT_RW(sched_interactive, no_change, no_change, 0, 1);
+
+static struct attribute *staircase_attrs[] = {
+	&SCHED_DRV_SYSFS_ATTR(sched_compute),
+	&SCHED_DRV_SYSFS_ATTR(sched_interactive),
+	NULL,
+};
+#endif
+
+const struct sched_drv staircase_sched_drv = {
+	.name = "staircase",
+	.init_runqueue_queue = staircase_init_runqueue_queue,
+	.set_oom_time_slice = staircase_set_oom_time_slice,
+#ifdef CONFIG_SMP
+	.set_load_weight = staircase_set_load_weight,
+#endif
+	.task_timeslice = slice,
+	.wake_up_task = staircase_wake_up_task,
+	.fork = staircase_fork,
+	.wake_up_new_task = staircase_wake_up_new_task,
+	.exit = staircase_exit,
+#ifdef CONFIG_SMP
+	.move_tasks = staircase_move_tasks,
+#endif
+	.tick = staircase_tick,
+#ifdef CONFIG_SCHED_SMT
+	.head_of_queue = staircase_head_of_queue,
+	.dependent_sleeper_trumps = staircase_dependent_sleeper_trumps,
+#endif
+	.schedule = staircase_schedule,
+	.set_normal_task_nice = staircase_set_normal_task_nice,
+	.init_batch_task = staircase_init_batch_task,
+	.setscheduler = staircase_setscheduler,
+	.sys_yield = staircase_sys_yield,
+	.yield = staircase_yield,
+	.init_idle = staircase_init_idle,
+	.sched_init = staircase_sched_init,
+#ifdef CONFIG_SMP
+	.migrate_queued_task = staircase_migrate_queued_task,
+#ifdef CONFIG_HOTPLUG_CPU
+	.set_select_idle_first = staircase_set_select_idle_first,
+	.set_select_idle_last = staircase_set_select_idle_last,
+	.migrate_dead_tasks = staircase_migrate_dead_tasks,
+#endif
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	.normalize_rt_task = staircase_normalize_rt_task,
+#endif
+#ifdef CONFIG_SYSFS
+	.attrs = staircase_attrs,
+#endif
+};
diff -urN oldtree/mm/oom_kill.c newtree/mm/oom_kill.c
--- oldtree/mm/oom_kill.c	2006-03-08 18:48:03.004066750 +0000
+++ newtree/mm/oom_kill.c	2006-03-08 18:56:30.127760000 +0000
@@ -238,7 +238,7 @@
 	 * all the memory it needs. That way it should be able to
 	 * exit() and clear out its resources quickly...
 	 */
-	p->time_slice = HZ;
+	set_oom_time_slice(p, HZ);
 	set_tsk_thread_flag(p, TIF_MEMDIE);
 
 	force_sig(SIGKILL, p);
diff -urN oldtree/net/sunrpc/sched.c newtree/net/sunrpc/sched.c
--- oldtree/net/sunrpc/sched.c	2006-03-08 18:48:03.160076500 +0000
+++ newtree/net/sunrpc/sched.c	2006-03-08 18:56:30.127760000 +0000
@@ -290,7 +290,7 @@
 	if (action == NULL)
 		action = rpc_wait_bit_interruptible;
 	return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
-			action, TASK_INTERRUPTIBLE);
+			action, TASK_INTERRUPTIBLE|TASK_NONINTERACTIVE);
 }
 EXPORT_SYMBOL(__rpc_wait_for_completion_task);
 
@@ -677,7 +677,7 @@
 		/* Note: Caller should be using rpc_clnt_sigmask() */
 		status = out_of_line_wait_on_bit(&task->tk_runstate,
 				RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
-				TASK_INTERRUPTIBLE);
+				TASK_INTERRUPTIBLE|TASK_NONINTERACTIVE);
 		if (status == -ERESTARTSYS) {
 			/*
 			 * When a sync task receives a signal, it exits with
diff -urN oldtree/net/sunrpc/svcsock.c newtree/net/sunrpc/svcsock.c
--- oldtree/net/sunrpc/svcsock.c	2006-03-08 18:48:03.160076500 +0000
+++ newtree/net/sunrpc/svcsock.c	2006-03-08 18:56:30.131760250 +0000
@@ -1213,7 +1213,7 @@
 		 * We have to be able to interrupt this wait
 		 * to bring down the daemons ...
 		 */
-		set_current_state(TASK_INTERRUPTIBLE);
+		set_current_state(TASK_INTERRUPTIBLE|TASK_NONINTERACTIVE);
 		add_wait_queue(&rqstp->rq_wait, &wait);
 		spin_unlock_bh(&serv->sv_lock);
 
