Index: MM-2.6.12/include/linux/sched_drv.h
===================================================================
--- MM-2.6.12.orig/include/linux/sched_drv.h
+++ MM-2.6.12/include/linux/sched_drv.h
@@ -27,7 +27,6 @@ struct sched_drv {
 	int (*move_tasks)(runqueue_t *, int, runqueue_t *, unsigned long,
 		 struct sched_domain *, enum idle_type, int *);
 #endif
-	void (*systime_hook)(runqueue_t *, cputime_t);
 	void (*tick)(struct task_struct*, struct runqueue *, unsigned long long);
 #ifdef CONFIG_SCHED_SMT
 	struct task_struct *(*head_of_queue)(union runqueue_queue *);
Index: MM-2.6.12/include/linux/sched_runq.h
===================================================================
--- MM-2.6.12.orig/include/linux/sched_runq.h
+++ MM-2.6.12/include/linux/sched_runq.h
@@ -41,7 +41,6 @@ struct staircase_runqueue_queue {
 	struct list_head queue[STAIRCASE_NUM_PRIO_SLOTS - 1];
 	unsigned int cache_ticks;
 	unsigned int preempted;
-	unsigned long systime_centile;
 };
 #endif
 
Index: MM-2.6.12/kernel/nicksched.c
===================================================================
--- MM-2.6.12.orig/kernel/nicksched.c
+++ MM-2.6.12/kernel/nicksched.c
@@ -974,7 +974,6 @@ const struct sched_drv nick_sched_drv = 
 #ifdef CONFIG_SMP
 	.move_tasks = nick_move_tasks,
 #endif
-	.systime_hook = blank_systime_hook,
 	.tick = nick_tick,
 #ifdef CONFIG_SCHED_SMT
 	.head_of_queue = nick_head_of_queue,
Index: MM-2.6.12/kernel/sched_drv.c
===================================================================
--- MM-2.6.12.orig/kernel/sched_drv.c
+++ MM-2.6.12/kernel/sched_drv.c
@@ -135,10 +135,3 @@ void __init sched_drv_sysfs_init(void)
 		(void)kobject_register(&sched_drv_kobj);
  	}
 }
-
-/*
- * Dummy functions
- */
-void blank_systime_hook(runqueue_t *rq, cputime_t cputime)
-{
-}
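
The blank stub can go because the systime_hook member is removed from struct sched_drv altogether, so drivers that never needed system-time accounting no longer have to carry a no-op implementation. A minimal userspace sketch of that pattern, not part of the patch (demo_drv and demo_tick are invented names):

/*
 * Illustrative model of the sched_drv hook table: every driver fills a
 * struct of function pointers, so deleting a hook from the struct also
 * deletes the need for per-driver blank stubs.
 */
#include <stdio.h>

struct demo_drv {
	const char *name;
	void (*tick)(unsigned long long now);
	/* systime_hook used to live here; with it gone, no driver
	 * has to provide a blank function just to fill the slot. */
};

static void demo_tick(unsigned long long now)
{
	printf("%llu: tick\n", now);
}

static const struct demo_drv drv = {
	.name = "demo",
	.tick = demo_tick,
};

int main(void)
{
	drv.tick(1000ULL);
	return 0;
}
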
Index: MM-2.6.12/kernel/sched_spa.c
===================================================================
--- MM-2.6.12.orig/kernel/sched_spa.c
+++ MM-2.6.12/kernel/sched_spa.c
@@ -356,7 +356,7 @@ static void activate_task(task_t *p, run
 		else
 			p->sdu.spa.time_slice = normal_task_timeslice(p);
 	}
-	p->sdu.spa.flags &= ~SPAF_UISLEEP;
+	p->sdu.spa.flags &= ~(SPAF_UISLEEP | SPAF_NONIASLEEP);
 	__activate_task(p, rq);
 }
 
@@ -398,6 +398,13 @@ static void spa_wake_up_task(struct task
 		rq->nr_uninterruptible--;
 
 	/*
+	 * Tasks waking from (declared) non-interactive sleep will not receive
+	 * any interactive bonus.
+	 */
+	if (old_state & TASK_NONINTERACTIVE)
+		p->sdu.spa.flags |= SPAF_NONIASLEEP;
+
+	/*
 	 * Sync wakeups (i.e. those types of wakeups where the waker
 	 * has indicated that it will leave the CPU in short order)
 	 * don't trigger a preemption, if the woken up task will run on
@@ -1504,7 +1511,6 @@ const struct sched_drv spa_nf_sched_drv 
 #ifdef CONFIG_SMP
 	.move_tasks = spa_move_tasks,
 #endif
-	.systime_hook = blank_systime_hook,
 	.tick = spa_tick,
 #ifdef CONFIG_SCHED_SMT
 	.head_of_queue = spa_head_of_queue,
@@ -1551,7 +1557,6 @@ const struct sched_drv zaphod_sched_drv 
 	.head_of_queue = spa_head_of_queue,
 	.dependent_sleeper_trumps = spa_dependent_sleeper_trumps,
 #endif
-	.systime_hook = blank_systime_hook,
 	.schedule = spa_schedule,
 	.set_normal_task_nice = spa_set_normal_task_nice,
 	.setscheduler = spa_setscheduler,
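
The new SPAF_NONIASLEEP flag flows as follows: spa_wake_up_task() sets it when the sleeper's old state included TASK_NONINTERACTIVE, and activate_task() clears it together with SPAF_UISLEEP. The userspace model below is an illustration only; demo_task and the TASK_NONINTERACTIVE stand-in value are invented for the sketch, and the exact ordering of wake-up, priority recalculation and activation is determined by the SPA code, not shown here.

/* Illustrative life cycle of SPAF_NONIASLEEP (not kernel code). */
#include <assert.h>
#include <stdio.h>

#define TASK_NONINTERACTIVE	64	/* stand-in value for the sketch */
#define SPAF_UISLEEP		(1 << 1)
#define SPAF_NONIASLEEP		(1 << 2)

struct demo_task {
	unsigned int flags;
};

/* wake-up path: remember that the finished sleep was declared non-interactive */
static void demo_wake_up(struct demo_task *p, unsigned int old_state)
{
	if (old_state & TASK_NONINTERACTIVE)
		p->flags |= SPAF_NONIASLEEP;
}

/* activation path: both sleep-type flags are cleared again */
static void demo_activate(struct demo_task *p)
{
	p->flags &= ~(SPAF_UISLEEP | SPAF_NONIASLEEP);
}

int main(void)
{
	struct demo_task t = { 0 };

	demo_wake_up(&t, TASK_NONINTERACTIVE);
	assert(t.flags & SPAF_NONIASLEEP);
	demo_activate(&t);
	assert(!(t.flags & SPAF_NONIASLEEP));
	printf("SPAF_NONIASLEEP is set on wake-up and cleared on activation\n");
	return 0;
}
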
Index: MM-2.6.12/kernel/staircase.c
===================================================================
--- MM-2.6.12.orig/kernel/staircase.c
+++ MM-2.6.12/kernel/staircase.c
@@ -2,8 +2,8 @@
  *  kernel/staircase.c
  *  Copyright (C) 1991-2005  Linus Torvalds
  *
- * 2005-05-26 Staircase scheduler by Con Kolivas
- *            Staircase v11.2
+ * 2005-06-07 Staircase scheduler by Con Kolivas
+ *            Staircase v11.3
  */
 #include <linux/sched.h>
 #include <linux/init.h>
@@ -17,9 +17,8 @@
 /*
  * Unique staircase process flags used by scheduler.
  */
-#define SF_FORKED	0x00000001	/* I have just forked */
+#define SF_NONSLEEP	0x00000001	/* Waiting on in-kernel activity */
 #define SF_YIELDED	0x00000002	/* I have just yielded */
-#define SF_UISLEEP	0x00000004	/* Uninterruptible sleep */
 
 #define task_is_queued(p) (!list_empty(&(p)->run_list))
 
@@ -263,31 +262,29 @@ static void continue_slice(task_t *p)
  * slice instead of starting a new one at high priority.
  */
 static inline void recalc_task_prio(task_t *p, unsigned long long now,
-	unsigned long rq_systime, unsigned long rq_running)
+	unsigned long rq_running)
 {
 	unsigned long sleep_time = ns_diff(now, p->timestamp);
 
 	/*
 	 * Priority is elevated back to best by amount of sleep_time.
-	 * sleep_time is scaled down by in-kernel system time and by
-	 * number of tasks currently running.
+	 * sleep_time is scaled down by number of tasks currently running.
 	 */
-	sleep_time /= rq_running + 1;
-	if (rq_systime)
-		sleep_time = sleep_time / 200 * (100 - rq_systime);
+	if (rq_running > 1)
+		sleep_time /= rq_running;
 
 	p->sdu.staircase.totalrun += p->sdu.staircase.runtime;
 	if (NS_TO_JIFFIES(p->sdu.staircase.totalrun) >=
 		p->sdu.staircase.slice && NS_TO_JIFFIES(sleep_time) <
 		p->sdu.staircase.slice) {
-			p->sdu.staircase.sflags &= ~SF_FORKED;
+			p->sdu.staircase.sflags &= ~SF_NONSLEEP;
 			dec_burst(p);
 			goto new_slice;
 	}
 
-	if (p->sdu.staircase.sflags & SF_FORKED) {
+	if (p->sdu.staircase.sflags & SF_NONSLEEP) {
 		continue_slice(p);
-		p->sdu.staircase.sflags &= ~SF_FORKED;
+		p->sdu.staircase.sflags &= ~SF_NONSLEEP;
 		return;
 	}
 
@@ -297,7 +294,7 @@ static inline void recalc_task_prio(task
 	}
 
 	if (sleep_time >= p->sdu.staircase.totalrun) {
-		if (!(p->sdu.staircase.sflags & SF_UISLEEP))
+		if (!(p->sdu.staircase.sflags & SF_NONSLEEP))
 			inc_burst(p);
 		goto new_slice;
 	}
@@ -330,9 +327,8 @@ static void activate_task(task_t *p, run
 #endif
 	p->sdu.staircase.slice = slice(p);
 	p->sdu.staircase.time_slice = rr_interval(p);
-	recalc_task_prio(p, now, rq->qu.staircase.systime_centile / 100,
-		rq->nr_running);
-	p->sdu.staircase.sflags &= ~SF_UISLEEP;
+	recalc_task_prio(p, now, rq->nr_running);
+	p->sdu.staircase.sflags &= ~SF_NONSLEEP;
 	p->prio = effective_prio(p);
 	p->timestamp = now;
 	__activate_task(p, rq);
@@ -387,6 +383,13 @@ static void staircase_wake_up_task(struc
 		rq->nr_uninterruptible--;
 
 	/*
+	 * Tasks that have marked their sleep as non-interactive get
+	 * woken up without that sleep counting towards an interactivity boost.
+	 */
+	if (old_state & TASK_NONINTERACTIVE)
+		p->sdu.staircase.sflags |= SF_NONSLEEP;
+
+	/*
 	 * Sync wakeups (i.e. those types of wakeups where the waker
 	 * has indicated that it will leave the CPU in short order)
 	 * don't trigger a preemption, if the woken up task will run on
@@ -432,7 +435,7 @@ static void staircase_wake_up_new_task(t
 	p->sdu.staircase.burst = 0;
 
 	if (likely(cpu == this_cpu)) {
-		current->sdu.staircase.sflags |= SF_FORKED;
+		current->sdu.staircase.sflags |= SF_NONSLEEP;
 		activate_task(p, rq, 1);
 		if (!(clone_flags & CLONE_VM))
 			/*
@@ -467,7 +470,7 @@ static void staircase_wake_up_new_task(t
 		 */
 		task_rq_unlock(rq, &flags);
 		this_rq = task_rq_lock(current, &flags);
-		current->sdu.staircase.sflags |= SF_FORKED;
+		current->sdu.staircase.sflags |= SF_NONSLEEP;
 	}
 
 	task_rq_unlock(this_rq, &flags);
@@ -587,12 +590,6 @@ static void time_slice_expired(task_t *p
 	enqueue_task(p, rqq);
 }
 
-static void staircase_systime_hook(runqueue_t *rq, cputime_t cputime)
-{
-	/* For calculating rolling percentage of sys time per runqueue */
-	rq->qu.staircase.systime_centile += cputime * 100;
-}
-
 /*
  * This function gets called by the timer code, with HZ frequency.
  * We call it with interrupts disabled.
@@ -603,10 +600,6 @@ static void staircase_tick(struct task_s
 	int cpu = smp_processor_id();
 	unsigned long debit, expired_balance = rq->nr_running;
 
-	/* Rolling percentage systime per runqueue */
-	rq->qu.staircase.systime_centile = rq->qu.staircase.systime_centile *
-		99 / 100;
-
 	if (p == rq->idle) {
 		if (wake_priority_sleeper(rq))
 			goto out;
@@ -715,7 +708,7 @@ static void staircase_schedule(void)
 		else {
 			if (prev->state == TASK_UNINTERRUPTIBLE) {
 				rq->nr_uninterruptible++;
-				prev->sdu.staircase.sflags |= SF_UISLEEP;
+				prev->sdu.staircase.sflags |= SF_NONSLEEP;
 			}
 			deactivate_task(prev, rq);
 		}
@@ -1022,7 +1015,6 @@ const struct sched_drv staircase_sched_d
 #ifdef CONFIG_SMP
 	.move_tasks = staircase_move_tasks,
 #endif
-	.systime_hook = staircase_systime_hook,
 	.tick = staircase_tick,
 #ifdef CONFIG_SCHED_SMT
 	.head_of_queue = staircase_head_of_queue,
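
The recalc_task_prio() change replaces the old sleep_time scaling, which divided by rq_running + 1 and then by a rolling system-time percentage, with a single division by rq_running that only applies when more than one task is runnable. A small userspace sketch contrasting the two formulas (old_scale and new_scale are invented names; the constants are taken from the removed and added lines above):

/* Illustrative comparison of the old and new sleep_time scaling. */
#include <stdio.h>

static unsigned long old_scale(unsigned long sleep_time,
			       unsigned long rq_systime,
			       unsigned long rq_running)
{
	sleep_time /= rq_running + 1;
	if (rq_systime)
		sleep_time = sleep_time / 200 * (100 - rq_systime);
	return sleep_time;
}

static unsigned long new_scale(unsigned long sleep_time,
			       unsigned long rq_running)
{
	if (rq_running > 1)
		sleep_time /= rq_running;
	return sleep_time;
}

int main(void)
{
	unsigned long slept = 4000000UL;	/* 4ms of sleep, in ns */

	/* with a single runnable task the sleep credit is no longer halved */
	printf("old: %lu\n", old_scale(slept, 0, 1));	/* 2000000 */
	printf("new: %lu\n", new_scale(slept, 1));	/* 4000000 */
	return 0;
}
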
Index: MM-2.6.12/include/linux/sched_task.h
===================================================================
--- MM-2.6.12.orig/include/linux/sched_task.h
+++ MM-2.6.12/include/linux/sched_task.h
@@ -54,6 +54,7 @@ struct spa_sched_drv_task {
 
 #define SPAF_SINBINNED	(1 << 0)	/* I am sinbinned */
 #define SPAF_UISLEEP	(1 << 1)	/* Uninterruptible sleep */
+#define SPAF_NONIASLEEP	(1 << 2)	/* Non-interactive sleep */
 
 #define task_is_sinbinned(p) (unlikely(((p)->sdu.spa.flags & SPAF_SINBINNED) != 0))
 
Index: MM-2.6.12/kernel/sched_zaphod.c
===================================================================
--- MM-2.6.12.orig/kernel/sched_zaphod.c
+++ MM-2.6.12/kernel/sched_zaphod.c
@@ -396,7 +396,9 @@ unsigned int zaphod_effective_prio(struc
 
 	/* no bonuses for tasks that have exceeded their cap */
 	if (likely(TASK_CPUSTATS(p).cpu_usage_rate < MIN_RATE_CAP(p))) {
-		bonus = SCHED_IA_BONUS_RND(TASK_ZD(p).interactive_bonus);
+		/* No IA bonus when waking from (declared) non IA sleep */
+		if ((p->sdu.spa.flags & SPAF_NONIASLEEP) == 0)
+			bonus = SCHED_IA_BONUS_RND(TASK_ZD(p).interactive_bonus);
 		bonus += TASK_ZD(p).throughput_bonus;
 	}
 
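
In zaphod_effective_prio() the interactive bonus is now withheld when SPAF_NONIASLEEP is set, while the throughput bonus is still granted. A hedged userspace sketch of that gating follows; demo_bonus and struct demo_task are illustrative stand-ins, not the kernel types.

/* Illustrative model of the gated bonus calculation (not kernel code). */
#include <stdio.h>

#define SPAF_NONIASLEEP	(1 << 2)

struct demo_task {
	unsigned int flags;
	unsigned int interactive_bonus;	/* rounded IA bonus */
	unsigned int throughput_bonus;
};

/* No IA bonus when the task woke from a declared non-interactive sleep */
static unsigned int demo_bonus(const struct demo_task *p)
{
	unsigned int bonus = 0;

	if ((p->flags & SPAF_NONIASLEEP) == 0)
		bonus = p->interactive_bonus;
	return bonus + p->throughput_bonus;
}

int main(void)
{
	struct demo_task a = { 0, 7, 2 };
	struct demo_task b = { SPAF_NONIASLEEP, 7, 2 };

	printf("normal sleep: bonus %u\n", demo_bonus(&a));		/* 9 */
	printf("non-interactive sleep: bonus %u\n", demo_bonus(&b));	/* 2 */
	return 0;
}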
