diff -urN oldtree/block/ll_rw_blk.c newtree/block/ll_rw_blk.c
--- oldtree/block/ll_rw_blk.c	2006-09-24 17:03:56.000000000 -0400
+++ newtree/block/ll_rw_blk.c	2006-09-27 15:16:06.000000000 -0400
@@ -40,6 +40,8 @@
 static void init_request_from_bio(struct request *req, struct bio *bio);
 static int __make_request(request_queue_t *q, struct bio *bio);
 static struct io_context *current_io_context(gfp_t gfp_flags, int node);
+static int blk_protect_register(request_queue_t *q);
+static void blk_protect_unregister(request_queue_t *q);
 
 /*
  * For the allocated request tables
@@ -357,6 +359,18 @@
 
 EXPORT_SYMBOL(blk_queue_issue_flush_fn);
 
+void blk_queue_issue_protect_fn(request_queue_t *q, issue_protect_fn *ipf)
+{
+       q->issue_protect_fn = ipf;
+}
+EXPORT_SYMBOL(blk_queue_issue_protect_fn);
+
+void blk_queue_issue_unprotect_fn(request_queue_t *q, issue_unprotect_fn *iuf)
+{
+       q->issue_unprotect_fn = iuf;
+}
+EXPORT_SYMBOL(blk_queue_issue_unprotect_fn);
+
 /*
  * Cache flushing for ordered writes handling
  */
@@ -4042,6 +4056,7 @@
 		return ret;
 	}
 
+	blk_protect_register(q);
 	return 0;
 }
 
@@ -4050,6 +4065,7 @@
 	request_queue_t *q = disk->queue;
 
 	if (q && q->request_fn) {
+		blk_protect_unregister(q);
 		elv_unregister_queue(q);
 
 		kobject_uevent(&q->kobj, KOBJ_REMOVE);
@@ -4057,3 +4073,112 @@
 		kobject_put(&disk->kobj);
 	}
 }
+
+/*
+ * Restore the unplugging timer that we re-used
+ * to implement the queue freeze timeout...
+ */
+static void blk_unfreeze_work(void *data)
+{
+       request_queue_t *q = (request_queue_t *) data;
+
+       INIT_WORK(&q->unplug_work, blk_unplug_work, q);
+       q->unplug_timer.function = blk_unplug_timeout;
+
+       q->issue_unprotect_fn(q);
+}
+
+/*
+ * Called when the queue freeze timeout expires...
+ */
+static void blk_unfreeze_timeout(unsigned long data)
+{
+       request_queue_t *q = (request_queue_t *) data;
+       kblockd_schedule_work(&q->unplug_work);
+}
+
+/*
+ * The lower level driver parks and freezes the queue, and this block layer
+ *  function sets up the freeze timeout timer on return. If the queue is
+ *  already frozen then this is called to extend the timer...
+ */
+void blk_freeze_queue(request_queue_t *q, int seconds)
+{
+       /* install the unfreeze handlers before arming the timer */
+       INIT_WORK(&q->unplug_work, blk_unfreeze_work, q);
+       q->unplug_timer.function = blk_unfreeze_timeout;
+
+       /* set/reset the thaw timeout (extends it if already frozen) */
+       mod_timer(&q->unplug_timer, msecs_to_jiffies(seconds*1000) + jiffies);
+}
+
+/*
+ * When reading the 'protect' attribute, we return boolean frozen or active
+ * todo:
+ * - maybe we should return seconds remaining instead?
+ */
+static ssize_t queue_protect_show(struct request_queue *q, char *page)
+{
+       return queue_var_show(blk_queue_stopped(q), (page));
+}
+
+/*
+ * When writing the 'protect' attribute, input is the number of seconds
+ * to freeze the queue for. We call a lower level helper function to
+ * park the heads and freeze/block the queue, then we make a block layer
+ * call to setup the thaw timeout. If input is 0, then we thaw the queue.
+ */
+static ssize_t queue_protect_store(struct request_queue *q, const char *page, size_t count)
+{
+       unsigned long freeze = 0;
+       queue_var_store(&freeze, page, count);
+
+       if (freeze > 0) {
+/* Park and freeze */
+	       if (!blk_queue_stopped(q))
+		       q->issue_protect_fn(q);
+/* set / reset the thaw timer */
+	       blk_freeze_queue(q, freeze);
+       }
+       else
+	       blk_unfreeze_timeout((unsigned long) q);
+
+       return count;
+}
+
+static struct queue_sysfs_entry queue_protect_entry = {
+       .attr = {.name = "protect", .mode = S_IRUGO | S_IWUSR },
+       .show = queue_protect_show,
+       .store = queue_protect_store,
+};
+
+static int blk_protect_register(request_queue_t *q)
+{
+	int error = 0;
+
+/* check that the lower level driver has a protect handler */
+	if (!q->issue_protect_fn)
+		return 1;
+
+/* create the attribute */
+	error = sysfs_create_file(&q->kobj, &queue_protect_entry.attr);
+	if (error) {
+		printk(KERN_ERR
+		       "blk_protect_register(): failed to create protect queue attribute!\n");
+		return error;
+	}
+
+	kobject_get(&q->kobj);
+	return 0;
+}
+
+static void blk_protect_unregister(request_queue_t *q)
+{
+/* check that the lower level driver has a protect handler */
+	if (!q->issue_protect_fn)
+		return;
+
+/* remove the attribute */
+	sysfs_remove_file(&q->kobj, &queue_protect_entry.attr);
+	kobject_put(&q->kobj);
+}
diff -urN oldtree/drivers/ata/libata-core.c newtree/drivers/ata/libata-core.c
--- oldtree/drivers/ata/libata-core.c	2006-09-24 17:03:56.000000000 -0400
+++ newtree/drivers/ata/libata-core.c	2006-09-27 15:12:35.000000000 -0400
@@ -74,6 +74,10 @@
 
 struct workqueue_struct *ata_aux_wq;
 
+int libata_protect_method = 0;
+module_param_named(protect_method, libata_protect_method, int, 0444);
+MODULE_PARM_DESC(protect_method, "hdaps disk protection method (0=autodetect, 1=unload, 2=standby)");
+
 int atapi_enabled = 1;
 module_param(atapi_enabled, int, 0444);
 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
diff -urN oldtree/drivers/ata/libata-scsi.c newtree/drivers/ata/libata-scsi.c
--- oldtree/drivers/ata/libata-scsi.c	2006-09-27 14:03:02.000000000 -0400
+++ newtree/drivers/ata/libata-scsi.c	2006-09-27 15:15:23.000000000 -0400
@@ -841,6 +841,42 @@
 	}
 }
 
+extern int scsi_protect_queue(request_queue_t *q, int unload);
+extern int scsi_unprotect_queue(request_queue_t *q);
+
+static int ata_scsi_issue_protect_fn(request_queue_t *q)
+{
+        struct scsi_device *sdev = q->queuedata;
+        struct ata_port *ap = ata_shost_to_port(sdev->host);
+        struct ata_device *dev = &ap->device[sdev->id];
+        int unload;
+
+        if (libata_protect_method == 1) {
+                unload = 1;
+                printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): unload method requested, overriding drive capability check..\n");
+        }
+        else if (libata_protect_method == 2) {
+                unload = 0;
+                printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): standby method requested, overriding drive capability check..\n");
+        }
+        else if (ata_id_has_unload(dev->id)) {
+                unload = 1;
+                printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): unload support reported by drive..\n");
+        }
+        else {
+                unload = 0;
+                printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): unload support NOT reported by drive!..\n");
+        }
+
+        /* call scsi_protect_queue, requesting either unload or standby */
+        return scsi_protect_queue(q, unload);
+}
+
+static int ata_scsi_issue_unprotect_fn(request_queue_t *q)
+{
+        return scsi_unprotect_queue(q);
+}
+
 /**
  *	ata_scsi_slave_config - Set SCSI device attributes
  *	@sdev: SCSI device to examine
@@ -864,6 +900,8 @@
 
 	if (dev)
 		ata_scsi_dev_config(sdev, dev);
+        blk_queue_issue_protect_fn(sdev->request_queue, ata_scsi_issue_protect_fn);
+        blk_queue_issue_unprotect_fn(sdev->request_queue, ata_scsi_issue_unprotect_fn);
 
 	return 0;	/* scsi layer doesn't check return value, sigh */
 }
diff -urN oldtree/drivers/ata/libata.h newtree/drivers/ata/libata.h
--- oldtree/drivers/ata/libata.h	2006-09-24 17:03:56.000000000 -0400
+++ newtree/drivers/ata/libata.h	2006-09-27 15:13:05.000000000 -0400
@@ -40,6 +40,7 @@
 
 /* libata-core.c */
 extern struct workqueue_struct *ata_aux_wq;
+extern int libata_protect_method;
 extern int atapi_enabled;
 extern int atapi_dmadir;
 extern int libata_fua;
diff -urN oldtree/drivers/ide/ide-disk.c newtree/drivers/ide/ide-disk.c
--- oldtree/drivers/ide/ide-disk.c	2006-09-24 17:03:56.000000000 -0400
+++ newtree/drivers/ide/ide-disk.c	2006-09-27 15:11:26.000000000 -0400
@@ -72,6 +72,10 @@
 #include <asm/io.h>
 #include <asm/div64.h>
 
+int idedisk_protect_method = 0;
+module_param_named(protect_method, idedisk_protect_method, int, 0444);
+MODULE_PARM_DESC(protect_method, "hdaps disk protection method (0=autodetect, 1=unload, 2=standby)");
+
 struct ide_disk_obj {
 	ide_drive_t	*drive;
 	ide_driver_t	*driver;
@@ -731,6 +735,154 @@
 }
 
 /*
+ * todo:
+ *  - we freeze the queue regardless of success and rely on the 
+ *    ide_protect_queue function to thaw immediately if the command
+ *    failed (to be consistent with the libata handler)... should 
+ *    we also inspect here?
+ */
+void ide_end_protect_rq(struct request *rq, int error)
+{
+	struct completion *waiting = rq->waiting;
+
+	/* spin lock already acquired */
+	if (!blk_queue_stopped(rq->q))
+		blk_stop_queue(rq->q);
+
+	complete(waiting);
+}
+
+int ide_unprotect_queue(request_queue_t *q)
+{
+	struct request	rq;
+	unsigned long flags;
+	int		pending = 0, rc = 0;
+	ide_drive_t 	*drive = q->queuedata;
+	u8 		args[7], *argbuf = args;
+
+	if (!blk_queue_stopped(q))
+		return -EIO;
+
+	/* Are there any pending jobs on the queue? */
+	pending = ((q->rq.count[READ] > 0) || (q->rq.count[WRITE] > 0)) ? 1 : 0;
+	
+	spin_lock_irqsave(q->queue_lock, flags);
+	blk_start_queue(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	/* The unload feature of the IDLE_IMMEDIATE command
+	   temporarily disables HD power management from spinning down
+	   the disk. Any other command will reenable HD pm, so, if
+	   there are no pending jobs on the queue, another
+	   CHECK_POWER_MODE1 command without the unload feature should do
+	   just fine. */
+	if (!pending) {
+		printk(KERN_DEBUG "ide_unprotect_queue(): No pending I/O, re-enabling power management..\n");
+		memset(args, 0, sizeof(args));
+		argbuf[0] = 0xe5; /* CHECK_POWER_MODE1 */
+		ide_init_drive_cmd(&rq);
+		rq.cmd_type = REQ_TYPE_ATA_TASK;
+		rq.buffer = argbuf;
+		rc = ide_do_drive_cmd(drive, &rq, ide_head_wait);
+	}
+
+	return rc;
+}
+
+int ide_protect_queue(request_queue_t *q, int unload)
+{
+	ide_drive_t 	*drive = q->queuedata;
+	struct request	rq;
+	u8 		args[7], *argbuf = args;
+	int		ret = 0;
+	DECLARE_COMPLETION_ONSTACK(wait);
+
+	memset(&rq, 0, sizeof(rq));
+	memset(args, 0, sizeof(args));
+
+	if (blk_queue_stopped(q))
+		return -EIO;
+
+	if (unload) {
+		argbuf[0] = 0xe1;
+		argbuf[1] = 0x44;
+		argbuf[3] = 0x4c;
+		argbuf[4] = 0x4e;
+		argbuf[5] = 0x55;
+	} else
+		argbuf[0] = 0xe0;
+
+	/* Issue the park command & freeze */
+	ide_init_drive_cmd(&rq);
+
+	rq.cmd_type = REQ_TYPE_ATA_TASK;
+	rq.buffer = argbuf;
+	rq.waiting = &wait;
+	rq.end_io = ide_end_protect_rq;
+
+	ret = ide_do_drive_cmd(drive, &rq, ide_next);
+	wait_for_completion(&wait);
+	rq.waiting = NULL;
+
+	if (ret)
+	{
+		printk(KERN_DEBUG "ide_protect_queue(): Warning: head NOT parked!..\n");
+		ide_unprotect_queue(q);
+		return ret;
+	}
+
+	if (unload) {
+		if (args[3] == 0xc4)
+			printk(KERN_DEBUG "ide_protect_queue(): head parked..\n");
+		else {
+			/* error parking the head */
+			printk(KERN_DEBUG "ide_protect_queue(): head NOT parked!..\n");
+			ret = -EIO;
+			ide_unprotect_queue(q);
+		}
+	} else
+		printk(KERN_DEBUG "ide_protect_queue(): head park not requested, used standby!..\n");
+
+	return ret;
+}	
+
+int idedisk_issue_protect_fn(request_queue_t *q)
+{
+	ide_drive_t		*drive = q->queuedata;
+	int unload;
+
+	/*
+	 * Check capability of the device -
+	 *  - if "idle immediate with unload" is supported we use that, else
+	 *    we use "standby immediate" and live with spinning down the drive..
+	 *    (Word 84, bit 13 of IDENTIFY DEVICE data)
+	 */
+	if (idedisk_protect_method == 1) {
+		unload = 1;	
+		printk(KERN_DEBUG "idedisk_issue_protect_fn(): unload method requested, overriding drive capability check..\n");
+	}
+	else if (idedisk_protect_method == 2) {
+		unload = 0;	
+		printk(KERN_DEBUG "idedisk_issue_protect_fn(): standby method requested, overriding drive capability check..\n");
+	}
+	else if (drive->id->cfsse & (1 << 13)) {
+		unload = 1;
+		printk(KERN_DEBUG "idedisk_issue_protect_fn(): unload support reported by drive..\n");
+	}
+	else {
+		unload = 0;
+		printk(KERN_DEBUG "idedisk_issue_protect_fn(): unload support NOT reported by drive!..\n");
+	}
+
+	return ide_protect_queue(q, unload);
+}
+
+int idedisk_issue_unprotect_fn(request_queue_t *q)
+{
+	return ide_unprotect_queue(q);
+}
+
+/*
  * This is tightly woven into the driver->do_special can not touch.
  * DON'T do it again until a total personality rewrite is committed.
  */
@@ -986,6 +1138,9 @@
 		drive->wcache = 1;
 
 	write_cache(drive, 1);
+
+	blk_queue_issue_protect_fn(drive->queue, idedisk_issue_protect_fn);
+	blk_queue_issue_unprotect_fn(drive->queue, idedisk_issue_unprotect_fn);
 }
 
 static void ide_cacheflush_p(ide_drive_t *drive)
diff -urN oldtree/drivers/ide/ide-io.c newtree/drivers/ide/ide-io.c
--- oldtree/drivers/ide/ide-io.c	2006-09-24 17:03:56.000000000 -0400
+++ newtree/drivers/ide/ide-io.c	2006-09-27 15:11:26.000000000 -0400
@@ -1261,6 +1261,17 @@
 		}
 
 		/*
+		 * Don't accept a request when the queue is stopped (unless we
+		 * are resuming from suspend). Prevents existing queue entries 
+		 * being processed after queue is stopped by the hard disk 
+		 * protection mechanism...
+		 */
+		if (test_bit(QUEUE_FLAG_STOPPED, &drive->queue->queue_flags) && !blk_pm_resume_request(rq)) {
+			hwgroup->busy = 0;
+			break;
+		}
+
+		/*
 		 * Sanity: don't accept a request that isn't a PM request
 		 * if we are currently power managed. This is very important as
 		 * blk_stop_queue() doesn't prevent the elv_next_request()
@@ -1744,6 +1755,9 @@
 		where = ELEVATOR_INSERT_FRONT;
 		rq->cmd_flags |= REQ_PREEMPT;
 	}
+	if (action == ide_next)
+		where = ELEVATOR_INSERT_FRONT;
+
 	__elv_add_request(drive->queue, rq, where, 0);
 	ide_do_request(hwgroup, IDE_NO_IRQ);
 	spin_unlock_irqrestore(&ide_lock, flags);
diff -urN oldtree/drivers/scsi/scsi_lib.c newtree/drivers/scsi/scsi_lib.c
--- oldtree/drivers/scsi/scsi_lib.c	2006-09-24 17:03:56.000000000 -0400
+++ newtree/drivers/scsi/scsi_lib.c	2006-09-27 15:11:32.000000000 -0400
@@ -2277,3 +2277,211 @@
 	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
 }
 EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
+
+/*
+ * As per blk_end_sync_rq(), except calls scsi_internal_device_block
+ * to block the queue at command completion. Only called by
+ * scsi_protect_execute_wait().
+ * todo:
+ *  - we block the queue regardless of success and rely on the
+ *    scsi_protect_queue function to unblock if the command
+ *    failed... should we also inspect here?
+ */
+static void scsi_protect_wait_done(struct request *req, int uptodate)
+{
+	struct completion *waiting = req->waiting;
+	struct scsi_device *sdev = req->q->queuedata;
+
+	req->waiting = NULL;
+	__blk_put_request(req->q, req);
+	scsi_internal_device_block(sdev);
+
+	/*
+	 * complete last, if this is a stack request the process (and thus
+	 * the rq pointer) could be invalid right after this complete()
+	 */
+	complete(waiting);
+}
+
+/**
+ * scsi_protect_execute_wait - insert request
+ * @sdev:	scsi device
+ * @cmd:	scsi command
+ * @cmd_len:	length of scsi cdb
+ * @data_direction: data direction
+ * @buffer:	data buffer (this can be a kernel buffer or scatterlist)
+ * @bufflen:	len of buffer
+ * @sshdr:	request sense data structure
+ * @timeout:	request timeout in seconds
+ * @retries:	number of times to retry request
+ * @done:	end_io function
+ * @flags:	or into request flags
+ *
+ * Notes:	Derived from scsi_execute_async but waiting for completion.
+ *		Also sets completion function to scsi_protect_wait_done
+ **/
+int scsi_protect_execute_wait(struct scsi_device *sdev,
+    			      const unsigned char *cmd,
+			      int cmd_len, int data_direction,
+			      void *buffer, unsigned bufflen,
+			      void *sense, int timeout, int retries,
+			      void (*done)(struct request *, int),
+			      gfp_t gfp)
+{
+	struct request *req;
+	int ret = DRIVER_ERROR << 24;
+	int write = (data_direction == DMA_TO_DEVICE);
+	DECLARE_COMPLETION_ONSTACK(wait);
+
+	req = blk_get_request(sdev->request_queue, write, gfp);
+	if (!req)
+		return ret;
+	req->cmd_type = REQ_TYPE_BLOCK_PC; req->cmd_flags |= REQ_QUIET;
+
+	if (bufflen && blk_rq_map_kern(req->q, req, buffer, bufflen, gfp))
+		goto free_req;
+
+	req->cmd_len = cmd_len;
+	memcpy(req->cmd, cmd, req->cmd_len);
+	req->sense = sense;
+	req->sense_len = 0;
+	req->timeout = timeout;
+	req->retries = retries;
+
+	/*
+	 * we need an extra reference to the request, so we can look at
+	 * it after io completion
+	 */
+	req->ref_count++;
+
+	req->waiting = &wait;
+	blk_execute_rq_nowait(req->q, NULL, req, 1, done);
+	wait_for_completion(&wait);
+	req->waiting = NULL;
+
+	ret = req->errors;
+free_req:
+	blk_put_request(req);
+	return ret;
+}
+
+/*
+ * scsi_unprotect_queue()
+ *  - release the queue that was previously blocked
+ */
+int scsi_unprotect_queue(request_queue_t *q)
+{
+	struct scsi_device *sdev = q->queuedata;
+	int rc = 0, pending = 0;
+	u8 scsi_cmd[MAX_COMMAND_SIZE];
+	struct scsi_sense_hdr sshdr;
+
+	if (sdev->sdev_state != SDEV_BLOCK)
+		return -ENXIO;
+
+	/* Are there any pending jobs on the queue? */
+	pending = ((q->rq.count[READ] > 0) || (q->rq.count[WRITE] > 0)) ? 1 : 0;
+
+	rc = scsi_internal_device_unblock(sdev);
+	if (rc)
+		return rc;
+
+	if (!pending) {
+		printk(KERN_DEBUG "scsi_unprotect_queue(): No pending I/O, re-enabling power management..\n");
+
+		memset(scsi_cmd, 0, sizeof(scsi_cmd));
+		scsi_cmd[0]  = ATA_16;
+		scsi_cmd[1]  = (3 << 1); /* Non-data */
+		/* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */
+		scsi_cmd[14] = 0xe5; /* CHECK_POWER_MODE1 */
+
+		/* Good values for timeout and retries?  Values below
+   		   from scsi_ioctl_send_command() for default case... */
+		if (scsi_execute_req(sdev, scsi_cmd, DMA_NONE, NULL, 0, &sshdr,
+		   		     (10*HZ), 5))
+			rc = -EIO;
+	}
+	return rc;
+}
+EXPORT_SYMBOL_GPL(scsi_unprotect_queue);
+
+/*
+ * scsi_protect_queue()
+ *  - build and issue the park/standby command..
+ *  - queue is blocked during command completion handler
+ */
+int scsi_protect_queue(request_queue_t *q, int unload)
+{
+	struct scsi_device *sdev = q->queuedata;
+	int rc = 0;
+	u8 scsi_cmd[MAX_COMMAND_SIZE];
+	u8 args[7];
+	u8 *sense, *desc;
+
+	if (sdev->sdev_state != SDEV_RUNNING)
+		return -ENXIO;
+
+	sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOWAIT);
+	if (!sense)
+		return DRIVER_ERROR << 24;
+	memset(args, 0, sizeof(args));
+
+	if (unload) {
+		args[0] = 0xe1;
+		args[1] = 0x44;
+		args[3] = 0x4c;
+		args[4] = 0x4e;
+		args[5] = 0x55;
+	} else
+		args[0] = 0xe0;
+
+	memset(scsi_cmd, 0, sizeof(scsi_cmd));
+	scsi_cmd[0]  = ATA_16;
+	scsi_cmd[1]  = (3 << 1); /* Non-data */
+	scsi_cmd[2]  = 0x20;     /* no off.line, or data xfer, request cc */
+	scsi_cmd[4]  = args[1];
+	scsi_cmd[6]  = args[2];
+	scsi_cmd[8]  = args[3];
+	scsi_cmd[10] = args[4];
+	scsi_cmd[12] = args[5];
+	scsi_cmd[14] = args[0];
+
+	rc = scsi_protect_execute_wait(sdev, scsi_cmd,
+				       COMMAND_SIZE(scsi_cmd[0]), DMA_NONE,
+				       NULL, 0, sense, (10*HZ), 5,
+				       &scsi_protect_wait_done, GFP_NOWAIT);
+
+	if (rc != ((DRIVER_SENSE << 24) + SAM_STAT_CHECK_CONDITION)) {
+		printk(KERN_DEBUG "scsi_protect_queue(): head NOT parked!..\n");
+		scsi_unprotect_queue(q);		/* just in case we still managed to block */
+		rc = -EIO;
+		goto out;
+	}
+
+	desc = sense + 8;
+
+	/* Retrieve data from check condition */
+	args[1] = desc[3];
+	args[2] = desc[5];
+	args[3] = desc[7];
+	args[4] = desc[9];
+	args[5] = desc[11];
+	args[0] = desc[13];
+
+	if (unload) {
+		if (args[3] == 0xc4)
+			printk(KERN_DEBUG "scsi_protect_queue(): head parked..\n");
+		else {
+			/* error parking the head */
+			printk(KERN_DEBUG "scsi_protect_queue(): head NOT parked!..\n");
+			rc = -EIO;
+			scsi_unprotect_queue(q);
+		}
+	} else
+		printk(KERN_DEBUG "scsi_protect_queue(): head park not requested, used standby!..\n");
+
+out:
+	kfree(sense);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(scsi_protect_queue);
diff -urN oldtree/include/linux/ata.h newtree/include/linux/ata.h
--- oldtree/include/linux/ata.h	2006-09-24 17:03:56.000000000 -0400
+++ newtree/include/linux/ata.h	2006-09-27 15:11:32.000000000 -0400
@@ -283,6 +283,7 @@
 #define ata_id_rahead_enabled(id) ((id)[85] & (1 << 6))
 #define ata_id_wcache_enabled(id) ((id)[85] & (1 << 5))
 #define ata_id_hpa_enabled(id)	((id)[85] & (1 << 10))
+#define ata_id_has_unload(id)   ((id)[84] & (1 << 13))
 #define ata_id_has_fua(id)	((id)[84] & (1 << 6))
 #define ata_id_has_flush(id)	((id)[83] & (1 << 12))
 #define ata_id_has_flush_ext(id) ((id)[83] & (1 << 13))
diff -urN oldtree/include/linux/blkdev.h newtree/include/linux/blkdev.h
--- oldtree/include/linux/blkdev.h	2006-09-24 17:03:56.000000000 -0400
+++ newtree/include/linux/blkdev.h	2006-09-27 15:11:32.000000000 -0400
@@ -345,6 +345,8 @@
 typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
 typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
+typedef int (issue_protect_fn) (request_queue_t *);
+typedef int (issue_unprotect_fn) (request_queue_t *);
 
 enum blk_queue_state {
 	Queue_down,
@@ -387,6 +389,8 @@
 	issue_flush_fn		*issue_flush_fn;
 	prepare_flush_fn	*prepare_flush_fn;
 	softirq_done_fn		*softirq_done_fn;
+	issue_protect_fn	*issue_protect_fn;
+	issue_unprotect_fn	*issue_unprotect_fn;
 
 	/*
 	 * Dispatch queue sorting
@@ -740,6 +744,8 @@
 extern unsigned blk_ordered_cur_seq(request_queue_t *);
 extern unsigned blk_ordered_req_seq(struct request *);
 extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);
+extern void blk_queue_issue_protect_fn(request_queue_t *, issue_protect_fn *);
+extern void blk_queue_issue_unprotect_fn(request_queue_t *, issue_unprotect_fn *);
 
 extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
diff -urN oldtree/include/linux/ide.h newtree/include/linux/ide.h
--- oldtree/include/linux/ide.h	2006-09-24 17:03:56.000000000 -0400
+++ newtree/include/linux/ide.h	2006-09-27 15:11:32.000000000 -0400
@@ -1090,6 +1090,7 @@
  */
 typedef enum {
 	ide_wait,	/* insert rq at end of list, and wait for it */
+	ide_next,	/* insert rq immediately after current request */
 	ide_preempt,	/* insert rq in front of current request */
 	ide_head_wait,	/* insert rq in front of current request and wait for it */
 	ide_end		/* insert rq at end of list, but don't wait for it */
diff -urN oldtree/kernel/sched_staircase.c newtree/kernel/sched_staircase.c
--- oldtree/kernel/sched_staircase.c	2006-09-26 19:11:25.000000000 -0400
+++ newtree/kernel/sched_staircase.c	2006-09-27 15:07:22.000000000 -0400
@@ -424,7 +424,7 @@
 
 		/* runqueue-specific stats */
 		seq_printf(seq,
-		    "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
+		    "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
 		    cpu, rq->yld_both_empty,
 		    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_cnt,
 		    rq->sched_switch, rq->sched_cnt, rq->sched_goidle,
@@ -571,7 +571,7 @@
 	if (t->sched_info.last_queued)
 		delta_jiffies = now - t->sched_info.last_queued;
 	sched_info_dequeued(t);
-        t->sched_info.max_delay = max(t->sched_info.max_delay, diff);
+        t->sched_info.max_delay = max(t->sched_info.max_delay, delta_jiffies);
 	t->sched_info.run_delay += delta_jiffies;
 	t->sched_info.last_arrival = now;
 	t->sched_info.pcnt++;
Files oldtree/scripts/kconfig/mconf and newtree/scripts/kconfig/mconf differ
